"""
LICENCE
-------
Copyright 2015 by Kitware, Inc. All Rights Reserved. Please refer to
KITWARE_LICENSE.TXT for licensing information, or contact General Counsel,
Kitware, Inc., 28 Corporate Drive, Clifton Park, NY 12065.
"""
import cPickle
import logging
import multiprocessing.pool
import numpy
import os.path as osp
import svm
import svmutil
import smqtk_config
from smqtk.indexing import Indexer
from smqtk.utils import safe_create_dir, SimpleTimer
from smqtk.utils.distance_functions import histogram_intersection_distance
def _svm_model_hik_helper(i, j, i_feat, j_feat):
"""
HIK async compute helper
"""
log = logging.getLogger("_svm_model_hik_helper")
log.debug("Computing HIK for [%d, %d]", i, j)
# noinspection PyUnresolvedReferences
ij_hik = histogram_intersection_distance(i_feat, j_feat)
return ij_hik
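# Illustrative sketch only: the real ``histogram_intersection_distance`` lives
# in smqtk.utils.distance_functions and its exact normalization may differ.
# For two equal-length, L1-normalized histograms the intersection similarity
# is the sum of element-wise minima, and a distance can be derived from it:
def _example_hik_distance(a, b):
    """
    Hypothetical reference implementation for two 1-D numpy arrays ``a`` and
    ``b`` representing L1-normalized histograms.
    """
    return 1.0 - numpy.minimum(a, b).sum()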
class SVMIndexerHIK (Indexer):
"""
Indexer using SVM classification model with Platt scaling
Inherited from progenitor ALADDIN project
"""
# Pick lowest % intersecting elements as auto-bg
AUTO_NEG_PERCENT = 0.10
def __init__(self, data_dir):
self.data_dir = osp.join(smqtk_config.DATA_DIR, data_dir)
# Array of UIDs; the position of a UID in this array is the index that
# UID refers to in the internal structures below
#: :type: list[collections.Hashable]
self._uid_array = None
#: :type: dict[object, int]
self._uid2idx_map = None
# Matrix of features
#: :type: numpy.core.multiarray.ndarray
self._feature_mat = None
# Distance kernel matrix
#: :type: numpy.core.multiarray.ndarray
self._distance_mat = None
# Templated to take the W1 integer weight argument, which should be
# floor(num_negatives / num_positives), with a minimum value of 1
# - The ``-t 5`` parameter value is unique to the custom libSVM
# implementation we build.
self.svm_train_params = '-q -t 5 -b 1 -c 2 -w1 %f -g 0.0078125'
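# For example, with 8 negatives and 2 positive exemplars the weight would be
# floor(8 / 2) = 4, and the filled-in parameter string becomes:
#   self.svm_train_params % 4  ->  '-q -t 5 -b 1 -c 2 -w1 4.000000 -g 0.0078125'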
if self.has_model_files():
self._load_model_files()
@property
def uid_list_filepath(self):
return osp.join(self.data_dir, "uid_list.pickle")
@property
def feature_mat_filepath(self):
return osp.join(self.data_dir, "feature_mat.npy")
@property
def distance_mat_filepath(self):
return osp.join(self.data_dir, 'hik_distance_kernel.npy')
def has_model_files(self):
return (osp.isfile(self.uid_list_filepath)
and osp.isfile(self.feature_mat_filepath)
and osp.isfile(self.distance_mat_filepath))
def _load_model_files(self):
with open(self.uid_list_filepath, 'rb') as ifile:
#: :type: list[collections.Hashable]
self._uid_array = cPickle.load(ifile)
#: :type: numpy.core.multiarray.ndarray
self._feature_mat = numpy.load(self.feature_mat_filepath)
#: :type: numpy.core.multiarray.ndarray
self._distance_mat = numpy.load(self.distance_mat_filepath)
# Mapping of element UID to array/matrix index position
#: :type: dict of int
self._uid2idx_map = {}
for idx, uid in enumerate(self._uid_array):
self._uid2idx_map[uid] = idx
def has_model(self):
"""
:return: True if this indexer has a valid initialized model for
extension and ranking (or doesn't need one to perform those tasks).
:rtype: bool
"""
return (
self._uid_array is not None
and self._feature_mat is not None
and 0 not in self._feature_mat.shape # has dimensionality
and self._distance_mat is not None
and 0 not in self._distance_mat.shape # has dimensionality
)
def generate_model(self, descriptor_map, parallel=None, **kwargs):
"""
Generate this indexer's data model using the given features,
saving it to files in the configured data directory.
:raises RuntimeError: Precaution error when there is an existing data
model for this indexer. Manually delete or move the existing
model before computing another one.
Specific implementations may error on other things. See the specific
implementations for more details.
:raises ValueError: The given feature map had no content.
:param descriptor_map: Mapping of integer IDs to feature data. All feature
data must be of the same size!
:type descriptor_map: dict of (int, numpy.core.multiarray.ndarray)
:param parallel: Optional specification of how many processors to use
when pooling sub-tasks. If None, we attempt to use all available
cores.
:type parallel: int
"""
if self.has_model():
raise RuntimeError("WARNING: This implementation already has a "
"model generated. These can take a long time to "
"generate, thus we require external manual "
"removal of model files before we will generate "
"a new model.")
num_features = len(descriptor_map)
ordered_uids = sorted(descriptor_map.keys())
sample_feature = descriptor_map[ordered_uids[0]]
feature_len = len(sample_feature)
# Pre-allocating arrays
#: :type: list[collections.Hashable]
self._uid_array = []
self._feature_mat = numpy.zeros(
(num_features, feature_len), dtype=sample_feature.dtype
)
self._distance_mat = numpy.zeros(
(num_features, num_features), dtype=sample_feature.dtype
)
with SimpleTimer("Populating feature matrix", self.log.info):
for i, (uid, feat) in enumerate(descriptor_map.iteritems()):
self._uid_array.append(uid)
self._feature_mat[i] = feat
with SimpleTimer("Computing HI matrix kernel", self.log.info):
# Using [process] Pool here with large sets eats far too much RAM.
# Using a ThreadPool here is actually much slower. Not sure why?
for i in range(num_features):
for j in range(i, num_features):
self._distance_mat[i, j] = self._distance_mat[j, i] = \
histogram_intersection_distance(self._feature_mat[i],
self._feature_mat[j])
with SimpleTimer("Saving data files", self.log.info):
safe_create_dir(self.data_dir)
with open(self.uid_list_filepath, 'wb') as ofile:
cPickle.dump(self._uid_array, ofile)
numpy.save(self.feature_mat_filepath, self._feature_mat)
numpy.save(self.distance_mat_filepath, self._distance_mat)
# TODO: destruct and reload matrices in memmap mode
# - see numpy.load() doc-string
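# A minimal sketch of what that TODO could look like (assumption: the
# matrices are only read from this point on, so read-only memory-mapping
# suffices):
#
#   self._feature_mat = numpy.load(self.feature_mat_filepath, mmap_mode='r')
#   self._distance_mat = numpy.load(self.distance_mat_filepath, mmap_mode='r')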
def extend_model(self, uid_feature_map, parallel=None):
"""
Extend, in memory, the current model with the given feature elements.
Online extensions are not saved to data files.
NOTE: For now, if there is currently no data model created for this
indexer / descriptor combination, we will error. In the future, I
would imagine a new model would be created.
:raises RuntimeError: No current model.
:param uid_feature_map: Mapping of UIDs to features to extend this
indexer's model with.
:type uid_feature_map: dict of (collections.Hashable, numpy.core.multiarray.ndarray)
:param parallel: Optional specification of how many processors to use
when pooling sub-tasks. If None, we attempt to use all available
cores. Not all implementations support parallel model extension.
:type parallel: int
"""
if not self.has_model():
raise RuntimeError("No model available for this indexer.")
# Shortcut when we're not given anything to actually process
if not uid_feature_map:
self.log.debug("No new features to extend")
return
# Check UID intersection
with SimpleTimer("Checking UID uniqueness", self.log.debug):
cur_uids = set(self._uid_array)
intersection = cur_uids.intersection(uid_feature_map.keys())
if intersection:
raise ValueError("The following IDs are already present in the "
"indexer's model: %s" % tuple(intersection))
# Check feature consistency
# - Assuming that there is at least one feature in our current model...
with SimpleTimer("Checking input feature shape", self.log.debug):
example_feat = self._feature_mat[0]
for feat in uid_feature_map.values():
if feat.shape[0] != example_feat.shape[0]:
raise ValueError("One or more features provided are not of "
"the correct shape! Found %s when we "
"require %s"
% (feat.shape, example_feat.shape))
del example_feat # Deleting so we can resize later in the function
# Extend data structures
# - UID and Feature matrix can be simply resized in-place as we are
# strictly adding to the end of the structure in memory.
# - distance matrix: since we're adding new columns in addition to rows,
# we need to create a new matrix of the desired shape and copy the
# existing data into it.
self.log.debug("Sorting feature UIDs")
new_uids = sorted(uid_feature_map.keys())
self.log.debug("Calculating before and after sizes.")
num_features_before = self._feature_mat.shape[0]
num_features_after = num_features_before + len(uid_feature_map)
with SimpleTimer("Resizing uid/feature matrices", self.log.debug):
self._feature_mat.resize((num_features_after,
self._feature_mat.shape[1]))
# Calculate distances for new features to all others
# - for-each new feature row, calc distance to all features in rows
# before it + itself
# - r is the index of the current new feature
# (num_features_before <= r < num_features_after)
# c is the index of the feature we are computing the distance to
# (0 <= c <= r)
# - Expanding and copying kernel matrix while computing distance to not
# waste time waiting for computations to finish
pool = multiprocessing.Pool(processes=parallel)
hid_map = {}
with SimpleTimer("Adding to matrices, submitting HI work",
self.log.debug):
for r in range(num_features_before, num_features_after):
r_uid = new_uids[r-num_features_before]
self._uid_array.append(r_uid)
assert len(self._uid_array) == r+1
self._uid2idx_map[r_uid] = r
self._feature_mat[r] = uid_feature_map[r_uid]
for c in range(r+1):
hid_map[r, c] = pool.apply_async(
histogram_intersection_distance,
args=(self._feature_mat[r], self._feature_mat[c])
)
pool.close()
# Expanding kernel matrix in local memory while async processing is
# going on.
# noinspection PyNoneFunctionAssignment
with SimpleTimer("'Resizing' kernel matrix", self.log.debug):
new_dm = numpy.ndarray((num_features_after, num_features_after),
dtype=self._distance_mat.dtype)
new_dm[:num_features_before,
:num_features_before] = self._distance_mat
self._distance_mat = new_dm
with SimpleTimer("Collecting dist results into matrix", self.log.debug):
for (r, c), dist in hid_map.iteritems():
d = dist.get()
self._distance_mat[r, c] = self._distance_mat[c, r] = d
pool.join()
def _least_similar_uid(self, uid, N=1):
"""
Return an array of N UIDs that are least similar to the feature for the
given UID. If N is greater than the total number of elements in this
indexer's model, we return a list of T ordered elements, where T is
the total number of elements in the model. I.e. we return an ordered list of all
UIDs by least similarity (the given UID will be the last element in the
list).
:param uid: UID to find the least similar UIDs for.
:type uid: int
:return: List of min(N, T) least similar UIDs.
:rtype: list of int
"""
i = self._uid2idx_map[uid]
z = zip(self._uid_array, self._distance_mat[i])
# Sort by least similarity, pick top N
return [e[0] for e in sorted(z, key=lambda f: f[1], reverse=1)[:N]]
def _pick_auto_negatives(self, pos_uids):
"""
Pick automatic negative UIDs based on distances from the given positive
UIDs.
:param pos_uids: List of positive UIDs
:type pos_uids: list of int
:return: List of automatically chosen negative UIDs
:rtype: set of int
"""
# Pick automatic negatives that are the most distant elements from
# given positive elements.
#: :type: set of int
auto_neg = set()
n = max(1, int(len(self._uid_array) * self.AUTO_NEG_PERCENT))
for p_UID in pos_uids:
auto_neg.update(self._least_similar_uid(p_UID, n))
# Cancel out any auto-picked negatives that conflict with given positive
# UIDs.
auto_neg.difference_update(pos_uids)
self.log.debug("Post auto-negative selection: %s", auto_neg)
return auto_neg
def rank(self, pos_ids, neg_ids=()):
"""
Rank the current model, returning a mapping of element IDs to a
ranking valuation. This valuation should be a probability in the range
of [0, 1], where 1.0 is the highest rank and 0.0 is the lowest rank.
:raises RuntimeError: No current model.
:param pos_ids: List of positive data IDs. Required.
:type pos_ids: list of int
:param neg_ids: List of negative data IDs. Optional.
:type neg_ids: list of int
:return: Mapping of ingest ID to a rank.
:rtype: dict of (int, float)
"""
if not self.has_model():
raise RuntimeError("No model available for this indexer.")
# Automatically supplement the negative IDs with the UIDs most distant
# from the provided positive UIDs.
# if len(neg_ids) == 0:
# neg_ids = self._pick_auto_negatives(pos_ids)
neg_ids = set(neg_ids).union(self._pick_auto_negatives(pos_ids))
#
# SVM model training
#
uid_list = sorted(set.union(set(pos_ids), neg_ids))
feature_len = self._feature_mat.shape[1]
# positive label: 1, negative label: 0
bool2label = {1: 1, 0: 0}
labels = [bool2label[uid in pos_ids] for uid in uid_list]
train_features = \
self._feature_mat[list(self._uid2idx_map[uid] for uid in uid_list), :]
self.log.debug("Creating SVM problem")
svm_problem = svm.svm_problem(labels, train_features.tolist())
self.log.debug("Creating SVM model")
w1_weight = max(1.0, len(neg_ids)/float(len(pos_ids)))
svm_model = svmutil.svm_train(svm_problem,
self.svm_train_params % w1_weight)
if svm_model.l == 0:
raise RuntimeError("SVM Model learning failed")
# Finding associated clip IDs of trained support vectors
self.log.debug("Finding clip IDs for support vectors")
hash2feature_idx = dict([(hash(tuple(f)), r)
for r, f in enumerate(self._feature_mat)])
svm_sv_idxs = []
tmp_list = [0] * feature_len
for r in range(svm_model.nSV[0] + svm_model.nSV[1]):
for c in range(feature_len):
tmp_list[c] = svm_model.SV[r][c].value
svm_sv_idxs.append(hash2feature_idx[hash(tuple(tmp_list))])
#
# Platt Scaling for probability ranking
#
# Features associated to support vectors in trained model
self.log.debug("Forming data for Platt Scaling")
# We need the distances between support vectors to all features
test_kernel = self._distance_mat[svm_sv_idxs, :]
weights = numpy.array(svm_model.get_sv_coef()).flatten()
margins = (numpy.mat(weights) * test_kernel).A[0]
self.log.debug("Performing Platt scaling")
rho = svm_model.rho[0]
probA = svm_model.probA[0]
probB = svm_model.probB[0]
#: :type: numpy.core.multiarray.ndarray
probs = 1.0 / (1.0 + numpy.exp((margins - rho) * probA + probB))
# Test if the mean probability of the adjudicated positives falls below
# the mean probability over all elements. If it does, the decision
# orientation is flipped, so invert the probabilities.
pos_probs = numpy.array(
[probs[self._uid2idx_map[uid]] for uid in pos_ids]
)
pos_mean_prob = pos_probs.sum() / pos_probs.size
total_mean_prob = probs.sum() / probs.size
if pos_mean_prob < total_mean_prob:
probs = 1.0 - probs
probability_map = dict(zip(self._uid_array, probs))
return probability_map
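# Note on the Platt scaling step above (illustrative numbers, not taken from
# any real model): libSVM's probability model maps a decision margin ``m`` to
#   P(positive | m) = 1 / (1 + exp(probA * (m - rho) + probB))
# using the probA/probB parameters fit during training. For example, with
# rho = 0.2, probA = -1.5 and probB = 0.1, a margin of 1.0 yields
# 1 / (1 + exp(-1.5 * 0.8 + 0.1)) ~= 0.75, i.e. a fairly confident positive.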
def reset(self):
"""
Reset this indexer to its original state, i.e. removing any model
extension that may have occurred.
:raises RuntimeError: Unable to reset due to lack of available model.
"""
if not self.has_model():
raise RuntimeError("No model available for this indexer to reset "
"to.")
self._load_model_files()
INDEXER_CLASS = SVMIndexerHIK
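# Minimal usage sketch (assumptions: a data directory named "svm_hik_model"
# under smqtk_config.DATA_DIR and 256-dimensional descriptors; both values are
# illustrative, not part of this module):
#
#   indexer = SVMIndexerHIK("svm_hik_model")
#   if not indexer.has_model():
#       descriptor_map = {uid: numpy.random.rand(256) for uid in range(100)}
#       indexer.generate_model(descriptor_map)
#   ranks = indexer.rank(pos_ids=[0, 3], neg_ids=[7])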
| {
"content_hash": "4ce6a29818e22d5110df0ea0c4b99ce1",
"timestamp": "",
"source": "github",
"line_count": 457,
"max_line_length": 92,
"avg_line_length": 40.18818380743983,
"alnum_prop": 0.5980071871937276,
"repo_name": "anguoyang/SMQTK",
"id": "dc6fe6209c16e69291da8c07ce47059ad595b777",
"size": "18366",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/smqtk/indexing/svm_hik.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "168608"
},
{
"name": "C++",
"bytes": "1555888"
},
{
"name": "CMake",
"bytes": "133035"
},
{
"name": "CSS",
"bytes": "173954"
},
{
"name": "Cuda",
"bytes": "138262"
},
{
"name": "HTML",
"bytes": "353039"
},
{
"name": "Java",
"bytes": "197569"
},
{
"name": "JavaScript",
"bytes": "1953035"
},
{
"name": "Makefile",
"bytes": "8196"
},
{
"name": "Matlab",
"bytes": "46934"
},
{
"name": "Perl",
"bytes": "3476237"
},
{
"name": "Perl6",
"bytes": "286157"
},
{
"name": "Python",
"bytes": "2120427"
},
{
"name": "Shell",
"bytes": "4944"
},
{
"name": "TeX",
"bytes": "149162"
}
],
"symlink_target": ""
} |
from melta.dynamic.propertyMaker import PropertyMaker
property_maker = PropertyMaker()
class Person:
pass
person1 = Person()
property_maker.buildProperty(person1, "edad", 20) \
.buildProperty(person1, "altura", 180) \
.buildProperty(person1, "sexo", "male")
class House:
pass
house1 = House()
property_maker.buildProperty(house1, "antiguedad", 32) \
.buildProperty(house1, "tipo_casa", "bungalow") \
.buildProperty(house1, "mt2", 360)
house2 = House()
property_maker.buildProperty(house2, "building_age", 34) \
.buildProperty(house2, "material", "brick") \
.buildProperty(house2, "sq2mts", 453) | {
"content_hash": "245d406cbc50e194b811fcc49034a80e",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 58,
"avg_line_length": 21.96551724137931,
"alnum_prop": 0.695447409733124,
"repo_name": "bossiernesto/melta",
"id": "d28a29efc70a7d2411d043c08d1caf4cddbaa722",
"size": "637",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/fixture/class_repositories.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "73516"
}
],
"symlink_target": ""
} |
"""ThreatConnect TI Campaign"""
# standard library
from typing import TYPE_CHECKING
# first-party
from tcex.api.tc.v2.threat_intelligence.mappings.group.group import Group
if TYPE_CHECKING:
# first-party
from tcex.api.tc.v2.threat_intelligence.threat_intelligence import ThreatIntelligence
class Campaign(Group):
"""Unique API calls for Campaign API Endpoints
Args:
ti (ThreatIntelligence): An instance of the ThreatIntelligence Class.
name (str, kwargs): [Required for Create] The name for this Group.
owner (str, kwargs): The owner for this Group. Defaults to the default Org when not provided.
first_seen (str, kwargs): The first seen datetime expression for this Group.
"""
def __init__(self, ti: 'ThreatIntelligence', **kwargs):
"""Initialize Class Properties."""
super().__init__(
ti, sub_type='Campaign', api_entity='campaign', api_branch='campaigns', **kwargs
)
def first_seen(self, first_seen):
"""Update the campaign with the new first_seen date.
Args:
first_seen (str): The first_seen date. Converted to %Y-%m-%dT%H:%M:%SZ date format
Returns:
requests.Response: The response from the API call.
"""
if not self.can_update():
self._handle_error(910, [self.type])
first_seen = self._utils.any_to_datetime(first_seen).strftime('%Y-%m-%dT%H:%M:%SZ')
self._data['firstSeen'] = first_seen
request = {'firstSeen': first_seen}
return self.tc_requests.update(self.api_type, self.api_branch, self.unique_id, request)
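# Minimal usage sketch (assumes a configured ThreatIntelligence instance
# named ``ti`` and an existing, updatable campaign; the name, owner and date
# below are illustrative only):
#
#   campaign = Campaign(ti, name='Example Campaign', owner='Example Org')
#   response = campaign.first_seen('2020-01-01')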
| {
"content_hash": "4f04395b4c1debe0ecaa591be0443a98",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 95,
"avg_line_length": 36.93181818181818,
"alnum_prop": 0.648,
"repo_name": "ThreatConnect-Inc/tcex",
"id": "b5ba8d4ab51a0424d35396298372d71b1244c88b",
"size": "1625",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tcex/api/tc/v2/threat_intelligence/mappings/group/group_types/campaign.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2735042"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from helper_functions import *
from munkres import Munkres
import numpy as np
import scipy.spatial.distance as dist
import time
import cPickle as pickle
from collections import defaultdict
# Helpers to format a point tuple, or a list of point tuples, as strings
def pt_str(pt):
return "({0:.4},{1:.4})".format(*pt)
def pts_str(pts):
return ", ".join(map(lambda pt: pt_str(pt), pts))
def splice_alg(pd_edges):
""" Implementation of the SPLICE algorithm
"""
pickups = pd_edges.keys()
deliveries = pd_edges.values()
# Compute Hungarian matching indices
c_mat = dist.cdist(deliveries, pickups)
# Connect dp_edges according to bipartite matching
dp_edges = {deliveries[d_ind]: pickups[p_ind] \
for (d_ind, p_ind) in Munkres().compute(c_mat)}
# Merge generated tours
cost = merge_tours(pd_edges, dp_edges)
return dp_edges, cost
def merge_tours(pd_edges, dp_edges, dist_f=dist.euclidean):
cost = 0.
pickups = pd_edges.keys()
deliveries = pd_edges.values()
# Setup beginning of merge
cur_p = pickups.pop()
tour_p = cur_p
start_p = cur_p
# While there are remaining pickups
while pickups:
# Follow through the d->p chain
cur_d = pd_edges[cur_p]
next_p = dp_edges[cur_d]
# If tour finished
if next_p == tour_p:
# Choose any random unvisited pickup
next_p = pickups.pop()
# Start new tour
tour_p = next_p
# Change dp to connect to new pickups
dp_edges[cur_d] = next_p
else:
# Mark pickup as visited
pickups.remove(next_p)
cur_p = next_p
dp_edges[pd_edges[cur_p]] = start_p
# Sum over all pd and dp edge costs
cost += reduce(lambda a, b: a + dist.euclidean(b,pd_edges[b]),
pd_edges, 0)
cost += reduce(lambda a, b: a + dist.euclidean(b,dp_edges[b]),
dp_edges, 0)
return cost
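# Minimal usage sketch (illustrative coordinates; each pickup point maps to
# its delivery point):
#
#   pd_edges = {(0.0, 0.0): (1.0, 0.0),
#               (2.0, 2.0): (3.0, 2.0),
#               (5.0, 1.0): (5.0, 2.0)}
#   dp_edges, cost = splice_alg(pd_edges)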
if __name__ == "__main__":
pass
| {
"content_hash": "3c027c89e08d239e792eee3ef908b860",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 66,
"avg_line_length": 25.11111111111111,
"alnum_prop": 0.599803343166175,
"repo_name": "adfriedm/Geometric-K-Server-Experiments",
"id": "b6b454dbef4a5c098d07906e9c7607a4131db81d",
"size": "2034",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "splice.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "490990"
},
{
"name": "Python",
"bytes": "19155"
}
],
"symlink_target": ""
} |
from __future__ import print_function, division, absolute_import
import unittest
import os
import tempfile
from ..db import Database
class DBTests(unittest.TestCase):
def setUp(self):
self._db = os.path.join(tempfile.gettempdir(), 'pigar_test.db')
self._conn = Database(db=self._db)
def tearDown(self):
self._conn.close()
os.remove(self._db)
def test_db(self):
self._conn.insert_package_with_imports('pigar', [])
row = self._conn.query_package('pigar')
self.assertEqual(row.id, 1)
self._conn.insert_package_with_imports('pigar', ['pigar'])
row = self._conn.query_all('pigar')
self.assertDictEqual(dict(row[0]),
{'name': 'pigar', 'package': 'pigar'})
rows = self._conn.query_package(None)
self.assertListEqual(rows, ['pigar'])
| {
"content_hash": "ea053753d879a66a4e5a812b670fea57",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 71,
"avg_line_length": 29.93103448275862,
"alnum_prop": 0.6071428571428571,
"repo_name": "Damnever/pigar",
"id": "9457612559cf0ef6de1170986145f164911b0e50",
"size": "893",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pigar/tests/test_db.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "300"
},
{
"name": "Python",
"bytes": "74137"
}
],
"symlink_target": ""
} |
import math
import pdb
import numpy as np
import Orange
from Orange.feature import Type as OType
class OverlapPenalty(object):
def __init__(self, domain, cdists, ddists, granularity=100):
"""
Args:
domain: Orange.Domain object
"""
self.domain = domain
self.cdists = cdists
self.ddists = ddists
self.granularity = granularity
self.funcs = {}
self.allcounts = {}
self.disc_nvals = {}
self.setup()
def setup(self):
funcs = {}
allcounts = {}
for attr in self.domain:
if attr.var_type == OType.Discrete:
func, counts = self.setup_discrete_attribute(attr)
else:
func, counts = self.setup_continuous_attribute(attr)
funcs[attr.name] = func
allcounts[attr.name] = counts
self.funcs, self.allcounts = funcs, allcounts
def setup_continuous_attribute(self, attr):
distribution = self.cdists[attr.name]
minv, maxv = distribution.min, distribution.max
if minv == maxv:
func = lambda v: 0
counts = np.zeros(1)
else:
def make_func(minv, block, gran):
def f(v):
return int(min(gran, max(0, math.ceil((v-minv)/block))))
return f
block = (maxv - minv) / float(self.granularity)
counts = np.zeros(self.granularity+1)
func = make_func(minv, block, self.granularity)
return func, counts
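# Worked example (illustrative values): with minv=0, maxv=100 and
# granularity=100 the block size is 1.0, so func(37.2) returns
# min(100, max(0, ceil(37.2 / 1.0))) = 38, i.e. every value lands in one of
# granularity + 1 integer buckets indexed 0..granularity.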
def setup_discrete_attribute(self, attr):
vals = self.ddists[attr.name].keys()
d = {val: idx for idx, val in enumerate(vals)}
# values will be passed in as indices into vals
def func(v):
if v < 0 or v > len(vals) or v is None:
return len(vals)
return v
counts = np.zeros(len(vals)+1)
self.disc_nvals[attr.name] = len(vals)
return func, counts
def reset_counts(self):
for counts in self.allcounts.values():
counts[:] = 0
def continuous_idxs(self, attr, minv, maxv):
if isinstance(attr, basestring):
name = attr
else:
name = attr.name
dist = self.cdists[name]
if minv <= dist.min and maxv >= dist.max:
return []
func = self.funcs[name]
return np.arange(func(minv),func(maxv)+1)
def discrete_idxs(self, attr, vals):
if isinstance(attr, basestring):
name = attr
else:
name = attr.name
if len(vals) == self.disc_nvals[name]:
return np.array([])
func = self.funcs[name]
return np.array(map(func, vals))
def __call__(self, clusters, min_weight=0.7):
"""
return weights to multiply to each cluster's influence
"""
penalties = self.penalties(clusters)
weights = 1. - penalties
weights[weights <= min_weight] = min_weight
return weights
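# Worked example (illustrative): penalties of [0.0, 0.5, 1.0] with
# min_weight=0.7 yield weights of [1.0, 0.7, 0.7]; clusters that overlap
# heavily with already-counted regions have their influence damped, but never
# below min_weight.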
def penalties(self, clusters):
"""
Compute a penalty for each cluster
Return is normalized to [0, 1]
"""
self.reset_counts()
penalties = np.array(map(self.penalty, clusters))
if penalties.max() == 0:
return penalties
penalties /= penalties.max()
return penalties
def penalty(self, cluster):
totals = {}
for col, (minv, maxv) in zip(cluster.cols, zip(*cluster.bbox)):
idxs = self.continuous_idxs(col, minv, maxv)
if len(idxs):
totals[col] = self.allcounts[col][idxs]
self.allcounts[col][idxs] += .5
for col, vals in cluster.discretes.iteritems():
idxs = self.discrete_idxs(col, vals)
if len(idxs):
totals[col] = self.allcounts[col][idxs]
self.allcounts[col][idxs] += 1
smooth = lambda counts: max(0, (counts - 0.5).max())
return sum(map(smooth, totals.values()))
def create_clusters(n):
clusters = []
for i in xrange(n):
minv = random.random() * 70
maxv = minv + 10
bbox = ((minv,), (maxv,))
a = ['a'+str(j) for j in range(3)]
x = ['x'+str(j) for j in range(10)]
discretes = {
'a': nprand.choice(a, 2, replace=False),
'x': nprand.choice(x, 3, replace=False)
}
cluster = Cluster(bbox, 0, ['c'], discretes=discretes)
clusters.append(cluster)
return clusters
if __name__ == '__main__':
import pdb
import random
import timeit
import numpy.random as nprand
from scorpion.bottomup.cluster import Cluster
from scorpion.arch import *
rows = []
cols = ['a', 'x', 'c']
for i in xrange(100):
a = 'a'+str(i%3)
b = 'x'+str(i%10)
c = 100 * random.random()
rows.append([a, b, c])
table = create_orange_table(rows, cols)
domain = table.domain
cdists = dict(zip(cols, Orange.statistics.basic.Domain(table)))
ddists = dict(zip(cols, Orange.statistics.distribution.Domain(table)))
overlap = OverlapPenalty(domain, cdists, ddists)
clusters = create_clusters(20)
weights = overlap(clusters)
print weights
import profile, pstats, StringIO
for n in [10, 20, 50]:
clusters = create_clusters(n)
f = lambda: overlap(clusters)
print n, '\t', timeit.timeit(f, number=100)
continue
pr = profile.Profile()
pr.run("print n, '\t', timeit.timeit(f, number=100)")
s = StringIO.StringIO()
ps = pstats.Stats(pr, stream=s).sort_stats('time')
ps.print_stats()
print s.getvalue()
| {
"content_hash": "04146b8f2714e5a6abbd4a2bf2f5ef2d",
"timestamp": "",
"source": "github",
"line_count": 191,
"max_line_length": 72,
"avg_line_length": 26.76439790575916,
"alnum_prop": 0.6171752738654147,
"repo_name": "sirrice/scorpion",
"id": "bafc153915b1931bcb96c24ee61a774ee4794dae",
"size": "5112",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scorpion/sigmod/overlap.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "389180"
},
{
"name": "R",
"bytes": "3041"
},
{
"name": "Shell",
"bytes": "1322"
}
],
"symlink_target": ""
} |
from voicebase import settings
from voicebase.api.base import BaseApiEndpoint
class VoicebaseToken(object):
def __init__(self, user, token=None, obj=None):
if obj is None:
obj = {}
self.user = user
self._obj = obj
self.token = token
self.type = None
self.privileges = None
self.expires = None
self._parse_response(obj)
@property
def api(self):
return self.user.api
def _parse_response(self, obj):
if not obj:
return
if 'type' in obj:
self.type = obj.get('type', None)
if 'token' in obj:
self.token = obj.get('token', None)
self.privileges = obj.get('privileges', None)
self.expires = obj.get('expires', None)
def load_details(self):
"""
Loads in the token details.
HTTP GET on
"""
# TODO: Load from api
# TODO: call _parse_response to parse details out
def delete(self):
"""
Delete and revoke this token.
HTTP DELETE on /access/users/{userId}/tokens/{token}
"""
class VoicebaseUser(object):
def __init__(self, endpoint, obj=None):
self.endpoint = endpoint
self._obj = obj
self.id = None
self.email = None
self.tokens = []
self.parse()
def parse(self):
if self._obj is None:
return
self.id = self._obj.get('id')
self.email = self._obj.get('email')
self.tokens = [VoicebaseToken(self, t) for t in self._obj.get('tokens')]
def load_tokens(self):
"""
Returns all current tokens for a user
HTTP GET on /access/users/{userId}/tokens
Caches in self.tokens
:return: list of VoicebaseToken objects
"""
def create_token(self):
"""
Create a new token.
HTTP POST on /access/users/{userId}/tokens
:return: VoicebaseToken object
"""
class AccessEndpoint(BaseApiEndpoint):
"""
Access and user management operations.
"""
URLS = settings.URLS.access
def __init__(self, api):
super(AccessEndpoint, self).__init__(api)
self._users = {}
def users(self):
"""
View existing users.
HTTP GET on /access/users
Will cache users we get from the server.
:return: list of VoicebaseUser objects
"""
if not self._users:
#TODO: Load users!
pass
return self._users
def get_user(self, user_id):
"""
Details for a specific user.
Will pull from cached users first, or get and add to cached users.
:param user_id: the username or userId of the user
:return: VoicebaseUser
"""
if user_id in self._users:
return self._users.get(user_id)
else:
# Load user
# Save user in cache
return # return user
| {
"content_hash": "faaff5af9fa959398f5207357da4d588",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 80,
"avg_line_length": 25.71551724137931,
"alnum_prop": 0.5477707006369427,
"repo_name": "gdoermann/voicebase",
"id": "86cbc76c267fcd219ed2acca76bff62d7f914921",
"size": "2983",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "voicebase/api/access.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "28026"
}
],
"symlink_target": ""
} |
import argparse, yaml, sys
from glob import glob
from fru import load
parser = argparse.ArgumentParser(description='print fru data of SOM/CC eeprom')
parser.add_argument('-b','--board', action='store', choices=['som','cc'], type=str, help='Enter som or cc')
parser.add_argument('-f','--field', action='store', nargs="+", type=str, help='enter fields to index using. (if entering one arg, it\'s assumed the field is from board area)')
parser.add_argument('-s','--sompath', type=str, nargs="?", default='/sys/bus/i2c/devices/*50/eeprom', help='enter path to SOM EEPROM')
parser.add_argument('-c','--ccpath', type=str, nargs="?", default='/sys/bus/i2c/devices/*51/eeprom', help='enter path to CC EEPROM')
args = parser.parse_args()
if args.board == 'som':
try:
som=glob(args.sompath)[0]
except:
sys.exit('\nsompath is incorrect:' + args.sompath)
elif args.board == 'cc':
try:
cc=glob(args.ccpath)[0]
except:
sys.exit('\nccpath is incorrect: ' + args.ccpath)
else:
try:
som=glob(args.sompath)[0]
cc=glob(args.ccpath)[0]
except:
sys.exit('\nOne of the following paths is wrong:\nsom path: ' + args.sompath + '\n' + 'cc path: ' + args.ccpath)
if args.field and args.board is None:
parser.error('\nIf entering a field, need board input as well')
elif args.board and args.field is None:
print(yaml.dump(load(eval(args.board)), default_flow_style=False, allow_unicode=True))
elif args.board and args.field:
try:
if len(args.field) == 1:
print(load(eval(args.board))['board'][args.field[0]])
else:
data = load(eval(args.board))
for field in args.field:
data = data[field]
print(data)
except KeyError:
print("ERROR: "+str(args.field)+" is not a valid input for field.\nmultiple key values can be provided to the field arg, ex. -f multirecord DC_Load_Record max_V\n\
If just one value is given, it is assumed the field is under the board area.\n")
else:
both={'som':load(som), 'cc':load(cc)}
print(yaml.dump(both,default_flow_style=False, allow_unicode=True))
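# Example invocations (field names are hypothetical, taken from the help text
# above; the actual fields depend on the EEPROM contents):
#
#   python fru-print.py                          # dump both SOM and CC data
#   python fru-print.py -b som                   # dump only the SOM EEPROM
#   python fru-print.py -b cc -f multirecord DC_Load_Record max_V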
| {
"content_hash": "40b81cf6d33e1ed3c4f8a81e12151680",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 175,
"avg_line_length": 42.92,
"alnum_prop": 0.6453867660764212,
"repo_name": "Xilinx/meta-petalinux",
"id": "eba2d7a1b58c8bb2510b2b117b3c7a793d584b49",
"size": "2170",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recipes-utils/fru-print/files/fru-print.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "BitBake",
"bytes": "95694"
},
{
"name": "C",
"bytes": "2379"
},
{
"name": "C++",
"bytes": "967"
},
{
"name": "CMake",
"bytes": "1205"
},
{
"name": "HTML",
"bytes": "2992"
},
{
"name": "Jupyter Notebook",
"bytes": "154005"
},
{
"name": "Makefile",
"bytes": "174"
},
{
"name": "NASL",
"bytes": "409"
},
{
"name": "PHP",
"bytes": "1127"
},
{
"name": "Pascal",
"bytes": "5610"
},
{
"name": "Python",
"bytes": "41192"
},
{
"name": "Shell",
"bytes": "132959"
}
],
"symlink_target": ""
} |
'''
"compile" plugin for cocos command line tool
'''
__docformat__ = 'restructuredtext'
import multiprocessing
import cocos
from MultiLanguage import MultiLanguage
import cocos_project
import os
import re
import sys
import shutil
import json
import build_web
import utils
class CCPluginCompile(cocos.CCPlugin):
"""
compiles a project
"""
BUILD_CONFIG_FILE = "build-cfg.json"
CFG_KEY_WIN32_COPY_FILES = "copy_files"
CFG_KEY_WIN32_MUST_COPY_FILES = "must_copy_files"
CFG_KEY_COPY_RESOURCES = "copy_resources"
CFG_KEY_MUST_COPY_RESOURCES = "must_copy_resources"
OUTPUT_DIR_NATIVE = "bin"
OUTPUT_DIR_SCRIPT_DEBUG = "simulator"
OUTPUT_DIR_SCRIPT_RELEASE = "publish"
WEB_PLATFORM_FOLDER_NAME = "html5"
PROJ_CFG_KEY_IOS_SIGN_ID = "ios_sign_id"
PROJ_CFG_KEY_ENGINE_DIR = "engine_dir"
BACKUP_SUFFIX = "-backup"
ENGINE_JS_DIRS = [
"frameworks/js-bindings/bindings/script",
"cocos/scripting/js-bindings/script"
]
@staticmethod
def plugin_name():
return "compile"
@staticmethod
def brief_description():
return MultiLanguage.get_string('COMPILE_BRIEF')
def _add_custom_options(self, parser):
from argparse import ArgumentParser
parser.add_argument("-m", "--mode", dest="mode", default='debug',
help=MultiLanguage.get_string('COMPILE_ARG_MODE'))
parser.add_argument("-j", "--jobs", dest="jobs", type=int,
help=MultiLanguage.get_string('COMPILE_ARG_JOBS'))
parser.add_argument("-o", "--output-dir", dest="output_dir",
help=MultiLanguage.get_string('COMPILE_ARG_OUTPUT'))
group = parser.add_argument_group(MultiLanguage.get_string('COMPILE_ARG_GROUP_ANDROID'))
group.add_argument("--ap", dest="android_platform",
help=MultiLanguage.get_string('COMPILE_ARG_AP'))
group.add_argument("--ndk-mode", dest="ndk_mode",
help=MultiLanguage.get_string('COMPILE_ARG_NDK_MODE'))
group.add_argument("--app-abi", dest="app_abi",
help=MultiLanguage.get_string('COMPILE_ARG_APP_ABI'))
group.add_argument("--ndk-toolchain", dest="toolchain",
help=MultiLanguage.get_string('COMPILE_ARG_TOOLCHAIN'))
group.add_argument("--ndk-cppflags", dest="cppflags",
help=MultiLanguage.get_string('COMPILE_ARG_CPPFLAGS'))
group.add_argument("--android-studio", dest="use_studio", action="store_true",
help=MultiLanguage.get_string('COMPILE_ARG_STUDIO'))
group.add_argument("--no-apk", dest="no_apk", action="store_true",
help=MultiLanguage.get_string('COMPILE_ARG_NO_APK'))
group = parser.add_argument_group(MultiLanguage.get_string('COMPILE_ARG_GROUP_WIN'))
group.add_argument("--vs", dest="vs_version", type=int,
help=MultiLanguage.get_string('COMPILE_ARG_VS'))
group = parser.add_argument_group(MultiLanguage.get_string('COMPILE_ARG_GROUP_WEB'))
group.add_argument("--source-map", dest="source_map", action="store_true",
help=MultiLanguage.get_string('COMPILE_ARG_SOURCE_MAP'))
group.add_argument("--advanced", dest="advanced", action="store_true",
help=MultiLanguage.get_string('COMPILE_ARG_ADVANCE'))
group = parser.add_argument_group(MultiLanguage.get_string('COMPILE_ARG_GROUP_IOS_MAC'))
group.add_argument("-t", "--target", dest="target_name",
help=MultiLanguage.get_string('COMPILE_ARG_TARGET'))
group = parser.add_argument_group(MultiLanguage.get_string('COMPILE_ARG_GROUP_IOS'))
group.add_argument("--sign-identity", dest="sign_id",
help=MultiLanguage.get_string('COMPILE_ARG_IOS_SIGN'))
group = parser.add_argument_group(MultiLanguage.get_string('COMPILE_ARG_GROUP_LUA_JS'))
group.add_argument("--no-res", dest="no_res", action="store_true",
help=MultiLanguage.get_string('COMPILE_ARG_NO_RES'))
group.add_argument("--compile-script", dest="compile_script", type=int, choices=[0, 1],
help=MultiLanguage.get_string('COMPILE_ARG_COMPILE_SCRIPT'))
group = parser.add_argument_group(MultiLanguage.get_string('COMPILE_ARG_GROUP_LUA'))
group.add_argument("--lua-encrypt", dest="lua_encrypt", action="store_true",
help=MultiLanguage.get_string('COMPILE_ARG_LUA_ENCRYPT'))
group.add_argument("--lua-encrypt-key", dest="lua_encrypt_key",
help=MultiLanguage.get_string('COMPILE_ARG_LUA_ENCRYPT_KEY'))
group.add_argument("--lua-encrypt-sign", dest="lua_encrypt_sign",
help=MultiLanguage.get_string('COMPILE_ARG_LUA_ENCRYPT_SIGN'))
category = self.plugin_category()
name = self.plugin_name()
usage = "\n\t%%prog %s %s -p <platform> [-s src_dir][-m <debug|release>]" \
"\nSample:" \
"\n\t%%prog %s %s -p android" % (category, name, category, name)
def _check_custom_options(self, args):
# get the mode parameter
available_modes = [ 'release', 'debug' ]
self._mode = self.check_param(args.mode, 'debug', available_modes,
MultiLanguage.get_string('COMPILE_ERROR_WRONG_MODE_FMT',
available_modes))
# android arguments
available_ndk_modes = [ 'release', 'debug', 'none' ]
self._ndk_mode = self.check_param(args.ndk_mode, self._mode, available_ndk_modes,
MultiLanguage.get_string('COMPILE_ERROR_WRONG_NDK_MODE_FMT',
available_ndk_modes))
self._no_apk = args.no_apk
self.app_abi = None
if args.app_abi:
self.app_abi = " ".join(args.app_abi.split(":"))
self.cppflags = None
if args.cppflags:
self.cppflags = args.cppflags
self.ndk_toolchain = None
if args.toolchain:
self.ndk_toolchain = args.toolchain
self.use_studio = args.use_studio
# Win32 arguments
self.vs_version = args.vs_version
# iOS/Mac arguments
self.xcode_target_name = None
if args.target_name is not None:
self.xcode_target_name = args.target_name
if args.compile_script is not None:
self._compile_script = bool(args.compile_script)
else:
self._compile_script = (self._mode == "release")
self._ap = args.android_platform
if args.jobs is not None:
self._jobs = args.jobs
else:
self._jobs = self.get_num_of_cpu()
self._has_sourcemap = args.source_map
self._web_advanced = args.advanced
self._no_res = args.no_res
if args.output_dir is None:
self._output_dir = self._get_output_dir()
else:
if os.path.isabs(args.output_dir):
self._output_dir = args.output_dir
else:
self._output_dir = os.path.abspath(args.output_dir)
self._sign_id = args.sign_id
if self._project._is_lua_project():
self._lua_encrypt = args.lua_encrypt
self._lua_encrypt_key = args.lua_encrypt_key
self._lua_encrypt_sign = args.lua_encrypt_sign
self.end_warning = ""
self._gen_custom_step_args()
def check_param(self, value, default_value, available_values, error_msg, ignore_case=True):
if value is None:
return default_value
if ignore_case:
check_value = value.lower()
right_values = []
for v in available_values:
right_values.append(v.lower())
else:
check_value = value
right_values = available_values
if check_value in right_values:
return check_value
else:
raise cocos.CCPluginError(error_msg, cocos.CCPluginError.ERROR_WRONG_ARGS)
def get_num_of_cpu(self):
try:
return multiprocessing.cpu_count()
except Exception:
print MultiLanguage.get_string('COMPILE_DETECT_CPU_FAILED')
return 1
def _get_output_dir(self):
project_dir = self._project.get_project_dir()
cur_platform = self._platforms.get_current_platform()
if self._project._is_script_project():
if self._project._is_js_project() and self._platforms.is_web_active():
cur_platform = CCPluginCompile.WEB_PLATFORM_FOLDER_NAME
if self._mode == 'debug':
output_dir = os.path.join(project_dir, CCPluginCompile.OUTPUT_DIR_SCRIPT_DEBUG, cur_platform)
else:
output_dir = os.path.join(project_dir, CCPluginCompile.OUTPUT_DIR_SCRIPT_RELEASE, cur_platform)
else:
output_dir = os.path.join(project_dir, CCPluginCompile.OUTPUT_DIR_NATIVE, self._mode, cur_platform)
return output_dir
def _gen_custom_step_args(self):
self._custom_step_args = {
"project-path": self._project.get_project_dir(),
"platform-project-path": self._platforms.project_path(),
"build-mode": self._mode,
"output-dir": self._output_dir
}
if self._platforms.is_android_active():
self._custom_step_args["ndk-build-mode"] = self._ndk_mode
def _build_cfg_path(self):
cur_cfg = self._platforms.get_current_config()
if self._platforms.is_win32_active():
if cur_cfg.build_cfg_path is not None:
project_dir = self._project.get_project_dir()
ret = os.path.join(project_dir, cur_cfg.build_cfg_path)
else:
ret = self._platforms.project_path()
elif self._platforms.is_ios_active():
ret = os.path.join(self._platforms.project_path(), "ios")
elif self._platforms.is_mac_active():
ret = os.path.join(self._platforms.project_path(), "mac")
else:
ret = self._platforms.project_path()
return ret
def _update_build_cfg(self):
build_cfg_dir = self._build_cfg_path()
cfg_file_path = os.path.join(build_cfg_dir, CCPluginCompile.BUILD_CONFIG_FILE)
if not os.path.isfile(cfg_file_path):
return
key_of_copy = None
key_of_must_copy = None
if self._platforms.is_android_active():
from build_android import AndroidBuilder
key_of_copy = AndroidBuilder.CFG_KEY_COPY_TO_ASSETS
key_of_must_copy = AndroidBuilder.CFG_KEY_MUST_COPY_TO_ASSERTS
elif self._platforms.is_win32_active():
key_of_copy = CCPluginCompile.CFG_KEY_WIN32_COPY_FILES
key_of_must_copy = CCPluginCompile.CFG_KEY_WIN32_MUST_COPY_FILES
if key_of_copy is None and key_of_must_copy is None:
return
try:
outfile = None
open_file = open(cfg_file_path)
cfg_info = json.load(open_file)
open_file.close()
open_file = None
changed = False
if key_of_copy is not None:
if cfg_info.has_key(key_of_copy):
src_list = cfg_info[key_of_copy]
ret_list = self._convert_cfg_list(src_list, build_cfg_dir)
cfg_info[CCPluginCompile.CFG_KEY_COPY_RESOURCES] = ret_list
del cfg_info[key_of_copy]
changed = True
if key_of_must_copy is not None:
if cfg_info.has_key(key_of_must_copy):
src_list = cfg_info[key_of_must_copy]
ret_list = self._convert_cfg_list(src_list, build_cfg_dir)
cfg_info[CCPluginCompile.CFG_KEY_MUST_COPY_RESOURCES] = ret_list
del cfg_info[key_of_must_copy]
changed = True
if changed:
# backup the old-cfg
split_list = os.path.splitext(CCPluginCompile.BUILD_CONFIG_FILE)
file_name = split_list[0]
ext_name = split_list[1]
bak_name = file_name + "-for-v0.1" + ext_name
bak_file_path = os.path.join(build_cfg_dir, bak_name)
if os.path.exists(bak_file_path):
os.remove(bak_file_path)
os.rename(cfg_file_path, bak_file_path)
# write the new data to file
with open(cfg_file_path, 'w') as outfile:
json.dump(cfg_info, outfile, sort_keys = True, indent = 4)
outfile.close()
outfile = None
finally:
if open_file is not None:
open_file.close()
if outfile is not None:
outfile.close()
def _convert_cfg_list(self, src_list, build_cfg_dir):
ret = []
for element in src_list:
ret_element = {}
if str(element).endswith("/"):
sub_str = element[0:len(element)-1]
ret_element["from"] = sub_str
ret_element["to"] = ""
else:
element_full_path = os.path.join(build_cfg_dir, element)
if os.path.isfile(element_full_path):
to_dir = ""
else:
to_dir = os.path.basename(element)
ret_element["from"] = element
ret_element["to"] = to_dir
ret.append(ret_element)
return ret
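# Worked example (hypothetical build-cfg entries): a trailing-slash entry
# such as "res/" becomes {"from": "res", "to": ""} so its contents are copied
# into the output root; a directory entry "res" becomes
# {"from": "res", "to": "res"}; and a plain file entry "game.config" becomes
# {"from": "game.config", "to": ""}.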
def _is_debug_mode(self):
return self._mode == 'debug'
def _remove_file_with_ext(self, work_dir, ext):
file_list = os.listdir(work_dir)
for f in file_list:
full_path = os.path.join(work_dir, f)
if os.path.isdir(full_path):
self._remove_file_with_ext(full_path, ext)
elif os.path.isfile(full_path):
name, cur_ext = os.path.splitext(f)
if cur_ext == ext:
os.remove(full_path)
def compile_lua_scripts(self, src_dir, dst_dir, need_compile=None):
if not self._project._is_lua_project():
return
if need_compile is None:
need_compile = self._compile_script
if not need_compile and not self._lua_encrypt:
return
cocos_cmd_path = os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), "cocos")
rm_ext = ".lua"
compile_cmd = "\"%s\" luacompile -s \"%s\" -d \"%s\"" % (cocos_cmd_path, src_dir, dst_dir)
if not need_compile:
compile_cmd = "%s --disable-compile" % compile_cmd
if self._lua_encrypt:
add_para = ""
if self._lua_encrypt_key is not None:
add_para = "%s -k %s" % (add_para, self._lua_encrypt_key)
if self._lua_encrypt_sign is not None:
add_para = "%s -b %s" % (add_para, self._lua_encrypt_sign)
compile_cmd = "%s -e %s" % (compile_cmd, add_para)
# run compile command
self._run_cmd(compile_cmd)
# remove the source scripts
self._remove_file_with_ext(dst_dir, rm_ext)
def compile_js_scripts(self, src_dir, dst_dir):
if not self._project._is_js_project():
return
if not self._compile_script:
return
cocos_cmd_path = os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), "cocos")
rm_ext = ".js"
compile_cmd = "\"%s\" jscompile -s \"%s\" -d \"%s\"" % (cocos_cmd_path, src_dir, dst_dir)
# run compile command
self._run_cmd(compile_cmd)
# remove the source scripts
self._remove_file_with_ext(dst_dir, rm_ext)
def add_warning_at_end(self, warning_str):
if warning_str is None or len(warning_str) == 0:
return
self.end_warning = "%s\n%s" % (self.end_warning, warning_str)
def is_valid_path(self, p):
if (p is not None) and os.path.exists(p):
ret = True
else:
ret = False
return ret
def build_android(self):
if not self._platforms.is_android_active():
return
project_dir = self._project.get_project_dir()
build_mode = self._mode
output_dir = self._output_dir
# get the android project path
# if both proj.android & proj.android-studio exist, select the project path by the --studio argument
# else, use the existed one.
cfg_obj = self._platforms.get_current_config()
proj_android_path = cfg_obj.proj_path
proj_studio_path = cfg_obj.studio_path
project_android_dir = None
using_studio = False
if self.is_valid_path(proj_android_path) and self.is_valid_path(proj_studio_path):
if self.use_studio:
project_android_dir = proj_studio_path
using_studio = True
else:
project_android_dir = proj_android_path
using_studio = False
elif self.is_valid_path(proj_android_path):
project_android_dir = proj_android_path
using_studio = False
elif self.is_valid_path(proj_studio_path):
project_android_dir = proj_studio_path
using_studio = True
if using_studio:
ide_name = 'Android Studio'
else:
ide_name = 'Eclipse'
cocos.Logging.info(MultiLanguage.get_string('COMPILE_INFO_ANDROID_PROJPATH_FMT', (ide_name, project_android_dir)))
from build_android import AndroidBuilder
builder = AndroidBuilder(self._verbose, project_android_dir,
self._no_res, self._project, using_studio)
args_ndk_copy = self._custom_step_args.copy()
target_platform = self._platforms.get_current_platform()
# update the project with the android platform
builder.update_project(self._ap)
if not self._project._is_script_project() or self._project._is_native_support():
if self._ndk_mode != "none":
# build native code
cocos.Logging.info(MultiLanguage.get_string('COMPILE_INFO_BUILD_NATIVE'))
ndk_build_param = [
"-j%s" % self._jobs
]
if self.app_abi:
abi_param = "APP_ABI=\"%s\"" % self.app_abi
ndk_build_param.append(abi_param)
if self.ndk_toolchain:
toolchain_param = "NDK_TOOLCHAIN=%s" % self.ndk_toolchain
ndk_build_param.append(toolchain_param)
self._project.invoke_custom_step_script(cocos_project.Project.CUSTOM_STEP_PRE_NDK_BUILD, target_platform, args_ndk_copy)
modify_mk = False
if using_studio:
app_mk = os.path.join(project_android_dir, "app/jni/Application.mk")
else:
app_mk = os.path.join(project_android_dir, "jni/Application.mk")
mk_content = None
if self.cppflags and os.path.exists(app_mk):
# record the content of Application.mk
f = open(app_mk)
mk_content = f.read()
f.close()
# Add cpp flags
f = open(app_mk, "a")
f.write("\nAPP_CPPFLAGS += %s" % self.cppflags)
f.close()
modify_mk = True
try:
builder.do_ndk_build(ndk_build_param, self._ndk_mode, self)
except Exception as e:
if e.__class__.__name__ == 'CCPluginError':
raise e
else:
raise cocos.CCPluginError(MultiLanguage.get_string('COMPILE_ERROR_NDK_BUILD_FAILED'),
cocos.CCPluginError.ERROR_BUILD_FAILED)
finally:
# roll-back the Application.mk
if modify_mk:
f = open(app_mk, "w")
f.write(mk_content)
f.close()
self._project.invoke_custom_step_script(cocos_project.Project.CUSTOM_STEP_POST_NDK_BUILD, target_platform, args_ndk_copy)
# build apk
if not self._no_apk:
cocos.Logging.info(MultiLanguage.get_string('COMPILE_INFO_BUILD_APK'))
self.apk_path = builder.do_build_apk(build_mode, self._no_apk, output_dir, self._custom_step_args, self)
self.android_package, self.android_activity = builder.get_apk_info()
cocos.Logging.info(MultiLanguage.get_string('COMPILE_INFO_BUILD_SUCCEED'))
def check_ios_mac_build_depends(self):
version = cocos.get_xcode_version()
if version <= '5':
message = MultiLanguage.get_string('COMPILE_ERROR_UPDATE_XCODE')
raise cocos.CCPluginError(message, cocos.CCPluginError.ERROR_TOOLS_NOT_FOUND)
cfg_obj = self._platforms.get_current_config()
if cfg_obj.proj_file is not None:
xcodeproj_name = cfg_obj.proj_file
name = os.path.basename(xcodeproj_name)
else:
name, xcodeproj_name = self.checkFileByExtention(".xcodeproj", self._platforms.project_path())
if not xcodeproj_name:
message = MultiLanguage.get_string('COMPILE_ERROR_XCODEPROJ_NOT_FOUND')
raise cocos.CCPluginError(message, cocos.CCPluginError.ERROR_PARSE_FILE)
self.project_name = name
self.xcodeproj_name = xcodeproj_name
def _remove_res(self, target_path):
build_cfg_dir = self._build_cfg_path()
cfg_file = os.path.join(build_cfg_dir, CCPluginCompile.BUILD_CONFIG_FILE)
if os.path.exists(cfg_file) and os.path.isfile(cfg_file):
# have config file
open_file = open(cfg_file)
cfg_info = json.load(open_file)
open_file.close()
if cfg_info.has_key("remove_res"):
remove_list = cfg_info["remove_res"]
for f in remove_list:
res = os.path.join(target_path, f)
if os.path.isdir(res):
# is a directory
if f.endswith('/'):
# remove files & dirs in it
for sub_file in os.listdir(res):
sub_file_fullpath = os.path.join(res, sub_file)
if os.path.isfile(sub_file_fullpath):
os.remove(sub_file_fullpath)
elif os.path.isdir(sub_file_fullpath):
shutil.rmtree(sub_file_fullpath)
else:
# remove the dir
shutil.rmtree(res)
elif os.path.isfile(res):
# is a file, remove it
os.remove(res)
def get_engine_dir(self):
engine_dir = self._project.get_proj_config(CCPluginCompile.PROJ_CFG_KEY_ENGINE_DIR)
if engine_dir is None:
proj_dir = self._project.get_project_dir()
if self._project._is_js_project():
check_dir = os.path.join(proj_dir, "frameworks", "cocos2d-x")
if os.path.isdir(check_dir):
# the case for jsb in cocos2d-x engine
engine_dir = check_dir
else:
# the case for jsb in cocos2d-js engine
engine_dir = proj_dir
elif self._project._is_lua_project():
engine_dir = os.path.join(proj_dir, "frameworks", "cocos2d-x")
else:
engine_dir = os.path.join(proj_dir, "cocos2d")
else:
engine_dir = os.path.join(self._project.get_project_dir(), engine_dir)
return engine_dir
def backup_dir(self, dir_path):
backup_dir = "%s%s" % (dir_path, CCPluginCompile.BACKUP_SUFFIX)
if os.path.exists(backup_dir):
shutil.rmtree(backup_dir)
shutil.copytree(dir_path, backup_dir)
def reset_backup_dir(self, dir_path):
backup_dir = "%s%s" % (dir_path, CCPluginCompile.BACKUP_SUFFIX)
if os.path.exists(dir_path):
shutil.rmtree(dir_path)
os.rename(backup_dir, dir_path)
def get_engine_js_dir(self):
engine_js_dir = None
isFound = False
check_script_dir = os.path.join(self._project.get_project_dir(), "script")
if os.path.isdir(check_script_dir):
# JS script already copied into the project dir
engine_js_dir = check_script_dir
isFound = True
else:
for js_dir in CCPluginCompile.ENGINE_JS_DIRS:
engine_js_dir = os.path.join(self.get_engine_dir(), js_dir)
if os.path.isdir(engine_js_dir):
isFound = True
break
if isFound:
return engine_js_dir
else:
return None
def build_ios(self):
if not self._platforms.is_ios_active():
return
if not cocos.os_is_mac():
raise cocos.CCPluginError(MultiLanguage.get_string('COMPILE_ERROR_BUILD_ON_MAC'),
cocos.CCPluginError.ERROR_WRONG_ARGS)
if self._sign_id is not None:
cocos.Logging.info(MultiLanguage.get_string('COMPILE_INFO_IOS_SIGN_FMT', self._sign_id))
self.use_sdk = 'iphoneos'
else:
self.use_sdk = 'iphonesimulator'
self.check_ios_mac_build_depends()
ios_project_dir = self._platforms.project_path()
output_dir = self._output_dir
projectPath = os.path.join(ios_project_dir, self.xcodeproj_name)
pbxprojectPath = os.path.join(projectPath, "project.pbxproj")
f = file(pbxprojectPath)
contents = f.read()
section = re.search(r"Begin PBXProject section.*End PBXProject section", contents, re.S)
if section is None:
message = MultiLanguage.get_string('COMPILE_ERROR_NO_IOS_TARGET')
raise cocos.CCPluginError(message, cocos.CCPluginError.ERROR_PARSE_FILE)
targets = re.search(r"targets = (.*);", section.group(), re.S)
if targets is None:
message = MultiLanguage.get_string('COMPILE_ERROR_NO_IOS_TARGET')
raise cocos.CCPluginError(message, cocos.CCPluginError.ERROR_PARSE_FILE)
targetName = None
if self.xcode_target_name is not None:
targetName = self.xcode_target_name
else:
cfg_obj = self._platforms.get_current_config()
if cfg_obj.target_name is not None:
targetName = cfg_obj.target_name
else:
names = re.split("\*", targets.group())
for name in names:
if "iOS" in name or "-mobile" in name:
targetName = str.strip(name)
break
if targetName is None:
message = MultiLanguage.get_string('COMPILE_ERROR_NO_IOS_TARGET')
raise cocos.CCPluginError(message, cocos.CCPluginError.ERROR_PARSE_FILE)
if os.path.isdir(output_dir):
target_app_dir = os.path.join(output_dir, "%s.app" % targetName)
if os.path.isdir(target_app_dir):
shutil.rmtree(target_app_dir)
# is script project, check whether compile scripts or not
need_reset_dir = False
if self._project._is_script_project():
script_src_dir = os.path.join(self._project.get_project_dir(), "src")
if self._project._is_js_project() and self._compile_script:
# backup the source scripts
self.backup_dir(script_src_dir)
self.compile_js_scripts(script_src_dir, script_src_dir)
# js project need compile the js files in engine
engine_js_dir = self.get_engine_js_dir()
if engine_js_dir is not None:
self.backup_dir(engine_js_dir)
self.compile_js_scripts(engine_js_dir, engine_js_dir)
need_reset_dir = True
if self._project._is_lua_project() and self._lua_encrypt:
# on iOS, only invoke luacompile when lua encrypt is specified
self.backup_dir(script_src_dir)
self.compile_lua_scripts(script_src_dir, script_src_dir, False)
need_reset_dir = True
try:
cocos.Logging.info(MultiLanguage.get_string('COMPILE_INFO_BUILDING'))
command = ' '.join([
"xcodebuild",
"-project",
"\"%s\"" % projectPath,
"-configuration",
"%s" % 'Debug' if self._mode == 'debug' else 'Release',
"-target",
"\"%s\"" % targetName,
"%s" % "-arch i386" if self.use_sdk == 'iphonesimulator' else '',
"-sdk",
"%s" % self.use_sdk,
"CONFIGURATION_BUILD_DIR=\"%s\"" % (output_dir),
"%s" % "VALID_ARCHS=\"i386\"" if self.use_sdk == 'iphonesimulator' else ''
])
if self._sign_id is not None:
command = "%s CODE_SIGN_IDENTITY=\"%s\"" % (command, self._sign_id)
self._run_cmd(command)
filelist = os.listdir(output_dir)
for filename in filelist:
name, extention = os.path.splitext(filename)
if extention == '.a':
filename = os.path.join(output_dir, filename)
os.remove(filename)
self._iosapp_path = os.path.join(output_dir, "%s.app" % targetName)
if self._no_res:
self._remove_res(self._iosapp_path)
if self._sign_id is not None:
# generate the ipa
app_path = os.path.join(output_dir, "%s.app" % targetName)
ipa_path = os.path.join(output_dir, "%s.ipa" % targetName)
ipa_cmd = "xcrun -sdk %s PackageApplication -v \"%s\" -o \"%s\"" % (self.use_sdk, app_path, ipa_path)
self._run_cmd(ipa_cmd)
cocos.Logging.info(MultiLanguage.get_string('COMPILE_INFO_BUILD_SUCCEED'))
except:
raise cocos.CCPluginError(MultiLanguage.get_string('COMPILE_ERROR_BUILD_FAILED'),
cocos.CCPluginError.ERROR_BUILD_FAILED)
finally:
# is script project & need reset dirs
if need_reset_dir:
script_src_dir = os.path.join(self._project.get_project_dir(), "src")
self.reset_backup_dir(script_src_dir)
if self._project._is_js_project():
engine_js_dir = self.get_engine_js_dir()
if engine_js_dir is not None:
self.reset_backup_dir(engine_js_dir)
def build_mac(self):
if not self._platforms.is_mac_active():
return
if not cocos.os_is_mac():
raise cocos.CCPluginError(MultiLanguage.get_string('COMPILE_ERROR_BUILD_ON_MAC'),
cocos.CCPluginError.ERROR_WRONG_ARGS)
self.check_ios_mac_build_depends()
mac_project_dir = self._platforms.project_path()
output_dir = self._output_dir
projectPath = os.path.join(mac_project_dir, self.xcodeproj_name)
pbxprojectPath = os.path.join(projectPath, "project.pbxproj")
f = file(pbxprojectPath)
contents = f.read()
section = re.search(
r"Begin PBXProject section.*End PBXProject section",
contents,
re.S
)
if section is None:
message = MultiLanguage.get_string('COMPILE_ERROR_NO_MAC_TARGET')
raise cocos.CCPluginError(message, cocos.CCPluginError.ERROR_PARSE_FILE)
targets = re.search(r"targets = (.*);", section.group(), re.S)
if targets is None:
message = MultiLanguage.get_string('COMPILE_ERROR_NO_MAC_TARGET')
raise cocos.CCPluginError(message, cocos.CCPluginError.ERROR_PARSE_FILE)
targetName = None
if self.xcode_target_name is not None:
targetName = self.xcode_target_name
else:
cfg_obj = self._platforms.get_current_config()
if cfg_obj.target_name is not None:
targetName = cfg_obj.target_name
else:
names = re.split("\*", targets.group())
for name in names:
if "Mac" in name or "-desktop" in name:
targetName = str.strip(name)
break
if targetName is None:
message = MultiLanguage.get_string('COMPILE_ERROR_NO_MAC_TARGET')
raise cocos.CCPluginError(message, cocos.CCPluginError.ERROR_PARSE_FILE)
if os.path.isdir(output_dir):
target_app_dir = os.path.join(output_dir, "%s.app" % targetName)
if os.path.isdir(target_app_dir):
shutil.rmtree(target_app_dir)
        # if this is a script project, decide whether the scripts need to be compiled
need_reset_dir = False
if self._project._is_script_project():
script_src_dir = os.path.join(self._project.get_project_dir(), "src")
if self._project._is_js_project() and self._compile_script:
# backup the source scripts
self.backup_dir(script_src_dir)
self.compile_js_scripts(script_src_dir, script_src_dir)
                # js projects also need to compile the js files shipped with the engine
engine_js_dir = self.get_engine_js_dir()
if engine_js_dir is not None:
self.backup_dir(engine_js_dir)
self.compile_js_scripts(engine_js_dir, engine_js_dir)
need_reset_dir = True
if self._project._is_lua_project() and (self._lua_encrypt or self._compile_script):
                # on Mac, invoke luacompile when lua encryption or script compilation is requested
self.backup_dir(script_src_dir)
self.compile_lua_scripts(script_src_dir, script_src_dir)
need_reset_dir = True
try:
cocos.Logging.info(MultiLanguage.get_string('COMPILE_INFO_BUILDING'))
command = ' '.join([
"xcodebuild",
"-project",
"\"%s\"" % projectPath,
"-configuration",
"%s" % 'Debug' if self._mode == 'debug' else 'Release',
"-target",
"\"%s\"" % targetName,
"CONFIGURATION_BUILD_DIR=\"%s\"" % (output_dir)
])
self._run_cmd(command)
self.target_name = targetName
filelist = os.listdir(output_dir)
for filename in filelist:
name, extention = os.path.splitext(filename)
if extention == '.a':
filename = os.path.join(output_dir, filename)
os.remove(filename)
self._macapp_path = os.path.join(output_dir, "%s.app" % targetName)
if self._no_res:
resource_path = os.path.join(self._macapp_path, "Contents", "Resources")
self._remove_res(resource_path)
cocos.Logging.info(MultiLanguage.get_string('COMPILE_INFO_BUILD_SUCCEED'))
except:
raise cocos.CCPluginError(MultiLanguage.get_string('COMPILE_ERROR_BUILD_FAILED'),
cocos.CCPluginError.ERROR_BUILD_FAILED)
finally:
            # script project: restore the backed-up script directories
if need_reset_dir:
script_src_dir = os.path.join(self._project.get_project_dir(), "src")
self.reset_backup_dir(script_src_dir)
if self._project._is_js_project():
engine_js_dir = self.get_engine_js_dir()
if engine_js_dir is not None:
self.reset_backup_dir(engine_js_dir)
# Get the required VS versions from the engine version of project
def get_required_vs_versions(self):
# get the engine version string
ret = []
# 1. get engine version from .cocos_project.json
engine_ver_str = self._project.get_proj_config(cocos_project.Project.KEY_ENGINE_VERSION)
        # 2. engine version not found; read it from the engine source
if engine_ver_str is None:
engine_dir = self.get_engine_dir()
if engine_dir is not None:
engine_ver_str = utils.get_engine_version(engine_dir)
if engine_ver_str is None:
return ret
# get the float value of engine version
version_pattern = r'cocos2d-x[^0-9]*([\d]+)\.([\d]+)'
match = re.match(version_pattern, engine_ver_str)
if match:
major_ver = int(match.group(1))
minor_ver = int(match.group(2))
else:
major_ver = -1
minor_ver = -1
if major_ver < 0:
return ret
if (major_ver > 3) or (major_ver == 3 and minor_ver >= 7):
ret = [ 2013, 2015 ]
else:
ret = [ 2012, 2013 ]
return ret
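    # Illustrative note (the version strings are assumptions for demonstration): an
    # engine string such as "cocos2d-x-3.10" matches the pattern above with major=3
    # and minor=10, so the VS 2013/2015 branch is chosen, while "cocos2d-x-2.2.6"
    # falls into the VS 2012/2013 branch.
    #   re.match(r'cocos2d-x[^0-9]*([\d]+)\.([\d]+)', 'cocos2d-x-3.10').groups()
    #   # -> ('3', '10')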
def get_min_vs_version(self):
if self._platforms.is_wp8_active() or self._platforms.is_wp8_1_active() or self._platforms.is_metro_active():
            # WP8 projects require VS 2013
return 2013
else:
            # win32 projects require VS 2012
return 2012
def get_available_devenv(self, required_versions, min_ver, specify_vs_ver=None):
if required_versions is None or len(required_versions) == 0:
if specify_vs_ver is None:
                # no VS version specified; find the newest available one
needUpgrade, commandPath = utils.get_newest_devenv(min_ver)
else:
                # a VS version was specified
if specify_vs_ver < min_ver:
# Specified version is lower than required, raise error
raise cocos.CCPluginError(MultiLanguage.get_string('COMPILE_ERROR_LOW_VS_VER'),
cocos.CCPluginError.ERROR_WRONG_ARGS)
else:
# Get the specified VS
commandPath = utils.get_devenv_path(specify_vs_ver)
if specify_vs_ver > min_ver:
needUpgrade = True
else:
needUpgrade = False
else:
needUpgrade = False
if specify_vs_ver is None:
# find VS in required versions
commandPath = None
for v in required_versions:
commandPath = utils.get_devenv_path(v)
if commandPath is not None:
break
else:
# use specified VS version
if specify_vs_ver in required_versions:
commandPath = utils.get_devenv_path(specify_vs_ver)
else:
raise cocos.CCPluginError(MultiLanguage.get_string('COMPILE_ERROR_WRONG_VS_VER_FMT', specify_vs_ver),
cocos.CCPluginError.ERROR_WRONG_ARGS)
if commandPath is None:
message = MultiLanguage.get_string('COMPILE_ERROR_VS_NOT_FOUND')
raise cocos.CCPluginError(message, cocos.CCPluginError.ERROR_TOOLS_NOT_FOUND)
return (needUpgrade, commandPath)
def get_available_msbuild(self, required_versions, min_ver, specify_vs_ver=None):
if required_versions is None or len(required_versions) == 0:
if specify_vs_ver is None:
                # no VS version specified; find the newest available one
commandPath = utils.get_newest_msbuild(min_ver)
else:
                # a VS version was specified
if specify_vs_ver < min_ver:
# Specified version is lower than required, raise error
raise cocos.CCPluginError(MultiLanguage.get_string('COMPILE_ERROR_LOW_VS_VER'),
cocos.CCPluginError.ERROR_WRONG_ARGS)
else:
# Get the specified VS
commandPath = utils.get_msbuild_path(specify_vs_ver)
else:
if specify_vs_ver is None:
# find VS in required versions
commandPath = None
for v in required_versions:
commandPath = utils.get_msbuild_path(v)
if commandPath is not None:
break
else:
# use specified VS version
if specify_vs_ver in required_versions:
commandPath = utils.get_msbuild_path(specify_vs_ver)
else:
raise cocos.CCPluginError(MultiLanguage.get_string('COMPILE_ERROR_WRONG_VS_VER_FMT', specify_vs_ver),
cocos.CCPluginError.ERROR_WRONG_ARGS)
if commandPath is None:
message = MultiLanguage.get_string('COMPILE_ERROR_VS_NOT_FOUND')
raise cocos.CCPluginError(message, cocos.CCPluginError.ERROR_TOOLS_NOT_FOUND)
return commandPath
def build_vs_project(self, sln_file, project_name, build_mode, specify_vs_ver=None):
required_versions = self.get_required_vs_versions()
min_ver = self.get_min_vs_version()
if required_versions is None or len(required_versions) == 0:
msg_version = min_ver
else:
msg_version = required_versions
cocos.Logging.info(MultiLanguage.get_string('COMPILE_INFO_REQUIRED_VS_FMT', msg_version))
needUpgrade, commandPath = self.get_available_devenv(required_versions, min_ver, specify_vs_ver)
if os.path.exists(commandPath):
# upgrade projects
if needUpgrade:
commandUpgrade = ' '.join([
"\"%s\"" % commandPath,
"\"%s\"" % sln_file,
"/Upgrade"
])
self._run_cmd(commandUpgrade)
# build the project
commands = ' '.join([
"\"%s\"" % commandPath,
"\"%s\"" % sln_file,
"/Build \"%s\"" % build_mode,
"/Project \"%s\"" % project_name
])
self._run_cmd(commands)
else:
cocos.Logging.info(MultiLanguage.get_string('COMPILE_INFO_DEVENV_NOT_FOUND'))
msbuild_path = self.get_available_msbuild(required_versions, min_ver, specify_vs_ver)
if msbuild_path:
msbuild_path = os.path.join(msbuild_path, 'MSBuild.exe')
cocos.Logging.info(MultiLanguage.get_string('COMPILE_INFO_FIND_MSBUILD_FMT', msbuild_path))
job_number = 2
build_command = ' '.join([
'\"%s\"' % msbuild_path,
'\"%s\"' % sln_file,
'/target:%s' % project_name,
'/property:Configuration=%s' % build_mode,
'/maxcpucount:%s' % job_number
])
self._run_cmd(build_command)
else:
raise cocos.CCPluginError(MultiLanguage.get_string('COMPILE_ERROR_MSDBUILD_NOT_FOUND'),
cocos.CCPluginError.ERROR_TOOLS_NOT_FOUND)
cocos.Logging.info(MultiLanguage.get_string('COMPILE_INFO_BUILD_SUCCEED'))
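    # For reference, hedged sketches of the command lines assembled above (paths and
    # names are placeholders, not values emitted by this plugin):
    #   "C:\...\devenv.com" "MyGame.sln" /Upgrade
    #   "C:\...\devenv.com" "MyGame.sln" /Build "Release" /Project "MyGame"
    #   "C:\...\MSBuild.exe" "MyGame.sln" /target:MyGame /property:Configuration=Release /maxcpucount:2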
def build_win32(self):
if not self._platforms.is_win32_active():
return
if not cocos.os_is_win32():
raise cocos.CCPluginError(MultiLanguage.get_string('COMPILE_ERROR_BUILD_ON_WIN'),
cocos.CCPluginError.ERROR_WRONG_ARGS)
win32_projectdir = self._platforms.project_path()
output_dir = self._output_dir
cocos.Logging.info(MultiLanguage.get_string('COMPILE_INFO_BUILDING'))
# get the solution file & project name
cfg_obj = self._platforms.get_current_config()
if cfg_obj.sln_file is not None:
sln_name = cfg_obj.sln_file
if cfg_obj.project_name is None:
raise cocos.CCPluginError(MultiLanguage.get_string('COMPILE_ERROR_CFG_NOT_FOUND_FMT',
(cocos_project.Win32Config.KEY_PROJECT_NAME,
cocos_project.Win32Config.KEY_SLN_FILE,
cocos_project.Project.CONFIG)),
cocos.CCPluginError.ERROR_WRONG_CONFIG)
else:
name = cfg_obj.project_name
else:
name, sln_name = self.checkFileByExtention(".sln", win32_projectdir)
if not sln_name:
message = MultiLanguage.get_string('COMPILE_ERROR_SLN_NOT_FOUND')
raise cocos.CCPluginError(message, cocos.CCPluginError.ERROR_PATH_NOT_FOUND)
# build the project
self.project_name = name
projectPath = os.path.join(win32_projectdir, sln_name)
build_mode = 'Debug' if self._is_debug_mode() else 'Release'
self.build_vs_project(projectPath, self.project_name, build_mode, self.vs_version)
# copy files
build_folder_name = "%s.win32" % build_mode
build_folder_path = os.path.join(win32_projectdir, build_folder_name)
if not os.path.isdir(build_folder_path):
message = MultiLanguage.get_string('COMPILE_ERROR_BUILD_PATH_NOT_FOUND_FMT', build_folder_path)
raise cocos.CCPluginError(message, cocos.CCPluginError.ERROR_PATH_NOT_FOUND)
# remove the files in output dir (keep the exe files)
if os.path.exists(output_dir):
output_files = os.listdir(output_dir)
for element in output_files:
ele_full_path = os.path.join(output_dir, element)
if os.path.isfile(ele_full_path):
base_name, file_ext = os.path.splitext(element)
if not file_ext == ".exe":
os.remove(ele_full_path)
elif os.path.isdir(ele_full_path):
shutil.rmtree(ele_full_path)
        # create the output dir if it does not exist
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if cfg_obj.exe_out_dir is None:
exe_out_dir = build_folder_path
else:
exe_out_dir = os.path.join(build_folder_path, cfg_obj.exe_out_dir)
# copy exe
files = os.listdir(exe_out_dir)
proj_exe_name = "%s.exe" % self.project_name
for filename in files:
if filename == proj_exe_name:
file_path = os.path.join(exe_out_dir, filename)
cocos.Logging.info(MultiLanguage.get_string('COMPILE_INFO_COPYING_FMT', filename))
shutil.copy(file_path, output_dir)
break
# copy dll
files = os.listdir(build_folder_path)
for filename in files:
name, ext = os.path.splitext(filename)
if ext == '.dll':
file_path = os.path.join(build_folder_path, filename)
cocos.Logging.info(MultiLanguage.get_string('COMPILE_INFO_COPYING_FMT', filename))
shutil.copy(file_path, output_dir)
# copy lua files & res
build_cfg_path = self._build_cfg_path()
build_cfg = os.path.join(build_cfg_path, CCPluginCompile.BUILD_CONFIG_FILE)
if not os.path.exists(build_cfg):
message = MultiLanguage.get_string('COMPILE_ERROR_FILE_NOT_FOUND_FMT', build_cfg)
raise cocos.CCPluginError(message, cocos.CCPluginError.ERROR_PATH_NOT_FOUND)
f = open(build_cfg)
data = json.load(f)
if data.has_key(CCPluginCompile.CFG_KEY_MUST_COPY_RESOURCES):
if self._no_res:
fileList = data[CCPluginCompile.CFG_KEY_MUST_COPY_RESOURCES]
else:
fileList = data[CCPluginCompile.CFG_KEY_COPY_RESOURCES] + data[CCPluginCompile.CFG_KEY_MUST_COPY_RESOURCES]
else:
fileList = data[CCPluginCompile.CFG_KEY_COPY_RESOURCES]
for cfg in fileList:
cocos.copy_files_with_config(cfg, build_cfg_path, output_dir)
# check the project config & compile the script files
if self._project._is_js_project():
self.compile_js_scripts(output_dir, output_dir)
if self._project._is_lua_project():
self.compile_lua_scripts(output_dir, output_dir)
self.run_root = output_dir
def build_web(self):
if not self._platforms.is_web_active():
return
project_dir = self._platforms.project_path()
# store env for run
cfg_obj = self._platforms.get_current_config()
if cfg_obj.run_root_dir is not None:
self.run_root = cfg_obj.run_root_dir
else:
self.run_root = project_dir
if cfg_obj.sub_url is not None:
self.sub_url = cfg_obj.sub_url
else:
self.sub_url = '/'
output_dir = CCPluginCompile.OUTPUT_DIR_SCRIPT_RELEASE
if self._is_debug_mode():
output_dir = CCPluginCompile.OUTPUT_DIR_SCRIPT_DEBUG
if not self._web_advanced:
return
self.sub_url = '%s%s/%s/' % (self.sub_url, output_dir, CCPluginCompile.WEB_PLATFORM_FOLDER_NAME)
f = open(os.path.join(project_dir, "project.json"))
project_json = json.load(f)
f.close()
engine_dir = os.path.join(project_json["engineDir"])
realEngineDir = os.path.normpath(os.path.join(project_dir, engine_dir))
publish_dir = os.path.normpath(os.path.join(project_dir, output_dir, CCPluginCompile.WEB_PLATFORM_FOLDER_NAME))
# need to config in options of command
buildOpt = {
"outputFileName" : "game.min.js",
"debug": "true" if self._is_debug_mode() else "false",
"compilationLevel" : "advanced" if self._web_advanced else "simple",
"sourceMapOpened" : True if self._has_sourcemap else False
}
if os.path.exists(publish_dir):
shutil.rmtree(publish_dir)
os.makedirs(publish_dir)
# generate build.xml
build_web.gen_buildxml(project_dir, project_json, publish_dir, buildOpt)
outputJsPath = os.path.join(publish_dir, buildOpt["outputFileName"])
if os.path.exists(outputJsPath) == True:
os.remove(outputJsPath)
# call closure compiler
ant_root = cocos.check_environment_variable('ANT_ROOT')
ant_path = os.path.join(ant_root, 'ant')
self._run_cmd("%s -f %s" % (ant_path, os.path.join(publish_dir, 'build.xml')))
# handle sourceMap
sourceMapPath = os.path.join(publish_dir, "sourcemap")
if os.path.exists(sourceMapPath):
smFile = open(sourceMapPath)
try:
smContent = smFile.read()
finally:
smFile.close()
dir_to_replace = project_dir
if cocos.os_is_win32():
dir_to_replace = project_dir.replace('\\', '\\\\')
smContent = smContent.replace(dir_to_replace, os.path.relpath(project_dir, publish_dir))
smContent = smContent.replace(realEngineDir, os.path.relpath(realEngineDir, publish_dir))
smContent = smContent.replace('\\\\', '/')
smContent = smContent.replace('\\', '/')
smFile = open(sourceMapPath, "w")
smFile.write(smContent)
smFile.close()
# handle project.json
del project_json["engineDir"]
del project_json["modules"]
del project_json["jsList"]
project_json_output_file = open(os.path.join(publish_dir, "project.json"), "w")
project_json_output_file.write(json.dumps(project_json))
project_json_output_file.close()
# handle index.html
indexHtmlFile = open(os.path.join(project_dir, "index.html"))
try:
indexContent = indexHtmlFile.read()
finally:
indexHtmlFile.close()
reg1 = re.compile(r'<script\s+src\s*=\s*("|\')[^"\']*CCBoot\.js("|\')\s*><\/script>')
indexContent = reg1.sub("", indexContent)
mainJs = project_json.get("main") or "main.js"
indexContent = indexContent.replace(mainJs, buildOpt["outputFileName"])
indexHtmlOutputFile = open(os.path.join(publish_dir, "index.html"), "w")
indexHtmlOutputFile.write(indexContent)
indexHtmlOutputFile.close()
# copy res dir
if cfg_obj.copy_res is None:
dst_dir = os.path.join(publish_dir, 'res')
src_dir = os.path.join(project_dir, 'res')
if os.path.exists(dst_dir):
shutil.rmtree(dst_dir)
shutil.copytree(src_dir, dst_dir)
else:
for cfg in cfg_obj.copy_res:
cocos.copy_files_with_config(cfg, project_dir, publish_dir)
# copy to the output directory if necessary
pub_dir = os.path.normcase(publish_dir)
out_dir = os.path.normcase(os.path.normpath(self._output_dir))
if pub_dir != out_dir:
cpy_cfg = {
"from" : pub_dir,
"to" : out_dir
}
cocos.copy_files_with_config(cpy_cfg, pub_dir, out_dir)
def build_linux(self):
if not self._platforms.is_linux_active():
return
#if not cocos.os_is_linux():
# raise cocos.CCPluginError("Please build on linux")
project_dir = self._project.get_project_dir()
cfg_obj = self._platforms.get_current_config()
if cfg_obj.cmake_path is not None:
cmakefile_dir = os.path.join(project_dir, cfg_obj.cmake_path)
else:
cmakefile_dir = project_dir
if self._project._is_lua_project():
cmakefile_dir = os.path.join(project_dir, 'frameworks')
# get the project name
if cfg_obj.project_name is not None:
self.project_name = cfg_obj.project_name
else:
f = open(os.path.join(cmakefile_dir, 'CMakeLists.txt'), 'r')
for line in f.readlines():
if "set(APP_NAME " in line:
self.project_name = re.search('APP_NAME ([^\)]+)\)', line).group(1)
break
if cfg_obj.build_dir is not None:
build_dir = os.path.join(project_dir, cfg_obj.build_dir)
else:
build_dir = os.path.join(project_dir, 'linux-build')
if not os.path.exists(build_dir):
os.makedirs(build_dir)
with cocos.pushd(build_dir):
build_mode = 'Debug' if self._is_debug_mode() else 'Release'
debug_state = 'ON' if self._is_debug_mode() else 'OFF'
self._run_cmd('cmake -DCMAKE_BUILD_TYPE=%s -DDEBUG_MODE=%s %s' % (build_mode, debug_state, os.path.relpath(cmakefile_dir, build_dir)))
with cocos.pushd(build_dir):
self._run_cmd('make -j%s' % self._jobs)
# move file
output_dir = self._output_dir
if os.path.exists(output_dir):
shutil.rmtree(output_dir)
os.makedirs(output_dir)
if cfg_obj.build_result_dir is not None:
result_dir = os.path.join(build_dir, 'bin', cfg_obj.build_result_dir)
else:
result_dir = os.path.join(build_dir, 'bin')
cocos.copy_files_in_dir(result_dir, output_dir)
self.run_root = output_dir
if self._no_res:
res_dir = os.path.join(output_dir, "Resources")
self._remove_res(res_dir)
if self._project._is_script_project() and self._compile_script:
cocos.Logging.warning(MultiLanguage.get_string('COMPILE_WARNING_NOT_SUPPORT_COMPILE_SCRIPT'))
cocos.Logging.info(MultiLanguage.get_string('COMPILE_INFO_BUILD_SUCCEED'))
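    # Hedged sketch of the two commands run above for a debug build of a lua project
    # (the relative CMake path is illustrative):
    #   cmake -DCMAKE_BUILD_TYPE=Debug -DDEBUG_MODE=ON ../frameworks
    #   make -j<self._jobs>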
def get_wp8_product_id(self, manifest_file):
# get the product id from manifest
from xml.dom import minidom
ret = None
try:
doc_node = minidom.parse(manifest_file)
root_node = doc_node.documentElement
app_node = root_node.getElementsByTagName("App")[0]
ret = app_node.attributes["ProductID"].value
ret = ret.strip("{}")
except:
raise cocos.CCPluginError(MultiLanguage.get_string('COMPILE_ERROR_MANIFEST_PARSE_FAILED_FMT', manifest_file),
cocos.CCPluginError.ERROR_PARSE_FILE)
return ret
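    # Hedged sketch of the manifest fragment parsed above (a minimal WMAppManifest.xml;
    # the GUID is a placeholder):
    #   <App ProductID="{12345678-1234-1234-1234-123456789abc}" ... />
    # The method returns the GUID with the surrounding braces stripped.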
def build_wp8(self):
if not self._platforms.is_wp8_active():
return
proj_path = self._project.get_project_dir()
sln_path = self._platforms.project_path()
output_dir = self._output_dir
cocos.Logging.info(MultiLanguage.get_string('COMPILE_INFO_BUILDING'))
# get the solution file & project name
cfg_obj = self._platforms.get_current_config()
if cfg_obj.sln_file is not None:
sln_name = cfg_obj.sln_file
if cfg_obj.project_name is None:
raise cocos.CCPluginError(MultiLanguage.get_string('COMPILE_ERROR_CFG_NOT_FOUND_FMT',
(cocos_project.Win32Config.KEY_PROJECT_NAME,
cocos_project.Win32Config.KEY_SLN_FILE,
cocos_project.Project.CONFIG)),
cocos.CCPluginError.ERROR_WRONG_CONFIG)
else:
name = cfg_obj.project_name
else:
name, sln_name = self.checkFileByExtention(".sln", sln_path)
if not sln_name:
message = MultiLanguage.get_string('COMPILE_ERROR_SLN_NOT_FOUND')
raise cocos.CCPluginError(message, cocos.CCPluginError.ERROR_PATH_NOT_FOUND)
wp8_projectdir = cfg_obj.wp8_proj_path
# build the project
self.project_name = name
projectPath = os.path.join(sln_path, sln_name)
build_mode = 'Debug' if self._is_debug_mode() else 'Release'
self.build_vs_project(projectPath, self.project_name, build_mode, self.vs_version)
# copy files
build_folder_path = os.path.join(wp8_projectdir, cfg_obj.build_folder_path, build_mode)
if not os.path.isdir(build_folder_path):
message = MultiLanguage.get_string('COMPILE_ERROR_BUILD_PATH_NOT_FOUND_FMT', build_folder_path)
raise cocos.CCPluginError(message, cocos.CCPluginError.ERROR_PATH_NOT_FOUND)
        # create the output dir if it does not exist
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# copy xap
files = os.listdir(build_folder_path)
proj_xap_name = "%s_%s_x86.xap" % (self.project_name, build_mode)
for filename in files:
if filename == proj_xap_name:
file_path = os.path.join(build_folder_path, filename)
cocos.Logging.info(MultiLanguage.get_string('COMPILE_INFO_COPYING_FMT', filename))
shutil.copy(file_path, output_dir)
break
# get the manifest file path
manifest_file = os.path.join(wp8_projectdir, cfg_obj.manifest_path)
self.product_id = self.get_wp8_product_id(manifest_file)
self.run_root = output_dir
self.xap_file_name = proj_xap_name
def build_wp8_1(self):
if not self._platforms.is_wp8_1_active():
return
wp8_1_projectdir = self._platforms.project_path()
output_dir = self._output_dir
cocos.Logging.info(MultiLanguage.get_string('COMPILE_INFO_BUILDING'))
# get the solution file & project name
cfg_obj = self._platforms.get_current_config()
if cfg_obj.sln_file is not None:
sln_name = cfg_obj.sln_file
if cfg_obj.project_name is None:
raise cocos.CCPluginError(MultiLanguage.get_string('COMPILE_ERROR_CFG_NOT_FOUND_FMT',
(cocos_project.Win32Config.KEY_PROJECT_NAME,
cocos_project.Win32Config.KEY_SLN_FILE,
cocos_project.Project.CONFIG)),
cocos.CCPluginError.ERROR_WRONG_CONFIG)
else:
name = cfg_obj.project_name
else:
name, sln_name = self.checkFileByExtention(".sln", wp8_1_projectdir)
if not sln_name:
message = MultiLanguage.get_string('COMPILE_ERROR_SLN_NOT_FOUND')
raise cocos.CCPluginError(message, cocos.CCPluginError.ERROR_PATH_NOT_FOUND)
name = "%s.WindowsPhone" % name
# build the project
self.project_name = name
projectPath = os.path.join(wp8_1_projectdir, sln_name)
build_mode = 'Debug' if self._is_debug_mode() else 'Release'
self.build_vs_project(projectPath, self.project_name, build_mode, self.vs_version)
def build_metro(self):
if not self._platforms.is_metro_active():
return
metro_projectdir = self._platforms.project_path()
output_dir = self._output_dir
cocos.Logging.info(MultiLanguage.get_string('COMPILE_INFO_BUILDING'))
# get the solution file & project name
cfg_obj = self._platforms.get_current_config()
if cfg_obj.sln_file is not None:
sln_name = cfg_obj.sln_file
if cfg_obj.project_name is None:
raise cocos.CCPluginError(MultiLanguage.get_string('COMPILE_ERROR_CFG_NOT_FOUND_FMT',
(cocos_project.Win32Config.KEY_PROJECT_NAME,
cocos_project.Win32Config.KEY_SLN_FILE,
cocos_project.Project.CONFIG)),
cocos.CCPluginError.ERROR_WRONG_CONFIG)
else:
name = cfg_obj.project_name
else:
name, sln_name = self.checkFileByExtention(".sln", metro_projectdir)
if not sln_name:
message = MultiLanguage.get_string('COMPILE_ERROR_SLN_NOT_FOUND')
raise cocos.CCPluginError(message, cocos.CCPluginError.ERROR_PATH_NOT_FOUND)
name = "%s.Windows" % name
# build the project
self.project_name = name
projectPath = os.path.join(metro_projectdir, sln_name)
build_mode = 'Debug' if self._is_debug_mode() else 'Release'
self.build_vs_project(projectPath, self.project_name, build_mode, self.vs_version)
def checkFileByExtention(self, ext, path):
filelist = os.listdir(path)
for fullname in filelist:
name, extention = os.path.splitext(fullname)
if extention == ext:
return name, fullname
return (None, None)
def run(self, argv, dependencies):
self.parse_args(argv)
cocos.Logging.info(MultiLanguage.get_string('COMPILE_INFO_BUILD_MODE_FMT', self._mode))
self._update_build_cfg()
target_platform = self._platforms.get_current_platform()
args_build_copy = self._custom_step_args.copy()
language = self._project.get_language()
action_str = 'compile_%s' % language
target_str = 'compile_for_%s' % target_platform
cocos.DataStatistic.stat_event('compile', action_str, target_str)
# invoke the custom step: pre-build
self._project.invoke_custom_step_script(cocos_project.Project.CUSTOM_STEP_PRE_BUILD, target_platform, args_build_copy)
self.build_android()
self.build_ios()
self.build_mac()
self.build_win32()
self.build_web()
self.build_linux()
self.build_wp8()
self.build_wp8_1()
self.build_metro()
# invoke the custom step: post-build
self._project.invoke_custom_step_script(cocos_project.Project.CUSTOM_STEP_POST_BUILD, target_platform, args_build_copy)
if len(self.end_warning) > 0:
cocos.Logging.warning(self.end_warning)
| {
"content_hash": "5bb139718c11a97c02116b5fcc17d610",
"timestamp": "",
"source": "github",
"line_count": 1552,
"max_line_length": 146,
"avg_line_length": 42.0444587628866,
"alnum_prop": 0.5541354420486414,
"repo_name": "tianxiawuzhei/cocos-quick-cpp",
"id": "0e3550d64c091d69d3f2c58500bc8190d69b76c5",
"size": "65513",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "publibs/cocos2dx/tools/cocos2d-console/plugins/plugin_compile/project_compile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3045"
},
{
"name": "C",
"bytes": "8698821"
},
{
"name": "C++",
"bytes": "37233468"
},
{
"name": "CMake",
"bytes": "296289"
},
{
"name": "Csound Document",
"bytes": "16092"
},
{
"name": "GLSL",
"bytes": "146423"
},
{
"name": "HTML",
"bytes": "124487"
},
{
"name": "Java",
"bytes": "749389"
},
{
"name": "JavaScript",
"bytes": "8302288"
},
{
"name": "Lua",
"bytes": "3866164"
},
{
"name": "Makefile",
"bytes": "110508"
},
{
"name": "Objective-C",
"bytes": "3210120"
},
{
"name": "Objective-C++",
"bytes": "877662"
},
{
"name": "Python",
"bytes": "1047921"
},
{
"name": "Ruby",
"bytes": "2623"
},
{
"name": "Shell",
"bytes": "57475"
}
],
"symlink_target": ""
} |
from kay.routing import (
ViewGroup, Rule
)
view_groups = [
ViewGroup(
Rule('/', endpoint='index', view='config.views.index'),
)
]
| {
"content_hash": "9b791cb691889571ae5e714d96446dcd",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 63,
"avg_line_length": 16.88888888888889,
"alnum_prop": 0.5855263157894737,
"repo_name": "yosukesuzuki/deep-link-app",
"id": "2097fba1a2462e417da65eac95f5e5594023d55f",
"size": "191",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project/config/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1704"
},
{
"name": "CoffeeScript",
"bytes": "3819"
},
{
"name": "HTML",
"bytes": "42909"
},
{
"name": "JavaScript",
"bytes": "153058"
},
{
"name": "Python",
"bytes": "781189"
}
],
"symlink_target": ""
} |
from collections import defaultdict
import re
from StringIO import StringIO
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.utils.translation import ugettext as _
from commcare_translations import load_translations
from corehq.apps.app_manager import app_strings
from corehq.apps.app_manager.dbaccessors import get_app
from corehq.apps.app_manager.decorators import no_conflict_require_POST, \
require_can_edit_apps
from corehq.apps.app_manager.translations import \
expected_bulk_app_sheet_headers, expected_bulk_app_sheet_rows, \
process_bulk_app_translation_upload
from corehq.apps.translations import system_text_sources
from corehq.util.spreadsheets.excel import WorkbookJSONReader
from couchexport.export import export_raw
from couchexport.models import Format
from couchexport.shortcuts import export_response
from dimagi.utils.decorators.view import get_file
from dimagi.utils.logging import notify_exception
def get_index_for_defaults(langs):
try:
return langs.index("en")
except ValueError:
return 0
@no_conflict_require_POST
@require_can_edit_apps
@get_file("bulk_upload_file")
def upload_bulk_ui_translations(request, domain, app_id):
def _html_message(header_text, messages):
message = header_text + "<br>"
for prop in messages:
message += "<li>%s</li>" % prop
return message
success = False
try:
app = get_app(domain, app_id)
trans_dict, error_properties, warnings = process_ui_translation_upload(
app, request.file
)
if error_properties:
message = _html_message(_("Upload failed. We found problems with the following translations:"),
error_properties)
messages.error(request, message, extra_tags='html')
else:
# update translations only if there were no errors
app.translations = dict(trans_dict)
app.save()
success = True
if warnings:
message = _html_message(_("Upload succeeded, but we found following issues for some properties"),
warnings)
messages.warning(request, message, extra_tags='html')
except Exception:
notify_exception(request, 'Bulk Upload Translations Error')
messages.error(request, _("Something went wrong! Update failed. We're looking into it"))
if success:
messages.success(request, _("UI Translations Updated!"))
return HttpResponseRedirect(reverse('app_languages', args=[domain, app_id]))
@require_can_edit_apps
def download_bulk_ui_translations(request, domain, app_id):
app = get_app(domain, app_id)
temp = build_ui_translation_download_file(app)
return export_response(temp, Format.XLS_2007, "translations")
@require_can_edit_apps
def download_bulk_app_translations(request, domain, app_id):
app = get_app(domain, app_id)
headers = expected_bulk_app_sheet_headers(app)
rows = expected_bulk_app_sheet_rows(app)
temp = StringIO()
data = [(k, v) for k, v in rows.iteritems()]
export_raw(headers, data, temp)
return export_response(temp, Format.XLS_2007, "bulk_app_translations")
@no_conflict_require_POST
@require_can_edit_apps
@get_file("bulk_upload_file")
def upload_bulk_app_translations(request, domain, app_id):
app = get_app(domain, app_id)
msgs = process_bulk_app_translation_upload(app, request.file)
app.save()
for msg in msgs:
# Add the messages to the request object.
        # msg[0] should be a function like django.contrib.messages.error.
        # msg[1] should be a string.
msg[0](request, msg[1])
return HttpResponseRedirect(
reverse('app_languages', args=[domain, app_id])
)
def process_ui_translation_upload(app, trans_file):
workbook = WorkbookJSONReader(trans_file)
translations = workbook.get_worksheet(title='translations')
commcare_ui_strings = load_translations('en', 2).keys()
default_trans = get_default_translations_for_download(app)
lang_with_defaults = app.langs[get_index_for_defaults(app.langs)]
trans_dict = defaultdict(dict)
# Use this to hard fail and not update any translations
error_properties = []
# Use this to pass warnings without failing hard
warnings = []
for row in translations:
if row["property"] not in commcare_ui_strings:
# Add a warning for unknown properties, but still add them to the translation dict
warnings.append(row["property"] + " is not a known CommCare UI string, but we added it anyway")
for lang in app.langs:
if row.get(lang):
all_parameters = re.findall("\$.*?}", row[lang])
for param in all_parameters:
if not re.match("\$\{[0-9]+}", param):
error_properties.append(row["property"] + ' - ' + row[lang])
if not (lang_with_defaults == lang and
row[lang] == default_trans.get(row["property"], "")):
trans_dict[lang].update({row["property"]: row[lang]})
return trans_dict, error_properties, warnings
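# A small illustration of the placeholder check above: parameters must look like
# ${<number>}; anything else found in an uploaded cell is reported as an error.
#   re.findall("\$.*?}", "Saved ${0} of ${total}")   # -> ['${0}', '${total}']
#   re.match("\$\{[0-9]+}", "${0}")                  # matches
#   re.match("\$\{[0-9]+}", "${total}")              # None, so the row is flagged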
def build_ui_translation_download_file(app):
properties = tuple(["property"] + app.langs + ["platform"])
temp = StringIO()
headers = (("translations", properties),)
row_dict = {}
for i, lang in enumerate(app.langs):
index = i + 1
trans_dict = app.translations.get(lang, {})
for prop, trans in trans_dict.iteritems():
if prop not in row_dict:
row_dict[prop] = [prop]
num_to_fill = index - len(row_dict[prop])
row_dict[prop].extend(["" for i in range(num_to_fill)] if num_to_fill > 0 else [])
row_dict[prop].append(trans)
rows = row_dict.values()
all_prop_trans = get_default_translations_for_download(app)
rows.extend([[t] for t in sorted(all_prop_trans.keys()) if t not in row_dict])
def fillrow(row):
num_to_fill = len(properties) - len(row)
row.extend(["" for i in range(num_to_fill)] if num_to_fill > 0 else [])
return row
def add_default(row):
row_index = get_index_for_defaults(app.langs) + 1
if not row[row_index]:
# If no custom translation exists, replace it.
row[row_index] = all_prop_trans.get(row[0], "")
return row
def add_sources(row):
platform_map = {
"CommCareAndroid": "Android",
"CommCareJava": "Java",
"ODK": "Android",
"JavaRosa": "Java",
}
source = system_text_sources.SOURCES.get(row[0], "")
row[-1] = platform_map.get(source, "")
return row
rows = [add_sources(add_default(fillrow(row))) for row in rows]
data = (("translations", tuple(rows)),)
export_raw(headers, data, temp)
return temp
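# The workbook produced above has a single "translations" sheet whose header row is
# ("property", <one column per app language>, "platform"); e.g. for
# app.langs == ["en", "hin"] a data row might look like (cell values illustrative):
#   ("some.ui.property", "<en text>", "<hin text>", "Android")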
def get_default_translations_for_download(app):
return app_strings.CHOICES[app.translation_strategy].get_default_translations('en')
| {
"content_hash": "60915b0bc44260de8bc2beebdd765db5",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 109,
"avg_line_length": 36.881443298969074,
"alnum_prop": 0.643885394828791,
"repo_name": "qedsoftware/commcare-hq",
"id": "cb04c8e0a327572f845f0b70d64420dbf9d55554",
"size": "7155",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/apps/app_manager/views/translations.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "508392"
},
{
"name": "HTML",
"bytes": "2869325"
},
{
"name": "JavaScript",
"bytes": "2395360"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "125298"
},
{
"name": "Python",
"bytes": "14670713"
},
{
"name": "Shell",
"bytes": "37514"
}
],
"symlink_target": ""
} |
"""
Run variety of evaluation metrics on Logistic Regression classifier.
@copyright: The Broad Institute of MIT and Harvard 2015
"""
import argparse, sys, os
from utils import gen_predictor
sys.path.append(os.path.abspath('./utils'))
from evaluate import design_matrix, run_eval, get_misses
def prefix():
return "lreg"
def title():
return "Logistic Regression"
def pred(test_filename, train_filename, param_filename):
X, y = design_matrix(test_filename, train_filename)
predictor = gen_predictor(param_filename)
probs = predictor(X)
return probs, y
def eval(test_filename, train_filename, param_filename, method, **kwparams):
X, y = design_matrix(test_filename, train_filename)
predictor = gen_predictor(param_filename)
probs = predictor(X)
return run_eval(probs, y, method, **kwparams)
def miss(test_filename, train_filename, param_filename):
fn = test_filename.replace("-data", "-index")
meta = None
if os.path.exists(fn):
with open(fn, "r") as idxfile:
meta = idxfile.readlines()
X, y, df = design_matrix(test_filename, train_filename, get_df=True)
predictor = gen_predictor(param_filename)
probs = predictor(X)
indices = get_misses(probs, y)
for i in indices:
print "----------------"
if meta: print "META:",",".join(meta[i].split(",")).strip()
print df.ix[i]
return indices
def evaluate(test_filename, train_filename, param_filename, method):
# Average calibrations and discriminations
if method == "caldis":
eval(test_filename, train_filename, param_filename, 1)
# Plot each method on same calibration plot
elif method == "calplot":
eval(test_filename, train_filename, param_filename, 2, test_file=test_filename)
# Average precision, recall, and F1 scores
elif method == "report":
eval(test_filename, train_filename, param_filename, 3)
# Plot each method on same ROC plot
elif method == "roc":
eval(test_filename, train_filename, param_filename, 4, pltshow=True)
# Average confusion matrix
elif method == "confusion":
eval(test_filename, train_filename, param_filename, 5)
    # Show the misclassified test examples
elif method == "misses":
miss(test_filename, train_filename, param_filename)
else:
raise Exception("Invalid method given")
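# Example invocation using the defaults declared below (adjust the paths to your
# own data layout):
#   python lreg/eval.py -t ./models/test/training-data-completed.csv \
#       -T ./models/test/testing-data.csv -p ./models/test/lreg-params -m roc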
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--train', nargs=1, default=["./models/test/training-data-completed.csv"],
help="Filename for training set")
parser.add_argument('-T', '--test', nargs=1, default=["./models/test/testing-data.csv"],
help="Filename for testing set")
parser.add_argument('-p', '--param', nargs=1, default=["./models/test/lreg-params"],
help="Filename for neural network parameters")
parser.add_argument('-m', '--method', nargs=1, default=["report"],
help="Evaluation method: caldis, calplot, report, roc, confusion, misses")
args = parser.parse_args()
evaluate(args.test[0], args.train[0], args.param[0], args.method[0]) | {
"content_hash": "662672258788533c79b3cfc0b4d9764d",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 104,
"avg_line_length": 39.675,
"alnum_prop": 0.6493383742911153,
"repo_name": "broadinstitute/ebola-predictor",
"id": "2177411c60dfce04cd7d4cdc1b4c2351e1f784f4",
"size": "3174",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lreg/eval.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "273299"
},
{
"name": "R",
"bytes": "3987"
}
],
"symlink_target": ""
} |
"""
Shortest path algorithms for unweighted graphs.
"""
import networkx as nx
__all__ = ['bidirectional_shortest_path',
'single_source_shortest_path',
'single_source_shortest_path_length',
'single_target_shortest_path',
'single_target_shortest_path_length',
'all_pairs_shortest_path',
'all_pairs_shortest_path_length',
'predecessor']
def single_source_shortest_path_length(G, source, cutoff=None):
"""Compute the shortest path lengths from source to all reachable nodes.
Parameters
----------
G : NetworkX graph
source : node
Starting node for path
cutoff : integer, optional
Depth to stop the search. Only paths of length <= cutoff are returned.
Returns
-------
lengths : dict
Dict keyed by node to shortest path length to source.
Examples
--------
>>> G = nx.path_graph(5)
>>> length = nx.single_source_shortest_path_length(G, 0)
>>> length[4]
4
>>> for node in length:
... print('{}: {}'.format(node, length[node]))
0: 0
1: 1
2: 2
3: 3
4: 4
See Also
--------
shortest_path_length
"""
if source not in G:
raise nx.NodeNotFound('Source {} is not in G'.format(source))
if cutoff is None:
cutoff = float('inf')
nextlevel = {source: 1}
return dict(_single_shortest_path_length(G.adj, nextlevel, cutoff))
def _single_shortest_path_length(adj, firstlevel, cutoff):
"""Yields (node, level) in a breadth first search
Shortest Path Length helper function
Parameters
----------
adj : dict
Adjacency dict or view
firstlevel : dict
starting nodes, e.g. {source: 1} or {target: 1}
cutoff : int or float
level at which we stop the process
"""
seen = {} # level (number of hops) when seen in BFS
level = 0 # the current level
nextlevel = firstlevel # dict of nodes to check at next level
while nextlevel and cutoff >= level:
thislevel = nextlevel # advance to next level
nextlevel = {} # and start a new list (fringe)
for v in thislevel:
if v not in seen:
seen[v] = level # set the level of vertex v
nextlevel.update(adj[v]) # add neighbors of v
yield (v, level)
level += 1
del seen
def single_target_shortest_path_length(G, target, cutoff=None):
"""Compute the shortest path lengths to target from all reachable nodes.
Parameters
----------
G : NetworkX graph
target : node
Target node for path
cutoff : integer, optional
Depth to stop the search. Only paths of length <= cutoff are returned.
Returns
-------
lengths : iterator
(source, shortest path length) iterator
Examples
--------
>>> G = nx.path_graph(5, create_using=nx.DiGraph())
>>> length = dict(nx.single_target_shortest_path_length(G, 4))
>>> length[0]
4
>>> for node in range(5):
... print('{}: {}'.format(node, length[node]))
0: 4
1: 3
2: 2
3: 1
4: 0
See Also
--------
single_source_shortest_path_length, shortest_path_length
"""
if target not in G:
raise nx.NodeNotFound('Target {} is not in G'.format(target))
if cutoff is None:
cutoff = float('inf')
# handle either directed or undirected
adj = G.pred if G.is_directed() else G.adj
nextlevel = {target: 1}
return _single_shortest_path_length(adj, nextlevel, cutoff)
def all_pairs_shortest_path_length(G, cutoff=None):
"""Computes the shortest path lengths between all nodes in `G`.
Parameters
----------
G : NetworkX graph
cutoff : integer, optional
Depth at which to stop the search. Only paths of length at most
`cutoff` are returned.
Returns
-------
lengths : iterator
(source, dictionary) iterator with dictionary keyed by target and
shortest path length as the key value.
Notes
-----
The iterator returned only has reachable node pairs.
Examples
--------
>>> G = nx.path_graph(5)
>>> length = dict(nx.all_pairs_shortest_path_length(G))
>>> for node in [0, 1, 2, 3, 4]:
... print('1 - {}: {}'.format(node, length[1][node]))
1 - 0: 1
1 - 1: 0
1 - 2: 1
1 - 3: 2
1 - 4: 3
>>> length[3][2]
1
>>> length[2][2]
0
"""
length = single_source_shortest_path_length
# TODO This can be trivially parallelized.
for n in G:
yield (n, length(G, n, cutoff=cutoff))
def bidirectional_shortest_path(G, source, target):
"""Returns a list of nodes in a shortest path between source and target.
Parameters
----------
G : NetworkX graph
source : node label
starting node for path
target : node label
ending node for path
Returns
-------
path: list
List of nodes in a path from source to target.
Raises
------
NetworkXNoPath
If no path exists between source and target.
See Also
--------
shortest_path
Notes
-----
This algorithm is used by shortest_path(G, source, target).
"""
if source not in G or target not in G:
msg = 'Either source {} or target {} is not in G'
raise nx.NodeNotFound(msg.format(source, target))
# call helper to do the real work
results = _bidirectional_pred_succ(G, source, target)
pred, succ, w = results
# build path from pred+w+succ
path = []
# from source to w
while w is not None:
path.append(w)
w = pred[w]
path.reverse()
# from w to target
w = succ[path[-1]]
while w is not None:
path.append(w)
w = succ[w]
return path
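# A quick illustration in the doctest style used elsewhere in this module:
#   >>> G = nx.path_graph(5)
#   >>> nx.bidirectional_shortest_path(G, 0, 4)
#   [0, 1, 2, 3, 4]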
def _bidirectional_pred_succ(G, source, target):
"""Bidirectional shortest path helper.
Returns (pred, succ, w) where
pred is a dictionary of predecessors from w to the source, and
succ is a dictionary of successors from w to the target.
"""
# does BFS from both source and target and meets in the middle
if target == source:
return ({target: None}, {source: None}, source)
# handle either directed or undirected
if G.is_directed():
Gpred = G.pred
Gsucc = G.succ
else:
Gpred = G.adj
Gsucc = G.adj
    # predecessors and successors in search
pred = {source: None}
succ = {target: None}
# initialize fringes, start with forward
forward_fringe = [source]
reverse_fringe = [target]
while forward_fringe and reverse_fringe:
if len(forward_fringe) <= len(reverse_fringe):
this_level = forward_fringe
forward_fringe = []
for v in this_level:
for w in Gsucc[v]:
if w not in pred:
forward_fringe.append(w)
pred[w] = v
if w in succ: # path found
return pred, succ, w
else:
this_level = reverse_fringe
reverse_fringe = []
for v in this_level:
for w in Gpred[v]:
if w not in succ:
succ[w] = v
reverse_fringe.append(w)
if w in pred: # found path
return pred, succ, w
raise nx.NetworkXNoPath("No path between %s and %s." % (source, target))
def single_source_shortest_path(G, source, cutoff=None):
"""Compute shortest path between source
and all other nodes reachable from source.
Parameters
----------
G : NetworkX graph
source : node label
Starting node for path
cutoff : integer, optional
Depth to stop the search. Only paths of length <= cutoff are returned.
Returns
-------
lengths : dictionary
Dictionary, keyed by target, of shortest paths.
Examples
--------
>>> G = nx.path_graph(5)
>>> path = nx.single_source_shortest_path(G, 0)
>>> path[4]
[0, 1, 2, 3, 4]
Notes
-----
The shortest path is not necessarily unique. So there can be multiple
paths between the source and each target node, all of which have the
same 'shortest' length. For each target node, this function returns
only one of those paths.
See Also
--------
shortest_path
"""
if source not in G:
raise nx.NodeNotFound("Source {} not in G".format(source))
def join(p1, p2):
return p1 + p2
if cutoff is None:
cutoff = float('inf')
nextlevel = {source: 1} # list of nodes to check at next level
paths = {source: [source]} # paths dictionary (paths to key from source)
return dict(_single_shortest_path(G.adj, nextlevel, paths, cutoff, join))
def _single_shortest_path(adj, firstlevel, paths, cutoff, join):
"""Returns shortest paths
Shortest Path helper function
Parameters
----------
adj : dict
Adjacency dict or view
firstlevel : dict
starting nodes, e.g. {source: 1} or {target: 1}
paths : dict
paths for starting nodes, e.g. {source: [source]}
cutoff : int or float
level at which we stop the process
join : function
function to construct a path from two partial paths. Requires two
list inputs `p1` and `p2`, and returns a list. Usually returns
`p1 + p2` (forward from source) or `p2 + p1` (backward from target)
"""
level = 0 # the current level
nextlevel = firstlevel
while nextlevel and cutoff > level:
thislevel = nextlevel
nextlevel = {}
for v in thislevel:
for w in adj[v]:
if w not in paths:
paths[w] = join(paths[v], [w])
nextlevel[w] = 1
level += 1
return paths
def single_target_shortest_path(G, target, cutoff=None):
"""Compute shortest path to target from all nodes that reach target.
Parameters
----------
G : NetworkX graph
target : node label
Target node for path
cutoff : integer, optional
Depth to stop the search. Only paths of length <= cutoff are returned.
Returns
-------
lengths : dictionary
Dictionary, keyed by target, of shortest paths.
Examples
--------
>>> G = nx.path_graph(5, create_using=nx.DiGraph())
>>> path = nx.single_target_shortest_path(G, 4)
>>> path[0]
[0, 1, 2, 3, 4]
Notes
-----
The shortest path is not necessarily unique. So there can be multiple
paths between the source and each target node, all of which have the
same 'shortest' length. For each target node, this function returns
only one of those paths.
See Also
--------
shortest_path, single_source_shortest_path
"""
if target not in G:
raise nx.NodeNotFound("Target {} not in G".format(target))
def join(p1, p2):
return p2 + p1
    # handle either directed or undirected
adj = G.pred if G.is_directed() else G.adj
if cutoff is None:
cutoff = float('inf')
nextlevel = {target: 1} # list of nodes to check at next level
paths = {target: [target]} # paths dictionary (paths to key from source)
return dict(_single_shortest_path(adj, nextlevel, paths, cutoff, join))
def all_pairs_shortest_path(G, cutoff=None):
"""Compute shortest paths between all nodes.
Parameters
----------
G : NetworkX graph
cutoff : integer, optional
Depth at which to stop the search. Only paths of length at most
`cutoff` are returned.
Returns
-------
lengths : dictionary
Dictionary, keyed by source and target, of shortest paths.
Examples
--------
>>> G = nx.path_graph(5)
>>> path = dict(nx.all_pairs_shortest_path(G))
>>> print(path[0][4])
[0, 1, 2, 3, 4]
See Also
--------
floyd_warshall()
"""
# TODO This can be trivially parallelized.
for n in G:
yield (n, single_source_shortest_path(G, n, cutoff=cutoff))
def predecessor(G, source, target=None, cutoff=None, return_seen=None):
"""Returns dict of predecessors for the path from source to all nodes in G
Parameters
----------
G : NetworkX graph
source : node label
Starting node for path
target : node label, optional
Ending node for path. If provided only predecessors between
source and target are returned
cutoff : integer, optional
Depth to stop the search. Only paths of length <= cutoff are returned.
Returns
-------
pred : dictionary
Dictionary, keyed by node, of predecessors in the shortest path.
Examples
--------
>>> G = nx.path_graph(4)
>>> list(G)
[0, 1, 2, 3]
>>> nx.predecessor(G, 0)
{0: [], 1: [0], 2: [1], 3: [2]}
"""
if source not in G:
raise nx.NodeNotFound("Source {} not in G".format(source))
level = 0 # the current level
nextlevel = [source] # list of nodes to check at next level
seen = {source: level} # level (number of hops) when seen in BFS
pred = {source: []} # predecessor dictionary
while nextlevel:
level = level + 1
thislevel = nextlevel
nextlevel = []
for v in thislevel:
for w in G[v]:
if w not in seen:
pred[w] = [v]
seen[w] = level
nextlevel.append(w)
elif (seen[w] == level): # add v to predecessor list if it
pred[w].append(v) # is at the correct level
if (cutoff and cutoff <= level):
break
if target is not None:
if return_seen:
if target not in pred:
return ([], -1) # No predecessor
return (pred[target], seen[target])
else:
if target not in pred:
return [] # No predecessor
return pred[target]
else:
if return_seen:
return (pred, seen)
else:
return pred
| {
"content_hash": "f33ef769a33438903bf1a6f88db34799",
"timestamp": "",
"source": "github",
"line_count": 521,
"max_line_length": 79,
"avg_line_length": 27.52591170825336,
"alnum_prop": 0.5664876926295237,
"repo_name": "sserrot/champion_relationships",
"id": "c10666cbdc7b85fff979ed862478f064842c5cb1",
"size": "14596",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "venv/Lib/site-packages/networkx/algorithms/shortest_paths/unweighted.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "128"
},
{
"name": "HTML",
"bytes": "18324224"
},
{
"name": "Jupyter Notebook",
"bytes": "9131072"
},
{
"name": "Python",
"bytes": "10702"
}
],
"symlink_target": ""
} |
from Crypto.Cipher import AES
import base64
import hashlib
import pkcs7
from AesCiphers import Ciphers
class ECB(Ciphers):
def __init__(self):
pass
def Encrypt(self, PlainText, SecurePassword):
pw_bytes = SecurePassword.encode('utf-8')
text_bytes = PlainText.encode('utf-8')
hash = Hash()
key = hash.HashAlgo(pw_bytes)
cipher = AES.new(key, AES.MODE_ECB)
pad_text = pkcs7.encode(text_bytes)
msg = cipher.encrypt(pad_text)
EncodeMsg = base64.b64encode(msg).decode("utf-8")
return EncodeMsg
def Decrypt(self, Encrypted, SecurePassword):
decodbase64 = base64.b64decode(Encrypted.encode("utf-8"))
pw_bytes = SecurePassword.encode('utf-8')
hash = Hash()
key = hash.HashAlgo(pw_bytes)
cipher = AES.new(key, AES.MODE_ECB)
msg = cipher.decrypt(decodbase64)
pad_text = pkcs7.decode(msg)
decryptedString = pad_text.decode('utf-8')
return decryptedString;
class Hash:
def HashAlgo(self, pwBytes):
m = hashlib.md5()
m.update(pwBytes)
key = m.digest()
return key;
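# Hedged usage sketch (requires PyCrypto plus the local pkcs7/AesCiphers modules;
# the password and plaintext below are illustrative only):
#   ecb = ECB()
#   token = ecb.Encrypt("hello world", "my secret password")
#   ecb.Decrypt(token, "my secret password")   # -> "hello world"
# Note: ECB mode with an MD5-derived key is shown because that is what this class
# implements, not because it is a construction to copy into new designs.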
| {
"content_hash": "fb6a4c80cecdf195b8edae5e35a7dfad",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 65,
"avg_line_length": 26.976744186046513,
"alnum_prop": 0.6163793103448276,
"repo_name": "PanagiotisDrakatos/Light_IoT_CryptoDevice",
"id": "17d3b57f6302be4d739d111083869d170358183b",
"size": "1160",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PythonClient/Ciphers/Symmetric/AesECB.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "7711739"
},
{
"name": "Java",
"bytes": "119791"
},
{
"name": "JavaScript",
"bytes": "29746"
},
{
"name": "Python",
"bytes": "34964"
}
],
"symlink_target": ""
} |
class SessionHelper:
def __init__(self, app):
self.app = app
def login(self, username, password):
wd = self.app.wd
self.app.open_home_page()
wd.find_element_by_name("user").click()
wd.find_element_by_name("user").clear()
wd.find_element_by_name("user").send_keys(username)
wd.find_element_by_name("pass").click()
wd.find_element_by_name("pass").clear()
wd.find_element_by_name("pass").send_keys(password)
wd.find_element_by_css_selector('input[type="submit"]').click()
def logout(self):
wd = self.app.wd
wd.find_element_by_link_text("Wyloguj się").click()
def ensure_logout(self):
wd = self.app.wd
if self.is_logged_in():
self.logout()
def is_logged_in(self):
wd = self.app.wd
return len(wd.find_elements_by_link_text("Wyloguj się")) > 0
def ensure_login(self, username, password):
wd = self.app.wd
if self.is_logged_in():
if self.is_logged_in_as(username):
return
else:
self.logout()
self.login(username, password)
def is_logged_in_as(self, username):
wd = self.app.wd
return self.get_logged_user() == username
def get_logged_user(self):
wd = self.app.wd
return wd.find_element_by_xpath("//div/div[1]/form/b").text[1:-1]
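# Hedged usage sketch: "app" is assumed to be the fixture object that owns the
# Selenium WebDriver ("wd") and open_home_page(); the credentials are placeholders.
#   session = SessionHelper(app)
#   session.ensure_login("admin", "secret")
#   ... run the test steps ...
#   session.ensure_logout()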
| {
"content_hash": "8831adeca84227f1a8419ceaa7ee6a13",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 73,
"avg_line_length": 31.42222222222222,
"alnum_prop": 0.5671852899575672,
"repo_name": "PrzemyslawUrbanczyk/pu_zadanie1",
"id": "3d34c0a7b6b7097929cfa716b005fe7d8d7b52a8",
"size": "1416",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fixture/session.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "40584"
}
],
"symlink_target": ""
} |
import time
import pypinyin
from pinyindict import *
'''
node|[0] |[1] |[2] |[3] |[4] |[5] |[6] |[7]
----|-------|------|-----|------|----|-----|-------|-------
|keyword|weight|mtime|parent|type|level|weight2|children
note: only leaf nodes carry their own weight and mtime; for other nodes both start at 0
'''
KEYWORD = 0
WEIGHT = 1
MTIME = 2
PARENT = 3
TYPE =4
LEVEL=5
WEIGHT2 = 6
CHILDREN = 7
NORMAL_NODE = 0
PINYIN_NODE = 1
INDEX_FLOOR = 4
tree_nodes = [['root', 0, 0, -1, NORMAL_NODE, 0, 0, []]]
node_index = {}
'''
update weight upward till the root
'''
def add_weight(node_id, weight):
node = tree_nodes[node_id]
diff = node[WEIGHT2]
node[WEIGHT2] = max(weight, diff)
diff = node[WEIGHT2] - diff
if diff == 0:
return
node[WEIGHT] += diff
node[MTIME] = time.time()
while node[PARENT] != 0:
node = tree_nodes[node[PARENT]]
node[WEIGHT] += diff
'''
create a new node in the trie
@keyword: trie keyword
@parent: parent node id
'''
def new_node(keyword, parent=0, ntype=NORMAL_NODE):
level = tree_nodes[parent][LEVEL] + 1
node = [keyword, 0, 0, parent, ntype, level, 0, []]
node_id = len(tree_nodes)
tree_nodes.append(node)
#update parent node, add into node index if nessory
parent_node = tree_nodes[parent]
if parent > 0:#first level has no children list
if ntype == PINYIN_NODE:
parent_node[CHILDREN].append(node_id)
else:
parent_node[CHILDREN].insert(0, node_id)
if len(parent_node[CHILDREN]) == INDEX_FLOOR:
            #too many children; build a node index
for nid in parent_node[CHILDREN]:
if tree_nodes[nid][TYPE] == NORMAL_NODE:
node_index[str(parent)+'-'+tree_nodes[nid][KEYWORD]] = nid
else:
node_index[str(parent)+'_'+tree_nodes[nid][KEYWORD]] = nid
elif parent == 0 or len(parent_node[CHILDREN]) > INDEX_FLOOR:
if ntype == NORMAL_NODE:
node_index[str(parent)+'-'+keyword] = node_id
else:
node_index[str(parent)+'_'+keyword] = node_id
return node_id
'''
match 'word' from children
@node_id: parent id
@word: keyword to be matched
Match the character `word` among the children of node_id and return that
child's node id (0 if no child matches).
'''
def child_match(node_id, word, node_type):
child_id = 0
node = tree_nodes[node_id]
if node_id == 0 or len(node[CHILDREN]) >= INDEX_FLOOR:
if node_type == NORMAL_NODE:
key = str(node_id)+'-'+word
else:
key = str(node_id)+'_'+word
if key in node_index:
child_id = node_index[key]
else:
for nid in node[CHILDREN]:
if tree_nodes[nid][TYPE] != node_type:
continue
if tree_nodes[nid][KEYWORD] == word:
child_id = nid
break
return child_id
'''
maximum prefix match; if a pinyin node is matched, stop and return immediately
@words: word list
@parent: begin from this node to match
Starting from the given parent (the root, parent=0, by default), match each
character in turn; every successful match descends one level in the tree and
advances one character in `words`.
'''
def max_prefix_match(words, types, parent=0):
node_id = parent
level = 0
while True:
nid = child_match(node_id, words[level], types[level])
if nid == 0:
break
level += 1
node_id = nid
if tree_nodes[nid][TYPE] == PINYIN_NODE:
break
if level == len(words):
break
return level, node_id
'''
convert term into a word list; if py_split is True, ascii runs are split
completely into pinyin syllables (returning empty lists when that fails),
otherwise each ascii character becomes its own word
'''
def term2words(term, py_split=False):
term = unicode(term).lower()
words = []
types = []
estr = ''
for word in term:
if ord(word)>=19904 and ord(word)<=40895:
if estr != '':
pylist = None
if py_split:
pylist = estr2pinyin(estr)
if pylist != None:
words += pylist
for _ in pylist:
types.append(PINYIN_NODE)
else:
return [], []
else:
for c in estr:
words.append(c)
types.append(NORMAL_NODE)
estr = ''
words.append(word)
types.append(NORMAL_NODE)
elif ord(word)>=ord('a') and ord(word)<=ord('z'):
estr += word
elif ord(word)>=ord('0') and ord(word)<=ord('9'):
estr += word
if estr != '':
pylist = None
if py_split:
pylist = estr2pinyin(estr)
if pylist != None:
words += pylist
for _ in pylist:
types.append(PINYIN_NODE)
else:
return [], []
else:
for c in estr:
words.append(c)
types.append(NORMAL_NODE)
return words, types
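# Hedged example of the conversion above (assuming estr2pinyin can split the ascii
# run "nanjing" into valid pinyin syllables):
#   term2words(u'北京nanjing')        -> ([u'北', u'京', u'n', u'a', u'n', u'j', u'i', u'n', u'g'],
#                                         [0, 0, 0, 0, 0, 0, 0, 0, 0])
#   term2words(u'北京nanjing', True)  -> ([u'北', u'京', u'nan', u'jing'], [0, 0, 1, 1])
# where 0/1 stand for NORMAL_NODE/PINYIN_NODE.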
def push_pinyin_node(parent, child, py):
py_node = child_match(parent, py, PINYIN_NODE)
if py_node == 0:
py_node = new_node(py, parent, PINYIN_NODE)
tree_nodes[py_node][CHILDREN].append(child)
else:
conflict = False
for cid in tree_nodes[py_node][CHILDREN]:
if cid == child:
conflict = True
break
if conflict == False:
tree_nodes[py_node][CHILDREN].append(child)
'''
add one term into the trie
@term: words list, UNICODE
'''
def add_term(term, weight):
words, types = term2words(term)
if len(words) == 0: #avoid '......'
return
#max prefix match
level, node_id = max_prefix_match(words, types)
    # if the whole character sequence already exists, just update node_id's weight
if level == len(words):#exist already
add_weight(node_id, weight)#may lead to parent weight bigger than weight sum of all children
else:
for word in words[level:]:
#insert normal node
parent = node_id
node_id = new_node(word, parent)
if len(word)==1 and ord(word)>=19904 and ord(word)<=40895:
#insert pinyin node
pys = pypinyin.pinyin(word, style=pypinyin.NORMAL, heteronym=True)
for py in pys[0]:
#complete pinyin
push_pinyin_node(parent, node_id, py)
push_pinyin_node(parent, node_id, py[0])
if py[0]=='c' or py[0]=='s' or py[0]=='z':
if py[1] == 'h':
push_pinyin_node(parent, node_id, py[:2])
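                    # e.g. for the character '是' (pinyin "shi") the nodes "shi",
                    # "s" and "sh" all end up pointing at the same child node.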
add_weight(node_id, weight)
'''
match max prefix and return suggestion list
'''
def match_words(words, types, parent=0):
    last_match_list = [parent] #can be returned as the max prefix match result because some prefix matched
    max_match_list = [parent] #try to match a longer prefix
isbottom = False
while len(max_match_list) > 0:
last_match_list = max_match_list
if isbottom:
break
max_match_list = []
for nid in last_match_list:
idx = tree_nodes[nid][LEVEL] - tree_nodes[parent][LEVEL]
level, max_node_id = max_prefix_match(words[idx:], types[idx:], nid)
if tree_nodes[max_node_id][LEVEL] == len(words):#match the whole words
isbottom = True
if level == 0: #match fail
continue
elif tree_nodes[max_node_id][TYPE] == PINYIN_NODE: #match pinyin node
for child in tree_nodes[max_node_id][CHILDREN]:
max_match_list.append(child)
else: #match normal node
max_match_list.append(max_node_id)
return last_match_list
def suggest(parent, suggestion_num):
if len(tree_nodes[parent][CHILDREN]) == 0:
return [parent]
result = []
tot_weight = float(int(tree_nodes[parent][WEIGHT]*10000))/10000
sum_weight = tree_nodes[parent][WEIGHT2]
have_suggestion_num = 0
for child in tree_nodes[parent][CHILDREN]:
node = tree_nodes[child]
if node[TYPE] == PINYIN_NODE:
break
sum_weight += node[WEIGHT]
num = int(sum_weight*suggestion_num/tot_weight)
this_suggestion_num = num - have_suggestion_num
have_suggestion_num = num
if this_suggestion_num > 0:
result += suggest(child, this_suggestion_num)
return result
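# Rough illustration of the allocation above: with tot_weight 10 (and the
# parent's WEIGHT2 at 0) and child weights 6, 3 and 1, asking for 8 suggestions
# hands the children roughly 4, 3 and 1 slots via cumulative-share rounding,
# recursing into each child for its share.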
TERM_TYPE_HANZI=1
TERM_TYPE_ESTR=2
TERM_TYPE_MIX=3
def get_suggestion(term, suggestion_num=8):
candidate_list = []
term = unicode(term)
term_type = TERM_TYPE_HANZI
if len(term) == len(str(term)):
term_type = TERM_TYPE_ESTR
elif len(term)*3 == len(str(term)):
term_type = TERM_TYPE_HANZI
else:
term_type = TERM_TYPE_MIX
#direct match
direct_level = 0
direct_words, direct_types = term2words(term)
if len(direct_words) == 0: #avoid '......'
return []
direct_list = match_words(direct_words, direct_types)
direct_level = tree_nodes[direct_list[0]][LEVEL]
if direct_level == len(direct_words):
candidate_list += direct_list
#pinyin match
pinyin_level = 0
pinyin_words, pinyin_types = term2words(term, True)
if term_type != TERM_TYPE_HANZI and len(pinyin_words) > 0: #have valid pinyin in words
pinyin_list = match_words(pinyin_words, pinyin_types)
pinyin_level = tree_nodes[pinyin_list[0]][LEVEL]
if pinyin_level == len(pinyin_words):
candidate_list += pinyin_list
if len(candidate_list) == 0:
#direct-pinyin
if direct_level > 0:#have matched some prefix
dpy_words, dpy_types = term2words(term[direct_level:], True)
if len(dpy_words) > 0 and dpy_types[0] == 1:
for nid in direct_list:
dpy_list = match_words(dpy_words, dpy_types, nid)
dpy_level = tree_nodes[dpy_list[0]][LEVEL]
if dpy_level == direct_level + len(dpy_words):
candidate_list += dpy_list
#pinyin-direct match
if pinyin_level > 0 and pinyin_types[0] == 1:#start with pinyin and have matched some pinyin
pyd_level = 0
for i in range(pinyin_level):
pyd_level += len(pinyin_words[i])
pyd_words, pyd_types = term2words(term[pyd_level:])
for nid in pinyin_list:
pyd_list = match_words(pyd_words, pyd_types, nid)
pyd_level = tree_nodes[pyd_list[0]][LEVEL]
if pyd_level == pinyin_level + len(pyd_words):
candidate_list += pyd_list
result = []
tot_weight = 0
sum_weight = 0
have_suggestion_num = 0
for node_id in candidate_list:
tot_weight += tree_nodes[node_id][WEIGHT]
tot_weight = float(int(tot_weight*10000))/10000
for node_id in candidate_list:
sum_weight += tree_nodes[node_id][WEIGHT]
num = int(sum_weight*suggestion_num/tot_weight)
this_suggestion_num = num - have_suggestion_num
have_suggestion_num = num
if this_suggestion_num > 0:
result += suggest(node_id, this_suggestion_num)
#if len(result) < suggestion_num: #relation term suggestion
suggestion = []
for leaf in result:
node_id = leaf
sug = ''
while node_id != 0:
sug = tree_nodes[node_id][KEYWORD] + sug
node_id = tree_nodes[node_id][PARENT]
suggestion.append(sug)
return suggestion
if __name__ == '__main__':
add_term('我的祖国', 1)
add_term('我的世界', 2)
add_term('我的shi姐', 3)
a = get_suggestion('我的')
for s in a:
print s,
print ''
a = get_suggestion('wd')
for s in a:
print s,
print ''
a = get_suggestion('wod')
for s in a:
print s,
print ''
a = get_suggestion('wde')
for s in a:
print s,
print ''
a = get_suggestion('wds')
for s in a:
print s,
print ''
a = get_suggestion('wdsh')
for s in a:
print s,
print ''
a = get_suggestion('wdj')
for s in a:
print s,
print ''
a = get_suggestion('w')
for s in a:
print s,
print ''
| {
"content_hash": "b122a497a233d59bcc2ab214a34d1173",
"timestamp": "",
"source": "github",
"line_count": 404,
"max_line_length": 100,
"avg_line_length": 30.405940594059405,
"alnum_prop": 0.5403777271247151,
"repo_name": "zhaochl/python-utils",
"id": "9e4fde1a1354b2026b951169a89c3914d91e4630",
"size": "12483",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sug/demo/tire_tree.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Ada",
"bytes": "89080"
},
{
"name": "Assembly",
"bytes": "149595"
},
{
"name": "Batchfile",
"bytes": "178"
},
{
"name": "C",
"bytes": "3350564"
},
{
"name": "C#",
"bytes": "55626"
},
{
"name": "C++",
"bytes": "137569"
},
{
"name": "CLIPS",
"bytes": "5291"
},
{
"name": "CMake",
"bytes": "8098"
},
{
"name": "CSS",
"bytes": "2267"
},
{
"name": "DIGITAL Command Language",
"bytes": "27303"
},
{
"name": "HTML",
"bytes": "190373"
},
{
"name": "M4",
"bytes": "12659"
},
{
"name": "Makefile",
"bytes": "110268"
},
{
"name": "Module Management System",
"bytes": "15237"
},
{
"name": "Objective-C",
"bytes": "21403"
},
{
"name": "Pascal",
"bytes": "70297"
},
{
"name": "Perl",
"bytes": "3895"
},
{
"name": "Python",
"bytes": "1945280"
},
{
"name": "Roff",
"bytes": "43432"
},
{
"name": "SAS",
"bytes": "16043"
},
{
"name": "Shell",
"bytes": "813423"
},
{
"name": "Smalltalk",
"bytes": "5908"
},
{
"name": "WebAssembly",
"bytes": "14000"
}
],
"symlink_target": ""
} |
from django.core import mail
from kitsune.kbadge.tests import AwardFactory, BadgeFactory
from kitsune.sumo.tests import TestCase
class AwardNotificationTests(TestCase):
def test_notification(self):
# Note: Need to do this import here so the
# notify_award_recipient function handles the
# badge_was_awarded signal. This works fine in production
# because badges gets loaded by kitsune.kbadge in startup.
from kitsune.kbadge import badges # noqa
new_badge = BadgeFactory()
# Check the mail queue first.
self.assertEqual(0, len(mail.outbox))
# Create an award and save it. This triggers the notification.
AwardFactory(description="yay!", badge=new_badge)
self.assertEqual(1, len(mail.outbox))
# TODO: test contents--not doing that now because it's a
# mockup.
| {
"content_hash": "9bf915e408b6d6730d1f839104beffa4",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 70,
"avg_line_length": 33.65384615384615,
"alnum_prop": 0.6845714285714286,
"repo_name": "mozilla/kitsune",
"id": "0f789bd811814b2d105331a76a05eb8659728955",
"size": "875",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "kitsune/kbadge/tests/test_awards.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1156"
},
{
"name": "Dockerfile",
"bytes": "3027"
},
{
"name": "HTML",
"bytes": "535448"
},
{
"name": "JavaScript",
"bytes": "658477"
},
{
"name": "Jinja",
"bytes": "4837"
},
{
"name": "Makefile",
"bytes": "2193"
},
{
"name": "Nunjucks",
"bytes": "68656"
},
{
"name": "Python",
"bytes": "2827116"
},
{
"name": "SCSS",
"bytes": "240092"
},
{
"name": "Shell",
"bytes": "10759"
},
{
"name": "Svelte",
"bytes": "26864"
}
],
"symlink_target": ""
} |
# Documentation:
# qute://help/configuring.html
# qute://help/settings.html
# Don't load autoconfig.yml (this is required as of v2.0.0)
config.load_autoconfig(False)
# Name of the session to load by default
c.session.default_name = "default"
# Automatically save the current session
c.auto_save.session = True
# Disable <video> autoplay
c.content.autoplay = False
config.set("content.autoplay", True, "play.google.com/music/*")
# Prevent websites from asking for geolocation
c.content.geolocation = False
# Disable host blocking; this is redundant as I handle host blocking at the
# gateway level for all devices on my LAN and locally, using /etc/hosts, for
# non-static devices (e.g. laptops)
c.content.blocking.enabled = False
# Enable JavaScript
c.content.javascript.enabled = True
config.set("content.javascript.enabled", True, "file://*")
config.set("content.javascript.enabled", True, "chrome://*/*")
config.set("content.javascript.enabled", True, "qute://*/*")
# Set default content.notifications behavior for specific URLs
c.content.notifications.enabled = False
config.set("content.notifications.enabled", True, "calendar.google.com")
config.set("content.notifications.enabled", True, "play.google.com")
config.set("content.notifications.enabled", True, "music.youtube.com")
config.set("content.notifications.enabled", True, "messages.google.com")
config.set("content.notifications.enabled", True, "www.fastmail.com")
# Set default content.register_protocol_handler behavior for specific URLs
c.content.register_protocol_handler = False
config.set("content.register_protocol_handler", True, "calendar.google.com")
config.set("content.register_protocol_handler", True, "www.fastmail.com")
# Which interfaces to expose via WebRTC
c.content.webrtc_ip_handling_policy = "default-public-interface-only"
# Directory to save downloads
c.downloads.location.directory = "~/downloads"
# Open new tabs in the background
c.tabs.background = True
# Open new tabs at the end of the stack
c.tabs.new_position.related = "last"
# Page to open if :open -t/-b/-w is used without URL.
c.url.default_page = "about:blank"
# Set colors for private browsing mode
# Note that "private browsing mode" above refers to Qutebrowser's internal
# "private mode", and as such these settings only apply to windows opened in
# private mode (`open -p`) within a session using this configuration file.
c.colors.statusbar.command.private.bg = "#7A378B"
c.colors.statusbar.command.private.fg = "#FDF8FF"
c.colors.statusbar.private.bg = "#68228B"
c.colors.statusbar.private.fg = "#FDF8FF"
# This ensures that any changes to the default value for `url.searchengines` are
# not propagated without updating the value below, explicitly.
c.url.searchengines = {
"DEFAULT": "https://duckduckgo.com/?q={}",
"!a": "https://www.amazon.com/s?k={}",
"!ao": "https://www.amazon.com/gp/your-account/order-history/ref=ppx_yo_dt_b_search?search={}",
"!ap": "https://www.archlinux.org/packages/?q={}",
"!aur": "https://aur.archlinux.org/packages/?K={}",
"!aw": "https://wiki.archlinux.org/index.php?search={}",
"!cb": "https://www.crunchbase.com/textsearch?q={}",
"!crates": "https://crates.io/search?q={}",
"!domain": "https://domains.google.com/registrar/search?searchTerm={}",
"!evo": "https://www.evo.com/shop?text={}",
"!gh": "https://github.com/search?q={}",
"!gm": "https://play.google.com/store/search?c=movies&q={}",
"!li": "https://www.linkedin.com/search/results/all/?keywords={}",
"!lic": "https://www.linkedin.com/search/results/people/?facetNetwork=%5B%22F%22%5D&keywords={}",
"!lim": "https://www.linkedin.com/messaging/?searchTerm={}",
"!lip": "https://www.linkedin.com/search/results/people/?keywords={}",
"!lij": "https://www.linkedin.com/jobs/search/?keywords={}",
"!mdn": "https://developer.mozilla.org/en-US/search?q={}",
"!music": "https://music.youtube.com/search?q={}",
"!od": "https://www.opendota.com/search?q={}",
"!r": "https://www.reddit.com/search?q={}",
"!rsw": "https://runescape.wiki/w/Special:Search?search={}",
"!rust": "https://doc.rust-lang.org/std/?search={}",
"!rustn": "https://doc.rust-lang.org/nightly/std/?search={}",
"!tf": "https://registry.terraform.io/search/?q={}",
"!tfm": "https://registry.terraform.io/search/modules?q={}",
"!tfp": "https://registry.terraform.io/search/providers?q={}",
"!w": "https://en.wikipedia.org/w/index.php?search={}",
"!wh": "https://www.wowhead.com/search?q={}",
"!yt": "https://www.youtube.com/results?search_query={}",
"!yth": "https://www.youtube.com/feed/history?query={}",
"!ytm": "https://music.youtube.com/search?q={}",
}
# Hide window decoration (e.g. Gnome title bar)
c.window.hide_decoration = True
# Format the window title
c.window.title_format = "{perc}{current_title}"
################################################################################
# Aliases
################################################################################
c.aliases = {
"read": "spawn --userscript readability-js",
}
################################################################################
# Keybinds
#
# The default method for binding keys to different modes is fairly cumbersome
# when binding lots of keys. The `keybinds` dict below has a single top-level
# key for each supported mode, whose value is another dict of key:command
# mappings.
################################################################################
keybinds = {
"normal": {
"<alt+0>": "tab-focus 10",
"<alt+9>": "tab-focus 9",
"<ctrl+j>": "mode-enter passthrough",
"<ctrl+pgdown>": None,
"<ctrl+pgup>": None,
"<ctrl+shift+n>": None,
"<ctrl+shift+o>": "set-cmd-text -s :open -p",
"<ctrl+shift+w>": None,
"<ctrl+v>": None,
"<ctrl+w>": None,
"g$": None,
"g^": None,
"po": "open -p -- {clipboard}",
"pt": "open -t -- {clipboard}",
},
"caret": {
"<ctrl+j>": "mode-leave",
"<escape>": None,
},
"command": {
"<Tab>": None,
"<ctrl+j>": "mode-leave",
"<ctrl+n>": "completion-item-focus next",
"<ctrl+p>": "completion-item-focus prev",
"<ctrl+shift+n>": "completion-item-focus --history next",
"<ctrl+shift+p>": "completion-item-focus --history prev",
"<escape>": None,
},
"hint": {
"<ctrl+j>": "mode-leave",
"<escape>": None,
},
"insert": {
"<ctrl+j>": "mode-leave",
"<escape>": None,
},
"passthrough": {
"<ctrl+j>": "mode-leave",
"<shift+escape>": None,
},
"prompt": {
"<ctrl+j>": "mode-leave",
"<escape>": None,
},
"register": {
"<ctrl+j>": "mode-leave",
"<escape>": None,
},
"yesno": {
"<ctrl+j>": "mode-leave",
"<escape>": None,
},
}
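# A value of None in the maps above unbinds qutebrowser's default mapping for
# that key in that mode; any other value is bound as the command to run.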
for mode, binds in keybinds.items():
for k, v in binds.items():
if v is None:
config.unbind(k, mode=f"{mode}")
else:
config.bind(k, v, mode=f"{mode}")
| {
"content_hash": "9627afa35afd764b55e85929fe3d4368",
"timestamp": "",
"source": "github",
"line_count": 188,
"max_line_length": 101,
"avg_line_length": 38.1436170212766,
"alnum_prop": 0.6053549016873518,
"repo_name": "bddenhartog/dotfiles",
"id": "0fdce9aa2870e591365e526011096e45de2185cb",
"size": "7171",
"binary": false,
"copies": "1",
"ref": "refs/heads/trunk",
"path": "qutebrowser/.config/qutebrowser/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Shell",
"bytes": "18864"
},
{
"name": "Vim script",
"bytes": "1525"
}
],
"symlink_target": ""
} |
from django.shortcuts import render
import social.apps.django_app.default.models as sm
from django.contrib.auth import authenticate, login, logout
from django.http import HttpResponse
def home(request):
#logout(request)
uname=""
if request.method == 'POST' and 'submit' in request.POST:
submit = request.POST['submit']
if submit=="sign-out":
logout(request)
if '_auth_user_id' in request.session:
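        # Look up the social-auth record for the authenticated user id stored in
        # the session and remember the resulting username for later requests.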
uname=sm.UserSocialAuth.objects.get(
user_id=int(request.session['_auth_user_id'])
).user
request.session['uname']=str(uname)
return render(request,'home.html',{'uname': uname})
def show(request):
return HttpResponse("Hello %s"%request.session['uname']) | {
"content_hash": "8a935193f2edad6c39c2e04a80cc07c3",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 61,
"avg_line_length": 34.9,
"alnum_prop": 0.7106017191977078,
"repo_name": "wasit7/PythonDay",
"id": "253b301b47792dee20b00a142f854d6066e6de58",
"size": "698",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/mysite3/myapp/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "16"
},
{
"name": "CSS",
"bytes": "48579"
},
{
"name": "HTML",
"bytes": "36397"
},
{
"name": "JavaScript",
"bytes": "95386"
},
{
"name": "Jupyter Notebook",
"bytes": "227765"
},
{
"name": "Python",
"bytes": "75250"
}
],
"symlink_target": ""
} |
from collections import defaultdict
from urllib.parse import unquote
from colander import All, Boolean as BaseBoolean, drop, Length, Mapping, null, Number, OneOf, required, Sequence, Tuple
from pyramid.compat import is_nonstr_iter
from pyramid.settings import asbool
from translationstring import TranslationString
from zope.interface import implementer
from ines import DEFAULT_METHODS, MARKER
from ines.authorization import Everyone, NotAuthenticated
from ines.convert import camelcase, to_string, maybe_list
from ines.interfaces import ISchemaView
from ines.route import lookup_for_route_params, lookup_for_route_permissions
from ines.views.fields import FilterByType, OneOfWithDescription
from ines.utils import different_values
@implementer(ISchemaView)
class SchemaView(object):
def __init__(
self,
schema_route_name,
route_name=None,
list_route_name=None,
title=None,
description=None,
csv_route_name=None,
request_methods=None,
postman_folder_name=None):
self.schema_route_name = schema_route_name
self.route_name = route_name
self.list_route_name = list_route_name
self.csv_route_name = csv_route_name
self.title = title
self.description = description
self.request_methods = request_methods or DEFAULT_METHODS
self.postman_folder_name = postman_folder_name
def __call__(self, context, request):
return self.get_schema_nodes(request)
def get_route_names(self):
route_names = []
if self.route_name:
route_names.append(self.route_name)
if self.list_route_name:
route_names.append(self.list_route_name)
if self.csv_route_name:
route_names.append(self.csv_route_name)
return route_names
def validate_permission(self, request, permissions):
permissions = maybe_list(permissions)
if request.authenticated:
return any((p in permissions for p in request.authenticated.get_principals()))
else:
return bool(Everyone in permissions or NotAuthenticated in permissions)
def get_schema_nodes(self, request):
cache_key = 'schema build cache %s' % self.schema_route_name
schema_expire_cache = request.settings.get('schema_expire_cache', MARKER)
nodes = request.cache.get(cache_key, MARKER, expire=schema_expire_cache)
if nodes is MARKER:
nodes = defaultdict(dict)
global_types = defaultdict(list)
global_models = defaultdict(list)
keep_types_keys = defaultdict(set)
keep_models_keys = defaultdict(set)
to_translate = defaultdict(list)
for route_name in self.get_route_names():
info = self.get_route_info(request, route_name)
if not info:
continue
intr_route, url, url_keys = info
url_keys = [camelcase(k) for k in url_keys]
schemas = request.registry.config.lookup_input_schema(route_name, self.request_methods)
schemas.extend(request.registry.config.lookup_output_schema(route_name, self.request_methods))
for schema in schemas:
fields = []
types = defaultdict(list)
models = defaultdict(list)
if schema.schema:
details = self.construct_structure(
request,
schema.schema,
schema.schema_type,
types,
models,
to_translate)
if isinstance(details, dict):
fields.append(details)
else:
fields.extend(details)
if schema.schema_type == 'request' and schema.fields_schema:
details = self.construct_structure(
request,
schema.fields_schema,
schema.schema_type,
types,
models,
to_translate)
if isinstance(details, dict):
fields.append(details)
else:
fields.extend(details)
if schema.route_name != self.csv_route_name:
key = schema.request_method.lower()
if key == 'get' and schema.route_name == self.list_route_name:
key = 'list'
else:
key = 'csv'
nodes[key][schema.schema_type] = fields
nodes[key]['routeName'] = route_name
nodes[key]['method'] = schema.request_method.upper()
nodes[key]['url'] = url
nodes[key]['urlKeys'] = url_keys
nodes[key]['renderer'] = schema.renderer.lower()
if types:
keep_types_keys[key].update(types.keys())
for k, values in types.items():
global_types[k].extend(values)
if models:
keep_models_keys[key].update(models.keys())
for k, values in models.items():
global_models[k].extend(values)
if global_types:
if to_translate:
to_translate['fieldTypes'] = defaultdict(set)
nodes['fieldTypes'] = lookup_common_fields(
global_types,
to_translate,
ignore_key='fieldType',
is_field_type=True)
nodes['keep_types_keys'] = keep_types_keys
if global_models:
if to_translate:
to_translate['models'] = defaultdict(set)
nodes['models'] = lookup_common_fields(global_models, to_translate, ignore_key='model')
nodes['keep_models_keys'] = keep_models_keys
if to_translate:
nodes['to_translate'] = to_translate
request.cache.put(cache_key, nodes, expire=schema_expire_cache)
permissions_cache = {}
types_keys = set()
types = nodes.pop('fieldTypes', None)
keep_types_keys = nodes.pop('keep_types_keys', None)
models_keys = set()
models = nodes.pop('models', None)
keep_models_keys = nodes.pop('keep_models_keys', None)
to_translate = nodes.pop('to_translate', None)
fields_translation = {}
models_translation = {}
if to_translate:
translator = request.translator
fields_translation = to_translate.pop('fieldTypes', fields_translation)
models_translation = to_translate.pop('models', fields_translation)
for key, details in nodes.items():
route_name = details['routeName']
if route_name not in permissions_cache:
info = self.get_route_info(request, route_name)
permissions_cache[route_name] = lookup_for_route_permissions(request.registry, info[0])
method_permissions = maybe_list(permissions_cache[route_name].get(details['method']))
if not self.validate_permission(request, method_permissions):
nodes.pop(key)
continue
if keep_types_keys:
types_keys.update(keep_types_keys[key])
if keep_models_keys:
models_keys.update(keep_models_keys[key])
if types_keys:
nodes['fieldTypes'] = {}
for k in types_keys:
nodes['fieldTypes'][k] = details = types[k]
field_fields = fields_translation.get(k)
if field_fields:
for field in field_fields:
if field == 'options':
for option in details[field]:
if option['text']:
option['text'] = translator(option['text'])
else:
details[field] = translator(details[field])
if models_keys:
nodes['models'] = {}
for k in models_keys:
nodes['models'][k] = details = models[k]
model_fields = models_translation.get(k)
if model_fields:
for field in model_fields:
if field == 'options':
for option in details[field]:
if option['text']:
option['text'] = translator(option['text'])
else:
details[field] = translator(details[field])
if to_translate:
for key, values in to_translate.items():
for value in values:
text_value = value.get(key)
if text_value:
value[key] = translator(text_value)
print(text_value, value[key])
return nodes
def get_route_info(self, request, route_name):
intr_route = request.registry.introspector.get('routes', route_name)
if intr_route is not None:
route = intr_route['object']
params = {k: '{{%s}}' % camelcase(k) for k in lookup_for_route_params(route)}
url = '%s%s' % (request.application_url, unquote(route.generate(params)))
return intr_route, url, params.keys()
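    # Rough sketch (illustrative, not taken from this project's docs) of what
    # construct_structure returns for a simple colander mapping schema with a
    # single string child called "name": a dict like
    #   {'type': 'model', 'model': 'theSchemaName', 'title': ..., 'fields': [
    #       {'fieldType': 'name', 'type': 'string', 'title': ..., ...}]}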
def construct_structure(self, request, schema, schema_type, types, models, to_translate, parent_name=None):
if isinstance(schema.typ, Sequence):
child = schema.children[0]
if not schema.name:
schema = child
name = camelcase(schema.name)
details = {
'model': name,
'type': 'sequence',
'title': schema.title,
'description': schema.description or ''}
models[name].append(details)
if isinstance(schema.title, TranslationString):
to_translate['title'].append(details)
if isinstance(schema.description, TranslationString):
to_translate['description'].append(details)
# Find and add child
child_details = self.construct_structure(
request,
child,
schema_type,
types,
models,
to_translate,
parent_name=schema.name)
if isinstance(details, dict):
if isinstance(child.typ, Mapping):
details['type'] = 'model'
details.update(child_details)
else:
details['fields'] = [child_details]
else:
details['fields'] = child_details
return details
elif isinstance(schema.typ, Tuple):
raise NotImplementedError('Tuple type need to be implemented')
elif isinstance(schema.typ, Mapping):
fields = []
for child in schema.children:
fields.append(self.construct_structure(
request,
child,
schema_type,
types,
models,
to_translate,
parent_name=schema.name))
name = schema.name or parent_name
if not name:
return fields
name = camelcase(name)
details = {
'type': 'model',
'title': schema.title,
'description': schema.description or '',
'fields': fields,
'model': name}
models[name].append(details)
if isinstance(schema.title, TranslationString):
to_translate['title'].append(details)
if isinstance(schema.description, TranslationString):
to_translate['description'].append(details)
return details
else:
name = camelcase(schema.name)
details = {
'fieldType': name,
'title': schema.title,
'description': schema.description or ''}
if isinstance(schema.title, TranslationString):
to_translate['title'].append(details)
if isinstance(schema.description, TranslationString):
to_translate['description'].append(details)
if hasattr(schema, 'model_reference'):
model = schema.model_reference['model']
model_key = schema.model_reference.get('key') or 'key'
model_query = schema.model_reference.get('query') or 'name'
model_application = schema.model_reference.get('application_name') or request.application_name
details['modelReference'] = {
'applicationName': model_application,
'schemaName': schema.model_reference['schema'],
'key': camelcase(model[model_key].name),
'model': camelcase(model.name),
'queryField': camelcase(model[model_query].name)}
types[name].append(details)
if isinstance(schema.typ, FilterByType):
for cls in schema.typ.__class__.__mro__[1:]:
if cls is not FilterByType:
details['type'] = str(cls.__name__).lower()
break
details['filter'] = True
elif hasattr(schema, 'schema_type_name'):
details['type'] = camelcase(schema.schema_type_name)
else:
details['type'] = get_colander_type_name(schema.typ)
request_validation = []
if schema.validator:
if isinstance(schema.validator, All):
validators = schema.validator.validators
elif not is_nonstr_iter(schema.validator):
validators = [schema.validator]
else:
validators = schema.validator
for validator in validators:
if isinstance(validator, OneOfWithDescription):
details['options'] = []
add_option = details['options'].append
save_to_translate = False
for choice, description in validator.choices_with_descripton:
add_option({'value': choice, 'text': description})
save_to_translate = save_to_translate or isinstance(description, TranslationString)
if save_to_translate:
to_translate['options'].append(details)
elif isinstance(validator, OneOf):
details['options'] = []
add_option = details['options'].append
for choice in validator.choices:
add_option({
'value': choice,
'text': choice.replace('_', ' ').title()})
else:
if isinstance(validator, Length):
validation_option = {}
if validator.min is not None:
validation_option['min'] = validator.min
if validator.max is not None:
validation_option['max'] = validator.max
else:
validation_option = True
request_validation.append((validator, validation_option))
if hasattr(schema, 'use_when'):
details['useWhen'] = {camelcase(k): v for k, v in schema.use_when.items()}
if schema_type == 'request':
validation = {}
if schema.required:
validation['required'] = True
if request_validation:
for validator, validation_option in request_validation:
validation[get_colander_type_name(validator)] = validation_option
if validation:
details['validation'] = validation
default = schema.missing
else:
if schema.missing is drop:
details['maybeNotSent'] = True
default = schema.default
if (default is not drop
and default is not required
and default is not null):
if isinstance(schema.typ, Number):
default = schema.typ.num(default)
elif isinstance(schema.typ, BaseBoolean):
default = asbool(default)
details['default'] = default
return details
def get_colander_type_name(node):
if hasattr(node, 'schema_type_name'):
return node.schema_type_name
else:
return camelcase(str(node.__class__.__name__).lower())
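# Sketch of lookup_common_fields: when every per-schema entry for a given field
# type (or model) repeats the same value for a key, that value is hoisted out of
# the individual entries into the returned result[name][key] dict.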
def lookup_common_fields(values, to_translate, ignore_key=None, is_field_type=False):
result = defaultdict(dict)
dict_key = 'models'
if is_field_type:
dict_key = 'fieldTypes'
for name, name_list in values.items():
if not name_list:
continue
all_keys = set()
for k in name_list:
all_keys.update(k.keys())
if ignore_key in all_keys:
all_keys.remove(ignore_key)
for key in all_keys:
check_translation = None
if key in to_translate:
check_translation = to_translate[key]
value = MARKER
value_idx = None
for i, name_options in enumerate(name_list):
other_value = name_options.get(key, MARKER)
if other_value is MARKER:
value_idx = None
break
elif value is MARKER:
value = other_value
value_idx = i
elif not different_values(value, other_value):
if check_translation:
idx = lookup_id(check_translation, name_options)
if idx is not None:
check_translation.pop(idx)
name_options.pop(key)
if value_idx is not None:
details = name_list[value_idx]
if check_translation:
idx = lookup_id(check_translation, details)
if idx is not None:
check_translation.pop(idx)
to_translate[dict_key][name].add(key)
result[name][key] = details.pop(key)
return result
def lookup_id(values, value):
for i, v in enumerate(values):
if id(v) == id(value):
return i
| {
"content_hash": "419d0af4c5998970905380e3e0ee6ab8",
"timestamp": "",
"source": "github",
"line_count": 484,
"max_line_length": 119,
"avg_line_length": 40.14462809917355,
"alnum_prop": 0.5086978898610396,
"repo_name": "hugobranquinho/ines",
"id": "8710b3b1a93109982a4efb8f28006e9298f82285",
"size": "19455",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ines/views/schema.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "435125"
}
],
"symlink_target": ""
} |
import os
import sys
import platform
import time
import aiml
import marshal
import glob
import time
import operator
import csv
# AIML Directory
saiml = "/PATH/sam/aiml/"
#saiml = "C:\\PATH\\sam\\aiml\\"
# brain
k = aiml.Kernel()
# setpreds() function
def setpreds():
with open(saiml + 'preds.csv') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
#print((row[0]), (row[1]))
k.setBotPredicate((row[0]), (row[1]))
plat = platform.machine()
osys = os.name
print "Sam for " + osys
print "System Architecture " + plat
#print "Memory " + psutil.virtual_memory()
k.setBotPredicate("architecture", plat)
k.setBotPredicate("os", osys)
# get_oldest_file() function
def get_oldest_file(files, _invert=False):
""" Find and return the oldest file of input file names.
Only one wins tie. Values based on time distance from present.
Use of `_invert` inverts logic to make this a youngest routine,
to be used more clearly via `get_youngest_file`.
"""
gt = operator.lt if _invert else operator.gt
# Check for empty list.
if not files:
return None
# Raw epoch distance.
now = time.time()
# Select first as arbitrary sentinel file, storing name and age.
oldest = files[0], now - os.path.getmtime(files[0])
# Iterate over all remaining files.
for f in files[1:]:
age = now - os.path.getmtime(f)
if gt(age, oldest[1]):
# Set new oldest.
oldest = f, age
# Return just the name of oldest file.
return oldest[0]
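# Illustrative use: get_oldest_file(glob.glob(saiml + "*.aiml")) returns the
# .aiml file with the oldest modification time, or None for an empty list.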
# learn() function
def learn(aimlfiles):
if not aimlfiles:
k.learn(saiml + "xfind.aiml")
for f in aimlfiles[1:]:
k.learn(f)
# brain() function
def brain():
aimlfiles = glob.glob(saiml + "*.aiml")
learn(aimlfiles)
setpreds()
if os.path.isfile(saiml + "sam.ses"):
sessionFile = file(saiml + "sam.ses", "rb")
session = marshal.load(sessionFile)
sessionFile.close()
for pred,value in session.items():
k.setPredicate(pred, value, "sam")
else:
setpreds()
k.saveBrain(saiml + "sam.brn")
if __name__ == "__main__":
brain() | {
"content_hash": "a2af6bdfdc14394d366d12fae3cbc31c",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 68,
"avg_line_length": 25.448275862068964,
"alnum_prop": 0.6147244805781391,
"repo_name": "theneverworks/Sam",
"id": "33cc7effbe7b93a4a503915db9702217557a65cb",
"size": "2856",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/braincompiler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "53814"
}
],
"symlink_target": ""
} |
import csv
import random
import glob
import os
import sys
import time
import math
import numpy
import pylab
N_MONTH = 4
N_DAY_PER_MONTH = 31
BASE_MONTH = 4
TYPE_LENGTH = 4
class User(object):
def __init__(self, id, info):
self.id = id;
self.brands = info.keys()
self.data = dict()
self.test_label = set()
self.train_label = set()
self.weight = [1, 1, 1, 1]
for brandID in self.brands:
brand = info[brandID]
for month, day, action in brand:
p = (month - BASE_MONTH) * 12
                if day > 20:
                    p += 8
                elif day > 10:
                    p += 4
if month == BASE_MONTH + N_MONTH - 1:
if action == 1:
self.test_label.add(brandID)
else:
if not brandID in self.data:
self.data[brandID] = [0, 0, 0, 0]
self.data[brandID][action] += 1
self.weight[action] += 1
self.train_label.add(brandID)
total_buy_action = float(self.weight[1])
self.weight = [1 / (self.weight[idx] / total_buy_action) for idx, num in enumerate(self.weight)]
for brand in self.data.keys():
self.data[brand] = [num * weight for num, weight in zip(self.data[brand], self.weight)]
def __str__(self):
return str(self.id) + ' ' + str(len(self.bands))
class Graph(object):
def __init__(self):
self.adjList = dict()
def addEdge(self, u, v):
if u not in self.adjList:
self.adjList[u] = set()
if v not in self.adjList:
self.adjList[v] = set()
self.adjList[u].add(v)
self.adjList[v].add(u)
class BFS(object):
def __init__(self, graph):
self.graph = graph
self.components = []
isVisited = set()
for u in graph.adjList.keys():
if u in isVisited: continue
comp = set()
comp.add(u)
isVisited.add(u)
queue = []
queue.append(u)
while len(queue) > 0:
u = queue.pop(0)
for v in graph.adjList[u]:
if v not in isVisited:
queue.append(v)
isVisited.add(v)
comp.add(v)
self.components.append(comp)
if __name__ == '__main__':
userInfo = dict()
with open('/home/pumpkin/Documents/project/tmall/dataset/clean.csv', 'rb') as csvfile:
# with open('/home/pumpkin/Documents/project/tmall/dataset/demo.csv', 'rb') as csvfile:
user_table = dict()
brand_table = dict()
user_counter = 0
brand_counter = 0
reader = csv.reader(csvfile, delimiter=',')
for row in reader:
userID, brandID, actionType, month, day = [int(field) for field in row]
if not userID in user_table:
user_table[userID] = user_counter
user_counter += 1
if not brandID in brand_table:
brand_table[brandID] = brand_counter
brand_counter += 1
userID = user_table[userID]
brandID = brand_table[brandID]
if not userID in userInfo:
userInfo[userID] = dict()
user = userInfo[userID]
if brandID not in user:
user[brandID] = []
if month in (4, 5, 6):
day = day - 14
else:
day = day - 15
if day <= 0:
month -= 1
day += 31
band = user[brandID]
band.append((month, day, actionType))
users = []
for (userID, info) in userInfo.iteritems():
users.append(User(userID, info))
g = Graph()
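    # Users and brands share one node id space: brand ids are offset by
    # user_counter below so the user-brand graph stays bipartite.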
for user in users:
for brand in user.train_label:
g.addEdge(user.id, brand + user_counter)
bfs = BFS(g)
sorted(bfs.components, key=lambda e: len(e), reverse=True)
print len(bfs.components)
print bfs.components[1]
# users = [user for user in users if user.id is in bfs.components[0]]
# clean = bfs.components[1].union(bfs.components[2])
clean = bfs.components[1]
user_table_inv = { v: k for k, v in user_table.items() }
brand_table_inv = { v: k for k, v in brand_table.items() }
for user in clean:
if user < user_counter:
print user_table_inv[user]
else:
print brand_table_inv[user - user_counter]
| {
"content_hash": "9e7be9785a75e15cfe238f2f26463076",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 104,
"avg_line_length": 30.104575163398692,
"alnum_prop": 0.5019539730785931,
"repo_name": "wait4pumpkin/tmall",
"id": "848ddb1779b076a8546795a571b7954f34af5d13",
"size": "4655",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "solution/analysis/comp.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Erlang",
"bytes": "16119"
},
{
"name": "Python",
"bytes": "251882"
},
{
"name": "Ruby",
"bytes": "780"
},
{
"name": "Shell",
"bytes": "244"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_spanner_database
description:
- A Cloud Spanner Database which is hosted on a Spanner instance.
short_description: Creates a GCP Database
version_added: '2.7'
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
state:
description:
- Whether the given object should exist in GCP
choices:
- present
- absent
default: present
type: str
name:
description:
- A unique identifier for the database, which cannot be changed after the instance
is created. Values are of the form [a-z][-a-z0-9]*[a-z0-9].
required: true
type: str
extra_statements:
description:
- 'An optional list of DDL statements to run inside the newly created database.
Statements can create tables, indexes, etc. These statements execute atomically
with the creation of the database: if there is an error in any statement, the
database is not created.'
required: false
type: list
instance:
description:
- The instance to create the database on.
- 'This field represents a link to a Instance resource in GCP. It can be specified
in two ways. First, you can place a dictionary with key ''name'' and value of
your resource''s name Alternatively, you can add `register: name-of-resource`
to a gcp_spanner_instance task and then set this instance field to "{{ name-of-resource
}}"'
required: true
type: dict
project:
description:
- The Google Cloud Platform project to use.
type: str
auth_kind:
description:
- The type of credential used.
type: str
required: true
choices:
- application
- machineaccount
- serviceaccount
service_account_contents:
description:
- The contents of a Service Account JSON file, either in a dictionary or as a
JSON string that represents it.
type: jsonarg
service_account_file:
description:
- The path of a Service Account JSON file if serviceaccount is selected as type.
type: path
service_account_email:
description:
- An optional service account email address if machineaccount is selected and
the user does not wish to use the default email.
type: str
scopes:
description:
- Array of scopes to be used
type: list
env_type:
description:
- Specifies which Ansible environment you're running this module within.
- This should not be set unless you know what you're doing.
- This only alters the User Agent string for any API requests.
type: str
notes:
- 'API Reference: U(https://cloud.google.com/spanner/docs/reference/rest/v1/projects.instances.databases)'
- 'Official Documentation: U(https://cloud.google.com/spanner/)'
- For authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE)
  env variable.
- For authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS)
  env variable.
- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL)
env variable.
- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable.
- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
- Environment variables values will only be used if the playbook values are not set.
- The I(service_account_email) and I(service_account_file) options are mutually exclusive.
'''
EXAMPLES = '''
- name: create a instance
gcp_spanner_instance:
name: instance-database
display_name: My Spanner Instance
node_count: 2
labels:
cost_center: ti-1700004
config: regional-us-central1
project: "{{ gcp_project }}"
auth_kind: "{{ gcp_cred_kind }}"
service_account_file: "{{ gcp_cred_file }}"
state: present
register: instance
- name: create a database
gcp_spanner_database:
name: webstore
instance: "{{ instance }}"
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
state: present
'''
RETURN = '''
name:
description:
- A unique identifier for the database, which cannot be changed after the instance
is created. Values are of the form [a-z][-a-z0-9]*[a-z0-9].
returned: success
type: str
extraStatements:
description:
- 'An optional list of DDL statements to run inside the newly created database.
Statements can create tables, indexes, etc. These statements execute atomically
with the creation of the database: if there is an error in any statement, the
database is not created.'
returned: success
type: list
instance:
description:
- The instance to create the database on.
returned: success
type: dict
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict
import json
import time
################################################################################
# Main
################################################################################
def main():
"""Main function"""
module = GcpModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
name=dict(required=True, type='str'),
extra_statements=dict(type='list', elements='str'),
instance=dict(required=True, type='dict'),
)
)
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/spanner.admin']
state = module.params['state']
fetch = fetch_resource(module, self_link(module))
changed = False
if 'instance' in module.params and 'name' in module.params['instance']:
module.params['instance']['name'] = module.params['instance']['name'].split('/')[-1]
if fetch:
if state == 'present':
if is_different(module, fetch):
update(module, self_link(module))
fetch = fetch_resource(module, self_link(module))
changed = True
else:
delete(module, self_link(module))
fetch = {}
changed = True
else:
if state == 'present':
fetch = create(module, collection(module))
changed = True
else:
fetch = {}
fetch.update({'changed': changed})
module.exit_json(**fetch)
def create(module, link):
auth = GcpSession(module, 'spanner')
return wait_for_operation(module, auth.post(link, resource_to_request(module)))
def update(module, link):
module.fail_json(msg="Spanner objects can't be updated to ensure data safety")
def delete(module, link):
auth = GcpSession(module, 'spanner')
return return_if_object(module, auth.delete(link))
def resource_to_request(module):
request = {u'name': module.params.get('name'), u'extraStatements': module.params.get('extra_statements')}
request = encode_request(request, module)
return_vals = {}
for k, v in request.items():
if v or v is False:
return_vals[k] = v
return return_vals
def fetch_resource(module, link, allow_not_found=True):
auth = GcpSession(module, 'spanner')
return return_if_object(module, auth.get(link), allow_not_found)
def self_link(module):
res = {'project': module.params['project'], 'instance': replace_resource_dict(module.params['instance'], 'name'), 'name': module.params['name']}
return "https://spanner.googleapis.com/v1/projects/{project}/instances/{instance}/databases/{name}".format(**res)
def collection(module):
res = {'project': module.params['project'], 'instance': replace_resource_dict(module.params['instance'], 'name')}
return "https://spanner.googleapis.com/v1/projects/{project}/instances/{instance}/databases".format(**res)
def return_if_object(module, response, allow_not_found=False):
# If not found, return nothing.
if allow_not_found and response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError):
module.fail_json(msg="Invalid JSON response with error: %s" % response.text)
result = decode_response(result, module)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
def is_different(module, response):
request = resource_to_request(module)
response = response_to_hash(module, response)
request = decode_response(request, module)
# Remove all output-only from response.
response_vals = {}
for k, v in response.items():
if k in request:
response_vals[k] = v
request_vals = {}
for k, v in request.items():
if k in response:
request_vals[k] = v
return GcpRequest(request_vals) != GcpRequest(response_vals)
# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
return {u'name': module.params.get('name'), u'extraStatements': module.params.get('extra_statements')}
def async_op_url(module, extra_data=None):
if extra_data is None:
extra_data = {}
url = "https://spanner.googleapis.com/v1/{op_id}"
combined = extra_data.copy()
combined.update(module.params)
return url.format(**combined)
def wait_for_operation(module, response):
op_result = return_if_object(module, response)
if op_result is None:
return {}
status = navigate_hash(op_result, ['done'])
wait_done = wait_for_completion(status, op_result, module)
raise_if_errors(wait_done, ['error'], module)
return navigate_hash(wait_done, ['response'])
def wait_for_completion(status, op_result, module):
op_id = navigate_hash(op_result, ['name'])
op_uri = async_op_url(module, {'op_id': op_id})
while not status:
raise_if_errors(op_result, ['error'], module)
time.sleep(1.0)
op_result = fetch_resource(module, op_uri, False)
status = navigate_hash(op_result, ['done'])
return op_result
def raise_if_errors(response, err_path, module):
errors = navigate_hash(response, err_path)
if errors is not None:
module.fail_json(msg=errors)
def decode_response(response, module):
if not response:
return response
if 'name' not in response:
return response
if '/operations/' in response['name']:
return response
response['name'] = response['name'].split('/')[-1]
return response
def encode_request(request, module):
request['create_statement'] = "CREATE DATABASE `{0}`".format(module.params['name'])
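    # The Spanner API expects a DDL create statement instead of a bare name, so
    # a module name of "webstore" becomes: CREATE DATABASE `webstore`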
del request['name']
return request
if __name__ == '__main__':
main()
| {
"content_hash": "311b484e25601db1f41061e06d2ec3cb",
"timestamp": "",
"source": "github",
"line_count": 358,
"max_line_length": 148,
"avg_line_length": 32.100558659217874,
"alnum_prop": 0.6406195614340411,
"repo_name": "thaim/ansible",
"id": "e6e55691100fc5de0aed791d41c87b9c013c1f9a",
"size": "12229",
"binary": false,
"copies": "3",
"ref": "refs/heads/fix-broken-link",
"path": "lib/ansible/modules/cloud/google/gcp_spanner_database.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "246"
}
],
"symlink_target": ""
} |
"""Show apps report."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import click
import pandas as pd
from treadmill import cli
from treadmill.cli.scheduler import fetch_report, print_report
from treadmill import restclient
def init():
"""Return top level command handler."""
@click.command()
@cli.handle_exceptions(restclient.CLI_REST_EXCEPTIONS)
@click.option('--match', help='Server name pattern match')
@click.option('--partition', help='Partition name pattern match')
@click.option('--full', is_flag=True, default=False)
@click.pass_context
def apps(ctx, match, partition, full):
"""View apps report."""
report = fetch_report(ctx.obj.get('api'), 'apps', match, partition)
# Replace integer N/As
for col in ['identity', 'expires', 'lease', 'data_retention']:
report.loc[report[col] == -1, col] = ''
# Convert to datetimes
for col in ['expires']:
report[col] = pd.to_datetime(report[col], unit='s')
# Convert to timedeltas
for col in ['lease', 'data_retention']:
report[col] = pd.to_timedelta(report[col], unit='s')
report = report.fillna('')
if not full:
report = report[[
'instance', 'allocation', 'partition', 'server',
'mem', 'cpu', 'disk'
]]
print_report(report)
return apps
| {
"content_hash": "b055b4908558d62179c203e52fd7b560",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 75,
"avg_line_length": 31.520833333333332,
"alnum_prop": 0.6100462656972901,
"repo_name": "bretttegart/treadmill",
"id": "4cdf2f68940022bc418529909c08b2c4c4ee0f1c",
"size": "1513",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/python/treadmill/cli/scheduler/apps.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PowerShell",
"bytes": "3656"
},
{
"name": "Python",
"bytes": "2975485"
},
{
"name": "Ruby",
"bytes": "3712"
},
{
"name": "Shell",
"bytes": "56911"
}
],
"symlink_target": ""
} |
import numpy
import os
import sys
# This script depends on a SJSON parsing package:
# https://pypi.python.org/pypi/SJSON/1.1.0
# https://shelter13.net/projects/SJSON/
# https://bitbucket.org/Anteru/sjson/src
import sjson
if __name__ == "__main__":
if sys.version_info < (3, 4):
print('Python 3.4 or higher needed to run this script')
sys.exit(1)
if len(sys.argv) != 2:
print('Usage: python gen_summary_stats.py <path/to/input_file.sjson>')
sys.exit(1)
input_sjson_file = sys.argv[1]
if not input_sjson_file.endswith('.sjson'):
print('Expected SJSON input file, found: {}'.format(input_sjson_file))
sys.exit(1)
if not os.path.exists(input_sjson_file):
print('Input file not found: {}'.format(input_sjson_file))
sys.exit(1)
with open(input_sjson_file, 'r') as file:
input_sjson_data = sjson.loads(file.read())
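    # Illustrative shape of the expected SJSON input, based only on the fields
    # read below (not an authoritative schema):
    #   inputs = [ { header = "..." file = "stats.csv" version = 0.6 source = "acl" filter = "..." } ]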
input_data_type_def = {
'names': ('algorithm_names', 'raw_sizes', 'compression_ratios', 'durations', 'max_errors'),
'formats': ('S128', 'f4', 'f4', 'f4', 'f4')
}
columns_to_extract_acl_pre_v06 = (0, 1, 3, 5, 7)
columns_to_extract_acl_post_v06 = (1, 2, 4, 6, 8)
columns_to_extract_ue4 = (0, 1, 3, 4, 5)
output_csv_dir = os.getcwd()
output_csv_file_path_ratios = os.path.join(output_csv_dir, 'compression_ratios.csv')
output_csv_file_path_ratios_by_raw_size = os.path.join(output_csv_dir, 'compression_ratios_by_raw_size.csv')
output_csv_file_path_max_errors = os.path.join(output_csv_dir, 'max_errors.csv')
output_csv_file_path_max_errors_by_raw_size = os.path.join(output_csv_dir, 'max_errors_by_raw_size.csv')
output_csv_file_path_ratio_vs_max_error = os.path.join(output_csv_dir, 'ratio_vs_max_error.csv')
output_csv_file_path_durations = os.path.join(output_csv_dir, 'durations.csv')
output_csv_data_ratios = []
output_csv_data_ratios_by_raw_size = []
output_csv_data_max_errors = []
output_csv_data_max_errors_by_raw_size = []
output_csv_data_ratio_vs_max_error = []
output_csv_data_durations = []
output_csv_headers = []
for entry in input_sjson_data['inputs']:
if entry['version'] >= 0.6:
if entry['source'] == 'acl':
columns_to_extract = columns_to_extract_acl_post_v06
else:
columns_to_extract = columns_to_extract_ue4
else:
if entry['source'] == 'acl':
columns_to_extract = columns_to_extract_acl_pre_v06
else:
columns_to_extract = columns_to_extract_ue4
print('Parsing {} ...'.format(entry['header']))
csv_data = numpy.loadtxt(entry['file'], delimiter=',', dtype=input_data_type_def, skiprows=1, usecols=columns_to_extract)
filter = entry.get('filter', None)
if filter != None:
best_variable_data_mask = csv_data['algorithm_names'] == bytes(entry['filter'], encoding = 'utf-8')
csv_data = csv_data[best_variable_data_mask]
csv_data_ratios = numpy.sort(csv_data, order='compression_ratios')['compression_ratios']
csv_data_ratios_by_raw_size = numpy.sort(csv_data, order='raw_sizes')['compression_ratios']
csv_data_max_errors = numpy.sort(csv_data, order='max_errors')['max_errors']
csv_data_max_errors_by_raw_size = numpy.sort(csv_data, order='raw_sizes')['max_errors']
csv_data_ratio_by_max_error = numpy.sort(csv_data, order='max_errors')['compression_ratios']
csv_data_durations = numpy.sort(csv_data, order='durations')['durations']
output_csv_data_ratios.append(csv_data_ratios)
output_csv_data_ratios_by_raw_size.append(csv_data_ratios_by_raw_size)
output_csv_data_max_errors.append(csv_data_max_errors)
output_csv_data_max_errors_by_raw_size.append(csv_data_max_errors_by_raw_size)
output_csv_data_ratio_vs_max_error.append(csv_data_max_errors)
output_csv_data_ratio_vs_max_error.append(csv_data_ratio_by_max_error)
output_csv_data_durations.append(csv_data_durations)
output_csv_headers.append(entry['header'])
output_csv_data_ratios = numpy.column_stack(output_csv_data_ratios)
output_csv_data_ratios_by_raw_size = numpy.column_stack(output_csv_data_ratios_by_raw_size)
output_csv_data_max_errors = numpy.column_stack(output_csv_data_max_errors)
output_csv_data_max_errors_by_raw_size = numpy.column_stack(output_csv_data_max_errors_by_raw_size)
output_csv_data_ratio_vs_max_error = numpy.column_stack(output_csv_data_ratio_vs_max_error)
output_csv_data_durations = numpy.column_stack(output_csv_data_durations)
with open(output_csv_file_path_ratios, 'wb') as f:
header = bytes('{}\n'.format(','.join(output_csv_headers)), 'utf-8')
f.write(header)
numpy.savetxt(f, output_csv_data_ratios, delimiter=',', fmt=('%f'))
with open(output_csv_file_path_ratios_by_raw_size, 'wb') as f:
header = bytes('{}\n'.format(','.join(output_csv_headers)), 'utf-8')
f.write(header)
numpy.savetxt(f, output_csv_data_ratios_by_raw_size, delimiter=',', fmt=('%f'))
with open(output_csv_file_path_max_errors, 'wb') as f:
header = bytes('{}\n'.format(','.join(output_csv_headers)), 'utf-8')
f.write(header)
numpy.savetxt(f, output_csv_data_max_errors, delimiter=',', fmt=('%f'))
with open(output_csv_file_path_max_errors_by_raw_size, 'wb') as f:
header = bytes('{}\n'.format(','.join(output_csv_headers)), 'utf-8')
f.write(header)
numpy.savetxt(f, output_csv_data_max_errors_by_raw_size, delimiter=',', fmt=('%f'))
with open(output_csv_file_path_ratio_vs_max_error, 'wb') as f:
header = bytes('{}\n'.format(','.join(output_csv_headers)), 'utf-8')
f.write(header)
numpy.savetxt(f, output_csv_data_ratio_vs_max_error, delimiter=',', fmt=('%f'))
with open(output_csv_file_path_durations, 'wb') as f:
header = bytes('{}\n'.format(','.join(output_csv_headers)), 'utf-8')
f.write(header)
numpy.savetxt(f, output_csv_data_durations, delimiter=',', fmt=('%f'))
| {
"content_hash": "c55754b808bbb5590a35448480461c93",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 123,
"avg_line_length": 43.7906976744186,
"alnum_prop": 0.6909187466808284,
"repo_name": "nfrechette/acl",
"id": "49611c5c6e4db501534ed257e2b9053fe90a441e",
"size": "5649",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tools/graph_generation/gen_summary_stats.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1780"
},
{
"name": "C",
"bytes": "4894"
},
{
"name": "C++",
"bytes": "2144431"
},
{
"name": "CMake",
"bytes": "42607"
},
{
"name": "Java",
"bytes": "2871"
},
{
"name": "Python",
"bytes": "131416"
},
{
"name": "Shell",
"bytes": "3679"
}
],
"symlink_target": ""
} |
class LoginResult(object):
"""
Enum class so everyone can agree on a common language for logging in
"""
LOGGED_IN = 0
NO_SUCH_USER = 1
INCORRECT_PASSWORD = 2
| {
"content_hash": "5f2cadef217dc0277fd15b0895e5a670",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 72,
"avg_line_length": 26,
"alnum_prop": 0.6373626373626373,
"repo_name": "pbraunstein/trackercise",
"id": "06395d1f4cfca1daf4b6869607763859383f4973",
"size": "182",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/brain/user_management/login_result.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "16360"
},
{
"name": "JavaScript",
"bytes": "2356"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "141017"
},
{
"name": "Shell",
"bytes": "5507"
},
{
"name": "TypeScript",
"bytes": "43836"
}
],
"symlink_target": ""
} |
__author__ = "Simone Campagna"
__all__ = ['COLORMAPS']
from mayavi.core.lut_manager import lut_mode_list
COLORMAPS = lut_mode_list()
| {
"content_hash": "0b7e287d4487775739cf0719383d7fc3",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 49,
"avg_line_length": 19.428571428571427,
"alnum_prop": 0.6838235294117647,
"repo_name": "simone-campagna/rubik",
"id": "9f34f349527e26d53e607d1632d4350dd85d61e2",
"size": "743",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rubik/visualizer/impl/mayavi_data.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "546787"
}
],
"symlink_target": ""
} |
import datetime
import warnings
from sys import getsizeof
import numpy as np
from pandas._libs import algos as libalgos, index as libindex, lib, Timestamp
from pandas.compat import range, zip, lrange, lzip, map
from pandas.compat.numpy import function as nv
from pandas import compat
from pandas.core.dtypes.dtypes import (
ExtensionDtype, PandasExtensionDtype)
from pandas.core.dtypes.common import (
_ensure_int64,
_ensure_platform_int,
is_categorical_dtype,
is_object_dtype,
is_hashable,
is_iterator,
is_list_like,
pandas_dtype,
is_scalar)
from pandas.core.dtypes.missing import isna, array_equivalent
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.util._decorators import Appender, cache_readonly, deprecate_kwarg
import pandas.core.common as com
import pandas.core.missing as missing
import pandas.core.algorithms as algos
from pandas.io.formats.printing import pprint_thing
from pandas.core.config import get_option
from pandas.core.indexes.base import (
Index, _ensure_index,
InvalidIndexError,
_index_shared_docs)
from pandas.core.indexes.frozen import (
FrozenNDArray, FrozenList, _ensure_frozen)
import pandas.core.indexes.base as ibase
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update(
dict(klass='MultiIndex',
target_klass='MultiIndex or list of tuples'))
class MultiIndexUIntEngine(libindex.BaseMultiIndexCodesEngine,
libindex.UInt64Engine):
"""
This class manages a MultiIndex by mapping label combinations to positive
integers.
"""
_base = libindex.UInt64Engine
def _codes_to_ints(self, codes):
"""
Transform combination(s) of uint64 in one uint64 (each), in a strictly
monotonic way (i.e. respecting the lexicographic order of integer
combinations): see BaseMultiIndexCodesEngine documentation.
Parameters
----------
codes : 1- or 2-dimensional array of dtype uint64
Combinations of integers (one per row)
Returns
-------
int_keys : scalar or 1-dimensional array, of dtype uint64
Integer(s) representing one combination (each)
"""
# Shift the representation of each level by the pre-calculated number
# of bits:
codes <<= self.offsets
# Now sum and OR are in fact interchangeable. This is a simple
# composition of the (disjunct) significant bits of each level (i.e.
# each column in "codes") in a single positive integer:
if codes.ndim == 1:
# Single key
return np.bitwise_or.reduce(codes)
# Multiple keys
return np.bitwise_or.reduce(codes, axis=1)
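# Worked illustration (comments only, values assumed): with pre-computed
# offsets = np.array([4, 0], dtype='uint64') and codes = np.array([3, 5],
# dtype='uint64'),
#     codes << offsets           -> array([48, 5], dtype=uint64)
#     np.bitwise_or.reduce(...)  -> 53
# The lexicographically smaller pair (2, 15) maps to (2 << 4) | 15 == 47,
# illustrating the strict monotonicity described in the docstring above.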
class MultiIndexPyIntEngine(libindex.BaseMultiIndexCodesEngine,
libindex.ObjectEngine):
"""
This class manages those (extreme) cases in which the number of possible
label combinations overflows a 64-bit integer, and uses an ObjectEngine
containing Python integers.
"""
_base = libindex.ObjectEngine
def _codes_to_ints(self, codes):
"""
Transform combination(s) of uint64 in one Python integer (each), in a
strictly monotonic way (i.e. respecting the lexicographic order of
integer combinations): see BaseMultiIndexCodesEngine documentation.
Parameters
----------
codes : 1- or 2-dimensional array of dtype uint64
Combinations of integers (one per row)
Returns
-------
int_keys : int, or 1-dimensional array of dtype object
Integer(s) representing one combination (each)
"""
# Shift the representation of each level by the pre-calculated number
# of bits. Since this can overflow uint64, first make sure we are
# working with Python integers:
codes = codes.astype('object') << self.offsets
# Now sum and OR are in fact interchangeable. This is a simple
# composition of the (disjunct) significant bits of each level (i.e.
# each column in "codes") in a single positive integer (per row):
if codes.ndim == 1:
# Single key
return np.bitwise_or.reduce(codes)
# Multiple keys
return np.bitwise_or.reduce(codes, axis=1)
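# Worked illustration (comments only, values assumed): with, say, 20 levels each
# needing 4 bits, the total bit count is 80 > 64, so the uint64 engine above
# cannot be used. Casting the codes to dtype object first makes the left shift
# operate on arbitrary-precision Python integers, so the same shift-and-OR
# composition still yields one unique (now unbounded) integer per row.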
class MultiIndex(Index):
"""
A multi-level, or hierarchical, index object for pandas objects
Parameters
----------
levels : sequence of arrays
The unique labels for each level
labels : sequence of arrays
Integers for each level designating which label at each location
sortorder : optional int
Level of sortedness (must be lexicographically sorted by that
level)
names : optional sequence of objects
Names for each of the index levels. (name is accepted for compat)
copy : boolean, default False
Copy the meta-data
verify_integrity : boolean, default True
Check that the levels/labels are consistent and valid
Examples
---------
A new ``MultiIndex`` is typically constructed using one of the helper
methods :meth:`MultiIndex.from_arrays`, :meth:`MultiIndex.from_product`
and :meth:`MultiIndex.from_tuples`. For example (using ``.from_arrays``):
>>> arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']]
>>> pd.MultiIndex.from_arrays(arrays, names=('number', 'color'))
MultiIndex(levels=[[1, 2], ['blue', 'red']],
labels=[[0, 0, 1, 1], [1, 0, 1, 0]],
names=['number', 'color'])
See further examples for how to construct a MultiIndex in the doc strings
of the mentioned helper methods.
Notes
-----
See the `user guide
<http://pandas.pydata.org/pandas-docs/stable/advanced.html>`_ for more.
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex
MultiIndex.from_product : Create a MultiIndex from the cartesian product
of iterables
MultiIndex.from_tuples : Convert list of tuples to a MultiIndex
Index : The base pandas Index type
Attributes
----------
names
levels
labels
nlevels
levshape
Methods
-------
from_arrays
from_tuples
from_product
set_levels
set_labels
to_hierarchical
to_frame
is_lexsorted
sortlevel
droplevel
swaplevel
reorder_levels
remove_unused_levels
"""
# initialize to zero-length tuples to make everything work
_typ = 'multiindex'
_names = FrozenList()
_levels = FrozenList()
_labels = FrozenList()
_comparables = ['names']
rename = Index.set_names
def __new__(cls, levels=None, labels=None, sortorder=None, names=None,
dtype=None, copy=False, name=None,
verify_integrity=True, _set_identity=True):
# compat with Index
if name is not None:
names = name
if levels is None or labels is None:
raise TypeError("Must pass both levels and labels")
if len(levels) != len(labels):
raise ValueError('Length of levels and labels must be the same.')
if len(levels) == 0:
raise ValueError('Must pass non-zero number of levels/labels')
result = object.__new__(MultiIndex)
# we've already validated levels and labels, so shortcut here
result._set_levels(levels, copy=copy, validate=False)
result._set_labels(labels, copy=copy, validate=False)
if names is not None:
# handles name validation
result._set_names(names)
if sortorder is not None:
result.sortorder = int(sortorder)
else:
result.sortorder = sortorder
if verify_integrity:
result._verify_integrity()
if _set_identity:
result._reset_identity()
return result
def _verify_integrity(self, labels=None, levels=None):
"""
Parameters
----------
labels : optional list
Labels to check for validity. Defaults to current labels.
levels : optional list
Levels to check for validity. Defaults to current levels.
Raises
------
ValueError
If the lengths of levels and labels don't match, if any label would
exceed level bounds, or if there are any duplicate levels.
"""
# NOTE: Currently does not check, among other things, that cached
# nlevels matches nor that sortorder matches the actual sort order.
labels = labels or self.labels
levels = levels or self.levels
if len(levels) != len(labels):
raise ValueError("Length of levels and labels must match. NOTE:"
" this index is in an inconsistent state.")
label_length = len(self.labels[0])
for i, (level, label) in enumerate(zip(levels, labels)):
if len(label) != label_length:
raise ValueError("Unequal label lengths: %s" %
([len(lab) for lab in labels]))
if len(label) and label.max() >= len(level):
raise ValueError("On level %d, label max (%d) >= length of"
" level (%d). NOTE: this index is in an"
" inconsistent state" % (i, label.max(),
len(level)))
if not level.is_unique:
raise ValueError("Level values must be unique: {values} on "
"level {level}".format(
values=[value for value in level],
level=i))
@property
def levels(self):
return self._levels
def _set_levels(self, levels, level=None, copy=False, validate=True,
verify_integrity=False):
# This is NOT part of the levels property because it should not be
# possible to set levels externally. User beware if you change
# _levels directly
if validate and len(levels) == 0:
raise ValueError('Must set non-zero number of levels.')
if validate and level is None and len(levels) != self.nlevels:
raise ValueError('Length of levels must match number of levels.')
if validate and level is not None and len(levels) != len(level):
raise ValueError('Length of levels must match length of level.')
if level is None:
new_levels = FrozenList(
_ensure_index(lev, copy=copy)._shallow_copy()
for lev in levels)
else:
level = [self._get_level_number(l) for l in level]
new_levels = list(self._levels)
for l, v in zip(level, levels):
new_levels[l] = _ensure_index(v, copy=copy)._shallow_copy()
new_levels = FrozenList(new_levels)
if verify_integrity:
self._verify_integrity(levels=new_levels)
names = self.names
self._levels = new_levels
if any(names):
self._set_names(names)
self._tuples = None
self._reset_cache()
def set_levels(self, levels, level=None, inplace=False,
verify_integrity=True):
"""
Set new levels on MultiIndex. Defaults to returning
new index.
Parameters
----------
levels : sequence or list of sequence
new level(s) to apply
level : int, level name, or sequence of int/level names (default None)
level(s) to set (None for all levels)
inplace : bool
if True, mutates in place
verify_integrity : bool (default True)
if True, checks that levels and labels are compatible
Returns
-------
new index (of same type and class...etc)
Examples
--------
>>> idx = MultiIndex.from_tuples([(1, u'one'), (1, u'two'),
(2, u'one'), (2, u'two')],
names=['foo', 'bar'])
>>> idx.set_levels([['a','b'], [1,2]])
MultiIndex(levels=[[u'a', u'b'], [1, 2]],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'foo', u'bar'])
>>> idx.set_levels(['a','b'], level=0)
MultiIndex(levels=[[u'a', u'b'], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'foo', u'bar'])
>>> idx.set_levels(['a','b'], level='bar')
MultiIndex(levels=[[1, 2], [u'a', u'b']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'foo', u'bar'])
>>> idx.set_levels([['a','b'], [1,2]], level=[0,1])
MultiIndex(levels=[[u'a', u'b'], [1, 2]],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'foo', u'bar'])
"""
if level is not None and not is_list_like(level):
if not is_list_like(levels):
raise TypeError("Levels must be list-like")
if is_list_like(levels[0]):
raise TypeError("Levels must be list-like")
level = [level]
levels = [levels]
elif level is None or is_list_like(level):
if not is_list_like(levels) or not is_list_like(levels[0]):
raise TypeError("Levels must be list of lists-like")
if inplace:
idx = self
else:
idx = self._shallow_copy()
idx._reset_identity()
idx._set_levels(levels, level=level, validate=True,
verify_integrity=verify_integrity)
if not inplace:
return idx
@property
def labels(self):
return self._labels
def _set_labels(self, labels, level=None, copy=False, validate=True,
verify_integrity=False):
if validate and level is None and len(labels) != self.nlevels:
raise ValueError("Length of labels must match number of levels")
if validate and level is not None and len(labels) != len(level):
raise ValueError('Length of labels must match length of levels.')
if level is None:
new_labels = FrozenList(
_ensure_frozen(lab, lev, copy=copy)._shallow_copy()
for lev, lab in zip(self.levels, labels))
else:
level = [self._get_level_number(l) for l in level]
new_labels = list(self._labels)
for lev_idx, lab in zip(level, labels):
lev = self.levels[lev_idx]
new_labels[lev_idx] = _ensure_frozen(
lab, lev, copy=copy)._shallow_copy()
new_labels = FrozenList(new_labels)
if verify_integrity:
self._verify_integrity(labels=new_labels)
self._labels = new_labels
self._tuples = None
self._reset_cache()
def set_labels(self, labels, level=None, inplace=False,
verify_integrity=True):
"""
Set new labels on MultiIndex. Defaults to returning
new index.
Parameters
----------
labels : sequence or list of sequence
new labels to apply
level : int, level name, or sequence of int/level names (default None)
level(s) to set (None for all levels)
inplace : bool
if True, mutates in place
verify_integrity : bool (default True)
if True, checks that levels and labels are compatible
Returns
-------
new index (of same type and class...etc)
Examples
--------
>>> idx = MultiIndex.from_tuples([(1, u'one'), (1, u'two'),
(2, u'one'), (2, u'two')],
names=['foo', 'bar'])
>>> idx.set_labels([[1,0,1,0], [0,0,1,1]])
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[1, 0, 1, 0], [0, 0, 1, 1]],
names=[u'foo', u'bar'])
>>> idx.set_labels([1,0,1,0], level=0)
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[1, 0, 1, 0], [0, 1, 0, 1]],
names=[u'foo', u'bar'])
>>> idx.set_labels([0,0,1,1], level='bar')
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 0, 1, 1]],
names=[u'foo', u'bar'])
>>> idx.set_labels([[1,0,1,0], [0,0,1,1]], level=[0,1])
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[1, 0, 1, 0], [0, 0, 1, 1]],
names=[u'foo', u'bar'])
"""
if level is not None and not is_list_like(level):
if not is_list_like(labels):
raise TypeError("Labels must be list-like")
if is_list_like(labels[0]):
raise TypeError("Labels must be list-like")
level = [level]
labels = [labels]
elif level is None or is_list_like(level):
if not is_list_like(labels) or not is_list_like(labels[0]):
raise TypeError("Labels must be list of lists-like")
if inplace:
idx = self
else:
idx = self._shallow_copy()
idx._reset_identity()
idx._set_labels(labels, level=level, verify_integrity=verify_integrity)
if not inplace:
return idx
def copy(self, names=None, dtype=None, levels=None, labels=None,
deep=False, _set_identity=False, **kwargs):
"""
Make a copy of this object. Names, dtype, levels and labels can be
passed and will be set on new copy.
Parameters
----------
names : sequence, optional
dtype : numpy dtype or pandas type, optional
levels : sequence, optional
labels : sequence, optional
Returns
-------
copy : MultiIndex
Notes
-----
In most cases, there should be no functional difference from using
``deep``, but if ``deep`` is passed the levels and labels will be
deep-copied, which can be expensive on large MultiIndex objects.
"""
name = kwargs.get('name')
names = self._validate_names(name=name, names=names, deep=deep)
if deep:
from copy import deepcopy
if levels is None:
levels = deepcopy(self.levels)
if labels is None:
labels = deepcopy(self.labels)
else:
if levels is None:
levels = self.levels
if labels is None:
labels = self.labels
return MultiIndex(levels=levels, labels=labels, names=names,
sortorder=self.sortorder, verify_integrity=False,
_set_identity=_set_identity)
def __array__(self, dtype=None):
""" the array interface, return my values """
return self.values
def view(self, cls=None):
""" this is defined as a copy with the same identity """
result = self.copy()
result._id = self._id
return result
def _shallow_copy_with_infer(self, values=None, **kwargs):
# On equal MultiIndexes the difference is empty.
# Therefore, an empty MultiIndex is returned GH13490
if len(values) == 0:
return MultiIndex(levels=[[] for _ in range(self.nlevels)],
labels=[[] for _ in range(self.nlevels)],
**kwargs)
return self._shallow_copy(values, **kwargs)
@Appender(_index_shared_docs['__contains__'] % _index_doc_kwargs)
def __contains__(self, key):
hash(key)
try:
self.get_loc(key)
return True
except (LookupError, TypeError):
return False
contains = __contains__
@Appender(_index_shared_docs['_shallow_copy'])
def _shallow_copy(self, values=None, **kwargs):
if values is not None:
names = kwargs.pop('names', kwargs.pop('name', self.names))
# discards freq
kwargs.pop('freq', None)
return MultiIndex.from_tuples(values, names=names, **kwargs)
return self.view()
@cache_readonly
def dtype(self):
return np.dtype('O')
def _is_memory_usage_qualified(self):
""" return a boolean if we need a qualified .info display """
def f(l):
return 'mixed' in l or 'string' in l or 'unicode' in l
return any(f(l) for l in self._inferred_type_levels)
@Appender(Index.memory_usage.__doc__)
def memory_usage(self, deep=False):
# we are overwriting our base class to avoid
# computing .values here which could materialize
# a tuple representation unnecessarily
return self._nbytes(deep)
@cache_readonly
def nbytes(self):
""" return the number of bytes in the underlying data """
return self._nbytes(False)
def _nbytes(self, deep=False):
"""
return the number of bytes in the underlying data
deeply introspect the level data if deep=True
include the engine hashtable
*this is an internal routine*
"""
# for implementations with no useful getsizeof (PyPy)
objsize = 24
level_nbytes = sum(i.memory_usage(deep=deep) for i in self.levels)
label_nbytes = sum(i.nbytes for i in self.labels)
names_nbytes = sum(getsizeof(i, objsize) for i in self.names)
result = level_nbytes + label_nbytes + names_nbytes
# include our engine hashtable
result += self._engine.sizeof(deep=deep)
return result
def _format_attrs(self):
"""
Return a list of tuples of the (attr,formatted_value)
"""
attrs = [
('levels', ibase.default_pprint(self._levels,
max_seq_items=False)),
('labels', ibase.default_pprint(self._labels,
max_seq_items=False))]
if com._any_not_none(*self.names):
attrs.append(('names', ibase.default_pprint(self.names)))
if self.sortorder is not None:
attrs.append(('sortorder', ibase.default_pprint(self.sortorder)))
return attrs
def _format_space(self):
return "\n%s" % (' ' * (len(self.__class__.__name__) + 1))
def _format_data(self, name=None):
# we are formatting thru the attributes
return None
def __len__(self):
return len(self.labels[0])
def _get_names(self):
return FrozenList(level.name for level in self.levels)
def _set_names(self, names, level=None, validate=True):
"""
Set new names on index. Each name has to be a hashable type.
Parameters
----------
names : str or sequence
name(s) to set
level : int, level name, or sequence of int/level names (default None)
If the index is a MultiIndex (hierarchical), level(s) to set (None
for all levels). Otherwise level must be None
validate : boolean, default True
validate that the names match level lengths
Raises
------
TypeError if any name is not hashable.
Notes
-----
sets names on levels. WARNING: mutates!
Note that you generally want to set this *after* changing levels, so
that it only acts on copies
"""
# GH 15110
# Don't allow a single string for names in a MultiIndex
if names is not None and not is_list_like(names):
raise ValueError('Names should be list-like for a MultiIndex')
names = list(names)
if validate and level is not None and len(names) != len(level):
raise ValueError('Length of names must match length of level.')
if validate and level is None and len(names) != self.nlevels:
raise ValueError('Length of names must match number of levels in '
'MultiIndex.')
if level is None:
level = range(self.nlevels)
else:
level = [self._get_level_number(l) for l in level]
# set the name
for l, name in zip(level, names):
if name is not None:
# GH 20527
# All items in 'names' need to be hashable:
if not is_hashable(name):
raise TypeError('{}.name must be a hashable type'
.format(self.__class__.__name__))
self.levels[l].rename(name, inplace=True)
names = property(fset=_set_names, fget=_get_names,
doc="Names of levels in MultiIndex")
def _format_native_types(self, na_rep='nan', **kwargs):
new_levels = []
new_labels = []
# go through the levels and format them
for level, label in zip(self.levels, self.labels):
level = level._format_native_types(na_rep=na_rep, **kwargs)
# add nan values, if there are any
mask = (label == -1)
if mask.any():
nan_index = len(level)
level = np.append(level, na_rep)
label = label.values()
label[mask] = nan_index
new_levels.append(level)
new_labels.append(label)
# reconstruct the multi-index
mi = MultiIndex(levels=new_levels, labels=new_labels, names=self.names,
sortorder=self.sortorder, verify_integrity=False)
return mi.values
@Appender(_index_shared_docs['_get_grouper_for_level'])
def _get_grouper_for_level(self, mapper, level):
indexer = self.labels[level]
level_index = self.levels[level]
if mapper is not None:
# Handle group mapping function and return
level_values = self.levels[level].take(indexer)
grouper = level_values.map(mapper)
return grouper, None, None
labels, uniques = algos.factorize(indexer, sort=True)
if len(uniques) > 0 and uniques[0] == -1:
# Handle NAs
mask = indexer != -1
ok_labels, uniques = algos.factorize(indexer[mask],
sort=True)
labels = np.empty(len(indexer), dtype=indexer.dtype)
labels[mask] = ok_labels
labels[~mask] = -1
if len(uniques) < len(level_index):
# Remove unobserved levels from level_index
level_index = level_index.take(uniques)
grouper = level_index.take(labels)
return grouper, labels, level_index
@property
def _constructor(self):
return MultiIndex.from_tuples
@cache_readonly
def inferred_type(self):
return 'mixed'
@staticmethod
def _from_elements(values, labels=None, levels=None, names=None,
sortorder=None):
return MultiIndex(levels, labels, names, sortorder=sortorder)
def _get_level_number(self, level):
try:
count = self.names.count(level)
if count > 1:
raise ValueError('The name %s occurs multiple times, use a '
'level number' % level)
level = self.names.index(level)
except ValueError:
if not isinstance(level, int):
raise KeyError('Level %s not found' % str(level))
elif level < 0:
level += self.nlevels
if level < 0:
orig_level = level - self.nlevels
raise IndexError('Too many levels: Index has only %d '
'levels, %d is not a valid level number' %
(self.nlevels, orig_level))
# Note: levels are zero-based
elif level >= self.nlevels:
raise IndexError('Too many levels: Index has only %d levels, '
'not %d' % (self.nlevels, level + 1))
return level
_tuples = None
@cache_readonly
def _engine(self):
# Calculate the number of bits needed to represent labels in each
# level, as log2 of their sizes (including -1 for NaN):
sizes = np.ceil(np.log2([len(l) + 1 for l in self.levels]))
# Sum bit counts, starting from the _right_....
lev_bits = np.cumsum(sizes[::-1])[::-1]
# ... in order to obtain offsets such that sorting the combination of
# shifted codes (one for each level, resulting in a unique integer) is
# equivalent to sorting lexicographically the codes themselves. Notice
# that each level needs to be shifted by the number of bits needed to
# represent the _previous_ ones:
offsets = np.concatenate([lev_bits[1:], [0]]).astype('uint64')
# Check the total number of bits needed for our representation:
if lev_bits[0] > 64:
# The levels would overflow a 64 bit uint - use Python integers:
return MultiIndexPyIntEngine(self.levels, self.labels, offsets)
return MultiIndexUIntEngine(self.levels, self.labels, offsets)
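# Worked illustration (comments only, values assumed): for two levels of
# lengths [3, 10],
#     sizes    = ceil(log2([3 + 1, 10 + 1]))      = [2., 4.]
#     lev_bits = cumsum(sizes[::-1])[::-1]        = [6., 4.]
#     offsets  = concatenate([lev_bits[1:], [0]]) = [4, 0]  (uint64)
# lev_bits[0] == 6 <= 64, so the MultiIndexUIntEngine is chosen; these are the
# same offsets used in the _codes_to_ints illustration further above.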
@property
def values(self):
if self._tuples is not None:
return self._tuples
values = []
for i in range(self.nlevels):
vals = self._get_level_values(i)
if is_categorical_dtype(vals):
vals = vals.get_values()
if (isinstance(vals.dtype, (PandasExtensionDtype, ExtensionDtype))
or hasattr(vals, '_box_values')):
vals = vals.astype(object)
vals = np.array(vals, copy=False)
values.append(vals)
self._tuples = lib.fast_zip(values)
return self._tuples
# fml
@property
def _is_v1(self):
return False
@property
def _is_v2(self):
return False
@property
def _has_complex_internals(self):
# to disable groupby tricks
return True
@cache_readonly
def is_monotonic_increasing(self):
"""
Return True if the values in the index are monotonic increasing
(each value is equal to or greater than the previous one).
"""
# reversed() because lexsort() wants the most significant key last.
values = [self._get_level_values(i).values
for i in reversed(range(len(self.levels)))]
try:
sort_order = np.lexsort(values)
return Index(sort_order).is_monotonic
except TypeError:
# we have mixed types and np.lexsort is not happy
return Index(self.values).is_monotonic
@cache_readonly
def is_monotonic_decreasing(self):
"""
Return True if the values in the index are monotonic decreasing
(each value is equal to or less than the previous one).
"""
# monotonic decreasing if and only if reverse is monotonic increasing
return self[::-1].is_monotonic_increasing
@cache_readonly
def _have_mixed_levels(self):
""" return a boolean list indicated if we have mixed levels """
return ['mixed' in l for l in self._inferred_type_levels]
@cache_readonly
def _inferred_type_levels(self):
""" return a list of the inferred types, one for each level """
return [i.inferred_type for i in self.levels]
@cache_readonly
def _hashed_values(self):
""" return a uint64 ndarray of my hashed values """
from pandas.core.util.hashing import hash_tuples
return hash_tuples(self)
def _hashed_indexing_key(self, key):
"""
validate and return the hash for the provided key
*this is internal, for use by the cython routines*
Parameters
----------
key : string or tuple
Returns
-------
np.uint64
Notes
-----
we need to stringify if we have mixed levels
"""
from pandas.core.util.hashing import hash_tuples, hash_tuple
if not isinstance(key, tuple):
return hash_tuples(key)
if not len(key) == self.nlevels:
raise KeyError
def f(k, stringify):
if stringify and not isinstance(k, compat.string_types):
k = str(k)
return k
key = tuple([f(k, stringify)
for k, stringify in zip(key, self._have_mixed_levels)])
return hash_tuple(key)
@Appender(Index.duplicated.__doc__)
def duplicated(self, keep='first'):
from pandas.core.sorting import get_group_index
from pandas._libs.hashtable import duplicated_int64
shape = map(len, self.levels)
ids = get_group_index(self.labels, shape, sort=False, xnull=False)
return duplicated_int64(ids, keep)
def fillna(self, value=None, downcast=None):
"""
fillna is not implemented for MultiIndex
"""
raise NotImplementedError('fillna is not defined for MultiIndex')
@Appender(_index_shared_docs['dropna'])
def dropna(self, how='any'):
nans = [label == -1 for label in self.labels]
if how == 'any':
indexer = np.any(nans, axis=0)
elif how == 'all':
indexer = np.all(nans, axis=0)
else:
raise ValueError("invalid how option: {0}".format(how))
new_labels = [label[~indexer] for label in self.labels]
return self.copy(labels=new_labels, deep=True)
def get_value(self, series, key):
# somewhat broken encapsulation
from pandas.core.indexing import maybe_droplevels
# Label-based
s = com._values_from_object(series)
k = com._values_from_object(key)
def _try_mi(k):
# TODO: what if a level contains tuples??
loc = self.get_loc(k)
new_values = series._values[loc]
new_index = self[loc]
new_index = maybe_droplevels(new_index, k)
return series._constructor(new_values, index=new_index,
name=series.name).__finalize__(self)
try:
return self._engine.get_value(s, k)
except KeyError as e1:
try:
return _try_mi(key)
except KeyError:
pass
try:
return libindex.get_value_at(s, k)
except IndexError:
raise
except TypeError:
# generator/iterator-like
if is_iterator(key):
raise InvalidIndexError(key)
else:
raise e1
except Exception: # pragma: no cover
raise e1
except TypeError:
# a Timestamp will raise a TypeError in a multi-index
# rather than a KeyError, try it here
# note that a string that 'looks' like a Timestamp will raise
# a KeyError! (GH5725)
if (isinstance(key, (datetime.datetime, np.datetime64)) or
(compat.PY3 and isinstance(key, compat.string_types))):
try:
return _try_mi(key)
except (KeyError):
raise
except:
pass
try:
return _try_mi(Timestamp(key))
except:
pass
raise InvalidIndexError(key)
def _get_level_values(self, level, unique=False):
"""
Return vector of label values for requested level,
equal to the length of the index
**this is an internal method**
Parameters
----------
level : int level
unique : bool, default False
if True, drop duplicated values
Returns
-------
values : ndarray
"""
values = self.levels[level]
labels = self.labels[level]
if unique:
labels = algos.unique(labels)
filled = algos.take_1d(values._values, labels,
fill_value=values._na_value)
values = values._shallow_copy(filled)
return values
def get_level_values(self, level):
"""
Return vector of label values for requested level,
equal to the length of the index.
Parameters
----------
level : int or str
``level`` is either the integer position of the level in the
MultiIndex, or the name of the level.
Returns
-------
values : Index
``values`` is a level of this MultiIndex converted to
a single :class:`Index` (or subclass thereof).
Examples
---------
Create a MultiIndex:
>>> mi = pd.MultiIndex.from_arrays((list('abc'), list('def')))
>>> mi.names = ['level_1', 'level_2']
Get level values by supplying level as either integer or name:
>>> mi.get_level_values(0)
Index(['a', 'b', 'c'], dtype='object', name='level_1')
>>> mi.get_level_values('level_2')
Index(['d', 'e', 'f'], dtype='object', name='level_2')
"""
level = self._get_level_number(level)
values = self._get_level_values(level)
return values
@Appender(_index_shared_docs['index_unique'] % _index_doc_kwargs)
def unique(self, level=None):
if level is None:
return super(MultiIndex, self).unique()
else:
level = self._get_level_number(level)
return self._get_level_values(level=level, unique=True)
def format(self, space=2, sparsify=None, adjoin=True, names=False,
na_rep=None, formatter=None):
if len(self) == 0:
return []
stringified_levels = []
for lev, lab in zip(self.levels, self.labels):
na = na_rep if na_rep is not None else _get_na_rep(lev.dtype.type)
if len(lev) > 0:
formatted = lev.take(lab).format(formatter=formatter)
# we have some NA
mask = lab == -1
if mask.any():
formatted = np.array(formatted, dtype=object)
formatted[mask] = na
formatted = formatted.tolist()
else:
# weird all NA case
formatted = [pprint_thing(na if isna(x) else x,
escape_chars=('\t', '\r', '\n'))
for x in algos.take_1d(lev._values, lab)]
stringified_levels.append(formatted)
result_levels = []
for lev, name in zip(stringified_levels, self.names):
level = []
if names:
level.append(pprint_thing(name,
escape_chars=('\t', '\r', '\n'))
if name is not None else '')
level.extend(np.array(lev, dtype=object))
result_levels.append(level)
if sparsify is None:
sparsify = get_option("display.multi_sparse")
if sparsify:
sentinel = ''
# GH3547
# use value of sparsify as sentinel, unless it's an obvious
# "Truthey" value
if sparsify not in [True, 1]:
sentinel = sparsify
# little bit of a kludge job for #1217
result_levels = _sparsify(result_levels, start=int(names),
sentinel=sentinel)
if adjoin:
from pandas.io.formats.format import _get_adjustment
adj = _get_adjustment()
return adj.adjoin(space, *result_levels).split('\n')
else:
return result_levels
def _to_safe_for_reshape(self):
""" convert to object if we are a categorical """
return self.set_levels([i._to_safe_for_reshape() for i in self.levels])
def to_frame(self, index=True):
"""
Create a DataFrame with the levels of the MultiIndex as columns.
.. versionadded:: 0.20.0
Parameters
----------
index : boolean, default True
Set the index of the returned DataFrame as the original MultiIndex.
Returns
-------
DataFrame : a DataFrame containing the original MultiIndex data.
"""
from pandas import DataFrame
result = DataFrame({(name or level):
self._get_level_values(level)
for name, level in
zip(self.names, range(len(self.levels)))},
copy=False)
if index:
result.index = self
return result
def to_hierarchical(self, n_repeat, n_shuffle=1):
"""
Return a MultiIndex reshaped to conform to the
shapes given by n_repeat and n_shuffle.
Useful to replicate and rearrange a MultiIndex for combination
with another Index with n_repeat items.
Parameters
----------
n_repeat : int
Number of times to repeat the labels on self
n_shuffle : int
Controls the reordering of the labels. If the result is going
to be an inner level in a MultiIndex, n_shuffle will need to be
greater than one. The size of each label must divisible by
n_shuffle.
Returns
-------
MultiIndex
Examples
--------
>>> idx = MultiIndex.from_tuples([(1, u'one'), (1, u'two'),
(2, u'one'), (2, u'two')])
>>> idx.to_hierarchical(3)
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]])
"""
levels = self.levels
labels = [np.repeat(x, n_repeat) for x in self.labels]
# Assumes that each label is divisible by n_shuffle
labels = [x.reshape(n_shuffle, -1).ravel(order='F') for x in labels]
names = self.names
return MultiIndex(levels=levels, labels=labels, names=names)
@property
def is_all_dates(self):
return False
def is_lexsorted(self):
"""
Return True if the labels are lexicographically sorted
"""
return self.lexsort_depth == self.nlevels
@cache_readonly
def lexsort_depth(self):
if self.sortorder is not None:
if self.sortorder == 0:
return self.nlevels
else:
return 0
int64_labels = [_ensure_int64(lab) for lab in self.labels]
for k in range(self.nlevels, 0, -1):
if libalgos.is_lexsorted(int64_labels[:k]):
return k
return 0
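# Worked illustration (comments only, values assumed):
# MultiIndex.from_product([[0, 1], ['a', 'b']]) has labels
# [[0, 0, 1, 1], [0, 1, 0, 1]], which are lexicographically sorted, so
# lexsort_depth == nlevels == 2 and is_lexsorted() is True. Reversing it with
# mi[::-1] gives first-level labels [1, 1, 0, 0], which are no longer
# non-decreasing, so lexsort_depth drops to 0 and is_lexsorted() returns False.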
@classmethod
def from_arrays(cls, arrays, sortorder=None, names=None):
"""
Convert arrays to MultiIndex
Parameters
----------
arrays : list / sequence of array-likes
Each array-like gives one level's value for each data point.
len(arrays) is the number of levels.
sortorder : int or None
Level of sortedness (must be lexicographically sorted by that
level)
Returns
-------
index : MultiIndex
Examples
--------
>>> arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']]
>>> MultiIndex.from_arrays(arrays, names=('number', 'color'))
See Also
--------
MultiIndex.from_tuples : Convert list of tuples to MultiIndex
MultiIndex.from_product : Make a MultiIndex from cartesian product
of iterables
"""
if not is_list_like(arrays):
raise TypeError("Input must be a list / sequence of array-likes.")
elif is_iterator(arrays):
arrays = list(arrays)
# Check if lengths of all arrays are equal or not,
# raise ValueError, if not
for i in range(1, len(arrays)):
if len(arrays[i]) != len(arrays[i - 1]):
raise ValueError('all arrays must be same length')
from pandas.core.arrays.categorical import _factorize_from_iterables
labels, levels = _factorize_from_iterables(arrays)
if names is None:
names = [getattr(arr, "name", None) for arr in arrays]
return MultiIndex(levels=levels, labels=labels, sortorder=sortorder,
names=names, verify_integrity=False)
@classmethod
def from_tuples(cls, tuples, sortorder=None, names=None):
"""
Convert list of tuples to MultiIndex
Parameters
----------
tuples : list / sequence of tuple-likes
Each tuple is the index of one row/column.
sortorder : int or None
Level of sortedness (must be lexicographically sorted by that
level)
Returns
-------
index : MultiIndex
Examples
--------
>>> tuples = [(1, u'red'), (1, u'blue'),
(2, u'red'), (2, u'blue')]
>>> MultiIndex.from_tuples(tuples, names=('number', 'color'))
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex
MultiIndex.from_product : Make a MultiIndex from cartesian product
of iterables
"""
if not is_list_like(tuples):
raise TypeError('Input must be a list / sequence of tuple-likes.')
elif is_iterator(tuples):
tuples = list(tuples)
if len(tuples) == 0:
if names is None:
msg = 'Cannot infer number of levels from empty list'
raise TypeError(msg)
arrays = [[]] * len(names)
elif isinstance(tuples, (np.ndarray, Index)):
if isinstance(tuples, Index):
tuples = tuples._values
arrays = list(lib.tuples_to_object_array(tuples).T)
elif isinstance(tuples, list):
arrays = list(lib.to_object_array_tuples(tuples).T)
else:
arrays = lzip(*tuples)
return MultiIndex.from_arrays(arrays, sortorder=sortorder, names=names)
@classmethod
def from_product(cls, iterables, sortorder=None, names=None):
"""
Make a MultiIndex from the cartesian product of multiple iterables
Parameters
----------
iterables : list / sequence of iterables
Each iterable has unique labels for each level of the index.
sortorder : int or None
Level of sortedness (must be lexicographically sorted by that
level).
names : list / sequence of strings or None
Names for the levels in the index.
Returns
-------
index : MultiIndex
Examples
--------
>>> numbers = [0, 1, 2]
>>> colors = [u'green', u'purple']
>>> MultiIndex.from_product([numbers, colors],
names=['number', 'color'])
MultiIndex(levels=[[0, 1, 2], [u'green', u'purple']],
labels=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]],
names=[u'number', u'color'])
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex
MultiIndex.from_tuples : Convert list of tuples to MultiIndex
"""
from pandas.core.arrays.categorical import _factorize_from_iterables
from pandas.core.reshape.util import cartesian_product
if not is_list_like(iterables):
raise TypeError("Input must be a list / sequence of iterables.")
elif is_iterator(iterables):
iterables = list(iterables)
labels, levels = _factorize_from_iterables(iterables)
labels = cartesian_product(labels)
return MultiIndex(levels, labels, sortorder=sortorder, names=names)
def _sort_levels_monotonic(self):
"""
.. versionadded:: 0.20.0
This is an *internal* function.
create a new MultiIndex from the current to monotonically sorted
items IN the levels. This does not actually make the entire MultiIndex
monotonic, JUST the levels.
The resulting MultiIndex will have the same outward
appearance, meaning the same .values and ordering. It will also
be .equals() to the original.
Returns
-------
MultiIndex
Examples
--------
>>> i = pd.MultiIndex(levels=[['a', 'b'], ['bb', 'aa']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]])
>>> i
MultiIndex(levels=[['a', 'b'], ['bb', 'aa']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]])
>>> i.sort_monotonic()
MultiIndex(levels=[['a', 'b'], ['aa', 'bb']],
labels=[[0, 0, 1, 1], [1, 0, 1, 0]])
"""
if self.is_lexsorted() and self.is_monotonic:
return self
new_levels = []
new_labels = []
for lev, lab in zip(self.levels, self.labels):
if not lev.is_monotonic:
try:
# indexer to reorder the levels
indexer = lev.argsort()
except TypeError:
pass
else:
lev = lev.take(indexer)
# indexer to reorder the labels
indexer = _ensure_int64(indexer)
ri = lib.get_reverse_indexer(indexer, len(indexer))
lab = algos.take_1d(ri, lab)
new_levels.append(lev)
new_labels.append(lab)
return MultiIndex(new_levels, new_labels,
names=self.names, sortorder=self.sortorder,
verify_integrity=False)
def remove_unused_levels(self):
"""
create a new MultiIndex from the current one, removing unused
levels, i.e. levels that are not expressed in the labels
The resulting MultiIndex will have the same outward
appearance, meaning the same .values and ordering. It will also
be .equals() to the original.
.. versionadded:: 0.20.0
Returns
-------
MultiIndex
Examples
--------
>>> i = pd.MultiIndex.from_product([range(2), list('ab')])
MultiIndex(levels=[[0, 1], ['a', 'b']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]])
>>> i[2:]
MultiIndex(levels=[[0, 1], ['a', 'b']],
labels=[[1, 1], [0, 1]])
The 0 from the first level is not represented
and can be removed
>>> i[2:].remove_unused_levels()
MultiIndex(levels=[[1], ['a', 'b']],
labels=[[0, 0], [0, 1]])
"""
new_levels = []
new_labels = []
changed = False
for lev, lab in zip(self.levels, self.labels):
# Since few levels are typically unused, bincount() is more
# efficient than unique() - however it only accepts positive values
# (and drops order):
uniques = np.where(np.bincount(lab + 1) > 0)[0] - 1
has_na = int(len(uniques) and (uniques[0] == -1))
if len(uniques) != len(lev) + has_na:
# We have unused levels
changed = True
# Recalculate uniques, now preserving order.
# Can easily be cythonized by exploiting the already existing
# "uniques" and stop parsing "lab" when all items are found:
uniques = algos.unique(lab)
if has_na:
na_idx = np.where(uniques == -1)[0]
# Just ensure that -1 is in first position:
uniques[[0, na_idx[0]]] = uniques[[na_idx[0], 0]]
# labels get mapped from uniques to 0:len(uniques)
# -1 (if present) is mapped to last position
label_mapping = np.zeros(len(lev) + has_na)
# ... and reassigned value -1:
label_mapping[uniques] = np.arange(len(uniques)) - has_na
lab = label_mapping[lab]
# new levels are simple
lev = lev.take(uniques[has_na:])
new_levels.append(lev)
new_labels.append(lab)
result = self._shallow_copy()
if changed:
result._reset_identity()
result._set_levels(new_levels, validate=False)
result._set_labels(new_labels, validate=False)
return result
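# Worked illustration (comments only) for the docstring example i[2:] above:
# on the first level, lab == [1, 1] and lev == [0, 1], so
#     np.bincount(lab + 1) == [0, 0, 2]  ->  uniques == [1], has_na == 0
# len(uniques) == 1 != len(lev) + has_na == 2, so the level shrinks:
# label_mapping becomes [0, 0], the labels are remapped to [0, 0] and the
# level to [1], reproducing MultiIndex(levels=[[1], ['a', 'b']], ...) as shown.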
@property
def nlevels(self):
"""Integer number of levels in this MultiIndex."""
return len(self.levels)
@property
def levshape(self):
"""A tuple with the length of each level."""
return tuple(len(x) for x in self.levels)
def __reduce__(self):
"""Necessary for making this object picklable"""
d = dict(levels=[lev for lev in self.levels],
labels=[label for label in self.labels],
sortorder=self.sortorder, names=list(self.names))
return ibase._new_Index, (self.__class__, d), None
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if isinstance(state, dict):
levels = state.get('levels')
labels = state.get('labels')
sortorder = state.get('sortorder')
names = state.get('names')
elif isinstance(state, tuple):
nd_state, own_state = state
levels, labels, sortorder, names = own_state
self._set_levels([Index(x) for x in levels], validate=False)
self._set_labels(labels)
self._set_names(names)
self.sortorder = sortorder
self._verify_integrity()
self._reset_identity()
def __getitem__(self, key):
if is_scalar(key):
retval = []
for lev, lab in zip(self.levels, self.labels):
if lab[key] == -1:
retval.append(np.nan)
else:
retval.append(lev[lab[key]])
return tuple(retval)
else:
if com.is_bool_indexer(key):
key = np.asarray(key)
sortorder = self.sortorder
else:
# cannot be sure whether the result will be sorted
sortorder = None
if isinstance(key, Index):
key = np.asarray(key)
new_labels = [lab[key] for lab in self.labels]
return MultiIndex(levels=self.levels, labels=new_labels,
names=self.names, sortorder=sortorder,
verify_integrity=False)
@Appender(_index_shared_docs['take'] % _index_doc_kwargs)
def take(self, indices, axis=0, allow_fill=True,
fill_value=None, **kwargs):
nv.validate_take(tuple(), kwargs)
indices = _ensure_platform_int(indices)
taken = self._assert_take_fillable(self.labels, indices,
allow_fill=allow_fill,
fill_value=fill_value,
na_value=-1)
return MultiIndex(levels=self.levels, labels=taken,
names=self.names, verify_integrity=False)
def _assert_take_fillable(self, values, indices, allow_fill=True,
fill_value=None, na_value=None):
""" Internal method to handle NA filling of take """
# only fill if we are passing a non-None fill_value
if allow_fill and fill_value is not None:
if (indices < -1).any():
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
raise ValueError(msg)
taken = [lab.take(indices) for lab in self.labels]
mask = indices == -1
if mask.any():
masked = []
for new_label in taken:
label_values = new_label.values()
label_values[mask] = na_value
masked.append(FrozenNDArray(label_values))
taken = masked
else:
taken = [lab.take(indices) for lab in self.labels]
return taken
def append(self, other):
"""
Append a collection of Index objects together
Parameters
----------
other : Index or list/tuple of indices
Returns
-------
appended : Index
"""
if not isinstance(other, (list, tuple)):
other = [other]
if all((isinstance(o, MultiIndex) and o.nlevels >= self.nlevels)
for o in other):
arrays = []
for i in range(self.nlevels):
label = self._get_level_values(i)
appended = [o._get_level_values(i) for o in other]
arrays.append(label.append(appended))
return MultiIndex.from_arrays(arrays, names=self.names)
to_concat = (self.values, ) + tuple(k._values for k in other)
new_tuples = np.concatenate(to_concat)
# if all(isinstance(x, MultiIndex) for x in other):
try:
return MultiIndex.from_tuples(new_tuples, names=self.names)
except:
return Index(new_tuples)
def argsort(self, *args, **kwargs):
return self.values.argsort(*args, **kwargs)
@deprecate_kwarg(old_arg_name='n', new_arg_name='repeats')
def repeat(self, repeats, *args, **kwargs):
nv.validate_repeat(args, kwargs)
return MultiIndex(levels=self.levels,
labels=[label.view(np.ndarray).repeat(repeats)
for label in self.labels], names=self.names,
sortorder=self.sortorder, verify_integrity=False)
def where(self, cond, other=None):
raise NotImplementedError(".where is not supported for "
"MultiIndex operations")
def drop(self, labels, level=None, errors='raise'):
"""
Make new MultiIndex with passed list of labels deleted
Parameters
----------
labels : array-like
Must be a list of tuples
level : int or level name, default None
Returns
-------
dropped : MultiIndex
"""
if level is not None:
return self._drop_from_level(labels, level)
try:
if not isinstance(labels, (np.ndarray, Index)):
labels = com._index_labels_to_array(labels)
indexer = self.get_indexer(labels)
mask = indexer == -1
if mask.any():
if errors != 'ignore':
raise ValueError('labels %s not contained in axis' %
labels[mask])
except Exception:
pass
inds = []
for label in labels:
try:
loc = self.get_loc(label)
# get_loc returns either an integer, a slice, or a boolean
# mask
if isinstance(loc, int):
inds.append(loc)
elif isinstance(loc, slice):
inds.extend(lrange(loc.start, loc.stop))
elif com.is_bool_indexer(loc):
if self.lexsort_depth == 0:
warnings.warn('dropping on a non-lexsorted multi-index'
' without a level parameter may impact '
'performance.',
PerformanceWarning,
stacklevel=3)
loc = loc.nonzero()[0]
inds.extend(loc)
else:
msg = 'unsupported indexer of type {}'.format(type(loc))
raise AssertionError(msg)
except KeyError:
if errors != 'ignore':
raise
return self.delete(inds)
def _drop_from_level(self, labels, level):
labels = com._index_labels_to_array(labels)
i = self._get_level_number(level)
index = self.levels[i]
values = index.get_indexer(labels)
mask = ~algos.isin(self.labels[i], values)
return self[mask]
def droplevel(self, level=0):
"""
Return Index with requested level removed. If MultiIndex has only 2
levels, the result will be of Index type not MultiIndex.
Parameters
----------
level : int/level name or list thereof
Notes
-----
Does not check if result index is unique or not
Returns
-------
index : Index or MultiIndex
"""
levels = level
if not isinstance(levels, (tuple, list)):
levels = [level]
new_levels = list(self.levels)
new_labels = list(self.labels)
new_names = list(self.names)
levnums = sorted(self._get_level_number(lev) for lev in levels)[::-1]
for i in levnums:
new_levels.pop(i)
new_labels.pop(i)
new_names.pop(i)
if len(new_levels) == 1:
# set nan if needed
mask = new_labels[0] == -1
result = new_levels[0].take(new_labels[0])
if mask.any():
result = result.putmask(mask, np.nan)
result.name = new_names[0]
return result
else:
return MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
def swaplevel(self, i=-2, j=-1):
"""
Swap level i with level j.
Calling this method does not change the ordering of the values.
Parameters
----------
i : int, str, default -2
First level of index to be swapped. Can pass level name as string.
Type of parameters can be mixed.
j : int, str, default -1
Second level of index to be swapped. Can pass level name as string.
Type of parameters can be mixed.
Returns
-------
MultiIndex
A new MultiIndex
.. versionchanged:: 0.18.1
The indexes ``i`` and ``j`` are now optional, and default to
the two innermost levels of the index.
See Also
--------
Series.swaplevel : Swap levels i and j in a MultiIndex
Dataframe.swaplevel : Swap levels i and j in a MultiIndex on a
particular axis
Examples
--------
>>> mi = pd.MultiIndex(levels=[['a', 'b'], ['bb', 'aa']],
... labels=[[0, 0, 1, 1], [0, 1, 0, 1]])
>>> mi
MultiIndex(levels=[['a', 'b'], ['bb', 'aa']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]])
>>> mi.swaplevel(0, 1)
MultiIndex(levels=[['bb', 'aa'], ['a', 'b']],
labels=[[0, 1, 0, 1], [0, 0, 1, 1]])
"""
new_levels = list(self.levels)
new_labels = list(self.labels)
new_names = list(self.names)
i = self._get_level_number(i)
j = self._get_level_number(j)
new_levels[i], new_levels[j] = new_levels[j], new_levels[i]
new_labels[i], new_labels[j] = new_labels[j], new_labels[i]
new_names[i], new_names[j] = new_names[j], new_names[i]
return MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
def reorder_levels(self, order):
"""
Rearrange levels using input order. May not drop or duplicate levels.
Parameters
----------
order : list of int or list of str
List representing the new level order; levels may be referenced by
number or by name.
"""
order = [self._get_level_number(i) for i in order]
if len(order) != self.nlevels:
raise AssertionError('Length of order must be same as '
'number of levels (%d), got %d' %
(self.nlevels, len(order)))
new_levels = [self.levels[i] for i in order]
new_labels = [self.labels[i] for i in order]
new_names = [self.names[i] for i in order]
return MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
def __getslice__(self, i, j):
return self.__getitem__(slice(i, j))
def _get_labels_for_sorting(self):
"""
we categorize our labels by using the
available categories (all, not just observed),
excluding any missing ones (-1); this is in preparation
for sorting, where we need to disambiguate that -1 is not
a valid value
"""
from pandas.core.arrays import Categorical
def cats(label):
return np.arange(np.array(label).max() + 1 if len(label) else 0,
dtype=label.dtype)
return [Categorical.from_codes(label, cats(label), ordered=True)
for label in self.labels]
def sortlevel(self, level=0, ascending=True, sort_remaining=True):
"""
Sort MultiIndex at the requested level. The result will respect the
original ordering of the associated factor at that level.
Parameters
----------
level : list-like, int or str, default 0
If a string is given, must be a name of the level
If list-like must be names or ints of levels.
ascending : boolean, default True
False to sort in descending order
Can also be a list to specify a directed ordering
sort_remaining : boolean, default True
Sort by the remaining levels after ``level``.
Returns
-------
sorted_index : pd.MultiIndex
Resulting index
indexer : np.ndarray
Indices of output values in original index
"""
from pandas.core.sorting import indexer_from_factorized
if isinstance(level, (compat.string_types, int)):
level = [level]
level = [self._get_level_number(lev) for lev in level]
sortorder = None
# we have a directed ordering via ascending
if isinstance(ascending, list):
if not len(level) == len(ascending):
raise ValueError("level must have same length as ascending")
from pandas.core.sorting import lexsort_indexer
indexer = lexsort_indexer([self.labels[lev] for lev in level],
orders=ascending)
# level ordering
else:
labels = list(self.labels)
shape = list(self.levshape)
# partition labels and shape
primary = tuple(labels.pop(lev - i) for i, lev in enumerate(level))
primshp = tuple(shape.pop(lev - i) for i, lev in enumerate(level))
if sort_remaining:
primary += primary + tuple(labels)
primshp += primshp + tuple(shape)
else:
sortorder = level[0]
indexer = indexer_from_factorized(primary, primshp,
compress=False)
if not ascending:
indexer = indexer[::-1]
indexer = _ensure_platform_int(indexer)
new_labels = [lab.take(indexer) for lab in self.labels]
new_index = MultiIndex(labels=new_labels, levels=self.levels,
names=self.names, sortorder=sortorder,
verify_integrity=False)
return new_index, indexer
def _convert_listlike_indexer(self, keyarr, kind=None):
"""
Parameters
----------
keyarr : list-like
Indexer to convert.
Returns
-------
tuple (indexer, keyarr)
indexer is an ndarray, or None if the keys cannot be converted
keyarr are tuple-safe keys
"""
indexer, keyarr = super(MultiIndex, self)._convert_listlike_indexer(
keyarr, kind=kind)
# are we indexing a specific level
if indexer is None and len(keyarr) and not isinstance(keyarr[0],
tuple):
level = 0
_, indexer = self.reindex(keyarr, level=level)
# take all
if indexer is None:
indexer = np.arange(len(self))
check = self.levels[0].get_indexer(keyarr)
mask = check == -1
if mask.any():
raise KeyError('%s not in index' % keyarr[mask])
return indexer, keyarr
@Appender(_index_shared_docs['get_indexer'] % _index_doc_kwargs)
def get_indexer(self, target, method=None, limit=None, tolerance=None):
method = missing.clean_reindex_fill_method(method)
target = _ensure_index(target)
# empty indexer
if is_list_like(target) and not len(target):
return _ensure_platform_int(np.array([]))
if not isinstance(target, MultiIndex):
try:
target = MultiIndex.from_tuples(target)
except (TypeError, ValueError):
# let's instead try with a straight Index
if method is None:
return Index(self.values).get_indexer(target,
method=method,
limit=limit,
tolerance=tolerance)
if not self.is_unique:
raise Exception('Reindexing only valid with uniquely valued Index '
'objects')
if method == 'pad' or method == 'backfill':
if tolerance is not None:
raise NotImplementedError("tolerance not implemented yet "
'for MultiIndex')
indexer = self._engine.get_indexer(target, method, limit)
elif method == 'nearest':
raise NotImplementedError("method='nearest' not implemented yet "
'for MultiIndex; see GitHub issue 9365')
else:
indexer = self._engine.get_indexer(target)
return _ensure_platform_int(indexer)
@Appender(_index_shared_docs['get_indexer_non_unique'] % _index_doc_kwargs)
def get_indexer_non_unique(self, target):
return super(MultiIndex, self).get_indexer_non_unique(target)
def reindex(self, target, method=None, level=None, limit=None,
tolerance=None):
"""
Create index with target's values (move/add/delete values as necessary)
Returns
-------
new_index : pd.MultiIndex
Resulting index
indexer : np.ndarray or None
Indices of output values in original index
"""
# GH6552: preserve names when reindexing to non-named target
# (i.e. neither Index nor Series).
preserve_names = not hasattr(target, 'names')
if level is not None:
if method is not None:
raise TypeError('Fill method not supported if level passed')
# GH7774: preserve dtype/tz if target is empty and not an Index.
# target may be an iterator
target = ibase._ensure_has_len(target)
if len(target) == 0 and not isinstance(target, Index):
idx = self.levels[level]
attrs = idx._get_attributes_dict()
attrs.pop('freq', None) # don't preserve freq
target = type(idx)._simple_new(np.empty(0, dtype=idx.dtype),
**attrs)
else:
target = _ensure_index(target)
target, indexer, _ = self._join_level(target, level, how='right',
return_indexers=True,
keep_order=False)
else:
target = _ensure_index(target)
if self.equals(target):
indexer = None
else:
if self.is_unique:
indexer = self.get_indexer(target, method=method,
limit=limit,
tolerance=tolerance)
else:
raise Exception("cannot handle a non-unique multi-index!")
if not isinstance(target, MultiIndex):
if indexer is None:
target = self
elif (indexer >= 0).all():
target = self.take(indexer)
else:
# hopefully?
target = MultiIndex.from_tuples(target)
if (preserve_names and target.nlevels == self.nlevels and
target.names != self.names):
target = target.copy(deep=False)
target.names = self.names
return target, indexer
def get_slice_bound(self, label, side, kind):
if not isinstance(label, tuple):
label = label,
return self._partial_tup_index(label, side=side)
def slice_locs(self, start=None, end=None, step=None, kind=None):
"""
For an ordered MultiIndex, compute the slice locations for input
labels.
The input labels can be tuples representing partial levels, e.g. for a
MultiIndex with 3 levels, you can pass a single value (corresponding to
the first level), or a 1-, 2-, or 3-tuple.
Parameters
----------
start : label or tuple, default None
If None, defaults to the beginning
end : label or tuple
If None, defaults to the end
step : int or None
Slice step
kind : string, optional, defaults None
Returns
-------
(start, end) : (int, int)
Notes
-----
This method only works if the MultiIndex is properly lex-sorted. So,
if only the first 2 levels of a 3-level MultiIndex are lexsorted,
you can only pass two levels to ``.slice_locs``.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([list('abbd'), list('deff')],
... names=['A', 'B'])
Get the slice locations from the beginning of 'b' in the first level
until the end of the multiindex:
>>> mi.slice_locs(start='b')
(1, 4)
Like above, but stop at the end of 'b' in the first level and 'f' in
the second level:
>>> mi.slice_locs(start='b', end=('b', 'f'))
(1, 3)
See Also
--------
MultiIndex.get_loc : Get location for a label or a tuple of labels.
MultiIndex.get_locs : Get location for a label/slice/list/mask or a
sequence of such.
"""
# This function adds nothing to its parent implementation (the magic
# happens in get_slice_bound method), but it adds meaningful doc.
return super(MultiIndex, self).slice_locs(start, end, step, kind=kind)
def _partial_tup_index(self, tup, side='left'):
if len(tup) > self.lexsort_depth:
raise UnsortedIndexError(
'Key length (%d) was greater than MultiIndex'
' lexsort depth (%d)' %
(len(tup), self.lexsort_depth))
n = len(tup)
start, end = 0, len(self)
zipped = zip(tup, self.levels, self.labels)
for k, (lab, lev, labs) in enumerate(zipped):
section = labs[start:end]
if lab not in lev:
if not lev.is_type_compatible(lib.infer_dtype([lab])):
raise TypeError('Level type mismatch: %s' % lab)
# short circuit
loc = lev.searchsorted(lab, side=side)
if side == 'right' and loc >= 0:
loc -= 1
return start + section.searchsorted(loc, side=side)
idx = lev.get_loc(lab)
if k < n - 1:
end = start + section.searchsorted(idx, side='right')
start = start + section.searchsorted(idx, side='left')
else:
return start + section.searchsorted(idx, side=side)
def get_loc(self, key, method=None):
"""
Get location for a label or a tuple of labels as an integer, slice or
boolean mask.
Parameters
----------
key : label or tuple of labels (one for each level)
method : None
Returns
-------
loc : int, slice object or boolean mask
If the key is past the lexsort depth, the return may be a
boolean mask array, otherwise it is always a slice or int.
Examples
        --------
>>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')])
>>> mi.get_loc('b')
slice(1, 3, None)
>>> mi.get_loc(('b', 'e'))
1
Notes
        -----
The key cannot be a slice, list of same-level labels, a boolean mask,
or a sequence of such. If you want to use those, use
:meth:`MultiIndex.get_locs` instead.
See also
--------
Index.get_loc : get_loc method for (single-level) index.
MultiIndex.slice_locs : Get slice location given start label(s) and
end label(s).
MultiIndex.get_locs : Get location for a label/slice/list/mask or a
sequence of such.
"""
if method is not None:
raise NotImplementedError('only the default get_loc method is '
'currently supported for MultiIndex')
def _maybe_to_slice(loc):
"""convert integer indexer to boolean mask or slice if possible"""
if not isinstance(loc, np.ndarray) or loc.dtype != 'int64':
return loc
loc = lib.maybe_indices_to_slice(loc, len(self))
if isinstance(loc, slice):
return loc
mask = np.empty(len(self), dtype='bool')
mask.fill(False)
mask[loc] = True
return mask
if not isinstance(key, tuple):
loc = self._get_level_indexer(key, level=0)
# _get_level_indexer returns an empty slice if the key has
# been dropped from the MultiIndex
if isinstance(loc, slice) and loc.start == loc.stop:
raise KeyError(key)
return _maybe_to_slice(loc)
keylen = len(key)
if self.nlevels < keylen:
raise KeyError('Key length ({0}) exceeds index depth ({1})'
''.format(keylen, self.nlevels))
if keylen == self.nlevels and self.is_unique:
return self._engine.get_loc(key)
# -- partial selection or non-unique index
# break the key into 2 parts based on the lexsort_depth of the index;
# the first part returns a continuous slice of the index; the 2nd part
# needs linear search within the slice
i = self.lexsort_depth
lead_key, follow_key = key[:i], key[i:]
start, stop = (self.slice_locs(lead_key, lead_key)
if lead_key else (0, len(self)))
if start == stop:
raise KeyError(key)
if not follow_key:
return slice(start, stop)
warnings.warn('indexing past lexsort depth may impact performance.',
PerformanceWarning, stacklevel=10)
loc = np.arange(start, stop, dtype='int64')
for i, k in enumerate(follow_key, len(lead_key)):
mask = self.labels[i][loc] == self.levels[i].get_loc(k)
if not mask.all():
loc = loc[mask]
if not len(loc):
raise KeyError(key)
return (_maybe_to_slice(loc) if len(loc) != stop - start else
slice(start, stop))
def get_loc_level(self, key, level=0, drop_level=True):
"""
Get both the location for the requested label(s) and the
resulting sliced index.
Parameters
----------
key : label or sequence of labels
level : int/level name or list thereof, optional
drop_level : bool, default True
if ``False``, the resulting index will not drop any level.
Returns
-------
loc : A 2-tuple where the elements are:
Element 0: int, slice object or boolean array
Element 1: The resulting sliced multiindex/index. If the key
contains all levels, this will be ``None``.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')],
... names=['A', 'B'])
>>> mi.get_loc_level('b')
(slice(1, 3, None), Index(['e', 'f'], dtype='object', name='B'))
>>> mi.get_loc_level('e', level='B')
(array([False, True, False], dtype=bool),
Index(['b'], dtype='object', name='A'))
>>> mi.get_loc_level(['b', 'e'])
(1, None)
See Also
        --------
MultiIndex.get_loc : Get location for a label or a tuple of labels.
MultiIndex.get_locs : Get location for a label/slice/list/mask or a
sequence of such
"""
def maybe_droplevels(indexer, levels, drop_level):
if not drop_level:
return self[indexer]
            # kludge: try dropping each requested level, falling back to the
            # original sliced index if a level cannot be dropped
orig_index = new_index = self[indexer]
levels = [self._get_level_number(i) for i in levels]
for i in sorted(levels, reverse=True):
try:
new_index = new_index.droplevel(i)
except:
# no dropping here
return orig_index
return new_index
if isinstance(level, (tuple, list)):
if len(key) != len(level):
raise AssertionError('Key for location must have same '
'length as number of levels')
result = None
for lev, k in zip(level, key):
loc, new_index = self.get_loc_level(k, level=lev)
if isinstance(loc, slice):
mask = np.zeros(len(self), dtype=bool)
mask[loc] = True
loc = mask
result = loc if result is None else result & loc
return result, maybe_droplevels(result, level, drop_level)
level = self._get_level_number(level)
# kludge for #1796
if isinstance(key, list):
key = tuple(key)
if isinstance(key, tuple) and level == 0:
try:
if key in self.levels[0]:
indexer = self._get_level_indexer(key, level=level)
new_index = maybe_droplevels(indexer, [0], drop_level)
return indexer, new_index
except TypeError:
pass
if not any(isinstance(k, slice) for k in key):
# partial selection
# optionally get indexer to avoid re-calculation
def partial_selection(key, indexer=None):
if indexer is None:
indexer = self.get_loc(key)
ilevels = [i for i in range(len(key))
if key[i] != slice(None, None)]
return indexer, maybe_droplevels(indexer, ilevels,
drop_level)
if len(key) == self.nlevels and self.is_unique:
# Complete key in unique index -> standard get_loc
return (self._engine.get_loc(key), None)
else:
return partial_selection(key)
else:
indexer = None
for i, k in enumerate(key):
if not isinstance(k, slice):
k = self._get_level_indexer(k, level=i)
if isinstance(k, slice):
# everything
if k.start == 0 and k.stop == len(self):
k = slice(None, None)
else:
k_index = k
if isinstance(k, slice):
if k == slice(None, None):
continue
else:
raise TypeError(key)
if indexer is None:
indexer = k_index
else: # pragma: no cover
indexer &= k_index
if indexer is None:
indexer = slice(None, None)
ilevels = [i for i in range(len(key))
if key[i] != slice(None, None)]
return indexer, maybe_droplevels(indexer, ilevels, drop_level)
else:
indexer = self._get_level_indexer(key, level=level)
return indexer, maybe_droplevels(indexer, [level], drop_level)
def _get_level_indexer(self, key, level=0, indexer=None):
# return an indexer, boolean array or a slice showing where the key is
# in the totality of values
# if the indexer is provided, then use this
level_index = self.levels[level]
labels = self.labels[level]
def convert_indexer(start, stop, step, indexer=indexer, labels=labels):
# given the inputs and the labels/indexer, compute an indexer set
# if we have a provided indexer, then this need not consider
# the entire labels set
r = np.arange(start, stop, step)
if indexer is not None and len(indexer) != len(labels):
                # we have an indexer which maps the locations in the labels
                # that we have already selected (and which is not an indexer
                # for the entire set); otherwise this would be wasteful, so we
                # only need to examine locations that are in this set. The
                # only magic here is that the result is the mapping back to
                # the set that we have selected.
from pandas import Series
mapper = Series(indexer)
indexer = labels.take(_ensure_platform_int(indexer))
result = Series(Index(indexer).isin(r).nonzero()[0])
m = result.map(mapper)._ndarray_values
else:
m = np.zeros(len(labels), dtype=bool)
m[np.in1d(labels, r,
assume_unique=Index(labels).is_unique)] = True
return m
if isinstance(key, slice):
            # handle a slice, returning a slice if we can
# otherwise a boolean indexer
try:
if key.start is not None:
start = level_index.get_loc(key.start)
else:
start = 0
if key.stop is not None:
stop = level_index.get_loc(key.stop)
else:
stop = len(level_index) - 1
step = key.step
except KeyError:
# we have a partial slice (like looking up a partial date
# string)
start = stop = level_index.slice_indexer(key.start, key.stop,
key.step, kind='loc')
step = start.step
if isinstance(start, slice) or isinstance(stop, slice):
# we have a slice for start and/or stop
# a partial date slicer on a DatetimeIndex generates a slice
                # note that the stop ALREADY includes the stopped point (if
                # it was a string slice)
return convert_indexer(start.start, stop.stop, step)
elif level > 0 or self.lexsort_depth == 0 or step is not None:
                # need to have semantics like right-searching, as when we are
                # using a slice, so include stop + 1 (so we include stop)
return convert_indexer(start, stop + 1, step)
else:
# sorted, so can return slice object -> view
i = labels.searchsorted(start, side='left')
j = labels.searchsorted(stop, side='right')
return slice(i, j, step)
else:
loc = level_index.get_loc(key)
if isinstance(loc, slice):
return loc
elif level > 0 or self.lexsort_depth == 0:
return np.array(labels == loc, dtype=bool)
i = labels.searchsorted(loc, side='left')
j = labels.searchsorted(loc, side='right')
return slice(i, j)
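    # Illustrative return values of ``_get_level_indexer`` (a sketch; on a
    # lexsorted index it returns a slice for level 0 and a boolean mask for
    # deeper levels; exact array repr may vary by numpy version):
    #
    #   >>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')])
    #   >>> mi._get_level_indexer('b', level=0)
    #   slice(1, 3, None)
    #   >>> mi._get_level_indexer('e', level=1)
    #   array([False,  True, False])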
def get_locs(self, seq):
"""
        Get location for a given label/slice/list/mask, or a sequence of such,
        as an array of integers.
Parameters
----------
seq : label/slice/list/mask or a sequence of such
You should use one of the above for each level.
If a level should not be used, set it to ``slice(None)``.
Returns
-------
locs : array of integers suitable for passing to iloc
Examples
        --------
>>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')])
>>> mi.get_locs('b')
array([1, 2], dtype=int64)
>>> mi.get_locs([slice(None), ['e', 'f']])
array([1, 2], dtype=int64)
>>> mi.get_locs([[True, False, True], slice('e', 'f')])
array([2], dtype=int64)
See also
--------
MultiIndex.get_loc : Get location for a label or a tuple of labels.
MultiIndex.slice_locs : Get slice location given start label(s) and
end label(s).
"""
from .numeric import Int64Index
# must be lexsorted to at least as many levels
true_slices = [i for (i, s) in enumerate(com.is_true_slices(seq)) if s]
if true_slices and true_slices[-1] >= self.lexsort_depth:
raise UnsortedIndexError('MultiIndex slicing requires the index '
'to be lexsorted: slicing on levels {0}, '
'lexsort depth {1}'
.format(true_slices, self.lexsort_depth))
# indexer
# this is the list of all values that we want to select
n = len(self)
indexer = None
def _convert_to_indexer(r):
# return an indexer
if isinstance(r, slice):
m = np.zeros(n, dtype=bool)
m[r] = True
r = m.nonzero()[0]
elif com.is_bool_indexer(r):
if len(r) != n:
raise ValueError("cannot index with a boolean indexer "
"that is not the same length as the "
"index")
r = r.nonzero()[0]
return Int64Index(r)
def _update_indexer(idxr, indexer=indexer):
if indexer is None:
indexer = Index(np.arange(n))
if idxr is None:
return indexer
return indexer & idxr
for i, k in enumerate(seq):
if com.is_bool_indexer(k):
# a boolean indexer, must be the same length!
k = np.asarray(k)
indexer = _update_indexer(_convert_to_indexer(k),
indexer=indexer)
elif is_list_like(k):
# a collection of labels to include from this level (these
# are or'd)
indexers = None
for x in k:
try:
idxrs = _convert_to_indexer(
self._get_level_indexer(x, level=i,
indexer=indexer))
indexers = (idxrs if indexers is None
else indexers | idxrs)
except KeyError:
# ignore not founds
continue
if indexers is not None:
indexer = _update_indexer(indexers, indexer=indexer)
else:
# no matches we are done
return Int64Index([])._ndarray_values
elif com.is_null_slice(k):
# empty slice
indexer = _update_indexer(None, indexer=indexer)
elif isinstance(k, slice):
# a slice, include BOTH of the labels
indexer = _update_indexer(_convert_to_indexer(
self._get_level_indexer(k, level=i, indexer=indexer)),
indexer=indexer)
else:
# a single label
indexer = _update_indexer(_convert_to_indexer(
self.get_loc_level(k, level=i, drop_level=False)[0]),
indexer=indexer)
# empty indexer
if indexer is None:
return Int64Index([])._ndarray_values
return indexer._ndarray_values
def truncate(self, before=None, after=None):
"""
Slice index between two labels / tuples, return new MultiIndex
Parameters
----------
before : label or tuple, can be partial. Default None
None defaults to start
after : label or tuple, can be partial. Default None
None defaults to end
Returns
-------
truncated : MultiIndex
"""
if after and before and after < before:
raise ValueError('after < before')
i, j = self.levels[0].slice_locs(before, after)
left, right = self.slice_locs(before, after)
new_levels = list(self.levels)
new_levels[0] = new_levels[0][i:j]
new_labels = [lab[left:right] for lab in self.labels]
new_labels[0] = new_labels[0] - i
return MultiIndex(levels=new_levels, labels=new_labels,
verify_integrity=False)
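    # Illustrative use of ``truncate`` (a sketch, not part of the original
    # source):
    #
    #   >>> mi = pd.MultiIndex.from_arrays([list('abbd'), list('deff')])
    #   >>> list(mi.truncate(before='b', after='b'))
    #   [('b', 'e'), ('b', 'f')]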
def equals(self, other):
"""
Determines if two MultiIndex objects have the same labeling information
(the levels themselves do not necessarily have to be the same)
See also
--------
equal_levels
"""
if self.is_(other):
return True
if not isinstance(other, Index):
return False
if not isinstance(other, MultiIndex):
other_vals = com._values_from_object(_ensure_index(other))
return array_equivalent(self._ndarray_values, other_vals)
if self.nlevels != other.nlevels:
return False
if len(self) != len(other):
return False
for i in range(self.nlevels):
slabels = self.labels[i]
slabels = slabels[slabels != -1]
svalues = algos.take_nd(np.asarray(self.levels[i]._values),
slabels, allow_fill=False)
olabels = other.labels[i]
olabels = olabels[olabels != -1]
ovalues = algos.take_nd(
np.asarray(other.levels[i]._values),
olabels, allow_fill=False)
            # since we use NaT for both datetime64 and timedelta64,
            # we can have a situation where a level is typed, say,
            # timedelta64 in self (IOW it has values other than NaT),
            # but typed datetime64 in other (where it is all NaT);
            # these are still equivalent
if len(svalues) == 0 and len(ovalues) == 0:
continue
if not array_equivalent(svalues, ovalues):
return False
return True
def equal_levels(self, other):
"""
Return True if the levels of both MultiIndex objects are the same
"""
if self.nlevels != other.nlevels:
return False
for i in range(self.nlevels):
if not self.levels[i].equals(other.levels[i]):
return False
return True
def union(self, other):
"""
Form the union of two MultiIndex objects, sorting if possible
Parameters
----------
other : MultiIndex or array / Index of tuples
Returns
-------
Index
>>> index.union(index2)
"""
self._assert_can_do_setop(other)
other, result_names = self._convert_can_do_setop(other)
if len(other) == 0 or self.equals(other):
return self
uniq_tuples = lib.fast_unique_multiple([self._ndarray_values,
other._ndarray_values])
return MultiIndex.from_arrays(lzip(*uniq_tuples), sortorder=0,
names=result_names)
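    # Illustrative use of ``union`` (a sketch; duplicates are collapsed and
    # the result is sorted when possible):
    #
    #   >>> mi1 = pd.MultiIndex.from_tuples([('a', 1), ('b', 2)])
    #   >>> mi2 = pd.MultiIndex.from_tuples([('b', 2), ('c', 3)])
    #   >>> list(mi1.union(mi2))
    #   [('a', 1), ('b', 2), ('c', 3)]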
def intersection(self, other):
"""
Form the intersection of two MultiIndex objects, sorting if possible
Parameters
----------
other : MultiIndex or array / Index of tuples
Returns
-------
Index
"""
self._assert_can_do_setop(other)
other, result_names = self._convert_can_do_setop(other)
if self.equals(other):
return self
self_tuples = self._ndarray_values
other_tuples = other._ndarray_values
uniq_tuples = sorted(set(self_tuples) & set(other_tuples))
if len(uniq_tuples) == 0:
return MultiIndex(levels=self.levels,
labels=[[]] * self.nlevels,
names=result_names, verify_integrity=False)
else:
return MultiIndex.from_arrays(lzip(*uniq_tuples), sortorder=0,
names=result_names)
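    # Illustrative use of ``intersection`` (a sketch using the same mi1/mi2
    # as in the union example above):
    #
    #   >>> list(mi1.intersection(mi2))
    #   [('b', 2)]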
def difference(self, other):
"""
Compute sorted set difference of two MultiIndex objects
Returns
-------
diff : MultiIndex
"""
self._assert_can_do_setop(other)
other, result_names = self._convert_can_do_setop(other)
if len(other) == 0:
return self
if self.equals(other):
return MultiIndex(levels=self.levels,
labels=[[]] * self.nlevels,
names=result_names, verify_integrity=False)
difference = sorted(set(self._ndarray_values) -
set(other._ndarray_values))
if len(difference) == 0:
return MultiIndex(levels=[[]] * self.nlevels,
labels=[[]] * self.nlevels,
names=result_names, verify_integrity=False)
else:
return MultiIndex.from_tuples(difference, sortorder=0,
names=result_names)
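    # Illustrative use of ``difference`` (a sketch using the same mi1/mi2 as
    # in the union example above):
    #
    #   >>> list(mi1.difference(mi2))
    #   [('a', 1)]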
@Appender(_index_shared_docs['astype'])
def astype(self, dtype, copy=True):
dtype = pandas_dtype(dtype)
if is_categorical_dtype(dtype):
msg = '> 1 ndim Categorical are not supported at this time'
raise NotImplementedError(msg)
elif not is_object_dtype(dtype):
msg = ('Setting {cls} dtype to anything other than object '
'is not supported').format(cls=self.__class__)
raise TypeError(msg)
elif copy is True:
return self._shallow_copy()
return self
def _convert_can_do_setop(self, other):
result_names = self.names
if not hasattr(other, 'names'):
if len(other) == 0:
other = MultiIndex(levels=[[]] * self.nlevels,
labels=[[]] * self.nlevels,
verify_integrity=False)
else:
msg = 'other must be a MultiIndex or a list of tuples'
try:
other = MultiIndex.from_tuples(other)
except:
raise TypeError(msg)
else:
result_names = self.names if self.names == other.names else None
return other, result_names
def insert(self, loc, item):
"""
Make new MultiIndex inserting new item at location
Parameters
----------
loc : int
item : tuple
Must be same length as number of levels in the MultiIndex
Returns
-------
new_index : Index
"""
# Pad the key with empty strings if lower levels of the key
# aren't specified:
if not isinstance(item, tuple):
item = (item, ) + ('', ) * (self.nlevels - 1)
elif len(item) != self.nlevels:
raise ValueError('Item must have length equal to number of '
'levels.')
new_levels = []
new_labels = []
for k, level, labels in zip(item, self.levels, self.labels):
if k not in level:
# have to insert into level
# must insert at end otherwise you have to recompute all the
# other labels
lev_loc = len(level)
level = level.insert(lev_loc, k)
else:
lev_loc = level.get_loc(k)
new_levels.append(level)
new_labels.append(np.insert(_ensure_int64(labels), loc, lev_loc))
return MultiIndex(levels=new_levels, labels=new_labels,
names=self.names, verify_integrity=False)
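    # Illustrative use of ``insert`` (a sketch; a non-tuple item would be
    # padded with empty strings for the missing levels):
    #
    #   >>> mi = pd.MultiIndex.from_tuples([('a', 1), ('b', 2)])
    #   >>> list(mi.insert(1, ('c', 3)))
    #   [('a', 1), ('c', 3), ('b', 2)]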
def delete(self, loc):
"""
Make new index with passed location deleted
Returns
-------
new_index : MultiIndex
"""
new_labels = [np.delete(lab, loc) for lab in self.labels]
return MultiIndex(levels=self.levels, labels=new_labels,
names=self.names, verify_integrity=False)
get_major_bounds = slice_locs
__bounds = None
@property
def _bounds(self):
"""
Return or compute and return slice points for level 0, assuming
sortedness
"""
if self.__bounds is None:
inds = np.arange(len(self.levels[0]))
self.__bounds = self.labels[0].searchsorted(inds)
return self.__bounds
def _wrap_joined_index(self, joined, other):
names = self.names if self.names == other.names else None
return MultiIndex.from_tuples(joined, names=names)
@Appender(Index.isin.__doc__)
def isin(self, values, level=None):
if level is None:
values = MultiIndex.from_tuples(values,
names=self.names).values
return algos.isin(self.values, values)
else:
num = self._get_level_number(level)
levs = self.levels[num]
labs = self.labels[num]
sought_labels = levs.isin(values).nonzero()[0]
if levs.size == 0:
return np.zeros(len(labs), dtype=np.bool_)
else:
return np.lib.arraysetops.in1d(labs, sought_labels)
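    # Illustrative use of ``isin`` (a sketch; exact array repr may vary by
    # numpy version):
    #
    #   >>> mi = pd.MultiIndex.from_tuples([('a', 1), ('b', 2)])
    #   >>> mi.isin([('a', 1)])
    #   array([ True, False])
    #   >>> mi.isin(['a'], level=0)
    #   array([ True, False])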
def _reference_duplicate_name(self, name):
"""
        Returns True if the name referred to in self.names is duplicated.
"""
# count the times name equals an element in self.names.
return sum(name == n for n in self.names) > 1
MultiIndex._add_numeric_methods_disabled()
MultiIndex._add_numeric_methods_add_sub_disabled()
MultiIndex._add_logical_methods_disabled()
def _sparsify(label_list, start=0, sentinel=''):
pivoted = lzip(*label_list)
k = len(label_list)
result = pivoted[:start + 1]
prev = pivoted[start]
for cur in pivoted[start + 1:]:
sparse_cur = []
for i, (p, t) in enumerate(zip(prev, cur)):
if i == k - 1:
sparse_cur.append(t)
result.append(sparse_cur)
break
if p == t:
sparse_cur.append(sentinel)
else:
sparse_cur.extend(cur[i:])
result.append(sparse_cur)
break
prev = cur
return lzip(*result)
def _get_na_rep(dtype):
return {np.datetime64: 'NaT', np.timedelta64: 'NaT'}.get(dtype, 'NaN')
| {
"content_hash": "3bf263d590f8cbb456bda5462b872772",
"timestamp": "",
"source": "github",
"line_count": 2964,
"max_line_length": 79,
"avg_line_length": 35.47402159244265,
"alnum_prop": 0.5303152789005658,
"repo_name": "ryfeus/lambda-packs",
"id": "9a4aa15f4cc25d46d5743626a6e0975bb3b13105",
"size": "105182",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Tensorflow_Pandas_Numpy/source3.6/pandas/core/indexes/multi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9768343"
},
{
"name": "C++",
"bytes": "76566960"
},
{
"name": "CMake",
"bytes": "191097"
},
{
"name": "CSS",
"bytes": "153538"
},
{
"name": "Cuda",
"bytes": "61768"
},
{
"name": "Cython",
"bytes": "3110222"
},
{
"name": "Fortran",
"bytes": "110284"
},
{
"name": "HTML",
"bytes": "248658"
},
{
"name": "JavaScript",
"bytes": "62920"
},
{
"name": "MATLAB",
"bytes": "17384"
},
{
"name": "Makefile",
"bytes": "152150"
},
{
"name": "Python",
"bytes": "549307737"
},
{
"name": "Roff",
"bytes": "26398"
},
{
"name": "SWIG",
"bytes": "142"
},
{
"name": "Shell",
"bytes": "7790"
},
{
"name": "Smarty",
"bytes": "4090"
},
{
"name": "TeX",
"bytes": "152062"
},
{
"name": "XSLT",
"bytes": "305540"
}
],
"symlink_target": ""
} |
from PyQt5.QtCore import QObject
from duniterpy.api import errors
from sakia.errors import NoPeerAvailable
from sakia.data.processors import IdentitiesProcessor, ContactsProcessor
import logging
class SearchUserModel(QObject):
"""
    The model of the SearchUser component
"""
def __init__(self, parent, app):
"""
:param sakia.gui.search_user.controller.NetworkController parent: the controller
:param sakia.app.Application app: the app
"""
super().__init__(parent)
self.app = app
self.identities_processor = IdentitiesProcessor.instanciate(app)
self.contacts_processor = ContactsProcessor.instanciate(app)
self._nodes = list()
self._current_identity = None
def contacts(self):
return self.contacts_processor.contacts()
def identity(self):
"""
Get current identity selected
:rtype: sakia.core.registry.Identity
"""
return self._current_identity
def user_nodes(self):
"""
Gets user nodes
:return:
"""
return [n.uid for n in self._nodes]
async def find_user(self, text):
"""
Search for a user
:param text:
:return:
"""
try:
self._nodes = await self.identities_processor.lookup(self.app.currency, text)
except errors.DuniterError as e:
if e.ucode == errors.NO_MATCHING_IDENTITY:
self._nodes = list()
else:
logging.debug(str(e))
except NoPeerAvailable as e:
logging.debug(str(e))
except BaseException as e:
logging.debug(str(e))
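    # Hypothetical usage from a coroutine (a sketch, not part of the original
    # source; assumes an initialised controller and sakia Application):
    #
    #   model = SearchUserModel(controller, app)
    #   await model.find_user("john")
    #   if model.select_identity(0):
    #       identity = model.identity()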
def select_identity(self, index):
"""
Select an identity from a node index
:param index:
:return:
"""
if index < 0 or index >= len(self._nodes):
self._current_identity = None
return False
self._current_identity = self._nodes[index]
return True
def clear(self):
self._current_identity = None
self._nodes = list() | {
"content_hash": "0f93eea7cb6c2e862fe3a27a566b54a0",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 89,
"avg_line_length": 27.82894736842105,
"alnum_prop": 0.5839243498817966,
"repo_name": "ucoin-io/cutecoin",
"id": "2afcabe9bf9865f7e9439bdf60bebc083fca113e",
"size": "2115",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/sakia/gui/sub/search_user/model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2475"
},
{
"name": "JavaScript",
"bytes": "1594"
},
{
"name": "PowerShell",
"bytes": "3111"
},
{
"name": "Python",
"bytes": "718811"
},
{
"name": "Shell",
"bytes": "3983"
}
],
"symlink_target": ""
} |
import sys
import magicbot
class Injectable:
def __init__(self, num):
self.num = num
def test_simple_inject():
class Component1:
intvar = int
tupvar = tuple
injectable = Injectable
def execute(self):
pass
class Component2:
tupvar = tuple
component1 = Component1
def execute(self):
pass
class Bot(magicbot.MagicRobot):
intvar = 1
tupvar = (1,2)
component1 = Component1
component2 = Component2
def createObjects(self):
self.injectable = Injectable(42)
bot = Bot()
bot.robotInit()
assert bot.component1.intvar == 1
assert isinstance(bot.component1.injectable, Injectable)
assert bot.component1.injectable.num == 42
assert bot.component2.tupvar == (1,2)
assert bot.component2.component1 is bot.component1
def test_multilevel_inject():
class DuplicateComponent:
var = tuple
injectable = Injectable
def execute(self):
pass
class Bot(magicbot.MagicRobot):
dup1 = DuplicateComponent
dup1_var = (1,2)
dup2 = DuplicateComponent
dup2_var = (3,4)
dup3 = DuplicateComponent
dup3_var = (5,6)
def createObjects(self):
self.injectable = Injectable(42)
bot = Bot()
bot.robotInit()
assert bot.dup1.var == (1,2)
assert bot.dup2.var == (3,4)
assert bot.dup3.var == (5,6)
def test_inherited_inject():
class SuperComponent:
intvar = int
def execute(self):
pass
class Component(SuperComponent):
tupvar = tuple
def execute(self):
pass
class Bot(magicbot.MagicRobot):
component = Component
def createObjects(self):
self.intvar = 1
self.tupvar = (1, 2)
bot = Bot()
bot.robotInit()
assert bot.component.tupvar == (1, 2)
assert bot.component.intvar == 1
def test_botinherit_inject():
class Component:
def execute(self):
pass
class BotBase(magicbot.MagicRobot):
component_a = Component
def createObjects(self):
pass
class Bot(BotBase):
component_b = Component
bot = Bot()
bot.robotInit()
assert isinstance(bot.component_a, Component)
assert isinstance(bot.component_b, Component)
assert bot.component_a is not bot.component_b
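# For Python 3.6+, magicbot also supports declaring components via variable
# annotations instead of class attributes; roughly (a sketch only, not the
# actual magicbot_annotation_tester module imported below):
#
#   class AnnotatedBot(magicbot.MagicRobot):
#       component1: Component1
#       component2: Component2
#       intvar = 1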
# Variable annotations are a Python 3.6+ feature
if sys.version_info >= (3, 6):
def test_simple_annotation_inject():
from magicbot_annotation_tester import SimpleBot, Injectable
bot = SimpleBot()
bot.robotInit()
assert bot.component1.intvar == 1
assert isinstance(bot.component1.injectable, Injectable)
assert bot.component1.injectable.num == 42
assert bot.component2.tupvar == (1, 2)
assert bot.component2.component1 is bot.component1
def test_multilevel_annotation_inject():
from magicbot_annotation_tester import MultilevelBot
bot = MultilevelBot()
bot.robotInit()
assert bot.dup1 is not bot.dup2
assert bot.dup1.var == (1, 2)
assert bot.dup2.var == (3, 4)
assert bot.dup3.var == (5, 6)
def test_inherited_annotation_inject():
from magicbot_annotation_tester import InheritBot
bot = InheritBot()
bot.robotInit()
assert bot.component.tupvar == (1, 2)
assert bot.component.intvar == 1
def test_botinherit_annotation_inject():
from magicbot_annotation_tester import InheritedBot, DumbComponent
bot = InheritedBot()
bot.robotInit()
assert isinstance(bot.component_a, DumbComponent)
assert isinstance(bot.component_b, DumbComponent)
assert bot.component_a is not bot.component_b
def test_typehintedbot():
from magicbot_annotation_tester import TypeHintedBot, DumbComponent
bot = TypeHintedBot()
bot.robotInit()
assert isinstance(bot.component, DumbComponent)
assert bot.some_int == 1
assert bot.some_float == 0.5
def test_typehints_inject():
from magicbot_annotation_tester import TypeHintsBot, TypeHintedComponent, Injectable
bot = TypeHintsBot()
bot.robotInit()
assert isinstance(bot.component, TypeHintedComponent)
assert bot.component.some_int == 1
assert isinstance(bot.component.injectable, Injectable)
assert bot.component.injectable.num == 42
| {
"content_hash": "054b65bda12ed3c52254de7e61a8d770",
"timestamp": "",
"source": "github",
"line_count": 196,
"max_line_length": 92,
"avg_line_length": 24.158163265306122,
"alnum_prop": 0.5978880675818374,
"repo_name": "Twinters007/robotpy-wpilib-utilities",
"id": "72c9649a95d92d09fcbdef45146ecab0d46db030",
"size": "4735",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_magicbot_injection.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "250056"
},
{
"name": "Shell",
"bytes": "957"
}
],
"symlink_target": ""
} |
import sys
# Local imports
from seisflows.tools import msg
from seisflows.tools import unix
from seisflows.config import ParameterError, custom_import
from seisflows.workflow.base import base
PAR = sys.modules['seisflows_parameters']
PATH = sys.modules['seisflows_paths']
optimize = sys.modules['seisflows_optimize']
class thrifty_inversion(custom_import('workflow', 'inversion')):
""" Thrifty inversion subclass
Provides savings over conventional inversion by carrying over forward
simulations from line search
The results of 'inversion' and 'thrifty_inversion' should be exactly the
same
"""
status = 0
def initialize(self):
if self.status == 0:
super(thrifty_inversion, self).initialize()
def clean(self):
# can forward simulations from line search be carried over?
self.update_status()
if self.status == 1:
unix.rm(PATH.GRAD)
unix.mv(PATH.FUNC, PATH.GRAD)
unix.mkdir(PATH.FUNC)
else:
super(thrifty_inversion, self).clean()
def update_status(self):
if PAR.LINESEARCH != 'Backtrack':
# only works for backtracking line search
self.status = 0
elif optimize.iter == PAR.BEGIN or optimize.restarted:
# even if backtracking line search is chosen, may not work on
# first iteration or following a restart
self.status = 0
elif optimize.iter == PAR.END:
# may not work after resuming saved workflow
self.status = 0
elif PATH.LOCAL:
# may not work if using local filesystems
self.status = 0
else:
self.status = 1
| {
"content_hash": "89c72fe1537aba85f5a0bfc4786c53ba",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 78,
"avg_line_length": 28.37704918032787,
"alnum_prop": 0.6302715193529752,
"repo_name": "rmodrak/seisflows",
"id": "d4814df4fbc67e901dbee4cd915794ca603bcbc7",
"size": "1881",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "seisflows/workflow/thrifty_inversion.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "298858"
},
{
"name": "Shell",
"bytes": "3799"
}
],
"symlink_target": ""
} |
from pyfbsdk import *
def CopyBlendShapeAnimation(srcmodel, dstmodel):
for lSrcProp in srcmodel.PropertyList:
if lSrcProp.IsAnimatable() and lSrcProp.GetDataTypeName().lower()=="shape":
lDstProp = dstmodel.PropertyList.Find( lSrcProp.Name )
if lDstProp:
lDstProp.SetAnimated(True)
lDstProp.GetAnimationNode().FCurve.KeyReplaceBy(lSrcProp.GetAnimationNode().FCurve)
def main():
models = FBModelList()
FBGetSelectedModels(models)
if len(models) == 2:
FBUndoManager().Undo(True)
models2 = FBModelList()
FBGetSelectedModels(models2)
if len(models2) == 1:
srcmodel = models2[0]
dstmodel = None
if srcmodel == models[0]:
dstmodel = models[1]
else:
dstmodel = models[0]
print srcmodel.Name
print dstmodel.Name
CopyBlendShapeAnimation(srcmodel, dstmodel)
#
main() | {
"content_hash": "c2794d143948ff1a06a5ba2d09f3dc58",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 99,
"avg_line_length": 26.023255813953487,
"alnum_prop": 0.5308310991957105,
"repo_name": "Neill3d/MoPlugs",
"id": "53b2b8f381e7a4fa765e646957b65dfc8fbab913",
"size": "1293",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PythonScripts/Actions/CopyBlendShapeAnimation.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "93535"
},
{
"name": "C#",
"bytes": "388924"
},
{
"name": "C++",
"bytes": "5588932"
},
{
"name": "GLSL",
"bytes": "395995"
},
{
"name": "Inno Setup",
"bytes": "12223"
},
{
"name": "Python",
"bytes": "514876"
}
],
"symlink_target": ""
} |
"""make dictionary of reverse descriptions"""
import sys
import os
import re
import shutil
import StringIO
import simplejson
import gzip
import tempfile
from optparse import OptionParser
from normalize_pdb import normalize_pdb
from itertools import combinations
from utils import *
CMD_PYTHON="/usr/bin/python"
if os.path.isfile("/opt/local/bin/python2.7"):
CMD_PYTHON = "/opt/local/bin/python2.7"
def parse_args():
"""setup program options parsing"""
parser = OptionParser(description="create JSON dictionary for reverse descriptions")
parser.add_option("-o", "--output-json", dest="output_json",
help="save dictionary to JSON file", metavar="DIR")
parser.add_option("-i", "--input", dest="input",
help="process given PDB files", metavar="FILE")
(options, args) = parser.parse_args()
return (parser, options, args)
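# Example invocation (illustrative only; the file names are hypothetical):
#
#   python make-reverse-descriptions-dict.py -i structure.pdb.gz \
#       -o reverse-descriptions.json.gz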
def handle(options):
inp_fn = options.input
res_map = inp_fn.replace(".pdb.gz",".json.gz")
res = {}
(_fd,tmp_fn) = tempfile.mkstemp(".make-rev-dict.json")
for prg,prg_name in (('RV','rnaview'),('MC','mc-annotate'),('MO','moderna'),('FR','fr3d')):
for t in ('bp','stackings'):
if t=='bp' and prg=='MO':
continue
cmd = CMD_PYTHON + " ./extract-contacts.py --use-%(prg_name)s -i '%(inp_fn)s' --no-pdb --output-json=%(tmp_fn)s" % locals()
if t=='stackings':
cmd += " --extract-stackings"
if prg=='FR':
cmd += " --dont-normalize --residue-mapping=%(res_map)s" % locals()
os.system(cmd + "> /dev/null")
if os.path.exists(tmp_fn):
doublets = load_json(tmp_fn)
else:
print "missing doublets dict!"
doublets = []
os.unlink(tmp_fn)
if prg!='FR':
os.system(cmd+" --reverse-pdb > /dev/null")
rev_doublets = load_json(tmp_fn)
os.unlink(tmp_fn)
else:
rev_doublets = []
descriptions = {}
for d in doublets+rev_doublets:
id1 = d['resseq_1']
id2 = d['resseq_2']
if id1==id2:
continue
desc = d['desc']
key = (id1,id2)
if not descriptions.has_key(key):
descriptions[key] = desc
for (id1,id2),desc in descriptions.items():
rev_key = (id2,id1)
if descriptions.has_key(rev_key):
rev_desc = descriptions[rev_key]
key1 = "reverse/%s/%s/%s" % (prg, desc, rev_desc)
key2 = "reverse/%s/%s/%s" % (prg, rev_desc, desc)
if not res.has_key(key1):
res[key1] = 0
if not res.has_key(key2):
res[key2] = 0
res[key1] += 1
res[key2] += 1
save_json(options.output_json, res)
def main():
(parser, options, args) = parse_args()
if not options.input:
print "select input file"
parser.print_help()
exit(1)
if not options.output_json:
print "select output"
parser.print_help()
exit(1)
handle(options)
if __name__ == '__main__':
main()
| {
"content_hash": "cc745462d37c3b2e674897cad8b50764",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 135,
"avg_line_length": 32.92156862745098,
"alnum_prop": 0.5122096486003573,
"repo_name": "m4rx9/rna-pdb-tools",
"id": "9e41e7992bce23ca23385aa0e36022092e906f65",
"size": "3380",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "rna_tools/tools/clarna_play/ClaRNAlib/make-reverse-descriptions-dict.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34107"
},
{
"name": "Shell",
"bytes": "1130"
}
],
"symlink_target": ""
} |
import os
from uliweb import settings
from uliweb.contrib.upload import FileServing, UUIDFilenameConverter
from uliweb.orm import get_model
from uliweb.i18n import ugettext as _
class DBFileServing(FileServing):
def __init__(self, default_filename_converter_cls=UUIDFilenameConverter):
super(DBFileServing, self).__init__(default_filename_converter_cls=default_filename_converter_cls)
self.model = get_model('dbuploadfiles')
def get_filename(self, filename, filesystem=False, convert=False):
"""
        Get the filename according to self.to_path; if filesystem is False
        then return a unicode filename, otherwise return a filesystem-encoded filename
        @param filename: relative filename, it'll be combined with self.to_path
        @param filesystem: if True, then encode the filename to the filesystem encoding
        @param convert: if True, then convert the filename with the FilenameConverter class
"""
from uliweb.utils.common import safe_unicode
#make sure the filename is unicode
s = settings.GLOBAL
if convert:
_p, _f = os.path.split(filename)
_filename = os.path.join(_p, self.filename_convert(filename))
else:
_filename = filename
nfile = safe_unicode(_filename, s.HTMLPAGE_ENCODING)
return nfile
def get_file_record(self, fileid):
from uliweb import error
obj = self.model.get(self.model.c.slug==fileid)
if not obj:
error(_('File %s is not found!') % fileid)
return obj
def download(self, filename, action=None, x_filename='', real_filename=''):
"""
action will be "download", "inline"
and if the request.GET has 'action', then the action will be replaced by it.
"""
from uliweb.utils.common import safe_str
from uliweb.utils.filedown import filedown
from uliweb import request
from StringIO import StringIO
from uliweb.utils import files
action = request.GET.get('action', action)
fname = safe_str(filename)
if not x_filename:
x_filename = fname
if self.x_file_prefix:
x_filename = os.path.normpath(os.path.join(self.x_file_prefix, x_filename)).replace('\\', '/')
if not real_filename:
#if not real_filename, then get the file info from database
obj = self.get_file_record(filename)
fname = obj.filename.encode('utf8')
fileobj = StringIO(obj.content), obj.create_time, obj.size
#fileobj should be (filename, mtime, size)
else:
fileobj = None
s = settings.GLOBAL
real_filename = files.encode_filename(real_filename, to_encoding=s.FILESYSTEM_ENCODING)
return filedown(request.environ, fname, action=action,
x_sendfile=bool(self.x_sendfile), x_header_name=self.x_header_name,
x_filename=x_filename, real_filename=real_filename, fileobj=fileobj)
def save_file(self, filename, fobj, replace=False, convert=True):
path, _f = os.path.split(filename)
#get full path and converted filename
fname = self.get_filename(_f, True, convert=convert)
#save file to database
text = fobj.read()
obj = self.model(filename=_f, content=text, size=len(text), slug=fname, path=path)
obj.save()
return fname
def save_file_field(self, field, replace=False, filename=None):
filename = filename or field.data.filename
fname = self.save_file(filename, field.data.file, replace)
field.data.filename = fname
return fname
def save_image_field(self, field, resize_to=None, replace=False, filename=None):
from uliweb.utils.image import resize_image
if resize_to:
field.data.file = resize_image(field.data.file, resize_to)
filename = filename or field.data.filename
fname = self.save_file(filename, field.data.file, replace)
field.data.filename = fname
return fname
def delete_filename(self, filename):
obj = self.get_file_record(filename)
obj.delete()
def get_url(self, filename, query_para=None, **url_args):
"""
Return <a href="filename" title="filename"> tag
        You should pass title and text via url_args; if they are not passed, the filename is used
"""
from uliweb.core.html import Tag
obj = self.get_file_record(filename)
title = url_args.pop('title', obj.filename)
text = url_args.pop('text', title)
query_para = query_para or {}
return str(Tag('a', title, href=self.get_href(filename, **query_para), **url_args))
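    # Hypothetical usage from a view (a sketch, not part of the original
    # source; ``form.upload`` stands for a bound uliweb file field):
    #
    #   serving = DBFileServing()
    #   slug = serving.save_file_field(form.upload)
    #   link_html = serving.get_url(slug, text='Download')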
| {
"content_hash": "3ad50cf03da07c0d28d5906adde2b463",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 106,
"avg_line_length": 41.228813559322035,
"alnum_prop": 0.6197327852004111,
"repo_name": "wwfifi/uliweb",
"id": "aa20cd9d37c0f7741ad73f247ea10eec736c433f",
"size": "4865",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "uliweb/contrib/dbupload/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "4614"
},
{
"name": "HTML",
"bytes": "51427"
},
{
"name": "JavaScript",
"bytes": "3450"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Nginx",
"bytes": "940"
},
{
"name": "Python",
"bytes": "2029810"
},
{
"name": "Shell",
"bytes": "684"
},
{
"name": "Smarty",
"bytes": "698"
}
],
"symlink_target": ""
} |
import codecs
from os import environ, path
from sys import argv, path as sys_path
from setuptools import find_packages, setup
import versioneer
data_files = []
deps = [
"requests>=2.26.0,<3.0",
"isodate",
"lxml>=4.6.3",
"websocket-client>=0.58.0",
# Support for SOCKS proxies
"PySocks!=1.5.7,>=1.5.6",
]
# for encrypted streams
if environ.get("STREAMLINK_USE_PYCRYPTO"):
deps.append("pycrypto")
else:
# this version of pycryptodome is known to work and has a Windows wheel for py2.7, py3.3-3.6
deps.append("pycryptodome>=3.4.3,<4")
# for localization
if environ.get("STREAMLINK_USE_PYCOUNTRY"):
deps.append("pycountry")
else:
deps.append("iso-639")
deps.append("iso3166")
# When we build an egg for the Win32 bootstrap we don't want dependency
# information built into it.
if environ.get("NO_DEPS"):
deps = []
this_directory = path.abspath(path.dirname(__file__))
srcdir = path.join(this_directory, "src/")
sys_path.insert(0, srcdir)
with codecs.open(path.join(this_directory, "README.md"), 'r', "utf8") as f:
long_description = f.read()
def is_wheel_for_windows():
if "bdist_wheel" in argv:
names = ["win32", "win-amd64", "cygwin"]
length = len(argv)
for pos in range(argv.index("bdist_wheel") + 1, length):
if argv[pos] == "--plat-name" and pos + 1 < length:
return argv[pos + 1] in names
elif argv[pos][:12] == "--plat-name=":
return argv[pos][12:] in names
return False
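# Both of the following invocations would be detected by is_wheel_for_windows()
# (illustrative command lines only):
#
#   python setup.py bdist_wheel --plat-name win-amd64
#   python setup.py bdist_wheel --plat-name=win32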
entry_points = {
"console_scripts": ["streamlink=streamlink_cli.main:main"]
}
if is_wheel_for_windows():
entry_points["gui_scripts"] = ["streamlinkw=streamlink_cli.main:main"]
additional_files = [
("share/man/man1", ["docs/_build/man/streamlink.1"])
]
for destdir, srcfiles in additional_files:
files = []
for srcfile in srcfiles:
if path.exists(srcfile):
files.append(srcfile)
if files:
data_files.append((destdir, files))
setup(name="streamlink",
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
description="Streamlink is a command-line utility that extracts streams "
"from various services and pipes them into a video player of "
"choice.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/streamlink/streamlink",
project_urls={
"Documentation": "https://streamlink.github.io/",
"Tracker": "https://github.com/streamlink/streamlink/issues",
"Source": "https://github.com/streamlink/streamlink",
"Funding": "https://opencollective.com/streamlink"
},
author="Streamlink",
# temp until we have a mailing list / global email
author_email="streamlink@protonmail.com",
license="Simplified BSD",
packages=find_packages("src"),
package_dir={"": "src"},
package_data={"streamlink.plugins": [".removed"]},
entry_points=entry_points,
data_files=data_files,
install_requires=deps,
test_suite="tests",
python_requires=">=3.6, <4",
classifiers=["Development Status :: 5 - Production/Stable",
"License :: OSI Approved :: BSD License",
"Environment :: Console",
"Intended Audience :: End Users/Desktop",
"Operating System :: POSIX",
"Operating System :: Microsoft :: Windows",
"Operating System :: MacOS",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Multimedia :: Sound/Audio",
"Topic :: Multimedia :: Video",
"Topic :: Utilities"])
| {
"content_hash": "d4c10a0baf73b4a2a69c0e2e4da0eadc",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 96,
"avg_line_length": 33.6910569105691,
"alnum_prop": 0.5912162162162162,
"repo_name": "melmorabity/streamlink",
"id": "6f79f045db816ac2566fca4ee9407efd075611ff",
"size": "4166",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "838"
},
{
"name": "Python",
"bytes": "1537432"
},
{
"name": "Shell",
"bytes": "18707"
}
],
"symlink_target": ""
} |
import os
from lxml import etree
from shapely.wkt import loads
from operator import attrgetter
from django.db import models
from django.db.models import Q
from django.conf import settings
from django.contrib.auth.models import User
from django.template.defaultfilters import slugify
from django.db.models.signals import post_save
from sorl.thumbnail.fields import ImageWithThumbnailsField
from djangoratings.fields import RatingField
class Tag(models.Model):
tag_name = models.CharField(max_length=150)
def __unicode__(self):
return '%s' % self.tag_name
class Meta:
ordering = ['tag_name']
class DataType(models.Model):
data_type = models.CharField(max_length=50)
def __unicode__(self):
return '%s' % self.data_type
class Meta:
ordering = ['data_type']
class UrlType(models.Model):
url_type = models.CharField(max_length=50)
def __unicode__(self):
return '%s' % self.url_type
class Meta:
ordering = ['url_type']
class UpdateFrequency(models.Model):
update_frequency = models.CharField(max_length=50)
def __unicode__(self):
return '%s' % self.update_frequency
class Meta:
ordering = ['update_frequency']
class CoordSystem(models.Model):
name = models.CharField(max_length=255)
description = models.TextField()
EPSG_code = models.IntegerField(blank=True, help_text="Official EPSG code, numbers only")
def __unicode__(self):
return '%s, %s' % (self.EPSG_code, self.name)
class Meta:
ordering = ['EPSG_code']
verbose_name = 'Coordinate system'
class Resource(models.Model):
@classmethod
def search(cls, qs = None, objs = None):
if objs == None:
objs = cls.objects.filter(is_published = True)
if qs:
objs = objs.filter(Q(name__icontains=qs) | Q(description__icontains=qs) | Q(organization__icontains=qs) | Q(division__icontains=qs))
return objs
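    # Illustrative call (a sketch, not part of the original source):
    #
    #   matches = Resource.search(qs='water')
    #   # -> published resources whose name, description, organization or
    #   #    division contains 'water'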
def save(self, *args, **kwargs):
if not self.pk:
super(Resource, self).save(*args, **kwargs)
self.csw_xml = self.gen_csw_xml()
self.csw_anytext = self.gen_csw_anytext()
super(Resource, self).save(*args, **kwargs)
# Basic Info
name = models.CharField(max_length=255)
short_description = models.CharField(max_length=255)
release_date = models.DateField(blank=True, null=True)
time_period = models.CharField(max_length=50, blank=True)
organization = models.CharField(max_length=255)
division = models.CharField(max_length=255, blank=True)
usage = models.TextField()
tags = models.ManyToManyField(Tag, blank=True, null=True)
data_types = models.ManyToManyField(DataType, blank=True, null=True)
# More Info
description = models.TextField()
contact_phone = models.CharField(max_length=50, blank=True)
contact_email = models.CharField(max_length=255, blank=True)
contact_url = models.CharField(max_length=255, blank=True)
updates = models.ForeignKey(UpdateFrequency, null=True, blank=True)
area_of_interest = models.CharField(max_length=255, blank=True)
is_published = models.BooleanField(default=True, verbose_name="Public")
created_by = models.ForeignKey(User, related_name='created_by')
last_updated_by = models.ForeignKey(User, related_name='updated_by')
created = models.DateTimeField()
last_updated = models.DateTimeField(auto_now=True)
metadata_contact = models.CharField(max_length=255, blank=True)
metadata_notes = models.TextField(blank=True)
coord_sys = models.ManyToManyField(CoordSystem, blank=True, null=True, verbose_name="Coordinate system")
rating = RatingField(range=5, can_change_vote=True)
update_frequency = models.CharField(max_length=255, blank=True)
data_formats = models.CharField(max_length=255, blank=True)
proj_coord_sys = models.CharField(max_length=255, blank=True, verbose_name="Coordinate system")
# CSW specific properties
wkt_geometry = models.TextField(blank=True)
csw_typename = models.CharField(max_length=200,default="csw:Record")
csw_schema = models.CharField(max_length=200,default="http://www.opengis.net/cat/csw/2.0.2")
csw_mdsource = models.CharField(max_length=100,default="local")
csw_xml = models.TextField(blank=True)
csw_anytext = models.TextField(blank=True)
def get_distinct_url_types(self):
types = []
for url in self.url_set.all():
if url.url_type not in types:
types.append(url.url_type)
return sorted(types, key=attrgetter('url_type'))
def get_grouped_urls(self):
urls = {}
for utype in UrlType.objects.all():
urls[utype.url_type] = self.url_set.filter(url_type=utype)
return urls
def get_first_image(self):
images = UrlImage.objects.filter(url__resource=self)
if images.count() == 0:
return None
return images[0]
def get_images(self):
images = UrlImage.objects.filter(url__resource=self)
if images.count() == 0:
return None
return images
def get_absolute_url(self):
slug = slugify(self.name)
return "/opendata/resource/%i/%s" % (self.id, slug)
def __unicode__(self):
return '%s' % self.name
# CSW specific properties
@property
def csw_identifier(self):
if not settings.SITEHOST:
raise RuntimeError('settings.SITEHOST is not set')
fqrhn = '.'.join((reversed(settings.SITEHOST.split('.'))))
return 'urn:x-odc:resource:%s::%d' % (fqrhn, self.id)
@property
def csw_type(self):
data_types = self.data_types.values()
if len(data_types) > 0:
return data_types[0]['data_type']
return None
@property
def csw_crs(self):
crs = self.coord_sys.values()
if len(crs) > 0:
return crs[0]['name']
return None
@property
def csw_links(self):
links = []
for url in self.url_set.all():
tmp = '%s,%s,%s,%s' % (url.url_label, url.url_type.url_type, 'WWW:DOWNLOAD-1.0-http--download', url.url)
links.append(tmp)
abs_url = '%s%s' % (gen_website_url(), self.get_absolute_url())
link = '%s,%s,%s,%s' % (self.name, self.name, 'WWW:LINK-1.0-http--link', abs_url)
links.append(link)
return '^'.join(links)
@property
def csw_keywords(self):
keywords = []
for keyword in self.tags.values():
keywords.append(keyword['tag_name'])
return ','.join(keywords)
@property
def csw_creator(self):
creator = User.objects.filter(username=self.created_by)[0]
return '%s %s' % (creator.first_name, creator.last_name)
def gen_csw_xml(self):
def nspath(ns, element):
return '{%s}%s' % (ns, element)
nsmap = {
'csw': 'http://www.opengis.net/cat/csw/2.0.2',
'dc' : 'http://purl.org/dc/elements/1.1/',
'dct': 'http://purl.org/dc/terms/',
'ows': 'http://www.opengis.net/ows',
'xsi': 'http://www.w3.org/2001/XMLSchema-instance',
}
record = etree.Element(nspath(nsmap['csw'], 'Record'), nsmap=nsmap)
etree.SubElement(record, nspath(nsmap['dc'], 'identifier')).text = self.csw_identifier
etree.SubElement(record, nspath(nsmap['dc'], 'title')).text = self.name
if self.csw_type is not None:
etree.SubElement(record, nspath(nsmap['dc'], 'type')).text = self.csw_type
for tag in self.tags.all():
etree.SubElement(record, nspath(nsmap['dc'], 'subject')).text = tag.tag_name
etree.SubElement(record, nspath(nsmap['dc'], 'format')).text = str(self.data_formats)
abs_url = '%s%s' % (gen_website_url(), self.get_absolute_url())
etree.SubElement(record, nspath(nsmap['dct'], 'references'), scheme='WWW:LINK-1.0-http--link').text = abs_url
for link in self.url_set.all():
etree.SubElement(record, nspath(nsmap['dct'], 'references'),
scheme='WWW:DOWNLOAD-1.0-http--download').text = link.url
etree.SubElement(record, nspath(nsmap['dct'], 'modified')).text = str(self.last_updated)
etree.SubElement(record, nspath(nsmap['dct'], 'abstract')).text = self.description
etree.SubElement(record, nspath(nsmap['dc'], 'date')).text = str(self.created)
etree.SubElement(record, nspath(nsmap['dc'], 'creator')).text = str(self.csw_creator)
etree.SubElement(record, nspath(nsmap['dc'], 'coverage')).text = self.area_of_interest
try:
geom = loads(self.wkt_geometry)
bounds = geom.envelope.bounds
dimensions = str(geom.envelope._ndim)
bbox = etree.SubElement(record, nspath(nsmap['ows'], 'BoundingBox'), dimensions=dimensions)
if self.csw_crs is not None:
bbox.attrib['crs'] = self.csw_crs
etree.SubElement(bbox, nspath(nsmap['ows'], 'LowerCorner')).text = '%s %s' % (bounds[1], bounds[0])
etree.SubElement(bbox, nspath(nsmap['ows'], 'UpperCorner')).text = '%s %s' % (bounds[3], bounds[2])
except Exception:
# We can safely ignore geom issues
pass
return etree.tostring(record)
def gen_csw_anytext(self):
xml = etree.fromstring(self.csw_xml)
return ' '.join([value.strip() for value in xml.xpath('//text()')])
class Url(models.Model):
url = models.CharField(max_length=255)
url_label = models.CharField(max_length=255)
url_type = models.ForeignKey(UrlType)
resource = models.ForeignKey(Resource)
def __unicode__(self):
return '%s - %s - %s' % (self.url_label, self.url_type, self.url)
class UrlImage(models.Model):
def get_image_path(instance, filename):
fsplit = filename.split('.')
extra = 1
test_path = os.path.join(settings.MEDIA_ROOT, 'url_images', str(instance.url_id), fsplit[0] + '_' + str(extra) + '.' + fsplit[1])
while os.path.exists(test_path):
extra += 1
test_path = os.path.join(settings.MEDIA_ROOT, 'url_images', str(instance.url_id), fsplit[0] + '_' + str(extra) + '.' + fsplit[1])
path = os.path.join('url_images', str(instance.url_id), fsplit[0] + '_' + str(extra) + '.' + fsplit[-1])
return path
url = models.ForeignKey(Url)
image = ImageWithThumbnailsField(upload_to=get_image_path, thumbnail={'size': (80, 80)}, help_text="The site will resize this master image as necessary for page display")
title = models.CharField(max_length=255, help_text="For image alt tags")
source = models.CharField(max_length=255, help_text="Source location or person who created the image")
source_url = models.CharField(max_length=255, blank=True)
def __unicode__(self):
return '%s' % (self.image)
class Idea(models.Model):
title = models.CharField(max_length=255)
description = models.TextField()
author = models.CharField(max_length=255)
created_by = models.ForeignKey(User, related_name="idea_created_by")
created_by_date = models.DateTimeField(verbose_name="Created on")
updated_by = models.ForeignKey(User, related_name="idea_updated_by")
updated_by_date = models.DateTimeField(auto_now=True, verbose_name="Updated on")
resources = models.ManyToManyField(Resource, blank=True, null=True)
def get_home_page_image(self):
images = IdeaImage.objects.filter(idea=self)
home = images.filter(home_page=True)
if home.count() == 0:
return images[0]
return home[0]
def get_absolute_url(self):
slug = slugify(self.title)
return "/idea/%i/%s" % (self.id, slug)
def __unicode__(self):
return '%s' % (self.title)
class IdeaImage(models.Model):
def get_image_path(instance, filename):
fsplit = filename.split('.')
extra = 1
test_path = os.path.join(settings.MEDIA_ROOT, 'idea_images', str(instance.idea_id), fsplit[0] + '_' + str(extra) + '.' + fsplit[1])
while os.path.exists(test_path):
extra += 1
test_path = os.path.join(settings.MEDIA_ROOT, 'idea_images', str(instance.idea_id), fsplit[0] + '_' + str(extra) + '.' + fsplit[1])
path = os.path.join('idea_images', str(instance.idea_id), fsplit[0] + '_' + str(extra) + '.' + fsplit[-1])
return path
idea = models.ForeignKey(Idea)
image = ImageWithThumbnailsField(upload_to=get_image_path, thumbnail={'size': (300, 300)}, help_text="The site will resize this master image as necessary for page display")
title = models.CharField(max_length=255, help_text="For image alt tags")
source = models.CharField(max_length=255, help_text="Source location or person who created the image")
source_url = models.CharField(max_length=255, blank=True)
home_page = models.BooleanField(default=False, help_text="Select this image for use on the home page.")
def __unicode__(self):
return '%s' % (self.image)
class Submission(models.Model):
user = models.ForeignKey(User)
sent_date = models.DateTimeField(auto_now=True)
email_text = models.TextField()
class TwitterCache(models.Model):
text = models.TextField()
class ODPUserProfile(models.Model):
organization = models.CharField(max_length=255, blank=True)
can_notify = models.BooleanField(default=False)
user = models.ForeignKey(User, unique=True)
def gen_website_url():
if not settings.SITEHOST:
raise RuntimeError('settings.SITEHOST is not set')
if not settings.SITEPORT:
raise RuntimeError('settings.SITEPORT is not set')
scheme = 'http'
port = ':%d' % settings.SITEPORT
if settings.SITEPORT == 443:
scheme = 'https'
if settings.SITEPORT == 80:
port = ''
return '%s://%s%s' % (scheme, settings.SITEHOST, port)
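# Illustrative results (a sketch): with SITEHOST='data.example.org',
# SITEPORT=80 yields 'http://data.example.org', SITEPORT=8000 yields
# 'http://data.example.org:8000', and SITEPORT=443 yields
# 'https://data.example.org:443' (the port suffix is only dropped for 80).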
| {
"content_hash": "885ccec3476b0591ace34a7b6fede412",
"timestamp": "",
"source": "github",
"line_count": 369,
"max_line_length": 176,
"avg_line_length": 37.84823848238482,
"alnum_prop": 0.6331805814120006,
"repo_name": "azavea/Open-Data-Catalog",
"id": "4dec7cae3864aa2233fd639d88e6658af01b75cc",
"size": "13966",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "OpenDataCatalog/opendata/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP",
"bytes": "45"
},
{
"name": "CSS",
"bytes": "14826"
},
{
"name": "JavaScript",
"bytes": "36551"
},
{
"name": "Python",
"bytes": "136469"
}
],
"symlink_target": ""
} |
"""
EventWall
Application Configuration
"""
# Allowed file extensions for upload
allowed_extensions = ['jpg', 'jpeg', 'png', 'gif', 'tiff']
# Azure Storage
azure_account_name = ""
azure_account_key = ""
# SparkPost
sparkpost_api_key = "" | {
"content_hash": "1dd3a36b3cb1b89601f22e9b9f6060b1",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 58,
"avg_line_length": 17.142857142857142,
"alnum_prop": 0.6833333333333333,
"repo_name": "stevenmirabito/eventwall",
"id": "76675afa8ccb96ac932e13f314fdf2cfeb23a216",
"size": "240",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "eventwall/config.example.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "5043"
},
{
"name": "CSS",
"bytes": "1087"
},
{
"name": "HTML",
"bytes": "4996"
},
{
"name": "JavaScript",
"bytes": "18467"
},
{
"name": "Python",
"bytes": "6753"
},
{
"name": "TypeScript",
"bytes": "5106"
}
],
"symlink_target": ""
} |
import copy
import shutil
import os
import sys
import click
from .inenv import (InenvManager, INENV_ENV_VAR, EVAL_EXIT_CODE, InenvException,
autojump_enabled, toggle_autojump)
from .utils import override_envars_and_deactivate
from . import version
def activator_warn(inenv):
click.secho("Please add the following to your bash RC for auto switch.", fg='red')
click.secho("source {file}".format(file=inenv.activate_file), fg='green')
class InenvCliGroup(click.Group):
sort_later = set()
def get_command(self, ctx, cmd_name):
cmd = click.Group.get_command(self, ctx, cmd_name)
if cmd is not None:
return cmd
matches = [x for x in self.list_commands(ctx)
if x.startswith(cmd_name)]
if not matches:
return None
elif len(matches) == 1:
return click.Group.get_command(self, ctx, matches[0])
ctx.fail('Too many matches: %s' % ', '.join(sorted(matches)))
def add_command(self, cmd, name=None, sort_later=False):
super(InenvCliGroup, self).add_command(cmd, name=name)
if sort_later:
self.sort_later.add(name)
def list_commands(self, ctx):
core_commands, sort_later_commands = [], []
for key in self.commands:
if key in self.sort_later:
sort_later_commands.append(key)
else:
core_commands.append(key)
return sorted(core_commands) + sorted(sort_later_commands)
def format_commands(self, ctx, formatter):
"""Extra format methods for multi methods that adds all the commands
after the options.
"""
core_commands, inenv_commands = [], []
for subcommand in self.list_commands(ctx):
cmd = self.get_command(ctx, subcommand)
# What is this, the tool lied about a command. Ignore it
if cmd is None:
continue
help_ = cmd.short_help or ''
if subcommand in self.sort_later:
inenv_commands.append((subcommand, help_))
else:
core_commands.append((subcommand, help_))
if core_commands:
with formatter.section('Commands'):
formatter.write_dl(core_commands)
if inenv_commands:
with formatter.section('Inenvs'):
formatter.write_dl(inenv_commands)
@click.group(cls=InenvCliGroup, name='inenv')
@click.version_option(version=version.__version__)
def main_cli():
pass
def _run(venv_name, cmd):
if len(cmd) == 1:
cmd = cmd[0].split()
inenv = InenvManager()
venv = inenv.get_prepped_venv(venv_name)
venv.run(cmd, always_exit=True)
@click.argument('cmd', nargs=-1)
@click.option('--venv_name', default=None)
@click.command()
def switch_or_run(cmd, venv_name=None):
"""Switch or run in this env"""
if cmd:
return _run(venv_name, cmd)
inenv = InenvManager()
if not os.getenv(INENV_ENV_VAR):
activator_warn(inenv)
return
else:
venv = inenv.get_prepped_venv(venv_name)
inenv.clear_extra_source_file()
inenv.write_extra_source_file("source {}".format(venv.activate_shell_file))
inenv.write_extra_source_file(override_envars_and_deactivate(inenv.get_envvars(venv_name)))
if autojump_enabled():
directory = inenv.guess_contents_dir(venv_name)
inenv.write_extra_source_file('cd {}'.format(directory))
click.secho("Jumping to {}".format(directory), fg='green')
sys.exit(EVAL_EXIT_CODE)
@click.argument('venv_name')
@main_cli.command()
def rm(venv_name):
""" Removes the venv by name """
inenv = InenvManager()
venv = inenv.get_venv(venv_name)
click.confirm("Delete dir {}".format(venv.path))
shutil.rmtree(venv.path)
@click.argument('venv_name')
@main_cli.command()
def root(venv_name):
"""Print the root directory of a virtualenv"""
inenv = InenvManager()
inenv.get_venv(venv_name)
venv = inenv.registered_venvs[venv_name]
click.secho(venv['root'])
@click.argument('venv_name')
@main_cli.command()
def init(venv_name):
"""Initializez a virtualenv"""
inenv = InenvManager()
inenv.get_prepped_venv(venv_name, skip_cached=False)
if not os.getenv(INENV_ENV_VAR):
activator_warn(inenv)
click.secho("Your venv is ready. Enjoy!", fg='green')
@main_cli.command()
def autojump():
"""Initializes a virtualenv"""
currently_enabled = autojump_enabled()
toggle_autojump()
if not currently_enabled:
click.secho("Autojump enabled", fg='green')
else:
click.secho("Autojump disabled", fg='red')
@main_cli.command()
def extra_source():
"""Path to file sourced after an inenv switch"""
inenv = InenvManager()
click.secho(inenv.extra_source_file)
@main_cli.command('version')
def print_version():
"""Print the inenv version"""
click.secho(version.__version__)
def run_cli():
try:
inenv = InenvManager()
for venv in inenv.registered_venvs:
new_switch = copy.deepcopy(switch_or_run)
for param in new_switch.params:
if param.name == 'venv_name':
param.default = venv
main_cli.add_command(new_switch, name=venv, sort_later=True)
main_cli(obj={}, prog_name="inenv")
except InenvException as exc:
click.secho("{}".format(exc.message), fg='red')
if __name__ == '__main__':
run_cli()
| {
"content_hash": "1fd67ebbf067d35cfcbf578b2d972a3e",
"timestamp": "",
"source": "github",
"line_count": 181,
"max_line_length": 99,
"avg_line_length": 30.585635359116022,
"alnum_prop": 0.6150650289017341,
"repo_name": "pnegahdar/inenv",
"id": "5b6883492534918166ae3593de17b9d93092c66d",
"size": "5536",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "inenv/cli.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25606"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
from os import path
from subprocess import PIPE
from subprocess import Popen
from subprocess import STDOUT
from .compat import bytes_to_str
import os
def compile_less(less_filename, css_filename, minify=False, source_map=True):
cmd = ['lessc', '--silent']
if minify:
cmd += ['--clean-css=--s1 --advanced']
if source_map:
cmd += ['--source-map']
less_basename = path.basename(less_filename)
less_pathname = path.dirname(less_filename)
cmd += [less_basename, css_filename]
proc = Popen(cmd, cwd=less_pathname)
proc.communicate()
return True
def compile_scss(scss_filename, css_filename, minify=False, source_map=True,
autoprefix=True):
basename = path.basename(scss_filename)
dirname = path.dirname(scss_filename)
cmd = ['sassc']
if source_map:
cmd += ['-m']
if minify:
cmd += ['-t', 'compressed']
cmd += [basename, css_filename]
proc = Popen(cmd, stdout=PIPE, stderr=STDOUT, cwd=dirname)
out, err = proc.communicate()
out = bytes_to_str(out)
if len(out):
raise RuntimeError(out.rstrip())
if autoprefix:
cmd = ['postcss', '--use', 'autoprefixer', '--replace',
css_filename]
proc = Popen(cmd, stdout=PIPE, stderr=PIPE, cwd=dirname)
out, err = proc.communicate()
err = bytes_to_str(err)
if len(err):
raise RuntimeError(err.rstrip())
mtime = path.getmtime(scss_filename)
os.utime(css_filename, (mtime, mtime))
return True
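# Illustrative usage (not part of the original module; the paths are
# hypothetical and the call assumes the external `sassc` and
# `postcss`/`autoprefixer` binaries are available on PATH):
#
#   compile_scss('assets/scss/site.scss', 'static/css/site.css',
#                minify=True, source_map=False, autoprefix=True)
#
# Any output from sassc is raised as a RuntimeError; on success the CSS file's
# mtime is set to match the source .scss file.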
def compile_coffee(coffee_filename, js_filename, minify=False):
cmd = ['coffeebar', '-c']
if minify:
cmd += ['-m']
cmd += ['-o', js_filename, coffee_filename]
proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate()
out = bytes_to_str(out)
if 'error' in out:
raise RuntimeError(out)
mtime = path.getmtime(coffee_filename)
os.utime(js_filename, (mtime, mtime))
return True
| {
"content_hash": "784503f9b345da7e6f2cf6678d7dff60",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 77,
"avg_line_length": 25.506172839506174,
"alnum_prop": 0.6190706679574056,
"repo_name": "CorverDevelopment/Pandora",
"id": "0a633ee78e3d5900ff628ecebe53f70565466c56",
"size": "2090",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/pandora/shortcuts.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "83279"
}
],
"symlink_target": ""
} |
from sys import argv
script, name, age, color, food = argv
print("Your name is...", name)
print(f"You are {age} years old.")
print("Your favorite color is...", color)
print("Your favorite food is...", food)
| {
"content_hash": "b1e5ced4c07daedae84de89d158db24a",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 41,
"avg_line_length": 26.125,
"alnum_prop": 0.6746411483253588,
"repo_name": "dadavidson/Python_Lab",
"id": "5ce44a0d75ae5856c7d4a0dc4f98218b53c616dd",
"size": "468",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "LP3THW/ex13b.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "2047495"
},
{
"name": "Python",
"bytes": "106265"
}
],
"symlink_target": ""
} |
import numpy as np
import tensorflow as tf
from tensorflow_privacy.privacy.estimators import multi_label_head
from tensorflow_privacy.privacy.estimators import test_utils
from tensorflow_privacy.privacy.optimizers.dp_optimizer_keras import DPKerasSGDOptimizer
from tensorflow_estimator.python.estimator import estimator
class DPMultiLabelHeadTest(tf.test.TestCase):
"""Tests for DP-enabled multilabel heads."""
def testLoss(self):
"""Tests loss() returns per-example losses."""
head = multi_label_head.DPMultiLabelHead(3)
features = {'feature_a': np.full((4), 1.0)}
labels = np.array([[0, 1, 1], [1, 1, 0], [0, 1, 0], [1, 1, 1]])
logits = np.array([[2.0, 1.5, 4.1], [2.0, 1.5, 4.1], [2.0, 1.5, 4.1],
[2.0, 1.5, 4.1]])
actual_loss = head.loss(labels, logits, features)
expected_loss = tf.reduce_mean(
tf.compat.v1.losses.sigmoid_cross_entropy(
multi_class_labels=labels,
logits=logits,
reduction=tf.keras.losses.Reduction.NONE),
axis=-1,
keepdims=True)
if tf.executing_eagerly():
self.assertEqual(actual_loss.shape, [4, 1])
self.assertAllClose(actual_loss, expected_loss)
return
self.assertEqual(actual_loss.shape, [4, 1])
self.assertAllClose(expected_loss, self.evaluate(actual_loss))
def testCreateTPUEstimatorSpec(self):
"""Tests that an Estimator built with this head works."""
train_features, train_labels = test_utils.make_multilabel_input_data(256)
feature_columns = []
for key in train_features:
feature_columns.append(tf.feature_column.numeric_column(key=key))
head = multi_label_head.DPMultiLabelHead(3)
optimizer = DPKerasSGDOptimizer(
learning_rate=0.5,
l2_norm_clip=1.0,
noise_multiplier=0.0,
num_microbatches=2)
model_fn = test_utils.make_model_fn(head, optimizer, feature_columns)
classifier = estimator.Estimator(model_fn=model_fn)
classifier.train(
input_fn=test_utils.make_input_fn(train_features, train_labels, True),
steps=4)
test_features, test_labels = test_utils.make_multilabel_input_data(64)
classifier.evaluate(
input_fn=test_utils.make_input_fn(test_features, test_labels, False),
steps=4)
predict_features, predict_labels = test_utils.make_multilabel_input_data(64)
classifier.predict(
input_fn=test_utils.make_input_fn(predict_features, predict_labels,
False))
if __name__ == '__main__':
tf.test.main()
| {
"content_hash": "829ddc72482f4d8472e61dd779bad7ce",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 88,
"avg_line_length": 36.15492957746479,
"alnum_prop": 0.6595247370471368,
"repo_name": "tensorflow/privacy",
"id": "2a35c64697a948bdcf9b2a4db5357e3e8f95b644",
"size": "3157",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow_privacy/privacy/estimators/multi_label_head_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "767849"
},
{
"name": "Python",
"bytes": "1466141"
},
{
"name": "Shell",
"bytes": "21949"
},
{
"name": "Starlark",
"bytes": "35224"
}
],
"symlink_target": ""
} |
from flask import render_template, Blueprint
from common import get_app_list, get_query_list
from application.common.util import translate
from os.path import dirname, join
import json
exhibition_consumption = Blueprint('exhibition_consumption', __name__, template_folder='templates')
@exhibition_consumption.route('/dashboard/consumption')
def show():
context_dict = get_app_list()
app_id = context_dict['app_id']
username = context_dict['username']
app_list = context_dict['app_list']
if not app_id or app_id == '5621fb0f60b27457e863fabb': # Demo App
fake_data = json.load(file(join(dirname(dirname(dirname(__file__))), 'fake_data.json')))
static_info = fake_data.get('static_info')
consumption_obj = static_info.get('consumption')
hascar_obj = static_info.get('has_car')
haspet_obj = static_info.get('has_pet')
consum = {'category': map(lambda x: translate(x, 'consumption'), consumption_obj.keys()),
'series': map(lambda x: consumption_obj.get(x), consumption_obj.keys())}
car = {'category': map(lambda x: translate(x, 'has_car'), hascar_obj.keys()),
'series': map(lambda x: hascar_obj.get(x), hascar_obj.keys())}
pet = {'category': map(lambda x: translate(x, 'has_pet'), haspet_obj.keys()),
'series': map(lambda x: haspet_obj.get(x), haspet_obj.keys())}
else:
result_dict = get_query_list(app_id, 'consumption', 'has_car', 'has_pet')
consumption_list = filter(lambda x: x is not None, result_dict['consumption'])
car_list = filter(lambda x: x is not None, result_dict['has_car'])
pet_list = filter(lambda x: x is not None, result_dict['has_pet'])
consumption_tmp = map(lambda x: list(x),
zip(*map(lambda x: [x, consumption_list.count(x)], set(consumption_list))))
consum = {"category": map(lambda x: translate(x, 'consumption'), consumption_tmp[0]),
"series": consumption_tmp[1]} if consumption_tmp else {}
car = {'category': map(lambda x: translate(x, 'has_car'), list(set(car_list))),
'series': map(lambda x: car_list.count(x), list(set(car_list)))}
pet = {'category': map(lambda x: translate(x, 'has_pet'), list(set(pet_list))),
'series': map(lambda x: pet_list.count(x), list(set(pet_list)))}
ret_json = {
'errcode': 0,
'errmsg': 'ok',
'data': {
'consumption': consum,
'car': car,
'pet': pet
}
}
return render_template('dashboard/user-consumption.html', option=json.dumps(ret_json),
username=username, app_id=app_id, app_list=app_list)
| {
"content_hash": "137dab2723b7d7fbe7f34b5f90be7698",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 105,
"avg_line_length": 50.68518518518518,
"alnum_prop": 0.6039459261965656,
"repo_name": "petchat/senz.dashboard.backend",
"id": "e24bff2dd8aea35af35ee570491b423829c0469e",
"size": "2737",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "application/views/exhibition/consumption.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "624765"
},
{
"name": "HTML",
"bytes": "2863274"
},
{
"name": "JavaScript",
"bytes": "8052515"
},
{
"name": "Python",
"bytes": "118095"
},
{
"name": "Shell",
"bytes": "268"
}
],
"symlink_target": ""
} |
PROGRAM_VERSION = "scculs.py, part of SCCULS preview-1"
import libscculs
import argparse
import os
import csv
def safe_open(file_path, overwrite):
if (overwrite == False) and os.path.exists(file_path):
if os.path.isfile(file_path):
raise Exception("This file already exists: " + file_path)
elif os.path.isdir(file_path):
raise Exception("Please specify full paths including file names. This path is a folder: " + file_path)
else:
raise Exception()
safe_open_file = open(file_path, "w")
return safe_open_file
arg_parser = argparse.ArgumentParser(description = "SCCULS: Scalable Conditional-Clade Ultrametric Summary trees. Distills a summary tree from an MCMC sample, using the highest tree probability calculated from conditional clade frequencies, or from topology frequencies.")
arg_parser.add_argument("-v", "--version", action = "version", version = PROGRAM_VERSION)
defaults_group = arg_parser.add_argument_group("program defaults")
defaults_group.add_argument("-c", "--candidate-method", type = str, default = "derived", choices = ["derived", "sampled"], help = "Only consider topologies in the MCMC sample, or derive the most probable topology or topologies using conditional clades. Default: derived.")
defaults_group.add_argument("-g", "--node-heights", type = str, choices = ["median", "mean"], help = "Specify the method used to calculate node heights. Without this option, node heights will not be calculated, and trees of equal branch lengths will be returned.")
defaults_group.add_argument("-p", "--probability-method", type = str, choices = ["conditional-clade", "tree-topology"], help = "Infer tree topology probabilities using either tree topology probabilities or conditional clade probabilities. When -c/--candidate-method is 'derived', default is conditional-clade. When -c/--candidate-method is 'sampled', default is tree-topology.")
defaults_group.add_argument("-s", "--support-values", type = str, choices = ["conditional-clade", "tree-topology"], help = "Add clade monophyly support values to output trees, and infer them using either tree topology frequencies or conditional clade frequencies.")
output_group = arg_parser.add_argument_group('output files')
output_group.add_argument("-i", "--info-output", metavar = "INFO_OUTPUT_PATH", type = str, help = "Calculate whole-sample statistics and output them to a text format file.")
output_group.add_argument("-n", "--newick-output", metavar = "NEWICK_OUTPUT_PATH", type = str, help = "Output the summary tree(s) to newick format file(s). When -l/--max-topologies is greater than 1, more than one tree may be returned, so an identifying number will be appended to the end of each filename.")
output_group.add_argument("-o", "--csv-output", metavar = "CSV_OUTPUT_PATH", type = str, help = "Calculate statistics for each returned tree topology, and output them to CSV format file.")
output_group.add_argument("-w", "--overwrite", action = "store_true", help = "If output file paths point to existing files, overwrite the existing files.")
limits_group = arg_parser.add_argument_group('output limits')
limits_group.add_argument("-l", "--max-topologies", type = int, default = 1, help = "The size of the credible set in the number of unique topologies to output. The number of topologies returned will still be limited by -m/--max-probability. Default: 1.")
limits_group.add_argument("-m", "--max-probability", type = float, default = 1.0, help = "The size of the credible set in total posterior probability to output. The number of topologies returned will still be limited by -l/--max-topologies. Default: 1.0")
input_group = arg_parser.add_argument_group('program input')
input_group.add_argument("-b", "--burn-in", type = int, default = 0, help = "The number of trees to discard from the beginning of the MCMC sample. Default: 0.")
input_group.add_argument("-d", "--calibration-date", type = float, default = 0.0, help = "If any tip dates are not contemporary (including tip date sampling), set a fixed date for the calibration taxon so that the tree height is correctly calculated. Negative numbers are used for past dates, positive numbers for future dates. Default: 0.0.")
input_group.add_argument("-t", "--calibration-taxon", type = str, default = "", help = "If any tip dates are not contemporary (including tip date sampling), set the calibration taxon so that the tree height is correctly calculated.")
input_group.add_argument("sample_path", metavar = "MCMC_SAMPLE_PATH", type = str, help = "The path to an MCMC sample of phylogenetic trees in either nexus or newick format.")
args = arg_parser.parse_args()
# raise errors in response to incomplete or nonsensical user-supplied arguments
if args.max_topologies <= 0:
arg_parser.error("argument -l/--max-topologies: must be equal to or greater than 1")
elif args.max_probability <= 0.0 or args.max_probability > 1.0:
arg_parser.error("argument -m/--max-probability: must be greater than 0.0 and less than 1.0")
elif not os.path.isfile(args.sample_path):
arg_parser.error("argument MCMC_SAMPLE_PATH: not a file path")
# set probability method
if args.probability_method is None: # defaults if not supplied
if args.candidate_method == "derived":
probability_method = "conditional-clade"
else:
probability_method = "tree-topology"
else: # user-supplied method
probability_method = args.probability_method
calibration_taxon = args.calibration_taxon
calibration_date = args.calibration_date
sample_path = args.sample_path
sample_burn_in = args.burn_in
max_tree_topologies = args.max_topologies
max_probability = args.max_probability
overwrite = args.overwrite
print "Reading MCMC sample..."
mcmc_sample = libscculs.trees_from_path(sample_path)
mcmc_post = mcmc_sample[sample_burn_in:] # discard burn-in
ultrametric_sample = libscculs.UltrametricSample(mcmc_post, calibration_taxon, calibration_date)
taxon_order = ultrametric_sample.taxon_order
n_taxa = len(taxon_order)
print("Counting topologies and conditional clades...")
topology_set, topology_counts, cc_sets, cc_counts, clade_set = libscculs.calculate_topology_probabilities(ultrametric_sample)
n_unique_topologies = topology_set.n_features
# all circumstances where conditional clade probabilities are required
# don't bother to calculate if not needed
if (args.candidate_method == "derived") or (probability_method == "conditional-clade") or (args.support_values == "conditional-clade"):
print("Calculating conditional clade probabilities...")
for parent_hash, split_counts in cc_counts.items():
cc_sets[parent_hash].probabilities_from_counts(split_counts)
# adding tree-topology based support values needs to be done before other steps, in case the topology set is modified later
if args.support_values == "conditional-clade":
print("Calculating clade probabilities from conditional clade probabilities...")
clade_set.derive_clade_probabilities(cc_sets, n_taxa)
elif args.support_values == "tree-topology":
print("Calculating topology and clade probabilities from MCMC sample...")
topology_set.probabilities_from_counts(topology_counts)
clade_set.melt_clade_probabilities(topology_set, n_taxa)
if args.candidate_method == "derived": # derive credible topologies from conditional clades
print("Deriving probable topologies from conditional clades...")
output_topology_set = libscculs.derive_best_topologies(cc_sets, taxon_order, max_tree_topologies, max_probability)
else: # base credible topologies on frequency in MCMC sample
output_topology_set = topology_set
if probability_method == "conditional-clade":
print("Calculating topology probabilities from conditional clade probabilities...")
output_topology_set.probabilities_from_ccs(cc_sets)
else:
print("Calculating topology probabilities...")
output_topology_set.probabilities_from_counts(topology_counts)
# once probabilities have been calculated for each topology in the sampled set
# then topologies that exceed maximum topology/probability limits can be removed
if args.candidate_method == "sampled":
print("Limiting output topologies to credible set...")
output_topology_set.cull_probabilities(max_tree_topologies, max_probability)
if args.support_values is not None:
print("Adding clade support values to tree topologies...")
output_topology_set.add_clade_support(clade_set, taxon_order)
if args.info_output is not None:
print("Writing MCMC sample statistics file...")
info_output_path = args.info_output
info_output_file = safe_open(info_output_path, overwrite)
info_output_file.write("Number of taxa in each tree: %i\n" % (n_taxa))
info_output_file.write("Number of unique tree topologies in MCMC sample: %i\n" % (n_unique_topologies))
if args.candidate_method == "derived": # calculate summary statistics for topologies
n_nonzero_topologies = libscculs.n_derived_topologies(cc_sets, n_taxa)
info_output_file.write("Number of topologies derived from conditional clades: %i\n" % (n_nonzero_topologies))
#n_derived_topologies = libscculs.n_derived_topologies(cc_sets, n_taxa, include_zero_probability = True)
#n_nonzero_topologies = libscculs.n_derived_topologies(cc_sets, n_taxa)
#info_output_file.write("Number of topologies derived from conditional clades: %i\n" % (n_derived_topologies))
#info_output_file.write("Number of topologies derived from conditional clades (with non-zero probabilities): %i\n" % (n_nonzero_topologies))
info_output_file.close()
if args.newick_output is not None:
print("Writing tree topology files...")
newick_path_prefix = args.newick_output
for i in range(output_topology_set.n_features):
newick_string = output_topology_set.data_array[i]
newick_output_path = newick_path_prefix + "." + str(i)
newick_output_file = safe_open(newick_output_path, overwrite)
newick_output_file.write(newick_string + "\n")
newick_output_file.close()
if args.csv_output is not None:
print("Writing tree statistics file...")
csv_output_path = args.csv_output
csv_output_file = safe_open(csv_output_path, overwrite)
csv_writer = csv.writer(csv_output_file)
header_row = ["topology", "probability"]
csv_writer.writerow(header_row)
for i in range(output_topology_set.n_features):
topology_probability = output_topology_set.probabilities_array[i]
output_row = [i, topology_probability]
csv_writer.writerow(output_row)
csv_output_file.close()
| {
"content_hash": "30227c34f181115896e3400e6339eb29",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 378,
"avg_line_length": 62.41818181818182,
"alnum_prop": 0.7571608894067385,
"repo_name": "genomescale/scculs",
"id": "aee4bf8dfda54af5db69f08ed9e217114c4d2f7e",
"size": "10299",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scculs.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "32891"
},
{
"name": "Shell",
"bytes": "364"
}
],
"symlink_target": ""
} |
"""Test Library for OneConvergencePlugin."""
import contextlib
import uuid
import mock
from oslo.config import cfg
from neutron import context
from neutron.extensions import portbindings
from neutron.manager import NeutronManager
from neutron.plugins.oneconvergence import plugin as nvsd_plugin
from neutron.tests.unit import _test_extension_portbindings as test_bindings
from neutron.tests.unit import test_db_plugin as test_plugin
from neutron.tests.unit import test_l3_plugin
PLUGIN_NAME = 'neutron.plugins.oneconvergence.plugin.OneConvergencePluginV2'
class OneConvergencePluginV2TestCase(test_plugin.NeutronDbPluginV2TestCase):
_plugin_name = PLUGIN_NAME
def setUp(self):
def mocked_oneconvergence_init(self):
def side_effect(*args, **kwargs):
return {'id': str(uuid.uuid4())}
self.nvsdlib = mock.Mock()
self.nvsdlib.create_network.side_effect = side_effect
with mock.patch.object(nvsd_plugin.OneConvergencePluginV2,
'oneconvergence_init',
new=mocked_oneconvergence_init):
super(OneConvergencePluginV2TestCase,
self).setUp(self._plugin_name)
class TestOneConvergencePluginNetworksV2(test_plugin.TestNetworksV2,
OneConvergencePluginV2TestCase):
pass
class TestOneConvergencePluginSubnetsV2(test_plugin.TestSubnetsV2,
OneConvergencePluginV2TestCase):
def test_update_subnet_inconsistent_ipv6_gatewayv4(self):
self.skipTest("NVSD Plugin does not support IPV6.")
def test_create_subnet_with_v6_allocation_pool(self):
self.skipTest("NVSD Plugin does not support IPV6.")
def test_update_subnet_inconsistent_ipv6_hostroute_dst_v4(self):
self.skipTest("NVSD Plugin does not support IPV6.")
def test_update_subnet_inconsistent_ipv6_hostroute_np_v4(self):
self.skipTest("NVSD Plugin does not support IPV6.")
class TestOneConvergencePluginPortsV2(test_plugin.TestPortsV2,
test_bindings.PortBindingsTestCase,
OneConvergencePluginV2TestCase):
VIF_TYPE = portbindings.VIF_TYPE_OVS
def test_requested_subnet_id_v4_and_v6(self):
self.skipTest("NVSD Plugin does not support IPV6.")
def test_port_vif_details(self):
plugin = NeutronManager.get_plugin()
with self.port(name='name') as port1:
ctx = context.get_admin_context()
port = plugin.get_port(ctx, port1['port']['id'])
self.assertEqual(port['binding:vif_type'],
portbindings.VIF_TYPE_OVS)
def test_ports_vif_details(self):
cfg.CONF.set_default('allow_overlapping_ips', True)
plugin = NeutronManager.get_plugin()
with contextlib.nested(self.port(), self.port()) as (port1, port2):
ctx = context.get_admin_context()
ports = plugin.get_ports(ctx)
self.assertEqual(len(ports), 2)
for port in ports:
self.assertEqual(port['binding:vif_type'],
portbindings.VIF_TYPE_OVS)
class TestOneConvergenceBasicGet(test_plugin.TestBasicGet,
OneConvergencePluginV2TestCase):
pass
class TestOneConvergenceV2HTTPResponse(test_plugin.TestV2HTTPResponse,
OneConvergencePluginV2TestCase):
pass
class TestOneConvergenceL3NatTestCase(test_l3_plugin.L3NatDBIntTestCase):
_plugin_name = PLUGIN_NAME
def setUp(self):
def mocked_oneconvergence_init(self):
def side_effect(*args, **kwargs):
return {'id': str(uuid.uuid4())}
self.nvsdlib = mock.Mock()
self.nvsdlib.create_network.side_effect = side_effect
ext_mgr = test_l3_plugin.L3TestExtensionManager()
with mock.patch.object(nvsd_plugin.OneConvergencePluginV2,
'oneconvergence_init',
new=mocked_oneconvergence_init):
super(TestOneConvergenceL3NatTestCase,
self).setUp(plugin=self._plugin_name, ext_mgr=ext_mgr)
def test_floatingip_with_invalid_create_port(self):
self._test_floatingip_with_invalid_create_port(self._plugin_name)
| {
"content_hash": "a207ffa485bddecc4527b4a2b4bfbdb5",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 76,
"avg_line_length": 37.9051724137931,
"alnum_prop": 0.6443029338185127,
"repo_name": "zhhf/charging",
"id": "02b0fef1d72fe14e7ac7431616419f183e861602",
"size": "5033",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "charging/tests/unit/oneconvergence/test_nvsd_plugin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "37307"
},
{
"name": "JavaScript",
"bytes": "67930"
},
{
"name": "Python",
"bytes": "8696203"
},
{
"name": "Shell",
"bytes": "8920"
},
{
"name": "XSLT",
"bytes": "50907"
}
],
"symlink_target": ""
} |
"""
Routines to create questions.
"""
import pdb
import json
import logging
from hashlib import sha1
from tqdm import tqdm
from . import db
from . import api
from . import turk
from .util import stuple
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
QUESTION_VERSION=0.2
# TODO: allow us to 'override' and add samples to questions that have already been answered.
def create_questions_for_submission_sample(submission_id, sample_batch_id):
"""
Produces questions for a submission, based on what's in the
database.
"""
# Group by doc_id, subject, object
question_groups = {}
for q in db.select("""
SELECT DISTINCT ON (s.doc_id, LEAST(s.subject, s.object), GREATEST(s.subject, s.object))
s.doc_id, s.subject, s.object,
r.subject_type, r.object_type,
r.subject_gloss, r.object_gloss,
r.subject_canonical_gloss, r.object_canonical_gloss,
r.subject_canonical, r.object_canonical,
r.subject_entity, r.object_entity
FROM submission_sample s
JOIN submission_entity_relation r ON (
r.submission_id = s.submission_id
AND s.doc_id = r.doc_id AND s.subject = r.subject AND s.object = r.object)
WHERE s.submission_id = %(submission_id)s
AND s.batch_id = %(sample_batch_id)s
;
""", submission_id=submission_id, sample_batch_id=sample_batch_id):
question_groups[q.doc_id, stuple(q.subject), stuple(q.object)] = q
# Go through and remove any of these from possible_questions
for q in db.select("""
SELECT doc_id, subject, object, subject_canonical_gloss, object_canonical_gloss, subject_entity, object_entity
FROM evaluation_relation_question r
WHERE (state <> 'error' AND state <> 'revoked')
"""):
key = (q.doc_id, stuple(q.subject), stuple(q.object))
if key not in question_groups: continue
q_ = question_groups[key]
# Purge any questions that have already been asked.
if (q.subject_canonical_gloss in q_.subject_canonical_gloss or q.subject_entity == q_.subject_entity) and \
(q.object_canonical_gloss == q_.object_canonical_gloss or q.object_entity == q_.object_entity):
del question_groups[key]
# Flip the entity order
if (q.object_canonical_gloss in q_.subject_canonical_gloss or q.object_entity == q_.subject_entity) and \
(q.subject_canonical_gloss == q_.object_canonical_gloss or q.subject_entity == q_.object_entity):
del question_groups[key]
for q in db.select("""
SELECT doc_id, subject, object,
subject_entity AS subject_canonical_gloss, object_entity AS object_canonical_gloss,
subject_entity AS subject_entity, object_entity AS object_entity
FROM evaluation_entity_relation r
"""):
key = (q.doc_id, stuple(q.subject), stuple(q.object))
if key not in question_groups: continue
q_ = question_groups[key]
# If you find a match, drop the question so it is not asked again
if (q.subject_canonical_gloss == q_.subject_canonical_gloss or q.subject_entity == q_.subject_entity) and \
(q.object_canonical_gloss == q_.object_canonical_gloss or q.object_entity == q_.object_entity):
del question_groups[key]
# Flip the entity order
if (q.object_canonical_gloss in q_.subject_canonical_gloss or q.object_entity == q_.subject_entity) and \
(q.subject_canonical_gloss == q_.object_canonical_gloss or q.subject_entity == q_.object_entity):
del question_groups[key]
questions = []
for row in question_groups.values():
# In some cases, we will need to flip types.
if row.subject_canonical_gloss.startswith("gloss:"):
subject_canonical_gloss = row.subject_canonical_gloss[len("gloss:"):]
else:
subject_canonical_gloss = row.subject_canonical_gloss
if row.object_canonical_gloss.startswith("gloss:"):
object_canonical_gloss = row.object_canonical_gloss[len("gloss:"):]
else:
object_canonical_gloss = row.object_canonical_gloss
question = {
"batch_type": "selective_relations",
"submission_id": submission_id,
"doc_id": row.doc_id,
"subject": {
"span": stuple(row.subject),
"gloss": row.subject_gloss,
"type": row.subject_type,
"entity": {
"span": stuple(row.subject_canonical),
"gloss": subject_canonical_gloss,
"type": row.subject_type,
"link": row.subject_entity,
}
},
"object": {
"span": stuple(row.object),
"gloss": row.object_gloss,
"type": row.object_type,
"entity": {
"span": stuple(row.object_canonical),
"gloss": object_canonical_gloss,
"type": row.object_type,
"link": row.object_entity,
}
},
}
# Flip types to be nice to Javascript.
if row.subject_type == 'ORG' and row.object_type == 'PER':
question["subject"], question["object"] = question["object"], question["subject"]
elif row.subject_type == 'GPE':
question["subject"], question["object"] = question["object"], question["subject"]
else:
pass
questions.append(question)
return questions
def test_create_questions_for_submission_sample():
submission_id = 25
sample_batch_id = 19
questions = create_questions_for_submission_sample(submission_id, sample_batch_id)
assert len(questions) > 100
def create_questions_for_corpus(corpus_tag):
"""
Produces questions for a corpus, based on what's in the
database.
"""
questions = []
for row in db.select("""
(SELECT DISTINCT doc_id
FROM document_sample s
JOIN document_tag t ON (s.doc_id = t.doc_id)
WHERE t.tag=%(corpus_tag)s)
EXCEPT
(SELECT doc_id
FROM evaluation_question q
JOIN evaluation_doc_question d ON (q.id = d.id AND q.batch_id = d.batch_id)
WHERE q.state <> 'error');
""", corpus_tag=corpus_tag):
questions.append({
"batch_type": "exhaustive_mentions",
"doc_id": row.doc_id,
})
return questions
def validate_question_params(params):
assert "batch_type" in params
if params["batch_type"] == "exhaustive_mentions":
assert "doc_id" in params
elif params["batch_type"] == "exhaustive_relations":
assert "doc_id" in params
assert "subject" in params
assert "object" in params
elif params["batch_type"] == "selective_relations":
assert "doc_id" in params
assert "subject" in params
assert "object" in params
else:
assert False, "Invalid batch type: {}".format(params["batch_type"])
def insert_evaluation_question(batch_id, params, cur=None):
validate_question_params(params)
if cur is None:
with db.CONN:
with db.CONN.cursor() as cur:
return insert_evaluation_question(batch_id, params, cur)
else:
params_str = json.dumps(params)
id_ = sha1(params_str.encode("utf-8")).hexdigest()
cur.execute(
"""INSERT INTO evaluation_question(id, batch_id, state, params) VALUES %s""",
[(id_, batch_id, "pending-turking", params_str)]
)
def test_insert_evaluation_question():
# TODO: Create a test database for this.
raise NotImplementedError()
def insert_evaluation_batch(corpus_tag, batch_type, description, questions, sample_batch_id, cur=None):
"""
Creates an evaluation batch with a set of questions.
@questions is a list of parameters to launch tasks with.
"""
for params in questions: validate_question_params(params)
if cur is None:
with db.CONN:
with db.CONN.cursor() as cur:
return insert_evaluation_batch(corpus_tag, batch_type, description, questions, sample_batch_id, cur)
else:
# Create new batch.
cur.execute("""
INSERT INTO evaluation_batch(corpus_tag, batch_type, description, sample_batch_id) VALUES %s
RETURNING (id);
""", [(corpus_tag, batch_type, description, sample_batch_id)])
batch_id, = next(cur)
questions = [json.dumps(params, sort_keys=True) for params in questions]
ids = [sha1(params.encode("utf-8")).hexdigest() for params in questions]
db.execute_values(
cur,
"""INSERT INTO evaluation_question(id, batch_id, state, params) VALUES %s""",
[(id_, batch_id, "pending-turking", params) for id_, params in zip(ids, questions)])
return batch_id
def test_insert_evaluation_batch():
# TODO: Create a test database for this.
raise NotImplementedError()
def create_evaluation_batch_for_submission_sample(submission_id, sample_batch_id):
submission = api.get_submission(submission_id)
# First of all, make sure there are even samples for this submission.
batches = api.get_submission_sample_batches(submission_id)
assert len(batches) > 0,\
"No sample batches for submission {}".format(submission_id)
assert any(batch == sample_batch_id for batch in batches),\
"Sample batch {} is not part of submission {}".format(sample_batch_id, submission_id)
# Now, get the questions.
questions = create_questions_for_submission_sample(submission_id, sample_batch_id)
if len(questions) == 0:
logger.warning("There are unasked questions for submission %s!", submission_id)
return None
# Create an evaluation_batch out of these questions.
batch_type = 'selective_relations'
description = "{} unique questions asked from submission {} ({})".format(len(questions), submission.name, submission_id)
evaluation_batch_id = insert_evaluation_batch(submission.corpus_tag, batch_type, description, questions, sample_batch_id)
return evaluation_batch_id
def revoke_question(question_batch_id, question_id, mturk_conn=None):
if mturk_conn is None:
mturk_conn = turk.connect()
# Revoke all mturk hits associated with this question.
hits = db.select("""
SELECT id
FROM mturk_hit
WHERE question_batch_id = %(question_batch_id)s AND question_id = %(question_id)s
AND state <> 'revoked'
""", question_batch_id=question_batch_id, question_id=question_id)
had_errors = False
for row in hits:
try:
turk.revoke_hit(mturk_conn, row.id)
db.execute("""
UPDATE mturk_hit
SET state = %(state)s, message = %(message)s
WHERE id=%(hit_id)s
""", state="revoked", message="",
hit_id=row.id)
except turk.HitMustBeReviewed as e:
logger.exception(e)
had_errors = True
continue
if not had_errors:
db.execute("""
UPDATE evaluation_question
SET state=%(state)s, message=%(message)s
WHERE id=%(question_id)s AND batch_id=%(question_batch_id)s
""", state="revoked", message="",
question_batch_id=question_batch_id, question_id=question_id)
def revoke_question_batch(question_batch_id, mturk_conn=None):
questions = api.get_questions(question_batch_id)
if mturk_conn is None:
mturk_conn = turk.connect()
# Revoke all mturk hits associated with this question.
for question in tqdm(questions, desc="revoking question batch"):
revoke_question(question_batch_id, question.id, mturk_conn=mturk_conn)
| {
"content_hash": "2da786e5286a5b7f4e6fb07ba6856a6b",
"timestamp": "",
"source": "github",
"line_count": 296,
"max_line_length": 125,
"avg_line_length": 40.71283783783784,
"alnum_prop": 0.607916355489171,
"repo_name": "arunchaganty/kbp-online",
"id": "2fd415ce52cd4a5a77946c8f7cb28cd1389c2a9d",
"size": "12051",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/kbpo/questions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6524"
},
{
"name": "HTML",
"bytes": "130435"
},
{
"name": "JavaScript",
"bytes": "309886"
},
{
"name": "PLpgSQL",
"bytes": "44305"
},
{
"name": "Python",
"bytes": "521294"
},
{
"name": "Shell",
"bytes": "5038"
}
],
"symlink_target": ""
} |
""" Astronomy related functions """
from functools import wraps
import numpy as np
try:
import dask
except ImportError:
dask = None
def dask_compatibility(fn):
""" Make functions transparent to using dask delayed objects """
@wraps(fn)
def wrapped(*args, **kwargs):
try:
return fn(*args, **kwargs)
except Exception as issue:
if dask is None:
raise issue
return dask.delayed(fn)(*args, **kwargs).compute()
return wrapped
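# Minimal usage sketch (not part of the original module): decorate a function
# so that, if it raises on dask-backed inputs, the call is retried through
# dask.delayed(...).compute(). The example function below is hypothetical.
#
#   @dask_compatibility
#   def column_mean(df, name):
#       return np.mean(df[name])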
def nside2npix(nside):
"""Give the number of pixels for the given nside.
Parameters
----------
nside : int
healpix nside parameter; an exception is raised if nside is not valid
(nside must be a power of 2, less than 2**30)
Returns
-------
npix : int
corresponding number of pixels
Notes
-----
Raise a ValueError exception if nside is not valid.
Examples
--------
>>> import numpy as np
>>> nside2npix(8)
768
"""
return 12 * nside * nside
def gaia_healpix_expression(healpix_expression="source_id/34359738368",
healpix_max_level=12, healpix_level=8):
"""
Give the healpix expression from the Gaia source_id at
a given healpix level
Parameters
----------
healpix_expression: str
field name and conversion to healpix cell
healpix_max_level: int
expression corresponding level
healpix_level: int
desired healpix level from the data
Returns
-------
expression: str
final expression
"""
reduce_level = healpix_max_level - healpix_level
# NSIDE = 2 ** healpix_level
# nmax = nside2npix(NSIDE)
scaling = 4 ** reduce_level
# epsilon = 1. / scaling / 2
expression = "%s/%s" % (healpix_expression, scaling)
return expression
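# Illustrative call (not part of the original module): with the default max
# level of 12, requesting level 8 reduces by 4 levels, i.e. a scaling of
# 4 ** 4 = 256:
#
#   gaia_healpix_expression(healpix_level=8)
#   # -> 'source_id/34359738368/256'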
@dask_compatibility
def get_healpix_grid(data, healpix_level):
"""Convert a dataframe to the dense grid
Parameters
----------
data: pd.DataFrame
data from a database query
healpix_level: int
level of the query
Returns
-------
grid: np.array (npix, )
dense grid of npix(healpix_level) with the data values
"""
grid = np.zeros(nside2npix(2 ** healpix_level), dtype=data.n.values.dtype)
grid[data.hpx] = data.n
return grid
def healpix_grid_plot(fgrid, what_label=None, cmap="afmhot",
grid_limits=None, healpix_input="equatorial",
healpix_output="galactic", image_size=800, nest=True,
norm=None, title="", smooth=None,
colorbar=True, rotation=(0, 0, 0), **kwargs):
""" Plot data from healpix configuration
what_label: str
colorbar label
cmap: str or cmap instance
colormap used by matplotlib
healpix_input: str
Specify if the healpix index is in
"equatorial", "galactic" or "ecliptic".
healpix_output: str
Plot in "equatorial", "galactic" or "ecliptic".
grid_limits: tuple, optional
[minvalue, maxvalue] value that map to the colormap
(values below and above these are clipped to the the min/max).
image_size: int
size for the image that healpy uses for rendering
nest: boolean
If the healpix data is in nested (True) or ring (False)
title: str
Title of figure
smooth: float
apply gaussian smoothing, in degrees
rotation: tuple(3)
Rotate the plot, in format (lon, lat, psi)
such that (lon, lat) is the center,
and rotate on the screen by angle psi. All angles are degrees.
norm : {'hist', 'log', None}
Color normalization, hist= histogram equalized color mapping,
log= logarithmic color mapping, default: None (linear color mapping)
"""
import healpy as hp
from matplotlib import colors
import warnings
# Compatibility filter
vmin = kwargs.pop('vmin', None)
vmax = kwargs.pop('vmax', None)
if grid_limits is None:
grid_limits = [vmin, vmax]
if isinstance(norm, colors.LogNorm):
norm = 'log'
if grid_limits[0] is None:
grid_limits[0] = 1
if smooth:
if nest:
grid = hp.reorder(fgrid, inp="NEST", out="RING")
nest = False
# grid[np.isnan(grid)] = np.nanmean(grid)
grid = hp.smoothing(fgrid, sigma=np.radians(smooth))
else:
grid = fgrid
if grid_limits:
grid_min, grid_max = grid_limits
else:
grid_min = grid_max = None
func = hp.mollview
coord_map = dict(equatorial='C', galactic='G', ecliptic="E")
coord = coord_map[healpix_input], coord_map[healpix_output]
if coord_map[healpix_input] == coord_map[healpix_output]:
coord = None
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
return func(grid, unit=what_label, rot=rotation, nest=nest,
title=title, coord=coord, cmap=cmap, hold=True,
xsize=image_size, min=grid_min, norm=norm,
max=grid_max, cbar=colorbar, **kwargs)
@dask_compatibility
def add_column_healpix(self, name="healpix", longitude="ra", latitude="dec",
degrees=True, healpix_order=12, nest=True):
"""Add a healpix (in memory) column based on a longitude and latitude
Parameters
----------
name: str
name of the column
longitude: str
expression of the longitude (or right-ascension) coordinate
(astronomical convenction latitude=90 is north pole)
latitude: str
expression of the latitude (or declinasion) coordinate
degrees: boolean
If lon/lat are in degrees (default) or radians.
healpix_order: int
healpix order, >= 0
nest: boolean
Nested healpix (default) or ring.
"""
import healpy as hp
if degrees:
scale = np.pi / 180.
else:
scale = 1.
phi = self[longitude] * scale
theta = np.pi / 2 - self[latitude] * scale
hp_index = hp.ang2pix(hp.order2nside(healpix_order), theta, phi, nest=nest)
try:
self.add_column(name, hp_index)
except AttributeError:
self[name] = hp_index
return self
@dask_compatibility
def project_aitoff(alphain, deltain, radians=True):
"""Add aitoff (https://en.wikipedia.org/wiki/Aitoff_projection) projection
TODO: optimize for DASK DataFrame
Parameters
----------
alpha: array
azimuth angle
delta: array
polar angle
radians: boolean
input and output in radians (True), or degrees (False)
Returns
-------
x: ndarray
x coordinate
y: ndarray
y coordinate
"""
try:
transform = 1. if radians else np.pi / 180.
alpha = np.copy(alphain)
if not radians:
ind = alphain > 180
alpha[ind] = alphain[ind] - 360
else:
ind = alphain > np.pi
alpha[ind] = alphain[ind] - 2. * np.pi
delta = deltain
aitoff_alpha = np.arccos(np.cos(delta * transform) *
np.cos(0.5 * alpha * transform))
x = (2 * np.cos(delta * transform) * np.sin(0.5 * alpha * transform) /
np.sinc(aitoff_alpha / np.pi) / np.pi)
y = np.sin(delta * transform) / np.sinc(aitoff_alpha / np.pi) / np.pi
return x, y
except ValueError as issue:
# dask df are not playing nice with the above
try:
import dask
return dask.delayed(project_aitoff)(alphain, deltain, radians)\
.compute()
except ImportError:
raise issue
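# Illustrative call (not part of the original module): inputs are numpy arrays
# of azimuth/polar angles (degrees when radians=False); the origin maps to
# (0, 0) in the projection plane.
#
#   x, y = project_aitoff(np.array([0.0, 90.0]), np.array([0.0, 30.0]),
#                         radians=False)
#   # x[0] == 0.0 and y[0] == 0.0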
def add_aitoff_projections(self, alpha, delta, x, y, radians=False):
"""Add aitoff (https://en.wikipedia.org/wiki/Aitoff_projection) projection
Parameters
----------
alpha: array
azimuth angle
delta: array
polar angle
radians: boolean
input and output in radians (True), or degrees (False)
x: str
output name for x coordinate
y: str
output name for y coordinate
Returns
-------
x: ndarray
output name for x coordinate
y: ndarray
output name for y coordinate
"""
x_, y_ = project_aitoff(self[alpha], self[delta], radians=radians)
try:
self.add_column(x, x_)
except AttributeError:
self[x] = x_
try:
self.add_column(y, y_)
except AttributeError:
self[y] = y_
return self
def find_matching_parenthesis(string):
""" Find recursively groups of balanced parenthesis """
stack = 0
startIndex = None
results = []
for i, c in enumerate(string):
if c == '(':
if stack == 0:
startIndex = i + 1 # string to extract starts one index later
# push to stack
stack += 1
elif c == ')':
# pop stack
stack -= 1
if stack == 0:
results.append(string[startIndex:i])
rprime = [find_matching_parenthesis(rk) for rk in results if len(results)]
if len(results):
if len(rprime):
return results + rprime
else:
return results
def flatten(lst):
""" Flatten a nest list or nested sequence of values """
res = []
for k in lst:
if isinstance(k, (list, tuple)):
res.extend(flatten(k))
else:
if k is not None:
res.append(k)
return res
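# Illustrative behaviour (not part of the original module):
# find_matching_parenthesis() is used by healpix_plot() below to pull the
# argument out of an aggregate expression such as 'count(*)', and flatten()
# collapses its nested output.
#
#   find_matching_parenthesis("count(*)")[0]   # -> '*'
#   flatten([["a", None], ["b", ["c"]]])       # -> ['a', 'b', 'c']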
def healpix_plot(self, healpix_expression='healpix', healpix_level=8,
what='count(*)', grid=None,
healpix_input='equatorial', healpix_output='galactic',
norm=None, cmap='afmhot', grid_limits=None,
image_size=800, nest=True,
title='', smooth=None, colorbar=True,
rotation=(0, 0, 0), **kwargs):
""" Plot data from healpix configuration
what_label: str
colorbar label
cmap: str or cmap instance
colormap used by matplotlib
grid: ndarray
healpix grid of size nside2npix(2 ** level)
healpix_input: str
Specify if the healpix index is in
"equatorial", "galactic" or "ecliptic".
healpix_output: str
Plot in "equatorial", "galactic" or "ecliptic".
grid_limits: tuple, optional
[minvalue, maxvalue] value that map to the colormap
(values below and above these are clipped to the the min/max).
image_size: int
size for the image that healpy uses for rendering
nest: boolean
If the healpix data is in nested (True) or ring (False)
title: str
Title of figure
smooth: float
apply gaussian smoothing, in degrees
rotation: tuple(3)
Rotate the plot, in format (lon, lat, psi)
such that (lon, lat) is the center,
and rotate on the screen by angle psi. All angles are degrees.
norm : {'hist', 'log', None}
Color normalization, hist= histogram equalized color mapping,
log= logarithmic color mapping, default: None (linear color mapping)
"""
from scipy.stats import binned_statistic
if grid is None:
try:
what_ = find_matching_parenthesis(what)[0]
except TypeError:
what_ = what
func = what.replace(what_, '')[:-2] # remove ()
if what_ in ('*', ):
value = self[healpix_expression]
else:
value = self[what_]
binned_statistic_ = dask_compatibility(binned_statistic)
bins = np.arange(nside2npix(2 ** healpix_level) + 1)
grid = binned_statistic_(self[healpix_expression],
value, bins=bins, statistic=func).statistic
return healpix_grid_plot(grid, what_label=what,
grid_limits=grid_limits,
healpix_input=healpix_input,
healpix_output=healpix_output,
image_size=image_size, nest=nest, norm=norm,
title=title, smooth=smooth,
cmap=cmap, rotation=rotation, **kwargs)
| {
"content_hash": "08a01d1e0c5966a6125189106d0ad3bc",
"timestamp": "",
"source": "github",
"line_count": 402,
"max_line_length": 79,
"avg_line_length": 30.34825870646766,
"alnum_prop": 0.5832786885245902,
"repo_name": "mfouesneau/ezdata",
"id": "97ed6fffdf6a6bf938986e7c78b8850a6212ba0a",
"size": "12200",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ezdata/astro/astro.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "266436"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
import argparse
import collections
import json
import os.path
import re
import sys
DEFAULT_TOOLCHAIN_PREFIX = "xtensa-esp32-elf-"
CHIP_SIZES = {
"esp32": {
"total_iram": 0x20000,
"total_irom": 0x330000,
"total_drom": 0x800000,
# total dram is determined from objdump output
}
}
def _json_dump(obj):
""" Pretty-print JSON object to stdout """
json.dump(obj, sys.stdout, indent=4)
print('\n')
def scan_to_header(f, header_line):
""" Scan forward in a file until you reach 'header_line', then return """
for line in f:
if line.strip() == header_line:
return
raise RuntimeError("Didn't find line '%s' in file" % header_line)
def load_map_data(map_file):
memory_config = load_memory_config(map_file)
sections = load_sections(map_file)
return memory_config, sections
def load_memory_config(map_file):
""" Memory Configuration section is the total size of each output section """
result = {}
scan_to_header(map_file, "Memory Configuration")
RE_MEMORY_SECTION = r"(?P<name>[^ ]+) +0x(?P<origin>[\da-f]+) +0x(?P<length>[\da-f]+)"
for line in map_file:
m = re.match(RE_MEMORY_SECTION, line)
if m is None:
if len(result) == 0:
continue # whitespace or a header, before the content we want
else:
return result # we're at the end of the Memory Configuration
section = {
"name": m.group("name"),
"origin": int(m.group("origin"), 16),
"length": int(m.group("length"), 16),
}
if section["name"] != "*default*":
result[section["name"]] = section
raise RuntimeError("End of file while scanning memory configuration?")
def load_sections(map_file):
""" Load section size information from the MAP file.
Returns a dict of 'sections', where each key is a section name and the value
is a dict with details about this section, including a "sources" key which holds a list of source file line
information for each symbol linked into the section.
"""
scan_to_header(map_file, "Linker script and memory map")
sections = {}
section = None
sym_backup = None
for line in map_file:
# output section header, ie '.iram0.text 0x0000000040080400 0x129a5'
RE_SECTION_HEADER = r"(?P<name>[^ ]+) +0x(?P<address>[\da-f]+) +0x(?P<size>[\da-f]+)$"
m = re.match(RE_SECTION_HEADER, line)
if m is not None: # start of a new section
section = {
"name": m.group("name"),
"address": int(m.group("address"), 16),
"size": int(m.group("size"), 16),
"sources": [],
}
sections[section["name"]] = section
continue
# source file line, ie
# 0x0000000040080400 0xa4 /home/gus/esp/32/idf/examples/get-started/hello_world/build/esp32/libesp32.a(cpu_start.o)
RE_SOURCE_LINE = r"\s*(?P<sym_name>\S*).* +0x(?P<address>[\da-f]+) +0x(?P<size>[\da-f]+) (?P<archive>.+\.a)\((?P<object_file>.+\.ob?j?)\)"
m = re.match(RE_SOURCE_LINE, line, re.M)
if not m:
# cmake build system links some object files directly, not part of any archive
RE_SOURCE_LINE = r"\s*(?P<sym_name>\S*).* +0x(?P<address>[\da-f]+) +0x(?P<size>[\da-f]+) (?P<object_file>.+\.ob?j?)"
m = re.match(RE_SOURCE_LINE, line)
if section is not None and m is not None: # input source file details
sym_name = m.group("sym_name") if len(m.group("sym_name")) > 0 else sym_backup
try:
archive = m.group("archive")
except IndexError:
archive = "(exe)"
source = {
"size": int(m.group("size"), 16),
"address": int(m.group("address"), 16),
"archive": os.path.basename(archive),
"object_file": os.path.basename(m.group("object_file")),
"sym_name": sym_name,
}
source["file"] = "%s:%s" % (source["archive"], source["object_file"])
section["sources"] += [source]
# In some cases the section name appears on the previous line, back it up in here
RE_SYMBOL_ONLY_LINE = r"^ (?P<sym_name>\S*)$"
m = re.match(RE_SYMBOL_ONLY_LINE, line)
if section is not None and m is not None:
sym_backup = m.group("sym_name")
return sections
def sizes_by_key(sections, key):
""" Takes a dict of sections (from load_sections) and returns
a dict keyed by 'key' with aggregate output size information.
Key can be either "archive" (for per-archive data) or "file" (for per-file data) in the result.
"""
result = {}
for section in sections.values():
for s in section["sources"]:
if not s[key] in result:
result[s[key]] = {}
archive = result[s[key]]
if not section["name"] in archive:
archive[section["name"]] = 0
archive[section["name"]] += s["size"]
return result
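# Illustrative shape of the result (hypothetical numbers): with key="archive",
# each archive maps to its per-output-section byte totals, e.g.
#
#   {
#       "libesp32.a": {".iram0.text": 0x129a5, ".dram0.data": 0x2120},
#       "libfreertos.a": {".iram0.text": 0x4fe0, ".dram0.bss": 0x81c},
#   }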
def main():
parser = argparse.ArgumentParser("idf_size - a tool to print IDF elf file sizes")
parser.add_argument(
'--toolchain-prefix',
help="Triplet prefix to add before objdump executable",
default=DEFAULT_TOOLCHAIN_PREFIX)
parser.add_argument(
'--json',
help="Output results as JSON",
action="store_true")
parser.add_argument(
'map_file', help='MAP file produced by linker',
type=argparse.FileType('r'))
parser.add_argument(
'--archives', help='Print per-archive sizes', action='store_true')
parser.add_argument(
'--archive_details', help='Print detailed symbols per archive')
parser.add_argument(
'--files', help='Print per-file sizes', action='store_true')
args = parser.parse_args()
memory_config, sections = load_map_data(args.map_file)
if not args.json or not (args.archives or args.files or args.archive_details):
print_summary(memory_config, sections, args.json)
if args.archives:
print_detailed_sizes(sections, "archive", "Archive File", args.json)
if args.files:
print_detailed_sizes(sections, "file", "Object File", args.json)
if args.archive_details:
print_archive_symbols(sections, args.archive_details, args.json)
def print_summary(memory_config, sections, as_json=False):
def get_size(section):
try:
return sections[section]["size"]
except KeyError:
return 0
# if linker script changes, these need to change
total_iram = memory_config["iram0_0_seg"]["length"]
total_dram = memory_config["dram0_0_seg"]["length"]
used_data = get_size(".dram0.data")
used_bss = get_size(".dram0.bss")
used_dram = used_data + used_bss
try:
used_dram_ratio = used_dram / total_dram
except ZeroDivisionError:
used_dram_ratio = float('nan')
used_iram = sum(get_size(s) for s in sections if s.startswith(".iram0"))
try:
used_iram_ratio = used_iram / total_iram
except ZeroDivisionError:
used_iram_ratio = float('nan')
flash_code = get_size(".flash.text")
flash_rodata = get_size(".flash.rodata")
total_size = used_data + used_iram + flash_code + flash_rodata
if as_json:
_json_dump(collections.OrderedDict([
("dram_data", used_data),
("dram_bss", used_bss),
("used_dram", used_dram),
("available_dram", total_dram - used_dram),
("used_dram_ratio", used_dram_ratio),
("used_iram", used_iram),
("available_iram", total_iram - used_iram),
("used_iram_ratio", used_iram_ratio),
("flash_code", flash_code),
("flash_rodata", flash_rodata),
("total_size", total_size)
]))
else:
print("Total sizes:")
print(" DRAM .data size: %7d bytes" % used_data)
print(" DRAM .bss size: %7d bytes" % used_bss)
print("Used static DRAM: %7d bytes (%7d available, %.1f%% used)" %
(used_dram, total_dram - used_dram, 100.0 * used_dram_ratio))
print("Used static IRAM: %7d bytes (%7d available, %.1f%% used)" %
(used_iram, total_iram - used_iram, 100.0 * used_iram_ratio))
print(" Flash code: %7d bytes" % flash_code)
print(" Flash rodata: %7d bytes" % flash_rodata)
print("Total image size:~%7d bytes (.bin may be padded larger)" % (total_size))
def print_detailed_sizes(sections, key, header, as_json=False):
sizes = sizes_by_key(sections, key)
result = {}
for k in sizes:
v = sizes[k]
result[k] = collections.OrderedDict()
result[k]["data"] = v.get(".dram0.data", 0)
result[k]["bss"] = v.get(".dram0.bss", 0)
result[k]["iram"] = sum(t for (s,t) in v.items() if s.startswith(".iram0"))
result[k]["flash_text"] = v.get(".flash.text", 0)
result[k]["flash_rodata"] = v.get(".flash.rodata", 0)
result[k]["total"] = sum(result[k].values())
def return_total_size(elem):
val = elem[1]
return val["total"]
def return_header(elem):
return elem[0]
s = sorted(list(result.items()), key=return_header)
# do a secondary sort in order to have consistent order (for diff-ing the output)
s = sorted(s, key=return_total_size, reverse=True)
if as_json:
_json_dump(collections.OrderedDict(s))
else:
print("Per-%s contributions to ELF file:" % key)
headings = (header,
"DRAM .data",
"& .bss",
"IRAM",
"Flash code",
"& rodata",
"Total")
header_format = "%24s %10d %6d %6d %10d %8d %7d"
print(header_format.replace("d", "s") % headings)
for k,v in s:
if ":" in k: # print subheadings for key of format archive:file
sh,k = k.split(":")
print(header_format % (k[:24],
v["data"],
v["bss"],
v["iram"],
v["flash_text"],
v["flash_rodata"],
v["total"]))
def print_archive_symbols(sections, archive, as_json=False):
interested_sections = [".dram0.data", ".dram0.bss", ".iram0.text", ".iram0.vectors", ".flash.text", ".flash.rodata"]
result = {}
for t in interested_sections:
result[t] = {}
for section in sections.values():
section_name = section["name"]
if section_name not in interested_sections:
continue
for s in section["sources"]:
if archive != s["archive"]:
continue
s["sym_name"] = re.sub("(.text.|.literal.|.data.|.bss.|.rodata.)", "", s["sym_name"])
result[section_name][s["sym_name"]] = result[section_name].get(s["sym_name"], 0) + s["size"]
# build a new ordered dict of each section, where each entry is an ordereddict of symbols to sizes
section_symbols = collections.OrderedDict()
for t in interested_sections:
s = sorted(list(result[t].items()), key=lambda k_v: k_v[0])
# do a secondary sort in order to have consistent order (for diff-ing the output)
s = sorted(s, key=lambda k_v: k_v[1], reverse=True)
section_symbols[t] = collections.OrderedDict(s)
if as_json:
_json_dump(section_symbols)
else:
print("Symbols within the archive: %s (Not all symbols may be reported)" % (archive))
for t,s in section_symbols.items():
section_total = 0
print("\nSymbols from section:", t)
for key, val in s.items():
print(("%s(%d)" % (key.replace(t + ".", ""), val)), end=' ')
section_total += val
print("\nSection total:",section_total)
if __name__ == "__main__":
main()
| {
"content_hash": "2c2761b5d2dabafbd664dff966296b7d",
"timestamp": "",
"source": "github",
"line_count": 324,
"max_line_length": 146,
"avg_line_length": 37.9320987654321,
"alnum_prop": 0.5615947925142393,
"repo_name": "espressif/ESP8266_RTOS_SDK",
"id": "e89312dd448421194ede4160482831075886228f",
"size": "13192",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/idf_size.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "48701"
},
{
"name": "Batchfile",
"bytes": "8453"
},
{
"name": "C",
"bytes": "16675480"
},
{
"name": "C++",
"bytes": "788226"
},
{
"name": "CMake",
"bytes": "197769"
},
{
"name": "Dockerfile",
"bytes": "2032"
},
{
"name": "Inno Setup",
"bytes": "3663"
},
{
"name": "Lex",
"bytes": "7800"
},
{
"name": "M4",
"bytes": "98046"
},
{
"name": "Makefile",
"bytes": "159517"
},
{
"name": "Pascal",
"bytes": "52308"
},
{
"name": "Perl",
"bytes": "15204"
},
{
"name": "Python",
"bytes": "1161765"
},
{
"name": "Shell",
"bytes": "100056"
},
{
"name": "Smarty",
"bytes": "5972"
},
{
"name": "VBScript",
"bytes": "294"
},
{
"name": "Yacc",
"bytes": "15875"
}
],
"symlink_target": ""
} |
import unittest
import lob
# Setting the API key
lob.api_key = 'test_0dc8d51e0acffcb1880e0f19c79b2f5b0cc'
class BankAccountFunctions(unittest.TestCase):
def setUp(self):
lob.api_key = 'test_0dc8d51e0acffcb1880e0f19c79b2f5b0cc'
self.addr = lob.Address.list(count=1).data[0]
def test_list_bankAccounts(self):
bankAccounts = lob.BankAccount.list()
self.assertTrue(isinstance(bankAccounts.data[0], lob.BankAccount))
self.assertEqual(bankAccounts.object, 'list')
def test_list_bankAccounts_limit(self):
bankAccounts = lob.BankAccount.list(count=2)
self.assertTrue(isinstance(bankAccounts.data[0], lob.BankAccount))
self.assertEqual(len(bankAccounts.data), 2)
def test_list_bankAccounts_fail(self):
self.assertRaises(lob.error.InvalidRequestError, lob.BankAccount.list, count=1000)
def test_create_bankAccount_fail(self):
self.assertRaises(lob.error.InvalidRequestError, lob.BankAccount.create)
def test_create_bankAccount(self):
bankAccount = lob.BankAccount.create(
routing_number='122100024',
account_number='123456789',
bank_address=self.addr.id,
account_address=self.addr.id,
signatory='John Doe'
)
self.assertTrue(isinstance(bankAccount, lob.BankAccount))
self.assertEqual(bankAccount.bank_address.id, self.addr.id)
def test_create_bankAccount_lob_obj(self):
bankAccount = lob.BankAccount.create(
routing_number='122100024',
account_number='123456789',
bank_address=self.addr,
account_address=self.addr,
signatory='John Doe'
)
self.assertTrue(isinstance(bankAccount, lob.BankAccount))
self.assertEqual(bankAccount.bank_address.id, self.addr.id)
def test_create_bankAccount_inline(self):
bankAccount = lob.BankAccount.create(
routing_number='122100024',
account_number='123456789',
bank_address= {
'name': 'Lob1',
'address_line1': '185 Berry Street',
'address_line2': 'Suite 1510',
'address_city': 'San Francisco',
'address_zip': '94107',
'address_state': 'CA'
},
account_address= {
'name': 'Lob2',
'address_line1': '185 Berry Street',
'address_line2': 'Suite 1510',
'address_city': 'San Francisco',
'address_zip': '94107',
'address_state': 'CA'
},
signatory='John Doe'
)
self.assertTrue(isinstance(bankAccount, lob.BankAccount))
        self.assertEqual(bankAccount.bank_address.name, 'Lob1')
        self.assertEqual(bankAccount.account_address.name, 'Lob2')
def test_retrieve_bankAccount(self):
bankAccount = lob.BankAccount.retrieve(id=lob.BankAccount.list().data[0].id)
self.assertTrue(isinstance(bankAccount, lob.BankAccount))
def test_retrieve_bankAccount_fail(self):
self.assertRaises(lob.error.InvalidRequestError, lob.BankAccount.retrieve, id='test')
def test_delete_bankAccount(self):
ba = lob.BankAccount.list().data[0].id
delBa = lob.BankAccount.delete(id=ba)
self.assertEqual(ba, delBa.id)
| {
"content_hash": "ed2e7703df1894cd1bcebca40895d079",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 93,
"avg_line_length": 38.7816091954023,
"alnum_prop": 0.6253704801422644,
"repo_name": "ami/lob-python",
"id": "90de583a20623bee102c582c7e4dc9b01c0ca12f",
"size": "3374",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_bankaccount.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "39450"
}
],
"symlink_target": ""
} |
from contextlib import contextmanager
from .termui import get_terminal_size
from .parser import split_opt
from ._compat import term_len
def measure_table(rows):
widths = {}
for row in rows:
for idx, col in enumerate(row):
widths[idx] = max(widths.get(idx, 0), term_len(col))
return tuple(y for x, y in sorted(widths.items()))
def iter_rows(rows, col_count):
for row in rows:
row = tuple(row)
yield row + ('',) * (col_count - len(row))
def wrap_text(text, width=78, initial_indent='', subsequent_indent='',
preserve_paragraphs=False):
"""A helper function that intelligently wraps text. By default, it
assumes that it operates on a single paragraph of text but if the
`preserve_paragraphs` parameter is provided it will intelligently
handle paragraphs (defined by two empty lines).
If paragraphs are handled, a paragraph can be prefixed with an empty
line containing the ``\\b`` character (``\\x08``) to indicate that
no rewrapping should happen in that block.
:param text: the text that should be rewrapped.
:param width: the maximum width for the text.
:param initial_indent: the initial indent that should be placed on the
first line as a string.
:param subsequent_indent: the indent string that should be placed on
each consecutive line.
:param preserve_paragraphs: if this flag is set then the wrapping will
intelligently handle paragraphs.
"""
from ._textwrap import TextWrapper
text = text.expandtabs()
wrapper = TextWrapper(width, initial_indent=initial_indent,
subsequent_indent=subsequent_indent,
replace_whitespace=False)
if not preserve_paragraphs:
return wrapper.fill(text)
p = []
buf = []
indent = None
def _flush_par():
if not buf:
return
if buf[0].strip() == '\b':
p.append((indent or 0, True, '\n'.join(buf[1:])))
else:
p.append((indent or 0, False, ' '.join(buf)))
del buf[:]
for line in text.splitlines():
if not line:
_flush_par()
indent = None
else:
if indent is None:
orig_len = term_len(line)
line = line.lstrip()
indent = orig_len - term_len(line)
buf.append(line)
_flush_par()
rv = []
for indent, raw, text in p:
with wrapper.extra_indent(' ' * indent):
if raw:
rv.append(wrapper.indent_only(text))
else:
rv.append(wrapper.fill(text))
return '\n\n'.join(rv)
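# A minimal usage sketch (illustrative input and output) of the paragraph handling
# described above: the second paragraph starts with a \b line, so it is kept
# verbatim instead of being rewrapped.
#
#     >>> text = "A long paragraph that will be rewrapped.\n\n\b\nkeep\nthese lines"
#     >>> print(wrap_text(text, width=20, preserve_paragraphs=True))
#     A long paragraph
#     that will be
#     rewrapped.
#
#     keep
#     these lines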
class HelpFormatter(object):
"""This class helps with formatting text-based help pages. It's
usually just needed for very special internal cases, but it's also
exposed so that developers can write their own fancy outputs.
At present, it always writes into memory.
:param indent_increment: the additional increment for each level.
:param width: the width for the text. This defaults to the terminal
width clamped to a maximum of 78.
"""
def __init__(self, indent_increment=2, width=None):
self.indent_increment = indent_increment
if width is None:
width = min(get_terminal_size()[0], 80) - 2
self.width = width
self.current_indent = 0
self.buffer = []
def write(self, string):
"""Writes a unicode string into the internal buffer."""
self.buffer.append(string)
def indent(self):
"""Increases the indentation."""
self.current_indent += self.indent_increment
def dedent(self):
"""Decreases the indentation."""
self.current_indent -= self.indent_increment
def write_usage(self, prog, args='', prefix='Usage: '):
"""Writes a usage line into the buffer.
:param prog: the program name.
:param args: whitespace separated list of arguments.
:param prefix: the prefix for the first line.
"""
prefix = '%*s%s' % (self.current_indent, prefix, prog)
self.write(prefix)
text_width = max(self.width - self.current_indent - term_len(prefix), 10)
indent = ' ' * (term_len(prefix) + 1)
self.write(wrap_text(args, text_width,
initial_indent=' ',
subsequent_indent=indent))
self.write('\n')
def write_heading(self, heading):
"""Writes a heading into the buffer."""
self.write('%*s%s:\n' % (self.current_indent, '', heading))
def write_paragraph(self):
"""Writes a paragraph into the buffer."""
if self.buffer:
self.write('\n')
def write_text(self, text):
"""Writes re-indented text into the buffer. This rewraps and
preserves paragraphs.
"""
text_width = max(self.width - self.current_indent, 11)
indent = ' ' * self.current_indent
self.write(wrap_text(text, text_width,
initial_indent=indent,
subsequent_indent=indent,
preserve_paragraphs=True))
self.write('\n')
def write_dl(self, rows, col_max=30, col_spacing=2):
"""Writes a definition list into the buffer. This is how options
and commands are usually formatted.
:param rows: a list of two item tuples for the terms and values.
:param col_max: the maximum width of the first column.
:param col_spacing: the number of spaces between the first and
second column.
"""
rows = list(rows)
widths = measure_table(rows)
if len(widths) != 2:
raise TypeError('Expected two columns for definition list')
first_col = min(widths[0], col_max) + col_spacing
for first, second in iter_rows(rows, len(widths)):
self.write('%*s%s' % (self.current_indent, '', first))
if not second:
self.write('\n')
continue
if term_len(first) <= first_col - col_spacing:
self.write(' ' * (first_col - term_len(first)))
else:
self.write('\n')
self.write(' ' * (first_col + self.current_indent))
text_width = self.width - first_col - 2
            # splitlines() can yield an empty list; only then fall back to a bare newline
            lines = wrap_text(second, text_width).splitlines()
            if lines:
                self.write(lines[0] + '\n')
                for line in lines[1:]:
                    self.write('%*s%s\n' % (
                        first_col + self.current_indent, '', line))
            else:
                self.write('\n')
@contextmanager
def section(self, name):
"""Helpful context manager that writes a paragraph, a heading,
and the indents.
:param name: the section name that is written as heading.
"""
self.write_paragraph()
self.write_heading(name)
self.indent()
try:
yield
finally:
self.dedent()
@contextmanager
def indentation(self):
"""A context manager that increases the indentation."""
self.indent()
try:
yield
finally:
self.dedent()
def getvalue(self):
"""Returns the buffer contents."""
return ''.join(self.buffer)
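# A minimal usage sketch (hypothetical program name and options) showing how the
# formatter is typically driven:
#
#     >>> formatter = HelpFormatter(width=40)
#     >>> formatter.write_usage('mycli', '[OPTIONS] SRC DST')
#     >>> with formatter.section('Options'):
#     ...     formatter.write_dl([('--verbose', 'Enable chatty output.'),
#     ...                         ('--help', 'Show this message and exit.')])
#     >>> print(formatter.getvalue())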
def join_options(options):
"""Given a list of option strings this joins them in the most appropriate
way and returns them in the form ``(formatted_string,
any_prefix_is_slash)`` where the second item in the tuple is a flag that
indicates if any of the option prefixes was a slash.
"""
rv = []
any_prefix_is_slash = False
for opt in options:
prefix = split_opt(opt)[0]
if prefix == '/':
any_prefix_is_slash = True
rv.append((len(prefix), opt))
rv.sort(key=lambda x: x[0])
rv = ', '.join(x[1] for x in rv)
return rv, any_prefix_is_slash
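# Example (illustrative): short options sort before long ones, and a DOS-style
# "/" prefix flips the second element of the returned tuple.
#
#     >>> join_options(['--help', '-h'])
#     ('-h, --help', False)
#     >>> join_options(['/?'])
#     ('/?', True)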
| {
"content_hash": "7d4a4eaf2253015429e744be1096632b",
"timestamp": "",
"source": "github",
"line_count": 239,
"max_line_length": 81,
"avg_line_length": 34.11297071129707,
"alnum_prop": 0.5686250459953391,
"repo_name": "untitaker/click",
"id": "7b9fa7bc9fb8d6124f456fd9121750d2c710b38d",
"size": "8153",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "click/formatting.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "253928"
},
{
"name": "Shell",
"bytes": "4106"
}
],
"symlink_target": ""
} |
"""
Compute zonal statistics for every feature in a vector datasource across every
band in a raster datasource.
"""
import pprint
import warnings
import click
import fiona as fio
from fiona.transform import transform_geom
import numpy as np
import rasterio as rio
from rasterio.features import rasterize
from shapely.geometry import asShape
warnings.filterwarnings('ignore')
def cb_bands(ctx, param, value):
"""
Click callback for parsing and validating `--bands`.
Parameters
----------
ctx : click.Context
Ignored.
param : click.Parameter
Ignored.
value : str
See the decorator for `--bands`.
Returns
-------
    list or None
        Sorted band indexes to process, or None if the option was not supplied.
"""
if value is None:
return value
else:
return sorted([int(i) for i in value.split(',')])
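# Example (illustrative): `--bands 3,1,2` reaches this callback as the string
# "3,1,2" and comes back as a sorted list of band indexes.
#
#     >>> cb_bands(None, None, "3,1,2")
#     [1, 2, 3]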
def zonal_stats_from_raster(vector, raster, bands=None, all_touched=False, custom=None):
"""
Compute zonal statistics for each input feature across all bands of an input
raster. God help ye who supply large non-block encoded rasters or large
polygons...
By default min, max, mean, standard deviation and sum are computed but the
user can also create their own functions to compute custom metrics across
the intersecting area for every feature and every band. Functions must
accept a 2D masked array extracted from a single band. Should probably
be changed to allow the user to compute statistics across all bands.
Use `custom={'my_metric': my_metric_func}` to call `my_metric_func` on the
intersecting pixels. A key named `my_metric` will be added alongside `min`,
`max`, etc. Metrics can also be disabled by doing `custom={'min': None}`
to turn off the call to `min`. The `min` key will still be included in the
output but will have a value of `None`.
While this function will work with any geometry type the input is intended
to be polygons. The goal of this function is to be able to take large
rasters and a large number of not too giant polygons and be pretty confident
that nothing is going to break. There are better methods for collecting
statistics if the goal is speed or by optimizing for each datatype. Point
layers will work but are not as efficient and should be used with rasterio's
`sample()` method, and an initial pass to index points against the raster's
blocks.
Further optimization could be performed to limit raster I/O for really
really large numbers of overlapping polygons but that is outside the
intended scope.
In order to handle raster larger than available memory and vector datasets
containing a large number of features, the minimum bounding box for each
feature's geometry is computed and all intersecting raster windows are read.
The inverse of the geometry is burned into this subset's mask yielding only
the values that intersect the feature. Metrics are then computed against
this masked array.
Example output:
The outer keys are feature ID's
{
'0': {
'bands': {
1: {
'max': 244,
'mean': 97.771298771710065,
'min': 15,
'std': 44.252917708519028,
'sum': 15689067
}
},
},
'1': {
'bands': {
1: {
'max': 240,
'mean': 102.17252754327959,
'min': 14,
'std': 43.650764099201055,
'sum': 26977532
}
},
}
}
Parameters
----------
vector : <fiona feature collection>
Vector datasource.
raster : <rasterio RasterReader>
Raster datasource.
    all_touched : bool, optional
        If True, include every pixel touched by a geometry rather than only
        those whose center falls inside it.
custom : dict or None,
Supply custom functions as `{'name': func}`.
bands : int or list or None, optional
Bands to compute stats against. Default is all.
Returns
-------
dict
See 'Example output' above.
"""
if bands is None:
bands = list(range(1, raster.count + 1))
elif isinstance(bands, int):
bands = [bands]
else:
bands = sorted(bands)
metrics = {
'min': lambda x: x.min(),
'max': lambda x: x.max(),
'mean': lambda x: x.mean(),
'std': lambda x: x.std(),
'sum': lambda x: x.sum()
}
if custom is not None:
metrics.update(**custom)
# Make sure the user gave all callable objects or None
for name, func in metrics.items():
if func is not None and not hasattr(func, '__call__'):
raise click.ClickException(
"Custom function `%s' is not callable: %s" % (name, func))
r_x_min, r_y_min, r_x_max, r_y_max = raster.bounds
feature_stats = {}
for feature in vector:
"""
rasterize(
shapes,
out_shape=None,
fill=0,
out=None,
output=None,
transform=Affine(1.0, 0.0, 0.0, 0.0, 1.0, 0.0),
all_touched=False,
default_value=1,
dtype=None
)
"""
stats = {'bands': {}}
reproj_geom = asShape(transform_geom(
vector.crs, raster.crs, feature['geometry'], antimeridian_cutting=True))
x_min, y_min, x_max, y_max = reproj_geom.bounds
if (r_x_min <= x_min <= x_max <= r_x_max) and (r_y_min <= y_min <= y_max <= r_y_max):
stats['contained'] = True
else:
stats['contained'] = False
col_min, row_max = ~raster.affine * (x_min, y_min)
col_max, row_min = ~raster.affine * (x_max, y_max)
window = ((row_min, row_max), (col_min, col_max))
rasterized = rasterize(
shapes=[reproj_geom],
out_shape=(row_max - row_min, col_max - col_min),
fill=1,
transform=raster.window_transform(window),
all_touched=all_touched,
default_value=0,
dtype=rio.ubyte
).astype(np.bool)
for bidx in bands:
stats['bands'][bidx] = {}
data = raster.read(indexes=bidx, window=window, boundless=True, masked=True)
# This should be a masked array, but a bug requires us to build our own:
# https://github.com/mapbox/rasterio/issues/338
if not isinstance(data, np.ma.MaskedArray):
data = np.ma.array(data, mask=data == raster.nodata)
data.mask += rasterized
for name, func in metrics.items():
if func is not None:
stats['bands'][bidx][name] = func(data)
feature_stats[feature['id']] = stats
return feature_stats
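# A usage sketch (hypothetical file names) of the custom-metric hook described in
# the docstring above, reusing this module's fiona/rasterio/numpy imports: add a
# median metric and disable the built-in sum.
#
#     with fio.open("polygons.geojson") as vector, rio.open("raster.tif") as raster:
#         stats = zonal_stats_from_raster(
#             vector, raster, bands=1,
#             custom={"median": lambda x: float(np.ma.median(x)), "sum": None})
#
# Each per-band result then carries a "median" entry alongside min/max/mean/std.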
@click.command()
@click.argument('raster')
@click.argument('vector')
@click.option(
'-b', '--bands', callback=cb_bands,
help="Bands to process as `1` or `1,2,3`."
)
@click.option(
'-a', '--all-touched', is_flag=True,
help="Enable 'all-touched' rasterization."
)
@click.option(
'-n', '--no-pretty-print', is_flag=True,
help="Print stats JSON on one line."
)
@click.option(
'--indent', type=click.INT, default=0,
help="Pretty print indent."
)
def main(raster, vector, bands, all_touched, no_pretty_print, indent):
"""
Get raster stats for every feature in a vector datasource.
\b
Only compute against the first two bands:
\b
$ zonal-statistics.py sample-data/NAIP.tif \\
sample-data/polygon-samples.geojson -b 1,2
\b
"""
with fio.drivers(), rio.drivers():
with rio.open(raster) as src_r, fio.open(vector) as src_v:
if not bands:
bands = list(range(1, src_r.count + 1))
results = zonal_stats_from_raster(
src_v, src_r, bands=bands, all_touched=all_touched)
if not no_pretty_print:
results = pprint.pformat(results, indent=indent)
click.echo(results)
if __name__ == '__main__':
main()
| {
"content_hash": "6633174b3083d8849f4351f00e62fb3b",
"timestamp": "",
"source": "github",
"line_count": 274,
"max_line_length": 93,
"avg_line_length": 30.525547445255473,
"alnum_prop": 0.580703012912482,
"repo_name": "geowurster/Geoprocessing-Examples",
"id": "933aecfa0387f6ad3889c92bc7915486128e6529",
"size": "8388",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zonal-statistics.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "45923"
}
],
"symlink_target": ""
} |
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar
from urllib.parse import parse_qs, urljoin, urlparse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._operations import build_list_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class Operations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.iothub.v2018_01_22.aio.IotHubClient`'s
:attr:`operations` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list(self, **kwargs: Any) -> AsyncIterable["_models.Operation"]:
"""Lists all of the available IoT Hub REST API operations.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Operation or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.iothub.v2018_01_22.models.Operation]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2018-01-22")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.OperationListResult]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
# make call to next link with the client's api-version
_parsed_next_link = urlparse(next_link)
_next_request_params = case_insensitive_dict(parse_qs(_parsed_next_link.query))
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest("GET", urljoin(next_link, _parsed_next_link.path), params=_next_request_params)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("OperationListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list.metadata = {"url": "/providers/Microsoft.Devices/operations"} # type: ignore
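    # A minimal usage sketch (assumes an already-authenticated IotHubClient named
    # `client`); the returned AsyncItemPaged transparently follows the service's
    # next_link pages:
    #
    #     async for operation in client.operations.list():
    #         print(operation.name)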
| {
"content_hash": "93b2508cc6ba971523af5b719f476552",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 117,
"avg_line_length": 44.19642857142857,
"alnum_prop": 0.6484848484848484,
"repo_name": "Azure/azure-sdk-for-python",
"id": "eda95b8d8884dba910517bcdba7e2ef7534dca85",
"size": "5450",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/iothub/azure-mgmt-iothub/azure/mgmt/iothub/v2018_01_22/aio/operations/_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
from .annotation_payload import (
AnnotationPayload,
)
from .annotation_spec import (
AnnotationSpec,
)
from .classification import (
ClassificationAnnotation,
ClassificationEvaluationMetrics,
VideoClassificationAnnotation,
ClassificationType,
)
from .column_spec import (
ColumnSpec,
)
from .data_items import (
Document,
DocumentDimensions,
ExamplePayload,
Image,
Row,
TextSnippet,
)
from .data_stats import (
ArrayStats,
CategoryStats,
CorrelationStats,
DataStats,
Float64Stats,
StringStats,
StructStats,
TimestampStats,
)
from .data_types import (
DataType,
StructType,
TypeCode,
)
from .dataset import (
Dataset,
)
from .detection import (
BoundingBoxMetricsEntry,
ImageObjectDetectionAnnotation,
ImageObjectDetectionEvaluationMetrics,
VideoObjectTrackingAnnotation,
VideoObjectTrackingEvaluationMetrics,
)
from .geometry import (
BoundingPoly,
NormalizedVertex,
)
from .image import (
ImageClassificationDatasetMetadata,
ImageClassificationModelDeploymentMetadata,
ImageClassificationModelMetadata,
ImageObjectDetectionDatasetMetadata,
ImageObjectDetectionModelDeploymentMetadata,
ImageObjectDetectionModelMetadata,
)
from .io import (
BatchPredictInputConfig,
BatchPredictOutputConfig,
BigQueryDestination,
BigQuerySource,
DocumentInputConfig,
ExportEvaluatedExamplesOutputConfig,
GcrDestination,
GcsDestination,
GcsSource,
InputConfig,
ModelExportOutputConfig,
OutputConfig,
)
from .model import (
Model,
)
from .model_evaluation import (
ModelEvaluation,
)
from .operations import (
BatchPredictOperationMetadata,
CreateModelOperationMetadata,
DeleteOperationMetadata,
DeployModelOperationMetadata,
ExportDataOperationMetadata,
ExportEvaluatedExamplesOperationMetadata,
ExportModelOperationMetadata,
ImportDataOperationMetadata,
OperationMetadata,
UndeployModelOperationMetadata,
)
from .prediction_service import (
BatchPredictRequest,
BatchPredictResult,
PredictRequest,
PredictResponse,
)
from .ranges import (
DoubleRange,
)
from .regression import (
RegressionEvaluationMetrics,
)
from .service import (
CreateDatasetRequest,
CreateModelRequest,
DeleteDatasetRequest,
DeleteModelRequest,
DeployModelRequest,
ExportDataRequest,
ExportEvaluatedExamplesRequest,
ExportModelRequest,
GetAnnotationSpecRequest,
GetColumnSpecRequest,
GetDatasetRequest,
GetModelEvaluationRequest,
GetModelRequest,
GetTableSpecRequest,
ImportDataRequest,
ListColumnSpecsRequest,
ListColumnSpecsResponse,
ListDatasetsRequest,
ListDatasetsResponse,
ListModelEvaluationsRequest,
ListModelEvaluationsResponse,
ListModelsRequest,
ListModelsResponse,
ListTableSpecsRequest,
ListTableSpecsResponse,
UndeployModelRequest,
UpdateColumnSpecRequest,
UpdateDatasetRequest,
UpdateTableSpecRequest,
)
from .table_spec import (
TableSpec,
)
from .tables import (
TablesAnnotation,
TablesDatasetMetadata,
TablesModelColumnInfo,
TablesModelMetadata,
)
from .temporal import (
TimeSegment,
)
from .text import (
TextClassificationDatasetMetadata,
TextClassificationModelMetadata,
TextExtractionDatasetMetadata,
TextExtractionModelMetadata,
TextSentimentDatasetMetadata,
TextSentimentModelMetadata,
)
from .text_extraction import (
TextExtractionAnnotation,
TextExtractionEvaluationMetrics,
)
from .text_segment import (
TextSegment,
)
from .text_sentiment import (
TextSentimentAnnotation,
TextSentimentEvaluationMetrics,
)
from .translation import (
TranslationAnnotation,
TranslationDatasetMetadata,
TranslationEvaluationMetrics,
TranslationModelMetadata,
)
from .video import (
VideoClassificationDatasetMetadata,
VideoClassificationModelMetadata,
VideoObjectTrackingDatasetMetadata,
VideoObjectTrackingModelMetadata,
)
__all__ = (
"AnnotationPayload",
"AnnotationSpec",
"ClassificationAnnotation",
"ClassificationEvaluationMetrics",
"VideoClassificationAnnotation",
"ClassificationType",
"ColumnSpec",
"Document",
"DocumentDimensions",
"ExamplePayload",
"Image",
"Row",
"TextSnippet",
"ArrayStats",
"CategoryStats",
"CorrelationStats",
"DataStats",
"Float64Stats",
"StringStats",
"StructStats",
"TimestampStats",
"DataType",
"StructType",
"TypeCode",
"Dataset",
"BoundingBoxMetricsEntry",
"ImageObjectDetectionAnnotation",
"ImageObjectDetectionEvaluationMetrics",
"VideoObjectTrackingAnnotation",
"VideoObjectTrackingEvaluationMetrics",
"BoundingPoly",
"NormalizedVertex",
"ImageClassificationDatasetMetadata",
"ImageClassificationModelDeploymentMetadata",
"ImageClassificationModelMetadata",
"ImageObjectDetectionDatasetMetadata",
"ImageObjectDetectionModelDeploymentMetadata",
"ImageObjectDetectionModelMetadata",
"BatchPredictInputConfig",
"BatchPredictOutputConfig",
"BigQueryDestination",
"BigQuerySource",
"DocumentInputConfig",
"ExportEvaluatedExamplesOutputConfig",
"GcrDestination",
"GcsDestination",
"GcsSource",
"InputConfig",
"ModelExportOutputConfig",
"OutputConfig",
"Model",
"ModelEvaluation",
"BatchPredictOperationMetadata",
"CreateModelOperationMetadata",
"DeleteOperationMetadata",
"DeployModelOperationMetadata",
"ExportDataOperationMetadata",
"ExportEvaluatedExamplesOperationMetadata",
"ExportModelOperationMetadata",
"ImportDataOperationMetadata",
"OperationMetadata",
"UndeployModelOperationMetadata",
"BatchPredictRequest",
"BatchPredictResult",
"PredictRequest",
"PredictResponse",
"DoubleRange",
"RegressionEvaluationMetrics",
"CreateDatasetRequest",
"CreateModelRequest",
"DeleteDatasetRequest",
"DeleteModelRequest",
"DeployModelRequest",
"ExportDataRequest",
"ExportEvaluatedExamplesRequest",
"ExportModelRequest",
"GetAnnotationSpecRequest",
"GetColumnSpecRequest",
"GetDatasetRequest",
"GetModelEvaluationRequest",
"GetModelRequest",
"GetTableSpecRequest",
"ImportDataRequest",
"ListColumnSpecsRequest",
"ListColumnSpecsResponse",
"ListDatasetsRequest",
"ListDatasetsResponse",
"ListModelEvaluationsRequest",
"ListModelEvaluationsResponse",
"ListModelsRequest",
"ListModelsResponse",
"ListTableSpecsRequest",
"ListTableSpecsResponse",
"UndeployModelRequest",
"UpdateColumnSpecRequest",
"UpdateDatasetRequest",
"UpdateTableSpecRequest",
"TableSpec",
"TablesAnnotation",
"TablesDatasetMetadata",
"TablesModelColumnInfo",
"TablesModelMetadata",
"TimeSegment",
"TextClassificationDatasetMetadata",
"TextClassificationModelMetadata",
"TextExtractionDatasetMetadata",
"TextExtractionModelMetadata",
"TextSentimentDatasetMetadata",
"TextSentimentModelMetadata",
"TextExtractionAnnotation",
"TextExtractionEvaluationMetrics",
"TextSegment",
"TextSentimentAnnotation",
"TextSentimentEvaluationMetrics",
"TranslationAnnotation",
"TranslationDatasetMetadata",
"TranslationEvaluationMetrics",
"TranslationModelMetadata",
"VideoClassificationDatasetMetadata",
"VideoClassificationModelMetadata",
"VideoObjectTrackingDatasetMetadata",
"VideoObjectTrackingModelMetadata",
)
| {
"content_hash": "e03f31f55ffb898cdeca3cd10144845d",
"timestamp": "",
"source": "github",
"line_count": 303,
"max_line_length": 50,
"avg_line_length": 25.224422442244226,
"alnum_prop": 0.7440795499149548,
"repo_name": "googleapis/python-automl",
"id": "d7edffcb250265166df1dba0f7d500cdb59af199",
"size": "8243",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/cloud/automl_v1beta1/types/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "2347989"
},
{
"name": "Shell",
"bytes": "30660"
}
],
"symlink_target": ""
} |
"""Mock tests
Unit tests for the Mocks.
"""
from __future__ import absolute_import
__author__ = "jcgregorio@google.com (Joe Gregorio)"
import os
import unittest
import httplib2
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError, UnexpectedBodyError, UnexpectedMethodError
from googleapiclient.http import HttpMock, RequestMockBuilder
DATA_DIR = os.path.join(os.path.dirname(__file__), "data")
def datafile(filename):
return os.path.join(DATA_DIR, filename)
class Mocks(unittest.TestCase):
def setUp(self):
self.http = HttpMock(datafile("plus.json"), {"status": "200"})
self.zoo_http = HttpMock(datafile("zoo.json"), {"status": "200"})
def test_default_response(self):
requestBuilder = RequestMockBuilder({})
plus = build(
"plus",
"v1",
http=self.http,
requestBuilder=requestBuilder,
static_discovery=False,
)
activity = plus.activities().get(activityId="tag:blah").execute()
self.assertEqual({}, activity)
def test_simple_response(self):
requestBuilder = RequestMockBuilder(
{"plus.activities.get": (None, '{"foo": "bar"}')}
)
plus = build(
"plus",
"v1",
http=self.http,
requestBuilder=requestBuilder,
static_discovery=False,
)
activity = plus.activities().get(activityId="tag:blah").execute()
self.assertEqual({"foo": "bar"}, activity)
def test_unexpected_call(self):
requestBuilder = RequestMockBuilder({}, check_unexpected=True)
plus = build(
"plus",
"v1",
http=self.http,
requestBuilder=requestBuilder,
static_discovery=False,
)
try:
plus.activities().get(activityId="tag:blah").execute()
self.fail("UnexpectedMethodError should have been raised")
except UnexpectedMethodError:
pass
def test_simple_unexpected_body(self):
requestBuilder = RequestMockBuilder(
{"zoo.animals.insert": (None, '{"data": {"foo": "bar"}}', None)}
)
zoo = build(
"zoo",
"v1",
http=self.zoo_http,
requestBuilder=requestBuilder,
static_discovery=False,
)
try:
zoo.animals().insert(body="{}").execute()
self.fail("UnexpectedBodyError should have been raised")
except UnexpectedBodyError:
pass
def test_simple_expected_body(self):
requestBuilder = RequestMockBuilder(
{"zoo.animals.insert": (None, '{"data": {"foo": "bar"}}', "{}")}
)
zoo = build(
"zoo",
"v1",
http=self.zoo_http,
requestBuilder=requestBuilder,
static_discovery=False,
)
try:
zoo.animals().insert(body="").execute()
self.fail("UnexpectedBodyError should have been raised")
except UnexpectedBodyError:
pass
def test_simple_wrong_body(self):
requestBuilder = RequestMockBuilder(
{
"zoo.animals.insert": (
None,
'{"data": {"foo": "bar"}}',
'{"data": {"foo": "bar"}}',
)
}
)
zoo = build(
"zoo",
"v1",
http=self.zoo_http,
requestBuilder=requestBuilder,
static_discovery=False,
)
try:
zoo.animals().insert(body='{"data": {"foo": "blah"}}').execute()
self.fail("UnexpectedBodyError should have been raised")
except UnexpectedBodyError:
pass
def test_simple_matching_str_body(self):
requestBuilder = RequestMockBuilder(
{
"zoo.animals.insert": (
None,
'{"data": {"foo": "bar"}}',
'{"data": {"foo": "bar"}}',
)
}
)
zoo = build(
"zoo",
"v1",
http=self.zoo_http,
requestBuilder=requestBuilder,
static_discovery=False,
)
activity = zoo.animals().insert(body={"data": {"foo": "bar"}}).execute()
self.assertEqual({"foo": "bar"}, activity)
def test_simple_matching_dict_body(self):
requestBuilder = RequestMockBuilder(
{
"zoo.animals.insert": (
None,
'{"data": {"foo": "bar"}}',
{"data": {"foo": "bar"}},
)
}
)
zoo = build(
"zoo",
"v1",
http=self.zoo_http,
requestBuilder=requestBuilder,
static_discovery=False,
)
activity = zoo.animals().insert(body={"data": {"foo": "bar"}}).execute()
self.assertEqual({"foo": "bar"}, activity)
def test_errors(self):
errorResponse = httplib2.Response({"status": 500, "reason": "Server Error"})
requestBuilder = RequestMockBuilder(
{"plus.activities.list": (errorResponse, b"{}")}
)
plus = build(
"plus",
"v1",
http=self.http,
requestBuilder=requestBuilder,
static_discovery=False,
)
try:
activity = (
plus.activities().list(collection="public", userId="me").execute()
)
self.fail("An exception should have been thrown")
except HttpError as e:
self.assertEqual(b"{}", e.content)
self.assertEqual(500, e.resp.status)
self.assertEqual("Server Error", e.resp.reason)
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "c13c4c6c8a58757f832888e833a0be03",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 88,
"avg_line_length": 29.323383084577113,
"alnum_prop": 0.5130641330166271,
"repo_name": "googleapis/google-api-python-client",
"id": "1f542b594c4323104f9d7f3e43b0eda95ddc49d8",
"size": "6516",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/test_mocks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1276"
},
{
"name": "Python",
"bytes": "482401"
},
{
"name": "Shell",
"bytes": "32576"
}
],
"symlink_target": ""
} |
import argparse
import os
import subprocess
import sys
def setup():
global args, workdir
programs = ['ruby', 'git', 'apt-cacher-ng', 'make', 'wget']
if args.kvm:
programs += ['python-vm-builder', 'qemu-kvm', 'qemu-utils']
elif args.docker:
dockers = ['docker.io', 'docker-ce']
for i in dockers:
return_code = subprocess.call(['sudo', 'apt-get', 'install', '-qq', i])
if return_code == 0:
break
if return_code != 0:
print('Cannot find any way to install docker', file=sys.stderr)
exit(1)
else:
programs += ['lxc', 'debootstrap']
subprocess.check_call(['sudo', 'apt-get', 'install', '-qq'] + programs)
if not os.path.isdir('gitian.sigs'):
subprocess.check_call(['git', 'clone', 'https://github.com/digibyte-core/gitian.sigs.git'])
if not os.path.isdir('digibyte-detached-sigs'):
subprocess.check_call(['git', 'clone', 'https://github.com/digibyte-core/digibyte-detached-sigs.git'])
if not os.path.isdir('gitian-builder'):
subprocess.check_call(['git', 'clone', 'https://github.com/devrandom/gitian-builder.git'])
if not os.path.isdir('digibyte'):
subprocess.check_call(['git', 'clone', 'https://github.com/digibyte-core/digibyte.git'])
os.chdir('gitian-builder')
make_image_prog = ['bin/make-base-vm', '--suite', 'bionic', '--arch', 'amd64']
if args.docker:
make_image_prog += ['--docker']
elif not args.kvm:
make_image_prog += ['--lxc']
subprocess.check_call(make_image_prog)
os.chdir(workdir)
if args.is_bionic and not args.kvm and not args.docker:
subprocess.check_call(['sudo', 'sed', '-i', 's/lxcbr0/br0/', '/etc/default/lxc-net'])
print('Reboot is required')
exit(0)
def build():
global args, workdir
os.makedirs('digibyte-binaries/' + args.version, exist_ok=True)
print('\nBuilding Dependencies\n')
os.chdir('gitian-builder')
os.makedirs('inputs', exist_ok=True)
subprocess.check_call(['wget', '-N', '-P', 'inputs', 'https://github.com/DigiByte-Core/osslsigncode-1.7.1.tar.gz-1/raw/master/osslsigncode-1.7.1.tar.gz'])
subprocess.check_call(['wget', '-N', '-P', 'inputs', 'https://raw.githubusercontent.com/DigiByte-Core/osslsigncode-1.7.1.tar.gz-1/master/osslsigncode-Backports-to-1.7.1.patch'])
subprocess.check_call(['make', '-C', '../digibyte/depends', 'download', 'SOURCES_PATH=' + os.getcwd() + '/cache/common'])
if args.linux:
print('\nCompiling ' + args.version + ' Linux')
subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'digibyte='+args.commit, '--url', 'digibyte='+args.url, '../digibyte/contrib/gitian-descriptors/gitian-linux.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-linux', '--destination', '../gitian.sigs/', '../digibyte/contrib/gitian-descriptors/gitian-linux.yml'])
subprocess.check_call('mv build/out/digibyte-*.tar.gz build/out/src/digibyte-*.tar.gz ../digibyte-binaries/'+args.version, shell=True)
if args.windows:
print('\nCompiling ' + args.version + ' Windows')
subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'digibyte='+args.commit, '--url', 'digibyte='+args.url, '../digibyte/contrib/gitian-descriptors/gitian-win.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-win-unsigned', '--destination', '../gitian.sigs/', '../digibyte/contrib/gitian-descriptors/gitian-win.yml'])
subprocess.check_call('mv build/out/digibyte-*-win-unsigned.tar.gz inputs/digibyte-win-unsigned.tar.gz', shell=True)
subprocess.check_call('mv build/out/digibyte-*.zip build/out/digibyte-*.exe ../digibyte-binaries/'+args.version, shell=True)
if args.macos:
print('\nCompiling ' + args.version + ' MacOS')
subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'digibyte='+args.commit, '--url', 'digibyte='+args.url, '../digibyte/contrib/gitian-descriptors/gitian-osx.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-osx-unsigned', '--destination', '../gitian.sigs/', '../digibyte/contrib/gitian-descriptors/gitian-osx.yml'])
subprocess.check_call('mv build/out/digibyte-*-osx-unsigned.tar.gz inputs/digibyte-osx-unsigned.tar.gz', shell=True)
subprocess.check_call('mv build/out/digibyte-*.tar.gz build/out/digibyte-*.dmg ../digibyte-binaries/'+args.version, shell=True)
os.chdir(workdir)
if args.commit_files:
print('\nCommitting '+args.version+' Unsigned Sigs\n')
os.chdir('gitian.sigs')
subprocess.check_call(['git', 'add', args.version+'-linux/'+args.signer])
subprocess.check_call(['git', 'add', args.version+'-win-unsigned/'+args.signer])
subprocess.check_call(['git', 'add', args.version+'-osx-unsigned/'+args.signer])
subprocess.check_call(['git', 'commit', '-m', 'Add '+args.version+' unsigned sigs for '+args.signer])
os.chdir(workdir)
def sign():
global args, workdir
os.chdir('gitian-builder')
if args.windows:
print('\nSigning ' + args.version + ' Windows')
subprocess.check_call(['bin/gbuild', '-i', '--commit', 'signature='+args.commit, '../digibyte/contrib/gitian-descriptors/gitian-win-signer.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-win-signed', '--destination', '../gitian.sigs/', '../digibyte/contrib/gitian-descriptors/gitian-win-signer.yml'])
subprocess.check_call('mv build/out/digibyte-*win64-setup.exe ../digibyte-binaries/'+args.version, shell=True)
subprocess.check_call('mv build/out/digibyte-*win32-setup.exe ../digibyte-binaries/'+args.version, shell=True)
if args.macos:
print('\nSigning ' + args.version + ' MacOS')
subprocess.check_call(['bin/gbuild', '-i', '--commit', 'signature='+args.commit, '../digibyte/contrib/gitian-descriptors/gitian-osx-signer.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-osx-signed', '--destination', '../gitian.sigs/', '../digibyte/contrib/gitian-descriptors/gitian-osx-signer.yml'])
subprocess.check_call('mv build/out/digibyte-osx-signed.dmg ../digibyte-binaries/'+args.version+'/digibyte-'+args.version+'-osx.dmg', shell=True)
os.chdir(workdir)
if args.commit_files:
print('\nCommitting '+args.version+' Signed Sigs\n')
os.chdir('gitian.sigs')
subprocess.check_call(['git', 'add', args.version+'-win-signed/'+args.signer])
subprocess.check_call(['git', 'add', args.version+'-osx-signed/'+args.signer])
subprocess.check_call(['git', 'commit', '-a', '-m', 'Add '+args.version+' signed binary sigs for '+args.signer])
os.chdir(workdir)
def verify():
global args, workdir
os.chdir('gitian-builder')
print('\nVerifying v'+args.version+' Linux\n')
subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-linux', '../digibyte/contrib/gitian-descriptors/gitian-linux.yml'])
print('\nVerifying v'+args.version+' Windows\n')
subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-win-unsigned', '../digibyte/contrib/gitian-descriptors/gitian-win.yml'])
print('\nVerifying v'+args.version+' MacOS\n')
subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-osx-unsigned', '../digibyte/contrib/gitian-descriptors/gitian-osx.yml'])
print('\nVerifying v'+args.version+' Signed Windows\n')
subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-win-signed', '../digibyte/contrib/gitian-descriptors/gitian-win-signer.yml'])
print('\nVerifying v'+args.version+' Signed MacOS\n')
subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-osx-signed', '../digibyte/contrib/gitian-descriptors/gitian-osx-signer.yml'])
os.chdir(workdir)
def main():
global args, workdir
parser = argparse.ArgumentParser(usage='%(prog)s [options] signer version')
parser.add_argument('-c', '--commit', action='store_true', dest='commit', help='Indicate that the version argument is for a commit or branch')
parser.add_argument('-u', '--url', dest='url', default='https://github.com/digibyte/digibyte', help='Specify the URL of the repository. Default is %(default)s')
parser.add_argument('-v', '--verify', action='store_true', dest='verify', help='Verify the Gitian build')
parser.add_argument('-b', '--build', action='store_true', dest='build', help='Do a Gitian build')
parser.add_argument('-s', '--sign', action='store_true', dest='sign', help='Make signed binaries for Windows and MacOS')
parser.add_argument('-B', '--buildsign', action='store_true', dest='buildsign', help='Build both signed and unsigned binaries')
parser.add_argument('-o', '--os', dest='os', default='lwm', help='Specify which Operating Systems the build is for. Default is %(default)s. l for Linux, w for Windows, m for MacOS')
parser.add_argument('-j', '--jobs', dest='jobs', default='2', help='Number of processes to use. Default %(default)s')
parser.add_argument('-m', '--memory', dest='memory', default='2000', help='Memory to allocate in MiB. Default %(default)s')
parser.add_argument('-k', '--kvm', action='store_true', dest='kvm', help='Use KVM instead of LXC')
parser.add_argument('-d', '--docker', action='store_true', dest='docker', help='Use Docker instead of LXC')
parser.add_argument('-S', '--setup', action='store_true', dest='setup', help='Set up the Gitian building environment. Uses LXC. If you want to use KVM, use the --kvm option. Only works on Debian-based systems (Ubuntu, Debian)')
parser.add_argument('-D', '--detach-sign', action='store_true', dest='detach_sign', help='Create the assert file for detached signing. Will not commit anything.')
parser.add_argument('-n', '--no-commit', action='store_false', dest='commit_files', help='Do not commit anything to git')
parser.add_argument('signer', help='GPG signer to sign each build assert file')
parser.add_argument('version', help='Version number, commit, or branch to build. If building a commit or branch, the -c option must be specified')
args = parser.parse_args()
workdir = os.getcwd()
args.linux = 'l' in args.os
args.windows = 'w' in args.os
args.macos = 'm' in args.os
args.is_bionic = b'bionic' in subprocess.check_output(['lsb_release', '-cs'])
if args.buildsign:
args.build=True
args.sign=True
if args.kvm and args.docker:
raise Exception('Error: cannot have both kvm and docker')
args.sign_prog = 'true' if args.detach_sign else 'gpg --detach-sign'
    # Set environment variable USE_LXC or USE_DOCKER, let gitian-builder know that we use lxc or docker
if args.docker:
os.environ['USE_DOCKER'] = '1'
elif not args.kvm:
os.environ['USE_LXC'] = '1'
if not 'GITIAN_HOST_IP' in os.environ.keys():
os.environ['GITIAN_HOST_IP'] = '10.0.3.1'
if not 'LXC_GUEST_IP' in os.environ.keys():
os.environ['LXC_GUEST_IP'] = '10.0.3.5'
# Disable for MacOS if no SDK found
if args.macos and not os.path.isfile('gitian-builder/inputs/MacOSX10.11.sdk.tar.gz'):
print('Cannot build for MacOS, SDK does not exist. Will build for other OSes')
args.macos = False
script_name = os.path.basename(sys.argv[0])
# Signer and version shouldn't be empty
if args.signer == '':
print(script_name+': Missing signer.')
print('Try '+script_name+' --help for more information')
exit(1)
if args.version == '':
print(script_name+': Missing version.')
print('Try '+script_name+' --help for more information')
exit(1)
# Add leading 'v' for tags
args.commit = ('' if args.commit else 'v') + args.version
print(args.commit)
if args.setup:
setup()
os.chdir('digibyte')
subprocess.check_call(['git', 'fetch'])
subprocess.check_call(['git', 'checkout', args.commit])
os.chdir(workdir)
if args.build:
build()
if args.sign:
sign()
if args.verify:
verify()
if __name__ == '__main__':
main()
| {
"content_hash": "c9529e49e3556f583968cdad1aeba007",
"timestamp": "",
"source": "github",
"line_count": 218,
"max_line_length": 232,
"avg_line_length": 57.93119266055046,
"alnum_prop": 0.6428062396072531,
"repo_name": "digibyte/digibyte",
"id": "d188da11728730b8c159683b31822d71592adac8",
"size": "12653",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contrib/gitian-build.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28453"
},
{
"name": "C",
"bytes": "1383691"
},
{
"name": "C++",
"bytes": "6176996"
},
{
"name": "CSS",
"bytes": "126479"
},
{
"name": "HTML",
"bytes": "21860"
},
{
"name": "Java",
"bytes": "30298"
},
{
"name": "M4",
"bytes": "196094"
},
{
"name": "Makefile",
"bytes": "118253"
},
{
"name": "Objective-C",
"bytes": "6742"
},
{
"name": "Objective-C++",
"bytes": "6587"
},
{
"name": "Python",
"bytes": "1705099"
},
{
"name": "QMake",
"bytes": "757"
},
{
"name": "Shell",
"bytes": "98920"
}
],
"symlink_target": ""
} |
def noop(*args):
pass
def dumpmesg():
print "Hi!"
# Try canceltimer with a handle, expects failure
def tryjunk(handle):
try:
canceltimer(handle)
except:
pass
else:
print "Expected canceltimer to fail with handle:",handle
if callfunc == "initialize":
# Try to set 2 timers
handle1 = settimer(20,dumpmesg,())
handle2 = settimer(20,dumpmesg,())
# We should be able to use cancel timer with these handles
val1 = canceltimer(handle1)
val2 = canceltimer(handle2)
if not (val1 and val2):
print "Canceltimer unexpectedly failed. Handle 1 stop:",val1,"Handle 2 stop:",val2
# Try some junk handles
tryjunk(1)
tryjunk(5L)
tryjunk(5.0)
tryjunk("Hi!")
tryjunk(None)
tryjunk(True)
# Get a handle from recvmess
waith = recvmess(getmyip(),<messport>,noop)
# Try canceling this
tryjunk(waith)
stopcomm(waith)
# Try stopping the handles we already stopped
val2 = canceltimer(handle1)
val3 = canceltimer(handle2)
if val2 or val3:
print "Was able to stop an already stopped event! Handle 1 stop:",val3,"Handle 2 stop:",val4
# Test pseudo-valid handles
# This is an invalid argument according to the namespace layer.
#if canceltimer("_EVENT:JUNKHANDLE"):
# print "A junk pseudo-handle was able to stop an event!"
| {
"content_hash": "1fc7038f5df3000a518186d764a3ef7f",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 96,
"avg_line_length": 23.472727272727273,
"alnum_prop": 0.6955848179705655,
"repo_name": "sburnett/seattle",
"id": "94ab3434107aae69a86cd9e5fd90b7b84a90cbce",
"size": "1516",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "repy/tests/ut_repytests_testeventhandles.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "85039"
},
{
"name": "CSS",
"bytes": "44140"
},
{
"name": "Java",
"bytes": "178864"
},
{
"name": "JavaScript",
"bytes": "791008"
},
{
"name": "Perl",
"bytes": "36791"
},
{
"name": "Python",
"bytes": "4683648"
},
{
"name": "Scala",
"bytes": "2587"
},
{
"name": "Shell",
"bytes": "87609"
}
],
"symlink_target": ""
} |
__author__ = 'Amin'
class MouseButton:
def __init__(self):
self.previous_x = 0
self.previous_y = 0
self.is_pressed = False
def update_previous_location(self, x, y):
self.previous_x = x
self.previous_y = y
def get_previous_location_x(self):
return self.previous_x
def get_previous_location_y(self):
return self.previous_y
def callback_pressed(self, x, y):
self.update_previous_location(x, y)
self.is_pressed = True
def callback_released(self, x, y):
self.is_pressed = False
def callback_moved(self, x, y):
if self.is_pressed:
self.update_previous_location(x, y) | {
"content_hash": "d3b597b8a5583f999e1e8bd17e1c5c10",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 47,
"avg_line_length": 24,
"alnum_prop": 0.5905172413793104,
"repo_name": "Michal-Fularz/database_marking_tool",
"id": "8f0f9682e167e9d4322aa1b4523a9fcc334e0384",
"size": "696",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MouseButton.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21346"
}
],
"symlink_target": ""
} |
import numpy as np
import tensorflow as tf
from dnc.controller import BaseController
"""
A 2-Layers feedforward neural network with 128, 256 nodes respectively
"""
class FeedforwardController(BaseController):
def network_vars(self):
        initial_std = lambda in_nodes: min(1e-2, np.sqrt(2.0 / in_nodes))  # builtin min: np.min would treat the 2nd argument as an axis
input_ = self.nn_input_size
self.W1 = tf.Variable(tf.truncated_normal([input_, 128], stddev=initial_std(input_)), name='layer1_W')
self.W2 = tf.Variable(tf.truncated_normal([128, 256], stddev=initial_std(128)), name='layer2_W')
self.b1 = tf.Variable(tf.zeros([128]), name='layer1_b')
self.b2 = tf.Variable(tf.zeros([256]), name='layer2_b')
def network_op(self, X):
l1_output = tf.matmul(X, self.W1) + self.b1
l1_activation = tf.nn.relu(l1_output)
l2_output = tf.matmul(l1_activation, self.W2) + self.b2
l2_activation = tf.nn.relu(l2_output)
return l2_activation
def initials(self):
        initial_std = lambda in_nodes: min(1e-2, np.sqrt(2.0 / in_nodes))  # builtin min: np.min would treat the 2nd argument as an axis
# defining internal weights of the controller
self.interface_weights = tf.Variable(
tf.truncated_normal([self.nn_output_size, self.interface_vector_size], stddev=initial_std(self.nn_output_size)),
name='interface_weights'
)
self.nn_output_weights = tf.Variable(
tf.truncated_normal([self.nn_output_size, self.output_size], stddev=initial_std(self.nn_output_size)),
name='nn_output_weights'
)
self.mem_output_weights = tf.Variable(
tf.truncated_normal([self.word_size * self.read_heads, self.output_size], stddev=initial_std(self.word_size * self.read_heads)),
name='mem_output_weights'
)
| {
"content_hash": "1b5befb19f1941ee76e4c6632c3c4e37",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 141,
"avg_line_length": 38.891304347826086,
"alnum_prop": 0.6383454443823365,
"repo_name": "Mostafa-Samir/DNC-tensorflow",
"id": "608daadc4ae54ad8330b8ee2338377bc48f7e3bf",
"size": "1789",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tasks/copy/feedforward_controller.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "225669"
},
{
"name": "Python",
"bytes": "103063"
},
{
"name": "Shell",
"bytes": "198"
}
],
"symlink_target": ""
} |
"""`get_route` returns a Manhattan route between two ports.
`get_route` only works for individual routes. For routing groups of ports you need to use `get_bundle` instead.
To make a route, you need to supply:
- input port
- output port
- bend
- straight
- taper to taper to wider straights and reduce straight loss (Optional)
To generate a straight route:
1. Generate the backbone of the route.
This is a list of manhattan coordinates that the route would pass through
if it used only sharp bends (right angles)
2. Replace the corners by bend references
(with rotation and position computed from the manhattan backbone)
3. Add tapers if needed and if space permits
4. Generate straight portions in between tapers or bends
A `Route` is a dataclass with:
- references: list of references for tapers, bends and straight waveguides
- ports: a dict of port name to Port, usually two ports "input" and "output"
- length: a float with the length of the route
"""
from functools import partial
from typing import Callable, Optional, Union
import numpy as np
import gdsfactory as gf
from gdsfactory.component import Component
from gdsfactory.components.bend_euler import bend_euler
from gdsfactory.components.straight import straight as straight_function
from gdsfactory.components.taper import taper as taper_function
from gdsfactory.components.via_corner import via_corner
from gdsfactory.components.wire import wire_corner
from gdsfactory.cross_section import metal2, metal3
from gdsfactory.port import Port
from gdsfactory.routing.manhattan import round_corners, route_manhattan
from gdsfactory.types import (
ComponentSpec,
Coordinates,
CrossSectionSpec,
MultiCrossSectionAngleSpec,
Route,
)
def get_route(
input_port: Port,
output_port: Port,
bend: ComponentSpec = bend_euler,
with_sbend: bool = True,
straight: ComponentSpec = straight_function,
taper: Optional[ComponentSpec] = None,
start_straight_length: float = 0.01,
end_straight_length: float = 0.01,
min_straight_length: float = 0.01,
cross_section: Union[CrossSectionSpec, MultiCrossSectionAngleSpec] = "strip",
**kwargs,
) -> Route:
"""Returns a Manhattan Route between 2 ports.
The references are straights, bends and tapers.
`get_route` is an automatic version of `get_route_from_steps`.
Args:
input_port: start port.
output_port: end port.
bend: bend spec.
with_sbend: add sbend in case there are routing errors.
straight: straight spec.
taper: taper spec.
start_straight_length: length of starting straight.
end_straight_length: length of end straight.
min_straight_length: min length of straight for any intermediate segment.
cross_section: spec.
kwargs: cross_section settings.
.. plot::
:include-source:
import gdsfactory as gf
c = gf.Component('sample_connect')
mmi1 = c << gf.components.mmi1x2()
mmi2 = c << gf.components.mmi1x2()
mmi2.move((40, 20))
route = gf.routing.get_route(mmi1.ports["o2"], mmi2.ports["o1"], radius=5)
c.add(route.references)
c.plot()
"""
bend90 = (
bend
if isinstance(bend, Component)
else gf.get_component(bend, cross_section=cross_section, **kwargs)
)
if taper:
if isinstance(cross_section, list):
raise ValueError(
"Tapers not implemented for routes made from multiple cross_sections."
)
x = gf.get_cross_section(cross_section, **kwargs)
taper_length = x.taper_length
width1 = input_port.width
auto_widen = x.auto_widen
width2 = x.width_wide if auto_widen else width1
taper = gf.get_component(
taper,
length=taper_length,
width1=input_port.width,
width2=width2,
cross_section=cross_section,
**kwargs,
)
return route_manhattan(
input_port=input_port,
output_port=output_port,
straight=straight,
taper=taper,
start_straight_length=start_straight_length,
end_straight_length=end_straight_length,
min_straight_length=min_straight_length,
bend=bend90,
with_sbend=with_sbend,
cross_section=cross_section,
**kwargs,
)
get_route_electrical = partial(
get_route,
bend=wire_corner,
start_straight_length=10,
end_straight_length=10,
cross_section="metal_routing",
taper=None,
min_straight_length=2.0,
)
get_route_electrical_m2 = partial(
get_route,
bend=wire_corner,
start_straight_length=25,
end_straight_length=25,
cross_section=metal2,
taper=None,
min_straight_length=2.0,
)
get_route_electrical_multilayer = partial(
get_route_electrical,
bend=via_corner,
cross_section=[(metal2, (0, 180)), (metal3, (90, 270))],
)
def get_route_from_waypoints(
waypoints: Coordinates,
bend: Callable = bend_euler,
straight: Callable = straight_function,
taper: Optional[Callable] = taper_function,
cross_section: CrossSectionSpec = "strip",
**kwargs,
) -> Route:
"""Returns a route formed by the given waypoints with bends instead of \
corners and optionally tapers in straight sections. Tapering to wider \
straights reduces the optical loss. `get_route_from_waypoints` is a manual \
    version of `get_route`. `get_route_from_steps` is a more concise and \
convenient version of `get_route_from_waypoints` also available in \
gf.routing.
Args:
waypoints: Coordinates that define the route
bend: function that returns bends
straight: function that returns straight waveguides
taper: function that returns tapers
cross_section:
kwargs: cross_section settings
.. plot::
:include-source:
import gdsfactory as gf
c = gf.Component("waypoints_sample")
w = gf.components.straight()
left = c << w
right = c << w
right.move((100, 80))
obstacle = gf.components.rectangle(size=(100, 10))
obstacle1 = c << obstacle
obstacle2 = c << obstacle
obstacle1.ymin = 40
obstacle2.xmin = 25
p0x, p0y = left.ports["o2"].center
p1x, p1y = right.ports["o2"].center
o = 10 # vertical offset to overcome bottom obstacle
ytop = 20
routes = gf.routing.get_route_from_waypoints(
[
(p0x, p0y),
(p0x + o, p0y),
(p0x + o, ytop),
(p1x + o, ytop),
(p1x + o, p1y),
(p1x, p1y),
],
)
c.add(routes.references)
c.plot()
"""
if isinstance(cross_section, list):
taper = None
elif taper:
x = gf.get_cross_section(cross_section, **kwargs)
auto_widen = x.auto_widen
width1 = x.width
width2 = x.width_wide if auto_widen else width1
taper_length = x.taper_length
if auto_widen:
taper = (
taper(
length=taper_length,
width1=width1,
width2=width2,
cross_section=cross_section,
**kwargs,
)
if callable(taper)
else taper
)
else:
taper = None
waypoints = np.array(waypoints)
kwargs.pop("route_filter", "")
return round_corners(
points=waypoints,
bend=bend,
straight=straight,
taper=taper,
cross_section=cross_section,
**kwargs,
)
get_route_from_waypoints_electrical = gf.partial(
get_route_from_waypoints, bend=wire_corner, cross_section="metal_routing"
)
get_route_from_waypoints_electrical_m2 = gf.partial(
get_route_from_waypoints, bend=wire_corner, cross_section=metal2
)
get_route_from_waypoints_electrical_multilayer = gf.partial(
get_route_from_waypoints,
bend=via_corner,
cross_section=[(metal2, (0, 180)), (metal3, (90, 270))],
)
if __name__ == "__main__":
# w = gf.components.mmi1x2()
# c = gf.Component()
# c << w
# route = get_route(w.ports["o2"], w.ports["o1"], layer=(2, 0), width=2)
# cc = c.add(route.references)
# cc.show(show_ports=True)
c = gf.Component("multi-layer")
ptop = c << gf.components.pad_array()
pbot = c << gf.components.pad_array(orientation=90)
ptop.movex(300)
ptop.movey(300)
route = get_route_electrical_multilayer(
ptop.ports["e11"],
pbot.ports["e11"],
end_straight_length=100,
)
c.add(route.references)
c.show()
# import gdsfactory as gf
# c = gf.Component("sample_connect")
# mmi1 = c << gf.components.mmi1x2()
# mmi2 = c << gf.components.mmi1x2()
# mmi2.move((200, 50))
# route = gf.routing.get_route(
# mmi1.ports["o3"],
# mmi2.ports["o1"],
# cross_section=gf.cross_section.strip,
# auto_widen=True,
# width_wide=2,
# auto_widen_minimum_length=100,
# )
# c.add(route.references)
# c.show()
| {
"content_hash": "74db1770f016df5e7e3952df7fd2c7f9",
"timestamp": "",
"source": "github",
"line_count": 324,
"max_line_length": 113,
"avg_line_length": 28.567901234567902,
"alnum_prop": 0.6211106309420916,
"repo_name": "gdsfactory/gdsfactory",
"id": "a1084253f5565940d2e30d294452ecaf483f6c56",
"size": "9256",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gdsfactory/routing/get_route.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "605"
},
{
"name": "Dockerfile",
"bytes": "31"
},
{
"name": "Makefile",
"bytes": "4572"
},
{
"name": "Python",
"bytes": "2471982"
},
{
"name": "Shell",
"bytes": "671"
},
{
"name": "XS",
"bytes": "10045"
}
],
"symlink_target": ""
} |
from os import environ as env
import datetime
import pytz
# dev mode?
DEBUG = env.get("FLASK_ENV", "development") != "production"
TEST = env.get("FLASK_ENV") == "test"
DEBUG_CACHE = env.get("FLASK_DEBUG_CACHE", "false") == "true"
RUN_PERIODIC_TASKS = env.get("RUN_PERIODIC_TASKS") == "true"
WTF_CSRF_ENABLED = False if TEST else True
SECRET_KEY = env.get("FLASK_SECRET_KEY", "NSTHNSTHaoensutCGSRCGnsthoesucgsrSNTH")
GOOGLE_ANALYTICS_ID = "UA-10305579-1"
SQLALCHEMY_DATABASE_URI = env.get(
"SQLALCHEMY_DATABASE_URI",
"postgresql+psycopg2://pmg:pmg@localhost/pmg_test?client_encoding=utf8"
if TEST
else "postgresql+psycopg2://pmg:pmg@localhost/pmg?client_encoding=utf8",
)
SQLALCHEMY_ECHO = False
# This is required only by pmg.models.resources.delete_file_from_s3 and can be turned off if
# that is changed to use sqlalchemy events
SQLALCHEMY_TRACK_MODIFICATIONS = True
RESULTS_PER_PAGE = 50
# The V2 API can support much higher volumes because callers
# can choose the fields they want
RESULTS_PER_PAGE_V2 = 500
# Premium content before this date is free
# Calculated as 1 January of last year
PREMIUM_FREE_BEFORE = datetime.datetime(
datetime.datetime.today().year - 1, 1, 1, tzinfo=pytz.utc
)
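# e.g. when run at any point during 2016 this evaluates to
# datetime.datetime(2015, 1, 1, tzinfo=pytz.utc).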
ES_SERVER = env.get("ES_SERVER", "http://localhost:9200")
SEARCH_REINDEX_CHANGES = not DEBUG # reindex changes to models
SEARCH_RESULTS_PER_PAGE = 20
SOUNDCLOUD_APP_KEY_ID = env.get("SOUNDCLOUD_APP_KEY_ID", "")
SOUNDCLOUD_APP_KEY_SECRET = env.get("SOUNDCLOUD_APP_KEY_SECRET", "")
SOUNDCLOUD_USERNAME = env.get("SOUNDCLOUD_USERNAME", "")
SOUNDCLOUD_PASSWORD = env.get("SOUNDCLOUD_PASSWORD", "")
MAX_SOUNDCLOUD_BATCH = int(env.get("MAX_SOUNDCLOUD_BATCH", "1"))
MAX_SOUNDCLOUD_RETRIES = int(env.get("MAX_SOUNDCLOUD_RETRIES", "3"))
SOUNDCLOUD_PERIOD_HOURS = env.get("SOUNDCLOUD_PERIOD_HOURS", "6")
S3_BUCKET = env.get("S3_BUCKET", "pmg-assets")
STATIC_HOST = env.get(
"STATIC_HOST", "http://%s.s3-website-eu-west-1.amazonaws.com/" % S3_BUCKET
)
UPLOAD_PATH = "/tmp/pmg_upload/"
if DEBUG:
RECAPTCHA_PUBLIC_KEY = env.get(
"RECAPTCHA_PUBLIC_KEY", "6LeIxAcTAAAAAJcZVRqyHh71UMIEGNQ_MXjiZKhI"
)
RECAPTCHA_PRIVATE_KEY = env.get(
"RECAPTCHA_PRIVATE_KEY", "6LeIxAcTAAAAAGG-vFI1TnRWxMZNFuojJ4WifJWe"
)
else:
RECAPTCHA_PUBLIC_KEY = env.get("RECAPTCHA_PUBLIC_KEY")
RECAPTCHA_PRIVATE_KEY = env.get("RECAPTCHA_PRIVATE_KEY")
# must match client_max_body_size in nginx.conf
MAX_CONTENT_LENGTH = 100 * 1024 * 1024 # size cap on uploads
# uploadable files
ALLOWED_EXTENSIONS = set(
[
"doc",
"docx",
"gif",
"jpg",
"jpeg",
"mp3",
"pdf",
"png",
"ppt",
"pptx",
"rtf",
"txt",
"wav",
"xls",
"xlsx",
]
)
# Sendgrid
SENDGRID_API_KEY = env.get("SENDGRID_API_KEY")
SENDGRID_TRANSACTIONAL_TEMPLATE_ID = "2ef9656f-db37-4072-9ed8-449368b73617"
# Flask-Mail
MAIL_SERVER = env.get("MAIL_SERVER", "smtp.sendgrid.com")
MAIL_PORT = int(env.get("MAIL_PORT", "465"))
MAIL_USE_SSL = env.get("MAIL_USE_SSL", "true") == "true"
MAIL_USERNAME = env.get("MAIL_USERNAME", "apikey")
MAIL_PASSWORD = env.get("MAIL_PASSWORD", SENDGRID_API_KEY)
MAIL_DEFAULT_SENDER = '"PMG Subscriptions" <subscribe@pmg.org.za>'
# Flask-Security config
SECURITY_URL_PREFIX = "/user"
SECURITY_PASSWORD_HASH = "pbkdf2_sha512"
SECURITY_PASSWORD_SALT = env.get(
"SECURITY_PASSWORD_SALT", "ioaefroijaAMELRK#$(aerieuh984akef#$graerj"
)
SECURITY_EMAIL_SENDER = MAIL_DEFAULT_SENDER
SECURITY_TOKEN_AUTHENTICATION_HEADER = "Authentication-Token"
# Flask-Security URLs, overridden because they don't put a / at the end
SECURITY_LOGIN_URL = "/login/"
SECURITY_LOGOUT_URL = "/logout/"
SECURITY_CHANGE_URL = "/change-password/"
SECURITY_RESET_URL = (
"/forgot-password" # Trailing slash here was causing double slash in URLs in emails
)
SECURITY_REGISTER_URL = "/register/"
# Flask-Security email subject lines
SECURITY_EMAIL_SUBJECT_REGISTER = (
"Please confirm your email address to complete PMG signup"
)
SECURITY_EMAIL_SUBJECT_PASSWORD_RESET = (
"Password reset instructions for your PMG account"
)
SECURITY_EMAIL_SUBJECT_CONFIRM = "Email address confirmation for your PMG account"
# Flask-Security features
SECURITY_CONFIRMABLE = True
SECURITY_LOGIN_WITHOUT_CONFIRMATION = True
SECURITY_REGISTERABLE = True
SECURITY_RECOVERABLE = True
SECURITY_TRACKABLE = True
SECURITY_CHANGEABLE = True
SERVER_NAME = env.get("SERVER_NAME", "pmg.test:5000")
FRONTEND_HOST = env.get("FRONTEND_HOST", "http://pmg.test:5000/")
SESSION_COOKIE_DOMAIN = env.get("SESSION_COOKIE_DOMAIN", "pmg.test")
| {
"content_hash": "0ffba6420d811916e08b61fb7c3fe41b",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 92,
"avg_line_length": 32.29370629370629,
"alnum_prop": 0.7039844088349935,
"repo_name": "Code4SA/pmg-cms-2",
"id": "1c7c4ae194435af3d87ce462e7e34c71496793b3",
"size": "4618",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config/config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "204619"
},
{
"name": "HTML",
"bytes": "361071"
},
{
"name": "JavaScript",
"bytes": "109536"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "469838"
}
],
"symlink_target": ""
} |
server = {
'port': '8080',
'host': '0.0.0.0'
}
# Pecan Application Configurations
app = {
'root': 'ironic_inventory.api.controllers.root.RootController',
'modules': ['ironic_inventory'],
'static_root': '%(confdir)s/../../public',
'template_path': '%(confdir)s/../templates',
'debug': True,
'errors': {
'404': '/error/404',
'__force_dict__': True
}
}
# Custom Configurations must be in Python dictionary format::
#
# foo = {'bar':'baz'}
#
# All configurations are accessible at::
# pecan.conf
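#
# For example (a minimal sketch, assuming a running Pecan app)::
#
#     from pecan import conf
#     conf.server.port   # -> '8080'
#     conf.app.debug     # -> True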
| {
"content_hash": "f37bb5feda9cf83fb54bfc1693dff4b0",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 67,
"avg_line_length": 22.75,
"alnum_prop": 0.5860805860805861,
"repo_name": "softlayer/ironic-inventory-integrator",
"id": "b0ed831f32972767271a96e07d5ed2bece2f420a",
"size": "579",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ironic_inventory/tests/config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "569"
},
{
"name": "HTML",
"bytes": "1590"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "36729"
}
],
"symlink_target": ""
} |
"""createNewDatabase.py: Creates new database in the R2DM service."""
__author__ = "Filip Lemic"
__copyright__ = "Copyright 2015, EVARILOS Project"
__version__ = "1.0.0"
__maintainer__ = "Filip Lemic"
__email__ = "lemic@tkn.tu-berlin.de"
__status__ = "Development"
import sys
import urllib2
import json
# The URL where the server listens
apiURL = 'http://localhost:5000/'
# Database name
db_id = 'test_db'
req = urllib2.Request(apiURL + 'evarilos/raw_data/v1.0/database', headers={"Content-Type": "application/json"}, data = db_id)
resp = urllib2.urlopen(req)
response = json.loads(resp.read())
print response | {
"content_hash": "c39ae78c0a335d188bd87ae43037aa8e",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 125,
"avg_line_length": 25.541666666666668,
"alnum_prop": 0.6949429037520392,
"repo_name": "evarilos/R2DM-EVARILOS",
"id": "26703e509eb067e3267ac2f4eb1e1c4f12f8fa4e",
"size": "660",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/createNewDatabase.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Protocol Buffer",
"bytes": "4063"
},
{
"name": "Python",
"bytes": "44509"
}
],
"symlink_target": ""
} |
from django.apps import AppConfig
class cookie_app_Config(AppConfig):  # change the verbose name of cookie_app
name = 'cookie_app'
verbose_name = "Business Database" | {
"content_hash": "4fa712c74170e586b20a0dae1f66549e",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 74,
"avg_line_length": 34.4,
"alnum_prop": 0.75,
"repo_name": "nathanielbecker/business-contacter-django-app",
"id": "db6499f740adfffe1076f1e4848055de94ce3376",
"size": "172",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "myproject/cookie_app/apps.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "12312"
},
{
"name": "Python",
"bytes": "118081"
},
{
"name": "Shell",
"bytes": "7046"
}
],
"symlink_target": ""
} |
import argparse
import json
import os
import re
import zipfile
import torch
####################################################################################################
def recursive_print(name, val, spaces=0):
# Format the message.
if name is None:
msg = None
else:
fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
msg = fmt.format(name)
# Print and recurse (if needed).
if isinstance(val, dict):
if msg is not None:
print(msg)
for k in val.keys():
recursive_print(k, val[k], spaces + 2)
elif isinstance(val, torch.Tensor):
print(msg, ":", val.size())
else:
print(msg, ":", val)
####################################################################################################
def convert_megatron_checkpoint(args, input_state_dict):
# The converted output model.
output_state_dict = {}
# The model.
model = input_state_dict["model"]
# The language model.
lm = model["language_model"]
# The embeddings.
embeddings = lm["embedding"]
# The word embeddings.
word_embeddings = embeddings["word_embeddings"]["weight"]
# Store the word embeddings.
output_state_dict["bert.embeddings.word_embeddings.weight"] = word_embeddings
# The position embeddings.
pos_embeddings = embeddings["position_embeddings"]["weight"]
# Trained for 512 x 1024.
assert pos_embeddings.size(0) == 512 and pos_embeddings.size(1) == 1024
# Store the position embeddings.
output_state_dict["bert.embeddings.position_embeddings.weight"] = pos_embeddings
# The token-type embeddings.
tokentype_embeddings = embeddings["tokentype_embeddings"]["weight"]
# Store the position embeddings.
output_state_dict["bert.embeddings.token_type_embeddings.weight"] = tokentype_embeddings
# The transformer.
transformer = lm["transformer"]
# The regex to extract layer names.
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")
# The simple map of names for "automated" rules.
megatron_to_transformers = {
"attention.dense": ".attention.output.dense.",
"mlp.dense_h_to_4h": ".intermediate.dense.",
"mlp.dense_4h_to_h": ".output.dense.",
}
# Keep track of the attention/query/value tensor.
attention_qkv_weight = None
# Extract the layers.
for key, val in transformer.items():
# Match the name.
m = layer_re.match(key)
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
layer_idx = int(m.group(1))
# The name of the operation.
op_name = m.group(2)
# Is it a weight or a bias?
weight_or_bias = m.group(3)
# The name of the layer.
layer_name = f"bert.encoder.layer.{layer_idx}"
# For layernorm(s), simply store the layer norm.
if op_name.endswith("layernorm"):
ln_name = "attention.ln" if op_name.startswith("input") else "ln"
output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val
# Transpose the QKV matrix.
elif op_name == "attention.query_key_value" and weight_or_bias == "weight":
# Make sure the QKV pointer is nil.
assert attention_qkv_weight is None, ""
# Store the tensor as we need the bias as well to interleave QKV and biases.
attention_qkv_weight = val
# Transpose the bias.
elif op_name == "attention.query_key_value" and weight_or_bias == "bias":
# Make sure we read the weight tensor.
assert attention_qkv_weight is not None, ""
# Split the QKV matrix into Q, K and V. Megatron stores Q,K,V interleaved.
q = attention_qkv_weight[0 * 1024 : 1 * 1024, :]
k = attention_qkv_weight[1 * 1024 : 2 * 1024, :]
v = attention_qkv_weight[2 * 1024 : 3 * 1024, :]
# Split the bias.
q_bias = val[0 * 1024 : 1 * 1024]
k_bias = val[1 * 1024 : 2 * 1024]
v_bias = val[2 * 1024 : 3 * 1024]
# Store.
output_state_dict[f"{layer_name}.attention.self.query.weight"] = q
output_state_dict[f"{layer_name}.attention.self.query.bias"] = q_bias
output_state_dict[f"{layer_name}.attention.self.key.weight"] = k
output_state_dict[f"{layer_name}.attention.self.key.bias"] = k_bias
output_state_dict[f"{layer_name}.attention.self.value.weight"] = v
output_state_dict[f"{layer_name}.attention.self.value.bias"] = v_bias
# Clear the stored tensor.
attention_qkv_weight = None
# Copy weights and biases as is.
elif weight_or_bias in ["weight", "bias"]:
out_name = megatron_to_transformers[op_name]
output_state_dict[layer_name + out_name + weight_or_bias] = val
# The final layernorm.
output_state_dict["bert.encoder.ln.weight"] = transformer["final_layernorm.weight"]
output_state_dict["bert.encoder.ln.bias"] = transformer["final_layernorm.bias"]
# The config.
output_config = {
"vocab_size": word_embeddings.size(0),
"hidden_size": 1024,
"num_hidden_layers": 24,
"num_attention_heads": 16,
"hidden_act": "gelu_new",
"intermediate_size": 4096,
"hidden_dropout_prob": 0.1,
"attention_probs_dropout_prob": 0.1,
"max_position_embeddings": 512,
"type_vocab_size": 2,
"initializer_range": 0.2,
"layer_norm_eps": 1e-12,
"gradient_checkpointing": False,
"position_embedding_type": "absolute",
"use_cache": False,
}
# The pooler.
pooler = lm["pooler"]
# Store the matrix and the bias.
output_state_dict["bert.pooler.dense.weight"] = pooler["dense.weight"]
output_state_dict["bert.pooler.dense.bias"] = pooler["dense.bias"]
# The LM head from Megatron (for RACE).
lm_head = model["lm_head"]
# The transform matrix.
output_state_dict["cls.predictions.transform.dense.weight"] = lm_head["dense.weight"]
output_state_dict["cls.predictions.transform.dense.bias"] = lm_head["dense.bias"]
# The transform LN.
output_state_dict["cls.predictions.transform.LayerNorm.weight"] = lm_head["layernorm.weight"]
output_state_dict["cls.predictions.transform.LayerNorm.bias"] = lm_head["layernorm.bias"]
# For the decoder, we replicate the weights.
output_state_dict["cls.predictions.decoder.weight"] = word_embeddings
output_state_dict["cls.predictions.bias"] = lm_head["bias"]
    # The classifier from Megatron (for MNLI).
binary_head = model["binary_head"]
# Store the classifier.
output_state_dict["cls.seq_relationship.weight"] = binary_head["weight"]
output_state_dict["cls.seq_relationship.bias"] = binary_head["bias"]
# It should be done!
return output_state_dict, output_config
####################################################################################################
def main():
# Create the argument parser.
parser = argparse.ArgumentParser()
parser.add_argument("--print-checkpoint-structure", action="store_true")
parser.add_argument("path_to_checkpoint", type=str, help="Path to the ZIP file containing the checkpoint")
args = parser.parse_args()
    # Extract the directory containing the checkpoint (converted files are written there).
basename = os.path.dirname(args.path_to_checkpoint)
# Load the model.
print(f'Extracting PyTorch state dictionary from "{args.path_to_checkpoint}"')
with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
input_state_dict = torch.load(pytorch_dict, map_location="cpu")
# Convert.
print("Converting")
output_state_dict, output_config = convert_megatron_checkpoint(args, input_state_dict)
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(None, output_state_dict)
# Store the config to file.
output_config_file = os.path.join(basename, "config.json")
print(f'Saving config to "{output_config_file}"')
with open(output_config_file, "w") as f:
json.dump(output_config, f)
# Store the state_dict to file.
output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
print(f'Saving checkpoint to "{output_checkpoint_file}"')
torch.save(output_state_dict, output_checkpoint_file)
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
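# Example invocation (hypothetical path):
#   python convert_megatron_bert_checkpoint.py --print-checkpoint-structure \
#       /path/to/megatron_bert/checkpoint.zip
# The converted config.json and pytorch_model.bin are written to the directory
# containing the ZIP file.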
| {
"content_hash": "9b4acca39c576a7ffa98da2626d945fa",
"timestamp": "",
"source": "github",
"line_count": 247,
"max_line_length": 110,
"avg_line_length": 35.72874493927125,
"alnum_prop": 0.5858356940509915,
"repo_name": "huggingface/pytorch-transformers",
"id": "3d7f03dcbb767c5ecb53c6dfa00f5b6e9c063757",
"size": "9641",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/transformers/models/megatron_bert/convert_megatron_bert_checkpoint.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "194"
},
{
"name": "Jupyter Notebook",
"bytes": "535623"
},
{
"name": "Python",
"bytes": "897445"
}
],
"symlink_target": ""
} |
import asyncio
import logging
log = logging.getLogger(__name__)
class Coworker(object):
""" Generic worker to perform concurrent tasks using coroutine IO loop. """
def __init__(self, max_concurrency=10, sliding_window=True):
"""
Initialize worker
:param int max_concurrency: How many tasks can be done at the same time. Defaults to 10.
:param bool sliding_window: Start a task as soon as there is an available slot based on concurrency instead of
waiting for all concurrent tasks to be completed first.
"""
#: Queue for tasks to be performed
self.task_queue = asyncio.Queue()
#: Futures for each task: task => {'result': future, 'do': future}
self.task_futures = {}
#: Exit the worker when idle (all tasks are done / no more tasks queued)
self.exit_when_idle = False
#: Current number of concurrent tasks being performed
self.concurrency = 0
        #: Maximum number of concurrent tasks to perform
self.max_concurrency = max_concurrency
#: A future to indicate when new tasks need to be checked / can be worked on.
self._check_task_future = asyncio.Future()
#: Is this worker finished?
self.is_finished = False
#: Start a task as soon as there is an available slot based on concurrency instead of waiting until
#: all concurrent tasks are done.
self.sliding_window = sliding_window
        #: Log debug. Having this distinct flag allows for debugging when needed without polluting upstream
#: debug logs.
self.debug = False
@property
def idle(self):
""" Worker has nothing to do and is doing nothing """
return self.task_queue.empty() and not self.task_futures
@property
def should_exit(self):
return self.idle and self.exit_when_idle
async def start(self, tasks=None):
"""
Start the worker.
:param list tasks: List of tasks to do. If provided, worker will exit immediately after all tasks
are done. If that's not desired, use :meth:`self.add_task` instead.
:return: List of futures for each task in the same order.
"""
self.is_finished = False
task_futures = []
if tasks:
task_futures = self.add_tasks(tasks)
self.exit_when_idle = True
self._log('on_start')
await self.on_start()
while not self.should_exit:
self._log('_do_tasks', '(Check for new tasks and start them)')
await self._do_tasks()
self._log('_wait_for_tasks', '(Wait for new tasks or existing tasks)')
await self._wait_for_tasks()
self._log('on_finish')
await self.on_finish()
self.is_finished = True
return task_futures
def _check_tasks(self):
"""" Signal that there are new tasks or tasks were completed. """
if not self._check_task_future.done():
self._check_task_future.set_result(True)
async def _wait_for_tasks(self):
"""" Wait for new tasks or tasks to be completed. """
await self._check_task_future
self._check_task_future = asyncio.Future()
@property
def available_slots(self):
""" Number of available slots to do tasks based on concurrency and window settings """
if self.sliding_window:
return self.max_concurrency - self.concurrency
else:
return self.max_concurrency if not self.concurrency else 0
async def _do_tasks(self):
""" Perform the tasks based on available concurrency slots """
for _ in range(self.available_slots):
try:
task = self.task_queue.get_nowait()
except asyncio.QueueEmpty:
break
if task in self.task_futures: # If it is not in here, it was cancelled. See :meth:`self.cancel_task`
task_future = self._do_task(task)
self.task_futures[task]['do'] = asyncio.ensure_future(task_future)
async def _do_task(self, task):
""" Perform the task and call :meth:`self.on_start_task` and :meth:`self.on_finish_task` """
self.concurrency += 1
try:
try:
self._log('on_start_task', task)
await self.on_start_task(task)
self._log('do_task', task)
result = await self.do_task(task)
self._log('on_finish_task', task)
await self.on_finish_task(task, result)
except Exception as e:
self.task_futures[task]['result'].set_exception(e)
else:
self.task_futures[task]['result'].set_result(result)
del self.task_futures[task]
except KeyError: # Task was cancelled / removed from self.task_futures by :meth:`self.cancel_task`
pass
self._check_tasks()
self.concurrency -= 1
async def do_task(self, task):
""" Perform the task. Sub-class should override this to do something more meaningful. """
print('Performing task', task)
def add_tasks(self, tasks):
"""
Add task(s) to queue
:param object|list tasks: A single or list of task(s) to add to the queue.
        :return: If a single task is given, then returns a single task future that will contain the result from
:meth:`self.do_task`. If a list of tasks is given, then a list of task futures, one for each task.
Note that if hash(task) is the same as another/existing task,
the same future will be returned, and the task is only performed once.
If it is desired to perform the same task multiple times / distinctly, then the task
will need to be wrapped in another object that has a unique hash.
"""
if not tasks:
raise ValueError('Please provide tasks to add')
is_list = isinstance(tasks, list)
tasks = tasks if is_list else [tasks]
task_futures = []
for task in tasks:
if task in self.task_futures:
task_futures.append(self.task_futures[task]['result'])
else:
task_future = asyncio.Future()
task_futures.append(task_future)
self.task_futures[task] = {} # Do not use defaultdict as that creates the task key from other places.
self.task_futures[task]['result'] = task_future
self.task_queue.put_nowait(task)
self._check_tasks()
if is_list:
return task_futures
else:
return task_future
async def stop(self):
""" Stop the worker by canceling all tasks and then wait for worker to finish. """
for task in self.task_futures:
self.cancel_task(task)
self.exit_when_idle = True
self._check_tasks()
while not self.is_finished:
await asyncio.sleep(0.1)
def cancel_task(self, task):
""" Cancel a task """
if task in self.task_futures:
for task_future in self.task_futures[task].values():
task_future.cancel()
# No way to remove task from self.task_queue so using existence in self.task_futures to indicate cancel.
del self.task_futures[task]
async def on_start(self):
""" Invoked before worker starts. Subclass should override if needed. """
async def on_finish(self):
""" Invoked after worker completes all tasks before exiting worker. Subclass should override if needed. """
async def on_start_task(self, task):
"""
Invoked before starting the task. Subclass should override if needed.
:param task: Task that will start
"""
async def on_finish_task(self, task, result):
""""
Invoked after the task is completed. Subclass should override if needed.
:param task: Task that was finished
:param result: Return value from :meth:`self.do_task(task)`
"""
def _log(self, action, detail=None):
"""" Log action with optional detail."""
if self.debug:
log.debug('%s %s %s', self.__class__.__name__, action, '' if detail is None else detail)
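if __name__ == '__main__':
    # Minimal usage sketch (not part of the library API): subclass Coworker,
    # override do_task, and run a batch of tasks with start(). The futures
    # returned by start() hold each task's do_task() result once the worker exits.
    class SquaringWorker(Coworker):
        async def do_task(self, task):
            await asyncio.sleep(0)  # stand-in for real IO-bound work
            return task * task

    async def demo():
        worker = SquaringWorker(max_concurrency=2)
        futures = await worker.start(tasks=[1, 2, 3, 4])
        return [f.result() for f in futures]

    print(asyncio.get_event_loop().run_until_complete(demo()))  # -> [1, 4, 9, 16]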
| {
"content_hash": "2763ae2969a7f78f4ee1fb1e49145cc9",
"timestamp": "",
"source": "github",
"line_count": 242,
"max_line_length": 118,
"avg_line_length": 34.78512396694215,
"alnum_prop": 0.5927773818009028,
"repo_name": "maxzheng/coworker",
"id": "04faf042068807e5a025b2161a408ec2ce8fd7cb",
"size": "8418",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "coworker/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15935"
}
],
"symlink_target": ""
} |
from juju.client.facade import Type, ReturnMapping
from juju.client.old_clients._definitions import *
class UniterFacade(Type):
name = 'Uniter'
version = 18
schema = {'definitions': {'APIHostPortsResult': {'additionalProperties': False,
'properties': {'servers': {'items': {'items': {'$ref': '#/definitions/HostPort'},
'type': 'array'},
'type': 'array'}},
'required': ['servers'],
'type': 'object'},
'Action': {'additionalProperties': False,
'properties': {'name': {'type': 'string'},
'parameters': {'patternProperties': {'.*': {'additionalProperties': True,
'type': 'object'}},
'type': 'object'},
'receiver': {'type': 'string'},
'tag': {'type': 'string'}},
'required': ['tag', 'receiver', 'name'],
'type': 'object'},
'ActionExecutionResult': {'additionalProperties': False,
'properties': {'action-tag': {'type': 'string'},
'message': {'type': 'string'},
'results': {'patternProperties': {'.*': {'additionalProperties': True,
'type': 'object'}},
'type': 'object'},
'status': {'type': 'string'}},
'required': ['action-tag', 'status'],
'type': 'object'},
'ActionExecutionResults': {'additionalProperties': False,
'properties': {'results': {'items': {'$ref': '#/definitions/ActionExecutionResult'},
'type': 'array'}},
'type': 'object'},
'ActionMessage': {'additionalProperties': False,
'properties': {'message': {'type': 'string'},
'timestamp': {'format': 'date-time',
'type': 'string'}},
'required': ['timestamp', 'message'],
'type': 'object'},
'ActionMessageParams': {'additionalProperties': False,
'properties': {'messages': {'items': {'$ref': '#/definitions/EntityString'},
'type': 'array'}},
'required': ['messages'],
'type': 'object'},
'ActionResult': {'additionalProperties': False,
'properties': {'action': {'$ref': '#/definitions/Action'},
'completed': {'format': 'date-time',
'type': 'string'},
'enqueued': {'format': 'date-time',
'type': 'string'},
'error': {'$ref': '#/definitions/Error'},
'log': {'items': {'$ref': '#/definitions/ActionMessage'},
'type': 'array'},
'message': {'type': 'string'},
'output': {'patternProperties': {'.*': {'additionalProperties': True,
'type': 'object'}},
'type': 'object'},
'started': {'format': 'date-time',
'type': 'string'},
'status': {'type': 'string'}},
'type': 'object'},
'ActionResults': {'additionalProperties': False,
'properties': {'results': {'items': {'$ref': '#/definitions/ActionResult'},
'type': 'array'}},
'type': 'object'},
'Address': {'additionalProperties': False,
'properties': {'cidr': {'type': 'string'},
'config-type': {'type': 'string'},
'is-secondary': {'type': 'boolean'},
'scope': {'type': 'string'},
'space-id': {'type': 'string'},
'space-name': {'type': 'string'},
'type': {'type': 'string'},
'value': {'type': 'string'}},
'required': ['value', 'type', 'scope'],
'type': 'object'},
'ApplicationStatusResult': {'additionalProperties': False,
'properties': {'application': {'$ref': '#/definitions/StatusResult'},
'error': {'$ref': '#/definitions/Error'},
'units': {'patternProperties': {'.*': {'$ref': '#/definitions/StatusResult'}},
'type': 'object'}},
'required': ['application',
'units'],
'type': 'object'},
'ApplicationStatusResults': {'additionalProperties': False,
'properties': {'results': {'items': {'$ref': '#/definitions/ApplicationStatusResult'},
'type': 'array'}},
'required': ['results'],
'type': 'object'},
'BoolResult': {'additionalProperties': False,
'properties': {'error': {'$ref': '#/definitions/Error'},
'result': {'type': 'boolean'}},
'required': ['result'],
'type': 'object'},
'BoolResults': {'additionalProperties': False,
'properties': {'results': {'items': {'$ref': '#/definitions/BoolResult'},
'type': 'array'}},
'required': ['results'],
'type': 'object'},
'CharmRelation': {'additionalProperties': False,
'properties': {'interface': {'type': 'string'},
'limit': {'type': 'integer'},
'name': {'type': 'string'},
'optional': {'type': 'boolean'},
'role': {'type': 'string'},
'scope': {'type': 'string'}},
'required': ['name',
'role',
'interface',
'optional',
'limit',
'scope'],
'type': 'object'},
'CharmURL': {'additionalProperties': False,
'properties': {'url': {'type': 'string'}},
'required': ['url'],
'type': 'object'},
'CharmURLs': {'additionalProperties': False,
'properties': {'urls': {'items': {'$ref': '#/definitions/CharmURL'},
'type': 'array'}},
'required': ['urls'],
'type': 'object'},
'CloudCredential': {'additionalProperties': False,
'properties': {'attrs': {'patternProperties': {'.*': {'type': 'string'}},
'type': 'object'},
'auth-type': {'type': 'string'},
'redacted': {'items': {'type': 'string'},
'type': 'array'}},
'required': ['auth-type'],
'type': 'object'},
'CloudSpec': {'additionalProperties': False,
'properties': {'cacertificates': {'items': {'type': 'string'},
'type': 'array'},
'credential': {'$ref': '#/definitions/CloudCredential'},
'endpoint': {'type': 'string'},
'identity-endpoint': {'type': 'string'},
'is-controller-cloud': {'type': 'boolean'},
'name': {'type': 'string'},
'region': {'type': 'string'},
'skip-tls-verify': {'type': 'boolean'},
'storage-endpoint': {'type': 'string'},
'type': {'type': 'string'}},
'required': ['type', 'name'],
'type': 'object'},
'CloudSpecResult': {'additionalProperties': False,
'properties': {'error': {'$ref': '#/definitions/Error'},
'result': {'$ref': '#/definitions/CloudSpec'}},
'type': 'object'},
'CommitHookChangesArg': {'additionalProperties': False,
'properties': {'add-storage': {'items': {'$ref': '#/definitions/StorageAddParams'},
'type': 'array'},
'close-ports': {'items': {'$ref': '#/definitions/EntityPortRange'},
'type': 'array'},
'open-ports': {'items': {'$ref': '#/definitions/EntityPortRange'},
'type': 'array'},
'pod-spec': {'$ref': '#/definitions/PodSpec'},
'relation-unit-settings': {'items': {'$ref': '#/definitions/RelationUnitSettings'},
'type': 'array'},
'set-raw-k8s-spec': {'$ref': '#/definitions/PodSpec'},
'tag': {'type': 'string'},
'unit-state': {'$ref': '#/definitions/SetUnitStateArg'},
'update-network-info': {'type': 'boolean'}},
'required': ['tag',
'update-network-info'],
'type': 'object'},
'CommitHookChangesArgs': {'additionalProperties': False,
'properties': {'args': {'items': {'$ref': '#/definitions/CommitHookChangesArg'},
'type': 'array'}},
'required': ['args'],
'type': 'object'},
'ConfigSettingsResult': {'additionalProperties': False,
'properties': {'error': {'$ref': '#/definitions/Error'},
'settings': {'patternProperties': {'.*': {'additionalProperties': True,
'type': 'object'}},
'type': 'object'}},
'required': ['settings'],
'type': 'object'},
'ConfigSettingsResults': {'additionalProperties': False,
'properties': {'results': {'items': {'$ref': '#/definitions/ConfigSettingsResult'},
'type': 'array'}},
'required': ['results'],
'type': 'object'},
'Endpoint': {'additionalProperties': False,
'properties': {'application-name': {'type': 'string'},
'relation': {'$ref': '#/definitions/CharmRelation'}},
'required': ['application-name', 'relation'],
'type': 'object'},
'Entities': {'additionalProperties': False,
'properties': {'entities': {'items': {'$ref': '#/definitions/Entity'},
'type': 'array'}},
'required': ['entities'],
'type': 'object'},
'EntitiesCharmURL': {'additionalProperties': False,
'properties': {'entities': {'items': {'$ref': '#/definitions/EntityCharmURL'},
'type': 'array'}},
'required': ['entities'],
'type': 'object'},
'EntitiesPortRanges': {'additionalProperties': False,
'properties': {'entities': {'items': {'$ref': '#/definitions/EntityPortRange'},
'type': 'array'}},
'required': ['entities'],
'type': 'object'},
'Entity': {'additionalProperties': False,
'properties': {'tag': {'type': 'string'}},
'required': ['tag'],
'type': 'object'},
'EntityCharmURL': {'additionalProperties': False,
'properties': {'charm-url': {'type': 'string'},
'tag': {'type': 'string'}},
'required': ['tag', 'charm-url'],
'type': 'object'},
'EntityPortRange': {'additionalProperties': False,
'properties': {'endpoint': {'type': 'string'},
'from-port': {'type': 'integer'},
'protocol': {'type': 'string'},
'tag': {'type': 'string'},
'to-port': {'type': 'integer'}},
'required': ['tag',
'protocol',
'from-port',
'to-port',
'endpoint'],
'type': 'object'},
'EntityStatusArgs': {'additionalProperties': False,
'properties': {'data': {'patternProperties': {'.*': {'additionalProperties': True,
'type': 'object'}},
'type': 'object'},
'info': {'type': 'string'},
'status': {'type': 'string'},
'tag': {'type': 'string'}},
'required': ['tag',
'status',
'info',
'data'],
'type': 'object'},
'EntityString': {'additionalProperties': False,
'properties': {'tag': {'type': 'string'},
'value': {'type': 'string'}},
'required': ['tag', 'value'],
'type': 'object'},
'EntityWorkloadVersion': {'additionalProperties': False,
'properties': {'tag': {'type': 'string'},
'workload-version': {'type': 'string'}},
'required': ['tag',
'workload-version'],
'type': 'object'},
'EntityWorkloadVersions': {'additionalProperties': False,
'properties': {'entities': {'items': {'$ref': '#/definitions/EntityWorkloadVersion'},
'type': 'array'}},
'required': ['entities'],
'type': 'object'},
'Error': {'additionalProperties': False,
'properties': {'code': {'type': 'string'},
'info': {'patternProperties': {'.*': {'additionalProperties': True,
'type': 'object'}},
'type': 'object'},
'message': {'type': 'string'}},
'required': ['message', 'code'],
'type': 'object'},
'ErrorResult': {'additionalProperties': False,
'properties': {'error': {'$ref': '#/definitions/Error'}},
'type': 'object'},
'ErrorResults': {'additionalProperties': False,
'properties': {'results': {'items': {'$ref': '#/definitions/ErrorResult'},
'type': 'array'}},
'required': ['results'],
'type': 'object'},
'GetLeadershipSettingsBulkResults': {'additionalProperties': False,
'properties': {'results': {'items': {'$ref': '#/definitions/GetLeadershipSettingsResult'},
'type': 'array'}},
'required': ['results'],
'type': 'object'},
'GetLeadershipSettingsResult': {'additionalProperties': False,
'properties': {'error': {'$ref': '#/definitions/Error'},
'settings': {'patternProperties': {'.*': {'type': 'string'}},
'type': 'object'}},
'required': ['settings'],
'type': 'object'},
'GoalState': {'additionalProperties': False,
'properties': {'relations': {'patternProperties': {'.*': {'patternProperties': {'.*': {'$ref': '#/definitions/GoalStateStatus'}},
'type': 'object'}},
'type': 'object'},
'units': {'patternProperties': {'.*': {'$ref': '#/definitions/GoalStateStatus'}},
'type': 'object'}},
'required': ['units', 'relations'],
'type': 'object'},
'GoalStateResult': {'additionalProperties': False,
'properties': {'error': {'$ref': '#/definitions/Error'},
'result': {'$ref': '#/definitions/GoalState'}},
'required': ['result', 'error'],
'type': 'object'},
'GoalStateResults': {'additionalProperties': False,
'properties': {'results': {'items': {'$ref': '#/definitions/GoalStateResult'},
'type': 'array'}},
'required': ['results'],
'type': 'object'},
'GoalStateStatus': {'additionalProperties': False,
'properties': {'since': {'format': 'date-time',
'type': 'string'},
'status': {'type': 'string'}},
'required': ['status', 'since'],
'type': 'object'},
'HostPort': {'additionalProperties': False,
'properties': {'Address': {'$ref': '#/definitions/Address'},
'cidr': {'type': 'string'},
'config-type': {'type': 'string'},
'is-secondary': {'type': 'boolean'},
'port': {'type': 'integer'},
'scope': {'type': 'string'},
'space-id': {'type': 'string'},
'space-name': {'type': 'string'},
'type': {'type': 'string'},
'value': {'type': 'string'}},
'required': ['value',
'type',
'scope',
'Address',
'port'],
'type': 'object'},
'IntResult': {'additionalProperties': False,
'properties': {'error': {'$ref': '#/definitions/Error'},
'result': {'type': 'integer'}},
'required': ['result'],
'type': 'object'},
'IntResults': {'additionalProperties': False,
'properties': {'results': {'items': {'$ref': '#/definitions/IntResult'},
'type': 'array'}},
'required': ['results'],
'type': 'object'},
'InterfaceAddress': {'additionalProperties': False,
'properties': {'cidr': {'type': 'string'},
'hostname': {'type': 'string'},
'value': {'type': 'string'}},
'required': ['hostname', 'value', 'cidr'],
'type': 'object'},
'LifeResult': {'additionalProperties': False,
'properties': {'error': {'$ref': '#/definitions/Error'},
'life': {'type': 'string'}},
'required': ['life'],
'type': 'object'},
'LifeResults': {'additionalProperties': False,
'properties': {'results': {'items': {'$ref': '#/definitions/LifeResult'},
'type': 'array'}},
'required': ['results'],
'type': 'object'},
'MachinePortRange': {'additionalProperties': False,
'properties': {'port-range': {'$ref': '#/definitions/PortRange'},
'relation-tag': {'type': 'string'},
'unit-tag': {'type': 'string'}},
'required': ['unit-tag',
'relation-tag',
'port-range'],
'type': 'object'},
'MachinePortsResult': {'additionalProperties': False,
'properties': {'error': {'$ref': '#/definitions/Error'},
'ports': {'items': {'$ref': '#/definitions/MachinePortRange'},
'type': 'array'}},
'required': ['ports'],
'type': 'object'},
'MachinePortsResults': {'additionalProperties': False,
'properties': {'results': {'items': {'$ref': '#/definitions/MachinePortsResult'},
'type': 'array'}},
'required': ['results'],
'type': 'object'},
'MergeLeadershipSettingsBulkParams': {'additionalProperties': False,
'properties': {'params': {'items': {'$ref': '#/definitions/MergeLeadershipSettingsParam'},
'type': 'array'}},
'required': ['params'],
'type': 'object'},
'MergeLeadershipSettingsParam': {'additionalProperties': False,
'properties': {'application-tag': {'type': 'string'},
'settings': {'patternProperties': {'.*': {'type': 'string'}},
'type': 'object'},
'unit-tag': {'type': 'string'}},
'required': ['settings'],
'type': 'object'},
'MeterStatusResult': {'additionalProperties': False,
'properties': {'code': {'type': 'string'},
'error': {'$ref': '#/definitions/Error'},
'info': {'type': 'string'}},
'required': ['code', 'info'],
'type': 'object'},
'MeterStatusResults': {'additionalProperties': False,
'properties': {'results': {'items': {'$ref': '#/definitions/MeterStatusResult'},
'type': 'array'}},
'required': ['results'],
'type': 'object'},
'Metric': {'additionalProperties': False,
'properties': {'key': {'type': 'string'},
'labels': {'patternProperties': {'.*': {'type': 'string'}},
'type': 'object'},
'time': {'format': 'date-time',
'type': 'string'},
'value': {'type': 'string'}},
'required': ['key', 'value', 'time'],
'type': 'object'},
'MetricBatch': {'additionalProperties': False,
'properties': {'charm-url': {'type': 'string'},
'created': {'format': 'date-time',
'type': 'string'},
'metrics': {'items': {'$ref': '#/definitions/Metric'},
'type': 'array'},
'uuid': {'type': 'string'}},
'required': ['uuid',
'charm-url',
'created',
'metrics'],
'type': 'object'},
'MetricBatchParam': {'additionalProperties': False,
'properties': {'batch': {'$ref': '#/definitions/MetricBatch'},
'tag': {'type': 'string'}},
'required': ['tag', 'batch'],
'type': 'object'},
'MetricBatchParams': {'additionalProperties': False,
'properties': {'batches': {'items': {'$ref': '#/definitions/MetricBatchParam'},
'type': 'array'}},
'required': ['batches'],
'type': 'object'},
'ModelConfigResult': {'additionalProperties': False,
'properties': {'config': {'patternProperties': {'.*': {'additionalProperties': True,
'type': 'object'}},
'type': 'object'}},
'required': ['config'],
'type': 'object'},
'ModelResult': {'additionalProperties': False,
'properties': {'error': {'$ref': '#/definitions/Error'},
'name': {'type': 'string'},
'type': {'type': 'string'},
'uuid': {'type': 'string'}},
'required': ['name', 'uuid', 'type'],
'type': 'object'},
'NetworkInfo': {'additionalProperties': False,
'properties': {'addresses': {'items': {'$ref': '#/definitions/InterfaceAddress'},
'type': 'array'},
'interface-name': {'type': 'string'},
'mac-address': {'type': 'string'}},
'required': ['mac-address',
'interface-name',
'addresses'],
'type': 'object'},
'NetworkInfoParams': {'additionalProperties': False,
'properties': {'bindings': {'items': {'type': 'string'},
'type': 'array'},
'relation-id': {'type': 'integer'},
'unit': {'type': 'string'}},
'required': ['unit', 'bindings'],
'type': 'object'},
'NetworkInfoResult': {'additionalProperties': False,
'properties': {'bind-addresses': {'items': {'$ref': '#/definitions/NetworkInfo'},
'type': 'array'},
'egress-subnets': {'items': {'type': 'string'},
'type': 'array'},
'error': {'$ref': '#/definitions/Error'},
'ingress-addresses': {'items': {'type': 'string'},
'type': 'array'}},
'type': 'object'},
'NetworkInfoResults': {'additionalProperties': False,
'properties': {'results': {'patternProperties': {'.*': {'$ref': '#/definitions/NetworkInfoResult'}},
'type': 'object'}},
'required': ['results'],
'type': 'object'},
'NotifyWatchResult': {'additionalProperties': False,
'properties': {'NotifyWatcherId': {'type': 'string'},
'error': {'$ref': '#/definitions/Error'}},
'required': ['NotifyWatcherId'],
'type': 'object'},
'NotifyWatchResults': {'additionalProperties': False,
'properties': {'results': {'items': {'$ref': '#/definitions/NotifyWatchResult'},
'type': 'array'}},
'required': ['results'],
'type': 'object'},
'OpenMachinePortRangesByEndpointResult': {'additionalProperties': False,
'properties': {'error': {'$ref': '#/definitions/Error'},
'unit-port-ranges': {'patternProperties': {'.*': {'items': {'$ref': '#/definitions/OpenUnitPortRangesByEndpoint'},
'type': 'array'}},
'type': 'object'}},
'required': ['unit-port-ranges'],
'type': 'object'},
'OpenMachinePortRangesByEndpointResults': {'additionalProperties': False,
'properties': {'results': {'items': {'$ref': '#/definitions/OpenMachinePortRangesByEndpointResult'},
'type': 'array'}},
'required': ['results'],
'type': 'object'},
'OpenUnitPortRangesByEndpoint': {'additionalProperties': False,
'properties': {'endpoint': {'type': 'string'},
'port-ranges': {'items': {'$ref': '#/definitions/PortRange'},
'type': 'array'}},
'required': ['endpoint',
'port-ranges'],
'type': 'object'},
'PodSpec': {'additionalProperties': False,
'properties': {'spec': {'type': 'string'},
'tag': {'type': 'string'}},
'required': ['tag'],
'type': 'object'},
'PortRange': {'additionalProperties': False,
'properties': {'from-port': {'type': 'integer'},
'protocol': {'type': 'string'},
'to-port': {'type': 'integer'}},
'required': ['from-port', 'to-port', 'protocol'],
'type': 'object'},
'RelationIds': {'additionalProperties': False,
'properties': {'relation-ids': {'items': {'type': 'integer'},
'type': 'array'}},
'required': ['relation-ids'],
'type': 'object'},
'RelationResult': {'additionalProperties': False,
'properties': {'bool': {'type': 'boolean'},
'endpoint': {'$ref': '#/definitions/Endpoint'},
'error': {'$ref': '#/definitions/Error'},
'id': {'type': 'integer'},
'key': {'type': 'string'},
'life': {'type': 'string'},
'other-application': {'type': 'string'}},
'required': ['life',
'id',
'key',
'endpoint'],
'type': 'object'},
'RelationResults': {'additionalProperties': False,
'properties': {'results': {'items': {'$ref': '#/definitions/RelationResult'},
'type': 'array'}},
'required': ['results'],
'type': 'object'},
'RelationStatusArg': {'additionalProperties': False,
'properties': {'message': {'type': 'string'},
'relation-id': {'type': 'integer'},
'status': {'type': 'string'},
'unit-tag': {'type': 'string'}},
'required': ['unit-tag',
'relation-id',
'status',
'message'],
'type': 'object'},
'RelationStatusArgs': {'additionalProperties': False,
'properties': {'args': {'items': {'$ref': '#/definitions/RelationStatusArg'},
'type': 'array'}},
'required': ['args'],
'type': 'object'},
'RelationUnit': {'additionalProperties': False,
'properties': {'relation': {'type': 'string'},
'unit': {'type': 'string'}},
'required': ['relation', 'unit'],
'type': 'object'},
'RelationUnitPair': {'additionalProperties': False,
'properties': {'local-unit': {'type': 'string'},
'relation': {'type': 'string'},
'remote-unit': {'type': 'string'}},
'required': ['relation',
'local-unit',
'remote-unit'],
'type': 'object'},
'RelationUnitPairs': {'additionalProperties': False,
'properties': {'relation-unit-pairs': {'items': {'$ref': '#/definitions/RelationUnitPair'},
'type': 'array'}},
'required': ['relation-unit-pairs'],
'type': 'object'},
'RelationUnitSettings': {'additionalProperties': False,
'properties': {'application-settings': {'patternProperties': {'.*': {'type': 'string'}},
'type': 'object'},
'relation': {'type': 'string'},
'settings': {'patternProperties': {'.*': {'type': 'string'}},
'type': 'object'},
'unit': {'type': 'string'}},
'required': ['relation',
'unit',
'settings',
'application-settings'],
'type': 'object'},
'RelationUnitStatus': {'additionalProperties': False,
'properties': {'in-scope': {'type': 'boolean'},
'relation-tag': {'type': 'string'},
'suspended': {'type': 'boolean'}},
'required': ['relation-tag',
'in-scope',
'suspended'],
'type': 'object'},
'RelationUnitStatusResult': {'additionalProperties': False,
'properties': {'error': {'$ref': '#/definitions/Error'},
'results': {'items': {'$ref': '#/definitions/RelationUnitStatus'},
'type': 'array'}},
'required': ['results'],
'type': 'object'},
'RelationUnitStatusResults': {'additionalProperties': False,
'properties': {'results': {'items': {'$ref': '#/definitions/RelationUnitStatusResult'},
'type': 'array'}},
'required': ['results'],
'type': 'object'},
'RelationUnits': {'additionalProperties': False,
'properties': {'relation-units': {'items': {'$ref': '#/definitions/RelationUnit'},
'type': 'array'}},
'required': ['relation-units'],
'type': 'object'},
'RelationUnitsChange': {'additionalProperties': False,
'properties': {'app-changed': {'patternProperties': {'.*': {'type': 'integer'}},
'type': 'object'},
'changed': {'patternProperties': {'.*': {'$ref': '#/definitions/UnitSettings'}},
'type': 'object'},
'departed': {'items': {'type': 'string'},
'type': 'array'}},
'required': ['changed'],
'type': 'object'},
'RelationUnitsSettings': {'additionalProperties': False,
'properties': {'relation-units': {'items': {'$ref': '#/definitions/RelationUnitSettings'},
'type': 'array'}},
'required': ['relation-units'],
'type': 'object'},
'RelationUnitsWatchResult': {'additionalProperties': False,
'properties': {'changes': {'$ref': '#/definitions/RelationUnitsChange'},
'error': {'$ref': '#/definitions/Error'},
'watcher-id': {'type': 'string'}},
'required': ['watcher-id',
'changes'],
'type': 'object'},
'RelationUnitsWatchResults': {'additionalProperties': False,
'properties': {'results': {'items': {'$ref': '#/definitions/RelationUnitsWatchResult'},
'type': 'array'}},
'required': ['results'],
'type': 'object'},
'ResolvedModeResult': {'additionalProperties': False,
'properties': {'error': {'$ref': '#/definitions/Error'},
'mode': {'type': 'string'}},
'required': ['mode'],
'type': 'object'},
'ResolvedModeResults': {'additionalProperties': False,
'properties': {'results': {'items': {'$ref': '#/definitions/ResolvedModeResult'},
'type': 'array'}},
'required': ['results'],
'type': 'object'},
'SetStatus': {'additionalProperties': False,
'properties': {'entities': {'items': {'$ref': '#/definitions/EntityStatusArgs'},
'type': 'array'}},
'required': ['entities'],
'type': 'object'},
'SetUnitStateArg': {'additionalProperties': False,
'properties': {'charm-state': {'patternProperties': {'.*': {'type': 'string'}},
'type': 'object'},
'meter-status-state': {'type': 'string'},
'relation-state': {'patternProperties': {'.*': {'type': 'string'}},
'type': 'object'},
'storage-state': {'type': 'string'},
'tag': {'type': 'string'},
'uniter-state': {'type': 'string'}},
'required': ['tag'],
'type': 'object'},
'SetUnitStateArgs': {'additionalProperties': False,
'properties': {'args': {'items': {'$ref': '#/definitions/SetUnitStateArg'},
'type': 'array'}},
'required': ['args'],
'type': 'object'},
'SettingsResult': {'additionalProperties': False,
'properties': {'error': {'$ref': '#/definitions/Error'},
'settings': {'patternProperties': {'.*': {'type': 'string'}},
'type': 'object'}},
'required': ['settings'],
'type': 'object'},
'SettingsResults': {'additionalProperties': False,
'properties': {'results': {'items': {'$ref': '#/definitions/SettingsResult'},
'type': 'array'}},
'required': ['results'],
'type': 'object'},
'StatusResult': {'additionalProperties': False,
'properties': {'data': {'patternProperties': {'.*': {'additionalProperties': True,
'type': 'object'}},
'type': 'object'},
'error': {'$ref': '#/definitions/Error'},
'id': {'type': 'string'},
'info': {'type': 'string'},
'life': {'type': 'string'},
'since': {'format': 'date-time',
'type': 'string'},
'status': {'type': 'string'}},
'required': ['id',
'life',
'status',
'info',
'data',
'since'],
'type': 'object'},
'StatusResults': {'additionalProperties': False,
'properties': {'results': {'items': {'$ref': '#/definitions/StatusResult'},
'type': 'array'}},
'required': ['results'],
'type': 'object'},
'StorageAddParams': {'additionalProperties': False,
'properties': {'name': {'type': 'string'},
'storage': {'$ref': '#/definitions/StorageConstraints'},
'unit': {'type': 'string'}},
'required': ['unit', 'name', 'storage'],
'type': 'object'},
'StorageAttachment': {'additionalProperties': False,
'properties': {'kind': {'type': 'integer'},
'life': {'type': 'string'},
'location': {'type': 'string'},
'owner-tag': {'type': 'string'},
'storage-tag': {'type': 'string'},
'unit-tag': {'type': 'string'}},
'required': ['storage-tag',
'owner-tag',
'unit-tag',
'kind',
'location',
'life'],
'type': 'object'},
'StorageAttachmentId': {'additionalProperties': False,
'properties': {'storage-tag': {'type': 'string'},
'unit-tag': {'type': 'string'}},
'required': ['storage-tag',
'unit-tag'],
'type': 'object'},
'StorageAttachmentIds': {'additionalProperties': False,
'properties': {'ids': {'items': {'$ref': '#/definitions/StorageAttachmentId'},
'type': 'array'}},
'required': ['ids'],
'type': 'object'},
'StorageAttachmentIdsResult': {'additionalProperties': False,
'properties': {'error': {'$ref': '#/definitions/Error'},
'result': {'$ref': '#/definitions/StorageAttachmentIds'}},
'required': ['result'],
'type': 'object'},
'StorageAttachmentIdsResults': {'additionalProperties': False,
'properties': {'results': {'items': {'$ref': '#/definitions/StorageAttachmentIdsResult'},
'type': 'array'}},
'type': 'object'},
'StorageAttachmentResult': {'additionalProperties': False,
'properties': {'error': {'$ref': '#/definitions/Error'},
'result': {'$ref': '#/definitions/StorageAttachment'}},
'required': ['result'],
'type': 'object'},
'StorageAttachmentResults': {'additionalProperties': False,
'properties': {'results': {'items': {'$ref': '#/definitions/StorageAttachmentResult'},
'type': 'array'}},
'type': 'object'},
'StorageConstraints': {'additionalProperties': False,
'properties': {'count': {'type': 'integer'},
'pool': {'type': 'string'},
'size': {'type': 'integer'}},
'type': 'object'},
'StoragesAddParams': {'additionalProperties': False,
'properties': {'storages': {'items': {'$ref': '#/definitions/StorageAddParams'},
'type': 'array'}},
'required': ['storages'],
'type': 'object'},
'StringBoolResult': {'additionalProperties': False,
'properties': {'error': {'$ref': '#/definitions/Error'},
'ok': {'type': 'boolean'},
'result': {'type': 'string'}},
'required': ['result', 'ok'],
'type': 'object'},
'StringBoolResults': {'additionalProperties': False,
'properties': {'results': {'items': {'$ref': '#/definitions/StringBoolResult'},
'type': 'array'}},
'required': ['results'],
'type': 'object'},
'StringResult': {'additionalProperties': False,
'properties': {'error': {'$ref': '#/definitions/Error'},
'result': {'type': 'string'}},
'required': ['result'],
'type': 'object'},
'StringResults': {'additionalProperties': False,
'properties': {'results': {'items': {'$ref': '#/definitions/StringResult'},
'type': 'array'}},
'required': ['results'],
'type': 'object'},
'StringsResult': {'additionalProperties': False,
'properties': {'error': {'$ref': '#/definitions/Error'},
'result': {'items': {'type': 'string'},
'type': 'array'}},
'type': 'object'},
'StringsWatchResult': {'additionalProperties': False,
'properties': {'changes': {'items': {'type': 'string'},
'type': 'array'},
'error': {'$ref': '#/definitions/Error'},
'watcher-id': {'type': 'string'}},
'required': ['watcher-id'],
'type': 'object'},
'StringsWatchResults': {'additionalProperties': False,
'properties': {'results': {'items': {'$ref': '#/definitions/StringsWatchResult'},
'type': 'array'}},
'required': ['results'],
'type': 'object'},
'UnitRefreshResult': {'additionalProperties': False,
'properties': {'Error': {'$ref': '#/definitions/Error'},
'Life': {'type': 'string'},
'Resolved': {'type': 'string'},
'provider-id': {'type': 'string'}},
'required': ['Life',
'Resolved',
'Error'],
'type': 'object'},
'UnitRefreshResults': {'additionalProperties': False,
'properties': {'Results': {'items': {'$ref': '#/definitions/UnitRefreshResult'},
'type': 'array'}},
'required': ['Results'],
'type': 'object'},
'UnitSettings': {'additionalProperties': False,
'properties': {'version': {'type': 'integer'}},
'required': ['version'],
'type': 'object'},
'UnitStateResult': {'additionalProperties': False,
'properties': {'charm-state': {'patternProperties': {'.*': {'type': 'string'}},
'type': 'object'},
'error': {'$ref': '#/definitions/Error'},
'meter-status-state': {'type': 'string'},
'relation-state': {'patternProperties': {'.*': {'type': 'string'}},
'type': 'object'},
'storage-state': {'type': 'string'},
'uniter-state': {'type': 'string'}},
'type': 'object'},
'UnitStateResults': {'additionalProperties': False,
'properties': {'results': {'items': {'$ref': '#/definitions/UnitStateResult'},
'type': 'array'}},
'required': ['results'],
'type': 'object'},
'UpgradeSeriesStatusParam': {'additionalProperties': False,
'properties': {'entity': {'$ref': '#/definitions/Entity'},
'message': {'type': 'string'},
'status': {'type': 'string'}},
'required': ['entity',
'status',
'message'],
'type': 'object'},
'UpgradeSeriesStatusParams': {'additionalProperties': False,
'properties': {'params': {'items': {'$ref': '#/definitions/UpgradeSeriesStatusParam'},
'type': 'array'}},
'required': ['params'],
'type': 'object'},
'UpgradeSeriesStatusResult': {'additionalProperties': False,
'properties': {'error': {'$ref': '#/definitions/Error'},
'status': {'type': 'string'},
'target': {'type': 'string'}},
'type': 'object'},
'UpgradeSeriesStatusResults': {'additionalProperties': False,
'properties': {'results': {'items': {'$ref': '#/definitions/UpgradeSeriesStatusResult'},
'type': 'array'}},
'type': 'object'}},
'properties': {'APIAddresses': {'description': 'APIAddresses returns the list '
'of addresses used to connect '
'to the API.',
'properties': {'Result': {'$ref': '#/definitions/StringsResult'}},
'type': 'object'},
'APIHostPorts': {'description': 'APIHostPorts returns the API '
'server addresses.',
'properties': {'Result': {'$ref': '#/definitions/APIHostPortsResult'}},
'type': 'object'},
'ActionStatus': {'description': 'ActionStatus returns the '
'status of Actions by Tags '
'passed in.',
'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/StringResults'}},
'type': 'object'},
'Actions': {'description': 'Actions returns the Actions by '
'Tags passed and ensures that the '
'Unit asking\n'
'for them is the same Unit that has '
'the Actions.',
'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/ActionResults'}},
'type': 'object'},
'AddMetricBatches': {'description': 'AddMetricBatches adds the '
'metrics for the specified '
'unit.',
'properties': {'Params': {'$ref': '#/definitions/MetricBatchParams'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'AddUnitStorage': {'description': 'AddUnitStorage validates '
'and creates additional '
'storage instances for '
'units.\n'
'Failures on an individual '
'storage instance do not '
'block remaining\n'
'instances from being '
'processed.\n'
'TODO(juju3) - remove',
'properties': {'Params': {'$ref': '#/definitions/StoragesAddParams'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'AllMachinePorts': {'description': 'AllMachinePorts returns '
'all opened port ranges for '
'each given\n'
'machine (on all '
'networks).\n'
'\n'
'DEPRECATED: clients should '
'switch to the '
'OpenedMachinePortRanges '
'API call\n'
'when using the V17+ API.\n'
'\n'
'TODO(achilleasa): remove '
'from V17 once all client '
'references to this API\n'
'have been changed to use '
'the new API.',
'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/MachinePortsResults'}},
'type': 'object'},
'ApplicationStatus': {'description': 'ApplicationStatus '
'returns the status of '
'the Applications and its '
'workloads\n'
'if the given unit is the '
'leader.',
'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/ApplicationStatusResults'}},
'type': 'object'},
'AssignedMachine': {'description': 'AssignedMachine returns '
'the machine tag for each '
'given unit tag, or\n'
'an error satisfying '
'params.IsCodeNotAssigned '
'when a unit has no\n'
'assigned machine.',
'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/StringResults'}},
'type': 'object'},
'AvailabilityZone': {'description': 'AvailabilityZone returns '
'the availability zone for '
'each given unit, if '
'applicable.',
'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/StringResults'}},
'type': 'object'},
'BeginActions': {'description': 'BeginActions marks the '
'actions represented by the '
'passed in Tags as running.',
'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'CanApplyLXDProfile': {'description': 'CanApplyLXDProfile is a '
'shim to call the '
'LXDProfileAPIv2 version '
'of this method.',
'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/BoolResults'}},
'type': 'object'},
'CharmArchiveSha256': {'description': 'CharmArchiveSha256 '
'returns the SHA256 '
'digest of the charm '
'archive\n'
'(bundle) data for each '
'charm url in the given '
'parameters.',
'properties': {'Params': {'$ref': '#/definitions/CharmURLs'},
'Result': {'$ref': '#/definitions/StringResults'}},
'type': 'object'},
'CharmModifiedVersion': {'description': 'CharmModifiedVersion '
'returns the most recent '
'CharmModifiedVersion '
'for all given\n'
'units or '
'applications.',
'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/IntResults'}},
'type': 'object'},
'CharmURL': {'description': 'CharmURL returns the charm URL '
'for all given units or '
'applications.',
'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/StringBoolResults'}},
'type': 'object'},
'ClearResolved': {'description': 'ClearResolved removes any '
'resolved setting from each '
'given unit.',
'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'ClosePorts': {'description': 'ClosePorts sets the policy of '
'the port range with protocol to '
'be\n'
'closed, for all given units.\n'
'TODO(juju3) - remove',
'properties': {'Params': {'$ref': '#/definitions/EntitiesPortRanges'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'CloudAPIVersion': {'description': 'CloudAPIVersion returns '
'the cloud API version, if '
'available.',
'properties': {'Result': {'$ref': '#/definitions/StringResult'}},
'type': 'object'},
'CloudSpec': {'description': 'CloudSpec returns the cloud spec '
'used by the model in which the\n'
'authenticated unit or '
'application resides.\n'
'A check is made beforehand to '
'ensure that the request is made '
'by an entity\n'
'that has been granted the '
'appropriate trust.',
'properties': {'Result': {'$ref': '#/definitions/CloudSpecResult'}},
'type': 'object'},
'CommitHookChanges': {'description': 'CommitHookChanges '
'batches together all '
'required API calls for '
'applying\n'
'a set of changes after a '
'hook successfully '
'completes and executes '
'them in a\n'
'single transaction.',
'properties': {'Params': {'$ref': '#/definitions/CommitHookChangesArgs'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'ConfigSettings': {'description': 'ConfigSettings returns the '
'complete set of application '
'charm config\n'
'settings available to each '
'given unit.',
'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/ConfigSettingsResults'}},
'type': 'object'},
'CurrentModel': {'description': 'CurrentModel returns the name '
'and UUID for the current juju '
'model.',
'properties': {'Result': {'$ref': '#/definitions/ModelResult'}},
'type': 'object'},
'Destroy': {'description': 'Destroy advances all given Alive '
"units' lifecycles as far as\n"
'possible. See '
'state/Unit.Destroy().',
'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'DestroyAllSubordinates': {'description': 'DestroyAllSubordinates '
'destroys all '
'subordinates of '
'each given unit.',
'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'DestroyUnitStorageAttachments': {'description': 'DestroyUnitStorageAttachments '
'marks each '
'storage '
'attachment '
'of the\n'
'specified '
'units as '
'Dying.',
'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'EnsureDead': {'description': 'EnsureDead calls EnsureDead on '
'each given entity from state. '
'It\n'
'will fail if the entity is not '
"present. If it's Alive, nothing "
'will\n'
'happen (see state/EnsureDead() '
'for units or machines).',
'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'EnterScope': {'description': 'EnterScope ensures each unit '
'has entered its scope in the '
'relation,\n'
'for all of the given '
'relation/unit pairs. See also\n'
'state.RelationUnit.EnterScope().',
'properties': {'Params': {'$ref': '#/definitions/RelationUnits'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'FinishActions': {'description': 'FinishActions saves the '
'result of a completed Action',
'properties': {'Params': {'$ref': '#/definitions/ActionExecutionResults'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'GetMeterStatus': {'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/MeterStatusResults'}},
'type': 'object'},
'GetPodSpec': {'description': 'GetPodSpec gets the pod specs '
'for a set of applications.',
'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/StringResults'}},
'type': 'object'},
'GetPrincipal': {'description': 'GetPrincipal returns the '
'result of calling '
'PrincipalName() and\n'
'converting it to a tag, on '
'each given unit.',
'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/StringBoolResults'}},
'type': 'object'},
'GetRawK8sSpec': {'description': 'GetRawK8sSpec gets the raw '
'k8s specs for a set of '
'applications.',
'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/StringResults'}},
'type': 'object'},
'GoalStates': {'description': 'GoalStates returns information '
'about charm units and relations.',
'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/GoalStateResults'}},
'type': 'object'},
'HasSubordinates': {'description': 'HasSubordinates returns '
'whether each given '
'unit has any subordinates.',
'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/BoolResults'}},
'type': 'object'},
'LXDProfileName': {'description': 'LXDProfileName is a shim to '
'call the LXDProfileAPIv2 '
'version of this method.',
'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/StringResults'}},
'type': 'object'},
'LXDProfileRequired': {'description': 'LXDProfileRequired is a '
'shim to call the '
'LXDProfileAPIv2 version '
'of this method.',
'properties': {'Params': {'$ref': '#/definitions/CharmURLs'},
'Result': {'$ref': '#/definitions/BoolResults'}},
'type': 'object'},
'LeaveScope': {'description': 'LeaveScope signals each unit '
'has left its scope in the '
'relation,\n'
'for all of the given '
'relation/unit pairs. See also\n'
'state.RelationUnit.LeaveScope().',
'properties': {'Params': {'$ref': '#/definitions/RelationUnits'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'Life': {'description': 'Life returns the life status of every '
'supplied entity, where available.',
'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/LifeResults'}},
'type': 'object'},
'LogActionsMessages': {'description': 'LogActionsMessages '
'records the log '
'messages against the '
'specified actions.',
'properties': {'Params': {'$ref': '#/definitions/ActionMessageParams'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'Merge': {'description': 'Merge merges in the provided '
'leadership settings. Only leaders '
'for\n'
'the given service may perform this '
'operation.',
'properties': {'Params': {'$ref': '#/definitions/MergeLeadershipSettingsBulkParams'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'ModelConfig': {'description': 'ModelConfig returns the '
"current model's configuration.",
'properties': {'Result': {'$ref': '#/definitions/ModelConfigResult'}},
'type': 'object'},
'ModelUUID': {'description': 'ModelUUID returns the model UUID '
'that this unit resides in.\n'
'It is implemented here directly '
'as a result of removing it from\n'
'embedded APIAddresser *without* '
'bumping the facade version.\n'
'It should be blanked when this '
'facade version is next '
'incremented.',
'properties': {'Result': {'$ref': '#/definitions/StringResult'}},
'type': 'object'},
'NetworkInfo': {'description': 'NetworkInfo returns network '
'interfaces/addresses for '
'specified bindings.',
'properties': {'Params': {'$ref': '#/definitions/NetworkInfoParams'},
'Result': {'$ref': '#/definitions/NetworkInfoResults'}},
'type': 'object'},
'OpenPorts': {'description': 'TODO(juju3) - remove',
'properties': {'Params': {'$ref': '#/definitions/EntitiesPortRanges'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'OpenedMachinePortRangesByEndpoint': {'description': 'OpenedMachinePortRangesByEndpoint '
'returns '
'the port '
'ranges '
'opened '
'by each\n'
'unit on '
'the '
'provided '
'machines '
'grouped '
'by '
'application '
'endpoint.',
'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/OpenMachinePortRangesByEndpointResults'}},
'type': 'object'},
'PrivateAddress': {'description': 'PrivateAddress returns the '
'private address for each '
'given unit, if set.',
'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/StringResults'}},
'type': 'object'},
'ProviderType': {'description': 'ProviderType returns the '
'provider type used by the '
'current juju\n'
'model.\n'
'\n'
'TODO(dimitern): Refactor the '
'uniter to call this instead '
'of calling\n'
'ModelConfig() just to get the '
'provider type. Once we have '
'machine\n'
'addresses, this might be '
'completely unnecessary '
'though.',
'properties': {'Result': {'$ref': '#/definitions/StringResult'}},
'type': 'object'},
'PublicAddress': {'description': 'PublicAddress returns the '
'public address for each '
'given unit, if set.',
'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/StringResults'}},
'type': 'object'},
'Read': {'description': 'Read reads leadership settings for '
'the provided service ID. Any\n'
'unit of the service may perform this '
'operation.',
'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/GetLeadershipSettingsBulkResults'}},
'type': 'object'},
'ReadLocalApplicationSettings': {'description': 'ReadLocalApplicationSettings '
'returns the '
'local '
'application '
'settings for '
'a\n'
'particular '
'relation when '
'invoked by '
'the leader '
'unit.',
'properties': {'Params': {'$ref': '#/definitions/RelationUnit'},
'Result': {'$ref': '#/definitions/SettingsResult'}},
'type': 'object'},
'ReadRemoteSettings': {'description': 'ReadRemoteSettings '
'returns the remote '
'settings of each given '
'set of\n'
'relation/local '
'unit/remote unit.',
'properties': {'Params': {'$ref': '#/definitions/RelationUnitPairs'},
'Result': {'$ref': '#/definitions/SettingsResults'}},
'type': 'object'},
'ReadSettings': {'description': 'ReadSettings returns the '
'local settings of each given '
'set of\n'
'relation/unit.\n'
'\n'
'NOTE(achilleasa): Using this '
'call to read application data '
'is deprecated\n'
'and will not work for k8s '
'charms (see LP1876097). '
'Instead, clients should\n'
'use '
'ReadLocalApplicationSettings.',
'properties': {'Params': {'$ref': '#/definitions/RelationUnits'},
'Result': {'$ref': '#/definitions/SettingsResults'}},
'type': 'object'},
'Refresh': {'description': 'Refresh retrieves the latest '
'values for attributes on this '
'unit.',
'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/UnitRefreshResults'}},
'type': 'object'},
'Relation': {'description': 'Relation returns information '
'about all given relation/unit '
'pairs,\n'
'including their id, key and the '
'local endpoint.',
'properties': {'Params': {'$ref': '#/definitions/RelationUnits'},
'Result': {'$ref': '#/definitions/RelationResults'}},
'type': 'object'},
'RelationById': {'description': 'RelationById returns '
'information about all given '
'relations,\n'
'specified by their ids, '
'including their key and the '
'local\n'
'endpoint.',
'properties': {'Params': {'$ref': '#/definitions/RelationIds'},
'Result': {'$ref': '#/definitions/RelationResults'}},
'type': 'object'},
'RelationsStatus': {'description': 'RelationsStatus returns '
'for each unit the '
'corresponding relation and '
'status information.',
'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/RelationUnitStatusResults'}},
'type': 'object'},
'RemoveStorageAttachments': {'description': 'RemoveStorageAttachments '
'removes the '
'specified '
'storage\n'
'attachments from '
'state.',
'properties': {'Params': {'$ref': '#/definitions/StorageAttachmentIds'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'RequestReboot': {'description': 'RequestReboot sets the '
'reboot flag on the provided '
'machines',
'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'Resolved': {'description': 'Resolved returns the current '
'resolved setting for each given '
'unit.',
'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/ResolvedModeResults'}},
'type': 'object'},
'SLALevel': {'description': "SLALevel returns the model's SLA "
'level.',
'properties': {'Result': {'$ref': '#/definitions/StringResult'}},
'type': 'object'},
'SetAgentStatus': {'description': 'SetAgentStatus will set '
'status for agents of Units '
'passed in args, if one\n'
'of the args is not a Unit '
'it will fail.',
'properties': {'Params': {'$ref': '#/definitions/SetStatus'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'SetApplicationStatus': {'description': 'SetApplicationStatus '
'sets the status for '
'all the Applications '
'in args if the given '
'Unit is\n'
'the leader.',
'properties': {'Params': {'$ref': '#/definitions/SetStatus'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'SetCharmURL': {'description': 'SetCharmURL sets the charm URL '
'for each given unit. An error '
'will\n'
'be returned if a unit is dead, '
'or the charm URL is not known.',
'properties': {'Params': {'$ref': '#/definitions/EntitiesCharmURL'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'SetRelationStatus': {'description': 'SetRelationStatus '
'updates the status of '
'the specified relations.',
'properties': {'Params': {'$ref': '#/definitions/RelationStatusArgs'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'SetState': {'description': 'SetState sets the state persisted '
'by the charm running in this '
'unit\n'
'and the state internal to the '
'uniter for this unit.',
'properties': {'Params': {'$ref': '#/definitions/SetUnitStateArgs'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'SetStatus': {'description': 'SetStatus will set status for the '
'entities passed in args. If the '
'entity is\n'
'a Unit it will instead set '
'status to its agent, to emulate '
'backwards\n'
'compatibility.',
'properties': {'Params': {'$ref': '#/definitions/SetStatus'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'SetUnitStatus': {'description': 'SetUnitStatus sets status '
'for all elements passed in '
'args, the difference\n'
'with SetStatus is that if an '
'entity is a Unit it will set '
'its status instead\n'
'of its agent.',
'properties': {'Params': {'$ref': '#/definitions/SetStatus'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'SetUpgradeSeriesUnitStatus': {'description': 'SetUpgradeSeriesUnitStatus '
'sets the '
'upgrade series '
'status of the '
'unit.\n'
'If no upgrade '
'is in progress '
'an error is '
'returned '
'instead.',
'properties': {'Params': {'$ref': '#/definitions/UpgradeSeriesStatusParams'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'SetWorkloadVersion': {'description': 'SetWorkloadVersion sets '
'the workload version '
'for each given unit. An '
'error will\n'
'be returned if a unit '
'is dead.',
'properties': {'Params': {'$ref': '#/definitions/EntityWorkloadVersions'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'State': {'description': 'State returns the state persisted by '
'the charm running in this unit\n'
'and the state internal to the uniter '
'for this unit.',
'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/UnitStateResults'}},
'type': 'object'},
'StorageAttachmentLife': {'description': 'StorageAttachmentLife '
'returns the '
'lifecycle state of '
'the storage '
'attachments\n'
'with the specified '
'tags.',
'properties': {'Params': {'$ref': '#/definitions/StorageAttachmentIds'},
'Result': {'$ref': '#/definitions/LifeResults'}},
'type': 'object'},
'StorageAttachments': {'description': 'StorageAttachments '
'returns the storage '
'attachments with the '
'specified tags.',
'properties': {'Params': {'$ref': '#/definitions/StorageAttachmentIds'},
'Result': {'$ref': '#/definitions/StorageAttachmentResults'}},
'type': 'object'},
'UnitStatus': {'description': 'UnitStatus returns the workload '
'status information for the '
'unit.',
'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/StatusResults'}},
'type': 'object'},
'UnitStorageAttachments': {'description': 'UnitStorageAttachments '
'returns the IDs of '
'storage attachments '
'for a collection of '
'units.',
'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/StorageAttachmentIdsResults'}},
'type': 'object'},
'UpdateNetworkInfo': {'description': 'UpdateNetworkInfo '
'refreshes the network '
"settings for a unit's "
'bound\n'
'endpoints.',
'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'UpdateSettings': {'description': 'UpdateSettings persists all '
'changes made to the local '
'settings of\n'
'all given pairs of relation '
'and unit. Keys with empty '
'values are\n'
'considered a signal to '
'delete these values.\n'
'TODO(juju3) - remove',
'properties': {'Params': {'$ref': '#/definitions/RelationUnitsSettings'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'UpgradeSeriesUnitStatus': {'description': 'UpgradeSeriesUnitStatus '
'returns the '
'current '
'preparation status '
'of an\n'
'upgrading unit.\n'
'If no series '
'upgrade is in '
'progress an error '
'is returned '
'instead.',
'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/UpgradeSeriesStatusResults'}},
'type': 'object'},
'Watch': {'description': 'Watch starts a NotifyWatcher for '
'each given entity.',
'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/NotifyWatchResults'}},
'type': 'object'},
'WatchAPIHostPorts': {'description': 'WatchAPIHostPorts '
'watches the API server '
'addresses.',
'properties': {'Result': {'$ref': '#/definitions/NotifyWatchResult'}},
'type': 'object'},
'WatchActionNotifications': {'description': 'WatchActionNotifications '
'returns a '
'StringsWatcher '
'for observing\n'
'incoming action '
'calls to a unit. '
'See also '
'state/watcher.go\n'
'Unit.WatchActionNotifications(). '
'This method is '
'called from\n'
'api/uniter/uniter.go '
'WatchActionNotifications().',
'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/StringsWatchResults'}},
'type': 'object'},
'WatchConfigSettingsHash': {'description': 'WatchConfigSettingsHash '
'returns a '
'StringsWatcher '
'that yields a '
'hash\n'
'of the config '
'values every time '
'the config '
'changes. The '
'uniter can\n'
'save this hash and '
'use it to decide '
'whether the '
'config-changed '
'hook\n'
'needs to be run '
'(or whether this '
'was just an agent '
'restart with no\n'
'substantive config '
'change).',
'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/StringsWatchResults'}},
'type': 'object'},
'WatchForModelConfigChanges': {'description': 'WatchForModelConfigChanges '
'returns a '
'NotifyWatcher '
'that observes\n'
'changes to the '
'model '
'configuration.\n'
'Note that '
'although the '
'NotifyWatchResult '
'contains an '
'Error field,\n'
"it's not used "
'because we are '
'only returning '
'a single '
'watcher,\n'
'so we use the '
'regular error '
'return.',
'properties': {'Result': {'$ref': '#/definitions/NotifyWatchResult'}},
'type': 'object'},
'WatchInstanceData': {'description': 'WatchInstanceData is a '
'shim to call the '
'LXDProfileAPIv2 version '
'of this method.',
'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/NotifyWatchResults'}},
'type': 'object'},
'WatchLeadershipSettings': {'description': 'WatchLeadershipSettings '
'will block the '
'caller until '
'leadership '
'settings\n'
'for the given '
'service ID change.',
'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/NotifyWatchResults'}},
'type': 'object'},
'WatchMeterStatus': {'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/NotifyWatchResults'}},
'type': 'object'},
'WatchRelationUnits': {'description': 'WatchRelationUnits '
'returns a '
'RelationUnitsWatcher '
'for observing\n'
'changes to every unit '
'in the supplied '
'relation that is '
'visible to\n'
'the supplied unit. See '
'also '
'state/watcher.go:RelationUnit.Watch().',
'properties': {'Params': {'$ref': '#/definitions/RelationUnits'},
'Result': {'$ref': '#/definitions/RelationUnitsWatchResults'}},
'type': 'object'},
'WatchStorageAttachments': {'description': 'WatchStorageAttachments '
'creates watchers '
'for a collection '
'of storage\n'
'attachments, each '
'of which can be '
'used to watch '
'changes to '
'storage\n'
'attachment info.',
'properties': {'Params': {'$ref': '#/definitions/StorageAttachmentIds'},
'Result': {'$ref': '#/definitions/NotifyWatchResults'}},
'type': 'object'},
'WatchTrustConfigSettingsHash': {'description': 'WatchTrustConfigSettingsHash '
'returns a '
'StringsWatcher '
'that yields '
'a\n'
'hash of the '
'application '
'config values '
'whenever they '
'change. The\n'
'uniter can '
'use the hash '
'to determine '
'whether the '
'actual values '
'have\n'
'changed since '
'it last saw '
'the config.',
'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/StringsWatchResults'}},
'type': 'object'},
'WatchUnitAddressesHash': {'description': 'WatchUnitAddressesHash '
'returns a '
'StringsWatcher that '
'yields the\n'
'hashes of the '
'addresses for the '
'unit whenever the '
'addresses\n'
'change. The uniter '
'can use the hash to '
'determine whether '
'the actual\n'
'address values have '
'changed since it '
'last saw the '
'config.',
'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/StringsWatchResults'}},
'type': 'object'},
'WatchUnitRelations': {'description': 'WatchUnitRelations '
'returns a '
'StringsWatcher, for '
'each given\n'
'unit, that notifies of '
'changes to the '
'lifecycles of '
'relations\n'
'relevant to that unit. '
'For principal units, '
'this will be all of '
'the\n'
'relations for the '
'application. For '
'subordinate units, '
'only\n'
'relations with the '
"principal unit's "
'application will be '
'monitored.',
'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/StringsWatchResults'}},
'type': 'object'},
'WatchUnitStorageAttachments': {'description': 'WatchUnitStorageAttachments '
'creates '
'watchers for a '
'collection of '
'units,\n'
'each of which '
'can be used to '
'watch for '
'lifecycle '
'changes to the '
'corresponding\n'
"unit's storage "
'attachments.',
'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/StringsWatchResults'}},
'type': 'object'},
'WatchUpgradeSeriesNotifications': {'description': 'WatchUpgradeSeriesNotifications '
'returns a '
'NotifyWatcher '
'for '
'observing '
'changes to '
'upgrade '
'series '
'locks.',
'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/NotifyWatchResults'}},
'type': 'object'},
'WorkloadVersion': {'description': 'WorkloadVersion returns '
'the workload version for '
'all given units or '
'applications.',
'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/StringResults'}},
'type': 'object'}},
'type': 'object'}
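# The pretty-printed dict above is the wire schema for version 18 of the
# 'Uniter' facade: every key under 'properties' names one RPC, and its
# 'Params'/'Result' entries point ($ref) at request/response shapes under
# 'definitions'. The async methods that follow are generated from those
# entries; @ReturnMapping wraps each raw reply in the result type named by
# the schema. A minimal sketch of inspecting the schema at runtime, assuming
# the enclosing class is named UniterFacade and keeps this dict in a
# ``schema`` attribute (names not guaranteed by this file):
#
#     import json
#     defs = UniterFacade.schema['definitions']
#     print(json.dumps(defs['StringsResult'], indent=2))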
@ReturnMapping(StringsResult)
async def APIAddresses(self):
'''
APIAddresses returns the list of addresses used to connect to the API.
Returns -> StringsResult
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='APIAddresses',
version=18,
params=_params)
reply = await self.rpc(msg)
return reply
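# Hedged usage sketch for the zero-argument calls in this facade
# (APIAddresses, APIHostPorts, CloudAPIVersion, CurrentModel, ...). The
# facade must first be bound to an authenticated connection; the
# ``from_connection`` constructor and the ``result`` attribute access are
# assumptions based on the usual libjuju facade pattern, not guaranteed here:
#
#     facade = UniterFacade.from_connection(connection)
#     strings_result = await facade.APIAddresses()
#     addresses = strings_result.result   # list of "host:port" strings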
@ReturnMapping(APIHostPortsResult)
async def APIHostPorts(self):
'''
APIHostPorts returns the API server addresses.
Returns -> APIHostPortsResult
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='APIHostPorts',
version=18,
params=_params)
reply = await self.rpc(msg)
return reply
@ReturnMapping(StringResults)
async def ActionStatus(self, entities=None):
'''
ActionStatus returns the status of Actions by Tags passed in.
entities : typing.Sequence[~Entity]
Returns -> StringResults
'''
if entities is not None and not isinstance(entities, (bytes, str, list)):
raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='ActionStatus',
version=18,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
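# Hedged sketch of the ``entities`` argument that most calls below share: the
# Entities/Entity definitions (declared earlier in this file) carry tag
# strings, so requests are built from tagged entities. The import path and
# tag value are illustrative assumptions:
#
#     from juju.client._definitions import Entity   # assumed location
#     result = await facade.ActionStatus(entities=[Entity(tag='unit-mysql-0')])
#     # StringResults: one status string (or error) per requested tag.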
@ReturnMapping(ActionResults)
async def Actions(self, entities=None):
'''
Actions returns the Actions by Tags passed and ensures that the Unit asking
for them is the same Unit that has the Actions.
entities : typing.Sequence[~Entity]
Returns -> ActionResults
'''
if entities is not None and not isinstance(entities, (bytes, str, list)):
raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='Actions',
version=18,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
@ReturnMapping(ErrorResults)
async def AddMetricBatches(self, batches=None):
'''
AddMetricBatches adds the metrics for the specified unit.
batches : typing.Sequence[~MetricBatchParam]
Returns -> ErrorResults
'''
if batches is not None and not isinstance(batches, (bytes, str, list)):
raise Exception("Expected batches to be a Sequence, received: {}".format(type(batches)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='AddMetricBatches',
version=18,
params=_params)
_params['batches'] = batches
reply = await self.rpc(msg)
return reply
@ReturnMapping(ErrorResults)
async def AddUnitStorage(self, storages=None):
'''
AddUnitStorage validates and creates additional storage instances for units.
Failures on an individual storage instance do not block remaining
instances from being processed.
TODO(juju3) - remove
storages : typing.Sequence[~StorageAddParams]
Returns -> ErrorResults
'''
if storages is not None and not isinstance(storages, (bytes, str, list)):
raise Exception("Expected storages to be a Sequence, received: {}".format(type(storages)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='AddUnitStorage',
version=18,
params=_params)
_params['storages'] = storages
reply = await self.rpc(msg)
return reply
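# Hedged sketch of the ``storages`` argument: per the StorageAddParams and
# StorageConstraints definitions in the schema above, each entry names a unit
# tag, a storage name and the constraints (count/pool/size). Plain dicts are
# shown for illustration; the generated parameter classes could be used
# instead:
#
#     storages = [{'unit': 'unit-postgresql-0',
#                  'name': 'pgdata',
#                  'storage': {'count': 1, 'pool': 'rootfs', 'size': 1024}}]
#     error_results = await facade.AddUnitStorage(storages=storages)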
@ReturnMapping(MachinePortsResults)
async def AllMachinePorts(self, entities=None):
'''
AllMachinePorts returns all opened port ranges for each given
machine (on all networks).
DEPRECATED: clients should switch to the OpenedMachinePortRanges API call
when using the V17+ API.
TODO(achilleasa): remove from V17 once all client references to this API
have been changed to use the new API.
entities : typing.Sequence[~Entity]
Returns -> MachinePortsResults
'''
if entities is not None and not isinstance(entities, (bytes, str, list)):
raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='AllMachinePorts',
version=18,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
@ReturnMapping(ApplicationStatusResults)
async def ApplicationStatus(self, entities=None):
'''
ApplicationStatus returns the status of the Applications and its workloads
if the given unit is the leader.
entities : typing.Sequence[~Entity]
Returns -> ApplicationStatusResults
'''
if entities is not None and not isinstance(entities, (bytes, str, list)):
raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='ApplicationStatus',
version=18,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
@ReturnMapping(StringResults)
async def AssignedMachine(self, entities=None):
'''
AssignedMachine returns the machine tag for each given unit tag, or
an error satisfying params.IsCodeNotAssigned when a unit has no
assigned machine.
entities : typing.Sequence[~Entity]
Returns -> StringResults
'''
if entities is not None and not isinstance(entities, (bytes, str, list)):
raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='AssignedMachine',
version=18,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
@ReturnMapping(StringResults)
async def AvailabilityZone(self, entities=None):
'''
AvailabilityZone returns the availability zone for each given unit, if applicable.
entities : typing.Sequence[~Entity]
Returns -> StringResults
'''
if entities is not None and not isinstance(entities, (bytes, str, list)):
raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='AvailabilityZone',
version=18,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
@ReturnMapping(ErrorResults)
async def BeginActions(self, entities=None):
'''
BeginActions marks the actions represented by the passed in Tags as running.
entities : typing.Sequence[~Entity]
Returns -> ErrorResults
'''
if entities is not None and not isinstance(entities, (bytes, str, list)):
raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='BeginActions',
version=18,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
@ReturnMapping(BoolResults)
async def CanApplyLXDProfile(self, entities=None):
'''
CanApplyLXDProfile is a shim to call the LXDProfileAPIv2 version of this method.
entities : typing.Sequence[~Entity]
Returns -> BoolResults
'''
if entities is not None and not isinstance(entities, (bytes, str, list)):
raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='CanApplyLXDProfile',
version=18,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
@ReturnMapping(StringResults)
async def CharmArchiveSha256(self, urls=None):
'''
CharmArchiveSha256 returns the SHA256 digest of the charm archive
(bundle) data for each charm url in the given parameters.
urls : typing.Sequence[~CharmURL]
Returns -> StringResults
'''
if urls is not None and not isinstance(urls, (bytes, str, list)):
raise Exception("Expected urls to be a Sequence, received: {}".format(type(urls)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='CharmArchiveSha256',
version=18,
params=_params)
_params['urls'] = urls
reply = await self.rpc(msg)
return reply
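# Hedged sketch of the ``urls`` argument used by CharmArchiveSha256 and
# LXDProfileRequired: the CharmURLs wrapper (defined earlier in this file)
# holds one charm URL per entry; the 'url' key and the charm URL shown are
# illustrative assumptions:
#
#     results = await facade.CharmArchiveSha256(urls=[{'url': 'ch:mysql-42'}])
#     # StringResults: one SHA256 digest (or error) per charm URL.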
@ReturnMapping(IntResults)
async def CharmModifiedVersion(self, entities=None):
'''
CharmModifiedVersion returns the most recent CharmModifiedVersion for all given
units or applications.
entities : typing.Sequence[~Entity]
Returns -> IntResults
'''
if entities is not None and not isinstance(entities, (bytes, str, list)):
raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='CharmModifiedVersion',
version=18,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
@ReturnMapping(StringBoolResults)
async def CharmURL(self, entities=None):
'''
CharmURL returns the charm URL for all given units or applications.
entities : typing.Sequence[~Entity]
Returns -> StringBoolResults
'''
if entities is not None and not isinstance(entities, (bytes, str, list)):
raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='CharmURL',
version=18,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
@ReturnMapping(ErrorResults)
async def ClearResolved(self, entities=None):
'''
ClearResolved removes any resolved setting from each given unit.
entities : typing.Sequence[~Entity]
Returns -> ErrorResults
'''
if entities is not None and not isinstance(entities, (bytes, str, list)):
raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='ClearResolved',
version=18,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
@ReturnMapping(ErrorResults)
async def ClosePorts(self, entities=None):
'''
ClosePorts sets the policy of the port range with protocol to be
closed, for all given units.
TODO(juju3) - remove
entities : typing.Sequence[~EntityPortRange]
Returns -> ErrorResults
'''
if entities is not None and not isinstance(entities, (bytes, str, list)):
raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='ClosePorts',
version=18,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
@ReturnMapping(StringResult)
async def CloudAPIVersion(self):
'''
CloudAPIVersion returns the cloud API version, if available.
Returns -> StringResult
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='CloudAPIVersion',
version=18,
params=_params)
reply = await self.rpc(msg)
return reply
@ReturnMapping(CloudSpecResult)
async def CloudSpec(self):
'''
CloudSpec returns the cloud spec used by the model in which the
authenticated unit or application resides.
A check is made beforehand to ensure that the request is made by an entity
that has been granted the appropriate trust.
Returns -> CloudSpecResult
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='CloudSpec',
version=18,
params=_params)
reply = await self.rpc(msg)
return reply
@ReturnMapping(ErrorResults)
async def CommitHookChanges(self, args=None):
'''
CommitHookChanges batches together all required API calls for applying
a set of changes after a hook successfully completes and executes them in a
single transaction.
args : typing.Sequence[~CommitHookChangesArg]
Returns -> ErrorResults
'''
if args is not None and not isinstance(args, (bytes, str, list)):
raise Exception("Expected args to be a Sequence, received: {}".format(type(args)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='CommitHookChanges',
version=18,
params=_params)
_params['args'] = args
reply = await self.rpc(msg)
return reply
@ReturnMapping(ConfigSettingsResults)
async def ConfigSettings(self, entities=None):
'''
ConfigSettings returns the complete set of application charm config
settings available to each given unit.
entities : typing.Sequence[~Entity]
Returns -> ConfigSettingsResults
'''
if entities is not None and not isinstance(entities, (bytes, str, list)):
raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='ConfigSettings',
version=18,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
@ReturnMapping(ModelResult)
async def CurrentModel(self):
'''
CurrentModel returns the name and UUID for the current juju model.
Returns -> ModelResult
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='CurrentModel',
version=18,
params=_params)
reply = await self.rpc(msg)
return reply
@ReturnMapping(ErrorResults)
async def Destroy(self, entities=None):
'''
Destroy advances all given Alive units' lifecycles as far as
possible. See state/Unit.Destroy().
entities : typing.Sequence[~Entity]
Returns -> ErrorResults
'''
if entities is not None and not isinstance(entities, (bytes, str, list)):
raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='Destroy',
version=18,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
@ReturnMapping(ErrorResults)
async def DestroyAllSubordinates(self, entities=None):
'''
DestroyAllSubordinates destroys all subordinates of each given unit.
entities : typing.Sequence[~Entity]
Returns -> ErrorResults
'''
if entities is not None and not isinstance(entities, (bytes, str, list)):
raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='DestroyAllSubordinates',
version=18,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
@ReturnMapping(ErrorResults)
async def DestroyUnitStorageAttachments(self, entities=None):
'''
DestroyUnitStorageAttachments marks each storage attachment of the
specified units as Dying.
entities : typing.Sequence[~Entity]
Returns -> ErrorResults
'''
if entities is not None and not isinstance(entities, (bytes, str, list)):
raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='DestroyUnitStorageAttachments',
version=18,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
@ReturnMapping(ErrorResults)
async def EnsureDead(self, entities=None):
'''
EnsureDead calls EnsureDead on each given entity from state. It
will fail if the entity is not present. If it's Alive, nothing will
happen (see state/EnsureDead() for units or machines).
entities : typing.Sequence[~Entity]
Returns -> ErrorResults
'''
if entities is not None and not isinstance(entities, (bytes, str, list)):
raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='EnsureDead',
version=18,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
@ReturnMapping(ErrorResults)
async def EnterScope(self, relation_units=None):
'''
EnterScope ensures each unit has entered its scope in the relation,
for all of the given relation/unit pairs. See also
state.RelationUnit.EnterScope().
relation_units : typing.Sequence[~RelationUnit]
Returns -> ErrorResults
'''
if relation_units is not None and not isinstance(relation_units, (bytes, str, list)):
raise Exception("Expected relation_units to be a Sequence, received: {}".format(type(relation_units)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='EnterScope',
version=18,
params=_params)
_params['relation-units'] = relation_units
reply = await self.rpc(msg)
return reply
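# Hedged sketch of the ``relation_units`` argument used by EnterScope and
# LeaveScope: RelationUnits wraps RelationUnit items, each pairing a relation
# tag with a unit tag (see the RelationUnit definition earlier in this file;
# the key names and tag formats below are illustrative assumptions):
#
#     pairs = [{'relation': 'relation-wordpress.db#mysql.server',
#               'unit': 'unit-wordpress-0'}]
#     error_results = await facade.EnterScope(relation_units=pairs)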
@ReturnMapping(ErrorResults)
async def FinishActions(self, results=None):
'''
FinishActions saves the result of a completed Action
results : typing.Sequence[~ActionExecutionResult]
Returns -> ErrorResults
'''
if results is not None and not isinstance(results, (bytes, str, list)):
raise Exception("Expected results to be a Sequence, received: {}".format(type(results)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='FinishActions',
version=18,
params=_params)
_params['results'] = results
reply = await self.rpc(msg)
return reply
@ReturnMapping(MeterStatusResults)
async def GetMeterStatus(self, entities=None):
'''
entities : typing.Sequence[~Entity]
Returns -> MeterStatusResults
'''
if entities is not None and not isinstance(entities, (bytes, str, list)):
raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='GetMeterStatus',
version=18,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
@ReturnMapping(StringResults)
async def GetPodSpec(self, entities=None):
'''
GetPodSpec gets the pod specs for a set of applications.
entities : typing.Sequence[~Entity]
Returns -> StringResults
'''
if entities is not None and not isinstance(entities, (bytes, str, list)):
raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='GetPodSpec',
version=18,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
@ReturnMapping(StringBoolResults)
async def GetPrincipal(self, entities=None):
'''
GetPrincipal returns the result of calling PrincipalName() and
converting it to a tag, on each given unit.
entities : typing.Sequence[~Entity]
Returns -> StringBoolResults
'''
if entities is not None and not isinstance(entities, (bytes, str, list)):
raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='GetPrincipal',
version=18,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
@ReturnMapping(StringResults)
async def GetRawK8sSpec(self, entities=None):
'''
GetRawK8sSpec gets the raw k8s specs for a set of applications.
entities : typing.Sequence[~Entity]
Returns -> StringResults
'''
if entities is not None and not isinstance(entities, (bytes, str, list)):
raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='GetRawK8sSpec',
version=18,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
@ReturnMapping(GoalStateResults)
async def GoalStates(self, entities=None):
'''
GoalStates returns information about charm units and relations.
entities : typing.Sequence[~Entity]
Returns -> GoalStateResults
'''
if entities is not None and not isinstance(entities, (bytes, str, list)):
raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='GoalStates',
version=18,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
@ReturnMapping(BoolResults)
async def HasSubordinates(self, entities=None):
'''
HasSubordinates returns whether each given unit has any subordinates.
entities : typing.Sequence[~Entity]
Returns -> BoolResults
'''
if entities is not None and not isinstance(entities, (bytes, str, list)):
raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='HasSubordinates',
version=18,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
@ReturnMapping(StringResults)
async def LXDProfileName(self, entities=None):
'''
LXDProfileName is a shim to call the LXDProfileAPIv2 version of this method.
entities : typing.Sequence[~Entity]
Returns -> StringResults
'''
if entities is not None and not isinstance(entities, (bytes, str, list)):
raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='LXDProfileName',
version=18,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
@ReturnMapping(BoolResults)
async def LXDProfileRequired(self, urls=None):
'''
LXDProfileRequired is a shim to call the LXDProfileAPIv2 version of this method.
urls : typing.Sequence[~CharmURL]
Returns -> BoolResults
'''
if urls is not None and not isinstance(urls, (bytes, str, list)):
raise Exception("Expected urls to be a Sequence, received: {}".format(type(urls)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='LXDProfileRequired',
version=18,
params=_params)
_params['urls'] = urls
reply = await self.rpc(msg)
return reply
@ReturnMapping(ErrorResults)
async def LeaveScope(self, relation_units=None):
'''
LeaveScope signals each unit has left its scope in the relation,
for all of the given relation/unit pairs. See also
state.RelationUnit.LeaveScope().
relation_units : typing.Sequence[~RelationUnit]
Returns -> ErrorResults
'''
if relation_units is not None and not isinstance(relation_units, (bytes, str, list)):
raise Exception("Expected relation_units to be a Sequence, received: {}".format(type(relation_units)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='LeaveScope',
version=18,
params=_params)
_params['relation-units'] = relation_units
reply = await self.rpc(msg)
return reply
@ReturnMapping(LifeResults)
async def Life(self, entities=None):
'''
Life returns the life status of every supplied entity, where available.
entities : typing.Sequence[~Entity]
Returns -> LifeResults
'''
if entities is not None and not isinstance(entities, (bytes, str, list)):
raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='Life',
version=18,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
@ReturnMapping(ErrorResults)
async def LogActionsMessages(self, messages=None):
'''
LogActionsMessages records the log messages against the specified actions.
messages : typing.Sequence[~EntityString]
Returns -> ErrorResults
'''
if messages is not None and not isinstance(messages, (bytes, str, list)):
raise Exception("Expected messages to be a Sequence, received: {}".format(type(messages)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='LogActionsMessages',
version=18,
params=_params)
_params['messages'] = messages
reply = await self.rpc(msg)
return reply
@ReturnMapping(ErrorResults)
async def Merge(self, params=None):
'''
Merge merges in the provided leadership settings. Only leaders for
the given service may perform this operation.
params : typing.Sequence[~MergeLeadershipSettingsParam]
Returns -> ErrorResults
'''
if params is not None and not isinstance(params, (bytes, str, list)):
raise Exception("Expected params to be a Sequence, received: {}".format(type(params)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='Merge',
version=18,
params=_params)
_params['params'] = params
reply = await self.rpc(msg)
return reply
@ReturnMapping(ModelConfigResult)
async def ModelConfig(self):
'''
ModelConfig returns the current model's configuration.
Returns -> ModelConfigResult
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='ModelConfig',
version=18,
params=_params)
reply = await self.rpc(msg)
return reply
@ReturnMapping(StringResult)
async def ModelUUID(self):
'''
ModelUUID returns the model UUID that this unit resides in.
It is implemented here directly as a result of removing it from
embedded APIAddresser *without* bumping the facade version.
It should be blanked when this facade version is next incremented.
Returns -> StringResult
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='ModelUUID',
version=18,
params=_params)
reply = await self.rpc(msg)
return reply
@ReturnMapping(NetworkInfoResults)
async def NetworkInfo(self, bindings=None, relation_id=None, unit=None):
'''
NetworkInfo returns network interfaces/addresses for specified bindings.
bindings : typing.Sequence[str]
relation_id : int
unit : str
Returns -> NetworkInfoResults
'''
if bindings is not None and not isinstance(bindings, (bytes, str, list)):
raise Exception("Expected bindings to be a Sequence, received: {}".format(type(bindings)))
if relation_id is not None and not isinstance(relation_id, int):
raise Exception("Expected relation_id to be a int, received: {}".format(type(relation_id)))
if unit is not None and not isinstance(unit, (bytes, str)):
raise Exception("Expected unit to be a str, received: {}".format(type(unit)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='NetworkInfo',
version=18,
params=_params)
_params['bindings'] = bindings
_params['relation-id'] = relation_id
_params['unit'] = unit
reply = await self.rpc(msg)
return reply
@ReturnMapping(ErrorResults)
async def OpenPorts(self, entities=None):
'''
TODO(juju3) - remove
entities : typing.Sequence[~EntityPortRange]
Returns -> ErrorResults
'''
if entities is not None and not isinstance(entities, (bytes, str, list)):
raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='OpenPorts',
version=18,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
@ReturnMapping(OpenMachinePortRangesByEndpointResults)
async def OpenedMachinePortRangesByEndpoint(self, entities=None):
'''
OpenedMachinePortRangesByEndpoint returns the port ranges opened by each
unit on the provided machines grouped by application endpoint.
entities : typing.Sequence[~Entity]
Returns -> OpenMachinePortRangesByEndpointResults
'''
if entities is not None and not isinstance(entities, (bytes, str, list)):
raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='OpenedMachinePortRangesByEndpoint',
version=18,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
@ReturnMapping(StringResults)
async def PrivateAddress(self, entities=None):
'''
PrivateAddress returns the private address for each given unit, if set.
entities : typing.Sequence[~Entity]
Returns -> StringResults
'''
if entities is not None and not isinstance(entities, (bytes, str, list)):
raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='PrivateAddress',
version=18,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
@ReturnMapping(StringResult)
async def ProviderType(self):
'''
ProviderType returns the provider type used by the current juju
model.
TODO(dimitern): Refactor the uniter to call this instead of calling
ModelConfig() just to get the provider type. Once we have machine
addresses, this might be completely unnecessary though.
Returns -> StringResult
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='ProviderType',
version=18,
params=_params)
reply = await self.rpc(msg)
return reply
@ReturnMapping(StringResults)
async def PublicAddress(self, entities=None):
'''
PublicAddress returns the public address for each given unit, if set.
entities : typing.Sequence[~Entity]
Returns -> StringResults
'''
if entities is not None and not isinstance(entities, (bytes, str, list)):
raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='PublicAddress',
version=18,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
@ReturnMapping(GetLeadershipSettingsBulkResults)
async def Read(self, entities=None):
'''
Read reads leadership settings for the provided service ID. Any
unit of the service may perform this operation.
entities : typing.Sequence[~Entity]
Returns -> GetLeadershipSettingsBulkResults
'''
if entities is not None and not isinstance(entities, (bytes, str, list)):
raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='Read',
version=18,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
@ReturnMapping(SettingsResult)
async def ReadLocalApplicationSettings(self, relation=None, unit=None):
'''
ReadLocalApplicationSettings returns the local application settings for a
particular relation when invoked by the leader unit.
relation : str
unit : str
Returns -> SettingsResult
'''
if relation is not None and not isinstance(relation, (bytes, str)):
raise Exception("Expected relation to be a str, received: {}".format(type(relation)))
if unit is not None and not isinstance(unit, (bytes, str)):
raise Exception("Expected unit to be a str, received: {}".format(type(unit)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='ReadLocalApplicationSettings',
version=18,
params=_params)
_params['relation'] = relation
_params['unit'] = unit
reply = await self.rpc(msg)
return reply
@ReturnMapping(SettingsResults)
async def ReadRemoteSettings(self, relation_unit_pairs=None):
'''
ReadRemoteSettings returns the remote settings of each given set of
relation/local unit/remote unit.
relation_unit_pairs : typing.Sequence[~RelationUnitPair]
Returns -> SettingsResults
'''
if relation_unit_pairs is not None and not isinstance(relation_unit_pairs, (bytes, str, list)):
raise Exception("Expected relation_unit_pairs to be a Sequence, received: {}".format(type(relation_unit_pairs)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='ReadRemoteSettings',
version=18,
params=_params)
_params['relation-unit-pairs'] = relation_unit_pairs
reply = await self.rpc(msg)
return reply
@ReturnMapping(SettingsResults)
async def ReadSettings(self, relation_units=None):
'''
ReadSettings returns the local settings of each given set of
relation/unit.
NOTE(achilleasa): Using this call to read application data is deprecated
and will not work for k8s charms (see LP1876097). Instead, clients should
use ReadLocalApplicationSettings.
relation_units : typing.Sequence[~RelationUnit]
Returns -> SettingsResults
'''
if relation_units is not None and not isinstance(relation_units, (bytes, str, list)):
raise Exception("Expected relation_units to be a Sequence, received: {}".format(type(relation_units)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='ReadSettings',
version=18,
params=_params)
_params['relation-units'] = relation_units
reply = await self.rpc(msg)
return reply
@ReturnMapping(UnitRefreshResults)
async def Refresh(self, entities=None):
'''
Refresh retrieves the latest values for attributes on this unit.
entities : typing.Sequence[~Entity]
Returns -> UnitRefreshResults
'''
if entities is not None and not isinstance(entities, (bytes, str, list)):
raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='Refresh',
version=18,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
@ReturnMapping(RelationResults)
async def Relation(self, relation_units=None):
'''
Relation returns information about all given relation/unit pairs,
including their id, key and the local endpoint.
relation_units : typing.Sequence[~RelationUnit]
Returns -> RelationResults
'''
if relation_units is not None and not isinstance(relation_units, (bytes, str, list)):
raise Exception("Expected relation_units to be a Sequence, received: {}".format(type(relation_units)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='Relation',
version=18,
params=_params)
_params['relation-units'] = relation_units
reply = await self.rpc(msg)
return reply
@ReturnMapping(RelationResults)
async def RelationById(self, relation_ids=None):
'''
RelationById returns information about all given relations,
specified by their ids, including their key and the local
endpoint.
relation_ids : typing.Sequence[int]
Returns -> RelationResults
'''
if relation_ids is not None and not isinstance(relation_ids, (bytes, str, list)):
raise Exception("Expected relation_ids to be a Sequence, received: {}".format(type(relation_ids)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='RelationById',
version=18,
params=_params)
_params['relation-ids'] = relation_ids
reply = await self.rpc(msg)
return reply
@ReturnMapping(RelationUnitStatusResults)
async def RelationsStatus(self, entities=None):
'''
RelationsStatus returns for each unit the corresponding relation and status information.
entities : typing.Sequence[~Entity]
Returns -> RelationUnitStatusResults
'''
if entities is not None and not isinstance(entities, (bytes, str, list)):
raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='RelationsStatus',
version=18,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
@ReturnMapping(ErrorResults)
async def RemoveStorageAttachments(self, ids=None):
'''
RemoveStorageAttachments removes the specified storage
attachments from state.
ids : typing.Sequence[~StorageAttachmentId]
Returns -> ErrorResults
'''
if ids is not None and not isinstance(ids, (bytes, str, list)):
raise Exception("Expected ids to be a Sequence, received: {}".format(type(ids)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='RemoveStorageAttachments',
version=18,
params=_params)
_params['ids'] = ids
reply = await self.rpc(msg)
return reply
@ReturnMapping(ErrorResults)
async def RequestReboot(self, entities=None):
'''
RequestReboot sets the reboot flag on the provided machines
entities : typing.Sequence[~Entity]
Returns -> ErrorResults
'''
if entities is not None and not isinstance(entities, (bytes, str, list)):
raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='RequestReboot',
version=18,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
@ReturnMapping(ResolvedModeResults)
async def Resolved(self, entities=None):
'''
Resolved returns the current resolved setting for each given unit.
entities : typing.Sequence[~Entity]
Returns -> ResolvedModeResults
'''
if entities is not None and not isinstance(entities, (bytes, str, list)):
raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='Resolved',
version=18,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
@ReturnMapping(StringResult)
async def SLALevel(self):
'''
SLALevel returns the model's SLA level.
Returns -> StringResult
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='SLALevel',
version=18,
params=_params)
reply = await self.rpc(msg)
return reply
@ReturnMapping(ErrorResults)
async def SetAgentStatus(self, entities=None):
'''
        SetAgentStatus will set status for agents of Units passed in args; if one
        of the args is not a Unit it will fail.
entities : typing.Sequence[~EntityStatusArgs]
Returns -> ErrorResults
'''
if entities is not None and not isinstance(entities, (bytes, str, list)):
raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='SetAgentStatus',
version=18,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
@ReturnMapping(ErrorResults)
async def SetApplicationStatus(self, entities=None):
'''
SetApplicationStatus sets the status for all the Applications in args if the given Unit is
the leader.
entities : typing.Sequence[~EntityStatusArgs]
Returns -> ErrorResults
'''
if entities is not None and not isinstance(entities, (bytes, str, list)):
raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='SetApplicationStatus',
version=18,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
@ReturnMapping(ErrorResults)
async def SetCharmURL(self, entities=None):
'''
SetCharmURL sets the charm URL for each given unit. An error will
be returned if a unit is dead, or the charm URL is not known.
entities : typing.Sequence[~EntityCharmURL]
Returns -> ErrorResults
'''
if entities is not None and not isinstance(entities, (bytes, str, list)):
raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='SetCharmURL',
version=18,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
@ReturnMapping(ErrorResults)
async def SetRelationStatus(self, args=None):
'''
SetRelationStatus updates the status of the specified relations.
args : typing.Sequence[~RelationStatusArg]
Returns -> ErrorResults
'''
if args is not None and not isinstance(args, (bytes, str, list)):
raise Exception("Expected args to be a Sequence, received: {}".format(type(args)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='SetRelationStatus',
version=18,
params=_params)
_params['args'] = args
reply = await self.rpc(msg)
return reply
@ReturnMapping(ErrorResults)
async def SetState(self, args=None):
'''
SetState sets the state persisted by the charm running in this unit
and the state internal to the uniter for this unit.
args : typing.Sequence[~SetUnitStateArg]
Returns -> ErrorResults
'''
if args is not None and not isinstance(args, (bytes, str, list)):
raise Exception("Expected args to be a Sequence, received: {}".format(type(args)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='SetState',
version=18,
params=_params)
_params['args'] = args
reply = await self.rpc(msg)
return reply
@ReturnMapping(ErrorResults)
async def SetStatus(self, entities=None):
'''
        SetStatus will set status for the entities passed in args. If the entity is
        a Unit it will instead set the status of its agent, to emulate backwards
        compatibility.
entities : typing.Sequence[~EntityStatusArgs]
Returns -> ErrorResults
'''
if entities is not None and not isinstance(entities, (bytes, str, list)):
raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='SetStatus',
version=18,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
@ReturnMapping(ErrorResults)
async def SetUnitStatus(self, entities=None):
'''
        SetUnitStatus sets status for all elements passed in args; the difference
        from SetStatus is that if an entity is a Unit it will set its status instead
        of its agent's.
entities : typing.Sequence[~EntityStatusArgs]
Returns -> ErrorResults
'''
if entities is not None and not isinstance(entities, (bytes, str, list)):
raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='SetUnitStatus',
version=18,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
@ReturnMapping(ErrorResults)
async def SetUpgradeSeriesUnitStatus(self, params=None):
'''
SetUpgradeSeriesUnitStatus sets the upgrade series status of the unit.
If no upgrade is in progress an error is returned instead.
params : typing.Sequence[~UpgradeSeriesStatusParam]
Returns -> ErrorResults
'''
if params is not None and not isinstance(params, (bytes, str, list)):
raise Exception("Expected params to be a Sequence, received: {}".format(type(params)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='SetUpgradeSeriesUnitStatus',
version=18,
params=_params)
_params['params'] = params
reply = await self.rpc(msg)
return reply
@ReturnMapping(ErrorResults)
async def SetWorkloadVersion(self, entities=None):
'''
SetWorkloadVersion sets the workload version for each given unit. An error will
be returned if a unit is dead.
entities : typing.Sequence[~EntityWorkloadVersion]
Returns -> ErrorResults
'''
if entities is not None and not isinstance(entities, (bytes, str, list)):
raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='SetWorkloadVersion',
version=18,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
@ReturnMapping(UnitStateResults)
async def State(self, entities=None):
'''
State returns the state persisted by the charm running in this unit
and the state internal to the uniter for this unit.
entities : typing.Sequence[~Entity]
Returns -> UnitStateResults
'''
if entities is not None and not isinstance(entities, (bytes, str, list)):
raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='State',
version=18,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
@ReturnMapping(LifeResults)
async def StorageAttachmentLife(self, ids=None):
'''
StorageAttachmentLife returns the lifecycle state of the storage attachments
with the specified tags.
ids : typing.Sequence[~StorageAttachmentId]
Returns -> LifeResults
'''
if ids is not None and not isinstance(ids, (bytes, str, list)):
raise Exception("Expected ids to be a Sequence, received: {}".format(type(ids)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='StorageAttachmentLife',
version=18,
params=_params)
_params['ids'] = ids
reply = await self.rpc(msg)
return reply
@ReturnMapping(StorageAttachmentResults)
async def StorageAttachments(self, ids=None):
'''
StorageAttachments returns the storage attachments with the specified tags.
ids : typing.Sequence[~StorageAttachmentId]
Returns -> StorageAttachmentResults
'''
if ids is not None and not isinstance(ids, (bytes, str, list)):
raise Exception("Expected ids to be a Sequence, received: {}".format(type(ids)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='StorageAttachments',
version=18,
params=_params)
_params['ids'] = ids
reply = await self.rpc(msg)
return reply
@ReturnMapping(StatusResults)
async def UnitStatus(self, entities=None):
'''
UnitStatus returns the workload status information for the unit.
entities : typing.Sequence[~Entity]
Returns -> StatusResults
'''
if entities is not None and not isinstance(entities, (bytes, str, list)):
raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='UnitStatus',
version=18,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
@ReturnMapping(StorageAttachmentIdsResults)
async def UnitStorageAttachments(self, entities=None):
'''
UnitStorageAttachments returns the IDs of storage attachments for a collection of units.
entities : typing.Sequence[~Entity]
Returns -> StorageAttachmentIdsResults
'''
if entities is not None and not isinstance(entities, (bytes, str, list)):
raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='UnitStorageAttachments',
version=18,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
@ReturnMapping(ErrorResults)
async def UpdateNetworkInfo(self, entities=None):
'''
UpdateNetworkInfo refreshes the network settings for a unit's bound
endpoints.
entities : typing.Sequence[~Entity]
Returns -> ErrorResults
'''
if entities is not None and not isinstance(entities, (bytes, str, list)):
raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='UpdateNetworkInfo',
version=18,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
@ReturnMapping(ErrorResults)
async def UpdateSettings(self, relation_units=None):
'''
UpdateSettings persists all changes made to the local settings of
all given pairs of relation and unit. Keys with empty values are
considered a signal to delete these values.
TODO(juju3) - remove
relation_units : typing.Sequence[~RelationUnitSettings]
Returns -> ErrorResults
'''
if relation_units is not None and not isinstance(relation_units, (bytes, str, list)):
raise Exception("Expected relation_units to be a Sequence, received: {}".format(type(relation_units)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='UpdateSettings',
version=18,
params=_params)
_params['relation-units'] = relation_units
reply = await self.rpc(msg)
return reply
@ReturnMapping(UpgradeSeriesStatusResults)
async def UpgradeSeriesUnitStatus(self, entities=None):
'''
UpgradeSeriesUnitStatus returns the current preparation status of an
upgrading unit.
If no series upgrade is in progress an error is returned instead.
entities : typing.Sequence[~Entity]
Returns -> UpgradeSeriesStatusResults
'''
if entities is not None and not isinstance(entities, (bytes, str, list)):
raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='UpgradeSeriesUnitStatus',
version=18,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
@ReturnMapping(NotifyWatchResults)
async def Watch(self, entities=None):
'''
        Watch starts a NotifyWatcher for each given entity.
entities : typing.Sequence[~Entity]
Returns -> NotifyWatchResults
'''
if entities is not None and not isinstance(entities, (bytes, str, list)):
raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='Watch',
version=18,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
@ReturnMapping(NotifyWatchResult)
async def WatchAPIHostPorts(self):
'''
WatchAPIHostPorts watches the API server addresses.
Returns -> NotifyWatchResult
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='WatchAPIHostPorts',
version=18,
params=_params)
reply = await self.rpc(msg)
return reply
@ReturnMapping(StringsWatchResults)
async def WatchActionNotifications(self, entities=None):
'''
WatchActionNotifications returns a StringsWatcher for observing
incoming action calls to a unit. See also state/watcher.go
Unit.WatchActionNotifications(). This method is called from
api/uniter/uniter.go WatchActionNotifications().
entities : typing.Sequence[~Entity]
Returns -> StringsWatchResults
'''
if entities is not None and not isinstance(entities, (bytes, str, list)):
raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='WatchActionNotifications',
version=18,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
@ReturnMapping(StringsWatchResults)
async def WatchConfigSettingsHash(self, entities=None):
'''
WatchConfigSettingsHash returns a StringsWatcher that yields a hash
of the config values every time the config changes. The uniter can
save this hash and use it to decide whether the config-changed hook
needs to be run (or whether this was just an agent restart with no
substantive config change).
entities : typing.Sequence[~Entity]
Returns -> StringsWatchResults
'''
if entities is not None and not isinstance(entities, (bytes, str, list)):
raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='WatchConfigSettingsHash',
version=18,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
@ReturnMapping(NotifyWatchResult)
async def WatchForModelConfigChanges(self):
'''
WatchForModelConfigChanges returns a NotifyWatcher that observes
changes to the model configuration.
Note that although the NotifyWatchResult contains an Error field,
it's not used because we are only returning a single watcher,
so we use the regular error return.
Returns -> NotifyWatchResult
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='WatchForModelConfigChanges',
version=18,
params=_params)
reply = await self.rpc(msg)
return reply
@ReturnMapping(NotifyWatchResults)
async def WatchInstanceData(self, entities=None):
'''
WatchInstanceData is a shim to call the LXDProfileAPIv2 version of this method.
entities : typing.Sequence[~Entity]
Returns -> NotifyWatchResults
'''
if entities is not None and not isinstance(entities, (bytes, str, list)):
raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='WatchInstanceData',
version=18,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
@ReturnMapping(NotifyWatchResults)
async def WatchLeadershipSettings(self, entities=None):
'''
WatchLeadershipSettings will block the caller until leadership settings
for the given service ID change.
entities : typing.Sequence[~Entity]
Returns -> NotifyWatchResults
'''
if entities is not None and not isinstance(entities, (bytes, str, list)):
raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='WatchLeadershipSettings',
version=18,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
@ReturnMapping(NotifyWatchResults)
async def WatchMeterStatus(self, entities=None):
'''
entities : typing.Sequence[~Entity]
Returns -> NotifyWatchResults
'''
if entities is not None and not isinstance(entities, (bytes, str, list)):
raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='WatchMeterStatus',
version=18,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
@ReturnMapping(RelationUnitsWatchResults)
async def WatchRelationUnits(self, relation_units=None):
'''
WatchRelationUnits returns a RelationUnitsWatcher for observing
changes to every unit in the supplied relation that is visible to
the supplied unit. See also state/watcher.go:RelationUnit.Watch().
relation_units : typing.Sequence[~RelationUnit]
Returns -> RelationUnitsWatchResults
'''
if relation_units is not None and not isinstance(relation_units, (bytes, str, list)):
raise Exception("Expected relation_units to be a Sequence, received: {}".format(type(relation_units)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='WatchRelationUnits',
version=18,
params=_params)
_params['relation-units'] = relation_units
reply = await self.rpc(msg)
return reply
@ReturnMapping(NotifyWatchResults)
async def WatchStorageAttachments(self, ids=None):
'''
WatchStorageAttachments creates watchers for a collection of storage
attachments, each of which can be used to watch changes to storage
attachment info.
ids : typing.Sequence[~StorageAttachmentId]
Returns -> NotifyWatchResults
'''
if ids is not None and not isinstance(ids, (bytes, str, list)):
raise Exception("Expected ids to be a Sequence, received: {}".format(type(ids)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='WatchStorageAttachments',
version=18,
params=_params)
_params['ids'] = ids
reply = await self.rpc(msg)
return reply
@ReturnMapping(StringsWatchResults)
async def WatchTrustConfigSettingsHash(self, entities=None):
'''
WatchTrustConfigSettingsHash returns a StringsWatcher that yields a
hash of the application config values whenever they change. The
uniter can use the hash to determine whether the actual values have
changed since it last saw the config.
entities : typing.Sequence[~Entity]
Returns -> StringsWatchResults
'''
if entities is not None and not isinstance(entities, (bytes, str, list)):
raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='WatchTrustConfigSettingsHash',
version=18,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
@ReturnMapping(StringsWatchResults)
async def WatchUnitAddressesHash(self, entities=None):
'''
WatchUnitAddressesHash returns a StringsWatcher that yields the
hashes of the addresses for the unit whenever the addresses
change. The uniter can use the hash to determine whether the actual
address values have changed since it last saw the config.
entities : typing.Sequence[~Entity]
Returns -> StringsWatchResults
'''
if entities is not None and not isinstance(entities, (bytes, str, list)):
raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='WatchUnitAddressesHash',
version=18,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
@ReturnMapping(StringsWatchResults)
async def WatchUnitRelations(self, entities=None):
'''
WatchUnitRelations returns a StringsWatcher, for each given
unit, that notifies of changes to the lifecycles of relations
relevant to that unit. For principal units, this will be all of the
relations for the application. For subordinate units, only
relations with the principal unit's application will be monitored.
entities : typing.Sequence[~Entity]
Returns -> StringsWatchResults
'''
if entities is not None and not isinstance(entities, (bytes, str, list)):
raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='WatchUnitRelations',
version=18,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
@ReturnMapping(StringsWatchResults)
async def WatchUnitStorageAttachments(self, entities=None):
'''
WatchUnitStorageAttachments creates watchers for a collection of units,
each of which can be used to watch for lifecycle changes to the corresponding
unit's storage attachments.
entities : typing.Sequence[~Entity]
Returns -> StringsWatchResults
'''
if entities is not None and not isinstance(entities, (bytes, str, list)):
raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='WatchUnitStorageAttachments',
version=18,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
@ReturnMapping(NotifyWatchResults)
async def WatchUpgradeSeriesNotifications(self, entities=None):
'''
WatchUpgradeSeriesNotifications returns a NotifyWatcher for observing changes to upgrade series locks.
entities : typing.Sequence[~Entity]
Returns -> NotifyWatchResults
'''
if entities is not None and not isinstance(entities, (bytes, str, list)):
raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='WatchUpgradeSeriesNotifications',
version=18,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
@ReturnMapping(StringResults)
async def WorkloadVersion(self, entities=None):
'''
WorkloadVersion returns the workload version for all given units or applications.
entities : typing.Sequence[~Entity]
Returns -> StringResults
'''
if entities is not None and not isinstance(entities, (bytes, str, list)):
raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Uniter',
request='WorkloadVersion',
version=18,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
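# --- Hedged usage sketch (not part of the generated client) ---
# Every method on this Uniter facade is a thin async shim that builds the
# ``msg`` dict shown above and awaits ``self.rpc(msg)``. Assuming ``uniter``
# is an already-connected instance of this facade and that ``Entity`` is the
# client's entity wrapper (both assumed here, not defined in this file), a
# call looks like:
#
#     life_results = await uniter.Life(entities=[Entity(tag='unit-mysql-0')])
#     watch_results = await uniter.WatchUnitRelations(entities=[Entity(tag='unit-mysql-0')])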
| {
"content_hash": "70430ffb33c859382d06e481fbd64a2a",
"timestamp": "",
"source": "github",
"line_count": 3918,
"max_line_length": 192,
"avg_line_length": 54.59060745278203,
"alnum_prop": 0.3822737346062856,
"repo_name": "juju/python-libjuju",
"id": "43c5c9a04201aff1e9507d715559f4195c0fe994",
"size": "214021",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "juju/client/old_clients/_client18.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1171"
},
{
"name": "Python",
"bytes": "10522198"
},
{
"name": "Shell",
"bytes": "2161"
}
],
"symlink_target": ""
} |
import datetime
from django.contrib.auth.mixins import LoginRequiredMixin
from django.db.models import Sum
from django.db.models.functions import ExtractYear
from django.shortcuts import get_object_or_404
from django.utils.translation import ugettext_lazy as _
from django.views.generic import ListView
from clarityv2.crm.models import Project
from .mixins import OrderingMixin
from .models import WorkEntry
class WorkEntryList(LoginRequiredMixin, OrderingMixin, ListView):
model = WorkEntry
context_object_name = 'work_entries'
paginate_by = 25
default_ordering = '-date'
table_headers = [{'name': 'notes', 'label': _('Notes'), 'sortable': False},
{'name': 'date', 'label': _('Date'), 'sortable': True},
{'name': 'duration', 'label': _('Duration (hours)'), 'sortable': True},
{'name': 'price', 'label': _('Price'), 'sortable': False}]
def _get_project_filters(self):
return {
'slug': self.kwargs['project_slug'],
'client__contacts__user': self.request.user
}
def get_context_data(self, **kwargs):
qs = Project.objects.filter(**self._get_project_filters())
kwargs['project'] = get_object_or_404(qs)
        # The Sum aggregate comes back as None when there are no work entries
        # yet, so guard before converting the duration to hours.
        duration = WorkEntry.objects.aggregate(Sum('duration'))['duration__sum']
        kwargs['total_hours'] = duration.total_seconds() / 3600 if duration else 0
return super().get_context_data(**kwargs)
def get_queryset(self):
now = datetime.datetime.now()
qs = super().get_queryset()
filters = {'project__%s' % key: value for key, value in self._get_project_filters().items()}
return qs.annotate(year=ExtractYear('date')).filter(**filters).filter(year=now.year)
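# --- Hedged wiring sketch (not part of the original module) ---
# The view pulls ``project_slug`` from the URL kwargs, so the URLconf must
# capture it; the URL prefix and route name below are assumptions for
# illustration only.
#
#     from django.urls import path
#     from .views import WorkEntryList
#
#     urlpatterns = [
#         path('projects/<slug:project_slug>/work-entries/',
#              WorkEntryList.as_view(), name='work-entry-list'),
#     ]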
| {
"content_hash": "df31d80980e2fab6960ca3ddf7676391",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 100,
"avg_line_length": 40.58139534883721,
"alnum_prop": 0.642406876790831,
"repo_name": "Clarity-89/clarityv2",
"id": "580c548b4ec38f9869e83bfb98da3d1f33cf9d96",
"size": "1745",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/clarityv2/work_entries/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "272"
},
{
"name": "Dockerfile",
"bytes": "2230"
},
{
"name": "HTML",
"bytes": "46778"
},
{
"name": "JavaScript",
"bytes": "5460"
},
{
"name": "Python",
"bytes": "131598"
},
{
"name": "SCSS",
"bytes": "18878"
},
{
"name": "Shell",
"bytes": "2008"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, unicode_literals
import os
from django.utils.translation import ugettext_lazy as _
######################
# MEZZANINE SETTINGS #
######################
# The following settings are already defined with default values in
# the ``defaults.py`` module within each of Mezzanine's apps, but are
# common enough to be put here, commented out, for conveniently
# overriding. Please consult the settings documentation for a full list
# of settings Mezzanine implements:
# http://mezzanine.jupo.org/docs/configuration.html#default-settings
# Controls the ordering and grouping of the admin menu.
#
# ADMIN_MENU_ORDER = (
# ("Content", ("pages.Page", "blog.BlogPost",
# "generic.ThreadedComment", (_("Media Library"), "fb_browse"),)),
# ("Site", ("sites.Site", "redirects.Redirect", "conf.Setting")),
# ("Users", ("auth.User", "auth.Group",)),
# )
# A three item sequence, each containing a sequence of template tags
# used to render the admin dashboard.
#
# DASHBOARD_TAGS = (
# ("blog_tags.quick_blog", "mezzanine_tags.app_list"),
# ("comment_tags.recent_comments",),
# ("mezzanine_tags.recent_actions",),
# )
# A sequence of templates used by the ``page_menu`` template tag. Each
# item in the sequence is a three item sequence, containing a unique ID
# for the template, a label for the template, and the template path.
# These templates are then available for selection when editing which
# menus a page should appear in. Note that if a menu template is used
# that doesn't appear in this setting, all pages will appear in it.
# PAGE_MENU_TEMPLATES = (
# (1, _("Top navigation bar"), "pages/menus/dropdown.html"),
# (2, _("Left-hand tree"), "pages/menus/tree.html"),
# (3, _("Footer"), "pages/menus/footer.html"),
# )
# A sequence of fields that will be injected into Mezzanine's (or any
# library's) models. Each item in the sequence is a four item sequence.
# The first two items are the dotted path to the model and its field
# name to be added, and the dotted path to the field class to use for
# the field. The third and fourth items are a sequence of positional
# args and a dictionary of keyword args, to use when creating the
# field instance. When specifying the field class, the path
# ``django.models.db.`` can be omitted for regular Django model fields.
#
# EXTRA_MODEL_FIELDS = (
# (
# # Dotted path to field.
# "mezzanine.blog.models.BlogPost.image",
# # Dotted path to field class.
# "somelib.fields.ImageField",
# # Positional args for field class.
# (_("Image"),),
# # Keyword args for field class.
# {"blank": True, "upload_to": "blog"},
# ),
# # Example of adding a field to *all* of Mezzanine's content types:
# (
# "mezzanine.pages.models.Page.another_field",
# "IntegerField", # 'django.db.models.' is implied if path is omitted.
# (_("Another name"),),
# {"blank": True, "default": 1},
# ),
# )
# Setting to turn on featured images for blog posts. Defaults to False.
#
# BLOG_USE_FEATURED_IMAGE = True
# If True, the django-modeltranslation will be added to the
# INSTALLED_APPS setting.
USE_MODELTRANSLATION = False
########################
# MAIN DJANGO SETTINGS #
########################
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['127.0.0.1']
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'UTC'
# If you set this to True, Django will use timezone-aware datetimes.
USE_TZ = True
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = "tr"
# Supported languages
LANGUAGES = (
('tr', _('Turkish')),
)
# A boolean that turns on/off debug mode. When set to ``True``, stack traces
# are displayed for error pages. Should always be set to ``False`` in
# production. Best set to ``True`` in local_settings.py
DEBUG = False
# Whether a user's session cookie expires when the Web browser is closed.
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
AUTHENTICATION_BACKENDS = ("mezzanine.core.auth_backends.MezzanineBackend",)
# The numeric mode to set newly-uploaded files to. The value should be
# a mode you'd pass directly to os.chmod.
FILE_UPLOAD_PERMISSIONS = 0o644
#############
# DATABASES #
#############
DATABASES = {
"default": {
# Add "postgresql_psycopg2", "mysql", "sqlite3" or "oracle".
"ENGINE": "django.db.backends.",
# DB name or path to database file if using sqlite3.
"NAME": "",
# Not used with sqlite3.
"USER": "",
# Not used with sqlite3.
"PASSWORD": "",
# Set to empty string for localhost. Not used with sqlite3.
"HOST": "",
# Set to empty string for default. Not used with sqlite3.
"PORT": "",
}
}
#########
# PATHS #
#########
# Full filesystem path to the project.
PROJECT_APP_PATH = os.path.dirname(os.path.abspath(__file__))
PROJECT_APP = os.path.basename(PROJECT_APP_PATH)
PROJECT_ROOT = BASE_DIR = os.path.dirname(PROJECT_APP_PATH)
# Every cache key will get prefixed with this value - here we set it to
# the name of the directory the project is in to try and use something
# project specific.
CACHE_MIDDLEWARE_KEY_PREFIX = PROJECT_APP
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = "/static/"
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, STATIC_URL.strip("/"))
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = STATIC_URL + "media/"
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, *MEDIA_URL.strip("/").split("/"))
# Package/module name to import the root urlpatterns from for the project.
ROOT_URLCONF = "%s.urls" % PROJECT_APP
# Put strings here, like "/home/html/django_templates"
# or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
TEMPLATE_DIRS = (os.path.join(PROJECT_ROOT, "templates"),)
################
# APPLICATIONS #
################
INSTALLED_APPS = (
"evrimagaciorg.theme",
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.redirects",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.sitemaps",
"django.contrib.staticfiles",
"mezzanine.pages",
"mezzanine.boot",
"mezzanine.conf",
"mezzanine.core",
"mezzanine.generic",
"mezzanine.blog",
"mezzanine.forms",
"mezzanine.galleries",
"mezzanine.twitter",
"mezzanine.accounts",
# "mezzanine.mobile",
)
# List of processors used by RequestContext to populate the context.
# Each one should be a callable that takes the request object as its
# only parameter and returns a dictionary to add to the context.
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.static",
"django.core.context_processors.media",
"django.core.context_processors.request",
"django.core.context_processors.tz",
"mezzanine.conf.context_processors.settings",
"mezzanine.pages.context_processors.page",
)
# List of middleware classes to use. Order is important; in the request phase,
# these middleware classes will be applied in the order given, and in the
# response phase the middleware will be applied in reverse order.
MIDDLEWARE_CLASSES = (
"mezzanine.core.middleware.UpdateCacheMiddleware",
'django.contrib.sessions.middleware.SessionMiddleware',
# Uncomment if using internationalisation or localisation
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
"mezzanine.core.request.CurrentRequestMiddleware",
"mezzanine.core.middleware.RedirectFallbackMiddleware",
"mezzanine.core.middleware.TemplateForDeviceMiddleware",
"mezzanine.core.middleware.TemplateForHostMiddleware",
"mezzanine.core.middleware.AdminLoginInterfaceSelectorMiddleware",
"mezzanine.core.middleware.SitePermissionMiddleware",
# Uncomment the following if using any of the SSL settings:
# "mezzanine.core.middleware.SSLRedirectMiddleware",
"mezzanine.pages.middleware.PageMiddleware",
"mezzanine.core.middleware.FetchFromCacheMiddleware",
)
# Store these package names here as they may change in the future since
# at the moment we are using custom forks of them.
PACKAGE_NAME_FILEBROWSER = "filebrowser_safe"
PACKAGE_NAME_GRAPPELLI = "grappelli_safe"
#########################
# OPTIONAL APPLICATIONS #
#########################
# These will be added to ``INSTALLED_APPS``, only if available.
OPTIONAL_APPS = (
"debug_toolbar",
"django_extensions",
"compressor",
PACKAGE_NAME_FILEBROWSER,
PACKAGE_NAME_GRAPPELLI,
)
##################
# LOCAL SETTINGS #
##################
# Allow any settings to be defined in local_settings.py which should be
# ignored in your version control system allowing for settings to be
# defined per machine.
# Instead of doing "from .local_settings import *", we use exec so that
# local_settings has full access to everything defined in this module.
import dj_database_url
DATABASES['default'] = dj_database_url.config()
f = os.path.join(PROJECT_APP_PATH, "local_settings.py")
if os.path.exists(f):
exec(open(f, "rb").read())
else:
    # dict.has_key() does not exist on Python 3; ``in`` works on both versions.
    if 'SECRET_KEY' in os.environ:
        SECRET_KEY = os.environ['SECRET_KEY']
    if 'NEVERCACHE_KEY' in os.environ:
        NEVERCACHE_KEY = os.environ['NEVERCACHE_KEY']
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers
ALLOWED_HOSTS = ['*']
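# --- Hedged example (not part of the original settings module) ---
# The LOCAL SETTINGS block above exec()s an optional local_settings.py; a
# minimal machine-specific file could look like the sketch below (all values
# are placeholders, not project defaults).
#
#     DEBUG = True
#     SECRET_KEY = "change-me"
#     NEVERCACHE_KEY = "change-me-too"
#     ALLOWED_HOSTS = ["localhost", "127.0.0.1"]
#     DATABASES = {
#         "default": {
#             "ENGINE": "django.db.backends.sqlite3",
#             "NAME": "dev.db",
#         }
#     }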
####################
# DYNAMIC SETTINGS #
####################
# set_dynamic_settings() will rewrite globals based on what has been
# defined so far, in order to provide some better defaults where
# applicable. We also allow this settings module to be imported
# without Mezzanine installed, as the case may be when using the
# fabfile, where setting the dynamic settings below isn't strictly
# required.
try:
from mezzanine.utils.conf import set_dynamic_settings
except ImportError:
pass
else:
set_dynamic_settings(globals())
| {
"content_hash": "148f77ee74eed7571b7d47fa8845213b",
"timestamp": "",
"source": "github",
"line_count": 334,
"max_line_length": 79,
"avg_line_length": 35.38622754491018,
"alnum_prop": 0.6948980455199255,
"repo_name": "gungorbudak/evrimagaciorg",
"id": "67b9977c220a34643b9cb12d9f06ce32b6fccc96",
"size": "11820",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "evrimagaciorg/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "6933"
},
{
"name": "Python",
"bytes": "16900"
}
],
"symlink_target": ""
} |
from bisect import bisect
import hashlib
from math import ceil
import sys
if sys.version_info[0] >= 3:
_ord = lambda x: x
else:
_ord = ord
class NodeRing(object):
def __init__(self, nodes, weights=None, virtual_count=None):
assert nodes
self.ring = {}
self.sorted_keys = []
self.weights = weights or {}
# If number of virtual nodes per real node is not given aim for 1000 nodes in
# total. That will provide a fairly decent distribution without too much overhead
# when creating the circle or adding/removing nodes.
self.virtual_count = virtual_count if virtual_count else int(ceil(1000.0 / len(nodes)))
self.add_nodes(nodes)
def add_node(self, node, weight=None):
if weight:
self.weights[node] = weight
self.add_nodes([node])
def remove_node(self, node):
node_keys = set(self.keys_for_node(node))
self.sorted_keys = [key for key in self.sorted_keys if key not in node_keys]
for node_key in node_keys:
del self.ring[node_key]
self.weights.pop(node, None)
def keys_for_node(self, node):
return [generate_key("{node}-{i}".format(node=node, i=i))
for i in range(self.weights.get(node, 1) * self.virtual_count)]
def add_nodes(self, nodes):
for node in nodes:
for key in self.keys_for_node(node):
self.ring[key] = node
self.sorted_keys.append(key)
self.sorted_keys.sort()
def get_node(self, string_key):
if not self.sorted_keys:
return None
key = generate_key(string_key)
pos = bisect(self.sorted_keys, key)
pos %= len(self.sorted_keys)
return self.ring[self.sorted_keys[pos]]
def hash_digest(key):
m = hashlib.md5()
m.update(bytes(key.encode('utf-8')))
return [_ord(b) for b in m.digest()]
def generate_key(key):
byte_key = hash_digest(key)
return (byte_key[3] << 24) | (byte_key[2] << 16) | (byte_key[1] << 8) | byte_key[0]
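# --- Hedged usage sketch (not part of the original module) ---
# A minimal illustration of the consistent-hash ring above; the node names
# and keys are made up for the example.
if __name__ == '__main__':
    ring = NodeRing(['cache-a', 'cache-b', 'cache-c'], weights={'cache-a': 2})
    # The same key always lands on the same node while the ring is unchanged.
    print(ring.get_node('user:42'))
    # Removing a node only remaps the keys that hashed to its positions.
    ring.remove_node('cache-b')
    print(ring.get_node('user:42'))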
| {
"content_hash": "d4ea5895cf6a213ae16c0189a594782c",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 95,
"avg_line_length": 30,
"alnum_prop": 0.5985507246376811,
"repo_name": "tobgu/qcache-client",
"id": "28ebe7dea9b068f9d8d9c96341a2b12be481dd21",
"size": "2070",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qclient/node_ring.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "37792"
},
{
"name": "Shell",
"bytes": "425"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
from .utils import ensure_asyncio
ensure_asyncio()
import asyncio
from .ddp_client import DDPClient
from .pubsub.future import Future
__all__ = ['ConcurrentDDPClient']
class ConcurrentDDPClient(object):
def __init__(self, url, debug=False):
self._client = None
self._condition = threading.Condition()
self._loop = None
self._ready = False
self._thread = threading.Thread(
target=self._run,
name='DDPClient',
args=(url, debug),
)
def _run(self, url, debug):
self._loop = asyncio.new_event_loop()
asyncio.set_event_loop(self._loop)
self._client = DDPClient(self._loop, url, debug=debug)
self._client.open()
with self._condition:
self._ready = True
self._condition.notify_all()
self._loop.run_forever()
def start(self):
with self._condition:
self._thread.start()
while not self._ready:
self._condition.wait()
def stop(self):
self._loop.stop()
def join(self):
self._thread.join()
self._loop.close()
def call(self, method, *params):
async_future = asyncio.Future(loop=self._loop)
self._call_soon(self._client.call, async_future, method, *params)
future = Future()
def callback(async_future):
future.set(async_future.result())
async_future.add_done_callback(callback)
return future
def _call_soon(self, *args, **kwargs):
return self._loop.call_soon_threadsafe(*args, **kwargs)
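# --- Hedged usage sketch (not part of the original module) ---
# Assumes a DDP server is reachable at the URL below and that the pubsub
# Future returned by call() exposes a blocking getter; both are assumptions
# for illustration, not guarantees about the real API.
#
#     client = ConcurrentDDPClient('ws://localhost:3000/websocket')
#     client.start()                     # spawn the DDPClient thread and its event loop
#     future = client.call('echo', 'hello')
#     ...                                # read the result off ``future`` when it resolves
#     client.stop()
#     client.join()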
| {
"content_hash": "e10f79786b2e28e6df609c84b2f8d231",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 73,
"avg_line_length": 27.523809523809526,
"alnum_prop": 0.5963091118800461,
"repo_name": "foxdog-studios/pyddp",
"id": "223f45d1c15f74969bccb6790f811d42f303aa9e",
"size": "2338",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "ddp/concurrent_ddp_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "109"
},
{
"name": "Python",
"bytes": "182982"
},
{
"name": "Shell",
"bytes": "3484"
}
],
"symlink_target": ""
} |
import math
import time
from monotonic import monotonic as _now
class SpeedLimit(object):
"""Speed/limiting iterator wrapper object.
A wrapper object that uses the `token bucket`_ algorithm to limit the
rate at which values comes out of an iterable. This can be used to limit
the consumption speed of iteration of some other iterator (or iterable).
.. _token bucket: http://en.wikipedia.org/wiki/Token_bucket
"""
def __init__(self,
# How many items to yield from the provided
# wrapped iterator (per second).
items_per_second,
# Used to simulate a thread with its own 'tic rate'. Making
# this smaller affects the accuracy of the 'tic' calculation,
# which affects the accuracy of consumption (and delays).
refresh_rate_seconds=0.01,
# How *full* the initial bucket is.
initial_bucket_size=1,
# Made a keyword argument, so one could replace this
# with a eventlet.sleep or other idling function...
sleep_func=time.sleep):
self._refresh_rate_seconds = refresh_rate_seconds
self._bucket = (items_per_second *
refresh_rate_seconds * initial_bucket_size)
self._items_per_tic = items_per_second * refresh_rate_seconds
self._next_fill = _now() + refresh_rate_seconds
self._sleep = sleep_func
def _check_fill(self):
# Fill the bucket based on elapsed time.
#
# This simulates a background thread...
now = _now()
if now > self._next_fill:
d = now - self._next_fill
tics = int(math.ceil(d / self._refresh_rate_seconds))
self._bucket += tics * self._items_per_tic
self._next_fill += tics * self._refresh_rate_seconds
def speed_limit_iter(self, itr, chunk_size_cb=None):
"""Return an iterator/generator which limits after each iteration.
:param itr: an iterator to wrap
:param chunk_size_cb: a function that can calculate the
size of each chunk (if none provided this
defaults to 1)
"""
for chunk in itr:
if chunk_size_cb is None:
sz = 1
else:
sz = chunk_size_cb(chunk)
self._check_fill()
if sz > self._bucket:
now = _now()
tics = int((sz - self._bucket) / self._items_per_tic)
tm_diff = self._next_fill - now
secs = tics * self._refresh_rate_seconds
if tm_diff > 0:
secs += tm_diff
self._sleep(secs)
self._check_fill()
self._bucket -= sz
yield chunk
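# --- Hedged usage sketch (not part of the original module) ---
# Wraps an ordinary iterable so it is consumed at roughly five items per
# second; the numbers are arbitrary for the example.
if __name__ == '__main__':
    limiter = SpeedLimit(5)
    start = _now()
    for _item in limiter.speed_limit_iter(range(20)):
        pass
    print('consumed 20 items in %.1fs' % (_now() - start))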
| {
"content_hash": "f2eae0f77fcf6f2e3dc31a1958eeea6b",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 78,
"avg_line_length": 40.50704225352113,
"alnum_prop": 0.5458970792767733,
"repo_name": "harlowja/speedlimit",
"id": "0d68511048da9c739fafea84c298cfc663e38b5d",
"size": "3653",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "speedlimit/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6558"
}
],
"symlink_target": ""
} |
from vsa.model.extent import Extent
from vsa.infra.infra import printsz, txt2size, tstint
from vsa.infra.params import ObjState
class SanVolume(Extent):
child_obj=['inluns']
set_params=Extent.set_params+['size','dr','stripes','stripesize']
newonly_fields=['readahead','stripes','stripesize']
#show_columns=[' ','Name','Size','Mount','InLUNs','Snaps']
#ui_buttons=[add_btn,del_btn,mapto_btn,addtotgt_btn,snap_btn,extmon_btn]
def __init__(self,guid='',name='',provider=None,size=0,auto=False):
"""
The description of __init__ comes here.
@param guid
@param name
@param provider
@param size
@param auto
@return
"""
Extent.__init__(self,guid,name,'Logical Volume')
self.exttype='virtual'
self.provider=provider
self.lvname=''
self.capabilities=[] # enabled cap: mirror/replica/backup/wcache/cdp/..
self.snapgroup=None # snapshot TBD
self.primordial=0 # primary extent, based on physical HW (e.g. not a logical volume)
self.creator=None
self.mount=''
self.size=size
self.auto=auto
self.dr=False
self.manual=False
self.stripes=0 # number of stripes to scatter
self.stripesize=8 # default stripe size 8kb
def ui_getrow(self):
"""
The description of ui_getrow comes here.
@return
"""
return [self.locked*'L',self.name,printsz(self.size),self.mount,len(self.usedinluns),'0']
def set_size(self,san,key,val='',test=0):
"""
The description of set_size comes here.
@param san
@param key
@param val
@param test
@return
"""
if self.parent and self.parent.private and san.runmode:
return (1, 'cannot create volumes on private pool %s' % self.parent.name)
sign=''
# allow sign only in running state
if self.state == ObjState.running and len(val) > 1 and val[0] in ['+','-']:
sign=val[0]
val=val[1:]
(e,v)=txt2size(val)
if e:
return (e,'volume size error,'+v)
if val==v:
return (0,'')
if self.size == v and not sign:
return (0,'')
if self.state == ObjState.created:
if v > self.parent.free_space:
return (1,'Volume size is greater than volume group free space (%s)' % printsz(self.parent.free_space))
if not test : self.size=v
elif self.state==ObjState.running:
if test : return (0, '')
if sign=='+': v=self.size+v
elif sign=='-': v=self.size-v
if v > self.size : # extend
if v > self.parent.free_space + self.size:
return (1,'New volume size is greater than volume group free space (%s) + current size (%s)' % ( printsz(self.parent.free_space), printsz(self.size) ))
return self._extend(v)
elif v < self.size : # reduce
if v < 1:
return (1,'New volume size cannot be lower than 1M')
return self._reduce(v)
else:
return (1,'cannot change size at current state')
return (0,'')
def _extend(self, size):
"""
The description of _extend comes here.
@param size
@return
"""
e,r = self.provider.extend_volume(self, size)
if e:
return e,r
self._update_in_luns()
return (0,'')
def _reduce(self, size):
"""
The description of _reduce comes here.
@param size
@return
"""
e,r = self.provider.reduce_volume(self, size)
if e:
return e,r
self._update_in_luns()
return (0,'')
def _update_in_luns(self):
"""
The description of _update_in_luns comes here.
@return
"""
for lun in self.usedinluns:
lun.update_devfile()
def set_stripes(self,san,key,val='',test=0):
"""
The description of set_stripes comes here.
@param san
@param key
@param val
@param test
@return
"""
v = tstint(val)
if v < 0 :
return (1,'Invalid number of stripes')
if v > len(self.parent.slaves):
return (1, 'Number of stripes must not exceed number of physical volumes')
if not test :
self.stripes = v
return (0,'')
def delete(self, force=False):
"""
The description of delete comes here.
@param force
@return
"""
if self.state == ObjState.created:
return (0,'')
if self.parent.private and not force:
return (1,'this is a private volume. use force to delete')
if self.auto and not force:
return (1,'Cannot Delete Auto Generated Volumes')
self.state = ObjState.delete
(e,o) = self.provider.del_volume(self)
if e:
return (e,o)
if self in self.parent.usedby:
self.parent.usedby.remove(self)
return (e,o)
def update(self,flags=''):
"""
The description of update comes here.
@param flags
@return
"""
if self.state==ObjState.created and self.size:
self.manual=True
(e,pvds)=self.parent.find_space(self.size,self.dr)
if e : return (e,pvds)
# TBD add DRBD in case of .dr
for p in pvds:
(e,r)=p.add_volume(self)
print e,r
if e: return (e,r)
# self._flush()
return (0,'')
def show(self,mode=0,level=0,ident=''):
"""
The description of show comes here.
@param mode
@param level
@param ident
@return
"""
tmp = '%sv%-3d %-30s %6d %10s %-10s' % \
(ident,self.idx,self.name,len(self.basedon),printsz(self.size),self.exttype)
if level>0:
tmp+='\n'+ident+' Based on:'
for p in self.basedon : tmp+='\n'+p.show(mode,level-1,ident+' ')
tmp+='\n'
return tmp
| {
"content_hash": "f6eee0ec8fa293e711b0248bf13288b8",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 171,
"avg_line_length": 32.614583333333336,
"alnum_prop": 0.525710635579687,
"repo_name": "mellanox-openstack/vsa",
"id": "103416e906397f0ed78c9b9ba4218219f83b9e0d",
"size": "6901",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/vsa/model/san_volume.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "45402"
},
{
"name": "JavaScript",
"bytes": "981113"
},
{
"name": "Perl",
"bytes": "1608"
},
{
"name": "Python",
"bytes": "624875"
},
{
"name": "Shell",
"bytes": "64195"
}
],
"symlink_target": ""
} |
"""Fichier contenant le contexte d'édition d'une carte d'étendue d'eau."""
import re
from primaires.interpreteur.contexte import Contexte
# Constants
RE_SIMPLE_REL = re.compile(r"^([A-Pa-p])([0-9]+)")
RE_SIMPLE_ABS = re.compile(r"^(-?[0-9]+)\.(-?[0-9]+)")
class CarteEtendue(Contexte):
"""Contexte permettant d'e voir et éditer la carte d'une étendue d'eau.
Grâce à ce contexte, voir la carte d'une étendue d'eau sur
différents plans (elle peut être assez grande) et ajouter des
obstacles ou des liens doit être facile. Au lieu d'utiliser les
coordonnées absolues, le contexte est programmé pour utiliser une
grille de repérage simple (15 lignes de A à P et 20 colonnes de
1 à 20) qui permet de préciser facilement un point repéré sur la
carte, ou une suite de points.
Voici les choses que l'on doit pouvoir faire :
Place un obstacle "forêt" sur le point A8
Trace une côte de type "falaise" de A1 à C15
Trace un lien entre l'étendue A et B de B6 à L6
Supprime l'obstacle A5
Les coordonnées absolues peuvent être utilisées également. Dans
ce cas elles sont séparées par un point :
Trace une côte de type "montagne" de 20.-15 à 300.-10
"""
nom = "salle:etendue:carte"
def __init__(self, pere, x=None, y=None):
"""Constructeur du contexte"""
Contexte.__init__(self, pere)
self.opts.prompt_prf = ""
self.opts.prompt_clr = ""
self.opts.nl = False
self.etendue = None
self.x = x
self.y = y
self.nb_lignes = 16
self.nb_col = 30
def actualiser(self):
"""Actualise le contexte (affiche simplement l'accueil)."""
self.pere << self.accueil()
def accueil(self):
"""Message d'accueil du contexte"""
if self.x is None or self.y is None:
self.x, self.y = min(self.etendue.points.keys())
etendue = self.etendue
x, y = self.x, self.y
nb_lignes = self.nb_lignes
nb_col = self.nb_col
limite_x = x + nb_col
limite_y = y - nb_lignes
res = "|tit|Carte de l'étendue {} :|ff|\n".format(
self.etendue.cle)
res += "\nCoin supérieur gauche : X={}, Y={}".format(
self.x, self.y)
res += "\nOptions disponibles :"
res += "\n |cmd|/?|ff| pour obtenir de l'aide général sur ce contexte"
res += "\n |cmd|/?d|ff| pour apprendre à se déplacer sur la carte"
res += "\n |cmd|/?o|ff| pour apprendre à manipuler les obstacles"
res += "\n |cmd|/?l|ff| pour apprendre à manipuler les liens"
lignes = []
haut = " 1 3 5 7 9 11 13 15 17 19 21 23 " \
"25 27 29 "
lignes.append(haut)
points = etendue.points.copy()
liens = etendue.liens.copy()
lettres = ["A", "B", "C", "D", "E", "F", "G", "H", "I", "J", \
"K", "L", "M", "N", "O", "P"]
for i in range(y, limite_y, -1):
ligne = lettres[0]
for j in range(x, limite_x):
ligne += " "
point = points.get((j, i))
if point is None:
lien = liens.get((j, i))
if lien is None:
ligne += " "
else:
ligne += "+"
else:
if hasattr(point, "nom"):
ligne += point.nom[0].capitalize()
elif hasattr(point, "nom_terrain"):
ligne += point.nom_terrain[0].capitalize()
else:
ligne += "?"
ligne += " " + lettres[0]
lignes.append(ligne)
del lettres[0]
lignes.append(haut)
res += "\n\n" + "\n".join(lignes)
return res
def interpreter(self, msg):
"""Méthode d'interprétation du contexte"""
options = {
"q": self.opt_quitter,
"i": self.opt_info,
"a": self.opt_placer,
"d": self.opt_supprimer,
"l": self.opt_lier,
}
if msg.startswith("/"):
opt = msg.split(" ")[0][1:].lower()
reste = " ".join(msg.split(" ")[1:])
if opt in options:
return options[opt](reste)
else:
self.pere << "|err|Option inconnue.|ff|"
elif msg and msg[0].lower() in "oens":
self.deplacer(msg)
elif msg:
self.pere << "|err|Déplacement invalide. Entrez |cmd|/?d|err| " \
"pour de l'aide.|ff|"
else:
self.actualiser()
def deplacer(self, msg):
"""Déplace la carte dans la direction indiquée.
Le paramètre 'msg' peut contenir différents formats :
s -- une page vers le sud
s.5 -- 5 cases vers le sud
s*5 -- 5 pages vers le sud
"""
msg = msg.lower()
directions = {
"o": (-1, 0),
"n": ( 0, 1),
"e": ( 1, 0),
"s": ( 0, -1),
}
direction = directions[msg[0]]
x, y = direction
msg = msg[1:]
if msg == "":
x *= self.nb_col
y *= self.nb_lignes
elif msg[0] == ".":
try:
nb = int(msg[1:])
assert nb > 0
except (ValueError, AssertionError):
self.pere << "|err|Le nombre spécifié est invalide.|ff|"
return
else:
x *= nb
y *= nb
elif msg[0] == "*":
try:
nb = int(msg[1:])
assert nb > 0
except (ValueError, AssertionError):
self.pere << "|err|Le nombre spécifié est invalide.|ff|"
return
else:
x *= nb * self.nb_col
y *= nb * self.nb_lignes
else:
self.pere << "|err|Syntaxe invalide. Entrez |cmd|/?d|err| " \
"pour de l'aide.|ff|"
return
self.x += x
self.y += y
self.actualiser()
def opt_quitter(self, reste):
"""Quitte le contexte."""
self.fermer()
self.pere << "Fermeture de la carte.|ff|"
def opt_info(self, reste):
"""Cherche à donner l'information sur le point précisé."""
coords, message = self.point_unique(reste)
if coords is None:
self.pere << message
return
etendue = self.etendue
points = etendue.points
msg = "Information sur le point {}.{} :\n".format(*coords)
point = points.get(coords)
lien = etendue.liens.get(coords)
if point:
if hasattr(point, "nom_terrain"):
msg += "\n Salle : {}".format(point.ident)
msg += "\n Terrain : {}".format(point.nom_terrain)
else:
msg += "\n Obstacle de type {}".format(point.nom)
elif lien:
msg += "\n Lien vers l'étendue {}".format(lien.cle)
else:
self.pere << "Ce point est vide ou inexistant."
return
self.pere << msg
def opt_placer(self, reste):
"""Place un nouveau point sur la carte."""
coords, message = self.point_unique(reste)
if coords is None:
self.pere << message
return
etendue = self.etendue
message = message.strip()
terrain = importeur.salle.get_terrain(message)
if not terrain:
self.pere << "|err|Terrain {} inconnu.|ff|".format(repr(message))
return
obstacle = importeur.salle.obstacles[terrain.nom]
if coords in etendue.obstacles:
del etendue.obstacles[coords]
if coords in etendue.liens:
del etendue.liens[coords]
etendue.ajouter_obstacle(coords, obstacle)
self.actualiser()
def opt_lier(self, reste):
"""Ajoute un lien à l'étendue."""
coords, message = self.point_unique(reste)
if coords is None:
self.pere << message
return
etendue = self.etendue
message = message.strip()
try:
liee = importeur.salle.etendues[message.lower()]
except KeyError:
self.pere << "|err|Étendue {} inconnue.|ff|".format(
repr(message))
return
if coords in etendue.obstacles:
del etendue.obstacles[coords]
if coords in etendue.cotes:
del etendue.cotes[coords]
etendue.ajouter_lien(coords, liee)
self.actualiser()
def opt_supprimer(self, reste):
"""Supprime un point existant sur la carte."""
coords, message = self.point_unique(reste)
if coords is None:
self.pere << message
return
etendue = self.etendue
if coords in etendue.obstacles:
etendue.supprimer_obstacle(coords)
elif coords in etendue.cotes:
salle = etendue.cotes[coords]
salle.etendue = None
del etendue.cotes[coords]
elif coords in etendue.liens:
etendue.supprimer_lien(coords)
else:
self.pere << "|err|Ce point n'a pu être trouvé dans l'étendue.|ff|"
return
self.actualiser()
def point_unique(self, msg):
"""Retourne le point unique identifié par msg.
Le point peut être identifié de deux manières :
Soit en position relative (A8, C13, L17)
Soit en position absolue (5.-2)
On retourne :
(None, None) -- le point n'a pas été trouvé
((x, y), reste) -- le point si trouvé
"""
res = RE_SIMPLE_REL.search(msg)
if res is None:
res = RE_SIMPLE_ABS.search(msg)
if res is None:
return (None, "|err|Syntaxe invalide. Consultez " \
"|cmd|/?l|err| pour de l'aide.|ff|")
else:
x, y = res.groups()
try:
x = int(x)
y = int(y)
except ValueError:
return (None, "|err|Syntaxe invalide. Consultez " \
"|cmd|/?l|err| pour de l'aide.|ff|")
else:
return ((x, y), msg[res.end():])
else:
lettre, nombre = res.groups()
lettres = "abcdefghijklmnop"
x = int(nombre) - 1
y = lettres.index(lettre.lower())
if x < 0:
return (None, "|err|Syntaxe invalide. Consultez " \
"|cmd|/?l|err| pour de l'aide.|ff|")
else:
return ((self.x + x, self.y - y), msg[res.end():])
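# A small standalone sketch (not part of the original file) showing how the
# two regular expressions defined above distinguish the supported coordinate
# notations: a relative reference such as "A8" versus an absolute one such
# as "5.-2".
#
#     import re
#     RE_SIMPLE_REL = re.compile(r"^([A-Pa-p])([0-9]+)")
#     RE_SIMPLE_ABS = re.compile(r"^(-?[0-9]+)\.(-?[0-9]+)")
#
#     print(RE_SIMPLE_REL.search("A8").groups())    # ('A', '8')
#     print(RE_SIMPLE_ABS.search("5.-2").groups())  # ('5', '-2')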
| {
"content_hash": "cfa91f37e59a5ce9def3b4d91945f1e1",
"timestamp": "",
"source": "github",
"line_count": 323,
"max_line_length": 79,
"avg_line_length": 33.68111455108359,
"alnum_prop": 0.49646107178968657,
"repo_name": "stormi/tsunami",
"id": "4660de60ece1d4bab6a69594f4b6d3968f548251",
"size": "12509",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/primaires/salle/contextes/carte.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7188300"
},
{
"name": "Ruby",
"bytes": "373"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from cloudmesh.util.config import read_yaml_config
from cloudmesh.config.cm_config import cm_config_server
from cloudmesh.provisioner.baremetal_status import BaremetalStatus
import requests
import json
from time import sleep
import threading
from cloudmesh_base.logger import LOGGER
#
# SETTING UP A LOGGER
#
log = LOGGER(__file__)
class CobblerRestAPI:
"""Cobbler REST Service API
This class provides access to the cobbler provisioning API via its REST service.
"""
def __init__(self):
self.server_url = self.get_server_url()
self.bm_status = BaremetalStatus()
def get_cobbler_distro_list(self):
"""
shortcut for get cobbler distro list
"""
return self.get_cobbler_object_list("distro")
def get_cobbler_profile_list(self):
"""
shortcut for get cobbler profile list
"""
return self.get_cobbler_object_list("profile")
def get_cobbler_system_list(self):
"""
shortcut for get cobbler system list
"""
return self.get_cobbler_object_list("system")
def get_cobbler_kickstart_list(self):
"""
shortcut for get cobbler kickstart list
"""
return self.get_cobbler_object_list("kickstart")
def get_cobbler_iso_list(self):
"""
shortcut for get cobbler iso file list
"""
return self.get_cobbler_object_list("iso")
def get_cobbler_object_list(self, cobbler_object):
"""list the cobbler objects.
:param string cobbler_object: one of the cobbler objects; currently supports four objects, 'distro', 'profile', 'system', 'kickstart'
:return: a list of the form ['name1', 'name2', ] of corresponding objects if cobbler_object is a valid object, otherwise None
:rtype: list
"""
url = "/cm/v1/cobbler/{0}s".format(cobbler_object)
rest_data = self.request_rest_api("get", url)
return rest_data["data"] if rest_data["result"] else None
def get_cobbler_distro_children(self, name):
"""
shortcut for get cobbler children of distro *name*
"""
return self.get_cobbler_object_children("distro", name)
def get_cobbler_profile_children(self, name):
"""
shortcut for get cobbler children of profile *name*
"""
return self.get_cobbler_object_children("profile", name)
def get_cobbler_object_children(self, cobbler_object, name):
"""list the children of cobbler objects.
:param string cobbler_object: one of the cobbler objects; currently supports two objects, 'distro' and 'profile'
:return: a list of the form ['name1', 'name2', ] of corresponding objects if cobbler_object is a valid object, otherwise None
:rtype: list
"""
if cobbler_object in ["distro", "profile"]:
url = "/cm/v1/cobbler/{0}s/{1}/child".format(cobbler_object, name)
rest_data = self.request_rest_api("get", url)
return rest_data["data"] if rest_data["result"] else None
return None
def get_cobbler_profile_based_kickstart(self, ks_filename):
"""
get cobbler profile defined with the kickstart filename *ks_filename*
"""
url = "/cm/v1/cobbler/kickstarts/{0}/profile".format(ks_filename)
rest_data = self.request_rest_api("get", url)
return rest_data["data"] if rest_data["result"] else None
def get_cobbler_distro_report(self, name):
"""
shortcut for get cobbler distro report
"""
return self.get_cobbler_object_report("distro", name)
def get_cobbler_profile_report(self, name):
"""
shortcut for get cobbler profile report
"""
return self.get_cobbler_object_report("profile", name)
def get_cobbler_system_report(self, name):
"""
shortcut for get cobbler system report
"""
return self.get_cobbler_object_report("system", name)
def get_cobbler_kickstart_report(self, name):
"""shortcut for get cobbler kickstart report
:param string name: the filename of the kickstart file; it supports UNIX wildcards
:return: a list of the format [{"name": "full name", "contents": [lines...]}, {}]
"""
return self.get_cobbler_object_report("kickstart", name)
def get_cobbler_object_report(self, cobbler_object, name):
"""get the detail report of cobbler object
:param string cobbler_object: one of the cobbler objects; currently supports four objects, 'distro', 'profile', 'system', 'kickstart'
:param string name: the name of the cobbler object. It supports UNIX wildcards
:return: a list of corresponding objects with full names if cobbler_object is a valid object and the named object exists, otherwise None
The format of the result is: [{"name": "full_name", "field1": "value1",}, {}]
"""
url = "/cm/v1/cobbler/{0}s/{1}".format(cobbler_object, name)
rest_data = self.request_rest_api("get", url)
return [v["data"] for v in rest_data["data"]] if rest_data["result"] else None
def add_cobbler_distro(self, data):
"""add a distribution to cobbler.
:param dict data: a json data structure. The format is {"name": "your distro name", "url": "full url of iso"}
:return: a dict {"distro": "name", "profile": "name"} if the add distro operation succeeded, otherwise None
"""
return self.add_cobbler_object("distro", data, flag_return_data=True)
def add_cobbler_profile(self, data):
"""add a profile to cobbler.
:param dict data: a json data structure. The format is {"name": "your profile name", "distro": "distro", "kickstart": "kickstart.file",}
:return: True if the add profile operation succeeded, otherwise False
"""
return self.add_cobbler_object("profile", data)
def add_cobbler_system(self, data):
"""add a system to cobbler.
:param dict data: a json data structure. The format is {"name": "your system name", "profile": "profile", "power": {}, "interfaces": [{}, {},]}
:return: True if the add system operation succeeded, otherwise False
"""
return self.add_cobbler_object("system", data)
def add_cobbler_kickstart(self, data):
"""add a kickstart to cobbler.
:param dict data: a json data structure. The format is {"name": "your kickstart filename", "contents": [line, line,...]}
:return: True if the add kickstart operation succeeded, otherwise False
"""
return self.add_cobbler_object("kickstart", data)
def add_cobbler_object(self, cobbler_object, data, flag_return_data=False):
"""add a specific object to cobbler.
:param string cobbler_object: one of the cobbler objects; currently supports four objects, 'distro', 'profile', 'system', 'kickstart'
:param dict data: a json data structure. The format is {"name": "your object name", ...}
:return: True if the add object operation succeeded, otherwise False
"""
url = "/cm/v1/cobbler/{0}s/{1}".format(cobbler_object, data["name"])
rest_data = self.request_rest_api("post", url, data)
if flag_return_data:
return rest_data["data"] if rest_data["result"] else None
return rest_data["result"]
def update_cobbler_distro(self, data):
"""update a distro to cobbler.
:param dict data: a json data structure. The format is {"name": "your distro name", "comment": "your comment", "owners": "specified owners",}
:return: True if the update distro operation succeeded, otherwise False
"""
return self.update_cobbler_object("distro", data)
def update_cobbler_profile(self, data):
"""update a profile to cobbler.
:param dict data: a json data structure. The format is {"name": "your profile name", "distro": "distro", "kickstart": "kickstart.file", "comment": "your comment", "owners": "specified owners",}
:return: True if the update profile operation succeeded, otherwise False
"""
return self.update_cobbler_object("profile", data)
def update_cobbler_system(self, data):
"""update a system to cobbler.
:param dict data: a json data structure. The format is {"name": "your system name", "profile": "profile", "comment": "your comment", "owners": "specified owners", "power": {}, "interfaces": [{}, {},]}
:return: True if the update system operation succeeded, otherwise False
"""
return self.update_cobbler_object("system", data)
def update_cobbler_kickstart(self, data):
"""update a kickstart to cobbler.
:param dict data: a json data structure. The format is {"name": "your kickstart filename", "contents": [line, line,...]}
:return: True if the update kickstart operation succeeded, otherwise False
"""
return self.update_cobbler_object("kickstart", data)
def update_cobbler_object(self, cobbler_object, data):
"""update a specific object to cobbler.
:param string cobbler_object: one of the cobbler objects; currently supports THREE objects, 'profile', 'system', 'kickstart'
:param dict data: a json data structure. The format is {"name": "your object name", ...}
:return: True if the update object operation succeeded, otherwise False
"""
url = "/cm/v1/cobbler/{0}s/{1}".format(cobbler_object, data["name"])
rest_data = self.request_rest_api("put", url, data)
return rest_data["result"]
def remove_cobbler_distro(self, name):
"""
shortcut for remove cobbler distro item `name`
"""
return self.remove_cobbler_object("distro", name)
def remove_cobbler_profile(self, name):
"""
shortcut for remove cobbler profile item `name`
"""
return self.remove_cobbler_object("profile", name)
def remove_cobbler_system(self, name):
"""
shortcut for remove cobbler system item `name`
"""
return self.remove_cobbler_object("system", name)
def remove_cobbler_kickstart(self, name):
"""
shortcut for remove cobbler kickstart item `name`
"""
return self.remove_cobbler_object("kickstart", name)
def remove_cobbler_object(self, cobbler_object, name):
"""remove a specific object to cobbler.
:param string cobbler_object: one of the cobbler objects; currently supports FOUR objects, 'distro', 'profile', 'system', 'kickstart'
:param string name: the name of the cobbler object.
:return: True if the remove object operation succeeded, otherwise False
"""
url = "/cm/v1/cobbler/{0}s/{1}".format(cobbler_object, name)
rest_data = self.request_rest_api("delete", url)
return rest_data["result"]
def remove_cobbler_system_interface(self, system_name, if_name):
"""remove a specific interface *if_name* from the cobbler system *system_name*.
:param string system_name: the name of a cobbler system
:param string if_name: the name of an interface on the cobbler system.
:return: True if the remove interface operation succeeded, otherwise False
"""
url = "/cm/v1/cobbler/systems/{0}/{1}".format(system_name, if_name)
rest_data = self.request_rest_api("delete", url)
return rest_data["result"]
def monitor_cobbler_system(self, name):
"""monitor the status of baremetal system *name* via ping service.
:param string name: the name of a cobbler system or host
:return: True means system is ON, otherwise OFF.
"""
url = "/cm/v1/cobbler/baremetal/{0}".format(name)
rest_data = self.request_rest_api("get", url)
return rest_data["result"]
def deploy_cobbler_system(self, name):
"""deploy baremetal system *name*.
The cycle of the deploy system status is False(OFF) --> True(ON) --> False(OFF).
:param string name: the name of a cobbler system or host
:return: Always returns True, which ONLY means the deploy command has been sent through IPMI. You MUST call `monitor_cobbler_system` to get the status of the system `name`.
"""
# init deploy status
self.bm_status.init_deploy_status(name)
# call deploy REST
url = "/cm/v1/cobbler/baremetal/{0}".format(name)
rest_data = self.request_rest_api("post", url)
# save deploy command result to mongodb
self.bm_status.update_deploy_command_result(name, rest_data["result"])
if rest_data["result"]:
# monitor status
self.monitor_deploy_power_status(name, "deploy")
return rest_data["result"]
def power_cobbler_system(self, name, flag_on=True):
"""power ON/OFF baremetal system *name*.
To check whether the system is ON/OFF you must call **monitor_cobbler_system**; its result is True(ON)/False(OFF)
:param string name: the name of a cobbler system or host
:param boolean flag_on: a boolean value. True means to power on the system, False means power off.
:return: Always returns True, which ONLY means the power command has been sent through IPMI. You MUST call `monitor_cobbler_system` to get the status of the system `name`.
"""
# init power status
self.bm_status.init_power_status(name, flag_on)
# call power REST
url = "/cm/v1/cobbler/baremetal/{0}".format(name)
data = {"power_on": flag_on, }
rest_data = self.request_rest_api("put", url, data)
if rest_data["result"]:
# monitor status
self.monitor_deploy_power_status(name, "power", flag_on)
return rest_data["result"]
def monitor_deploy_power_status(self, name, action, flag_on=True):
"""monitor the deploy/power ON/OFF status of host name.
:param string name: the unique ID of host
:param string action: action of "deploy" or "power"
:param boolean flag_on: ONLY valid when action is power, True means power ON, False means OFF
"""
data = {"cm_id": name,
"action": action,
"flag_on": flag_on,
"time": 10, # 10 seconds
}
t = threading.Thread(target=self.monitor_status_thread, args=[data])
t.start()
def monitor_status_thread(self, data):
result = True
progress = 0  # ensure 'progress' is defined even if the first status update fails
host = data["cm_id"]
while result:
status = "ON" if self.monitor_cobbler_system(host) else "OFF"
if data["action"] == "deploy":
result = self.bm_status.update_deploy_status(host, status)
if result:
progress = self.bm_status.get_deploy_progress(host)
elif data["action"] == "power":
result = self.bm_status.update_power_status(
host, status, data["flag_on"])
if result:
progress = self.bm_status.get_power_progress(
host, data["flag_on"])
if progress >= 100:
break
if progress < 0:
# error
log.error("Error when getting the progress of host {0} on {1}.".format(
host, data["action"]))
break
sleep(data["time"])
def get_server_url(self,):
"""
get the BASE URL of the cobbler service
"""
server_config = cm_config_server()
cobbler_config = server_config.get("cloudmesh.server.cobbler")
return "{0}://{1}:{2}".format(cobbler_config["prot"], cobbler_config["host"], cobbler_config["port"])
def request_rest_api(self, method, url, data=None):
"""
Request a REST service through requests library.
:param string method: the operation in REST service, valid value in [get, post, put, delete]
:return: a dict of the form {"result": True|False, "data": data}
"""
method = method.lower()
headers = {"content-type": "application/json",
"accept": "application/json", }
req_api = getattr(requests, method)
req_url = self.server_url + url
if method == "get":
r = req_api(req_url)
else:
if data:
data["user_token"] = ""
else:
data = {"user_token": ""}
r = req_api(req_url, data=json.dumps(data), headers=headers)
return r.json()["cmrest"]["data"] if r.status_code == 200 else {"result": False}
if __name__ == "__main__":
bmc = CobblerRestAPI()
#"""
# get distros list
result = bmc.get_cobbler_iso_list()
#"""
"""
# get cobbler report
result = bmc.get_cobbler_object_report("kickstart", "*")
"""
"""
# add a distro, CentOS-6.5-x86_64-bin-DVD1.iso
data = {"name": "test_centos_1405023", "url": "http://mirrors.usc.edu/pub/linux/distributions/centos/6.5/isos/x86_64/CentOS-6.5-x86_64-bin-DVD1.iso"}
result = bmc.add_cobbler_distro(data)
"""
"""
# add a profile,
data = {"name": "test_profile_1405023", "distro": "test_centos_1405023-x86_64", "kickstart":"sample.ks",}
result = bmc.add_cobbler_profile(data)
"""
"""
# add a system
data = {"name": "test_system_1405023", "profile": "test_profile_1405023",
"power": {"power-address": "1.2.3.4",
"power-user": "onlytest",
"power-pass": "onlytest",
"power-type": "ipmilan",
"power-id": 1,
},
"interfaces": [
{
"name": "ee1",
"ip-address": "192.168.11.23",
"mac-address": "aa:11:cc:dd:ee:ff",
"static": True,
"netmask": "255.255.255.0"
},
{
"name": "ee2",
"ip-address": "192.168.11.123",
"mac-address": "aa:11:cc:dd:ff:ff",
"static": True,
"netmask": "255.255.255.0"
},
]
}
result = bmc.add_cobbler_system(data)
"""
"""
# update a profile
data = {"name": "test_profile_1405023", "kickstart": "default.ks",}
result = bmc.update_cobbler_profile(data)
"""
"""
# update a system
data = {"name": "test_system_1405023", "power": {"power-user": "hellouser"}}
result = bmc.update_cobbler_system(data)
"""
"""
# remove a interface of system
system_name="test_system_1405023"
if_name = "ee2"
result = bmc.remove_cobbler_system_interface(system_name, if_name)
"""
"""
# remove system
system_name="test_system_1405023"
result = bmc.remove_cobbler_system(system_name)
"""
"""
# remove a profile
profile_name="test_profile_1405023"
result = bmc.remove_cobbler_profile(profile_name)
"""
"""
# remove a distro
distro_name="test_centos_1405023-x86_64"
# MUST delete the default profile firstly
result = bmc.remove_cobbler_profile(distro_name)
result = bmc.remove_cobbler_distro(distro_name)
"""
result = bmc.get_cobbler_profile_based_kickstart("a.seed")
print("result is: ", result)
| {
"content_hash": "6f7d61a32aa7b3921b36ca38c7c75573",
"timestamp": "",
"source": "github",
"line_count": 452,
"max_line_length": 211,
"avg_line_length": 43.075221238938056,
"alnum_prop": 0.6029275808936826,
"repo_name": "rajpushkar83/cloudmesh",
"id": "317010d38344a04c5fda5afac329359666064a56",
"size": "19470",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cloudmesh/rain/cobbler/cobbler_rest_api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ActionScript",
"bytes": "15982"
},
{
"name": "CSS",
"bytes": "390396"
},
{
"name": "HTML",
"bytes": "4158355"
},
{
"name": "Java",
"bytes": "369"
},
{
"name": "JavaScript",
"bytes": "2803977"
},
{
"name": "Makefile",
"bytes": "7572"
},
{
"name": "PHP",
"bytes": "183557"
},
{
"name": "Python",
"bytes": "1736957"
},
{
"name": "Ruby",
"bytes": "10670"
},
{
"name": "Shell",
"bytes": "32263"
}
],
"symlink_target": ""
} |
"""Script to evaluate a dataset fold under a model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from magenta.models.coconet import lib_data
from magenta.models.coconet import lib_evaluation
from magenta.models.coconet import lib_graph
from magenta.models.coconet import lib_util
import numpy as np
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS
flags = tf.app.flags
flags.DEFINE_string('data_dir', None,
'Path to the base directory for different datasets.')
flags.DEFINE_string('eval_logdir', None,
'Path to the base directory for saving evaluation '
'statistics.')
flags.DEFINE_string('fold', None,
'Data fold on which to evaluate (valid or test)')
flags.DEFINE_string('fold_index', None,
'Optionally, index of particular data point in fold to '
'evaluate.')
flags.DEFINE_string('unit', None, 'Note or frame or example.')
flags.DEFINE_integer('ensemble_size', 5,
'Number of ensemble members to average.')
flags.DEFINE_bool('chronological', False,
'Indicates evaluation should proceed in chronological order.')
flags.DEFINE_string('checkpoint', None, 'Path to checkpoint directory.')
flags.DEFINE_string('sample_npy_path', None, 'Path to samples to be evaluated.')
EVAL_SUBDIR = 'eval_stats'
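# Example invocation (the paths and flag values below are illustrative
# assumptions, not taken from the original source; only the flag names are
# defined above):
#
#     python coconet_evaluate.py \
#         --data_dir=/tmp/coconet_data \
#         --eval_logdir=/tmp/coconet_logdir \
#         --checkpoint=/tmp/coconet_logdir/my_checkpoint \
#         --fold=valid --unit=frame --ensemble_size=5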
def main(unused_argv):
checkpoint_dir = FLAGS.checkpoint
if not checkpoint_dir:
# If a checkpoint directory is not specified, see if there is only one
# subdir in this folder and use that.
possible_checkpoint_dirs = tf.gfile.ListDirectory(FLAGS.eval_logdir)
possible_checkpoint_dirs = [
i for i in possible_checkpoint_dirs if
tf.gfile.IsDirectory(os.path.join(FLAGS.eval_logdir, i))]
if EVAL_SUBDIR in possible_checkpoint_dirs:
possible_checkpoint_dirs.remove(EVAL_SUBDIR)
if len(possible_checkpoint_dirs) == 1:
checkpoint_dir = os.path.join(
FLAGS.eval_logdir, possible_checkpoint_dirs[0])
tf.logging.info('Using checkpoint dir: %s', checkpoint_dir)
else:
raise ValueError(
'Need to provide a path to checkpoint directory or use an '
'eval_logdir with only 1 checkpoint subdirectory.')
wmodel = lib_graph.load_checkpoint(checkpoint_dir)
if FLAGS.eval_logdir is None:
raise ValueError(
'Set flag eval_logdir to specify a path for saving eval statistics.')
else:
eval_logdir = os.path.join(FLAGS.eval_logdir, EVAL_SUBDIR)
tf.gfile.MakeDirs(eval_logdir)
evaluator = lib_evaluation.BaseEvaluator.make(
FLAGS.unit, wmodel=wmodel, chronological=FLAGS.chronological)
evaluator = lib_evaluation.EnsemblingEvaluator(evaluator, FLAGS.ensemble_size)
if not FLAGS.sample_npy_path and FLAGS.fold is None:
raise ValueError(
'Either --fold must be specified, or paths of npy files to load must '
'be given, but not both.')
if FLAGS.fold is not None:
evaluate_fold(
FLAGS.fold, evaluator, wmodel.hparams, eval_logdir, checkpoint_dir)
if FLAGS.sample_npy_path is not None:
evaluate_paths([FLAGS.sample_npy_path], evaluator, wmodel.hparams,
eval_logdir)
tf.logging.info('Done')
def evaluate_fold(fold, evaluator, hparams, eval_logdir, checkpoint_dir):
"""Writes to file the neg. loglikelihood of given fold (train/valid/test)."""
eval_run_name = 'eval_%s_%s%s_%s_ensemble%s_chrono%s' % (
lib_util.timestamp(), fold,
'' if FLAGS.fold_index is None else FLAGS.fold_index, FLAGS.unit,
FLAGS.ensemble_size, FLAGS.chronological)
log_fname = '%s__%s.npz' % (os.path.basename(checkpoint_dir), eval_run_name)
log_fpath = os.path.join(eval_logdir, log_fname)
pianorolls = get_fold_pianorolls(fold, hparams)
rval = lib_evaluation.evaluate(evaluator, pianorolls)
tf.logging.info('Writing to path: %s' % log_fpath)
with lib_util.atomic_file(log_fpath) as p:
np.savez_compressed(p, **rval)
def evaluate_paths(paths, evaluator, unused_hparams, eval_logdir):
"""Evaluates negative loglikelihood of pianorolls from given paths."""
for path in paths:
name = 'eval_samples_%s_%s_ensemble%s_chrono%s' % (lib_util.timestamp(),
FLAGS.unit,
FLAGS.ensemble_size,
FLAGS.chronological)
log_fname = '%s__%s.npz' % (os.path.splitext(os.path.basename(path))[0],
name)
log_fpath = os.path.join(eval_logdir, log_fname)
pianorolls = get_path_pianorolls(path)
rval = lib_evaluation.evaluate(evaluator, pianorolls)
tf.logging.info('Writing evaluation statistics to %s', log_fpath)
with lib_util.atomic_file(log_fpath) as p:
np.savez_compressed(p, **rval)
def get_fold_pianorolls(fold, hparams):
dataset = lib_data.get_dataset(FLAGS.data_dir, hparams, fold)
pianorolls = dataset.get_pianorolls()
tf.logging.info('Retrieving pianorolls from %s set of %s dataset.',
fold, hparams.dataset)
print_statistics(pianorolls)
if FLAGS.fold_index is not None:
pianorolls = [pianorolls[int(FLAGS.fold_index)]]
return pianorolls
def get_path_pianorolls(path):
pianoroll_fpath = os.path.join(tf.resource_loader.get_data_files_path(), path)
tf.logging.info('Retrieving pianorolls from %s', pianoroll_fpath)
with tf.gfile.Open(pianoroll_fpath, 'r') as p:
pianorolls = np.load(p)
if isinstance(pianorolls, np.ndarray):
tf.logging.info(pianorolls.shape)
print_statistics(pianorolls)
return pianorolls
def print_statistics(pianorolls):
"""Prints statistics of given pianorolls, such as max and unique length."""
if isinstance(pianorolls, np.ndarray):
tf.logging.info(pianorolls.shape)
tf.logging.info('# of total pieces in set: %d', len(pianorolls))
lengths = [len(roll) for roll in pianorolls]
if len(np.unique(lengths)) > 1:
tf.logging.info('lengths %s', np.sort(lengths))
tf.logging.info('max_len %d', max(lengths))
tf.logging.info(
'unique lengths %s',
np.unique(sorted(pianoroll.shape[0] for pianoroll in pianorolls)))
tf.logging.info('shape %s', pianorolls[0].shape)
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run()
| {
"content_hash": "a2a85de5a150c3b111e6a7e26fd7a2b0",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 80,
"avg_line_length": 40.82802547770701,
"alnum_prop": 0.672386895475819,
"repo_name": "adarob/magenta",
"id": "6003e9c7bf9569028d9df4ab34f78a0968b1c310",
"size": "6995",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "magenta/models/coconet/coconet_evaluate.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1933"
},
{
"name": "Python",
"bytes": "2941402"
},
{
"name": "Shell",
"bytes": "24986"
}
],
"symlink_target": ""
} |
from oslo_log import log as logging
from designate import exceptions
from designate import utils
from designate.schema import validators
from designate.schema import resolvers
from designate.schema import format
LOG = logging.getLogger(__name__)
class Schema(object):
def __init__(self, version, name):
self.raw_schema = utils.load_schema(version, name)
self.resolver = resolvers.LocalResolver.from_schema(
version, self.raw_schema)
if version == 'v1':
self.validator = validators.Draft3Validator(
self.raw_schema, resolver=self.resolver,
format_checker=format.draft3_format_checker)
elif version in ['v2', 'admin']:
self.validator = validators.Draft4Validator(
self.raw_schema, resolver=self.resolver,
format_checker=format.draft4_format_checker)
else:
raise Exception('Unknown API version: %s' % version)
@property
def schema(self):
return self.validator.schema
@property
def properties(self):
return self.schema['properties']
@property
def links(self):
return self.schema['links']
@property
def raw(self):
return self.raw_schema
def validate(self, obj):
LOG.debug('Validating values: %r' % obj)
errors = []
for error in self.validator.iter_errors(obj):
errors.append({
'path': ".".join([str(x) for x in error.path]),
'message': error.message,
'validator': error.validator
})
if len(errors) > 0:
LOG.debug('Errors in validation: %r' % errors)
raise exceptions.InvalidObject("Provided object does not match "
"schema", errors=errors)
def filter(self, instance, properties=None):
if not properties:
properties = self.properties
filtered = {}
for name, subschema in properties.items():
if 'type' in subschema and subschema['type'] == 'array':
subinstance = instance.get(name, None)
filtered[name] = self._filter_array(subinstance, subschema)
elif 'type' in subschema and subschema['type'] == 'object':
subinstance = instance.get(name, None)
properties = subschema['properties']
filtered[name] = self.filter(subinstance, properties)
else:
filtered[name] = instance.get(name, None)
return filtered
def _filter_array(self, instance, schema):
if 'items' in schema and isinstance(schema['items'], list):
# NOTE(kiall): We currently don't make use of this..
raise NotImplementedError()
elif 'items' in schema:
schema = schema['items']
if '$ref' in schema:
with self.resolver.resolving(schema['$ref']) as ischema:
schema = ischema
properties = schema['properties']
return [self.filter(i, properties) for i in instance]
elif 'properties' in schema:
schema = schema['properties']
with self.resolver.resolving(schema['$ref']) as ischema:
schema = ischema
return [self.filter(i, schema) for i in instance]
else:
raise NotImplementedError('Can\'t filter unknown array type')
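# A hypothetical usage sketch (not part of the original module; the schema
# name 'zone' and the object contents are illustrative assumptions):
#
#     schema = Schema('v2', 'zone')
#     schema.validate(obj)           # raises exceptions.InvalidObject on errors
#     subset = schema.filter(obj)    # keeps only properties known to the schema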
| {
"content_hash": "08e52f98b6c13dcebc0012617dca2aeb",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 76,
"avg_line_length": 33.08571428571429,
"alnum_prop": 0.581174438687392,
"repo_name": "cneill/designate",
"id": "fe0fc7089b7facf0933e04f1ef92922457fe5557",
"size": "4100",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "designate/schema/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "7596"
},
{
"name": "JavaScript",
"bytes": "1378"
},
{
"name": "Python",
"bytes": "1866778"
},
{
"name": "Ruby",
"bytes": "4238"
},
{
"name": "Shell",
"bytes": "13350"
}
],
"symlink_target": ""
} |
u"""F5 Networks® LBaaSv2 Driver Implementation."""
# Copyright (c) 2016-2018, F5 Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from oslo_config import cfg
from oslo_log import helpers as log_helpers
from oslo_log import log as logging
from oslo_utils import importutils
from neutron.callbacks import events
from neutron.callbacks import registry
from neutron.callbacks import resources
from neutron.plugins.common import constants as plugin_constants
from neutron_lib.api.definitions import portbindings
from neutron_lib import constants as q_const
from neutron_lbaas.db.loadbalancer import models
from neutron_lbaas.extensions import lbaas_agentschedulerv2
from f5lbaasdriver.v2.bigip import agent_rpc
from f5lbaasdriver.v2.bigip import exceptions as f5_exc
from f5lbaasdriver.v2.bigip import neutron_client
from f5lbaasdriver.v2.bigip import plugin_rpc
LOG = logging.getLogger(__name__)
OPTS = [
cfg.StrOpt(
'f5_loadbalancer_pool_scheduler_driver_v2',
default=(
'f5lbaasdriver.v2.bigip.agent_scheduler.TenantScheduler'
),
help=('Driver to use for scheduling '
'pool to a default loadbalancer agent')
),
cfg.StrOpt(
'f5_loadbalancer_service_builder_v2',
default=(
'f5lbaasdriver.v2.bigip.service_builder.LBaaSv2ServiceBuilder'
),
help=('Default class to use for building a service object.')
)
]
cfg.CONF.register_opts(OPTS)
class F5NoAttachedLoadbalancerException(f5_exc.F5LBaaSv2DriverException):
"""Exception thrown when an LBaaSv2 object has not parent Loadbalancer."""
message = "Entity has no associated loadbalancer"
def __str__(self):
return self.message
class F5DriverV2(object):
u"""F5 Networks® LBaaSv2 Driver."""
def __init__(self, plugin=None, env=None):
"""Driver initialization."""
if not plugin:
LOG.error('Required LBaaS Driver and Core Driver Missing')
sys.exit(1)
self.plugin = plugin
self.env = env
self.loadbalancer = LoadBalancerManager(self)
self.listener = ListenerManager(self)
self.pool = PoolManager(self)
self.member = MemberManager(self)
self.healthmonitor = HealthMonitorManager(self)
self.l7policy = L7PolicyManager(self)
self.l7rule = L7RuleManager(self)
# what scheduler to use for pool selection
self.scheduler = importutils.import_object(
cfg.CONF.f5_loadbalancer_pool_scheduler_driver_v2)
self.service_builder = importutils.import_object(
cfg.CONF.f5_loadbalancer_service_builder_v2, self)
self.agent_rpc = agent_rpc.LBaaSv2AgentRPC(self)
self.plugin_rpc = plugin_rpc.LBaaSv2PluginCallbacksRPC(self)
self.q_client = \
neutron_client.F5NetworksNeutronClient(self.plugin)
# add this agent RPC to the neutron agent scheduler
# mixin's agent_notifiers dictionary for its env
self.plugin.agent_notifiers.update(
{q_const.AGENT_TYPE_LOADBALANCER: self.agent_rpc})
registry.subscribe(self._bindRegistryCallback(),
resources.PROCESS,
events.AFTER_INIT)
def _bindRegistryCallback(self):
# Defines a callback function with name tied to driver env. Need to
# ensure a unique name, as the registry callback manager references callback
# functions by name.
def post_fork_callback(resources, event, trigger):
LOG.debug("F5DriverV2 with env %s received post neutron child "
"fork notification pid(%d) print trigger(%s)" % (
self.env, os.getpid(), trigger))
self.plugin_rpc.create_rpc_listener()
post_fork_callback.__name__ += '_' + str(self.env)
return post_fork_callback
def _handle_driver_error(self, context, loadbalancer,
loadbalancer_id, status):
pass
class EntityManager(object):
'''Parent for all managers defined in this module.'''
def __init__(self, driver):
self.driver = driver
self.api_dict = None
self.loadbalancer = None
def _call_rpc(self, context, entity, rpc_method):
'''Perform operations common to create and delete for managers.'''
try:
agent_host, service = self._setup_crud(context, entity)
rpc_callable = getattr(self.driver.agent_rpc, rpc_method)
rpc_callable(context, self.api_dict, service, agent_host)
except (lbaas_agentschedulerv2.NoEligibleLbaasAgent,
lbaas_agentschedulerv2.NoActiveLbaasAgent) as e:
LOG.error("Exception: %s: %s" % (rpc_method, e))
except Exception as e:
LOG.error("Exception: %s: %s" % (rpc_method, e))
raise e
def _setup_crud(self, context, entity):
'''Setup CRUD operations for managers to make calls to agent.
:param context: auth context for performing CRUD operation
:param entity: neutron lbaas entity -- target of the CRUD operation
:returns: tuple -- (agent object, service dict)
:raises: F5NoAttachedLoadbalancerException
'''
if entity.attached_to_loadbalancer() and self.loadbalancer:
(agent, service) = self._schedule_agent_create_service(context)
return agent['host'], service
raise F5NoAttachedLoadbalancerException()
def _schedule_agent_create_service(self, context):
'''Schedule agent and build service--used for most managers.
:param context: auth context for performing crud operation
:returns: tuple -- (agent object, service dict)
'''
agent = self.driver.scheduler.schedule(
self.driver.plugin,
context,
self.loadbalancer.id,
self.driver.env
)
service = self.driver.service_builder.build(
context, self.loadbalancer, agent)
return agent, service
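# Note on the flow implemented above (added commentary, not original code):
# each concrete manager below sets self.loadbalancer and an API dict, then
# either calls _call_rpc() (create/delete) or _setup_crud() directly (update).
# Both paths schedule an agent for the loadbalancer, build the service dict
# via the service builder, and finally invoke the matching agent RPC method.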
class LoadBalancerManager(EntityManager):
"""LoadBalancerManager class handles Neutron LBaaS CRUD."""
@log_helpers.log_method_call
def create(self, context, loadbalancer):
"""Create a loadbalancer."""
driver = self.driver
self.loadbalancer = loadbalancer
try:
agent, service = self._schedule_agent_create_service(context)
agent_host = agent['host']
agent_config = agent.get('configurations', {})
LOG.debug("agent configurations: %s" % agent_config)
scheduler = self.driver.scheduler
agent_config_dict = \
scheduler.deserialize_agent_configurations(agent_config)
if not agent_config_dict.get('nova_managed', False):
# Update the port for the VIP to show ownership by this driver
port_data = {
'admin_state_up': True,
'device_owner': 'network:f5lbaasv2',
'status': q_const.PORT_STATUS_ACTIVE
}
port_data[portbindings.HOST_ID] = agent_host
port_data[portbindings.VNIC_TYPE] = "baremetal"
port_data[portbindings.PROFILE] = {}
driver.plugin.db._core_plugin.update_port(
context,
loadbalancer.vip_port_id,
{'port': port_data}
)
else:
LOG.debug("Agent devices are nova managed")
driver.agent_rpc.create_loadbalancer(
context, loadbalancer.to_api_dict(), service, agent_host)
except (lbaas_agentschedulerv2.NoEligibleLbaasAgent,
lbaas_agentschedulerv2.NoActiveLbaasAgent) as e:
LOG.error("Exception: loadbalancer create: %s" % e)
driver.plugin.db.update_status(
context,
models.LoadBalancer,
loadbalancer.id,
plugin_constants.ERROR)
except Exception as e:
LOG.error("Exception: loadbalancer create: %s" % e.message)
raise e
@log_helpers.log_method_call
def update(self, context, old_loadbalancer, loadbalancer):
"""Update a loadbalancer."""
driver = self.driver
self.loadbalancer = loadbalancer
try:
agent, service = self._schedule_agent_create_service(context)
agent_host = agent['host']
driver.agent_rpc.update_loadbalancer(
context,
old_loadbalancer.to_api_dict(),
loadbalancer.to_api_dict(),
service,
agent_host
)
except (lbaas_agentschedulerv2.NoEligibleLbaasAgent,
lbaas_agentschedulerv2.NoActiveLbaasAgent) as e:
LOG.error("Exception: loadbalancer update: %s" % e)
driver._handle_driver_error(context,
models.LoadBalancer,
loadbalancer.id,
plugin_constants.ERROR)
except Exception as e:
LOG.error("Exception: loadbalancer update: %s" % e.message)
raise e
@log_helpers.log_method_call
def delete(self, context, loadbalancer):
"""Delete a loadbalancer."""
driver = self.driver
self.loadbalancer = loadbalancer
try:
agent, service = self._schedule_agent_create_service(context)
agent_host = agent['host']
driver.agent_rpc.delete_loadbalancer(
context, loadbalancer.to_api_dict(), service, agent_host)
except (lbaas_agentschedulerv2.NoEligibleLbaasAgent,
lbaas_agentschedulerv2.NoActiveLbaasAgent) as e:
LOG.error("Exception: loadbalancer delete: %s" % e)
driver.plugin.db.delete_loadbalancer(context, loadbalancer.id)
except Exception as e:
LOG.error("Exception: loadbalancer delete: %s" % e)
raise e
@log_helpers.log_method_call
def refresh(self, context, loadbalancer):
"""Refresh a loadbalancer."""
pass
@log_helpers.log_method_call
def stats(self, context, loadbalancer):
driver = self.driver
try:
agent = driver.scheduler.schedule(
driver.plugin,
context,
loadbalancer.id,
driver.env
)
service = driver.service_builder.build(context,
loadbalancer,
agent)
driver.agent_rpc.update_loadbalancer_stats(
context,
loadbalancer.to_api_dict(),
service,
agent['host']
)
except (lbaas_agentschedulerv2.NoEligibleLbaasAgent,
lbaas_agentschedulerv2.NoActiveLbaasAgent) as e:
LOG.error("Exception: update_loadbalancer_stats: %s" % e.message)
driver._handle_driver_error(context,
models.LoadBalancer,
loadbalancer.id,
plugin_constants.ERROR)
except Exception as e:
LOG.error("Exception: update_loadbalancer_stats: %s" % e.message)
raise e
class ListenerManager(EntityManager):
"""ListenerManager class handles Neutron LBaaS listener CRUD."""
@log_helpers.log_method_call
def create(self, context, listener):
"""Create a listener."""
self.loadbalancer = listener.loadbalancer
self.api_dict = listener.to_dict(
loadbalancer=False, default_pool=False)
self._call_rpc(context, listener, 'create_listener')
@log_helpers.log_method_call
def update(self, context, old_listener, listener):
"""Update a listener."""
driver = self.driver
self.loadbalancer = listener.loadbalancer
try:
agent_host, service = self._setup_crud(context, listener)
driver.agent_rpc.update_listener(
context,
old_listener.to_dict(loadbalancer=False,
default_pool=False),
listener.to_dict(loadbalancer=False, default_pool=False),
service,
agent_host
)
except Exception as e:
LOG.error("Exception: listener update: %s" % e.message)
raise e
@log_helpers.log_method_call
def delete(self, context, listener):
"""Delete a listener."""
self.loadbalancer = listener.loadbalancer
self.api_dict = listener.to_dict(
loadbalancer=False, default_pool=False)
self._call_rpc(context, listener, 'delete_listener')
class PoolManager(EntityManager):
"""PoolManager class handles Neutron LBaaS pool CRUD."""
def _get_pool_dict(self, pool):
pool_dict = pool.to_dict(
healthmonitor=False,
listener=False,
listeners=False,
loadbalancer=False,
l7_policies=False,
members=False,
session_persistence=False)
pool_dict['provisioning_status'] = pool.provisioning_status
pool_dict['operating_status'] = pool.operating_status
return pool_dict
@log_helpers.log_method_call
def create(self, context, pool):
"""Create a pool."""
self.loadbalancer = pool.loadbalancer
self.api_dict = self._get_pool_dict(pool)
self._call_rpc(context, pool, 'create_pool')
@log_helpers.log_method_call
def update(self, context, old_pool, pool):
"""Update a pool."""
driver = self.driver
self.loadbalancer = pool.loadbalancer
try:
agent_host, service = self._setup_crud(context, pool)
driver.agent_rpc.update_pool(
context,
self._get_pool_dict(old_pool),
self._get_pool_dict(pool),
service,
agent_host
)
except Exception as e:
LOG.error("Exception: pool update: %s" % e.message)
raise e
@log_helpers.log_method_call
def delete(self, context, pool):
"""Delete a pool."""
self.loadbalancer = pool.loadbalancer
self.api_dict = self._get_pool_dict(pool)
self._call_rpc(context, pool, 'delete_pool')
class MemberManager(EntityManager):
"""MemberManager class handles Neutron LBaaS pool member CRUD."""
@log_helpers.log_method_call
def create(self, context, member):
"""Create a member."""
self.loadbalancer = member.pool.loadbalancer
self.api_dict = member.to_dict(pool=False)
self._call_rpc(context, member, 'create_member')
@log_helpers.log_method_call
def update(self, context, old_member, member):
"""Update a member."""
driver = self.driver
self.loadbalancer = member.pool.loadbalancer
try:
agent_host, service = self._setup_crud(context, member)
driver.agent_rpc.update_member(
context,
old_member.to_dict(pool=False),
member.to_dict(pool=False),
service,
agent_host
)
except Exception as e:
LOG.error("Exception: member update: %s" % e.message)
raise e
@log_helpers.log_method_call
def delete(self, context, member):
"""Delete a member."""
self.loadbalancer = member.pool.loadbalancer
driver = self.driver
try:
agent_host, service = self._setup_crud(context, member)
driver.agent_rpc.delete_member(
context, member.to_dict(pool=False), service, agent_host)
except Exception as e:
LOG.error("Exception: member delete: %s" % e.message)
raise e
class HealthMonitorManager(EntityManager):
"""HealthMonitorManager class handles Neutron LBaaS monitor CRUD."""
@log_helpers.log_method_call
def create(self, context, health_monitor):
"""Create a health monitor."""
self.loadbalancer = health_monitor.pool.loadbalancer
self.api_dict = health_monitor.to_dict(pool=False)
self._call_rpc(context, health_monitor, 'create_health_monitor')
@log_helpers.log_method_call
def update(self, context, old_health_monitor, health_monitor):
"""Update a health monitor."""
driver = self.driver
self.loadbalancer = health_monitor.pool.loadbalancer
try:
agent_host, service = self._setup_crud(context, health_monitor)
driver.agent_rpc.update_health_monitor(
context,
old_health_monitor.to_dict(pool=False),
health_monitor.to_dict(pool=False),
service,
agent_host
)
except Exception as e:
LOG.error("Exception: health monitor update: %s" % e.message)
raise e
@log_helpers.log_method_call
def delete(self, context, health_monitor):
"""Delete a health monitor."""
self.loadbalancer = health_monitor.pool.loadbalancer
self.api_dict = health_monitor.to_dict(pool=False)
self._call_rpc(context, health_monitor, 'delete_health_monitor')
class L7PolicyManager(EntityManager):
"""L7PolicyManager class handles Neutron LBaaS L7 Policy CRUD."""
@log_helpers.log_method_call
def create(self, context, policy):
"""Create an L7 policy."""
self.loadbalancer = policy.listener.loadbalancer
self.api_dict = policy.to_dict(listener=False, rules=False)
self._call_rpc(context, policy, 'create_l7policy')
@log_helpers.log_method_call
def update(self, context, old_policy, policy):
"""Update a policy."""
driver = self.driver
self.loadbalancer = policy.listener.loadbalancer
try:
agent_host, service = self._setup_crud(context, policy)
driver.agent_rpc.update_l7policy(
context,
old_policy.to_dict(listener=False),
policy.to_dict(listener=False),
service,
agent_host
)
except Exception as e:
LOG.error("Exception: l7policy update: %s" % e.message)
raise e
@log_helpers.log_method_call
def delete(self, context, policy):
"""Delete a policy."""
self.loadbalancer = policy.listener.loadbalancer
self.api_dict = policy.to_dict(listener=False, rules=False)
self._call_rpc(context, policy, 'delete_l7policy')
class L7RuleManager(EntityManager):
"""L7RuleManager class handles Neutron LBaaS L7 Rule CRUD."""
@log_helpers.log_method_call
def create(self, context, rule):
"""Create an L7 rule."""
self.loadbalancer = rule.policy.listener.loadbalancer
self.api_dict = rule.to_dict(policy=False)
self._call_rpc(context, rule, 'create_l7rule')
@log_helpers.log_method_call
def update(self, context, old_rule, rule):
"""Update a rule."""
driver = self.driver
self.loadbalancer = rule.policy.listener.loadbalancer
try:
agent_host, service = self._setup_crud(context, rule)
driver.agent_rpc.update_l7rule(
context,
old_rule.to_dict(policy=False),
rule.to_dict(policy=False),
service,
agent_host
)
except Exception as e:
LOG.error("Exception: l7rule update: %s" % e.message)
raise e
@log_helpers.log_method_call
def delete(self, context, rule):
"""Delete a rule."""
self.loadbalancer = rule.policy.listener.loadbalancer
self.api_dict = rule.to_dict(policy=False)
self._call_rpc(context, rule, 'delete_l7rule')
| {
"content_hash": "3c9352c818e49ec79c43456d58b5d053",
"timestamp": "",
"source": "github",
"line_count": 573,
"max_line_length": 78,
"avg_line_length": 36.05584642233857,
"alnum_prop": 0.5998547918683447,
"repo_name": "jputrino/f5-openstack-lbaasv2-driver",
"id": "1a7bac7a5fc998e40d0864bf8ebc0d352201d89b",
"size": "20677",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "f5lbaasdriver/v2/bigip/driver_v2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "849"
},
{
"name": "Python",
"bytes": "280360"
},
{
"name": "Shell",
"bytes": "6261"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import os
import numpy as np
np.random.seed(1234) # for reproducibility?
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
import lasagne
os.environ["THEANO_FLAGS"] = "cuda.root=/usr/local/cuda,device=gpu,floatX=float32"
# specifying the gpu to use
import theano.sandbox.cuda
from collections import OrderedDict
theano.sandbox.cuda.use('gpu1')
import theano
import theano.tensor as T
# from http://blog.christianperone.com/2015/08/convolutional-neural-networks-and-feature-extraction-with-python/
# import matplotlib
# import matplotlib.pyplot as plt
# import matplotlib.cm as cm
import numpy as np
import logging
import code.lipreading.formatting
import code.lipreading.general_tools
import code.lipreading.preprocessLipreading
logger_lip = logging.getLogger('lipreading')
logger_lip.setLevel(logging.DEBUG)
FORMAT = '[$BOLD%(filename)s$RESET:%(lineno)d][%(levelname)-5s]: %(message)s '
formatter = logging.Formatter(code.lipreading.formatting.formatter_message(FORMAT, False))
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
ch.setFormatter(formatter)
logger_lip.addHandler(ch)
# User - created files
import binary_net
import lasagne.layers as L
debug = True
binary = True
justTest = True
def main():
# BN parameters
batch_size = 100
logger_lip.info("batch_size = %s", batch_size)
# alpha is the exponential moving average factor
alpha = .1
logger_lip.info("alpha = %s", alpha)
epsilon = 1e-4
logger_lip.info("epsilon = %s", epsilon)
# BinaryOut
activation = binary_net.binary_tanh_unit
print("activation = binary_tanh_unit")
stochastic = True
print("stochastic = " + str(stochastic))
# (-H,+H) are the two binary values
#H = "Glorot"
H = 1.
print("H = " + str(H))
# W_LR_scale = 1.
W_LR_scale = "Glorot" # "Glorot" means we are using the coefficients from Glorot's paper
print("W_LR_scale = " + str(W_LR_scale))
# Training parameters
num_epochs = 50
logger_lip.info("num_epochs = %s", num_epochs)
# Decaying LR
LR_start = 0.1
logger_lip.info("LR_start = %s", LR_start)
LR_fin = 0.0000003
logger_lip.info("LR_fin = %s", LR_fin)
# LR_decay = (LR_fin / LR_start) ** (1. / num_epochs)
LR_decay = 0.5 # sqrt(0.5)
logger_lip.info("LR_decay = %s", LR_decay)
    # BTW, LR decay might be good for the BN moving average...
shuffle_parts = 1
logger_lip.info("shuffle_parts = %s", shuffle_parts)
if binary: oneHot = True
else: oneHot = False
##############################################
network_type = "google"
    viseme = False  # sets nbClasses and the store path; network sizes: viseme 6,498,828 params, phoneme 7,176,231 params
if viseme:
nbClasses = 12
else:
nbClasses = 39
# get the database
# If it's small (lipspeakers) -> generate X_train, y_train etc here
    # otherwise we need to load and generate each speaker separately in the training loop
dataset = "TCDTIMIT"
root_dir = os.path.join(os.path.expanduser('~/TCDTIMIT/lipreading/' + dataset))
results_dir = root_dir + "/results/CNN_binaryNet";
if not os.path.exists(results_dir): os.makedirs(results_dir)
if viseme:
database_binaryDir = root_dir + '/binaryViseme'
else:
database_binaryDir = root_dir + '/binary'
datasetType = "lipspeakers" # "lipspeakers" #"volunteers" #"volunteers" # lipspeakers or volunteers"
##############################################
if datasetType == "lipspeakers":
loadPerSpeaker = False # only lipspeakers small enough to fit in CPU RAM, generate X_train etc here
storeProcessed = True
processedDir = database_binaryDir + "_allLipspeakersProcessed"
        # TODO: prepLip_all can be used to generate a pkl containing all the lipspeaker data. Not sure if this still works, so use with care!
if not oneHot: pkl_path = processedDir + os.sep + datasetType + ".pkl"
else:
pkl_path = processedDir + os.sep + datasetType + "_oneHot" + ".pkl"
if not os.path.exists(pkl_path):
logger_lip.info("dataset not yet processed. Processing...")
code.lipreading.preprocessLipreading.prepLip_all(data_path=database_binaryDir, store_path=pkl_path, trainFraction=0.7, validFraction=0.1,
testFraction=0.2,
nbClasses=nbClasses, onehot=oneHot, type=datasetType, verbose=True)
datasetFiles = code.lipreading.general_tools.unpickle(pkl_path)
X_train, y_train, X_val, y_val, X_test, y_test = datasetFiles
dtypeX = 'float32'
dtypeY = 'float32'
X_train = X_train.astype(dtypeX);
y_train = y_train.astype(dtypeY);
X_val = X_val.astype(dtypeX);
y_val = y_val.astype(dtypeY);
X_test = X_test.astype(dtypeX);
y_test = y_test.astype(dtypeY);
datasetFiles = [X_train, y_train, X_val, y_val, X_test, y_test]
# These files have been generated with datasetToPkl_fromCombined, so that the train/val/test set are the same as for combinedSR.
# X_train, y_train = unpickle(os.path.expanduser("~/TCDTIMIT/lipreading/TCDTIMIT/binary/allLipspeakersTrain.pkl"))
# X_val, y_val = unpickle(os.path.expanduser("~/TCDTIMIT/lipreading/TCDTIMIT/binary/allLipspeakersVal.pkl"))
# X_test, y_test = unpickle(os.path.expanduser("~/TCDTIMIT/lipreading/TCDTIMIT/binary/allLipspeakersTest.pkl"))
# datasetFiles = [X_train, y_train, X_val, y_val, X_test, y_test]
else: # we need to load and preprocess each speaker before we evaluate, because dataset is too large and doesn't fit in CPU RAM
loadPerSpeaker = True
storeProcessed = True # if you have about 10GB hdd space, you can increase the speed by not reprocessing it each iteration
processedDir = database_binaryDir + "_finalProcessed"
# you can just run this program and it will generate the files the first time it encounters them, or generate them manually with datasetToPkl.py
# just get the names
testVolunteerNumbers = ["13F", "15F", "21M", "23M", "24M", "25M", "28M", "29M", "30F", "31F", "34M", "36F",
"37F", "43F", "47M", "51F", "54M"];
testVolunteers = [str(testNumber) + ".pkl" for testNumber in testVolunteerNumbers];
lipspeakers = ["Lipspkr1.pkl", "Lipspkr2.pkl", "Lipspkr3.pkl"];
allSpeakers = [f for f in os.listdir(database_binaryDir) if
os.path.isfile(os.path.join(database_binaryDir, f)) and os.path.splitext(f)[1] == ".pkl"]
trainVolunteers = [f for f in allSpeakers if not (f in testVolunteers or f in lipspeakers)];
trainVolunteers = [vol for vol in trainVolunteers if vol is not None]
if datasetType == "combined":
trainingSpeakerFiles = trainVolunteers + lipspeakers
testSpeakerFiles = testVolunteers
elif datasetType == "volunteers":
trainingSpeakerFiles = trainVolunteers
testSpeakerFiles = testVolunteers
else:
raise Exception("invalid dataset entered")
datasetFiles = [trainingSpeakerFiles, testSpeakerFiles]
model_name = datasetType + "_" + network_type + "_" + ("viseme" if viseme else "phoneme") + str(nbClasses) \
+ ("_binary" if binary else "")
model_save_name = os.path.join(results_dir, model_name)
# log file
logFile = results_dir + os.sep + model_name + '.log'
# if os.path.exists(logFile):
# fh = logging.FileHandler(logFileT) # append to existing log
# else:
fh = logging.FileHandler(logFile, 'w') # create new logFile
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
logger_lip.addHandler(fh)
logger_lip.info('Building the CNN...')
# Prepare Theano variables for inputs and targets
inputs = T.tensor4('inputs')
if oneHot:
targets = T.matrix('targets')
else:
targets = T.ivector('targets')
LR = T.scalar('LR', dtype=theano.config.floatX)
# get the network structure
l_out = code.lipreading.buildNetworks.build_network_google_binary(activation, alpha, epsilon, inputs, binary, stochastic, H,
W_LR_scale) # 7176231 params
for layer in L.get_all_layers(l_out):
print(layer)
    # print the number of network parameters
logger_lip.info("Using the %s network", network_type)
logger_lip.info("The number of parameters of this network: %s", L.count_params(l_out))
logger_lip.info("loading %s", model_save_name + '.npz')
load_model(model_save_name + '.npz', l_out)
logger_lip.info("* COMPILING FUNCTIONS...")
train_output = lasagne.layers.get_output(l_out, deterministic=False)
# squared hinge loss
loss = T.mean(T.sqr(T.maximum(0., 1. - targets * train_output)))
# W updates
W = lasagne.layers.get_all_params(l_out, binary=True)
W_grads = binary_net.compute_grads(loss, l_out)
updates = lasagne.updates.adam(loss_or_grads=W_grads, params=W, learning_rate=LR)
updates = binary_net.clipping_scaling(updates, l_out)
# other parameters updates
params = lasagne.layers.get_all_params(l_out, trainable=True, binary=False)
updates = OrderedDict(
updates.items() + lasagne.updates.adam(loss_or_grads=loss, params=params, learning_rate=LR).items())
test_output = lasagne.layers.get_output(l_out, deterministic=True)
out_fn = theano.function([inputs], test_output)
test_loss = T.mean(T.sqr(T.maximum(0., 1. - targets * test_output)))
test_acc = T.mean(T.eq(T.argmax(test_output, axis=1), T.argmax(targets, axis=1)), dtype=theano.config.floatX)
    k = 3
    # Note: top-k accuracy is not actually computed in this script; the
    # expression below is a constant placeholder that always evaluates to zero.
    test_top3_acc = T.zeros((1,))
    topk_acc_fn = theano.function([], test_top3_acc)
val_fn = theano.function([inputs,targets],[test_loss, test_acc, test_top3_acc])
if debug:
nb = 3
debugX = X_train[0:nb]
debugY = y_train[0:nb]
out = out_fn(debugX)
val = val_fn(debugX, debugY)
import pdb;pdb.set_trace()
# Compile a function performing a training step on a mini-batch (by giving the updates dictionary)
# and returning the corresponding training loss:
train_fn = theano.function([inputs, targets, LR], loss, updates=updates)
logger_lip.info('Training...')
import code.lipreading.train_lipreading
code.lipreading.train_lipreading.train(
train_fn=train_fn, val_fn=val_fn, out_fn=out_fn, topk_acc_fn=topk_acc_fn, k=k,
network_output_layer=l_out,
batch_size=batch_size,
LR_start=LR_start, LR_decay=LR_decay,
num_epochs=num_epochs,
dataset=datasetFiles,
database_binaryDir=database_binaryDir,
storeProcessed=storeProcessed,
processedDir=processedDir,
loadPerSpeaker=loadPerSpeaker, justTest=justTest,
save_name=model_save_name,
shuffleEnabled=True)
def unpickle(file):
import cPickle
fo = open(file, 'rb')
a = cPickle.load(fo)
fo.close()
return a
def load_model(model_path, network_output_layer, logger=logger_lip):
try:
logger.info("Loading stored model...")
# restore network weights
with np.load(model_path) as f:
param_values = [f['arr_%d' % i] for i in range(len(f.files))]
import pdb;pdb.set_trace()
try:
lasagne.layers.set_all_param_values(network_output_layer, param_values)
except:
lasagne.layers.set_all_param_values(network_output_layer, *param_values)
logger.info("Loading parameters successful.")
return 0
except IOError as e:
logger.info("%s", os.strerror(e.errno))
logger.info('Model: %s not found. No weights loaded', model_path)
return -1
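# Hypothetical counterpart to load_model (not part of the original script): a
# sketch of how a weights file in the format load_model expects could be
# written, assuming np.savez is used so that the 'arr_%d' keys line up.
def save_model(model_path, network_output_layer, logger=logger_lip):
    logger.info("Saving model parameters to %s", model_path)
    param_values = lasagne.layers.get_all_param_values(network_output_layer)
    np.savez(model_path, *param_values)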
if __name__ == "__main__":
main()
| {
"content_hash": "16512e0d932c9c744351c72c65638dd2",
"timestamp": "",
"source": "github",
"line_count": 304,
"max_line_length": 152,
"avg_line_length": 39.625,
"alnum_prop": 0.6382201560684044,
"repo_name": "matthijsvk/multimodalSR",
"id": "86c7c093ba7dda0fa44db0f71ab16d2f768cac25",
"size": "12046",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/lipreading/binary/old/lipreading_binary_v2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "3372053"
},
{
"name": "C++",
"bytes": "159084"
},
{
"name": "CSS",
"bytes": "1594827"
},
{
"name": "Cuda",
"bytes": "24531"
},
{
"name": "JavaScript",
"bytes": "4924"
},
{
"name": "Jupyter Notebook",
"bytes": "15270096"
},
{
"name": "M4",
"bytes": "7412"
},
{
"name": "Makefile",
"bytes": "62415"
},
{
"name": "Matlab",
"bytes": "54469"
},
{
"name": "Perl",
"bytes": "130963"
},
{
"name": "Python",
"bytes": "3811156"
},
{
"name": "Shell",
"bytes": "42206"
},
{
"name": "TeX",
"bytes": "1433023"
}
],
"symlink_target": ""
} |
import re
import inspect
from pymongo import MongoClient
class MongoDBService(object):
ID_FIELD = "_id"
KEY_FUNC = "unique_key"
def __init__(self, db_uri=""):
self.client = MongoClient(db_uri if db_uri else "mongodb://localhost:27017/default")
def get_collection(self, cls_or_instance):
database = self.client.get_default_database()
collection = self.get_collection_name(cls_or_instance)
return database[collection]
def save(self, instance):
collection = self.get_collection(instance)
_id = None
if hasattr(instance, self.ID_FIELD) and getattr(instance, self.ID_FIELD):
_id = instance._id
elif hasattr(instance, self.KEY_FUNC):
key = getattr(instance, self.KEY_FUNC)()
            same = collection.find_one({self.KEY_FUNC: key})
            if same:
                # find_one returns a plain dict, so look the id up by key
                _id = same[self.ID_FIELD]
        instance_dic = self.object_to_dict(instance)
        if _id:
            # update_one expects an update-operator document, e.g. {"$set": ...}
            collection.update_one({"_id": _id}, {"$set": instance_dic})
        else:
            collection.insert_one(instance_dic)
@classmethod
def object_to_dict(cls, instance):
dic = {}
for a in inspect.getmembers(instance, lambda a: not inspect.isroutine(a)):
if not a[0].startswith("_"):
dic[a[0]] = a[1]
if hasattr(instance, cls.ID_FIELD) and instance._id:
dic[cls.ID_FIELD] = instance._id
if hasattr(instance, cls.KEY_FUNC):
key = getattr(instance, cls.KEY_FUNC)()
if key:
dic[cls.KEY_FUNC] = key
return dic
@classmethod
def get_collection_name(cls, cls_or_instance):
cls_name = ""
if type(cls_or_instance) is type:
cls_name = cls_or_instance.__name__
else:
cls_name = type(cls_or_instance).__name__
collection_name = cls.__camel_to_snake(cls_name)
return collection_name
@classmethod
def __camel_to_snake(cls, name):
n1 = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name) # Ab -> A_b
n2 = re.sub('([a-z0-9])([A-Z])', r'\1_\2', n1) # aB -> a_B
n3 = n2.lower() # A_b & a_B -> a_b & a_b
return n3
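if __name__ == "__main__":
    # Usage sketch (illustrative, not part of the original module). It assumes a
    # MongoDB server is reachable at the default localhost URI; the Article class
    # below is hypothetical.
    class Article(object):
        def __init__(self, title):
            self._id = None
            self.title = title
        def unique_key(self):
            return self.title
    print(MongoDBService.get_collection_name(Article))  # -> "article"
    service = MongoDBService()
    service.save(Article("hello world"))  # inserted into the "article" collection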
| {
"content_hash": "693ecd818a5043f2b4a37ce05f45d454",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 92,
"avg_line_length": 32.38235294117647,
"alnum_prop": 0.555858310626703,
"repo_name": "icoxfog417/kanaria",
"id": "c1d5779cecd64929027cfa0be9abc458d38fafb8",
"size": "2202",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kanaria/core/service/db.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "47953"
}
],
"symlink_target": ""
} |
from boto.connection import AWSAuthConnection
from boto.exception import BotoServerError
import boto
import boto.jsonresponse
import urllib
import base64
class SESConnection(AWSAuthConnection):
ResponseError = BotoServerError
DefaultHost = 'email.us-east-1.amazonaws.com'
APIVersion = '2010-12-01'
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
port=None, proxy=None, proxy_port=None,
host=DefaultHost, debug=0):
AWSAuthConnection.__init__(self, host, aws_access_key_id,
aws_secret_access_key, True, port, proxy,
proxy_port, debug=debug)
def _required_auth_capability(self):
return ['ses']
def _build_list_params(self, params, items, label):
"""Add an AWS API-compatible parameter list to a dictionary.
:type params: dict
:param params: The parameter dictionary
:type items: list
:param items: Items to be included in the list
:type label: string
:param label: The parameter list's name
"""
if isinstance(items, basestring):
items = [items]
for i in range(1, len(items) + 1):
params['%s.%d' % (label, i)] = items[i - 1]
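        # For example (illustrative), items=['a@example.com', 'b@example.com'] with
        # label='Destination.ToAddresses.member' populates params with:
        #   'Destination.ToAddresses.member.1': 'a@example.com'
        #   'Destination.ToAddresses.member.2': 'b@example.com'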
def _make_request(self, action, params=None):
"""Make a call to the SES API.
:type action: string
:param action: The API method to use (e.g. SendRawEmail)
:type params: dict
:param params: Parameters that will be sent as POST data with the API
call.
"""
ct = 'application/x-www-form-urlencoded; charset=UTF-8'
headers = {'Content-Type': ct}
params = params or {}
params['Action'] = action
for k, v in params.items():
if isinstance(v, basestring):
params[k] = v.encode('utf-8')
response = super(SESConnection, self).make_request(
'POST',
'/',
headers=headers,
data=urllib.urlencode(params)
)
body = response.read()
if response.status == 200:
list_markers = ('VerifiedEmailAddresses', 'SendDataPoints')
e = boto.jsonresponse.Element(list_marker=list_markers)
h = boto.jsonresponse.XmlHandler(e, None)
h.parse(body)
return e
else:
boto.log.error('%s %s' % (response.status, response.reason))
boto.log.error('%s' % body)
raise self.ResponseError(response.status, response.reason, body)
def send_email(self, source, subject, body, to_addresses, cc_addresses=None,
bcc_addresses=None, format='text', reply_addresses=None,
return_path=None, text_body=None, html_body=None):
"""Composes an email message based on input data, and then immediately
queues the message for sending.
:type source: string
:param source: The sender's email address.
:type subject: string
:param subject: The subject of the message: A short summary of the
content, which will appear in the recipient's inbox.
:type body: string
:param body: The message body.
:type to_addresses: list of strings or string
:param to_addresses: The To: field(s) of the message.
:type cc_addresses: list of strings or string
:param cc_addresses: The CC: field(s) of the message.
:type bcc_addresses: list of strings or string
:param bcc_addresses: The BCC: field(s) of the message.
:type format: string
:param format: The format of the message's body, must be either "text"
or "html".
:type reply_addresses: list of strings or string
:param reply_addresses: The reply-to email address(es) for the
message. If the recipient replies to the
message, each reply-to address will
receive the reply.
:type return_path: string
:param return_path: The email address to which bounce notifications are
to be forwarded. If the message cannot be delivered
to the recipient, then an error message will be
returned from the recipient's ISP; this message will
then be forwarded to the email address specified by
the ReturnPath parameter.
:type text_body: string
:param text_body: The text body to send with this email.
:type html_body: string
:param html_body: The html body to send with this email.
"""
format = format.lower().strip()
if body is not None:
if format == "text":
if text_body is not None:
raise Warning("You've passed in both a body and a text_body; please choose one or the other.")
text_body = body
else:
if html_body is not None:
raise Warning("You've passed in both a body and an html_body; please choose one or the other.")
html_body = body
params = {
'Source': source,
'Message.Subject.Data': subject,
}
if return_path:
params['ReturnPath'] = return_path
if html_body is not None:
params['Message.Body.Html.Data'] = html_body
if text_body is not None:
params['Message.Body.Text.Data'] = text_body
if(format not in ("text","html")):
raise ValueError("'format' argument must be 'text' or 'html'")
if(not (html_body or text_body)):
raise ValueError("No text or html body found for mail")
self._build_list_params(params, to_addresses,
'Destination.ToAddresses.member')
if cc_addresses:
self._build_list_params(params, cc_addresses,
'Destination.CcAddresses.member')
if bcc_addresses:
self._build_list_params(params, bcc_addresses,
'Destination.BccAddresses.member')
if reply_addresses:
self._build_list_params(params, reply_addresses,
'ReplyToAddresses.member')
return self._make_request('SendEmail', params)
def send_raw_email(self, raw_message, source=None, destinations=None):
"""Sends an email message, with header and content specified by the
client. The SendRawEmail action is useful for sending multipart MIME
emails, with attachments or inline content. The raw text of the message
must comply with Internet email standards; otherwise, the message
cannot be sent.
:type source: string
:param source: The sender's email address. Amazon's docs say:
If you specify the Source parameter, then bounce notifications and
complaints will be sent to this email address. This takes precedence
over any Return-Path header that you might include in the raw text of
the message.
:type raw_message: string
:param raw_message: The raw text of the message. The client is
responsible for ensuring the following:
- Message must contain a header and a body, separated by a blank line.
- All required header fields must be present.
- Each part of a multipart MIME message must be formatted properly.
- MIME content types must be among those supported by Amazon SES.
Refer to the Amazon SES Developer Guide for more details.
- Content must be base64-encoded, if MIME requires it.
:type destinations: list of strings or string
:param destinations: A list of destinations for the message.
"""
params = {
'Source': source,
'RawMessage.Data': base64.b64encode(raw_message),
}
if destinations:
self._build_list_params(params, destinations,
'Destinations.member')
return self._make_request('SendRawEmail', params)
def list_verified_email_addresses(self):
"""Fetch a list of the email addresses that have been verified.
:rtype: dict
:returns: A ListVerifiedEmailAddressesResponse structure. Note that
keys must be unicode strings.
"""
return self._make_request('ListVerifiedEmailAddresses')
def get_send_quota(self):
"""Fetches the user's current activity limits.
:rtype: dict
:returns: A GetSendQuotaResponse structure. Note that keys must be
unicode strings.
"""
return self._make_request('GetSendQuota')
def get_send_statistics(self):
"""Fetches the user's sending statistics. The result is a list of data
points, representing the last two weeks of sending activity.
Each data point in the list contains statistics for a 15-minute
interval.
:rtype: dict
:returns: A GetSendStatisticsResponse structure. Note that keys must be
unicode strings.
"""
return self._make_request('GetSendStatistics')
def delete_verified_email_address(self, email_address):
"""Deletes the specified email address from the list of verified
addresses.
        :type email_address: string
        :param email_address: The email address to be removed from the list of
                              verified addresses.
:rtype: dict
:returns: A DeleteVerifiedEmailAddressResponse structure. Note that
keys must be unicode strings.
"""
return self._make_request('DeleteVerifiedEmailAddress', {
'EmailAddress': email_address,
})
def verify_email_address(self, email_address):
"""Verifies an email address. This action causes a confirmation email
message to be sent to the specified address.
        :type email_address: string
:param email_address: The email address to be verified.
:rtype: dict
:returns: A VerifyEmailAddressResponse structure. Note that keys must
be unicode strings.
"""
return self._make_request('VerifyEmailAddress', {
'EmailAddress': email_address,
})
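if __name__ == '__main__':
    # Usage sketch (illustrative, not part of the original module). It assumes
    # AWS credentials are available to boto and that the sender address has
    # already been verified with Amazon SES; both addresses are placeholders.
    conn = SESConnection()
    conn.verify_email_address('sender@example.com')
    conn.send_email(source='sender@example.com',
                    subject='SES test message',
                    body='Plain-text body sent through Amazon SES.',
                    to_addresses=['recipient@example.com'])
    print(conn.list_verified_email_addresses())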
| {
"content_hash": "43484d28cb753bf1a93ac03553071536",
"timestamp": "",
"source": "github",
"line_count": 279,
"max_line_length": 115,
"avg_line_length": 37.94982078853047,
"alnum_prop": 0.5917075935020778,
"repo_name": "canvasnetworks/canvas",
"id": "ec1f881d03a83a86658c7d6de59d8d700068f001",
"size": "11737",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "common/boto/ses/connection.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "57"
},
{
"name": "C",
"bytes": "547"
},
{
"name": "CSS",
"bytes": "537625"
},
{
"name": "HTML",
"bytes": "689709"
},
{
"name": "JavaScript",
"bytes": "1313262"
},
{
"name": "Makefile",
"bytes": "258"
},
{
"name": "PHP",
"bytes": "1983"
},
{
"name": "Python",
"bytes": "6659685"
},
{
"name": "Ruby",
"bytes": "876"
},
{
"name": "Shell",
"bytes": "5326"
}
],
"symlink_target": ""
} |
''' COSMO-VIEW
Script: drawing.py
Changes:
J. Ballabrera, December 2017
EGL, 06/2020:
    Python 2.7 is no longer supported.
    Basemap support is deprecated; the code has been updated to Cartopy.
    A console can be added to the main window (this requires replacing
    all prints with the tools.toconsola() function). In some cases the
    wid of the console is passed to other constructors, or a heap
    variable MESSAGE has been introduced to collect "print" messages.
    Added limited support for loading and drawing shapefiles.
    Base layers of topography and relief replaced by the GEBCO and
    EMODNET tile services (requires an internet connection).
    Limited support for geographical projections; everything is
    plotted in PlateCarree.
    setmap() is now deprecated.
    Corrected some text font management.
    All color selections are now managed through the tools.colsel() function.
    The Cartopy projection can be accessed through tools.map_proj().
EGL, 12/2020:
    Multiple Lagrangian trajectories can now be loaded at once by
    using askopenfilenames instead of askopenfile.
QPB, 03/2021:
    Allow for a user-defined time axis.
Add a distance calculator
'''
__version__ = "3.0"
__author__ = "Quim Ballabrera and Emilio García"
__date__ = "July 2020"
import sys
import os
from os.path import isfile, join
import numpy as np
import numpy.ma as ma
from scipy import interpolate
import json
import io
import ast
import math
import datetime
import matplotlib.pyplot as plt
import matplotlib.image as image
import matplotlib.font_manager
import matplotlib.ticker as mticker
from matplotlib.font_manager import FontProperties
from matplotlib.figure import Figure
from matplotlib.offsetbox import TextArea, OffsetImage, AnnotationBbox, AnchoredText
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
from matplotlib import cm as CM
from matplotlib import colors
import matplotlib.patches as mpatches
#EG Cartopy
import cartopy
import cartopy.crs as ccrs
import cartopy.feature as cfeat
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
from cartopy.io.shapereader import Reader
from cartopy.feature import ShapelyFeature
#EG from mpl_toolkits.basemap import Basemap
from netCDF4 import Dataset,num2date
from itertools import chain
from PIL import Image, ImageTk
import matplotlib.animation as manimation
import tkinter as tk
from tkinter import ttk
from tkinter import messagebox
from tkinter import filedialog as filedialog
#from tkcolorpicker import askcolor
from tkinter.colorchooser import askcolor
from tkinter import font as tkfont
from tkcalendar import Calendar, DateEntry
try:
to_unicode = unicode
except:
to_unicode = str
import cosmo.tools as tools
import cosmo.contourplot as contourplot
import cosmo.vectorplot as vectorplot
import cosmo.providers as providers
import cosmo.codar as codar
import cosmo.copernicus as copernicus
import cosmo.saidin as saidin
import cosmo.lagrangian as lagrangian
import cosmo.lineplot as lineplot
import cosmo.clm as clm
import cosmo.db as db
import cosmo.geomarker as geomarker
import cosmo.dotplot as dotplot
import cosmo.json_editor as jeditor
import cosmo.legend as legend
#EG
import cosmo.shape as shape
import cosmo.geoplot as geoplot
import cosmo.field as field
import cosmo.plotxy as plotxy
import cosmo.ellipse as ellipse
import cosmo.patch as patch
import cosmo.climatology as climatology
import cosmo.feature as feature
from cosmo.tools import empty
from cosmo.tools import myround
from cosmo.tools import exists
from cosmo.tools import askforpermission
from cosmo.tools import placeontop
from cosmo.tools import get_remote
from cosmo.tools import get_Date
from cosmo.tools import folderList
from cosmo.tools import urlList
from cosmo.tools import simple_form
from cosmo.tools import haversine
from cosmo.tools import fontconfig
from cosmo.tools import setfont
from cosmo.tools import read_lines
from cosmo.tools import colsel
#EG
from cosmo.tools import map_proj
from cosmo.tools import scale_bar
#EG consola
from cosmo.tools import toconsola
from cosmo import COSMO_CONF_NAME
from cosmo import COSMO_CONF
from cosmo import COSMO_ROOT
from cosmo import COSMO_CONF_PATH
from cosmo import COSMO_CONF_DATA
from cosmo import VERSION
from cosmo import TKINTER_VERSION
from cosmo import MATPLOTLIB_VERSION
from cosmo import CARTOPY_VERSION
global COSMO_CONF,COSMO_CONF_PATH,COSMO_CONF_NAME,COSMO_CONF_DATA
BGC = 'pale green' # Background color
BWC = 'lime green' # Buttons (PREV and NEXT) color
EBC = 'forest green' # Exit Buttons color
FONT = 'Helvetica 14' # Default font
# =====================
class OBJECT():
# =====================
''' Class for cosmo-view objects'''
__version__ = "1.0"
__author__ = "Quim Ballabrera"
__date__ = "February 2022"
def __init__(self,TYPE,OPTIONS=None):
# ======================
''' Define and initialize the class attributes '''
self.n = 0
self.TYPE = TYPE
self.DATA = []
self.LIST = []
self.INDX = tk.IntVar()
self.INDX.set(0)
self.OPTIONS = OPTIONS
# =====================
class CONTOUR():
# =====================
''' Class for 2D data contours'''
__version__ = "2.0"
__author__ = "Quim Ballabrera"
__date__ = "July 2020"
def __init__(self,filename=None):
# ===============================
''' Define and initialize the class attributes '''
self.MESSAGE = "\nCONTOUR class:\n"
self.ALIAS = tk.StringVar()
self.FILENAME = tk.StringVar()
self.SOURCE = 'FILE' # Default source: Read from file
    self.PARENT   = None             # Used in mean and variance calculations
if filename is None:
pass
else:
self.FILENAME.set(filename)
self.FLD = field.fld_parameters()
self.PLOT = contourplot.parameters()
self.MESSAGE += self.PLOT.MESSAGE
self.show = tk.BooleanVar()
self.varname = tk.StringVar()
#self.minval = tk.DoubleVar()
#self.maxval = tk.DoubleVar()
self.landmask = tk.BooleanVar()
self.K = tk.IntVar()
self.L = tk.IntVar()
self.K.set(0)
self.L.set(0)
self.K_LIST = []
self.L_LIST = []
self.Z_LIST = []
self.T_LIST = []
self.DATE = []
self.TIME = []
self.TIME_SET= False
self.landmask.set(False)
self.ALIAS.set('')
self.cbar = None
self.show.set(True)
# Selected point
self.io = tk.IntVar()
self.jo = tk.IntVar()
# Link to the Drawing Time Axis
self.LINK = tk.BooleanVar()
self.LINK.set(False)
def conf_get(self):
# =================
''' Set class dictionary from class attributes '''
conf = {}
conf['ALIAS'] = self.ALIAS.get()
conf['FILENAME'] = self.FILENAME.get()
conf['SOURCE'] = self.SOURCE
conf['PARENT'] = self.PARENT
conf['VARNAME'] = self.varname.get()
conf['K'] = self.K.get()
conf['L'] = self.L.get()
conf['LANDMASK'] = self.landmask.get()
conf['SHOW'] = self.show.get()
conf['LINK'] = self.LINK.get()
conf['PLOT'] = self.PLOT.conf_get()
conf['FLD'] = self.FLD.conf_get()
return conf
def conf_set(self,conf):
# ======================
    ''' Set class attributes from dictionary '''
self.ALIAS.set(conf['ALIAS'])
self.FILENAME.set(conf['FILENAME'])
self.SOURCE = conf['SOURCE']
self.PARENT = conf['PARENT']
self.varname.set(conf['VARNAME'])
self.K.set(conf['K'])
self.L.set(conf['L'])
self.landmask.set(conf['LANDMASK'])
self.show.set(conf['SHOW'])
self.LINK.set(conf['LINK'])
self.PLOT.conf_set(conf['PLOT'])
self.FLD.conf_set(conf['FLD'])
def read(self,**args):
# ====================
try:
wid = args["wid"]
except:
wid = None
try:
update_lims = args["update_lims"]
except:
update_lims = True
K = self.K.get()
L = self.L.get()
self.SOURCE = 'FILE'
toconsola("Reading contour, K, L = "+str(K)+", "+str(L),wid=wid)
if self.FLD.ndims == 2:
u = self.FLD.nc.variables[self.FLD.varname][:,:]
elif self.FLD.ndims == 3:
if self.FLD.icdf.ppl[self.FLD.varid] > -1:
u = self.FLD.nc.variables[self.FLD.varname][L,:,:].squeeze()
elif self.FLD.icdf.ppk[self.FLD.varid] > -1:
u = self.FLD.nc.variables[self.FLD.varname][K,:,:].squeeze()
else:
toconsola('Invalid file!',wid=wid)
return
elif self.FLD.ndims == 4:
u = self.FLD.nc.variables[self.FLD.varname][L,K,:,:].squeeze()
else:
toconsola("Invalid number of dimensions, "+str(self.FLD.ndims),wid=wid)
# Eliminate NaN values in field:
fill_value = u.fill_value
_u = u.filled()
_u[np.isnan(_u)] = fill_value
u = np.ma.masked_equal(_u,fill_value)
# Min and max values
#self.FLD.minval = float(u.min())
#self.FLD.maxval = float(u.max())
self.FLD.minval = np.nanmin(u)
self.FLD.maxval = np.nanmax(u)
toconsola('Min val = '+str(self.FLD.minval),wid=wid)
toconsola('Max val = '+str(self.FLD.maxval),wid=wid)
#print(self.FLD.minval, self.FLD.maxval)
# Make sure that the missing value is NaN:
#_u = u.filled(fill_value=np.nan)
#self.FLD.data = np.ma.masked_equal(_u,np.nan); del _u
self.FLD.data = u.copy()
if update_lims:
toconsola('Setting contour intervals ...',wid=wid)
try:
self.PLOT.CONTOUR_MIN.set(myround(self.FLD.minval))
except:
self.PLOT.CONTOUR_MIN.set(self.FLD.minval)
try:
self.PLOT.CONTOUR_MAX.set(myround(self.FLD.maxval))
except:
self.PLOT.CONTOUR_MAX.set(self.FLD.maxval)
dd = self.PLOT.CONTOUR_MAX.get() - self.PLOT.CONTOUR_MIN.get()
try:
self.PLOT.CONTOUR_INTERVAL.set(myround(0.1*dd,0))
except:
self.PLOT.CONTOUR_INTERVAL.set(0.1*dd)
else:
toconsola('Preserving contour intervals.',wid=wid)
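    # Illustrative example: for a field spanning roughly 0 to 10, the code above
    # would set CONTOUR_MIN = 0, CONTOUR_MAX = 10 and CONTOUR_INTERVAL = 1
    # (one tenth of the min-max range, subject to rounding by myround).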
def save(self,**args):
# ====================
filetypes=[('NetCDF','*.nc'),('ALL','*')]
nn = filedialog.asksaveasfilename(title='Save vector file',
initialdir='./',
filetypes=filetypes,
confirmoverwrite=True)
if len(nn) == 0:
return
else:
filename = '%s' % nn
try:
wid = args["wid"]
except:
wid = None
toconsola('Saving contour data into '+filename,wid=wid)
#nc = Dataset(filename,'w',format='NETCDF4')
nc = Dataset(filename,'w')
nc.createDimension('x',self.FLD.icdf.nx)
nc.createDimension('y',self.FLD.icdf.ny)
dimensions_list = ['y','x']
if self.FLD.icdf.idk >= 0:
nc.createDimension('z',1)
dimensions_list.insert(0,'z')
if self.FLD.icdf.idl >= 0:
nc.createDimension('t',None)
dimensions_list.insert(0,'t')
if self.FLD.icdf.grid2d:
lon = nc.createVariable('Longitude','f8',['y','x'])
nc['Longitude'].setncatts(self.FLD.nc[self.FLD.icdf.xname].__dict__)
lat = nc.createVariable('Latitude','f8',['y','x'])
nc['Latitude'].setncatts(self.FLD.nc[self.FLD.icdf.yname].__dict__)
else:
lon = nc.createVariable('Longitude','f8',['x'])
nc['Longitude'].setncatts(self.FLD.nc[self.FLD.icdf.xname].__dict__)
lat = nc.createVariable('Latitude','f8',['y'])
nc['Latitude'].setncatts(self.FLD.nc[self.FLD.icdf.yname].__dict__)
if self.FLD.icdf.idk >= 0:
depth = nc.createVariable('Depth','f8',['z'])
nc['Depth'].setncatts(self.FLD.nc[self.FLD.icdf.zname].__dict__)
if self.FLD.icdf.idl >= 0:
time = nc.createVariable('Time','f8',['t'])
nc['Time'].setncatts(self.FLD.nc[self.FLD.icdf.tname].__dict__)
aname = self.varname.get()
a = nc.createVariable(aname,'f8',dimensions_list,fill_value=1e36)
try:
long_name = self.FLD.nc[aname].long_name
nc[aname].long_name = long_name
except:
pass
try:
units = self.FLD.nc[aname].units
nc[aname].units = units
except:
pass
_a = self.FLD.data.filled(fill_value=1e36)
_a[np.isnan(_a)] = 1e36
# Write data
if self.FLD.icdf.grid2d:
lon[:,:] = self.FLD.xx
lat[:,:] = self.FLD.yy
else:
lon[:] = self.FLD.x
lat[:] = self.FLD.y
depth[0] = self.Z_LIST[self.K.get()]
time[0] = self.T_LIST[self.L.get()]
a[0,0,:,:] = _a
nc.close()
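# Illustrative sketch (not part of the original code): the conf_get()/conf_set()
# pair above lets a CONTOUR layer configuration be round-tripped through JSON,
# e.g. when a drawing session is saved and later restored:
#
#   saved = json.dumps(C.conf_get())      # C is an existing CONTOUR instance
#   ...
#   C.conf_set(json.loads(saved))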
# =====================
class VECTOR():
# =====================
''' Class for 2D data (x,y) vectors'''
__version__ = "2.0"
__author__ = "Quim Ballabrera"
__date__ = "July 2020"
def __init__(self,ufile=None,vfile=None):
# =======================================
''' Define and initialize the class attributes '''
self.MESSAGE = "\nVECTOR class:\n"
self.ALIAS = tk.StringVar()
self.UFILENAME = tk.StringVar()
self.VFILENAME = tk.StringVar()
self.two_files = 0 #By default, U and V in the same file
self.SOURCE = 'FILE'
    self.PARENT     = None           # Used in mean and variance calculations
if ufile is None:
pass
else:
self.UFILENAME.set(ufile)
if vfile is None:
self.VFILENAME.set(ufile)
else:
self.VFILENAME.set(vfile)
self.U = field.fld_parameters()
self.V = field.fld_parameters()
self.uname = tk.StringVar()
self.vname = tk.StringVar()
self.PLOT = vectorplot.parameters()
self.MESSAGE += self.PLOT.MESSAGE
self.K = tk.IntVar()
self.L = tk.IntVar()
self.K.set(0)
self.L.set(0)
self.K_LIST = []
self.L_LIST = []
self.Z_LIST = []
self.T_LIST = []
self.DATE = []
self.TIME = []
self.ALIAS.set('')
self.show = tk.BooleanVar()
self.show.set(True)
# Select grid type:
self.grid_type = tk.StringVar()
self.grid_type_list = ['A','B','C']
self.grid_type.set('A')
# Selected point
self.io = tk.IntVar()
self.jo = tk.IntVar()
# Variables to plot
self.reprocess = True
self.xplt = None
self.yplt = None
self.uplt = None
self.vplt = None
self.GRID_MODE_0 = -1
self.CURRENT_DX_0 = -1
self.CURRENT_DY_0 = -1
self.CURRENT_NX_0 = -1
self.CURRENT_NY_0 = -1
# Link to the drawing Time Axis
self.LINK = tk.BooleanVar()
self.LINK.set(False)
def conf_get(self):
# =================
''' Set class dictionary from class attributes '''
conf = {}
conf['ALIAS'] = self.ALIAS.get()
conf['UFILENAME'] = self.UFILENAME.get()
conf['VFILENAME'] = self.VFILENAME.get()
conf['TWO_FILES'] = self.two_files
conf['SOURCE'] = self.SOURCE
conf['PARENT'] = self.PARENT
conf['UNAME'] = self.uname.get()
conf['VNAME'] = self.vname.get()
conf['K'] = self.K.get()
conf['L'] = self.L.get()
conf['SHOW'] = self.show.get()
conf['GRID_TYPE'] = self.grid_type.get()
conf['LINK'] = self.LINK.get()
conf['PLOT'] = self.PLOT.conf_get()
conf['U'] = self.U.conf_get()
conf['V'] = self.V.conf_get()
return conf
def conf_set(self,conf):
# ======================
    ''' Set class attributes from dictionary '''
self.ALIAS.set(conf['ALIAS'])
self.UFILENAME.set(conf['UFILENAME'])
self.VFILENAME.set(conf['VFILENAME'])
self.two_files = conf['TWO_FILES']
self.SOURCE = conf['SOURCE']
self.PARENT = conf['PARENT']
self.uname.set(conf['UNAME'])
self.vname.set(conf['VNAME'])
self.K.set(conf['K'])
self.L.set(conf['L'])
self.show.set(conf['SHOW'])
self.LINK.set(conf['LINK'])
self.grid_type.set(conf['GRID_TYPE'])
self.PLOT.conf_set(conf['PLOT'])
self.U.conf_set(conf['U'])
self.V.conf_set(conf['V'])
def read(self,**args):
# ====================
try:
wid = args["wid"]
except:
wid = None
K = self.K.get()
L = self.L.get()
self.SOURCE = 'FILE'
self.reprocess = True
toconsola("Reading vector, K, L = "+str(K)+", "+str(L),wid=wid)
if self.U.ndims == 2:
u = self.U.nc.variables[self.U.varname][:,:]
v = self.V.nc.variables[self.V.varname][:,:]
elif self.U.ndims == 3:
if self.U.icdf.ppl[self.U.varid] > -1:
u = self.U.nc.variables[self.U.varname][L,:,:].squeeze()
v = self.V.nc.variables[self.V.varname][L,:,:].squeeze()
elif self.U.icdf.ppk[self.U.varid] > -1:
u = self.U.nc.variables[self.U.varname][K,:,:].squeeze()
v = self.V.nc.variables[self.V.varname][K,:,:].squeeze()
else:
toconsola('Invalid file!',wid=wid)
return
elif self.U.ndims == 4:
u = self.U.nc.variables[self.U.varname][L,K,:,:].squeeze()
v = self.V.nc.variables[self.V.varname][L,K,:,:].squeeze()
else:
toconsola("Invalid number of dimensions, "+str(self.U.ndims),wid=wid)
# Make sure that the missing value is NaN:
_u = u.filled(fill_value=np.nan)
_v = v.filled(fill_value=np.nan)
u = np.ma.masked_equal(_u,np.nan); del _u
v = np.ma.masked_equal(_v,np.nan); del _v
if self.grid_type.get() == 'A' or self.grid_type.get() == 'B':
toconsola("Velocities in a A-grid",wid=wid)
self.U.data = u.copy()
self.V.data = v.copy()
return
if self.grid_type.get() == 'C':
toconsola("Regrid C-grid velocities",wid=wid)
self.U.data = 0.5*(u[1:-1,:-1]+u[1:-1,1:])
self.V.data = 0.5*(v[:-1,1:-1]+v[1:,1:-1])
return
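    # Note on the C-grid branch above: on an Arakawa C-grid the u and v
    # components sit on the cell faces, so averaging adjacent values moves both
    # components onto the cell centres (t-points) before plotting.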
def save(self,**args):
# ====================
filetypes=[('NetCDF','*.nc'),('ALL','*')]
nn = filedialog.asksaveasfilename(title='Save vector file',
initialdir='./',
filetypes=filetypes,
confirmoverwrite=True)
if len(nn) == 0:
return
else:
filename = '%s' % nn
try:
wid = args["wid"]
except:
wid = None
toconsola('Saving vector data into '+filename,wid=wid)
#nc = Dataset(filename,'w',format='NETCDF4')
nc = Dataset(filename,'w')
nc.createDimension('x',self.U.icdf.nx)
nc.createDimension('y',self.U.icdf.ny)
dimensions_list = ['y','x']
if self.U.icdf.idk >= 0:
nc.createDimension('z',1)
dimensions_list.insert(0,'z')
if self.U.icdf.idl >= 0:
nc.createDimension('t',None)
dimensions_list.insert(0,'t')
if self.U.icdf.grid2d:
lon = nc.createVariable('Longitude','f8',['y','x'])
nc['Longitude'].setncatts(self.U.nc[self.U.icdf.xname].__dict__)
lat = nc.createVariable('Latitude','f8',['y','x'])
nc['Latitude'].setncatts(self.U.nc[self.U.icdf.yname].__dict__)
else:
lon = nc.createVariable('Longitude','f8',['x'])
nc['Longitude'].setncatts(self.U.nc[self.U.icdf.xname].__dict__)
lat = nc.createVariable('Latitude','f8',['y'])
nc['Latitude'].setncatts(self.U.nc[self.U.icdf.yname].__dict__)
if self.U.icdf.idk >= 0:
depth = nc.createVariable('Depth','f8',['z'])
nc['Depth'].setncatts(self.U.nc[self.U.icdf.zname].__dict__)
if self.U.icdf.idl >= 0:
time = nc.createVariable('Time','f8',['t'])
nc['Time'].setncatts(self.U.nc[self.U.icdf.tname].__dict__)
uname = self.uname.get()
vname = self.vname.get()
u = nc.createVariable(uname,'f8',dimensions_list,fill_value=1e36)
v = nc.createVariable(vname,'f8',dimensions_list,fill_value=1e36)
try:
long_name = self.U.nc[uname].long_name
nc[uname].long_name = long_name
except:
pass
try:
units = self.U.nc[uname].units
nc[uname].units = units
except:
pass
try:
long_name = self.V.nc[vname].long_name
nc[vname].long_name = long_name
except:
pass
try:
units = self.V.nc[vname].units
nc[vname].units = units
except:
pass
_u = self.U.data.filled(fill_value=1e36)
_u[np.isnan(_u)] = 1e36
_v = self.V.data.filled(fill_value=1e36)
_v[np.isnan(_v)] = 1e36
# Write data
if self.U.icdf.grid2d:
lon[:,:] = self.U.xx
lat[:,:] = self.U.yy
else:
lon[:] = self.U.x
lat[:] = self.U.y
depth[0] = self.Z_LIST[self.K.get()]
time[0] = self.T_LIST[self.L.get()]
u[0,0,:,:] = _u
v[0,0,:,:] = _v
nc.close()
# =====================
class LAYER():
# =====================
''' Class for Drawing layers'''
__version__ = "1.0"
__author__ = "Quim Ballabrera"
__date__ = "August 2020"
def __init__ (self):
# ==================
''' Define and initialize the class attributes '''
self.MESSAGE = "\nLAYER class:\n"
self.n = 0 # Number of layers
# self.nsequence = 0 # Number of layers attached to a SEQUENCE
# self.seqlen = 0 # SEQUENCE length
# self.leader = 0 # Points to the SEQUENCER layer
self.TYPE = [] # VEC, FLD, MARKER, ....
self.TYPE_INDEX = []
self.FILENAME = []
# self.INSEQUENCE = [] # Belongs to a SEQUENCE
# self.SEQUENCER = [] # True if SEQUENCE leader
self.NREC = [] # Number of records in layer
self.update = False
def erase(self,TYPE,ii,**args):
# =============================
try:
wid = args["wid"]
except:
wid = None
if self.n == 0:
toconsola('Invalid ERASE action in empty layer structure',wid=wid)
return
ll = -1
for i in range(self.n):
if self.TYPE[i] == TYPE and self.TYPE_INDEX[i] == ii:
ll = i
if ll == -1:
toconsola('Layer not found',wid=wid)
return
#INSEQUENCE = self.INSEQUENCE[ll].get()
#SEQUENCER = self.SEQUENCER[ll].get()
self.update = False
toconsola('Erasing '+TYPE+' layer '+str(ii),wid=wid)
del self.TYPE[ll]
del self.TYPE_INDEX[ll]
del self.FILENAME[ll]
#del self.INSEQUENCE[ll]
#del self.SEQUENCER[ll]
del self.NREC[ll]
self.n -= 1
if self.n == 0:
toconsola('Empty layer structure',wid=wid)
self.TYPE = []
self.TYPE_INDEX = []
self.FILENAME = []
#self.INSEQUENCE = []
#self.SEQUENCER = []
self.NREC = []
#self.nsequence = 0
#self.seqlen = 0
return
# If we are here , it means that the structure is not empty
# Update TYPE_INDEX
ii = -1
for i in range(self.n):
if self.TYPE[i] == TYPE:
ii += 1
self.TYPE_INDEX[i] = ii
# # If erasing a layer in the SEQUENCE:
# if INSEQUENCE:
# self.nsequence -= 1
# if self.nsequence > 0:
# if SEQUENCER:
# # If we have erased the SEQUENCER,
# # we set the first field as SEQUENCE leader
# for i in range(self.n):
# if self.INSEQUENCE[i].get():
# self.SEQUENCER[i].set(True)
# self.leader = i
# self.update = True
# else:
# self.seqlen = 0
#
def add(self,TYPE,Filename=None,N=None,**args):
# ==============================================
try:
wid = args["wid"]
except:
wid = None
self.TYPE.append(TYPE)
self.FILENAME.append(Filename)
self.NREC.append(N)
ii = 0
for i in range(self.n):
if self.TYPE[i] == TYPE:
ii += 1
self.TYPE_INDEX.append(ii)
self.n += 1
#self.INSEQUENCE.append(tk.BooleanVar(value=False))
#self.SEQUENCER.append(tk.BooleanVar(value=False))
toconsola('Adding '+TYPE+' layer ',wid=wid)
toconsola('Layer %s with index %d' %(TYPE,self.TYPE_INDEX[-1]),wid=wid)
toconsola('Number of layers: ' + str(self.n)+'\n',wid=wid)
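    # Illustrative example of the per-type bookkeeping done above:
    #   LAYERS = LAYER()
    #   LAYERS.add('FLD', Filename='a.nc', N=10)   # TYPE_INDEX -> [0]
    #   LAYERS.add('FLD', Filename='b.nc', N=10)   # TYPE_INDEX -> [0, 1]
    #   LAYERS.add('VEC', Filename='c.nc', N=10)   # TYPE_INDEX -> [0, 1, 0]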
def print(self):
# ==============
print('\n ================================== ')
print('Number of layers, n = ', self.n)
# print('Number of layers in SEQUENCE, nsequence = ', self.nsequence)
# print('SEQUENCE,lenght = ', self.seqlen)
# print('SEQUENCE leader id = ', self.leader)
for i in range(self.n):
print('> Layer ', i)
print('>> Type, Type order, num records : ', self.TYPE[i], self.TYPE_INDEX[i], self.NREC[i])
print('>> Filename : ', self.FILENAME[i])
#print('>> LINK : ', self.LINK.get())
# print('>> In sequence?, Sequence leader ? ', self.INSEQUENCE[i].get(), self.SEQUENCER[i].get())
# =====================
class fld_parameters():
# =====================
''' Class for 2D data fields'''
__version__ = "1.0"
__author__ = "Quim Ballabrera"
__date__ = "December 2017"
def __init__ (self):
# ==================
''' Define and initialize the class attributes '''
self.MESSAGE = "\nFLD_PARA:\n"
self.PLOT = contourplot.parameters()
self.MESSAGE += self.PLOT.MESSAGE
self.missing = tk.DoubleVar()
self.masked = tk.BooleanVar()
self.show = tk.BooleanVar()
self.masked.set(True)
self.show.set(True)
self.F = None
self.minval = None
self.maxval = None
self.mask = None
self.data = None
self.varname = None
self.units = None
self.missing_value = None
self.cbar = None
def conf_get(self):
# =================
''' Set class dictionary from class attributes '''
conf = {}
conf['MISSING'] = self.missing.get()
conf['MASKED'] = self.masked.get()
conf['SHOW'] = self.show.get()
conf['PLOT'] = self.PLOT.conf_get()
return conf
def conf_set(self,conf):
# ======================
    ''' Set class attributes from dictionary '''
self.missing.set(conf['MISSING'])
self.masked.set(conf['MASKED'])
self.show.set(conf['SHOW'])
self.PLOT.conf_set(conf['PLOT'])
# =====================
class vel_parameters():
# =====================
''' Class for 2D velocity fields'''
__version__ = "1.0"
__author__ = "Quim Ballabrera"
__date__ = "December 2017"
def __init__ (self):
# ==================
''' Define and initialize the class attributes '''
self.MESSAGE = "VEL_PARA:\n"
self.PLOT = vectorplot.parameters()
self.MESSAGE += self.PLOT.MESSAGE
self.u = None
self.v = None
self.xu = None # To allow grid-types
self.yu = None # To allow grid-types
self.xv = None # To allow grid-types
self.yv = None # To allow grid-types
self.xt = None # To allow grid-types
self.yt = None # To allow grid-types
self.speed = None
self.F = None
self.cbar = None
self.show = tk.BooleanVar()
self.show.set(True)
def conf_get(self):
# =================
''' Set class dictionary from class attributes '''
conf = {}
conf['SHOW'] = self.show.get()
conf['PLOT'] = self.PLOT.conf_get()
return conf
def conf_set(self,conf):
# ======================
    ''' Set class attributes from dictionary '''
self.show.set(conf['SHOW'])
self.PLOT.conf_set(conf['PLOT'])
# =====================
class cdf_parameters():
# =====================
''' Class for NetCDF files'''
__version__ = "1.0"
__author__ = "Quim Ballabrera"
__date__ = "December 2017"
def __init__ (self):
# ==================
''' Define and initialize the class attributes '''
self.FILENAME = tk.StringVar()
self.varname = tk.StringVar()
self.uname = tk.StringVar()
self.vname = tk.StringVar()
self.K = tk.IntVar()
self.L = tk.IntVar()
self.ncid = None
self.icdf = None
self.varid = None
self.uid = None
self.vid = None
self.K_LIST = []
self.L_LIST = []
self.Z_LIST = []
self.T_LIST = []
self.DATE = []
self.TIME = []
self.K.set(0)
self.L.set(0)
self.FIELD = None
self.VEL = None
self.lon = None
self.lat = None
self.xx = None
self.yy = None
self.varname.set('')
self.uname.set('')
self.vname.set('')
# Add mutiple grid types:
self.grid_type = tk.StringVar()
self.grid_type_list = ['A','B','C']
self.grid_type.set('A')
def conf_get(self):
# =================
''' Set class dictionary from class attributes'''
conf = {}
#conf['FILENAME'] = self.FILENAME.get()
conf['varname'] = self.varname.get()
conf['uname'] = self.uname.get()
conf['vname'] = self.vname.get()
conf['K'] = self.K.get()
conf['L'] = self.L.get()
conf['varid'] = self.varid
conf['uid'] = self.uid
conf['vid'] = self.vid
conf['grid_type'] = self.grid_type.get()
if self.icdf is None:
conf['ICDF'] = None
else:
conf['ICDF'] = self.icdf.conf_get()
if self.FIELD is None:
conf['FIELD'] = None
else:
conf['FIELD'] = self.FIELD.conf_get()
if self.VEL is None:
conf['VEL'] = None
else:
conf['VEL'] = self.VEL.conf_get()
return conf
def conf_set(self,conf):
# ======================
''' Set class attributes from dictionary '''
#self.FILENAME.set(conf['FILENAME'])
self.varname.set(conf['varname'])
self.uname.set(conf['uname'])
self.vname.set(conf['vname'])
self.K.set(conf['K'])
self.L.set(conf['L'])
self.varid = conf['varid']
self.uid = conf['uid']
self.vid = conf['vid']
self.grid_type.set(conf['grid_type'])
if conf['ICDF'] == "None":
pass
else:
self.icdf.conf_set(conf['ICDF'])
if self.FIELD is None:
pass
else:
self.FIELD.conf_set(conf['FIELD'])
if self.VEL is None:
pass
else:
self.VEL.conf_set(conf['VEL'])
# ====================
class DrawingConfig():
# ====================
def __init__(self):
# ========================
self.FILECONF = '%s' % COSMO_CONF + 'drawing.conf'
self.VERSION = __version__
self.OUTPUT_FIGURE = tk.BooleanVar()
self.OUTPUT_LEAFLET = tk.BooleanVar()
self.GEOMAP = tk.BooleanVar()
self.WITH_AXIS = tk.BooleanVar()
#EG Cartopy projection and parameters
self.MAP_PROJECTION = tk.StringVar()
self.MAP_PROJ_LAT_0 = tk.DoubleVar()
self.MAP_PROJ_LON_0 = tk.DoubleVar()
self.MAP_PROJ_MIN_LAT = tk.DoubleVar()
self.MAP_PROJ_MAX_LAT = tk.DoubleVar()
self.MAP_PROJ_F_NORTH = tk.DoubleVar()
self.MAP_PROJ_F_EAST = tk.DoubleVar()
self.MAP_PROJ_LAT_T_SCA = tk.DoubleVar()
self.MAP_PROJ_T_SCA_LAT = tk.DoubleVar()
self.MAP_PROJ_SCA_FAC = tk.DoubleVar()
self.MAP_PROJ_SATELLITE_HEIGHT = tk.DoubleVar()
self.MAP_PROJ_SWEEP_AXIS = tk.StringVar()
self.MAP_RESOLUTION = tk.StringVar()
self.EPSG = tk.IntVar()
self.SOUTH = tk.DoubleVar()
self.NORTH = tk.DoubleVar()
self.WEST = tk.DoubleVar()
self.EAST = tk.DoubleVar()
self.WIDTH = tk.DoubleVar()
self.HEIGHT = tk.DoubleVar()
self.LAT_0 = tk.DoubleVar() #
self.LON_0 = tk.DoubleVar()
self.SATELLITE_HEIGHT = tk.DoubleVar()
self.COASTLINE_SHOW = tk.BooleanVar()
# EG 1:Natural-Earth 2: EMODNET
self.COASTLINE_SOURCE = tk.IntVar()
self.COASTLINE_WIDTH = tk.DoubleVar()
self.COASTLINE_COLOR = tk.StringVar()
self.COASTLINE_ZORDER = tk.IntVar()
self.COUNTRYLINE_SHOW = tk.BooleanVar()
self.COUNTRYLINE_WIDTH = tk.DoubleVar()
self.COUNTRYLINE_COLOR = tk.StringVar()
self.LAND_COLOR = tk.StringVar()
self.LAND_ZORDER = tk.IntVar()
self.WATER_COLOR = tk.StringVar()
self.WATER_ZORDER = tk.IntVar()
self.TITLE = tk.StringVar()
self.TITLEFONT = FontProperties().copy()
self.TITLE_PAD = tk.DoubleVar()
self.XLABEL = tk.StringVar()
self.YLABEL = tk.StringVar()
self.LABEL_SIZE = tk.IntVar()
self.XLABEL_PAD = tk.DoubleVar()
self.YLABEL_PAD = tk.DoubleVar()
self.ZLABEL = tk.StringVar()
self.TLABEL = tk.StringVar()
self.DPI = tk.IntVar()
self.OUT_FILENAME = None
self.FIGURE_COLOR = tk.StringVar()
self.TEXT_COLOR = tk.StringVar()
self.GRID_SHOW = tk.BooleanVar()
self.GRID_LINEWIDTH = tk.DoubleVar()
self.MERIDIAN_INI = tk.DoubleVar()
self.MERIDIAN_FIN = tk.DoubleVar()
self.MERIDIAN_INT = tk.DoubleVar()
self.PARALLEL_INI = tk.DoubleVar()
self.PARALLEL_FIN = tk.DoubleVar()
self.PARALLEL_INT = tk.DoubleVar()
self.GRID_COLOR = tk.StringVar()
self.GRID_FONTCOLOR = tk.StringVar()
self.GRID_SIZE = tk.IntVar()
self.GRID_NORTH = tk.BooleanVar()
self.GRID_SOUTH = tk.BooleanVar()
self.GRID_WEST = tk.BooleanVar()
self.GRID_EAST = tk.BooleanVar()
self.GRID_LINESTYLE = tk.StringVar()
self.GRID_ALPHA = tk.DoubleVar()
self.GRID_ZORDER = tk.IntVar()
self.SCALE_SHOW = tk.BooleanVar()
self.SCALE_X = tk.DoubleVar()
self.SCALE_Y = tk.DoubleVar()
self.SCALE_XO = tk.DoubleVar()
self.SCALE_YO = tk.DoubleVar()
self.SCALE_LENGTH = tk.DoubleVar()
self.SCALE_UNITS = tk.StringVar()
self.SCALE_STYLE = tk.StringVar()
self.SCALE_FONTSIZE = tk.IntVar()
self.SCALE_FONTCOLOR = tk.StringVar()
self.SCALE_LABELSTYLE = tk.StringVar()
self.SCALE_FORMAT = tk.StringVar()
self.SCALE_YOFFSET = tk.DoubleVar()
self.SCALE_FILLCOLOR1 = tk.StringVar()
self.SCALE_FILLCOLOR2 = tk.StringVar()
self.SCALE_LINECOLOR = tk.StringVar()
self.SCALE_LINEWIDTH = tk.IntVar()
self.SCALE_ZORDER = tk.IntVar()
self.cons = None
#self.X = None
#self.Y = None
#EG RELIEF=1 GEBCO, RELIEF=2 EMODNET
self.RELIEF_SHOW = tk.BooleanVar()
self.RELIEF = tk.IntVar()
#EG self.BLUEMARBLE = tk.BooleanVar()
#EG self.ETOPO = tk.BooleanVar()
self.BACKGROUND_SCALE = tk.DoubleVar()
self.RIVERS_SHOW = tk.BooleanVar()
self.RIVERS_WIDTH = tk.DoubleVar()
self.RIVERS_COLOR = tk.StringVar()
#EG ARCGIS changed by EMODNET
self.EMODNET_ISO = tk.BooleanVar()
#EG self.ARCGISIMAGE = tk.IntVar()
#EG self.ARCGISSERVICE = tk.StringVar()
#EG self.ARCGISSERVICE_LIST = ['ESRI_Imagery_World_2D', \
#EG 'ESRI_StreetMap_World_2D', \
#EG 'NatGEo_World_Map', \
#EG 'Ocean_Basemap', \
#EG 'World_Imagery', \
#EG 'World_Physical_Map', \
#EG 'World_Shaded_Relief', \
#EG 'World_Street_Map', \
#EG 'World_Terrain_Base', \
#EG 'World_Topo_Map']
#EG self.ARCGISPIXELS = tk.IntVar()
#EG self.ARCGISDPI = tk.IntVar()
#EG self.ARCGISVERBOSE = tk.BooleanVar()
self.LOGO_FILE = tk.StringVar()
self.LOGO_ZOOM = tk.DoubleVar()
self.LOGO_LOCATION = tk.StringVar()
self.LOGO_X = tk.DoubleVar()
self.LOGO_Y = tk.DoubleVar()
self.LOGO_DISPLAY = tk.BooleanVar()
self.TIMESTAMP_SHOW = tk.BooleanVar()
self.TIMESTAMP_BOLD = tk.BooleanVar()
self.TIMESTAMP_X = tk.DoubleVar()
self.TIMESTAMP_Y = tk.DoubleVar()
self.TIMESTAMP_SIZE = tk.IntVar()
self.TIMESTAMP_COLOR = tk.StringVar()
self.VIDEO_NAME = tk.StringVar()
self.VIDEO_TITLE = tk.StringVar()
self.VIDEO_AUTHOR = tk.StringVar()
self.VIDEO_COMMENT = tk.StringVar()
self.VIDEO_FPS = tk.IntVar()
self.VIDEO_DPI = tk.IntVar()
self.VIDEO_L1 = tk.IntVar()
self.VIDEO_L2 = tk.IntVar()
self.WINDOW_FONT_TYPE = tk.StringVar()
self.WINDOW_FONT_SIZE = tk.IntVar()
self.MAP_FONT_TYPE = tk.StringVar()
self.LEGEND = legend.LegendConfig()
self.LEGEND.SHOW.set(False)
self.CROP_PAD = tk.DoubleVar()
self.CROP_PAD.set(0.0)
# Parameters for Saving frames
self.SFRAME_PREFIX = tk.StringVar()
self.SFRAME_POSTFIX_MODE = tk.IntVar()
self.SFRAME_L1 = tk.IntVar()
self.SFRAME_L2 = tk.IntVar()
self.SFRAME_LSTEP = tk.IntVar()
self.SIZE = [9,6]
self.OUTPUT_FIGURE.set(True)
self.OUTPUT_LEAFLET.set(False)
self.GEOMAP.set(True)
self.WITH_AXIS.set(False)
#EG Default Cartopy PlateCarree and parameters
self.MAP_PROJECTION.set('PlateCarree')
self.MAP_PROJ_LAT_0.set(0.0)
self.MAP_PROJ_LON_0.set(0.0)
self.MAP_PROJ_MIN_LAT.set(-80.0)
self.MAP_PROJ_MAX_LAT.set(84.0)
self.MAP_PROJ_F_NORTH.set(0.0)
self.MAP_PROJ_F_EAST.set(0.0)
self.MAP_PROJ_LAT_T_SCA.set(0.0)
self.MAP_PROJ_T_SCA_LAT.set(-1)
self.MAP_PROJ_SCA_FAC.set(-1)
self.MAP_PROJ_SATELLITE_HEIGHT.set(35785831)
self.MAP_PROJ_SWEEP_AXIS.set('y')
self.MAP_RESOLUTION.set('50m')
self.EPSG.set(4326)
#EG self.MAP_PROJECTION.set('cyl')
#EG self.MAP_RESOLUTION.set('l')
self.SOUTH.set(-90)
self.NORTH.set(90)
self.WEST.set(-180)
self.EAST.set(180)
self.WIDTH.set(0)
self.HEIGHT.set(0)
self.LAT_0.set(0)
self.LON_0.set(0)
self.SATELLITE_HEIGHT.set(35786000)
self.COASTLINE_SHOW.set(False)
self.COASTLINE_SOURCE.set(1)
self.COASTLINE_WIDTH.set(1)
self.COASTLINE_COLOR.set('black')
self.COASTLINE_ZORDER.set(1)
self.COUNTRYLINE_SHOW.set(False)
self.COUNTRYLINE_WIDTH.set(2)
self.COUNTRYLINE_COLOR.set('grey')
self.LAND_COLOR.set('coral')
self.LAND_ZORDER.set(0)
self.WATER_COLOR.set('white')
self.WATER_ZORDER.set(0)
self.TITLE.set('')
self.TITLEFONT.set_size(22)
self.TITLEFONT.set_weight('bold')
self.TITLE_PAD.set(0)
self.XLABEL.set('Longitude')
self.YLABEL.set('Latitude')
self.LABEL_SIZE.set(16)
self.XLABEL_PAD.set(0.12)
self.YLABEL_PAD.set(0.05)
self.ZLABEL.set('')
self.TLABEL.set('')
self.DPI.set(72)
self.FIGURE_COLOR.set('white')
self.TEXT_COLOR.set('black')
self.GRID_SHOW.set(True)
self.GRID_LINEWIDTH.set(1)
self.MERIDIAN_INI.set(-180)
self.MERIDIAN_FIN.set(210)
self.MERIDIAN_INT.set(60)
self.PARALLEL_INI.set(-90)
self.PARALLEL_FIN.set(120)
self.PARALLEL_INT.set(30)
self.GRID_COLOR.set('black')
self.GRID_FONTCOLOR.set('black')
self.GRID_SIZE.set(12)
self.GRID_NORTH.set(False)
self.GRID_SOUTH.set(True)
self.GRID_WEST.set(False)
self.GRID_EAST.set(True)
self.GRID_LINESTYLE.set(':')
self.GRID_ALPHA.set(1.0)
self.GRID_ZORDER.set(2)
self.SCALE_SHOW.set(False)
self.SCALE_X.set(0)
self.SCALE_Y.set(0)
self.SCALE_XO.set(0.5)
self.SCALE_YO.set(0.05)
self.SCALE_LENGTH.set(400)
self.SCALE_UNITS.set('km')
self.SCALE_STYLE.set('fancy')
self.SCALE_FONTSIZE.set(14)
self.SCALE_FONTCOLOR.set('k')
self.SCALE_LABELSTYLE.set('simple')
self.SCALE_FORMAT.set('%d')
self.SCALE_YOFFSET.set(None)
self.SCALE_FILLCOLOR1.set('w')
self.SCALE_FILLCOLOR2.set('k')
self.SCALE_LINECOLOR.set('k')
self.SCALE_LINEWIDTH.set(3)
self.SCALE_ZORDER.set(10)
#EG RELIEF refers to GEBCO tile vms
self.RELIEF_SHOW.set(False)
self.RELIEF.set(1)
self.BACKGROUND_SCALE.set(1.0)
self.RIVERS_SHOW.set(False)
self.RIVERS_WIDTH.set(0.2)
self.RIVERS_COLOR.set('blue')
#EG EMODNET
#self.EMODNET_COAST.set(False)
self.EMODNET_ISO.set(False)
#EG self.ARCGISIMAGE.set(0)
#EG self.ARCGISSERVICE.set('ESRI_Imagery_world_2D')
#EG self.ARCGISPIXELS.set(400)
#EG self.ARCGISDPI.set(96)
#EG self.ARCGISVERBOSE.set(True)
self.LOGO_FILE.set(COSMO_CONF_PATH+'MEDOSMOSIS.png')
self.LOGO_IMAGE = image.imread(self.LOGO_FILE.get())
self.LOGO_ZOOM.set(0.20)
self.LOGO_LOCATION.set('SW')
self.LOGO_DISPLAY.set(False)
self.ISOBAT_PATH = tk.StringVar()
self.ISOBAT_PATH.set(COSMO_ROOT+'/data/isobaths/')
# self.ISOBAT_Z = [ 0, 100, 200, 400,
# 600, 800, 1000, 1200, 1400,
# 1600, 1800, 2000, 2500, 3000,
# ]
#
# self.ISOBAT_LABEL = ['coastline', '100 m', '200 m', '400 m',
# '600 m', '800 m','1000 m','1200 m','1400 m',
# '1600 m','1800 m','2000 m','2500 m','3000 m',
# ]
#
self.ISOBAT_Z = [ 0, 50, 100, 200, 250, 400, 500,
600, 750, 800, 1000, 1250, 1500, 1750,
2000, 2500, 3000, 3500, 4000, 4500, 5000]
self.ISOBAT_LABEL = ['coastline', '50 m', '100 m', '200 m',
'250 m', '400 m', '500 m', '600 m', '750 m',
'800 m','1000 m','1250 m','1500 m','1750 m',
                         '2000 m','2500 m','3000 m','3500 m','4000 m',
'4500 m','5000 m' ]
self.nisobat = len(self.ISOBAT_Z)
self.ISOBAT_SELEC = []
self.ISOBAT_COLOR = []
self.ISOBAT_STYLE = []
self.ISOBAT_WIDTH = []
self.ISOBAT_SHOW = []
self.ISOBAT_DATA = []
for i in range(self.nisobat):
self.ISOBAT_SELEC.append(tk.BooleanVar(value=False))
self.ISOBAT_COLOR.append(tk.StringVar(value='black'))
self.ISOBAT_STYLE.append(tk.StringVar(value='-'))
self.ISOBAT_WIDTH.append(tk.DoubleVar(value=1))
self.ISOBAT_SHOW.append(False)
self.ISOBAT_DATA.append(None)
self.ISOBAT_LABEL_SHOW = tk.BooleanVar()
self.ISOBAT_LABEL_SHOW.set(False)
self.ISOBAT_NPLOT = sum(self.ISOBAT_SHOW)
self.ISOBAT_ZPOINTER = tk.StringVar()
self.ISOBAT_ZPOINTER.set(self.ISOBAT_LABEL[0])
self.ISOBAT_selected = False
self.ISOBAT_loaded = False
self.ISOBAT_cropped = False
self.ISOBAT_LEGEND = legend.LegendConfig()
self.ISOBAT_LEGEND.TITLE.set('Isobaths')
self.ISOBAT_LEGEND.LOC.set(2)
self.TIMESTAMP_SHOW.set(False)
self.TIMESTAMP_BOLD.set(False)
self.TIMESTAMP_X.set(0.12)
self.TIMESTAMP_Y.set(0.12)
self.TIMESTAMP_COLOR.set('black')
self.TIMESTAMP_SIZE.set(15)
self.VIDEO_NAME.set('movie.mp4')
self.VIDEO_TITLE.set('COSMO-VIEW Movie')
self.VIDEO_AUTHOR.set('Matplotlib')
self.VIDEO_COMMENT.set('Ocean currents movie')
self.VIDEO_FPS.set(2)
self.VIDEO_DPI.set(100)
self.VIDEO_L1.set(0)
self.SFRAME_PREFIX.set('Frame')
self.SFRAME_POSTFIX_MODE.set(0)
self.SFRAME_L1.set(0)
self.SFRAME_LSTEP.set(1)
self.WINDOW_FONT_TYPE.set('Helvetica')
self.WINDOW_FONT_SIZE.set(14)
font_type = matplotlib.rcParams['font.family'][0]
self.MAP_FONT_TYPE.set(font_type)
self.MESSAGE = "\n"+self.LEGEND.MESSAGE+"\n"+self.ISOBAT_LEGEND.MESSAGE
if exists(self.FILECONF):
self.MESSAGE += "\nReading conf. file: "+self.FILECONF
try:
conf = self.conf_load(self.FILECONF)
self.conf_set(conf)
except:
self.MESSAGE += '\n\tError reading, using default parameters'
conf = self.conf_get()
self.conf_save(conf,self.FILECONF)
else:
self.MESSAGE += '\n\tSaving configuration file ...'
conf = self.conf_get()
self.conf_save(conf,self.FILECONF)
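# The four conf_* methods below persist the plot settings: conf_get and
# conf_set translate between the Tk variables and a plain dictionary,
# while conf_save and conf_load write and read that dictionary as JSON.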
def conf_get(self):
# ===========================
'''Get the conf dictionary from the program variables'''
conf = {}
conf['_VERSION_'] = self.VERSION
conf['OUTPUT_FIGURE'] = self.OUTPUT_FIGURE.get()
conf['OUTPUT_LEAFLET'] = self.OUTPUT_LEAFLET.get()
conf['SIZE'] = [self.SIZE[0],self.SIZE[1]]
conf['DPI'] = self.DPI.get()
conf['FIGURE_COLOR'] = self.FIGURE_COLOR.get()
conf['TEXT_COLOR'] = self.TEXT_COLOR.get()
conf['GEOMAP'] = self.GEOMAP.get()
conf['WITH_AXIS'] = self.WITH_AXIS.get()
#EG Default Cartopy PlateCarree and parameters
conf['MAP_PROJECTION'] = self.MAP_PROJECTION.get()
conf['MAP_PROJ_LAT_0'] = self.MAP_PROJ_LAT_0.get()
conf['MAP_PROJ_LON_0'] = self.MAP_PROJ_LON_0.get()
conf['MAP_PROJ_MIN_LAT'] = self.MAP_PROJ_MIN_LAT.get()
conf['MAP_PROJ_MAX_LAT'] = self.MAP_PROJ_MAX_LAT.get()
conf['MAP_PROJ_F_NORTH'] = self.MAP_PROJ_F_NORTH.get()
conf['MAP_PROJ_F_EAST'] = self.MAP_PROJ_F_EAST.get()
conf['MAP_PROJ_LAT_T_SCA'] = self.MAP_PROJ_LAT_T_SCA.get()
conf['MAP_PROJ_T_SCA_LAT'] = self.MAP_PROJ_T_SCA_LAT.get()
conf['MAP_PROJ_SCA_FAC'] = self.MAP_PROJ_SCA_FAC.get()
conf['MAP_PROJ_SATELLITE_HEIGHT'] = self.MAP_PROJ_SATELLITE_HEIGHT.get()
conf['MAP_PROJ_SWEEP_AXIS'] = self.MAP_PROJ_SWEEP_AXIS.get()
conf['MAP_RESOLUTION'] = self.MAP_RESOLUTION.get()
conf['EPSG'] = self.EPSG.get()
conf['SOUTH'] = self.SOUTH.get()
conf['NORTH'] = self.NORTH.get()
conf['WEST'] = self.WEST.get()
conf['EAST'] = self.EAST.get()
conf['WIDTH'] = self.WIDTH.get()
conf['HEIGHT'] = self.HEIGHT.get()
conf['LAT_0'] = self.LAT_0.get()
conf['LON_0'] = self.LON_0.get()
conf['SATELLITE_HEIGHT'] = self.SATELLITE_HEIGHT.get()
conf['COASTLINE_SHOW'] = self.COASTLINE_SHOW.get()
conf['COASTLINE_SOURCE'] = self.COASTLINE_SOURCE.get()
conf['COASTLINE_WIDTH'] = self.COASTLINE_WIDTH.get()
conf['COASTLINE_COLOR'] = self.COASTLINE_COLOR.get()
conf['COASTLINE_ZORDER'] = self.COASTLINE_ZORDER.get()
conf['COUNTRYLINE_SHOW'] = self.COUNTRYLINE_SHOW.get()
conf['COUNTRYLINE_WIDTH'] = self.COUNTRYLINE_WIDTH.get()
conf['COUNTRYLINE_COLOR'] = self.COUNTRYLINE_COLOR.get()
conf['LAND_COLOR'] = self.LAND_COLOR.get()
conf['LAND_ZORDER'] = self.LAND_ZORDER.get()
conf['WATER_COLOR'] = self.WATER_COLOR.get()
conf['WATER_ZORDER'] = self.WATER_ZORDER.get()
conf['TITLE'] = self.TITLE.get()
conf['TITLEFONT'] = self.TITLEFONT.__dict__
conf['TITLE_PAD'] = self.TITLE_PAD.get()
conf['XLABEL'] = self.XLABEL.get()
conf['YLABEL'] = self.YLABEL.get()
conf['LABEL_SIZE'] = self.LABEL_SIZE.get()
conf['XLABEL_PAD'] = self.XLABEL_PAD.get()
conf['YLABEL_PAD'] = self.YLABEL_PAD.get()
conf['GRID_SHOW'] = self.GRID_SHOW.get()
conf['GRID_LINEWIDTH'] = self.GRID_LINEWIDTH.get()
conf['MERIDIAN_INI'] = self.MERIDIAN_INI.get()
conf['MERIDIAN_FIN'] = self.MERIDIAN_FIN.get()
conf['MERIDIAN_INT'] = self.MERIDIAN_INT.get()
conf['PARALLEL_INI'] = self.PARALLEL_INI.get()
conf['PARALLEL_FIN'] = self.PARALLEL_FIN.get()
conf['PARALLEL_INT'] = self.PARALLEL_INT.get()
conf['GRID_COLOR'] = self.GRID_COLOR.get()
conf['GRID_FONTCOLOR'] = self.GRID_FONTCOLOR.get()
conf['GRID_SIZE'] = self.GRID_SIZE.get()
conf['GRID_NORTH'] = self.GRID_NORTH.get()
conf['GRID_SOUTH'] = self.GRID_SOUTH.get()
conf['GRID_WEST'] = self.GRID_WEST.get()
conf['GRID_EAST'] = self.GRID_EAST.get()
conf['GRID_LINESTYLE'] = self.GRID_LINESTYLE.get()
conf['GRID_ALPHA'] = self.GRID_ALPHA.get()
conf['GRID_ZORDER'] = self.GRID_ZORDER.get()
conf['SCALE_SHOW'] = self.SCALE_SHOW.get()
conf['SCALE_X'] = self.SCALE_X.get()
conf['SCALE_Y'] = self.SCALE_Y.get()
conf['SCALE_XO'] = self.SCALE_XO.get()
conf['SCALE_YO'] = self.SCALE_YO.get()
conf['SCALE_LENGTH'] = self.SCALE_LENGTH.get()
conf['SCALE_UNITS'] = self.SCALE_UNITS.get()
conf['SCALE_STYLE'] = self.SCALE_STYLE.get()
conf['SCALE_FONTSIZE'] = self.SCALE_FONTSIZE.get()
conf['SCALE_FONTCOLOR'] = self.SCALE_FONTCOLOR.get()
conf['SCALE_LABELSTYLE'] = self.SCALE_LABELSTYLE.get()
conf['SCALE_FORMAT'] = self.SCALE_FORMAT.get()
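# SCALE_YOFFSET and SCALE_LINEWIDTH may hold None, which the Tk variable
# cannot return; the try/except blocks below fall back to None in that case.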
try:
conf['SCALE_YOFFSET'] = self.SCALE_YOFFSET.get()
except:
conf['SCALE_YOFFSET'] = None
conf['SCALE_FILLCOLOR1'] = self.SCALE_FILLCOLOR1.get()
conf['SCALE_FILLCOLOR2'] = self.SCALE_FILLCOLOR2.get()
conf['SCALE_LINECOLOR'] = self.SCALE_LINECOLOR.get()
try:
conf['SCALE_LINEWIDTH'] = self.SCALE_LINEWIDTH.get()
except:
conf['SCALE_LINEWIDTH'] = None
conf['SCALE_ZORDER'] = self.SCALE_ZORDER.get()
#EG RELIEF refers to GEBCO
conf['RELIEF_SHOW'] = self.RELIEF_SHOW.get()
conf['RELIEF'] = self.RELIEF.get()
#EGconf['BLUEMARBLE'] = self.BLUEMARBLE.get()
#EGconf['ETOPO'] = self.ETOPO.get()
conf['BACKGROUND_SCALE'] = self.BACKGROUND_SCALE.get()
conf['RIVERS_SHOW'] = self.RIVERS_SHOW.get()
conf['RIVERS_WIDTH'] = self.RIVERS_WIDTH.get()
conf['RIVERS_COLOR'] = self.RIVERS_COLOR.get()
#EG EMODNET
#conf['EMODNET_COAST'] = self.EMODNET_COAST.get()
conf['EMODNET_ISO'] = self.EMODNET_ISO.get()
#EG conf['ARCGISIMAGE'] = self.ARCGISIMAGE.get()
#EG conf['ARCGISSERVICE'] = self.ARCGISSERVICE.get()
#EG conf['ARCGISPIXELS'] = self.ARCGISPIXELS.get()
#EG conf['ARCGISDPI'] = self.ARCGISDPI.get()
#EG conf['ARCGISVERBOSE'] = self.ARCGISVERBOSE.get()
conf['LOGO_FILE'] = self.LOGO_FILE.get()
conf['LOGO_ZOOM'] = self.LOGO_ZOOM.get()
conf['LOGO_LOCATION'] = self.LOGO_LOCATION.get()
conf['LOGO_X'] = self.LOGO_X.get()
conf['LOGO_Y'] = self.LOGO_Y.get()
conf['LOGO_DISPLAY'] = self.LOGO_DISPLAY.get()
conf['ISOBAT_PATH'] = self.ISOBAT_PATH.get()
conf['ISOBAT_Z'] = self.ISOBAT_Z
conf['ISOBAT_LABEL'] = self.ISOBAT_LABEL
WIDTH = []
COLOR = []
STYLE = []
SELEC = []
for i in range(self.nisobat):
WIDTH.append(self.ISOBAT_WIDTH[i].get())
COLOR.append(self.ISOBAT_COLOR[i].get())
STYLE.append(self.ISOBAT_STYLE[i].get())
SELEC.append(self.ISOBAT_SELEC[i].get())
conf['ISOBAT_WIDTH'] = WIDTH
conf['ISOBAT_COLOR'] = COLOR
conf['ISOBAT_STYLE'] = STYLE
conf['ISOBAT_SELEC'] = SELEC
conf['ISOBAT_LABEL_SHOW'] = self.ISOBAT_LABEL_SHOW.get()
conf['ISOBAT_cropped'] = self.ISOBAT_cropped
conf['ISOBAT_LEGEND'] = self.ISOBAT_LEGEND.conf_get()
conf['LEGEND'] = self.LEGEND.conf_get()
conf['TIMESTAMP_SHOW'] = self.TIMESTAMP_SHOW.get()
conf['TIMESTAMP_BOLD'] = self.TIMESTAMP_BOLD.get()
conf['TIMESTAMP_X'] = self.TIMESTAMP_X.get()
conf['TIMESTAMP_Y'] = self.TIMESTAMP_Y.get()
conf['TIMESTAMP_SIZE'] = self.TIMESTAMP_SIZE.get()
conf['TIMESTAMP_COLOR'] = self.TIMESTAMP_COLOR.get()
conf['VIDEO_NAME'] = self.VIDEO_NAME.get()
conf['VIDEO_TITLE'] = self.VIDEO_TITLE.get()
conf['VIDEO_AUTHOR'] = self.VIDEO_AUTHOR.get()
conf['VIDEO_COMMENT'] = self.VIDEO_COMMENT.get()
conf['VIDEO_FPS'] = self.VIDEO_FPS.get()
conf['VIDEO_DPI'] = self.VIDEO_DPI.get()
conf['FRAME_PREFIX'] = self.SFRAME_PREFIX.get()
conf['FRAME_POSTFIX_MODE'] = self.SFRAME_POSTFIX_MODE.get()
conf['FRAME_STEP'] = self.SFRAME_LSTEP.get()
conf['WINDOW_FONT_TYPE'] = self.WINDOW_FONT_TYPE.get()
conf['WINDOW_FONT_SIZE'] = self.WINDOW_FONT_SIZE.get()
conf['MAP_FONT_TYPE'] = self.MAP_FONT_TYPE.get()
conf['CROP_PAD'] = self.CROP_PAD.get()
return conf
def conf_set(self,conf):
# =======================
'''Set the program variables from the conf dictionary'''
self.VERSION = conf['_VERSION_']
self.OUTPUT_FIGURE.set(conf['OUTPUT_FIGURE'])
self.OUTPUT_LEAFLET.set(conf['OUTPUT_LEAFLET'])
self.SIZE = conf['SIZE']
self.DPI.set(conf['DPI'])
self.FIGURE_COLOR.set(conf['FIGURE_COLOR'])
self.TEXT_COLOR.set(conf['TEXT_COLOR'])
self.GEOMAP.set(conf['GEOMAP'])
self.WITH_AXIS.set(conf['WITH_AXIS'])
#EG Default Cartopy PlateCarree and parameters
self.MAP_PROJECTION.set(conf['MAP_PROJECTION'])
self.MAP_PROJ_LAT_0.set(conf['MAP_PROJ_LAT_0'])
self.MAP_PROJ_LON_0.set(conf['MAP_PROJ_LON_0'])
self.MAP_PROJ_MIN_LAT.set(conf['MAP_PROJ_MIN_LAT'])
self.MAP_PROJ_MAX_LAT.set(conf['MAP_PROJ_MAX_LAT'])
self.MAP_PROJ_F_NORTH.set(conf['MAP_PROJ_F_NORTH'])
self.MAP_PROJ_F_EAST.set(conf['MAP_PROJ_F_EAST'])
self.MAP_PROJ_LAT_T_SCA.set(conf['MAP_PROJ_LAT_T_SCA'])
self.MAP_PROJ_T_SCA_LAT.set(conf['MAP_PROJ_T_SCA_LAT'])
self.MAP_PROJ_SCA_FAC.set(conf['MAP_PROJ_SCA_FAC'])
self.MAP_PROJ_SATELLITE_HEIGHT.set(conf['MAP_PROJ_SATELLITE_HEIGHT'])
self.MAP_PROJ_SWEEP_AXIS.set(conf['MAP_PROJ_SWEEP_AXIS'])
self.MAP_RESOLUTION.set(conf['MAP_RESOLUTION'])
self.EPSG.set(conf['EPSG'])
self.SOUTH.set(conf['SOUTH'])
self.NORTH.set(conf['NORTH'])
self.WEST.set(conf['WEST'])
self.EAST.set(conf['EAST'])
self.WIDTH.set(conf['WIDTH'])
self.HEIGHT.set(conf['HEIGHT'])
self.LAT_0.set(conf['LAT_0'])
self.LON_0.set(conf['LON_0'])
self.SATELLITE_HEIGHT.set(conf['SATELLITE_HEIGHT'])
self.MERIDIAN_INI.set(conf['MERIDIAN_INI'])
self.MERIDIAN_FIN.set(conf['MERIDIAN_FIN'])
self.MERIDIAN_INT.set(conf['MERIDIAN_INT'])
self.PARALLEL_INI.set(conf['PARALLEL_INI'])
self.PARALLEL_FIN.set(conf['PARALLEL_FIN'])
self.PARALLEL_INT.set(conf['PARALLEL_INT'])
self.COASTLINE_SHOW.set(conf['COASTLINE_SHOW'])
self.COASTLINE_SOURCE.set(conf['COASTLINE_SOURCE'])
self.COASTLINE_WIDTH.set(conf['COASTLINE_WIDTH'])
self.COASTLINE_COLOR.set(conf['COASTLINE_COLOR'])
self.COASTLINE_ZORDER.set(conf['COASTLINE_ZORDER'])
self.COUNTRYLINE_SHOW.set(conf['COUNTRYLINE_SHOW'])
self.COUNTRYLINE_WIDTH.set(conf['COUNTRYLINE_WIDTH'])
self.COUNTRYLINE_COLOR.set(conf['COUNTRYLINE_COLOR'])
self.LAND_COLOR.set(conf['LAND_COLOR'])
self.LAND_ZORDER.set(conf['LAND_ZORDER'])
self.WATER_COLOR.set(conf['WATER_COLOR'])
self.WATER_ZORDER.set(conf['WATER_ZORDER'])
self.TITLE.set(conf['TITLE'])
self.TITLEFONT = setfont(conf['TITLEFONT'])
self.TITLE_PAD.set(conf['TITLE_PAD'])
self.XLABEL.set(conf['XLABEL'])
self.YLABEL.set(conf['YLABEL'])
self.LABEL_SIZE.set(conf['LABEL_SIZE'])
self.XLABEL_PAD.set(conf['XLABEL_PAD'])
self.YLABEL_PAD.set(conf['YLABEL_PAD'])
self.GRID_SHOW.set(conf['GRID_SHOW'])
self.GRID_LINEWIDTH.set(conf['GRID_LINEWIDTH'])
self.GRID_COLOR.set(conf['GRID_COLOR'])
self.GRID_FONTCOLOR.set(conf['GRID_FONTCOLOR'])
self.GRID_SIZE.set(conf['GRID_SIZE'])
self.GRID_NORTH.set(conf['GRID_NORTH'])
self.GRID_SOUTH.set(conf['GRID_SOUTH'])
self.GRID_WEST.set(conf['GRID_WEST'])
self.GRID_EAST.set(conf['GRID_EAST'])
self.GRID_LINESTYLE.set(conf['GRID_LINESTYLE'])
self.GRID_ALPHA.set(conf['GRID_ALPHA'])
self.GRID_ZORDER.set(conf['GRID_ZORDER'])
self.SCALE_SHOW.set(conf['SCALE_SHOW'])
self.SCALE_X.set(conf['SCALE_X'])
self.SCALE_Y.set(conf['SCALE_Y'])
self.SCALE_XO.set(conf['SCALE_XO'])
self.SCALE_YO.set(conf['SCALE_YO'])
self.SCALE_LENGTH.set(conf['SCALE_LENGTH'])
self.SCALE_UNITS.set(conf['SCALE_UNITS'])
self.SCALE_STYLE.set(conf['SCALE_STYLE'])
self.SCALE_FONTSIZE.set(conf['SCALE_FONTSIZE'])
self.SCALE_FONTCOLOR.set(conf['SCALE_FONTCOLOR'])
self.SCALE_LABELSTYLE.set(conf['SCALE_LABELSTYLE'])
self.SCALE_FORMAT.set(conf['SCALE_FORMAT'])
self.SCALE_YOFFSET.set(conf['SCALE_YOFFSET'])
self.SCALE_FILLCOLOR1.set(conf['SCALE_FILLCOLOR1'])
self.SCALE_FILLCOLOR2.set(conf['SCALE_FILLCOLOR2'])
self.SCALE_LINECOLOR.set(conf['SCALE_LINECOLOR'])
self.SCALE_LINEWIDTH.set(conf['SCALE_LINEWIDTH'])
self.SCALE_ZORDER.set(conf['SCALE_ZORDER'])
#EG Refers to the GEBCO tile WMS
self.RELIEF_SHOW.set(conf['RELIEF_SHOW'])
self.RELIEF.set(conf['RELIEF'])
#EGself.BLUEMARBLE.set(conf['BLUEMARBLE'])
#EGself.ETOPO.set(conf['ETOPO'])
self.BACKGROUND_SCALE.set(conf['BACKGROUND_SCALE'])
self.RIVERS_SHOW.set(conf['RIVERS_SHOW'])
self.RIVERS_WIDTH.set(conf['RIVERS_WIDTH'])
self.RIVERS_COLOR.set(conf['RIVERS_COLOR'])
#EG EMODNET
#self.EMODNET_COAST.set(conf['EMODNET_COAST'])
self.EMODNET_ISO.set(conf['EMODNET_ISO'])
#EG self.ARCGISIMAGE.set(conf['ARCGISIMAGE'])
#EG self.ARCGISSERVICE.set(conf['ARCGISSERVICE'])
#EG self.ARCGISPIXELS.set(conf['ARCGISPIXELS'])
#EG self.ARCGISDPI.set(conf['ARCGISDPI'])
#EG self.ARCGISVERBOSE.set(conf['ARCGISVERBOSE'])
self.LOGO_FILE.set(conf['LOGO_FILE'])
self.LOGO_ZOOM.set(conf['LOGO_ZOOM'])
self.LOGO_LOCATION.set(conf['LOGO_LOCATION'])
self.LOGO_X.set(conf['LOGO_X'])
self.LOGO_Y.set(conf['LOGO_Y'])
self.LOGO_DISPLAY.set(conf['LOGO_DISPLAY'])
self.ISOBAT_PATH.set(conf['ISOBAT_PATH'])
self.ISOBAT_Z = conf['ISOBAT_Z']
self.ISOBAT_LABEL = conf['ISOBAT_LABEL']
self.ISOBAT_LABEL_SHOW.set(conf['ISOBAT_LABEL_SHOW'])
self.ISOBAT_cropped = conf['ISOBAT_cropped']
self.ISOBAT_LEGEND.conf_set(conf['ISOBAT_LEGEND'])
self.nisobat = len(self.ISOBAT_Z)
WIDTH = conf['ISOBAT_WIDTH']
COLOR = conf['ISOBAT_COLOR']
STYLE = conf['ISOBAT_STYLE']
SELEC = conf['ISOBAT_SELEC']
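# Rebuild the per-isobath Tk variables from the plain values stored in
# the configuration, then reload the data files of the selected isobaths.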
self.ISOBAT_WIDTH = []
self.ISOBAT_COLOR = []
self.ISOBAT_STYLE = []
self.ISOBAT_SELEC = []
self.ISOBAT_SHOW = []
self.ISOBAT_DATA = []
for i in range(self.nisobat):
self.ISOBAT_SELEC.append(tk.BooleanVar(value=SELEC[i]))
self.ISOBAT_COLOR.append(tk.StringVar(value=COLOR[i]))
self.ISOBAT_STYLE.append(tk.StringVar(value=STYLE[i]))
self.ISOBAT_WIDTH.append(tk.DoubleVar(value=WIDTH[i]))
self.ISOBAT_SHOW.append(False)
self.ISOBAT_DATA.append(None)
if sum(SELEC) == 0:
self.ISOBAT_selected = False
else:
self.ISOBAT_selected = True
self.ISOBAT_loaded = False
for i in range(self.nisobat):
if self.ISOBAT_SELEC[i].get():
filename = self.ISOBAT_PATH.get() + \
'/%04d' % self.ISOBAT_Z[i] + '.dat'
self.ISOBAT_SHOW[i] = True
self.ISOBAT_loaded = True
try:
self.ISOBAT_DATA[i] = read_lines(filename)
except:
messagebox.showinfo(message='Error: unable to read '+filename)
self.ISOBAT_DATA[i] = None
self.ISOBAT_SHOW[i] = False
self.ISOBAT_loaded = False
self.ISOBAT_NPLOT = sum(self.ISOBAT_SHOW)
self.TIMESTAMP_SHOW.set(conf['TIMESTAMP_SHOW'])
self.TIMESTAMP_BOLD.set(conf['TIMESTAMP_BOLD'])
self.TIMESTAMP_X.set(conf['TIMESTAMP_X'])
self.TIMESTAMP_Y.set(conf['TIMESTAMP_Y'])
self.TIMESTAMP_SIZE.set(conf['TIMESTAMP_SIZE'])
self.TIMESTAMP_COLOR.set(conf['TIMESTAMP_COLOR'])
self.VIDEO_NAME.set(conf['VIDEO_NAME'])
self.VIDEO_TITLE.set(conf['VIDEO_TITLE'])
self.VIDEO_AUTHOR.set(conf['VIDEO_AUTHOR'])
self.VIDEO_COMMENT.set(conf['VIDEO_COMMENT'])
self.VIDEO_FPS.set(conf['VIDEO_FPS'])
self.VIDEO_DPI.set(conf['VIDEO_DPI'])
self.LEGEND.conf_set(conf['LEGEND'])
self.WINDOW_FONT_TYPE.set(conf['WINDOW_FONT_TYPE'])
self.WINDOW_FONT_SIZE.set(conf['WINDOW_FONT_SIZE'])
self.MAP_FONT_TYPE.set(conf['MAP_FONT_TYPE'])
self.CROP_PAD.set(conf['CROP_PAD'])
self.SFRAME_PREFIX.set(conf['FRAME_PREFIX'])
self.SFRAME_POSTFIX_MODE.set(conf['FRAME_POSTFIX_MODE'])
self.SFRAME_LSTEP.set(conf['FRAME_STEP'])
# Derived variables:
self.LOGO_IMAGE = image.imread(self.LOGO_FILE.get())
def conf_load(self,filename):
# ===========================
'''Open and read the configuration file'''
# Read configuration
with open(filename) as infile:
conf = json.load(infile)
return conf
def conf_save(self,conf,filename):
# ===============================
'''Save the configuration file'''
with io.open(filename,'w',encoding='utf8') as outfile:
str_ = json.dumps(conf,ensure_ascii=False, \
sort_keys=True, \
indent=2, \
separators=(',',': '))
outfile.write(to_unicode(str_)+'\n')
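# Typical round-trip (sketch):
#   conf = self.conf_get()                 # snapshot the current Tk variables
#   self.conf_save(conf,self.FILECONF)     # write them as JSON
#   conf = self.conf_load(self.FILECONF)   # read the JSON back
#   self.conf_set(conf)                    # restore the Tk variables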
# ===================
class CosmoDrawing():
# ===================
# ===============
def close(self):
# ===============
#quit() # DEBUG
if self.LAYERS.n == 0:
quit()
aa = messagebox.askquestion('Close','Are you sure?',icon='warning')
if aa == 'yes':
self.master.destroy()
self.master = None
quit()
# =======================================
def __init__ (self,master,tconsola=None):
# =======================================
# Initialization
global COSMO_CONF,COSMO_CONF_DATA
versions = 'Built with:\n'
versions += 'Tkinter '+ TKINTER_VERSION + '\n'
versions += 'Matplotlib '+ MATPLOTLIB_VERSION + '\n'
versions += 'Cartopy '+ CARTOPY_VERSION + '\n'
mess = "CONF_PATH: "+COSMO_CONF_PATH
mess += '\nCONF_DATA: '+COSMO_CONF_DATA
mess += '\nCONF: '+ COSMO_CONF
master.protocol('WM_DELETE_WINDOW',self.close)
self.master = master
self.master.configure(bg=BGC)
# EG we pass the console id
self.PLOT = DrawingConfig()
self.first = True
self.ftime = True
try:
font_name = self.PLOT.WINDOW_FONT_TYPE.get().split()[0]
font = '%s %d' % (font_name, self.PLOT.WINDOW_FONT_SIZE.get())
self.master.option_add('*Font',font)
except:
self.master.option_add('*Font',FONT)
self.default_font = tkfont.nametofont('TkDefaultFont')
# EG icon fonts for the askopenfile, etc
self.default_font2 = tkfont.nametofont("TkIconFont")
self.default_font.configure(family=self.PLOT.WINDOW_FONT_TYPE.get().
split()[0])
self.default_font.configure(size=self.PLOT.WINDOW_FONT_SIZE.get())
# EG icon fonts for the askopenfile, etc
self.default_font2.configure(size=self.PLOT.WINDOW_FONT_SIZE.get())
self.L_LIST = []
self.T_LIST = []
self.DATE = []
#self.TFILE = ''
self.L = tk.IntVar()
self.K = tk.IntVar()
self.NL = 0
self.NZ = 0
self.L.set(0)
self.K.set(0)
self.LAYERS = LAYER()
#self.nfiles = 0
#self.FILENAMES = []
#self.FILETYPES = []
#self.FILEORDER = []
#self.nsequence = 0
#self.SEQUENCES = []
#self.SEQLEADER = [] # The one providing the date and Nt
#self.SEQLEADER_INDX= 0
#self.SEQNTIMES = []
self.nvec = 0
self.VEC = []
self.VEC_LIST = [None]
self.VEC_INDX = tk.IntVar()
self.VEC_INDX.set(0)
self.CURRENT_OPTIONS = ['Operational', \
'HF Radar', \
'COPERNICUS', \
'Local Dataset', \
'Remote Dataset', \
'Active CONTOUR file']
self.ncdf = 0
self.CDF = []
self.CDF_LIST = [None]
self.CDF_INDX = tk.IntVar()
self.CDF_INDX.set(0)
self.cdfbar = []
self.Mcdfbar = []
self.CONTOUR_OPTIONS = ['Operational', \
'COPERNICUS', \
'Local Dataset', \
'Remote Dataset', \
'Active VECTOR file']
self.nfloat = 0
self.FLOAT = []
self.FLOAT_LIST = ['0']
self.FLOAT_INDX = tk.IntVar()
self.FLOAT_INDX.set(0)
self.FLOAT_OPTIONS = ['Local Dataset', \
'Local Folder', \
'Remote Folder', \
'Trajectories Database']
self.Lagrangian_types=[('Netcdf','*.nc'),('JSON','*.json'),('GEOJSON','*.geojson'),('ALL','*')]
self.SAIDIN = CONTOUR()
#self.SAIDIN = cdf_parameters()
#self.SAIDIN.FIELD = fld_parameters()
self.sbar = []
self.Msbar = []
# Features: Markers or shapefiles
# Stationary information. Types: MARKER,SHAPE
#EG
self.nmarker = 0 # Number of marker files
self.MARKER = [] # List of marker structures, dim(self.nmarker)
self.MARKER_LIST = ['0'] # List of marker files in the configuration
self.MARKER_INDX = tk.IntVar() # File counter
self.MARKER_INDX.set(0)
#EG Shape files
self.nshape = 0
self.SHAPE = []
self.SHAPE_LIST = ['0']
self.SHAPE_INDX = tk.IntVar()
self.SHAPE_INDX.set(0)
self.nellipse = 0
self.ELLIPSE = []
self.ELLIPSE_LIST = ['0'] # List of ellipse files in the configuration
self.ELLIPSE_INDX = tk.IntVar() # File counter
self.ELLIPSE_INDX.set(0)
self.ELLIPSE_OPTIONS = ['Local Dataset']
self.npatch = 0
self.PATCH = []
self.PATCH_LIST = ['0'] # List of patches in the configuration
self.PATCH_INDX = tk.IntVar() # File counter
self.PATCH_INDX.set(0)
self.FEATURE = OBJECT('FEATURE',['Local Dataset'])
#self.FEATURE = []
#self.FEATURE_LIST = ['0'] # List of features in the configuration
#self.FEATURE_INDX = tk.IntVar() # File counter
#self.FEATURE_INDX.set(0)
#self.FEATURE_OPTIONS = ['Local Dataset']
# Initialize CLM command:
self.CLM = clm.parameters()
# Skill - related variables
self.time_sampling = tk.DoubleVar()
self.index_s = tk.DoubleVar()
self.index_n = tk.DoubleVar()
self.release_file = tk.StringVar()
self.clm_idt = tk.IntVar()
self.out_file = tk.StringVar()
self.time_ini = tk.IntVar()
self.Fp = tk.IntVar()
self.skill_release_VALUES = ['Earliest model state','Earliest buoy location']
self.skill_release = tk.StringVar()
self.skill_release.set(self.skill_release_VALUES[0])
self.time_sampling.set(1) # hours
self.index_n.set(1) #
self.time_ini.set(0) # 0 - Model time; 1 - Buoy time
self.release_file.set('skill_ini.dat')
self.out_file.set('skill_out.nc')
self.clm_idt.set(600) # 0.5 hours
self.Fp.set(0)
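# Build the list of font families known to Matplotlib; entries whose
# font file cannot be parsed are discarded in the loop below.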
tmp = matplotlib.font_manager.get_fontconfig_fonts()
if type(tmp) is dict:
flist = list(tmp)
else:
flist = tmp.copy()
del tmp
# Get rid of fonts not well defined:
nf = len(flist)
for i in range(nf-1,-1,-1):
fname = flist[i]
try:
ftype = matplotlib.font_manager.FontProperties(fname=fname).get_name()
except:
del flist[i]
FONT_TYPES = [matplotlib.font_manager.FontProperties(fname=fname).get_family() for fname in flist]
try:
self.FONT_TYPES = list(set(FONT_TYPES))
except:
self.FONT_TYPES = FONT_TYPES.copy()
self.FONT_TYPES.sort()
self.FONT_SIZES = list(range(1,25))
self.GET_TIMESTAMP_LOCATION = False
# Initialize matplotlib and Cartopy
self.params = None
self.fig = None
#print('main ', self.PLOT.SIZE)
#self.fig = plt.figure('COSMO-VIEW canvas', \
# figsize=self.PLOT.SIZE, \
# dpi=self.PLOT.DPI.get())
#
#self.fig.canvas.mpl_connect('close_event',self.on_closing_figure)
#self.fig.canvas.callbacks.connect('button_press_event',self.on_click)
#self.ax = self.fig.add_subplot(111)
self.drawmap = None
# Window design
self.CreateMenu()
gui_style = ttk.Style()
gui_style.configure('My.TFrame',background="green")
gui_style.configure('My.TLabel',background="green")
#F0 = ttk.Frame(self.master,style='My.TFrame')
#F0 = tk.Frame(self.master,bg="yellow")
#F0 = tk.Frame(self.master,bg="yellow")
#EG F0 dropped to facilitate the consola design
tk.Label(self.master,text='Time',bg=BGC).grid(row=0,column=0,padx=3)
self.lbox = ttk.Combobox(self.master,textvariable=self.L,width=5)
self.lbox.grid(row=0,column=1)
self.lbox.configure(state='disabled')
self.lbox.bind('<<ComboboxSelected>>',lambda e: self.lselection())
self.lbox.bind('<Return>',lambda e: self.lselection())
self.bprev = tk.Button(self.master,text='PREV',command=self.tprev,bg=BWC)
self.bprev.grid(row=0,column=2,padx=3,sticky='e')
tk.Entry(self.master,textvariable=self.PLOT.TLABEL, \
state='readonly',width=20,bg='white').grid(row=0,column=3, padx=3)
self.bnext = tk.Button(self.master,text='NEXT',command=self.tnext,bg=BWC)
self.bnext.grid(row=0,column=4,padx=3,sticky='w')
tk.Label(self.master,bg=BGC).grid(row=0,column=5)
if len(self.DATE) <= 0:
self.bprev.configure(state='disabled')
self.lbox.configure(state='disabled')
self.bnext.configure(state='disabled')
else:
self.lbox['values'] = list(range(len(self.L_LIST)))
tk.Button(self.master,text='Draw',command=self.make_plot,bg=EBC) \
.grid(row=1,column=4,padx=3,pady=3,sticky='e')
tk.Button(self.master,text='Quit',command=self.close,bg=EBC) \
.grid(row=1,column=5,padx=3,pady=3,sticky='w')
tk.Label(self.master,text='COSMO project, July 2018',bg=BGC) \
.grid(row=2,column=4,columnspan=6,sticky='e')
#F0.grid(row=0, column=0,sticky='ew')
#EG Add a console
#EG self.cons is the widget referencing the toconsola()
if tconsola is not None:
if len(tconsola) > 0:
wiconsola = tk.Frame(self.master) # Expand the console
wiconsola.grid_rowconfigure(0, weight=1)
cscrollb = tk.Scrollbar(wiconsola)
cscrollb.grid(row=0,column=1,sticky='nswe')
myFont = tkfont.Font(family=self.PLOT.WINDOW_FONT_TYPE.get(), \
size=self.PLOT.WINDOW_FONT_SIZE.get())
self.cons = tk.Text(wiconsola,bg="black", fg="white", \
yscrollcommand=cscrollb.set)
self.cons.configure(font=myFont)
# tags to highlight different categories of messages by formatting the text
self.cons.tag_config("y", foreground="yellow", font="-weight bold")
self.cons.tag_config("o", foreground="orange", font="-weight bold")
self.cons.tag_config("r", foreground="red", font="-weight bold")
self.cons.grid(row=0,column=0,sticky='we')
cscrollb.config(command=self.cons.yview)
line = tconsola + '\n'+ versions + "\n"+ mess + self.PLOT.MESSAGE+ \
self.SAIDIN.FLD.MESSAGE+self.CLM.MESSAGE
self.cons.insert("end", line + "\n")
self.cons.see(tk.END)
wiconsola.grid(row=3, column=0, columnspan=6, pady=5, sticky='nsew')
#EG
self.CAPTURE_POINT = False
self.pxo = tk.DoubleVar()
self.pyo = tk.DoubleVar()
self.pzo = tk.DoubleVar()
# Initialize window widget IDs:
self.Window_cfile = None
self.Window_legendconfig = None
self.Window_mapconfig = None
self.Window_vectorconfig = None
self.Window_contourconfig = None
self.Window_lineconfig = None
self.Window_other = None
self.Window_saidin = None
self.Window_currents = None
self.Window_currents_sel = None
self.Window_opendap = None
self.Window_copernicus = None
self.Window_codar = None
self.Window_isobat = None
self.Window_float = None
self.Window_saidinconfig = None
self.Window_floatconfig = None
self.Window_clm = None
self.Window_dpi = None
self.Window_anim = None
self.Window_sframe = None
self.Window_ncdf = None
self.Window_vec = None
self.Window_logo = None
self.Window_files = None
self.Window_about = None
self.Window_widgetconfig = None
self.Window_marker = None
self.Window_markerconfig = None
self.Window_dotconfig = None
self.Window_editor = None
#EG SHAPE files
self.Window_shapefile = None
self.Window_shapeconfig = None
self.Window_geoconfig = None
self.Window_xysel = None
self.Window_markered = None
self.Window_gellipse = None
self.Window_cellipse = None
self.Window_ellipseconfig = None
self.Window_featureconfig = None
self.Window_patch = None
self.Window_patchconfig = None
self.Window_skill = None
self.Window_converter = None
self.Window_settime = None
self.Window_feature = None
self.legendtabs = None
self.Window_mapa = None
## CosmoDrawing EVENTS Handlers ##############
# =============================
def canvas_closing(self,event):
# =============================
''' Update PLOT.SIZE variable according to the window size'''
self.PLOT.SIZE = list(self.fig.get_size_inches())
# Destroy whatever remains in memory
self.fig = None
# =============================
def canvas_resizing(self,event):
# =============================
''' Update PLOT.SIZE variable according to the window size'''
self.PLOT.SIZE = list(self.fig.get_size_inches())
# ===========================
def canvas_click(self,event):
# ===========================
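# The first two blocks capture a clicked point for legend placement:
# data coordinates are mapped to axes-fraction coordinates by applying
# transData and then the inverse of transAxes.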
if self.PLOT.LEGEND.GET_XY:
if event.inaxes is not None:
toconsola("Getting Legend coordinates",wid=self.cons)
trans = self.ax.transData.transform((event.xdata,event.ydata))
trans = self.ax.transAxes.inverted().transform(trans)
self.PLOT.LEGEND.BBx.set(np.round(trans[0],3))
self.PLOT.LEGEND.BBy.set(np.round(trans[1],3))
self.PLOT.LEGEND.GET_XY = False
return
if self.PLOT.ISOBAT_LEGEND.GET_XY:
if event.inaxes is not None:
toconsola("Getting Legend coordinates",wid=self.cons)
trans = self.ax.transData.transform((event.xdata,event.ydata))
trans = self.ax.transAxes.inverted().transform(trans)
self.PLOT.ISOBAT_LEGEND.BBx.set(np.round(trans[0],3))
self.PLOT.ISOBAT_LEGEND.BBy.set(np.round(trans[1],3))
self.PLOT.ISOBAT_LEGEND.GET_XY = False
return
if self.GET_TIMESTAMP_LOCATION:
toconsola("EG Click_event: self.GET_TIMESTAMP_LOCATION",wid=self.cons)
try:
self.time_stamp.remove()
except:
pass
self.GET_TIMESTAMP_LOCATION = False
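# Convert the click position from pixels to figure-fraction coordinates
# (pixels divided by DPI times the figure size in inches).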
xx = event.x/self.PLOT.DPI.get()/self.PLOT.SIZE[0]
yy = event.y/self.PLOT.DPI.get()/self.PLOT.SIZE[1]
self.PLOT.TIMESTAMP_X.set(np.round(xx,3))
self.PLOT.TIMESTAMP_Y.set(np.round(yy,3))
if self.PLOT.TIMESTAMP_SHOW.get():
font_family = self.PLOT.MAP_FONT_TYPE.get()
font_weight = 'normal'
if self.PLOT.TIMESTAMP_BOLD.get(): font_weight = 'bold'
self.ax.annotate(self.DATE[self.L.get()],xy=(self.PLOT.TIMESTAMP_X.get(), \
self.PLOT.TIMESTAMP_Y.get()), \
xycoords='figure fraction', color=self.PLOT.TIMESTAMP_COLOR.get(), \
fontsize=self.PLOT.TIMESTAMP_SIZE.get(),fontfamily=font_family, \
fontweight=font_weight,annotation_clip=False)
self.canvas.draw()
return
if self.nvec > 0:
ii = self.VEC_INDX.get()
if self.VEC[ii].PLOT.KEY_GETXY:
self.VEC[ii].PLOT.KEY_GETXY = False
xx = event.x/self.PLOT.DPI.get()/self.PLOT.SIZE[0]
yy = event.y/self.PLOT.DPI.get()/self.PLOT.SIZE[1]
self.VEC[ii].PLOT.KEY_X.set(np.round(xx,3))
self.VEC[ii].PLOT.KEY_Y.set(np.round(yy,3))
self.VEC[ii].PLOT.KEY_OBJ.X = xx
self.VEC[ii].PLOT.KEY_OBJ.Y = yy
self.canvas.draw()
return
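# Plain click on the map: convert the point from the local map projection
# to geographical longitude/latitude with Cartopy and store it as the
# selected point.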
if event.inaxes is not None:
#EG xo,yo = self.m(event.xdata,event.ydata,inverse=True)
p_ref = map_proj('PlateCarree')
p_local = map_proj(self.PLOT.MAP_PROJECTION.get())
latlon = p_ref['proj'].transform_point(event.xdata, event.ydata, \
p_local['proj'])
# Coordinates selected point:
xo = latlon[0]; yo = latlon[1]
toconsola("Selected Point : "+str(latlon[0])+" - "+str(latlon[1]),wid=self.cons)
#print('Current speed = ', self.CURRENTS.F(event.xdata,event.ydata))
#if not empty(self.SAIDIN.FILENAME.get()):
# print('SAIDIN SST = ', self.SAIDIN.FIELD.F(xo,yo))
self.CLM.xo.set(latlon[0])
self.CLM.yo.set(latlon[1])
if self.CAPTURE_POINT:
self.pxo.set(xo)
self.pyo.set(yo)
return
if self.nvec > 0:
ii = self.VEC_INDX.get()
dis = (xo-self.VEC[ii].U.xx)**2 + (yo-self.VEC[ii].U.yy)**2
ind = np.unravel_index(dis.argmin(), dis.shape)
self.VEC[ii].jo.set(ind[0])
self.VEC[ii].io.set(ind[1])
print('Vector selected point: ', self.VEC[ii].io.get(), self.VEC[ii].jo.get())
if self.ncdf > 0:
ii = self.CDF_INDX.get()
dis = (xo-self.CDF[ii].FLD.xx)**2 + (yo-self.CDF[ii].FLD.yy)**2
ind = np.unravel_index(dis.argmin(), dis.shape)
self.CDF[ii].jo.set(ind[0])
self.CDF[ii].io.set(ind[1])
print('Contour selected point: ', self.CDF[ii].io.get(), self.CDF[ii].jo.get())
# ===========================
def on_xlims_change(self,event):
# ===========================
lims = self.ax.get_xlim()
self.PLOT.WEST.set(lims[0])
self.PLOT.EAST.set(lims[1])
self.drawmap = True
# ===========================
def on_ylims_change(self,event):
# ===========================
lims = self.ax.get_ylim()
self.PLOT.SOUTH.set(lims[0])
self.PLOT.NORTH.set(lims[1])
self.drawmap = True
# ====================
def CreateMenu (self):
# ====================
''' Create options menu'''
menubar = tk.Menu(self.master)
plotmenu = tk.Menu(menubar,tearoff=0)
menubar.add_cascade(label='File',menu=plotmenu)
plotmenu.add_command(label='Save figure',
command=self.figure_save)
plotmenu.add_command(label='Read figure',
command=self.figure_read)
plotmenu.add_separator()
plotmenu.add_command(label='Plot layers',
command=self.layers)
plotmenu.add_separator()
plotmenu.add_command(label='Save plot',
command=self.save)
plotmenu.add_command(label='Save plot as',
command=self.saveas)
plotmenu.add_separator()
plotmenu.add_command(label='Quit',
command=self.close)
insmenu = tk.Menu(menubar, tearoff=0)
menubar.add_cascade(label='Import/Select',menu=insmenu)
insmenu.add_command(label='Vector field',command=self.get_vector)
insmenu.add_command(label='Satellite SST',command=self.get_saidin)
insmenu.add_command(label='Contour field',command=self.get_contour)
insmenu.add_command(label='Trajectory',command=self.get_lagrangian)
insmenu.add_command(label='Marker',command=self.get_marker)
#EG Shapefile and WMS server
insmenu.add_command(label='Shapefile',command=self.get_shapefile)
insmenu.add_command(label='Ellipse',command=self.get_ellipse)
insmenu.add_command(label='Feature',command=self.get_feature)
insmenu.add_command(label='WMS Service',state="disabled",command=self.get_wms)
confmenu = tk.Menu(menubar, tearoff=0)
menubar.add_cascade(label='Configure',menu=confmenu)
confmenu.add_command(label='Widgets',command=self.widget_config)
confmenu.add_command(label='Map',command=self.map_config)
confmenu.add_command(label='Legends',command=self.legend_config)
confmenu.add_command(label='Logo',command=self.logo_config)
confmenu.add_separator()
confmenu.add_command(label='Vector field',
command=self.currents_config)
confmenu.add_command(label='Satellite SST', \
command=self.saidin_config)
confmenu.add_command(label='Contour field',
command=self.contour_config)
confmenu.add_command(label='Trajectory',
command=self.lagrangian_config)
confmenu.add_command(label='Marker',
command=self.marker_config)
confmenu.add_command(label='Shape geometry',
command=self.shape_config)
confmenu.add_command(label='Variance ellipse',
command=self.ellipse_config)
confmenu.add_command(label='Patch',
command=self.patch_config)
confmenu.add_command(label='Feature',
command=self.feature_config)
confmenu.add_command(label='Time axis',
command=self.set_time)
confmenu.add_separator()
confmenu.add_command(label='Select configuration',
command=self.configuration_file)
toolmenu = tk.Menu(menubar, tearoff=0)
menubar.add_cascade(label='Tools',menu=toolmenu)
toolmenu.add_command(label='Trajectory model evaluation',
command=self.skill_Liu)
toolmenu.add_command(label='Vector series',
command=self.vector_series)
toolmenu.add_command(label='Vector mean',
command=self.vector_mean)
toolmenu.add_command(label='Contour mean',
command=self.contour_mean)
toolmenu.add_command(label='Ellipse of variance',
command=self.calc_ellipse)
toolmenu.add_command(label='Contour variance',
command=self.contour_var)
toolmenu.add_command(label='Trajectory editor',
command=self.trajectory_editor)
toolmenu.add_command(label='Marker editor',
command=self.marker_editor)
toolmenu.add_command(label='Add patch',
command=self.get_patch)
toolmenu.add_command(label='Download Atlas of currents',
command=self.atlas)
toolmenu.add_command(label='Make animation',
command=self.make_anim)
toolmenu.add_command(label='Save frames',
command=self.save_frames)
toolmenu.add_command(label='COSMO Lagrangian Model (CLM)',
command=self.clm)
calcmenu = tk.Menu(menubar, tearoff=0)
menubar.add_cascade(label='Calculators',menu=calcmenu)
calcmenu.add_command(label='Coordinate converter',
command=self.converter)
calcmenu.add_command(label='Distance estimation',
command=self.ruler)
helpmenu = tk.Menu(menubar, tearoff=0)
menubar.add_cascade(label='Help',menu=helpmenu)
helpmenu.add_command(label='About',command=self.about)
try:
self.master.config(menu=menubar)
except AttributeError:
# master is a toplevel window (Python 2.4/Tkinter 1.63)
self.master.tk.call(self.master, "config", "-menu", menubar)
# ============================
def about(self):
# ============================
'''Widget to print some help '''
def _close():
self.Window_about.destroy()
self.Window_about = None
if self.Window_about is None:
self.Window_about = tk.Toplevel(self.master)
self.Window_about.title('About')
self.Window_about.resizable(width=False,height=False)
self.Window_about.protocol('WM_DELETE_WINDOW',_close)
photoimage = ImageTk.PhotoImage(Image.open(self.PLOT.LOGO_FILE.get()).resize((200,100)))
panel1 = tk.Label(self.Window_about,image=photoimage)
panel1.grid(row=0,column=0,sticky='we')
# save the panel's image from 'garbage collection'
panel1.image = photoimage
_author = 'Authors: Quim Ballabrera (ICM/CSIC) \n Emilio Garcia (ICM/CSIC)'
_description = ' Ocean visualization tool for the COSMO and MED OSMOSIS projects\n V1.0 - Oct 2019 (COSMO project) \n V2.0 - July 2020 (COSMO project) \n V3.0 - April 2021 (MED OSMOSIS project)'
tk.Label(self.Window_about,text='COSMO-VIEW'). \
grid(row=1,column=0,sticky='ew')
tk.Label(self.Window_about,text='Version '+VERSION). \
grid(row=2,column=0,sticky='ew')
tk.Label(self.Window_about,text=_author) \
.grid(row=3,column=0,sticky='ew')
tk.Label(self.Window_about,text=_description). \
grid(row=4,column=0,sticky='ew')
tk.Button(self.Window_about,text='Close',command=_close). \
grid(row=5,column=0,sticky='ew')
else:
self.Window_about.lift()
# ============================
def get_vector(self):
# ============================
'''Widget to read files with currents (U,V) '''
self.VSOURCE = tk.StringVar()
self.VSOURCE.set('Operational')
def _close():
# ===========
self.Window_currents.destroy()
self.Window_currents = None
def _done():
# ==========
if self.nvec == 0:
messagebox.showinfo(message='No currents file opened yet')
return
ii = self.VEC_INDX.get()
#EG Corrected exception when the user tries to plot before
#EG importing product
#try:
# self.read_UV(self.VEC[ii])
#except:
# toconsola("Press Import to select a product",tag="o", wid=self.cons)
# return
if self.VEC[ii].SOURCE == 'FILE':
self.VEC[ii].read(wid=self.cons)
#self.read_UV(self.VEC[ii])
_close()
self.make_plot()
if self.Window_vectorconfig is not None:
self.Window_vectorconfig.destroy()
self.Window_vectorconfig = None
self.currents_config()
def _clear():
# ===========
if self.nvec == 0:
return
# When erasing, we must remove two kinds of information: the
# entry in the LAYER structure and the VECTOR information.
# Attention: if erasing the SEQUENCE leader, we need to update the
# DATE and TIME of the SEQUENCE
ii = self.VEC_INDX.get()
self.LAYERS.erase('VEC',ii,wid=self.cons)
self.LAYERS.print()
toconsola('Erasing data field '+str(ii),wid=self.cons)
del self.VEC[ii]
self.nvec -= 1
ii = self.nvec-1 if ii>= self.nvec else ii
self.VEC_INDX.set(ii)
_refill(ii)
if self.LAYERS.update:
toconsola('Updating TIME and DATE values of SEQUENCE',wid=self.cons)
LEADER_TYPE = self.LAYERS.TYPE[self.LAYERS.leader]
jj = self.LAYERS.TYPE_INDEX[self.LAYERS.leader]
if LEADER_TYPE == 'VEC':
self.DATE = self.VEC[jj].DATE.copy()
self.TIME = self.VEC[jj].TIME.copy()
elif LEADER_TYPE == 'FLD':
self.DATE = self.FLD[jj].DATE.copy()
self.TIME = self.DATE.toordinal()
self.PLOT.TLABEL.set(self.DATE[self.L.get()])
self.make_plot()
def _reget():
# ===========
self.VEC_INDX.set(_wsel.get())
ii = self.VEC_INDX.get()
_refill(ii)
def _refill(ii):
# ==============
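# Populate the selector widgets from vector field ii; when ii < 0 no
# field is loaded and all the widgets are disabled.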
if ii >= 0:
self.VEC_LIST = list(range(self.nvec))
_wsel.configure(state='!disabled')
_wsel['values'] = self.VEC_LIST
_went['textvariable'] = self.VEC[ii].UFILENAME
_went2['textvariable'] = self.VEC[ii].VFILENAME
_uvar.configure(state='!disabled')
_uvar['textvariable'] = self.VEC[ii].uname
_uvar['values'] = self.VEC[ii].U.icdf.VAR_MENU
_vvar.configure(state='!disabled')
_vvar['textvariable'] = self.VEC[ii].vname
_vvar['values'] = self.VEC[ii].V.icdf.VAR_MENU
_kbox.configure(state='!disabled')
_kbox['textvariable'] = self.VEC[ii].K
_kbox['values'] = self.VEC[ii].K_LIST
_lbox.configure(state='!disabled')
_lbox['textvariable'] = self.VEC[ii].L
_lbox['values'] = self.VEC[ii].L_LIST
_aent.configure(state='!disabled')
_aent['textvariable'] = self.VEC[ii].ALIAS
if self.VEC[ii].U.icdf.idk < 0:
_kbox.configure(state='disabled')
_zbox['text']='--'
else:
_zbox['text']=self.VEC[ii].Z_LIST[self.VEC[ii].K.get()]
if self.VEC[ii].U.icdf.idl < 0:
_lbox.configure(state='disabled')
_dbox['text']='--'
else:
_lbox['textvariable'] = self.VEC[ii].L
_lbox['values'] = self.VEC[ii].L_LIST
_dbox['text'] = self.VEC[ii].DATE[self.VEC[ii].L.get()]
_show['variable'] = self.VEC[ii].show
#_wsav.configure(state='normal')
else:
self.VEC = []
self.VEC_LIST = [None]
self.VEC_INDX = tk.IntVar()
self.VEC_INDX.set(0)
_wsel.configure(state='disabled')
_uvar.configure(state='disabled')
_vvar.configure(state='disabled')
_kbox.configure(state='disabled')
_lbox.configure(state='disabled')
_wsel['values'] = self.VEC_LIST
_went['textvariable'] = ''
_uvar['textvariable'] = ''
_uvar['values'] = ['']
_uvar.configure(state='disabled')
_vvar['textvariable'] = ''
_vvar['values'] = ['']
_vvar.configure(state='disabled')
_kbox['textvariable'] = ''
_kbox['values'] = ['']
_zbox['text'] = '--'
_lbox['text'] = ''
_lbox['values'] = ['']
_lbox['textvariable'] = ''
_lbox['values'] = ['']
_dbox['text'] = ['--']
_wsav.configure(state='disabled')
def _add(SOURCE):
# ===============
# Initialize VECTOR instance:
VEC = VECTOR()
def _cancel():
# ============
self.Window_currents_sel.destroy()
self.Window_currents_sel = None
def _done():
# ==========
global _uvar,_vvar
if empty(VEC.uname.get()):
VEC.U.varid = None
else:
VEC.U.varid = VEC.U.icdf.vname.index(VEC.uname.get())
if empty(VEC.vname.get()):
VEC.V.varid = None
else:
VEC.V.varid = VEC.V.icdf.vname.index(VEC.vname.get())
if VEC.U.varid is None or VEC.V.varid is None:
messagebox.showinfo(parent=self.Window_currents_sel,message='Select velocity components')
return
toconsola('2D-grid axes : '+'%s'%VEC.U.icdf.grid2d,wid=self.cons)
# Seems a suitable location for those statements:
VEC.U.varname = VEC.uname.get()
#VEC.U.varid = VEC.U.icdf.vname.index(VEC.U.varname)
VEC.U.ndims = VEC.U.icdf.ndims[VEC.U.varid]
VEC.U.get_info(wid=self.cons)
VEC.U.get_grid()
VEC.V.varname = VEC.vname.get()
#VEC.V.varid = VEC.V.icdf.vname.index(VEC.V.varname)
VEC.V.ndims = VEC.V.icdf.ndims[VEC.V.varid]
VEC.V.get_info(wid=self.cons)
if VEC.grid_type.get() == 'A' or VEC.grid_type.get() == 'B':
VEC.V.icdf.VAR_MENU = VEC.U.icdf.VAR_MENU[:]
else:
VEC.V.get_grid()
if VEC.grid_type.get() == 'C':
VEC.U.icdf.grid2d = True
VEC.V.icdf.grid2d = True
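# Arakawa C grid: U and V are staggered half a cell apart, so neighbouring
# points are averaged to bring both components onto common cell centres
# (the array shapes shrink by one along the staggered direction).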
# X-center
xmu0 = 0.5*(VEC.U.xx[:,:-1]+VEC.U.xx[:,1:])
xmv0 = VEC.V.xx[:,1:-1]
ymu0 = 0.5*(VEC.U.yy[:,:-1]+VEC.U.yy[:,1:])
ymv0 = VEC.V.yy[:,1:-1]
# Y-center
VEC.V.xx = 0.5*(xmv0[:-1,:]+xmv0[1:,:])
VEC.U.xx = xmu0[1:-1,:]
VEC.V.yy = 0.5*(ymv0[:-1,:]+ymv0[1:,:])
VEC.U.yy = ymu0[1:-1,:]
toconsola('Regridding field. Updating array shapes',wid=self.cons)
aa = VEC.U.xx.shape
VEC.U.icdf.nx = aa[1]
VEC.U.icdf.ny = aa[0]
VEC.V.icdf.nx = aa[1]
VEC.V.icdf.ny = aa[0]
#self.read_lonlat(VEC,VEC.icdf.xname,VEC.icdf.yname)
VEC.K_LIST = list(range(VEC.U.icdf.nz))
VEC.L_LIST = list(range(VEC.U.icdf.nt))
#VEC.K.set(0)
VEC.Z_LIST = VEC.U.get_zlist()
#VEC.L.set(0)
VEC.T_LIST, VEC.DATE, VEC.TIME = VEC.U.get_tlist()
#self.DepthandDate(VEC)
VEC.show.set(True)
# Adding a VECTOR in the Drawing class
#
nt = VEC.U.icdf.nt
self.LAYERS.add(TYPE='VEC',Filename=VEC.UFILENAME.get(),N=nt,wid=self.cons)
self.nvec += 1
self.VEC.append(VEC)
self.VEC_INDX.set(self.nvec-1)
self.VEC_LIST = list(range(self.nvec))
n = self.LAYERS.n
#self.nfiles += 1
#self.FILENAMES.append(VEC.UFILENAME.get())
#self.FILETYPES.append('VEC')
#self.FILEORDER.append(self.nvec-1)
#self.SEQUENCES.append(tk.BooleanVar(value=False)) # By default, no attached
#self.SEQLEADER.append(tk.BooleanVar(value=False))
#self.SEQNTIMES.append(VEC.U.icdf.nt)
ii = self.VEC_INDX.get() # Points to the new VECTOR
if self.first:
if self.drawmap is None:
if VEC.grid_type.get() == 'A' or VEC.grid_type.get() == 'B':
self.PLOT.WEST.set(self.VEC[ii].U.xmin)
self.PLOT.EAST.set(self.VEC[ii].U.xmax)
self.PLOT.SOUTH.set(self.VEC[ii].U.ymin)
self.PLOT.NORTH.set(self.VEC[ii].U.ymax)
else:
self.PLOT.WEST.set(max(self.VEC[ii].U.xmin,self.VEC[ii].V.xmin))
self.PLOT.EAST.set(min(self.VEC[ii].U.xmax,self.VEC[ii].V.xmax))
self.PLOT.SOUTH.set(max(self.VEC[ii].U.ymin,self.VEC[ii].V.ymin))
self.PLOT.NORTH.set(min(self.VEC[ii].U.ymax,self.VEC[ii].V.ymax))
self.plot_initialize()
#try:
# self.PLOT.XLABEL.set(self.VEC[ii].U.icdf.xname)
#except:
# self.PLOT.XLABEL.set('Longitude')
#try:
# self.PLOT.YLABEL.set(self.VEC[ii].U.icdf.yname)
#except:
# self.PLOT.YLABEL.set('Latitude')
self.DATE = self.VEC[ii].DATE.copy()
self.TIME = self.VEC[ii].TIME.copy()
self.PLOT.TLABEL.set(self.DATE[self.L.get()])
self.PLOT.VIDEO_L2.set(len(self.DATE)-1)
self.PLOT.SFRAME_L2.set(len(self.DATE)-1)
self.first = False
# if nt == 1:
# #if self.SEQNTIMES[ii] == 1:
# self.lbox.configure(state='disabled')
# else:
# self.lbox.configure(state='!disabled')
# self.lbox['values'] = self.L_LIST
# self.DATE = self.VEC[ii].DATE.copy()
# self.TIME = self.VEC[ii].TIME.copy()
#
# self.PLOT.TLABEL.set(self.VEC[ii].DATE[self.L.get()])
# if len(self.DATE) > 1:
# self.bnext.configure(state='normal')
#
# # CAROUSEL MANAGEMENT - VECTOR
# #if self.SEQNTIMES[-1] > 1:
# n = self.LAYERS.n
# if nt > 1:
# toconsola('Vector initiates SEQUENCE list',wid=self.cons)
# self.LAYERS.nsequence = 1
# self.LAYERS.INSEQUENCE[n].set(True)
# self.LAYERS.SEQUENCER[n].set(True)
# self.LAYERS.leader = n
#
# #self.nsequence = 1
# #self.SEQUENCES[-1].set(True)
# #self.SEQLEADER[-1].set(True) # Is the first field
# #self.SEQLEADER_INDX = self.nfiles
# self.DATE = self.VEC[ii].DATE.copy()
# self.TIME = self.VEC[ii].TIME.copy()
# self.L.set(self.VEC[ii].L.get())
# self.L_LIST = list(range(nt))
# self.NL = nt
# self.lbox.configure(state='normal')
# self.lbox['values'] = self.L_LIST
# self.DATE = self.VEC[ii].DATE.copy()
# self.TIME = self.VEC[ii].TIME.copy()
# if self.L.get() < self.NL-1:
# self.bnext.configure(state='normal')
# if self.L.get() > 0:
# self.bprev.configure(state='normal')
# else:
# Is this field member of the SEQUENCE?
# Is this field a member of the SEQUENCE?
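# The first multi-record field defines the common Time axis (DATE, TIME
# and the L selector); later fields with the same number of records are
# simply linked and kept synchronized with it.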
if nt > 1:
if self.NL == 0:
toconsola('Vector initiates Time axis',wid=self.cons)
self.VEC[ii].LINK.set(True)
self.TIME = self.VEC[ii].TIME.copy()
self.DATE = self.VEC[ii].DATE.copy()
self.NL = nt
self.L.set(self.VEC[ii].L.get())
self.L_LIST = list(range(nt))
self.lbox.configure(state='normal')
self.lbox['values'] = self.L_LIST
if self.L.get() < self.NL-1:
self.bnext.configure(state='normal')
if self.L.get() > 0:
self.bprev.configure(state='normal')
elif self.NL == nt:
toconsola('Linking Vector to Time axis',wid=self.cons)
self.VEC[ii].LINK.set(True)
self.VEC[ii].L.set(self.L.get()) #Synchronize records
# if self.LAYERS.nsequence == 0:
# toconsola('Vector initiates SEQUENCE list',wid=self.cons)
# self.LAYERS.nsequence = 1
# self.LAYERS.INSEQUENCE[n-1].set(True)
# self.LAYERS.SEQUENCER[n-1].set(True)
# self.LAYERS.leader = n-1
# self.LAYERS.seqlen = nt
## self.SEQUENCES[-1].set(True)
## self.SEQLEADER[-1].set(True)
## self.SEQLEADER_INDX = self.nfiles
# self.DATE = self.VEC[ii].DATE.copy()
# self.TIME = self.VEC[ii].TIME.copy()
# self.L.set(self.VEC[ii].L.get())
# self.L_LIST = list(range(nt))
# self.NL = nt
# self.lbox.configure(state='normal')
# self.lbox['values'] = self.L_LIST
# if self.L.get() < self.NL-1:
# self.bnext.configure(state='normal')
# if self.L.get() > 0:
# self.bprev.configure(state='normal')
# else:
# if nt == self.LAYERS.seqlen:
# toconsola('Adding vector to SEQUENCE list',wid=self.cons)
# self.VEC[ii].LINK.set(True)
# self.LAYERS.nsequence += 1
# self.LAYERS.INSEQUENCE[n-1].set(True)
# self.LAYERS.SEQUENCER[n-1].set(False)
## self.nsequence += 1
## self.SEQUENCES[-1].set(True)
## self.SEQLEADER[-1].set(False)
# self.VEC[ii].L.set(self.L.get()) #Synchronize records
_refill(ii)
self.Window_currents_sel.destroy()
self.Window_currents_sel = None
self.LAYERS.print()
def _arakawa():
# =============
toconsola('Selected Arakawa '+VEC.grid_type.get()+' grid ',wid=self.cons)
if VEC.grid_type.get() == 'A' or VEC.grid_type.get() == 'B':
vselect['state'] = 'normal'
vaxesid.Ibox['state'] = 'normal'
vaxesid.Jbox['state'] = 'normal'
vaxesid.Kbox['state'] = 'normal'
vaxesid.Lbox['state'] = 'normal'
vaxesid.Xbox['state'] = 'normal'
vaxesid.Ybox['state'] = 'normal'
vaxesid.Zbox['state'] = 'normal'
vaxesid.Tbox['state'] = 'normal'
vaxesid.wgr['state'] = 'normal'
else:
vselect['state'] = 'normal'
vaxesid.Ibox['state'] = 'normal'
vaxesid.Jbox['state'] = 'normal'
vaxesid.Kbox['state'] = 'normal'
vaxesid.Lbox['state'] = 'normal'
vaxesid.Xbox['state'] = 'normal'
vaxesid.Ybox['state'] = 'normal'
vaxesid.Zbox['state'] = 'normal'
vaxesid.Tbox['state'] = 'normal'
vaxesid.wgr['state'] = 'normal'
def _vselect():
# =============
global Vsel
nn = filedialog.askopenfilename(parent=self.Window_currents, \
filetypes=[('Netcdf','*.nc'), \
('CDF','*.cdf'), \
('ALL','*')])
if len(nn) == 0:
return
else:
filename = '%s' % nn
VEC.two_files = 1
VEC.VFILENAME.set(filename)
VEC.V.nc = Dataset(filename)
VEC.V.icdf = tools.geocdf(filename, wid=self.cons)
toconsola('Opening meridional velocity file '+VEC.VFILENAME.get(),wid=self.cons)
FV = ttk.Frame(FVmain,padding=5,borderwidth=5)
vaxesid = tools.WinGeoaxes(VEC.V.icdf,VEC.V.nc,FV)
FV.grid(row=1,column=0,columnspan=5)
Vsel['values'] = VEC.V.icdf.VAR_MENU
Vsel.bind('<<ComboboxSelected>>',lambda e: vaxesid.selected_var(VEC.V.icdf,VEC.V.nc,Vsel))
# Main part of the function ...
# Dispatch on the selected source (index into self.CURRENT_OPTIONS):
# 0 Operational, 1 HF Radar, 2 COPERNICUS, 3 Local Dataset,
# 4 Remote Dataset, 5 Active CONTOUR file
ISOURCE = self.CURRENT_OPTIONS.index(SOURCE)
if ISOURCE == 0:
filename = self.get_opendap_filename()
elif ISOURCE == 1:
filename = self.get_codar_filename()
elif ISOURCE == 2:
filename = self.get_copernicus_filename()
elif ISOURCE == 3:
nn = filedialog.askopenfilename(parent=self.Window_currents, \
filetypes=[('Netcdf','*.nc'), \
('CDF','*.cdf'), \
('ALL','*')])
if len(nn) == 0:
return
else:
filename = '%s' % nn
elif ISOURCE == 4:
aa = get_remote()
filename = aa.filename()
filename = 'https://cosmo.icm.csic.es/MEDSEA_100.nc'
if isinstance(filename,bytes):
filename = filename.decode('utf-8')
print('filename: ',filename)
else:
if self.ncdf <= 0:
messagebox.showinfo(message='No Contour file opened yet')
return
else:
jj = self.CDF_INDX.get()
filename = self.CDF[jj].FILENAME.get()
if empty(filename):
return
# Not empty filename:
'''Update to account for multiple Arakawa grids. We begin by duplicating
the velocity object and use the first one for the U information
and the second one for the V information. Once the grid information
has been filled, we merge the V-information of the second object
into the first one '''
#VEC = cdf_parameters()
VEC.UFILENAME.set(filename)
VEC.VFILENAME.set(filename)
#VEC.VEL = vel_parameters()
#toconsola(VEC.VEL.MESSAGE,wid=self.cons)
VEC.U.nc = Dataset(filename)
VEC.U.icdf = tools.geocdf(filename, wid=self.cons)
VEC.V.nc = Dataset(filename)
VEC.V.icdf = tools.geocdf(filename, wid=self.cons)
# Object to capture the information about the V-field
#VEC2 = cdf_parameters()
#VEC2.FILENAME.set(filename)
#VEC2.VEL = vel_parameters()
toconsola(VEC.MESSAGE,wid=self.cons)
#VEC2.ncid = Dataset(filename)
#VEC2.icdf = tools.geocdf(filename, wid=self.cons)
# self.read_lonlat(VEC,VEC.icdf.xname,VEC.icdf.yname)
# self.DepthandDate(VEC)
# VEC.VEL.show.set(True)
if self.Window_currents_sel is None:
self.Window_currents_sel = tk.Toplevel(self.master)
self.Window_currents_sel.title('SELECT VELOCITY COMPONENTS')
self.Window_currents_sel.protocol('WM_DELETE_WINDOW',self.Window_currents_sel.destroy)
#else:
# self.Window_currents_sel.lift()
# return
font_bold = tkfont.Font(font='TkDefaultFont').copy()
font_bold['weight']='bold'
'''Now, we launch two WinGeoaxes widgets to capture the two
components of the field'''
FAgrid = ttk.Frame(self.Window_currents_sel,padding=5,borderwidth=5)
ttk.Label(FAgrid,text='Grid type', \
font=font_bold).grid(row=0,column=0,sticky='w')
gtype = ttk.Combobox(FAgrid,textvariable=VEC.grid_type, \
values=VEC.grid_type_list, \
width=5)
gtype.grid(row=0,column=1,columnspan=1,sticky='w')
gtype.bind('<<ComboboxSelected>>',lambda e: _arakawa())
FAgrid.grid(row=0,column=0,columnspan=5)
# -------------------------------------------------------
FUmain = ttk.Frame(self.Window_currents_sel,padding=5,borderwidth=5)
FU = ttk.Frame(FUmain,padding=5,borderwidth=5)
uaxesid = tools.WinGeoaxes(VEC.U.icdf,VEC.U.nc,FU)
FU.grid(row=0,column=0,columnspan=5)
ttk.Label(FUmain,text='Select U', \
borderwidth=3, \
font=font_bold).grid(row=1,column=2)
Usel = ttk.Combobox(FUmain,textvariable=VEC.uname, \
values=VEC.U.icdf.VAR_MENU, \
width=20)
Usel.bind('<<ComboboxSelected>>',lambda e: uaxesid.selected_var(VEC.U.icdf,VEC.U.nc,Usel))
Usel.grid(row=1,column=3,columnspan=2)
FUmain.grid()
# -------------------------------------------------------
global Vsel
FVmain = ttk.Frame(self.Window_currents_sel,padding=5,borderwidth=5)
vselect = ttk.Button(FVmain,text='Open meridional velocity file',command=_vselect)
vselect.grid(row=0,column=0,columnspan=2)
FV = ttk.Frame(FVmain,padding=5,borderwidth=5)
vaxesid = tools.WinGeoaxes(VEC.V.icdf,VEC.V.nc,FV)
FV.grid(row=1,column=0,columnspan=5)
ttk.Label(FVmain,text='Select V', \
borderwidth=3, \
font=font_bold).grid(row=2,column=2)
Vsel = ttk.Combobox(FVmain,textvariable=VEC.vname, \
values=VEC.V.icdf.VAR_MENU, \
width=20)
Vsel.bind('<<ComboboxSelected>>',lambda e: vaxesid.selected_var(VEC.V.icdf,VEC.V.nc,Vsel))
Vsel.grid(row=2,column=3,columnspan=2)
FVmain.grid()
if VEC.grid_type.get() == 'A' or VEC.grid_type.get() == 'B':
vselect['state'] = 'normal'
vaxesid.Ibox['state'] = 'normal'
vaxesid.Jbox['state'] = 'normal'
vaxesid.Kbox['state'] = 'normal'
vaxesid.Lbox['state'] = 'normal'
vaxesid.Xbox['state'] = 'normal'
vaxesid.Ybox['state'] = 'normal'
vaxesid.Zbox['state'] = 'normal'
vaxesid.Tbox['state'] = 'normal'
vaxesid.wgr['state'] = 'normal'
F1 = ttk.Frame(self.Window_currents_sel,padding=5)
cancel = ttk.Button(F1,text='Cancel',command=_cancel)
cancel.grid(row=0,column=3,sticky='e',padx=10)
cancel.bind("<Return>",lambda e:_cancel())
done = ttk.Button(F1,text='Done',command=_done)
done.grid(row=0,column=4,sticky='e',padx=10)
done.bind("<Return>",lambda e:_done())
F1.grid(sticky='we')
self.Window_currents_sel.wait_window(self.Window_currents_sel)
def _lselection():
# ================
_dbox['text'] = self.VEC[ii].DATE[self.VEC[ii].L.get()]
def _kselection():
# ================
_zbox['text'] = self.VEC[ii].Z_LIST[self.VEC[ii].K.get()]
def _uselection():
# ================
ii = self.VEC_INDX.get()
try:
self.VEC[ii].U.varname = self.VEC[ii].uname.get()
self.VEC[ii].U.varid = self.VEC[ii].U.icdf.vname.index( \
self.VEC[ii].uname.get())
except:
self.VEC[ii].U.varname = None
self.VEC[ii].U.varid = -1
def _vselection():
# ================
ii = self.VEC_INDX.get()
try:
self.VEC[ii].V.varname = self.VEC[ii].vname.get()
self.VEC[ii].V.varid = self.VEC[ii].V.icdf.vname.index( \
self.VEC[ii].vname.get())
except:
self.VEC[ii].V.varname = None
self.VEC[ii].V.varid = -1
def _save():
# ================
ii = self.VEC_INDX.get()
print('Saving ',ii)
self.VEC[ii].save()
# Main Window
# ============
if self.Window_currents is None:
self.Window_currents = tk.Toplevel(self.master)
self.Window_currents.title("Currents selector")
self.Window_currents.protocol('WM_DELETE_WINDOW',_close)
else:
self.Window_currents.lift()
if self.nvec > 0:
ii = self.VEC_INDX.get()
else:
ii = -1
global _uvar,_vvar
self.Window_currents_sel = None
F0 = ttk.Frame(self.Window_currents,padding=5)
# Add
ttk.Button(F0,text='Import',command=lambda:_add(self.VSOURCE.get())).grid(row=1,column=0,padx=3)
_source = ttk.Combobox(F0,textvariable=self.VSOURCE, \
values=self.CURRENT_OPTIONS)
_source.grid(row=0,column=0,padx=3)
#_source.bind('<<ComboboxSelected>>', \
# lambda e: _add(self.VSOURCE.get()))
# Filename:
ttk.Label(F0,text='Netcdf file').grid(row=0,column=1,padx=3)
_wsel = ttk.Combobox(F0,textvariable=self.VEC_INDX, \
values=self.VEC_LIST,width=5)
_wsel.grid(row=0,column=2)
_wsel.bind('<<ComboboxSelected>>',lambda e: _reget())
_went = ttk.Entry(F0,justify='left',width=50,state='readonly')
_went.grid(row=0,column=3,columnspan=5,padx=3,sticky='w')
_went2 = ttk.Entry(F0,justify='left',width=50,state='readonly')
_went2.grid(row=1,column=3,columnspan=5,padx=3,sticky='w')
# Velocity components:
ttk.Label(F0,text='Zonal').grid(row=2,column=1,padx=3,pady=3)
_uvar = ttk.Combobox(F0,width=15)
_uvar.grid(row=2,column=2,columnspan=2,sticky='w')
_uvar.bind('<<ComboboxSelected>>',lambda e: _uselection())
ttk.Label(F0,text='Meridional').grid(row=2,column=4,padx=3,pady=3)
_vvar = ttk.Combobox(F0,width=15)
_vvar.grid(row=2,column=5,columnspan=2,sticky='w')
_vvar.bind('<<ComboboxSelected>>',lambda e: _vselection())
# Depth:
ttk.Label(F0,text='Depth').grid(row=3,column=1,padx=3,pady=3)
_kbox = ttk.Combobox(F0,values=['0'],width=5)
_kbox.grid(row=3,column=2)
_kbox.bind('<<ComboboxSelected>>',lambda e: _kselection())
_zbox = ttk.Label(F0,width=20)
_zbox.grid(row=3,column=3,columnspan=2,sticky='w')
# Time:
ttk.Label(F0,text='Time').grid(row=4,column=1,padx=3,pady=3)
_lbox = ttk.Combobox(F0,width=5)
_lbox.grid(row=4,column=2)
_lbox.bind('<<ComboboxSelected>>',lambda e: _lselection())
_dbox = ttk.Label(F0,width=20)
_dbox.grid(row=4,column=3,columnspan=2,sticky='w')
#Alias
ttk.Label(F0,text='Alias').grid(row=5,column=1,padx=3,pady=3)
_aent = ttk.Entry(F0,width=15,justify='left')
_aent.grid(row=5,column=2,columnspan=2,sticky='w')
if ii == -1:
_wsel.configure(state='disabled')
_uvar.configure(state='disabled')
_vvar.configure(state='disabled')
_kbox.configure(state='disabled')
_lbox.configure(state='disabled')
_aent.configure(state='disabled')
else:
_went['textvariable'] = self.VEC[ii].UFILENAME
_went2['textvariable'] = self.VEC[ii].VFILENAME
_uvar['textvariable'] = self.VEC[ii].uname
_vvar['textvariable'] = self.VEC[ii].vname
_uvar['values'] = self.VEC[ii].U.icdf.VAR_MENU
_vvar['values'] = self.VEC[ii].V.icdf.VAR_MENU
_kbox['textvariable'] = self.VEC[ii].K
_kbox['values'] = self.VEC[ii].K_LIST
_aent['textvariable'] = self.VEC[ii].ALIAS
if self.VEC[ii].U.icdf.idk < 0:
_kbox.configure(state='disabled')
_zbox['text']='--'
else:
_zbox['text']=self.VEC[ii].Z_LIST[self.VEC[ii].K.get()]
if self.VEC[ii].U.icdf.idl < 0:
_lbox.configure(state='disabled')
_dbox['text']='--'
try:
nodate = empty(self.VEC[ii].DATE[0])
except:
nodate = False
if nodate:
_dbox['text']='--'
else:
_dbox['text'] = self.VEC[ii].DATE[0]
else:
_lbox['textvariable'] = self.VEC[ii].L
_lbox['values'] = self.VEC[ii].L_LIST
_dbox['text'] = self.VEC[ii].DATE[self.VEC[ii].L.get()]
F0.grid(row=0,column=0)
F1 = ttk.Frame(self.Window_currents,padding=5)
_wsav = ttk.Button(F1,text='Save data',command=_save)
_wsav.grid(row=1,column=0,padx=3,sticky='w')
if ii == -1:
_show = ttk.Checkbutton(F1,text='Show')
_wsav.configure(state='disabled')
else:
_show = ttk.Checkbutton(F1,text='Show')
_show['variable']=self.VEC[ii].show
_show.configure(command=self.make_plot)
_wsav.configure(state='normal')
_show.grid(row=1,column=5)
ttk.Button(F1,text='Cancel',command=_close).grid(row=1,column=6,padx=3)
ttk.Button(F1,text='Clear',command=_clear).grid(row=1,column=7,padx=3)
ttk.Button(F1,text='Plot',command=_done).grid(row=1,column=8,padx=3)
ttk.Label(F1,text=' ',width=8).grid(row=1,column=1,padx=3,sticky='w')
ttk.Label(F1,text=' ',width=8).grid(row=1,column=2,padx=3,sticky='w')
F1.grid(row=1,column=0)
# =============================
def get_opendap_filename(self):
# =============================
def _close():
self.Window_opendap.destroy()
self.Window_opendap = None
if self.Window_opendap is None:
self.Window_opendap = tk.Toplevel(self.master)
self.Window_opendap.title('Load Operational service Opendap file')
self.Window_opendap.protocol('WM_DELETE_WINDOW',_close)
a = providers.WinOpendap(self.Window_opendap)
toconsola(a.MESSAGE,wid=self.cons)
self.Window_opendap.wait_window()
self.Window_opendap = None
filename = a.get_filename()
return filename
else:
self.Window_opendap.lift()
# =============================
def get_codar_filename(self):
# =============================
def _close():
self.Window_codar.destroy()
self.Window_codar = None
if self.Window_codar is None:
self.Window_codar = tk.Toplevel(self.master)
self.Window_codar.title('HF Radar station selector')
self.Window_codar.protocol('WM_DELETE_WINDOW',_close)
a = codar.WinCodar(self.Window_codar)
toconsola(a.MESSAGE,wid=self.cons)
self.Window_codar.wait_window()
self.Window_codar = None
filename = a.get_filename()
return filename
else:
self.Window_codar.lift()
# ================================
def get_copernicus_filename(self):
# ================================
def _close():
self.Window_copernicus.destroy()
self.Window_copernicus = None
if self.Window_copernicus is None:
self.Window_copernicus = tk.Toplevel(self.master)
self.Window_copernicus.title('COPERNICUS file selector')
self.Window_copernicus.configure(background='#87CEEB')
self.Window_copernicus.protocol('WM_DELETE_WINDOW',_close)
a = copernicus.WinTracking(self.Window_copernicus)
toconsola(a.MESSAGE,wid=self.cons)
self.Window_copernicus.wait_window()
self.Window_copernicus = None
filename = a.out()
return filename
else:
self.Window_copernicus.lift()
# ==================
def layers(self):
# ==================
'''Display the files being opened'''
def _close():
# ===========
self.Window_files.destroy()
self.Window_files = None
def _tosequence():
# ================
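# Interpolate the float trajectory onto the figure time axis (self.TIME) so it
# can be animated together with the other layers; times outside the trajectory
# range are filled with NaN by interp1d.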
if TYPE == 'FLT':
self.FLOAT[ii].MAPX = []
self.FLOAT[ii].MAPY = []
if self.FLOAT[ii].nfloats > 1:
for i in range(self.FLOAT[ii].nfloats):
f = interpolate.interp1d(self.FLOAT[ii].TIME,self.FLOAT[ii].lon[:,i],
bounds_error=False, fill_value=np.NaN)
self.FLOAT[ii].MAPX.append(f(self.TIME))
f = interpolate.interp1d(self.FLOAT[ii].TIME,self.FLOAT[ii].lat[:,i],
bounds_error=False, fill_value=np.NaN)
self.FLOAT[ii].MAPY.append(f(self.TIME))
# Transpose FLT.MAPX and FLT.MAPY:
self.FLOAT[ii].MAPX = np.array(self.FLOAT[ii].MAPX).T.tolist()
self.FLOAT[ii].MAPY = np.array(self.FLOAT[ii].MAPY).T.tolist()
else:
self.FLOAT[ii].Fx = interpolate.interp1d(self.FLOAT[ii].TIME,self.FLOAT[ii].lon,
bounds_error=False, fill_value=np.NaN)
self.FLOAT[ii].MAPX = self.FLOAT[ii].Fx(self.TIME)
self.FLOAT[ii].Fy = interpolate.interp1d(self.FLOAT[ii].TIME,self.FLOAT[ii].lat,
bounds_error=False, fill_value=np.NaN)
self.FLOAT[ii].MAPY = self.FLOAT[ii].Fy(self.TIME)
#Main Window
# =========
if self.LAYERS.n == 0:
toconsola('No layers added yet',wid=self.cons)
return
if self.Window_files is None:
self.Window_files = tk.Toplevel(self.master,width=80)
self.Window_files.title('Plot layers')
self.Window_files.resizable(width=True,height=False)
self.Window_files.protocol('WM_DELETE_WINDOW',_close)
else:
self.Window_files.lift()
return
Fh = ttk.Frame(self.Window_files,borderwidth=5,padding=5)
txt1 = 'Number layers: %d' % self.LAYERS.n
ttk.Label(Fh,text=txt1).grid(row=0,column=0,padx=3,sticky='w')
Fh.grid(sticky='w')
F0 = ttk.Frame(self.Window_files,width=80,borderwidth=5,padding=5)
ttk.Label(F0,text='SHOW').grid(row=0,column=0,padx=3,sticky='we')
ttk.Label(F0,text='TYPE').grid(row=0,column=1,padx=3,sticky='we')
ttk.Label(F0,text='SOURCE',width=10).grid(row=0,column=2,padx=3,sticky='we')
ttk.Label(F0,text='ZORDER').grid(row=0,column=3,padx=3,sticky='we')
ttk.Label(F0,text='ALPHA').grid(row=0,column=4,padx=3,sticky='we')
ttk.Label(F0,text='TIME LINK').grid(row=0,column=5,padx=3,sticky='we')
#ttk.Label(F0,text='SEQUENCE').grid(row=0,column=5,padx=3,sticky='we')
#ttk.Label(F0,text='SEQ LEADER').grid(row=0,column=6,padx=3,sticky='we')
ttk.Label(F0,text='ALIAS',width=12).grid(row=0,column=7,padx=3,sticky='we')
ttk.Label(F0,text='FILENAME').grid(row=0,column=8,sticky='we')
nvec = -1
nfld = -1
nflo = -1
i = 0
for i in range(self.LAYERS.n):
TYPE = self.LAYERS.TYPE[i]
ii = self.LAYERS.TYPE_INDEX[i]
noseq = False
if TYPE == 'VEC':
ttk.Checkbutton(F0,variable=self.VEC[ii].show,\
command=self.make_plot). \
grid(row=i+1,column=0,padx=3)
ttk.Label(F0,text=self.VEC[ii].SOURCE,justify='left',width=10).grid(row=i+1,column=2,padx=3)
zz = ttk.Entry(F0,textvariable=self.VEC[ii].PLOT.ZORDER,width=3)
zz.grid(row=i+1,column=3,padx=3)
zz.bind("<Return>",lambda f: self.make_plot())
aa = ttk.Entry(F0,textvariable=self.VEC[ii].PLOT.ALPHA,width=3)
aa.grid(row=i+1,column=4,padx=3)
aa.bind("<Return>",lambda f: self.make_plot())
# Link
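# The TIME LINK checkbox is only enabled when the layer has the same number of
# time records as the figure time axis (nt == self.NL).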
cc = ttk.Checkbutton(F0,variable=self.VEC[ii].LINK)
cc.grid(row=i+1,column=5,padx=3)
if self.VEC[ii].U.icdf.nt != self.NL:
cc.configure(state='disabled')
# Alias
ttk.Label(F0,text=self.VEC[ii].ALIAS.get(),justify='left',
width=12).grid(row=i+1,column=7,padx=3)
if TYPE == 'FLD':
ttk.Checkbutton(F0,variable=self.CDF[ii].show,\
command=self.make_plot). \
grid(row=i+1,column=0,padx=3)
ttk.Label(F0,text=self.CDF[ii].SOURCE,justify='left',width=10).grid(row=i+1,column=2,padx=3)
zz = ttk.Entry(F0,textvariable=self.CDF[ii].PLOT.ZORDER,width=3)
zz.grid(row=i+1,column=3,padx=3)
zz.bind("<Return>",lambda f: self.make_plot())
aa = ttk.Entry(F0,textvariable=self.CDF[ii].PLOT.ALPHA,width=3)
aa.grid(row=i+1,column=4,padx=3)
aa.bind("<Return>",lambda f: self.make_plot())
# Link
cc = ttk.Checkbutton(F0,variable=self.CDF[ii].LINK)
cc.grid(row=i+1,column=5,padx=3)
if self.CDF[ii].FLD.icdf.nt != self.NL:
cc.configure(state='disabled')
ttk.Label(F0,text=self.CDF[ii].ALIAS.get(),justify='left',width=12).grid(row=i+1,column=7,padx=3)
if TYPE == 'FLOAT':
ttk.Checkbutton(F0,variable=self.FLOAT[ii].show,\
command=self.make_plot). \
grid(row=i+1,column=0,padx=3)
ttk.Label(F0,text=self.FLOAT[ii].SOURCE,justify='left',width=10).grid(row=i+1,column=2,padx=3)
zz = ttk.Entry(F0,textvariable=self.FLOAT[ii].PLOT.ZORDER,width=3)
zz.grid(row=i+1,column=3,padx=3)
zz.bind("<Return>",lambda f: self.make_plot())
aa = ttk.Entry(F0,textvariable=self.FLOAT[ii].PLOT.ALPHA,width=3)
aa.grid(row=i+1,column=4,padx=3)
aa.bind("<Return>",lambda f: self.make_plot())
ttk.Label(F0,text=self.FLOAT[ii].ALIAS.get(),justify='left',width=12).grid(row=i+1,column=7,padx=3)
if TYPE == 'SAIDIN':
ttk.Checkbutton(F0,variable=self.SAIDIN.show,\
command=self.make_plot). \
grid(row=i+1,column=0,padx=3)
ttk.Label(F0,text=self.SAIDIN.SOURCE,justify='left',width=10).grid(row=i+1,column=2,padx=3)
zz = ttk.Entry(F0,textvariable=self.SAIDIN.PLOT.ZORDER,width=3)
zz.grid(row=i+1,column=3,padx=3)
zz.bind("<Return>",lambda f: self.make_plot())
aa = ttk.Entry(F0,textvariable=self.SAIDIN.PLOT.ALPHA,width=3)
aa.grid(row=i+1,column=4,padx=3)
aa.bind("<Return>",lambda f: self.make_plot())
ttk.Label(F0,text=self.SAIDIN.ALIAS.get(),justify='left',width=12).grid(row=i+1,column=7,padx=3)
noseq = True
# Show, Zorder
if TYPE == 'MARKER':
ttk.Checkbutton(F0,variable=self.MARKER[ii].show,\
command=self.make_plot). \
grid(row=i+1,column=0,padx=3)
ttk.Label(F0,text=self.MARKER[ii].SOURCE,justify='left',width=10).grid(row=i+1,column=2,padx=3)
zz = ttk.Entry(F0,textvariable=self.MARKER[ii].PLOT.ZORDER,width=3)
zz.grid(row=i+1,column=3,padx=3)
zz.bind("<Return>",lambda f: self.make_plot())
aa = ttk.Entry(F0,textvariable=self.MARKER[ii].PLOT.ALPHA,width=3)
aa.grid(row=i+1,column=4,padx=3)
aa.bind("<Return>",lambda f: self.make_plot())
ttk.Label(F0,text=self.MARKER[ii].ALIAS.get(),justify='left',width=12).grid(row=i+1,column=7,padx=3)
noseq = True
if TYPE == 'SHAPE':
ttk.Checkbutton(F0,variable=self.SHAPE[ii].show,\
command=self.make_plot). \
grid(row=i+1,column=0,padx=3)
ttk.Label(F0,text=self.SHAPE[ii].SOURCE,justify='left',width=10).grid(row=i+1,column=2,padx=3)
zz = ttk.Entry(F0,textvariable=self.SHAPE[ii].PLOT.ZORDER,width=3)
zz.grid(row=i+1,column=3,padx=3)
zz.bind("<Return>",lambda f: self.make_plot())
aa = ttk.Entry(F0,textvariable=self.SHAPE[ii].PLOT.ALPHA,width=3)
aa.grid(row=i+1,column=4,padx=3)
aa.bind("<Return>",lambda f: self.make_plot())
ttk.Label(F0,text=self.SHAPE[ii].ALIAS.get(),justify='left',width=12).grid(row=i+1,column=7,padx=3)
noseq = True
if TYPE == 'ELLIPSE':
ttk.Checkbutton(F0,variable=self.ELLIPSE[ii].show,\
command=self.make_plot). \
grid(row=i+1,column=0,padx=3)
ttk.Label(F0,text=self.ELLIPSE[ii].SOURCE,justify='left',width=10).grid(row=i+1,column=2,padx=3)
zz = ttk.Entry(F0,textvariable=self.ELLIPSE[ii].PLOT.ZORDER,width=3)
zz.grid(row=i+1,column=3,padx=3)
zz.bind("<Return>",lambda f: self.make_plot())
aa = ttk.Entry(F0,textvariable=self.ELLIPSE[ii].PLOT.ALPHA,width=3)
aa.grid(row=i+1,column=4,padx=3)
aa.bind("<Return>",lambda f: self.make_plot())
ttk.Label(F0,text=self.ELLIPSE[ii].ALIAS.get(),justify='left',width=12).grid(row=i+1,column=7,padx=3)
noseq = True
if TYPE == 'FEATURE':
ttk.Checkbutton(F0,variable=self.FEATURE.DATA[ii].show,\
command=self.make_plot). \
grid(row=i+1,column=0,padx=3)
ttk.Label(F0,text=self.FEATURE.DATA[ii].SOURCE,justify='left',width=10).grid(row=i+1,column=2,padx=3)
zz = ttk.Entry(F0,textvariable=self.FEATURE.DATA[ii].PLOT.ZORDER,width=3)
zz.grid(row=i+1,column=3,padx=3)
zz.bind("<Return>",lambda f: self.make_plot())
aa = ttk.Entry(F0,textvariable=self.FEATURE.DATA[ii].PLOT.ALPHA,width=3)
aa.grid(row=i+1,column=4,padx=3)
aa.bind("<Return>",lambda f: self.make_plot())
ttk.Label(F0,text=self.FEATURE.DATA[ii].ALIAS.get(),justify='left',width=12).grid(row=i+1,column=7,padx=3)
noseq = True
if TYPE == 'PATCH':
ttk.Checkbutton(F0,variable=self.PATCH[ii].show,\
command=self.make_plot). \
grid(row=i+1,column=0,padx=3)
ttk.Label(F0,text=self.PATCH[ii].SOURCE,justify='left',width=10).grid(row=i+1,column=2,padx=3)
zz = ttk.Entry(F0,textvariable=self.PATCH[ii].PLOT.ZORDER,width=3)
zz.grid(row=i+1,column=3,padx=3)
zz.bind("<Return>",lambda f: self.make_plot())
aa = ttk.Entry(F0,textvariable=self.PATCH[ii].PLOT.ALPHA,width=3)
aa.grid(row=i+1,column=4,padx=3)
aa.bind("<Return>",lambda f: self.make_plot())
ttk.Label(F0,text=self.PATCH[ii].ALIAS.get(),justify='left',width=12).grid(row=i+1,column=7,padx=3)
noseq = True
# Type
ttk.Label(F0,text=TYPE, \
width=7,justify='left').grid(row=i+1, \
column=1, \
columnspan=1,padx=3,sticky='we')
# # Sequence
# cc = ttk.Checkbutton(F0,variable=self.LAYERS.INSEQUENCE[i],command=_tosequence)
# cc.grid(row=i+1,column=5,padx=3)
# if self.LAYERS.NREC[ii] != self.LAYERS.NREC[self.LAYERS.leader]:
# cc.configure(state='disabled')
# # Sequence leader
# bb = ttk.Checkbutton(F0,variable=self.LAYERS.SEQUENCER[i])
# bb.grid(row=i+1,column=6,padx=3)
#
# if self.LAYERS.NREC[i] <= 1 or noseq:
# cc.configure(state='disabled')
# bb.configure(state='disabled')
# Filename
try:
base = os.path.basename(self.LAYERS.FILENAME[i])
except:
base = '-'
ttk.Label(F0,text=base, \
width=60,justify='left').grid(row=i+1, \
column=8, \
columnspan=2,padx=3)
F0.grid()
for i in range(self.LAYERS.n):
toconsola('%s as %s' % (self.LAYERS.FILENAME[i],self.LAYERS.TYPE[i]),wid=self.cons)
# ===========================
def configuration_file(self):
# ===========================
''' Open the window to select or create the configuration folder '''
new_conf = tk.StringVar()
new_conf.set(COSMO_CONF_NAME)
# -----------
def _done():
# -----------
'''Save the selected configuration and close the widget'''
if exists(self.PLOT.FILECONF):
toconsola('Reading configuration file '+self.PLOT.FILECONF,wid=self.cons)
try:
conf = self.PLOT.conf_load(self.PLOT.FILECONF)
self.PLOT.conf_set(conf)
except:
toconsola('Error reading, using default parameters',wid=self.cons)
conf = self.PLOT.conf_get()
self.PLOT.conf_save(conf,self.PLOT.FILECONF)
else:
toconsola('Saving configuration file ...',wid=self.cons)
conf = self.PLOT.conf_get()
self.PLOT.conf_save(conf,self.PLOT.FILECONF)
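# Persist the selected configuration name in COSMO_CONF_DATA (JSON) so that the
# same configuration folder can be recovered on the next start.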
conf = {}
conf['COSMO_CONF_NAME']=COSMO_CONF_NAME
with io.open(COSMO_CONF_DATA,'w',encoding='utf8') as outfile:
_str = json.dumps(conf,ensure_ascii=False,
sort_keys=False,
indent=2,
separators=(',',': '))
outfile.write(to_unicode(_str)+'\n')
outfile.close()
self.Window_cfile.destroy()
self.Window_cfile = None
# -----------
def _cancel():
# -----------
'''Recover the backup value and close the widget'''
global COSMO_CONF,COSMO_CONF_NAME
with open(COSMO_CONF_DATA) as infile:
conf = json.load(infile)
COSMO_CONF_NAME = conf['COSMO_CONF_NAME']
COSMO_CONF = COSMO_CONF_PATH + COSMO_CONF_NAME + os.sep
self.Window_cfile.destroy()
self.Window_cfile = None
def _select():
# ============
global COSMO_CONF,COSMO_CONF_PATH,COSMO_CONF_NAME
nn = tk.filedialog.askdirectory(parent=self.Window_cfile,
initialdir=COSMO_CONF_PATH)
if len(nn) == 0:
return
if os.path.isdir(nn):
# Check that the user has not just selected the folder, but also opened it
#
if nn == COSMO_CONF_PATH or nn+os.sep == COSMO_CONF_PATH:
toconsola('Configuration name must be "Opened" in dialog, not just selected ;-)',wid=self.cons)
toconsola('The configuration has not changed',wid=self.cons)
return
toconsola('Configuration folder exists',wid=self.cons)
COSMO_CONF_NAME = '%s' % os.path.basename(os.path.normpath(nn))
COSMO_CONF = nn + os.sep
# else:
# toconsola('New Configuration folder',wid=self.cons)
# os.makedirs(nn)
# COSMO_CONF_NAME = '%s' % os.path.basename(os.path.normpath(nn))
# COSMO_CONF = nn + os.sep
new_conf.set(COSMO_CONF_NAME)
message ='COSMO_CONF_PATH = '+COSMO_CONF_PATH+"\n"+ \
'COSMO_CONF_NAME = '+COSMO_CONF_NAME+"\n"+ \
'COSMO_CONF = '+COSMO_CONF
toconsola(message,wid=self.cons)
self.PLOT.FILECONF = COSMO_CONF + 'drawing.conf'
toconsola('self.PLOT.FILECONF = '+self.PLOT.FILECONF,wid=self.cons)
def _create(event=None):
# ======================
global COSMO_CONF,COSMO_CONF_PATH,COSMO_CONF_NAME
if empty(new_conf.get()):
toconsola('Empty configuration name',wid=self.cons)
new_conf.set(COSMO_CONF_NAME)
return
COSMO_CONF_NAME = '%s' % new_conf.get()
COSMO_CONF = COSMO_CONF_PATH+COSMO_CONF_NAME+os.sep
if os.path.isdir(COSMO_CONF):
toconsola('Configuration ' + COSMO_CONF + ' already exists',wid=self.cons)
toconsola('Overwriting it !',wid=self.cons)
else:
toconsola('Writing in configuration folder '+COSMO_CONF,wid=self.cons)
os.makedirs(COSMO_CONF)
self.PLOT.FILECONF = COSMO_CONF + 'drawing.conf'
# Main window
# -----------
if self.Window_cfile is not None:
self.Window_cfile.lift()
return
self.Window_cfile = tk.Toplevel(self.master)
self.Window_cfile.title('Configuration file')
self.Window_cfile.resizable(width=False,height=False)
self.Window_cfile.protocol('WM_DELETE_WINDOW',_cancel)
font_bold = tkfont.Font(font='TkDefaultFont').copy()
font_bold['weight']='bold'
F0 = ttk.Frame(self.Window_cfile,borderwidth=5,padding=5)
ttk.Label(F0,text='Configuration PATH: ',
font=font_bold).grid(row=0,column=0)
ttk.Label(F0,text=COSMO_CONF_PATH,width=40,
justify='left').grid(row=0,column=1,columnspan=4)
ttk.Label(F0,text='Configuration name: ',
font=font_bold).grid(row=1,column=0)
bb = ttk.Label(F0,textvariable=new_conf,width=40,
justify='left')
bb.grid(row=1,column=1,columnspan=4)
ttk.Label(F0,text='Load configuration',
font=font_bold).grid(row=2,column=0)
ttk.Button(F0,text='Select',command=_select).grid(row=2,column=1,padx=3)
ttk.Label(F0,text='New configuration',
font=font_bold).grid(row=3,column=0)
aa = ttk.Entry(F0,textvariable=new_conf,width=30)
aa.grid(row=3,column=1,columnspan=3)
bb = ttk.Button(F0,text='Create',command=_create)
bb.grid(row=3,column=4,padx=3)
bb.bind("<Return>",lambda f: _create())
#bb.bind("<Return>",_create())
cancel = ttk.Button(F0,text='Cancel',command=_cancel)
cancel.grid(row=4,column=0,padx=3)
cancel.bind("<Return>",lambda e:_cancel())
done = ttk.Button(F0,text='Done',command=_done)
done.grid(row=4,column=1,padx=3)
done.bind("<Return>",lambda e:_done())
F0.grid()
# ==================================
def figure_save(self):
# ==================================
''' Save the drawing configuration using JSON'''
toconsola('Saving figure ...',wid=self.cons)
CONF = []
# Add the main PLOT class:
#
conf = self.PLOT.conf_get()
CONF.append(conf)
# Add the FILES (SAIDIN, CONTOURS, VECTORS, TRAJECTORIES):
# Types: VEC, FLD, SAIDIN, FLOAT
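# For each layer, store its filename, type and number of records together with
# the layer-specific configuration dictionary, in plotting order.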
for i in range(self.LAYERS.n):
TYPE = self.LAYERS.TYPE[i]
ii = self.LAYERS.TYPE_INDEX[i]
conf = {}
conf['FILENAME'] = self.LAYERS.FILENAME[i]
conf['TYPE'] = TYPE
conf['NREC'] = self.LAYERS.NREC[i]
if TYPE == 'FLD':
conf['CDF'] = self.CDF[ii].conf_get()
elif TYPE == 'VEC':
conf['VEC'] = self.VEC[ii].conf_get()
elif TYPE == 'SAIDIN':
conf['SAIDIN'] = self.SAIDIN.conf_get()
elif TYPE == 'FLOAT':
conf['FLOAT'] = self.FLOAT[ii].conf_get()
elif TYPE == 'MARKER':
conf['MARKER'] = self.MARKER[ii].conf_get()
elif TYPE == 'SHAPE':
conf['SHAPE'] = self.SHAPE[ii].conf_get()
elif TYPE == 'ELLIPSE':
conf['ELLIPSE'] = self.ELLIPSE[ii].conf_get()
elif TYPE == 'PATCH':
conf['PATCH'] = self.PATCH[ii].conf_get()
elif TYPE == 'FEATURE':
conf['FEATURE'] = self.FEATURE.DATA[ii].conf_get()
else:
toconsola('Unknown layer type',wid=self.cons)
return
CONF.append(conf)
# Request output configuration filename:
#
filetypes = [('COSMO-VIEW','.cvw')]
nn = filedialog.asksaveasfilename(title='Save plot configuration',
initialdir='./',
filetypes=filetypes,
confirmoverwrite=True)
if nn is None or len(nn) == 0:
return
# Write JSON file:
#
self.save_conf(CONF,nn)
toconsola('done !',wid=self.cons)
# ==================================
def figure_read(self,filename=None):
# ==================================
''' Load the figure configuration from a JSON file'''
self.cons = None
self.first = True
if filename is None:
nn = filedialog.askopenfilename(title='Load plot configuration',
initialdir='./')
if len(nn) == 0:
return
filename = '%s' % nn
toconsola('Restoring figure configuration from '+filename,wid=self.cons)
CONF = json.load(open(filename))
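# CONF[0] holds the global PLOT configuration; CONF[1:] holds one entry per
# layer (FLD, VEC, SAIDIN, FLOAT, ...) as written by figure_save().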
# The PLOT:
#
self.PLOT.conf_set(CONF[0])
if self.PLOT.ISOBAT_cropped:
self.isobath_crop()
# Initialize matplotlib
#
self.fig = None
self.ax = None
self.drawmap = True
#try:
# self.fig = plt.figure('COSMO-VIEW canvas', \
# figsize=self.PLOT.SIZE, \
# dpi=self.PLOT.DPI.get())
#except:
# print('Failure')
#
# self.fig.canvas.mpl_connect('close_event',self.on_closing_figure)
# self.fig.canvas.callbacks.connect('button_press_event',self.on_click)
# self.ax = self.fig.add_subplot(111)
# self.drawmap = True
for ii in range(1,len(CONF)):
filename = CONF[ii]['FILENAME']
if CONF[ii]['TYPE'] == 'FLD':
# Initialize contour class:
CDF = CONTOUR(filename)
CDF.FLD.open(filename,wid=self.cons)
nt = CDF.FLD.icdf.nt # Save the number of time records
# Update from CONF attributes:
#
CDF.conf_set(CONF[ii]['CDF'])
print('CDF.FLD.varname: ', CDF.FLD.varname)
print('CDF.FLD.varid: ', CDF.FLD.varid)
if self.first:
self.K.set(CDF.K.get())
self.L.set(CDF.L.get())
self.L_LIST = list(range(CDF.FLD.icdf.nt))
self.NL = len(self.L_LIST)
# Read data:
#
#self.read_lonlat(CDF,CDF.icdf.xname,CDF.icdf.yname)
#self.DepthandDate(CDF)
CDF.FLD.get_grid()
self.DepthandDate(CDF)
if CDF.SOURCE == 'FILE':
CDF.read(update_lims=False,wid=self.cons)
elif CDF.SOURCE == 'MEAN':
CDF.FLD.mean(nt,self.K.get(),wid=self.cons)
elif CDF.SOURCE == 'VARIANCE':
print('going to calculate the variance ...')
CDF.FLD.variance(nt,self.K.get(),wid=self.cons)
self.ncdf += 1
self.CDF.append(CDF)
self.CDF_INDX.set(self.ncdf-1)
self.CDF_LIST = list(range(self.ncdf))
nt = CONF[ii]['NREC']
self.LAYERS.add(TYPE='FLD',Filename=filename,N=nt,wid=self.cons)
nm = self.LAYERS.n - 1
self.LAYERS.print()
if self.first:
#self.TFILE = '%d' % self.nfiles
self.PLOT.TLABEL.set(CDF.DATE[self.L.get()])
self.lbox.configure(state='!disabled')
self.lbox['values'] = self.L_LIST
self.DATE = CDF.DATE.copy()
self.TIME = CDF.TIME.copy()
if self.L.get() == 0:
self.bprev.configure(state='disabled')
else:
self.bprev.configure(state='normal')
if self.L.get() == self.NL - 1:
self.bnext.configure(state='disabled')
else:
self.bnext.configure(state='normal')
#self.TFILE = '%d' % self.nfiles
self.PLOT.VIDEO_L2.set(len(self.DATE)-1)
self.PLOT.SFRAME_L2.set(len(self.DATE)-1)
self.first = False
if CONF[ii]['TYPE'] == 'VEC':
print('Initialize VEC classes ...')
# Initialize classes:
#
VEC = VECTOR()
VEC.UFILENAME.set(filename)
VEC.U.nc = Dataset(filename)
VEC.U.icdf = tools.geocdf(filename, wid=self.cons)
nt = VEC.U.icdf.nt # Save the number of time records
# Check the Arakawa grid type and, if required, read the VFILENAME
#
vv = CONF[ii]['VEC']
VEC.grid_type.set(vv['GRID_TYPE'])
VEC.two_files = vv['TWO_FILES']
if VEC.two_files == 0:
vfilename = filename
else:
vfilename = vv['VFILENAME']
print('In read_figure, VEC.grid_type: ', VEC.grid_type.get())
VEC.VFILENAME.set(vfilename)
VEC.V.nc = Dataset(vfilename)
VEC.V.icdf = tools.geocdf(vfilename, wid=self.cons)
# Update from CONF attributes:
#
VEC.conf_set(CONF[ii]['VEC'])
# Read data:
#
VEC.U.get_info(wid=self.cons)
VEC.U.get_grid()
VEC.V.varname = VEC.vname.get()
VEC.V.ndims = VEC.V.icdf.ndims[VEC.V.varid]
VEC.V.get_info(wid=self.cons)
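# For A and B grids both velocity components share the same horizontal grid, so
# V reuses the variable menu read for U; otherwise V reads its own grid.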
if VEC.grid_type.get() == 'A' or VEC.grid_type.get() == 'B':
VEC.V.icdf.VAR_MENU = VEC.U.icdf.VAR_MENU[:]
else:
VEC.V.get_grid()
if VEC.grid_type.get() == 'C':
VEC.U.icdf.grid2d = True
VEC.V.icdf.grid2d = True
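# Arakawa C-grid: U and V are staggered on the cell faces, so their coordinates
# are averaged towards the cell centres; trimming the first/last row or column
# keeps both arrays the same shape (sketch assuming the usual C-grid staggering,
# U on east/west faces and V on north/south faces).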
# X-center
xmu0 = 0.5*(VEC.U.xx[:,:-1]+VEC.U.xx[:,1:])
xmv0 = VEC.V.xx[:,1:-1]
ymu0 = 0.5*(VEC.U.yy[:,:-1]+VEC.U.yy[:,1:])
ymv0 = VEC.V.yy[:,1:-1]
# Y-center
VEC.V.xx = 0.5*(xmv0[:-1,:]+xmv0[1:,:])
VEC.U.xx = xmu0[1:-1,:]
VEC.V.yy = 0.5*(ymv0[:-1,:]+ymv0[1:,:])
VEC.U.yy = ymu0[1:-1,:]
aa = VEC.U.xx.shape
print('New shape sizes: ', aa)
print('----------------------------')
VEC.K_LIST = list(range(VEC.U.icdf.nz))
VEC.L_LIST = list(range(VEC.U.icdf.nt))
VEC.Z_LIST = VEC.U.get_zlist()
VEC.T_LIST, VEC.DATE, VEC.TIME = VEC.U.get_tlist()
if VEC.SOURCE == 'FILE':
VEC.read(wid=self.cons)
elif VEC.SOURCE == 'MEAN':
VEC.U.mean(nt,self.K.get(),wid=self.cons)
VEC.V.mean(nt,self.K.get(),wid=self.cons)
# Make sure that the missing value is NaN:
_u = VEC.U.data.filled(fill_value=np.nan)
_v = VEC.V.data.filled(fill_value=np.nan)
# masked_equal cannot match NaN (NaN != NaN), so re-mask invalid values instead
u = np.ma.masked_invalid(_u); del _u
v = np.ma.masked_invalid(_v); del _v
if VEC.grid_type.get() == 'A' or VEC.grid_type.get() == 'B':
toconsola("Velocities in a A-grid",wid=self.cons)
VEC.U.data = u.copy()
VEC.V.data = v.copy()
elif VEC.grid_type.get() == 'C':
toconsola("Regrid C-grid velocities",wid=self.cons)
VEC.U.data = 0.5*(u[1:-1,:-1]+u[1:-1,1:])
VEC.V.data = 0.5*(v[:-1,1:-1]+v[1:,1:-1])
if self.first:
self.K.set(VEC.K.get())
self.L.set(VEC.L.get())
self.L_LIST = list(range(VEC.U.icdf.nt))
self.NL = len(VEC.L_LIST)
self.nvec += 1
self.VEC.append(VEC)
self.VEC_INDX.set(self.nvec-1)
self.VEC_LIST = list(range(self.nvec))
nt = CONF[ii]['NREC']
self.LAYERS.add(TYPE='VEC',Filename=filename,N=nt,wid=self.cons)
nm = self.LAYERS.n - 1
#self.LAYERS.INSEQUENCE[nm].set(CONF[ii]['INSEQUENCE'])
#self.LAYERS.SEQUENCER[nm].set(CONF[ii]['SEQUENCER'])
self.LAYERS.print()
#self.nfiles += 1
#self.FILENAMES.append(filename)
#self.FILETYPES.append('VEC')
#self.FILEORDER.append(self.nvec-1)
#self.SEQUENCES.append(tk.BooleanVar(value=CONF[ii]['SEQUENCES']))
#self.SEQLEADER.append(tk.BooleanVar(value=CONF[ii]['SEQLEADER']))
#self.SEQNTIMES.append(CONF[ii]['SEQNTIMES'])
if self.first:
#self.TFILE = '%d' % self.nfiles
self.PLOT.TLABEL.set(VEC.DATE[self.L.get()])
self.lbox.configure(state='!disabled')
self.lbox['values'] = self.L_LIST
self.DATE = VEC.DATE.copy()
self.TIME = VEC.TIME.copy()
if self.L.get() == 0:
self.bprev.configure(state='disabled')
else:
self.bprev.configure(state='normal')
if self.L.get() == self.NL - 1:
self.bnext.configure(state='disabled')
else:
self.bnext.configure(state='normal')
#self.TFILE = '%d' % self.nfiles
self.PLOT.VIDEO_L2.set(len(self.DATE)-1)
self.PLOT.SFRAME_L2.set(len(self.DATE)-1)
self.first = False
if CONF[ii]['TYPE'] == 'FLOAT':
# Initialize classes:
#
FLT = lagrangian.parameters()
toconsola(FLT.MESSAGE, wid=self.cons)
FLT.Read(filename)
# Update from CONF attributes:
FLT.conf_set(CONF[ii]['FLOAT'])
if self.first:
# Set Figure DATE and TIME reference:
self.DATE = FLT.DATE.copy()
self.TIME = FLT.TIME.copy()
self.PLOT.TLABEL.set(self.DATE[self.L.get()])
self.L_LIST = list(range(len(FLT.DATE)))
self.NL = len(self.L_LIST)
self.lbox.configure(state='!disabled')
self.lbox['values'] = self.L_LIST
if self.L.get() == 0:
self.bprev.configure(state='disabled')
else:
self.bprev.configure(state='normal')
if self.L.get() == self.NL - 1:
self.bnext.configure(state='disabled')
else:
self.bnext.configure(state='normal')
self.PLOT.VIDEO_L2.set(len(self.DATE)-1)
self.PLOT.SFRAME_L2.set(len(self.DATE)-1)
self.first = False
if len(self.TIME) > 0:
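# Optionally crop the trajectory: keep only the records whose dates fall inside
# the figure date range [self.DATE[0], self.DATE[-1]].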
if FLT.CROP.get():
print('Cropping ...')
nt = FLT.nrecords
ppi = [i for i in range(nt) if FLT.DATE[i] >= self.DATE[0]]
ppf = [i for i in range(nt) if FLT.DATE[i] > self.DATE[-1]]
pi = ppi[0]
pf = ppf[0] - 1
print('Initial index : ', pi)
print('Final index : ', pf)
print(FLT.nfloats)
print(FLT.nrecords)
if FLT.nfloats > 1:
lon = FLT.lon[pi:pf+1,:]
lat = FLT.lat[pi:pf+1,:]
date = FLT.DATE[pi:pf+1]
TIME = FLT.TIME[pi:pf+1]
FLT.lon = lon
FLT.lat = lat
FLT.DATE = date
FLT.TIME = TIME
else:
lon = FLT.lon[pi:pf+1]
lat = FLT.lat[pi:pf+1]
date = FLT.DATE[pi:pf+1]
TIME = FLT.TIME[pi:pf+1]
FLT.lon = lon
FLT.lat = lat
FLT.DATE = date
FLT.TIME = TIME
FLT.nrecords = len(date)
print('Setting MAPX and MAPY ...')
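# MAPX/MAPY hold the float positions interpolated onto the figure time axis;
# with several floats the lists are transposed so that each row corresponds to
# one time step of the figure.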
FLT.MAPX = []
FLT.MAPY = []
FLT.Fx = []
FLT.Fy = []
if FLT.nfloats > 1:
for i in range(FLT.nfloats):
FLT.Fx.append(interpolate.interp1d(FLT.TIME,np.array(FLT.lon[:,i]),
bounds_error=False, fill_value=np.NaN))
FLT.MAPX.append(list(FLT.Fx[-1](self.TIME)))
FLT.Fy.append(interpolate.interp1d(FLT.TIME,np.array(FLT.lat[:,i]),
bounds_error=False, fill_value=np.NaN))
FLT.MAPY.append(list(FLT.Fy[-1](self.TIME)))
# Transpose FLT.MAPX and FLT.MAPY:
FLT.MAPX = np.array(FLT.MAPX).T.tolist()
FLT.MAPY = np.array(FLT.MAPY).T.tolist()
else:
FLT.Fx = interpolate.interp1d(FLT.TIME,FLT.lon,
bounds_error=False, fill_value=np.NaN)
FLT.MAPX = FLT.Fx(self.TIME)
FLT.Fy = interpolate.interp1d(FLT.TIME,FLT.lat,
bounds_error=False, fill_value=np.NaN)
FLT.MAPY = FLT.Fy(self.TIME)
self.nfloat += 1
self.FLOAT.append(FLT)
self.FLOAT_INDX.set(self.nfloat-1)
self.FLOAT_LIST = list(range(self.nfloat))
nt = CONF[ii]['NREC']
self.LAYERS.add(TYPE='FLOAT',Filename=filename,N=nt,wid=self.cons)
nm = self.LAYERS.n - 1
#self.LAYERS.INSEQUENCE[nm].set(CONF[ii]['INSEQUENCE'])
#self.LAYERS.SEQUENCER[nm].set(CONF[ii]['SEQUENCER'])
self.LAYERS.print()
if CONF[ii]['TYPE'] == 'SAIDIN':
# Initialize classes:
#
self.SAIDIN.FILENAME.set(filename)
self.SAIDIN.FLD.nc = Dataset(filename)
self.SAIDIN.FLD.icdf = tools.geocdf(filename, wid=self.cons)
# Update from CONF attributes:
#
self.SAIDIN.conf_set(CONF[ii]['SAIDIN'])
# Read the data:
self.SAIDIN.FLD.x = self.SAIDIN.FLD.nc.variables['lon'][:]
self.SAIDIN.FLD.y = self.SAIDIN.FLD.nc.variables['lat'][:]
self.SAIDIN.varname.set('mcsst')
self.SAIDIN.FLD.varname = 'mcsst'
self.SAIDIN.FLD.data = self.SAIDIN.FLD.nc.variables[self.SAIDIN.FLD.varname][0,:,:].squeeze()
self.SAIDIN.FLD.xx,self.SAIDIN.FLD.yy = np.meshgrid(self.SAIDIN.FLD.x,self.SAIDIN.FLD.y)
self.DepthandDate(self.SAIDIN)
if self.SAIDIN.landmask.get():
toconsola('Applying land/sea mask ...',wid=self.cons)
_a = self.SAIDIN.FLD.data.copy()
tmp = self.SAIDIN.FLD.nc.variables['lsmask'][0,:,:].squeeze()
msk = ma.masked_where(tmp==1,tmp)
self.SAIDIN.FLD.data = ma.array(_a,mask=msk).copy()
nt = CONF[ii]['NREC']
self.LAYERS.add(TYPE='SAIDIN',Filename=filename,N=nt,wid=self.cons)
nm = self.LAYERS.n - 1
#self.LAYERS.INSEQUENCE[nm].set(CONF[ii]['INSEQUENCE'])
#self.LAYERS.SEQUENCER[nm].set(CONF[ii]['SEQUENCER'])
self.LAYERS.print()
#self.nfiles += 1
#self.FILENAMES.append(filename)
#self.FILETYPES.append('SAIDIN')
#self.FILEORDER.append(0)
#self.SEQUENCES.append(tk.BooleanVar(value=CONF[ii]['SEQUENCES']))
#self.SEQLEADER.append(tk.BooleanVar(value=CONF[ii]['SEQLEADER']))
#self.SEQNTIMES.append(CONF[ii]['SEQNTIMES'])
if self.first:
self.DATE = self.SAIDIN.DATE.copy()
self.TIME = self.SAIDIN.TIME.copy()
self.first = False
if CONF[ii]['TYPE'] == 'MARKER':
# Initialize classes:
#
MARKER = geomarker.parameters()
if filename is None:
toconsola('MARKER data from configuration file',wid=self.cons)
else:
MARKER.Read(filename)
# Update from CONF attributes:
#
MARKER.conf_set(CONF[ii]['MARKER'])
self.nmarker += 1
self.MARKER.append(MARKER)
self.MARKER_INDX.set(self.nmarker-1)
self.MARKER_LIST = list(range(self.nmarker))
self.LAYERS.add(TYPE='MARKER',Filename=filename,N=len(MARKER.lon),wid=self.cons)
if CONF[ii]['TYPE'] == 'SHAPE':
# Initialize classes:
#
SHAPE = shape.parameters()
SHAPE.Read(filename)
# Update from CONF attributes:
#
SHAPE.conf_set(CONF[ii]['SHAPE'])
if not empty(SHAPE.LABEL_KEY.get()):
SHAPE.get_name()
if SHAPE.CROP.get() and SHAPE.type == 'POINT':
toconsola('Cropping shapefile type POINT',wid=self.cons)
nsp = SHAPE.n
x = SHAPE.lon[:].copy()
y = SHAPE.lat[:].copy()
s = SHAPE.name[:].copy()
SHAPE.lon = []
SHAPE.lat = []
SHAPE.name = []
xmin = self.PLOT.WEST.get() + self.PLOT.CROP_PAD.get()
xmax = self.PLOT.EAST.get() - self.PLOT.CROP_PAD.get()
ymin = self.PLOT.SOUTH.get() + self.PLOT.CROP_PAD.get()
ymax = self.PLOT.NORTH.get() - self.PLOT.CROP_PAD.get()
for i in range(nsp):
if x[i] > xmin:
if x[i] < xmax:
if y[i] > ymin:
if y[i] < ymax:
SHAPE.lon.append(x[i])
SHAPE.lat.append(y[i])
SHAPE.name.append(s[i])
SHAPE.n = len(SHAPE.lon)
self.nshape += 1
self.SHAPE.append(SHAPE)
self.SHAPE_INDX.set(self.nshape-1)
self.SHAPE_LIST = list(range(self.nshape))
self.LAYERS.add(TYPE='SHAPE',Filename=filename,N=SHAPE.n,wid=self.cons)
self.LAYERS.print()
if CONF[ii]['TYPE'] == 'ELLIPSE':
# Initialize classes:
#
ELLIPSE = ellipse.ELLIPSE()
# Update from CONF attributes:
#
ELLIPSE.conf_set(CONF[ii]['ELLIPSE'])
if ELLIPSE.SOURCE == 'VIEWER':
toconsola('ELLIPSE data from configuration file',wid=self.cons)
else:
ELLIPSE.Read(filename)
self.nellipse += 1
self.ELLIPSE.append(ELLIPSE)
self.ELLIPSE_INDX.set(self.nellipse-1)
self.ELLIPSE_LIST = list(range(self.nellipse))
self.LAYERS.add(TYPE='ELLIPSE',Filename=filename,N=ELLIPSE.n,wid=self.cons)
self.LAYERS.print()
if CONF[ii]['TYPE'] == 'FEATURE':
# Initialize classes:
#
FEATURE = feature.parameters()
# Update from CONF attributes:
#
FEATURE.conf_set(CONF[ii]['FEATURE'])
FEATURE.Read(filename)
self.FEATURE.n += 1
self.FEATURE.DATA.append(FEATURE)
self.FEATURE.INDX.set(self.FEATURE.n-1)
self.FEATURE.LIST = list(range(self.FEATURE.n))
self.LAYERS.add(TYPE='FEATURE',Filename=filename,N=FEATURE.n,wid=self.cons)
self.LAYERS.print()
if CONF[ii]['TYPE'] == 'PATCH':
# Initialize classes:
#
PATCH = patch.PATCH()
# Update from CONF attributes:
#
PATCH.conf_set(CONF[ii]['PATCH'])
self.npatch += 1
self.PATCH.append(PATCH)
self.PATCH_INDX.set(self.npatch-1)
self.PATCH_LIST = list(range(self.npatch))
self.LAYERS.add(TYPE='PATCH',Filename=None,N=1,wid=self.cons)
self.LAYERS.print()
self.make_plot()
# ===========================
def save_conf(self,conf,filename):
# ===========================
# Write JSON file:
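# The configuration is written as pretty-printed UTF-8 JSON (2-space indent,
# sorted keys) so it can be inspected and edited by hand.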
with io.open(filename,'w',encoding='utf8') as outfile:
str_ = json.dumps(conf,ensure_ascii=False, \
sort_keys=True, \
indent=2, \
separators=(',',': '))
outfile.write(to_unicode(str_)+'\n')
# =============
def save(self):
# =============
'''If the output filename has already been set, save the plot; otherwise prompt for one'''
if self.PLOT.OUT_FILENAME is None:
self.saveas()
# If output filename exist, we save:
if self.PLOT.OUT_FILENAME is not None:
toconsola('Saving in '+self.PLOT.OUT_FILENAME,wid=self.cons)
self.fig.savefig(self.PLOT.OUT_FILENAME,
dpi=self.PLOT.DPI.get(),
bbox_inches='tight')
# ===============
def saveas(self):
# ===============
'''Get the output filename and save the plot'''
filetypes = [('PNG file','.png'),('EPS file','.eps'),('PDF file','.pdf')]
nn = tk.filedialog.asksaveasfilename(title='Save',
initialdir='./',
filetypes=filetypes,
confirmoverwrite=True)
if len(nn) == 0:
self.PLOT.OUT_FILENAME = None
else:
self.PLOT.OUT_FILENAME = '%s' % nn
toconsola('Saving in '+self.PLOT.OUT_FILENAME,wid=self.cons)
self.fig.savefig(self.PLOT.OUT_FILENAME,
dpi=self.PLOT.DPI.get(),
bbox_inches='tight')
# ======================
def widget_config(self):
# ======================
'''Options for the widget font type and size'''
#global WINDOW_FONT_TYPE_BACKUP
#global WINDOW_FONT_SIZE_BACKUP
def _cancel():
# ===========
self.PLOT.WINDOW_FONT_TYPE.set(WINDOW_FONT_TYPE_BACKUP)
self.PLOT.WINDOW_FONT_SIZE.set(WINDOW_FONT_SIZE_BACKUP)
self.Window_widgetconfig.destroy()
self.Window_widgetconfig = None
def _close():
# ===========
self.Window_widgetconfig.destroy()
self.Window_widgetconfig = None
def _apply():
# ===========
font_name = self.PLOT.WINDOW_FONT_TYPE.get().split()[0]
font = '%s %d' % (font_name, self.PLOT.WINDOW_FONT_SIZE.get())
self.master.option_add('*Font',font)
self.default_font.configure(family=self.PLOT.WINDOW_FONT_TYPE.get().
split()[0])
self.default_font.configure(size=self.PLOT.WINDOW_FONT_SIZE.get())
self.default_font2.configure(size=self.PLOT.WINDOW_FONT_SIZE.get())
if self.Window_mapconfig is not None:
self.Window_mapconfig.destroy()
self.Window_mapconfig = None
self.map_config()
def _loadconf():
# =============
'''Load the Widget config parameters'''
conf = self.PLOT.conf_load(self.PLOT.FILECONF)
self.PLOT.WINDOW_FONT_TYPE.set(conf['WINDOW_FONT_TYPE'])
self.PLOT.WINDOW_FONT_SIZE.set(conf['WINDOW_FONT_SIZE'])
def _saveconf():
# =============
'''Save the Widget config parameters'''
if self.widget_nowarning.get() == False:
ans = askforpermission(self.Window_widgetconfig, \
'Are you sure ?', \
self.widget_nowarning)
if ans == False:
return
toconsola('Updating widget font default values',wid=self.cons)
conf = self.PLOT.conf_load(self.PLOT.FILECONF)
conf['WINDOW_FONT_TYPE'] = self.PLOT.WINDOW_FONT_TYPE.get()
conf['WINDOW_FONT_SIZE'] = self.PLOT.WINDOW_FONT_SIZE.get()
self.save_conf(conf,self.PLOT.FILECONF)
if self.Window_widgetconfig is not None:
self.Window_widgetconfig.lift()
return
WINDOW_FONT_TYPE_BACKUP = self.PLOT.WINDOW_FONT_TYPE.get()
WINDOW_FONT_SIZE_BACKUP = self.PLOT.WINDOW_FONT_SIZE.get()
self.Window_widgetconfig = tk.Toplevel(self.master)
self.Window_widgetconfig.title('Widget options')
self.Window_widgetconfig.resizable(width=True,height=True)
self.Window_widgetconfig.protocol('WM_DELETE_WINDOW',_close)
self.widget_nowarning = tk.BooleanVar()
self.widget_nowarning.set(False)
menubar = tk.Menu(self.Window_widgetconfig)
menu = tk.Menu(menubar,tearoff=0)
menubar.add_cascade(label='Configuration',menu=menu)
menu.add_command(label='Restore',command=_loadconf)
menu.add_command(label='Save',command=_saveconf)
try:
self.Window_widgetconfig.config(menu=menubar)
except AttributeError:
# master is a toplevel window (Python 2.4/Tkinter 1.63)
self.Window_widgetconfig.tk.call(self.Window_widgetconfig, "config", "-menu", menubar)
F0 = ttk.Frame(self.Window_widgetconfig,borderwidth=5,padding=5)
ttk.Label(F0,text='Font style').grid(row=0,column=0,padx=3,sticky='w')
mp = ttk.Combobox(F0,textvariable=self.PLOT.WINDOW_FONT_TYPE,values=self.FONT_TYPES,width=35)
mp.grid(row=0,column=1,columnspan=7,padx=3,sticky='w')
ttk.Label(F0,text='Font size').grid(row=1,column=0,padx=3,sticky='w')
mp = ttk.Combobox(F0,textvariable=self.PLOT.WINDOW_FONT_SIZE,values=self.FONT_SIZES,width=5)
mp.grid(row=1,column=1,columnspan=1,padx=3,sticky='w')
ttk.Button(F0,text='Cancel',command=_cancel).grid(row=2,column=5,padx=3)
ttk.Button(F0,text='Apply',command=_apply).grid(row=2,column=6,padx=3)
ttk.Button(F0,text='Close',command=_close).grid(row=2,column=7,padx=3)
F0.grid()
# =====================
def isobath_crop(self):
# =====================
'''Remove isobath points that fall outside the (slightly padded) plot domain'''
west = self.PLOT.WEST.get() - 5
east = self.PLOT.EAST.get() + 5
south = self.PLOT.SOUTH.get() - 5
north = self.PLOT.NORTH.get() + 5
toconsola('Cropping isobaths',wid=self.cons)
for i in range(self.PLOT.nisobat):
if self.PLOT.ISOBAT_SHOW[i]:
xo = self.PLOT.ISOBAT_DATA[i]['lon']
yo = self.PLOT.ISOBAT_DATA[i]['lat']
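# Loop backwards because points are deleted in place: iterating in reverse keeps
# the remaining indices valid after each deletion.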
for ii in range(len(xo)-1,-1,-1):
if xo[ii] < west:
del xo[ii]
del yo[ii]
elif xo[ii] > east:
del xo[ii]
del yo[ii]
elif yo[ii] < south:
del xo[ii]
del yo[ii]
elif yo[ii] > north:
del xo[ii]
del yo[ii]
else:
pass
self.PLOT.ISOBAT_DATA[i]['lon'] = xo
self.PLOT.ISOBAT_DATA[i]['lat'] = yo
toconsola('done',wid=self.cons)
self.PLOT.ISOBAT_cropped = True
# ======================
def legend_config(self):
# ======================
'''Options for the isobath and marker legends'''
def _apply():
# ===========
self.make_plot()
def _close():
# ==========
self.make_plot()
self.Window_legendconfig.destroy()
self.Window_legendconfig = None
def _loadconf():
# =============
'''Load map configuration'''
conf = self.PLOT.conf_load(self.PLOT.FILECONF)
self.PLOT.conf_set(conf)
def _saveconf():
# =============
'''Save current map configuration as default'''
toconsola('Updating map default values',wid=self.cons)
conf = self.PLOT.conf_get()
self.save_conf(conf,self.PLOT.FILECONF)
self.Window_legendconfig = tk.Toplevel(self.master)
self.Window_legendconfig.title('Legend options')
self.Window_legendconfig.resizable(width=True,height=True)
self.Window_legendconfig.protocol('WM_DELETE_WINDOW',_close)
self.map_nowarning = tk.BooleanVar()
self.map_nowarning.set(False)
menubar = tk.Menu(self.Window_legendconfig)
menu = tk.Menu(menubar,tearoff=0)
menubar.add_cascade(label='Configuration',menu=menu)
menu.add_command(label='Restore',command=_loadconf)
menu.add_command(label='Save',command=_saveconf)
try:
self.Window_legendconfig.config(menu=menubar)
except AttributeError:
# master is a toplevel window (Python 2.4/Tkinter 1.63)
self.Window_legendconfig.tk.call(self.Window_legendconfig, "config", "-menu", menubar)
# Define tabs
self.legendtabs = ttk.Notebook(self.Window_legendconfig)
page1 = ttk.Frame(self.legendtabs)
page2 = ttk.Frame(self.legendtabs)
self.legendtabs.add(page1,text='Isobaths')
self.legendtabs.add(page2,text='Markers')
self.PLOT.ISOBAT_LEGEND.Winconfig(page1)
self.PLOT.LEGEND.Winconfig(page2)
self.legendtabs.grid()
frame5 = ttk.Frame(self.Window_legendconfig,borderwidth=5,padding=5)
ttk.Button(frame5,text='Apply',command=_apply).grid(row=0,column=5,padx=3)
ttk.Button(frame5,text='Close',command=_close).grid(row=0,column=6,padx=3)
frame5.grid(row=24,column=0,columnspan=5)
# ===================
def map_config(self):
# ===================
# Options for Map limits and colors
# EG Now the list of projections is recovered from map_proj in tools
# The default resolution is 50m (Natural Earth 1:50 million scale)
# EG pdict = {} substituted by map_proj
pdict = map_proj('defs')
rdict = {'110m':'Crude','50m':'Intermediate','10m':'High'}
LEGEND_LOCATION_LIST = ['best','upper right','upper left','lower left', \
'lower right', 'right', 'center left', 'center right', \
'lower center', 'upper center', 'center']
LEGEND_MODE_LIST = ['None','expand']
PSIZE = tk.StringVar()
PSIZE.set(str(self.PLOT.SIZE))
BACKUP = self.PLOT.conf_get()
font_norm = tkfont.Font(font='TkDefaultFont').copy()
font_bold = tkfont.Font(font='TkDefaultFont').copy()
font_bold['weight']='bold'
def _cancel():
self.PLOT.conf_set(BACKUP)
self.Window_mapconfig.destroy()
self.Window_mapconfig = None
if self.fig is not None:
self.make_plot()
def _close():
self.Window_mapconfig.destroy()
self.Window_mapconfig = None
#EG Projection selection
def pselection():
''' Set the appropriate enable/disable state for the fields of the selected
projection: collect all Entry widgets, look up the state array of the
selected projection, and update the widget states accordingly.'''
entries_id = []
wids = self.fh.winfo_children()
#entries_id = map(lambda x: x if isinstance(x,tk.ttk.Entry), wids)
#print(self.fh.winfo_children())
for wid in wids:
if isinstance(wid,tk.ttk.Entry):
entries_id.append(wid)
new_proj = self.PLOT.MAP_PROJECTION.get()
mpl.config(text=new_proj,width=25)
proj_state = map_proj(new_proj)
var_state = list(map(lambda x: "normal" if x==1 else "disabled", proj_state["state"]))
toconsola("New PROJECTION selected: "+self.PLOT.MAP_PROJECTION.get(),wid=self.cons)
for i in range(len(entries_id)): entries_id[i]["state"]=var_state[i]
self.drawmap = True
def rselection():
mrl.config(text=rdict[self.PLOT.MAP_RESOLUTION.get()],width=10)
self.drawmap = True
# EG deprecated ?
def icselection():
ii = self.PLOT.ISOBAT_LABEL.index(self.PLOT.ISOBAT_ZPOINTER.get())
backup = self.PLOT.ISOBAT_COLOR[ii].get()
rgb, hx = askcolor(color=self.PLOT.ISOBAT_COLOR[ii].get(),
parent=self.master)
if hx is None:
self.PLOT.ISOBAT_COLOR[ii].set(backup)
else:
self.PLOT.ISOBAT_COLOR[ii].set(hx)
def lims_reset():
# ================
''' Resets the domain and grid to the default values'''
conf = self.PLOT.conf_load(self.PLOT.FILECONF)
self.PLOT.WEST.set(conf['WEST'])
self.PLOT.EAST.set(conf['EAST'])
self.PLOT.SOUTH.set(conf['SOUTH'])
self.PLOT.NORTH.set(conf['NORTH'])
self.PLOT.MERIDIAN_INI.set(conf['MERIDIAN_INI'])
self.PLOT.MERIDIAN_FIN.set(conf['MERIDIAN_FIN'])
self.PLOT.MERIDIAN_INT.set(conf['MERIDIAN_INT'])
self.PLOT.PARALLEL_INI.set(conf['PARALLEL_INI'])
self.PLOT.PARALLEL_FIN.set(conf['PARALLEL_FIN'])
self.PLOT.PARALLEL_INT.set(conf['PARALLEL_INT'])
self.drawmap = True
self.make_plot()
def iload():
# =================
'''Load the selected isobaths from external files'''
for i in range(self.PLOT.nisobat):
if self.PLOT.ISOBAT_SELEC[i].get():
filename = self.PLOT.ISOBAT_PATH.get() + \
'/%04d' % self.PLOT.ISOBAT_Z[i] + '.dat'
toconsola("New PROJECTION selected: "+self.PLOT.MAP_PROJECTION.get(),wid=self.cons)
try:
self.PLOT.ISOBAT_DATA[i] = read_lines(filename)
self.PLOT.ISOBAT_SHOW[i] = True
wwr.configure(font=font_norm)
wwr.configure(foreground='#125704')
wwr['text'] = 'Isobaths have been loaded'
self.PLOT.ISOBAT_loaded = True
except:
messagebox.showinfo(message='Error: unable to read '+filename)
self.PLOT.ISOBAT_DATA[i] = None
self.PLOT.ISOBAT_SHOW[i] = False
self.PLOT.ISOBAT_NPLOT = sum(self.PLOT.ISOBAT_SHOW)
if self.PLOT.ISOBAT_loaded:
wlr.configure(state='enabled')
else:
wlr.configure(state='disabled')
self.PLOT.ISOBAT_cropped = False
def _pselect():
# =============
nn = tk.filedialog.askdirectory(parent=self.Window_mapconfig)
if not empty(nn):
self.PLOT.ISOBAT_PATH.set(nn)
def select_isobaths():
# ====================
some_selected = False
for i in range(self.PLOT.nisobat):
if self.PLOT.ISOBAT_SELEC[i].get():
some_selected = True
if some_selected:
wwr['text'] = 'Isobaths need to be loaded'
wwr.configure(font=font_bold)
wwr.configure(foreground='red')
self.PLOT.ISOBAT_selected = True
self.PLOT.ISOBAT_loaded = False
else:
wwr['text'] = 'No isobaths have been selected'
wwr.configure(font=font_norm)
wwr.configure(foreground='black')
self.PLOT.ISOBAT_selected = False
if self.PLOT.ISOBAT_selected:
wli.configure(state='enabled')
else:
wli.configure(state='disabled')
for i in range(self.PLOT.nisobat):
self.PLOT.ISOBAT_DATA[i] = None
self.PLOT.ISOBAT_SHOW[i] = False
self.PLOT.ISOBAT_NPLOT = 0
#EG We need to set a new projection object
def _updated():
# =============
self.drawmap = True
self.make_plot()
def _apply():
# ===========
toconsola("(Apply) Drawing...wait",wid=self.cons)
self.make_plot()
toconsola("Done !",wid=self.cons)
def _done():
# ==========
toconsola("(Done) Drawing...wait",wid=self.cons)
self.make_plot()
toconsola("Done !",wid=self.cons)
self.Window_mapconfig.destroy()
self.Window_mapconfig = None
def _loadconf():
# =============
'''Load map configuration'''
conf = self.PLOT.conf_load(self.PLOT.FILECONF)
self.PLOT.conf_set(conf)
def _saveconf():
# =============
'''Save current map configuration as default'''
toconsola("Saving map default values",wid=self.cons)
conf = self.PLOT.conf_get()
self.save_conf(conf,self.PLOT.FILECONF)
def legend_location():
# ====================
''' Process the location combobox'''
location_name = loc.get()
self.PLOT.LEGEND.LOC.set(LEGEND_LOCATION_LIST.index(location_name))
def legend_mode():
# ================
''' Process the mode combobox'''
mode_name = mod.get()
self.PLOT.LEGEND.MODE.set(LEGEND_MODE_LIST.index(mode_name))
def sizeupdate():
# ===============
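# The figure size string is parsed with ast.literal_eval, so it must be a valid
# Python literal, e.g. a tuple such as "(9.0, 6.0)" (illustrative value); the
# current figure is then closed and rebuilt with the new size.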
self.PLOT.SIZE = ast.literal_eval(PSIZE.get())
plt.close(self.fig)
self.fig = None
self.make_plot()
def _calculator():
# ================
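# Estimate the map width and height from the domain limits: the haversine
# distance is evaluated along the central parallel (width) and the central
# meridian (height).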
SOUTH = float(self.PLOT.SOUTH.get())
NORTH = float(self.PLOT.NORTH.get())
WEST = float(self.PLOT.WEST.get())
EAST = float(self.PLOT.EAST.get())
LON_0 = 0.5*(WEST+EAST)
LAT_0 = 0.5*(SOUTH+NORTH)
width = haversine((WEST,LAT_0),(EAST,LAT_0))
height = haversine((LON_0,SOUTH),(LON_0,NORTH))
self.PLOT.LON_0.set(LON_0)
self.PLOT.LAT_0.set(LAT_0)
self.PLOT.WIDTH.set(width)
self.PLOT.HEIGHT.set(height)
if self.Window_mapconfig is not None:
self.Window_mapconfig.lift()
return
self.Window_mapconfig = tk.Toplevel(self.master)
self.Window_mapconfig.title('Map options')
self.Window_mapconfig.resizable(width=True,height=True)
self.Window_mapconfig.protocol('WM_DELETE_WINDOW',_close)
self.map_nowarning = tk.BooleanVar()
self.map_nowarning.set(False)
menubar = tk.Menu(self.Window_mapconfig)
menu = tk.Menu(menubar,tearoff=0)
menubar.add_cascade(label='Configuration',menu=menu)
menu.add_command(label='Restore',command=_loadconf)
menu.add_command(label='Save',command=_saveconf)
try:
self.Window_mapconfig.config(menu=menubar)
except AttributeError:
# master is a toplevel window (Python 2.4/Tkinter 1.63)
self.Window_mapconfig.tk.call(self.Window_mapconfig, "config", "-menu", menubar)
# Define tabs
maptabs = ttk.Notebook(self.Window_mapconfig)
page1 = ttk.Frame(maptabs)
page2 = ttk.Frame(maptabs)
page3 = ttk.Frame(maptabs)
page4 = ttk.Frame(maptabs)
page5 = ttk.Frame(maptabs)
page7 = ttk.Frame(maptabs)
page8 = ttk.Frame(maptabs)
maptabs.add(page1,text=' Domain ')
maptabs.add(page2,text=' Background ')
maptabs.add(page3,text=' Isobaths ')
maptabs.add(page4,text=' Grid ')
maptabs.add(page5,text=' Labels ')
maptabs.add(page7,text=' Scale ')
maptabs.add(page8,text=' Other ')
#EG We get the projection from tools.map_proj instead of a list
PROJECTION_LIST = map_proj('lista')
#EG PAGE 1
f1 = ttk.Frame(page1,borderwidth=5,padding=5)
ttk.Label(f1,text='Map Projection').grid(row=0,column=0,padx=3,sticky='w')
mp = ttk.Combobox(f1,
textvariable=self.PLOT.MAP_PROJECTION,
values=PROJECTION_LIST,width=14)
mp.grid(row=0,column=1,padx=3)
mp.bind('<<ComboboxSelected>>',lambda e: pselection())
mpl = ttk.Label(f1,
text=pdict[self.PLOT.MAP_PROJECTION.get()],width=40)
mpl.grid(row=0,column=2,columnspan=3,padx=3)
ttk.Label(f1,text='Map Resolution').grid(row=1,column=0,padx=3,sticky='w')
#EG values=('c','l','i','h','f') changed to ('110m','50m','10m')
mr = ttk.Combobox(f1,
textvariable=self.PLOT.MAP_RESOLUTION,
values=('110m','50m','10m'),width=7,justify="center")
mr.grid(row=1,column=1,padx=3,sticky='w')
mr.bind('<<ComboboxSelected>>',lambda e: rselection())
mrl = ttk.Label(f1,text=rdict[self.PLOT.MAP_RESOLUTION.get()],width=10)
mrl.grid(row=1,column=2,columnspan=2,padx=3,sticky='w')
ttk.Label(f1,text='EPSG').grid(row=2,column=0,padx=3,sticky='w')
ttk.Entry(f1,textvariable=self.PLOT.EPSG,width=7,justify="center").grid(row=2,column=1,padx=3,sticky='w')
f1.grid(row=0,column=0)
f2 = ttk.Frame(page1,borderwidth=5,padding=5,relief='sunken')
ttk.Label(f2,text='Plot limits',font=font_bold).grid(row=0,column=0,padx=3,sticky='w')
ttk.Label(f2,text='North').grid(row=1,column=3,pady=5,padx=3)
eno = ttk.Entry(f2,textvariable=self.PLOT.NORTH,width=10,justify="center")
eno.grid(row=2,column=3,pady=5,padx=3)
eno.bind('<Return>',lambda e:_updated())
ttk.Label(f2,text='West').grid(row=3,column=1,pady=5,padx=3)
ewe = ttk.Entry(f2,textvariable=self.PLOT.WEST,width=10,justify="center")
ewe.grid(row=3,column=2,pady=5,padx=3)
ewe.bind('<Return>',lambda e:_updated())
eea = ttk.Entry(f2,textvariable=self.PLOT.EAST,width=10,justify="center")
eea.grid(row=3,column=4,pady=5,padx=3,sticky='w')
eea.bind('<Return>',lambda e:_updated())
ttk.Label(f2,text='East').grid(row=3,column=5,pady=5,padx=3)
eso = ttk.Entry(f2,textvariable=self.PLOT.SOUTH,width=10,justify="center")
eso.grid(row=4,column=3,pady=5,padx=3)
eso.bind('<Return>',lambda e:_updated())
ttk.Label(f2,text='South').grid(row=5,column=3,pady=5,padx=3)
ttk.Button(f2,text='Reset',command=lims_reset).grid(row=6,column=5)
f2.grid(row=1,column=0,padx=30,sticky='w')
#EG We recover the full properties of each projection
proj_state = map_proj(self.PLOT.MAP_PROJECTION.get())
var_state = list(map(lambda x: "normal" if x==1 else "disabled", proj_state["state"]))
self.params = {"central_longitude":self.PLOT.MAP_PROJ_LAT_0.get(),
"central_latitude":self.PLOT.MAP_PROJ_LON_0.get(),
"min_latitude":self.PLOT.MAP_PROJ_MIN_LAT.get(),
"max_latitude":self.PLOT.MAP_PROJ_MAX_LAT.get(),
"false_easting":self.PLOT.MAP_PROJ_F_EAST.get(),
"false_northing":self.PLOT.MAP_PROJ_F_NORTH.get(),
"latitude_true_scale":self.PLOT.MAP_PROJ_LAT_T_SCA.get(),
"true_scale_latitude":self.PLOT.MAP_PROJ_T_SCA_LAT.get(),
"scale_factor":self.PLOT.MAP_PROJ_SCA_FAC.get(),
"satellite_height":self.PLOT.MAP_PROJ_SATELLITE_HEIGHT.get(),
"sweep_axis":self.PLOT.MAP_PROJ_SWEEP_AXIS.get()}
var_proj = [self.PLOT.MAP_PROJ_LAT_0, self.PLOT.MAP_PROJ_LON_0,
self.PLOT.MAP_PROJ_F_EAST, self.PLOT.MAP_PROJ_F_NORTH,
self.PLOT.MAP_PROJ_MIN_LAT, self.PLOT.MAP_PROJ_MAX_LAT,
self.PLOT.MAP_PROJ_LAT_T_SCA, self.PLOT.MAP_PROJ_T_SCA_LAT,
self.PLOT.MAP_PROJ_SCA_FAC, self.PLOT.MAP_PROJ_SWEEP_AXIS,
self.PLOT.MAP_PROJ_SATELLITE_HEIGHT]
var_txt = ['Central Longitude','Central Latitude',
'False Easting','False Northing',
'Min. Latitude','Max. Latitude',
'Latitude true scale','True scale Latitude',
'Scale Factor','Sweep Axis',
'Satellite Height']
self.fh = ttk.Frame(page1,borderwidth=5,padding=5)
ivar = 0
for i in range(5):
ivar = 2*i
ttk.Label(self.fh,text=var_txt[ivar]).grid(row=i,column=0,padx=3,sticky='e')
ttk.Entry(self.fh,textvariable=var_proj[ivar],state=var_state[ivar], width=10). \
grid(row=i,column=1,padx=3,sticky='w')
ttk.Label(self.fh,text=var_txt[ivar+1]).grid(row=i,column=2,padx=3,sticky='e')
ttk.Entry(self.fh,textvariable=var_proj[ivar+1],state=var_state[ivar+1], width=10). \
grid(row=i,column=3,padx=3,sticky='w')
ttk.Label(self.fh,text=var_txt[10]).grid(row=5,column=0,padx=3,sticky='e')
ttk.Entry(self.fh,textvariable=var_proj[10], state=var_state[10], width=10). \
grid(row=5,column=1,padx=3,sticky='w')
ttk.Button(self.fh,text='Update projection',command=_updated). \
grid(row=6,column=0,pady=10,columnspan=4, sticky='ew')
'''
ttk.Label(fh,text='Width').grid(row=0,column=0,padx=3,sticky='e')
ttk.Entry(fh,textvariable=self.PLOT.WIDTH, width=10).grid(row=0,column=1,padx=3,sticky='w')
ttk.Label(fh,text='Optional map keywords').grid(row=0,column=2,padx=3,sticky='e')
ttk.Button(fh,text='Estimate', command=_calculator).grid(row=0,column=3,padx=3,sticky='ew')
ttk.Label(fh,text='Height').grid(row=1,column=0,padx=3,sticky='e')
ttk.Entry(fh,textvariable=self.PLOT.HEIGHT, width=10).grid(row=1,column=1,padx=3,sticky='w')
ttk.Label(fh,text='Lon_0').grid(row=2,column=0,padx=3,sticky='e')
ttk.Entry(fh,textvariable=self.PLOT.LON_0, width=10).grid(row=2,column=1,padx=3,sticky='w')
ttk.Label(fh,text='Lat_0').grid(row=3,column=0,padx=3,sticky='e')
ttk.Entry(fh,textvariable=self.PLOT.LAT_0, width=10).grid(row=3,column=1,padx=3,sticky='w')
ttk.Label(fh,text='Satellite height').grid(row=4,column=0,padx=3,sticky='e')
ttk.Entry(fh,textvariable=self.PLOT.SATELLITE_HEIGHT, width=10).grid(row=4,column=1,padx=3,sticky='w')
'''
self.fh.grid(row=2,column=0,padx=15,sticky='ew')
#EG PAGE 2, Background and Features
f3 = ttk.Frame(page2,borderwidth=5,padding=5)
# Styles
self.sland, self.swater = ttk.Style(), ttk.Style()
self.scoast, self.scount = ttk.Style(), ttk.Style()
self.sriv, scenter = ttk.Style(), ttk.Style()
self.sland.configure("sland.TLabel",background=self.PLOT.LAND_COLOR.get(),anchor="center")
self.swater.configure("swater.TLabel",background=self.PLOT.WATER_COLOR.get(),anchor="center")
self.scoast.configure("scoast.TLabel",background=self.PLOT.COASTLINE_COLOR.get(),anchor="center")
self.scount.configure("scount.TLabel",background=self.PLOT.COUNTRYLINE_COLOR.get(),anchor="center")
self.sriv.configure("sriv.TLabel",background=self.PLOT.RIVERS_COLOR.get(),anchor="center")
scenter.configure("scenter.TEntry",anchor="center")
tpad = ttk.Style()
tpad.configure("tpad.TLabelframe",padding=[20,5,5,10])
#Land & Sea
f3_b=ttk.LabelFrame(f3,text='Basic',borderwidth=5,style='tpad.TLabelframe')
ttk.Label(f3_b,text='Continents').grid(row=0,column=0,padx=5)
self.LLabel = ttk.Label(f3_b,textvariable=self.PLOT.LAND_COLOR,width=7,style="sland.TLabel")
self.LLabel.grid(row=0,column=1,padx=5)
ttk.Button(f3_b,text='Select',command=lambda:colsel(self.PLOT.LAND_COLOR, \
self.sland,self.LLabel,"sland.TLabel",master=self.Window_mapconfig)). \
grid(row=0,column=2,padx=5,sticky='w')
ttk.Label(f3_b,text='Zorder').grid(row=0,column=3,padx=5,sticky='e')
ttk.Entry(f3_b,textvariable=self.PLOT.LAND_ZORDER,width=4).grid(row=0,column=4,padx=5,sticky='e')
ttk.Label(f3_b,text='Sea').grid(row=1,column=0,padx=5)
self.WLabel = ttk.Label(f3_b,textvariable=self.PLOT.WATER_COLOR,width=7,style="swater.TLabel")
self.WLabel.grid(row=1,column=1,padx=5)
ttk.Button(f3_b,text='Select',command=lambda:colsel(self.PLOT.WATER_COLOR, \
self.swater,self.WLabel,"swater.TLabel",master=self.Window_mapconfig)). \
grid(row=1,column=2,padx=5,sticky='w')
ttk.Label(f3_b,text='Zorder').grid(row=1,column=3,padx=5,sticky='e')
ttk.Entry(f3_b,textvariable=self.PLOT.WATER_ZORDER,width=4).grid(row=1,column=4,padx=5,sticky='e')
f3_b.grid(row=0,column=0,padx=5,pady=10,sticky='ewsn')
# Features: Coastlines
f3_c=ttk.LabelFrame(f3,text='Coastlines',borderwidth=5,style='tpad.TLabelframe')
ttk.Label(f3_c,text='').grid(row=0,column=0)
ttk.Checkbutton(f3_c,text=' Show',variable=self.PLOT.COASTLINE_SHOW). \
grid(row=0,column=1,columnspan=2,padx=3,sticky='w')
ttk.Label(f3_c,text='Width').grid(row=0,column=3)
ttk.Label(f3_c,text='Color').grid(row=0,column=4,columnspan=2)
ttk.Label(f3_c,text='').grid(row=1,column=0)
ttk.Label(f3_c,text='Natural-Earth').grid(row=1,column=1,padx=3,sticky='w')
ttk.Radiobutton(f3_c,text=' Show',variable=self.PLOT.COASTLINE_SOURCE,value=1).\
grid(row=1,column=2,padx=7)
ttk.Entry(f3_c,textvariable=self.PLOT.COASTLINE_WIDTH,width=7,justify="center"). \
grid(row=1,column=3,padx=3,sticky='we')
self.CoLabel = ttk.Label(f3_c,textvariable=self.PLOT.COASTLINE_COLOR,width=7,style="scoast.TLabel")
self.CoLabel.grid(row=1,column=4,padx=3)
ttk.Button(f3_c,text='Select',command=lambda:colsel(self.PLOT.COASTLINE_COLOR, \
self.scoast,self.CoLabel,"scoast.TLabel",master=self.Window_mapconfig)). \
grid(row=1,column=5,padx=3,sticky='ew')
ttk.Label(f3_c,text='EMODNET').grid(row=2,column=1,padx=5,sticky='w')
ttk.Radiobutton(f3_c,text=' Show',variable=self.PLOT.COASTLINE_SOURCE,value=2). \
grid(row=2,column=2,padx=5)
ttk.Label(f3_c,text='Zorder').grid(row=2,column=3,padx=5,sticky='e')
ttk.Entry(f3_c,textvariable=self.PLOT.COASTLINE_ZORDER,width=4).grid(row=2,column=4,padx=5,sticky='e')
f3_c.grid(row=1,column=0,padx=5,pady=10,sticky='ewsn')
# Miscellanea
f3_m=ttk.LabelFrame(f3,text='Miscellanea',borderwidth=5,style='tpad.TLabelframe')
ttk.Label(f3_m,text='Countryline').grid(row=0,column=0,sticky='w')
ttk.Checkbutton(f3_m,text=' Show',variable=self.PLOT.COUNTRYLINE_SHOW). \
grid(row=0,column=1,padx=3)
ttk.Entry(f3_m,textvariable=self.PLOT.COUNTRYLINE_WIDTH,width=7,justify="center"). \
grid(row=0,column=2,padx=3,sticky='we')
self.CouLabel = ttk.Label(f3_m,textvariable=self.PLOT.COUNTRYLINE_COLOR,width=7,style="scount.TLabel")
self.CouLabel.grid(row=0,column=3,padx=3)
ttk.Button(f3_m,text='Select',command=lambda:colsel(self.PLOT.COUNTRYLINE_COLOR, \
self.scount,self.CouLabel,"scount.TLabel",master=self.Window_mapconfig)).\
grid(row=0,column=4,padx=3,sticky='ew')
ttk.Label(f3_m,text='Rivers').grid(row=1,column=0,padx=3,sticky='w')
ttk.Checkbutton(f3_m,text=' Show',variable=self.PLOT.RIVERS_SHOW). \
grid(row=1,column=1,padx=3)
ttk.Entry(f3_m,textvariable=self.PLOT.RIVERS_WIDTH,width=7,justify="center"). \
grid(row=1,column=2,padx=3,sticky='we')
self.RLabel = ttk.Label(f3_m,textvariable=self.PLOT.RIVERS_COLOR,width=7,style="sriv.TLabel")
self.RLabel.grid(row=1,column=3,padx=3)
ttk.Button(f3_m,text='Select',command=lambda:colsel(self.PLOT.RIVERS_COLOR, \
self.sriv,self.RLabel,"sriv.TLabel",master=self.Window_mapconfig)). \
grid(row=1,column=4,padx=3,sticky='ew')
f3_m.grid(row=2,column=0,padx=5,pady=10,sticky='ewsn')
#EG RELIEF AND ISOBATHS
f3_r = ttk.LabelFrame(f3,text='Earth Relief (WMS Tiles)',borderwidth=5,style='tpad.TLabelframe')
ttk.Checkbutton(f3_r,text=' Show',variable=self.PLOT.RELIEF_SHOW). \
grid(row=0,column=0,columnspan=3,padx=3,sticky='w')
ttk.Label(f3_r,text='GEBCO service').grid(row=1,column=0,padx=5,sticky='w')
ttk.Radiobutton(f3_r,text=' Show', variable=self.PLOT.RELIEF, value=1).\
grid(row=1,column=1,padx=3)
ttk.Label(f3_r,text='Land & Ocean relief',width=25). \
grid(row=1,column=2,padx=3)
ttk.Label(f3_r,text='EMODNET service').grid(row=2,column=0,padx=5,pady=10,sticky='w')
ttk.Radiobutton(f3_r ,text=' Show',variable=self.PLOT.RELIEF, value=2). \
grid(row=2,column=1,padx=5,pady=10)
ttk.Label(f3_r,text='Land & Ocean relief',width=25). \
grid(row=2,column=2,padx=3,pady=10)
f3_r.grid(row=3,column=0,padx=5,pady=10,sticky='ewsn')
f3.grid()
#EG PAGE 3, ISOBATHS
f4a=ttk.Frame(page3,borderwidth=5,padding=5)
ttk.Label(f4a,text='EMODNET Depth contours').grid(row=0,column=0,padx=5,pady=10,sticky='w')
ttk.Checkbutton(f4a,text=' Show',variable=self.PLOT.EMODNET_ISO). \
grid(row=0,column=1,padx=5,pady=10,columnspan=2)
f4a.grid(row=0,column=0,pady=10,padx=5,ipadx=5,sticky='w')
f4aa=ttk.LabelFrame(page3,text='Custom Isobaths (meters)',borderwidth=5,padding=5)
ttk.Label(f4aa,text='Path:',justify='right').grid(row=0,column=0)
ttk.Entry(f4aa,textvariable=self.PLOT.ISOBAT_PATH, \
justify='left',width=50).grid(row=0,column=1,padx=3,pady=10)
ttk.Button(f4aa,text='Select',command=_pselect).grid(row=0,column=2)
f4b = tk.LabelFrame(f4aa,text='Isobaths (meters)',borderwidth=5,relief='sunken')
self.w = []
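# One checkbutton per isobath depth; the grid loop below lays them out in rows of eight columns.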
for i in range(self.PLOT.nisobat):
self.w.append(tk.Checkbutton(f4b,text=str(self.PLOT.ISOBAT_Z[i]), \
variable=self.PLOT.ISOBAT_SELEC[i], \
command=select_isobaths,justify='right'))
ii, jj = 0, 1
for i in range(self.PLOT.nisobat):
self.w[i].grid(row=jj,column=ii,sticky='w')
ii += 1
if ii > 7:
ii = 0
jj += 1
wwr = ttk.Label(f4b,width=26,justify='left')
wwr.grid(row=4,column=0,columnspan=3,sticky='w',padx=5)
if self.PLOT.ISOBAT_selected:
if self.PLOT.ISOBAT_loaded:
wwr.configure(font=font_norm)
wwr.configure(foreground='#125704')
wwr['text'] = 'Isobaths have been loaded'
else:
wwr.configure(font=font_bold)
wwr.configure(foreground='red')
wwr['text'] = 'Isobaths need to be loaded'
else:
wwr['text'] = 'No isobaths have been selected'
wwr.configure(font=font_norm)
wwr.configure(foreground='black')
wli = ttk.Button(f4b,text='Load isobaths',command=iload)
wli.grid(row=4,column=3,columnspan=2,padx=3,sticky='ew')
wlr = ttk.Button(f4b,text='Crop isobaths',command=self.isobath_crop)
wlr.grid(row=4,column=5,columnspan=2,padx=3,sticky='ew')
if self.PLOT.ISOBAT_selected:
wli.configure(state='enabled')
else:
wli.configure(state='disabled')
if self.PLOT.ISOBAT_loaded:
wlr.configure(state='enabled')
else:
wlr.configure(state='disabled')
f4b.grid(row=1,column=0,columnspan=3,sticky='we',padx=10)
# ....................
def update_name():
ii = self.PLOT.ISOBAT_LABEL.index(self.PLOT.ISOBAT_ZPOINTER.get())
wly['textvariable'] = self.PLOT.ISOBAT_STYLE[ii]
wlw['textvariable'] = self.PLOT.ISOBAT_WIDTH[ii]
wlc['textvariable'] = self.PLOT.ISOBAT_COLOR[ii]
# ....................
# Select the style, width and color of isobaths
f4c = tk.Frame(f4aa,borderwidth=5)
ii = self.PLOT.ISOBAT_LABEL.index(self.PLOT.ISOBAT_ZPOINTER.get())
wln = ttk.Combobox(f4c,width=10,justify="center",
textvariable=self.PLOT.ISOBAT_ZPOINTER,
values=self.PLOT.ISOBAT_LABEL)
wln.grid(row=0,column=0, padx=10)
wln.bind('<<ComboboxSelected>>',lambda e: update_name())
ttk.Label(f4c,text='Line style').grid(row=0,column=1,padx=5)
wly = ttk.Combobox(f4c,textvariable=self.PLOT.ISOBAT_STYLE[ii],
width=4,justify="center",
values=['-',':','--','-.',' '])
wly.grid(row=0,column=2,padx=5)
ttk.Label(f4c,text='Line width').grid(row=0,column=3,padx=5)
wlw = ttk.Entry(f4c,textvariable=self.PLOT.ISOBAT_WIDTH[ii],
width=4)
wlw.grid(row=0,column=4)
ttk.Label(f4c,text='Line color').grid(row=0,column=5,padx=3)
wlc = ttk.Entry(f4c, textvariable=self.PLOT.ISOBAT_COLOR[ii],width=10)
wlc.grid(row=0,column=6)
ttk.Button(f4c,text='Select',command=icselection).grid(row=0,column=7)
wls = ttk.Checkbutton(f4c,variable=self.PLOT.ISOBAT_LABEL_SHOW)
wls.grid(row=1, column=6, sticky='e')
# ....................
def cgrad():
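'''Spread the isobath line colors along a linear gradient of the Blues colormap.'''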
R0 = CM.Blues(80)
R1 = CM.Blues(255)
N = self.PLOT.nisobat
Ra = [(R1[0]-R0[0])/(N-1),(R1[1]-R0[1])/(N-1),(R1[2]-R0[2])/(N-1),1]
for i in range(N):
self.PLOT.ISOBAT_COLOR[i].set([R0[0]+Ra[0]*i,
R0[1]+Ra[1]*i,
R0[2]+Ra[2]*i,
1])
# ....................
ttk.Button(f4c,text='Color grad',command=cgrad).grid(row=1,column=5,padx=3)
ttk.Label(f4c,text='Label isobaths').grid(row=1,column=7,sticky='w')
f4c.grid(row=2,column=0,columnspan=3,padx=10)
f4aa.grid(row=1,column=0,pady=10,padx=5,ipadx=5)
#EG PAGE 4
font_bold = tkfont.Font(font='TkDefaultFont').copy()
font_bold['weight']='bold'
self.sgrid, self.sfgrid = ttk.Style(), ttk.Style()
self.sgrid.configure("sgrid.TLabel",background=self.PLOT.GRID_COLOR.get(),anchor="center")
self.sfgrid.configure("sfgrid.TLabel",background=self.PLOT.GRID_FONTCOLOR.get(),anchor="center")
f5 = ttk.Frame(page4,padding=5)
ttk.Label(f5,text='Show grid').grid(row=0,column=1,padx=3,sticky='e')
ttk.Checkbutton(f5,variable=self.PLOT.GRID_SHOW,command=self.make_plot) \
.grid(row=0,column=2,padx=3,sticky='w')
ttk.Label(f5,text='Meridians',font=font_bold).grid(row=1,column=0,sticky='w')
ttk.Label(f5,text='Initial').grid(row=2,column=1,sticky='w')
wxo = ttk.Entry(f5,textvariable=self.PLOT.MERIDIAN_INI,justify='left',width=8)
wxo.grid(row=2,column=2)
ttk.Label(f5,text='Final').grid(row=3,column=1,sticky='w')
wdx = ttk.Entry(f5,textvariable=self.PLOT.MERIDIAN_FIN,justify='left',width=8)
wdx.grid(row=3,column=2)
ttk.Label(f5,text='Interval').grid(row=4,column=1,sticky='w')
wdx = ttk.Entry(f5,textvariable=self.PLOT.MERIDIAN_INT,justify='left',width=8)
wdx.grid(row=4,column=2)
ttk.Checkbutton(f5,text='North',
variable=self.PLOT.GRID_NORTH).grid(row=2,
column=3,padx=6)
ttk.Checkbutton(f5,text='South',
variable=self.PLOT.GRID_SOUTH).grid(row=3,
column=3,padx=6)
ttk.Label(f5,text='Parallels',font=font_bold).grid(row=5,column=0,sticky='w')
ttk.Label(f5,text='Initial').grid(row=6,column=1,sticky='w')
wxo = ttk.Entry(f5,textvariable=self.PLOT.PARALLEL_INI,justify='left',width=8)
wxo.grid(row=6,column=2)
ttk.Label(f5,text='Final').grid(row=7,column=1,sticky='w')
wdx = ttk.Entry(f5,textvariable=self.PLOT.PARALLEL_FIN,justify='left',width=8)
wdx.grid(row=7,column=2)
ttk.Label(f5,text='Interval').grid(row=8,column=1,sticky='w')
wdx = ttk.Entry(f5,textvariable=self.PLOT.PARALLEL_INT,justify='left',width=8)
wdx.grid(row=8,column=2)
ttk.Checkbutton(f5,text='West',
variable=self.PLOT.GRID_WEST).grid(row=6,
column=3,padx=6)
ttk.Checkbutton(f5,text='East',
variable=self.PLOT.GRID_EAST).grid(row=7,
column=3,padx=6)
ttk.Label(f5,text='Configuration',font=font_bold) \
.grid(row=10,column=0,sticky='w')
ttk.Label(f5,text='Character Size').grid(row=11,column=1,sticky='w')
ttk.Entry(f5,textvariable=self.PLOT.GRID_SIZE,justify='left',width=8) \
.grid(row=11,column=2)
ttk.Label(f5,text='Font Color').grid(row=12,column=1,sticky='w')
self.Glabel = ttk.Label(f5,textvariable=self.PLOT.GRID_COLOR,style="sgrid.TLabel",width=8)
self.Glabel.grid(row=12,column=2,padx=3)
ttk.Button(f5,text='Select',command=lambda:colsel(self.PLOT.GRID_COLOR, \
self.sgrid,self.Glabel,"sgrid.TLabel",master=self.Window_mapconfig)). \
grid(row=12,column=3,padx=3)
ttk.Label(f5,text='Line Width').grid(row=13,column=1,sticky='w')
ttk.Entry(f5,textvariable=self.PLOT.GRID_LINEWIDTH,justify='left',width=8) \
.grid(row=13,column=2)
ttk.Label(f5,text='Line Style').grid(row=14,column=1,sticky='w')
ttk.Combobox(f5,textvariable=self.PLOT.GRID_LINESTYLE,
justify='left',
#EG values=['',' ','None','--','-.','-',':'],width=8) \
values=['None','--','-.','-',':'],width=8) \
.grid(row=14,column=2)
ttk.Label(f5,text='Line alpha').grid(row=15,column=1,sticky='w')
ttk.Entry(f5,textvariable=self.PLOT.GRID_ALPHA,justify='left',width=8) \
.grid(row=15,column=2)
ttk.Label(f5,text='Line color').grid(row=16,column=1,sticky='w')
self.GFlabel = ttk.Label(f5,textvariable=self.PLOT.GRID_FONTCOLOR,style="sfgrid.TLabel",width=8)
self.GFlabel.grid(row=16,column=2,padx=3)
ttk.Button(f5,text='Select',command=lambda:colsel(self.PLOT.GRID_FONTCOLOR, \
self.sgrid,self.GFlabel,"sfgrid.TLabel",master=self.Window_mapconfig)). \
grid(row=16,column=3,padx=3)
ttk.Label(f5,text='Zorder').grid(row=17,column=1,sticky='w')
ttk.Entry(f5,textvariable=self.PLOT.GRID_ZORDER,justify='left',width=8) \
.grid(row=17,column=2)
f5.grid()
f6 = ttk.Frame(page5,borderwidth=5,padding=5)
self.stsgrid= ttk.Style()
self.stsgrid.configure("stsgrid.TLabel",background=self.PLOT.TIMESTAMP_COLOR.get(),anchor="center")
ttk.Label(f6,text='Title').grid(row=1,column=0,sticky='w')
ttk.Entry(f6,textvariable=self.PLOT.TITLE,width=40). \
grid(row=1,column=1,columnspan=4,sticky='w')
def titleprop0():
self.PLOT.TITLEFONT = fontconfig(font=self.PLOT.TITLEFONT,
sample=self.PLOT.TITLE.get())
#ttk.Label(f6,text='Title font').grid(row=2,
# column=0,
# columnspan=1,
# sticky='w')
ttk.Button(f6,text='Set font',command=titleprop0).grid(row=1,column=5,padx=5,sticky='ew')
#ttk.Checkbutton(f6,text='Bold',variable=self.PLOT.TITLE_BOLD). \
# grid(row=1,column=5)
#ttk.Label(f6,text='Size').grid(row=2,column=0,columnspan=1,sticky='w')
#ttk.Entry(f6,textvariable=self.PLOT.TITLE_SIZE,width=7). \
# grid(row=2,column=1,sticky='w')
ttk.Label(f6,text='Title Pad').grid(row=2,column=0,columnspan=1,sticky='w')
ttk.Entry(f6,textvariable=self.PLOT.TITLE_PAD,width=7). \
grid(row=2,column=1,sticky='w')
ttk.Label(f6,text='X label').grid(row=4,column=0,sticky='w')
ttk.Entry(f6,textvariable=self.PLOT.XLABEL,width=40). \
grid(row=4,column=1,columnspan=4,sticky='w')
ttk.Label(f6,text='Y label').grid(row=5,column=0,sticky='w')
ttk.Entry(f6,textvariable=self.PLOT.YLABEL,width=40). \
grid(row=5,column=1,columnspan=4,sticky='w')
ttk.Label(f6,text='Size').grid(row=6,column=0,columnspan=1,sticky='w')
ttk.Entry(f6,textvariable=self.PLOT.LABEL_SIZE,width=5). \
grid(row=6,column=1,columnspan=1,sticky='w')
ttk.Label(f6,text='X Label Pad'). \
grid(row=7,column=0,columnspan=1,sticky='w')
ttk.Entry(f6,textvariable=self.PLOT.XLABEL_PAD,width=5). \
grid(row=7,column=1,columnspan=1,sticky='w')
ttk.Label(f6,text='Y Label Pad'). \
grid(row=7,column=3,columnspan=1,sticky='w')
ttk.Entry(f6,textvariable=self.PLOT.YLABEL_PAD,width=5). \
grid(row=7,column=4,columnspan=1,sticky='w')
#ttk.Label(f6,text='Plot logo'). \
# grid(row=8,column=0,sticky='w')
#ttk.Checkbutton(f6,variable=self.PLOT.LOGO_DISPLAY). \
# grid(row=8,column=1,sticky='w',padx=3)
ttk.Label(f6,text='Timestamp'). \
grid(row=9,column=0,sticky='w',pady=[15,1])
ttk.Checkbutton(f6,text='Show',variable=self.PLOT.TIMESTAMP_SHOW). \
grid(row=10,column=1,sticky='w')
ttk.Checkbutton(f6,text='Bold',variable=self.PLOT.TIMESTAMP_BOLD). \
grid(row=11,column=1,sticky='w')
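# Pressing 'Select' arms GET_TIMESTAMP_LOCATION so the timestamp position can be picked on the map (the flag is consumed elsewhere).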
def getlabelpos():
# ================
self.GET_TIMESTAMP_LOCATION = True
ttk.Label(f6,text='X pos'). \
grid(row=12,column=0,sticky='w')
ttk.Entry(f6,textvariable=self.PLOT.TIMESTAMP_X,width=12). \
grid(row=12,column=1,sticky='w')
ttk.Button(f6,text='Select',command=getlabelpos).grid(row=12,column=2)
ttk.Label(f6,text='Y pos'). \
grid(row=13,column=0,sticky='w')
ttk.Entry(f6,textvariable=self.PLOT.TIMESTAMP_Y,width=12). \
grid(row=13,column=1,columnspan=1,sticky='w')
ttk.Label(f6,text='Size'). \
grid(row=14,column=0,sticky='w')
ttk.Entry(f6,textvariable=self.PLOT.TIMESTAMP_SIZE,width=5). \
grid(row=14,column=1,sticky='w')
ttk.Label(f6,text='Color').grid(row=15,column=0,sticky='w')
self.GTSlabel = ttk.Label(f6,textvariable=self.PLOT.TIMESTAMP_COLOR,style="stsgrid.TLabel",width=8)
self.GTSlabel.grid(row=15,column=1,sticky='w')
ttk.Button(f6,text='Select',command=lambda:colsel(self.PLOT.TIMESTAMP_COLOR, \
self.stsgrid,self.GTSlabel,"stsgrid.TLabel",master=self.Window_mapconfig)). \
grid(row=15,column=2,sticky='w')
f6.grid()
# ---------------------------------------------
def center():
SOUTH = float(self.PLOT.SOUTH.get())
NORTH = float(self.PLOT.NORTH.get())
WEST = float(self.PLOT.WEST.get())
EAST = float(self.PLOT.EAST.get())
self.PLOT.SCALE_XO.set(0.5*(WEST+EAST))
self.PLOT.SCALE_YO.set(0.5*(SOUTH+NORTH))
# ---------------------------------------------
fs = ttk.Frame(page7,borderwidth=5,padding=5)
ttk.Label(fs,text='Show').grid(row=0,column=0,padx=3)
ttk.Checkbutton(fs,variable=self.PLOT.SCALE_SHOW).grid(row=0,column=1,padx=3,sticky='w')
#ttk.Label(fs,text='LON = ').grid(row=1,column=0,padx=3,sticky='e')
#ttk.Entry(fs,textvariable=self.PLOT.SCALE_X,
# width=10).grid(row=1,column=1,padx=3,sticky='w')
#ttk.Label(fs,text='LAT = ').grid(row=2,column=0,padx=3,sticky='e')
#ttk.Entry(fs,textvariable=self.PLOT.SCALE_Y,
# width=10).grid(row=2,column=1,padx=3,sticky='w')
#ttk.Label(fs,
# text='Map position where Scale will be drawn').grid(row=1,
# column=2,rowspan=2,columnspan=2,padx=3,pady=5)
ttk.Label(fs,text='xo = ').grid(row=3,column=0,padx=3,sticky='e')
ttk.Entry(fs,textvariable=self.PLOT.SCALE_XO,
width=10).grid(row=3,column=1,padx=3,sticky='w')
ttk.Label(fs,text='yo = ').grid(row=4,column=0,padx=3,sticky='e')
ttk.Entry(fs,textvariable=self.PLOT.SCALE_YO,
width=10).grid(row=4,column=1,padx=3,sticky='w')
ttk.Label(fs,
text='Screen position where scale will be drawn').grid(row=3,
column=2,rowspan=2,columnspan=2,padx=3,pady=5)
#ttk.Button(fs,text='Map center',command=center).grid(row=3,column=4,
# rowspan=2,padx=3)
ttk.Label(fs,text='Length = ').grid(row=5,column=0,padx=3,
pady=[5,1],sticky='e')
ttk.Entry(fs,textvariable=self.PLOT.SCALE_LENGTH,
width=10).grid(row=5,column=1,padx=3,sticky='w')
#ttk.Label(fs,text='Units = ').grid(row=6,column=0,padx=3,
# pady=[5,1],sticky='e')
#ttk.Combobox(fs,textvariable=self.PLOT.SCALE_UNITS,
# values=['km','mi','nmi','ft','m'],
# width=10).grid(row=6,column=1,padx=3,sticky='w')
#ttk.Label(fs,text='Bar style = ').grid(row=7,column=0,padx=3,
# pady=[5,1],sticky='e')
#ttk.Combobox(fs,textvariable=self.PLOT.SCALE_STYLE,
# values=['simple','fancy'],
# width=10).grid(row=7,column=1,padx=3,sticky='w')
#ttk.Label(fs,text='Yoffset = ').grid(row=8,column=0,padx=3,
# pady=[5,1],sticky='e')
#ttk.Entry(fs,textvariable=self.PLOT.SCALE_YOFFSET,
# width=10).grid(row=8,column=1,padx=3,sticky='w')
#ttk.Label(fs,text='Default: 0.02*(MAXLAT-MINLAT)').grid(row=8,
# column=2,columnspan=2,padx=3,sticky='w')
#ttk.Label(fs,text='Label style = ').grid(row=9,column=0,padx=3,
# pady=[5,1],sticky='e')
#ttk.Combobox(fs,textvariable=self.PLOT.SCALE_LABELSTYLE,
# values=['simple','fancy'],
# width=10).grid(row=9,column=1,padx=3,sticky='w')
ttk.Label(fs,text='Font size = ').grid(row=10,column=0,padx=3,
pady=[5,1],sticky='e')
ttk.Entry(fs,textvariable=self.PLOT.SCALE_FONTSIZE,
width=10).grid(row=10,column=1,padx=3,sticky='w')
ttk.Label(fs,text='Font color = ').grid(row=11,column=0,padx=3,
pady=[5,1],sticky='e')
ttk.Entry(fs,textvariable=self.PLOT.SCALE_FONTCOLOR,
width=10).grid(row=11,column=1,padx=3,sticky='w')
#ttk.Label(fs,text='Format = ').grid(row=12,column=0,padx=3,
# pady=[5,1],sticky='e')
#ttk.Entry(fs,textvariable=self.PLOT.SCALE_FORMAT,
# width=10).grid(row=12,column=1,padx=3,sticky='w')
ttk.Label(fs,text='Line width = ').grid(row=13,column=0,padx=3,
pady=[5,1],sticky='e')
ttk.Entry(fs,textvariable=self.PLOT.SCALE_LINEWIDTH,
width=10).grid(row=13,column=1,padx=3,sticky='w')
ttk.Label(fs,text='Line color = ').grid(row=14,column=0,padx=3,
pady=[5,1],sticky='e')
ttk.Entry(fs,textvariable=self.PLOT.SCALE_LINECOLOR,
width=10).grid(row=14,column=1,padx=3,sticky='w')
#ttk.Label(fs,text='Fill color 1 = ').grid(row=15,column=0,padx=3,
# pady=[5,1],sticky='e')
#ttk.Entry(fs,textvariable=self.PLOT.SCALE_FILLCOLOR1,
# width=10).grid(row=15,column=1,padx=3,sticky='w')
#ttk.Label(fs,text='Fill color 2 = ').grid(row=16,column=0,padx=3,
# pady=[5,1],sticky='e')
#ttk.Entry(fs,textvariable=self.PLOT.SCALE_FILLCOLOR2,
# width=10).grid(row=16,column=1,padx=3,sticky='w')
ttk.Label(fs,text='Zorder = ').grid(row=17,column=0,padx=3,
pady=[5,1],sticky='e')
ttk.Entry(fs,textvariable=self.PLOT.SCALE_ZORDER,
width=10).grid(row=17,column=1,padx=3,sticky='w')
fs.grid()
f8 = ttk.Frame(page8,borderwidth=5,padding=5)
self.obgrid, self.otgrid = ttk.Style(),ttk.Style()
self.obgrid.configure("obgrid.TLabel",background=self.PLOT.FIGURE_COLOR.get(),anchor="center")
self.otgrid.configure("otgrid.TLabel",background=self.PLOT.TEXT_COLOR.get(),anchor="center")
ttk.Label(f8,text='Map dots per inch (DPI): ').grid(row=0,column=0,sticky='w')
ttk.Entry(f8,textvariable=self.PLOT.DPI,width=10).grid(row=0,column=1,sticky='w')
ttk.Label(f8,text='Map window size: ').grid(row=1,column=0,sticky='w')
size = ttk.Entry(f8,textvariable=PSIZE,width=30)
size.grid(row=1,column=1,columnspan=3,sticky='w')
size.bind("<Return>",lambda f: sizeupdate())
ttk.Label(f8,text='(It will close current map) ').grid(row=1,column=4,sticky='w')
ttk.Label(f8,text='Font style').grid(row=2,column=0,sticky='w')
ttk.Combobox(f8,textvariable=self.PLOT.MAP_FONT_TYPE, \
values=self.FONT_TYPES,width=30).grid(row=2,column=1, \
columnspan=3, \
padx=3,sticky='w')
ttk.Label(f8,text='Background color').grid(row=3,column=0,sticky='w')
self.OBlabel = ttk.Label(f8,textvariable=self.PLOT.FIGURE_COLOR,style="obgrid.TLabel",width=8)
self.OBlabel.grid(row=3,column=1,padx=3)
ttk.Button(f8,text='Select',command=lambda:colsel(self.PLOT.FIGURE_COLOR, \
self.obgrid,self.OBlabel,"obgrid.TLabel",master=self.Window_mapconfig)). \
grid(row=3,column=2,padx=3)
ttk.Label(f8,text='Text color').grid(row=4,column=0,sticky='w')
self.OTlabel = ttk.Label(f8,textvariable=self.PLOT.TEXT_COLOR,style="otgrid.TLabel",width=8)
self.OTlabel.grid(row=4,column=1,padx=3)
ttk.Button(f8,text='Select',command=lambda:colsel(self.PLOT.TEXT_COLOR, \
self.otgrid,self.OTlabel,"otgrid.TLabel",master=self.Window_mapconfig)). \
grid(row=4,column=2,padx=3)
f8.grid()
maptabs.grid()
frame5 = ttk.Frame(self.Window_mapconfig,borderwidth=5,padding=5)
ttk.Button(frame5,text='Cancel',command=_cancel).grid(row=0,column=4,padx=3)
ttk.Button(frame5,text='Apply',command=_apply).grid(row=0,column=5,padx=3)
ttk.Button(frame5,text='Close',command=_done).grid(row=0,column=6,padx=3)
frame5.grid(row=24,column=0,columnspan=5)
# ====================
def logo_config(self):
# ====================
def _close():
# ===========
self.Window_logo.destroy()
self.Window_logo = None
self.make_plot()
def new_logo():
# =============
nn = tk.filedialog.askopenfile()
if nn is None:
return
if not empty(nn.name):
self.PLOT.LOGO_FILE.set(nn.name)
self.PLOT.LOGO_IMAGE = image.imread(self.PLOT.LOGO_FILE.get())
self.make_plot()
def _loadconf():
# =============
'''Load map configuration'''
cfilename = COSMO_CONF + 'drawing.conf'
try:
# Read configuration
with open(cfilename) as infile:
conf = json.load(infile)
self.PLOT.LOGO_FILE.set(conf['LOGO_FILE'])
self.PLOT.LOGO_ZOOM.set(conf['LOGO_ZOOM'])
self.PLOT.LOGO_LOCATION.set(conf['LOGO_LOCATION'])
self.PLOT.LOGO_X.set(conf['LOGO_X'])
self.PLOT.LOGO_Y.set(conf['LOGO_Y'])
self.PLOT.LOGO_IMAGE = image.imread(self.PLOT.LOGO_FILE.get())
except:
toconsola('Cannot read default configuration file '+cfilename,wid=self.cons)
self.make_plot()
def _saveconf():
# =============
'''Save map configuration'''
cfilename = COSMO_CONF + 'drawing.conf'
try:
# Read configuration
with open(cfilename) as infile:
conf = json.load(infile)
conf['LOGO_FILE'] = self.PLOT.LOGO_FILE.get()
conf['LOGO_ZOOM'] = self.PLOT.LOGO_ZOOM.get()
conf['LOGO_LOCATION'] = self.PLOT.LOGO_LOCATION.get()
conf['LOGO_X'] = self.PLOT.LOGO_X.get()
conf['LOGO_Y'] = self.PLOT.LOGO_Y.get()
# Write JSON file:
with io.open(cfilename,'w',encoding='utf8') as outfile:
str_ = json.dumps(conf,ensure_ascii=False, \
sort_keys=True, \
indent=2, \
separators=(',',': '))
outfile.write(to_unicode(str_)+'\n')
toconsola("New default values saved in file "+cfilename,wid=self.cons)
except:
toconsola('Cannot open default configuration file '+cfilename,wid=self.cons)
# Main Window
# ============
if self.Window_logo is None:
self.Window_logo = tk.Toplevel(self.master)
self.Window_logo.title("Logo configuration")
self.Window_logo.protocol('WM_DELETE_WINDOW',_close)
else:
self.Window_logo.lift()
return
menubar = tk.Menu(self.Window_logo)
menu = tk.Menu(menubar,tearoff=0)
menubar.add_cascade(label='Default configuration',menu=menu)
menu.add_command(label='Restore',command=_loadconf)
menu.add_command(label='Save',command=_saveconf)
try:
self.Window_logo.config(menu=menubar)
except AttributeError:
# master is a toplevel window (Python 2.4/Tkinter 1.63)
self.Window_logo.tk.call(self.Window_logo, "config", "-menu", menubar)
F0 = ttk.Frame(self.Window_logo,borderwidth=5,padding=5)
ttk.Label(F0,text='Plot logo'). \
grid(row=0,column=1,sticky='w')
ttk.Checkbutton(F0,variable=self.PLOT.LOGO_DISPLAY). \
grid(row=0,column=2,sticky='w',padx=3)
ttk.Label(F0,text='File', \
font='Helvetica 12 bold').grid(row=1,column=0,sticky='w')
le = ttk.Entry(F0,textvariable=self.PLOT.LOGO_FILE, \
justify='left',width=30)
le.grid(row=1,column=1,columnspan=5,sticky='w')
le.bind('<<ComboboxSelected>>',lambda e: new_logo())
ttk.Button(F0,text='Open', \
command=new_logo).grid(row=1,column=6,sticky='w')
ttk.Label(F0,text='Zoom', \
font='Helvetica 12 bold').grid(row=2,column=0,sticky='w')
ttk.Entry(F0,textvariable=self.PLOT.LOGO_ZOOM, \
justify='left',width=8).grid(row=2,column=1,sticky='w')
ttk.Label(F0,text='Location', \
font='Helvetica 12 bold').grid(row=3,column=0,sticky='w')
ttk.Radiobutton(F0,text='SW',variable=self.PLOT.LOGO_LOCATION,\
value='SW').grid(row=4,column=1,sticky='w')
ttk.Radiobutton(F0,text='NW',variable=self.PLOT.LOGO_LOCATION,\
value='NW').grid(row=5,column=1,sticky='w')
ttk.Radiobutton(F0,text='NE',variable=self.PLOT.LOGO_LOCATION,\
value='NE').grid(row=6,column=1,sticky='w')
ttk.Radiobutton(F0,text='SE',variable=self.PLOT.LOGO_LOCATION,\
value='SE').grid(row=7,column=1,sticky='w')
ttk.Radiobutton(F0,text='Other',variable=self.PLOT.LOGO_LOCATION,\
value='OTHER').grid(row=8,column=1,sticky='w')
lx = ttk.Entry(F0,textvariable=self.PLOT.LOGO_X,\
justify='left',width=7)
lx.grid(row=8,column=2,sticky='w')
ly = ttk.Entry(F0,textvariable=self.PLOT.LOGO_Y,\
justify='left',width=7)
ly.grid(row=8,column=3,sticky='w')
ttk.Button(F0,text='Apply',command=_close,padding=5).grid(row=9,column=6)
F0.grid()
# ==================
def plot_logo(self):
# ==================
'''Add a logo to the plot'''
im = OffsetImage(self.PLOT.LOGO_IMAGE,zoom=self.PLOT.LOGO_ZOOM.get())
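# The (xx,yy) anchor point and box_alignment place the logo in the chosen corner: (0,0)=SW, (0,1)=NW, (1,1)=NE, (1,0)=SE.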
if self.PLOT.LOGO_LOCATION.get() == 'SW':
xx = self.PLOT.WEST.get()
yy = self.PLOT.SOUTH.get()
ba = (0,0)
elif self.PLOT.LOGO_LOCATION.get() == 'NW':
xx = self.PLOT.WEST.get()
yy = self.PLOT.NORTH.get()
ba = (0,1)
elif self.PLOT.LOGO_LOCATION.get() == 'NE':
xx = self.PLOT.EAST.get()
yy = self.PLOT.NORTH.get()
ba = (1,1)
elif self.PLOT.LOGO_LOCATION.get() == 'SE':
xx = self.PLOT.EAST.get()
yy = self.PLOT.SOUTH.get()
ba = (1,0)
else:
xx = self.PLOT.LOGO_X.get()
yy = self.PLOT.LOGO_Y.get()
ba = (0,0)
self.ab = AnnotationBbox(im,[xx,yy], xycoords='data', \
box_alignment=ba,pad=0.0,frameon=True)
# box_alignment=ba,pad=0.0,frameon=True,zorder=100)
self.with_logo = self.ax.add_artist(self.ab)
# =====================
def clm(self):
# =====================
'''Options to launch the COSMO Lagrangian Model'''
self.CLM.west.set(self.PLOT.WEST.get())
self.CLM.east.set(self.PLOT.EAST.get())
self.CLM.south.set(self.PLOT.SOUTH.get())
self.CLM.north.set(self.PLOT.NORTH.get())
try:
self.CLM.do.set(self.DATE[0])
except:
self.CLM.do.set(datetime.datetime.now())
def _close():
# ===========
self.Window_clm.destroy()
self.Window_clm = None
def _run(options):
# ================
if self.CLM.script.get():
now = datetime.datetime.now()
soptions = '# COSMO Lagrangian Model options generated by cosmo-view.\n# %s' % now + options
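# Write each command-line option on its own line so the saved script is easier to read.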
soptions = soptions.replace('-OU','\n-OU')
soptions = soptions.replace('-OV','\n-OV')
soptions = soptions.replace('-trajectory','\n-trajectory')
soptions = soptions.replace('-final','\n-final')
soptions = soptions.replace('-end','\n-end')
soptions = soptions.replace('-xo','\n-xo')
soptions = soptions.replace('-yo','\n-yo')
soptions = soptions.replace('-zo','\n-zo')
soptions = soptions.replace('-to','\n-to')
soptions = soptions.replace('-release','\n-release')
soptions = soptions.replace('-from','\n-from')
soptions = soptions.replace('-for','\n-for')
soptions = soptions.replace('-dt','\n-dt')
soptions = soptions.replace('-alpha','\n-alpha')
soptions = soptions.replace('-mu','\n-mu')
soptions = soptions.replace('-va','\n-va')
soptions = soptions.replace('-xmin','\n-xmin')
soptions = soptions.replace('-xmax','\n-xmax')
soptions = soptions.replace('-ymin','\n-ymin')
soptions = soptions.replace('-ymax','\n-ymax')
soptions = soptions.replace('-random','\n-random')
soptions = soptions.replace('-Rx','\n-Rx')
soptions = soptions.replace('-Ry','\n-Ry')
soptions = soptions.replace('-reverse','\n-reverse')
ofile = open(self.CLM.SFILE.get(), "w")
a = ofile.write(soptions)
ofile.close()
command = self.CLM.PATH.get() + \
self.CLM.BIN.get()
command += options
toconsola(command,wid=self.cons)
#print(command)
os.system(command)
if os.path.isfile(self.CLM.TRAJECTORY.get()):
FLT = lagrangian.parameters()
toconsola(FLT.MESSAGE,wid=self.cons)
FLT.Read(self.CLM.TRAJECTORY.get())
if FLT is None:
return
# FLT.TIME = np.array([(FLT.date[i].replace(tzinfo=None)-\
# self.DATE[0]).total_seconds() \
# for i in range(FLT.nrecords)])
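# Interpolate the trajectory longitudes and latitudes onto the viewer time axis (self.TIME).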
FLT.MAPX = []
FLT.MAPY = []
if FLT.nfloats > 1:
for i in range(FLT.nfloats):
f = interpolate.interp1d(FLT.TIME,FLT.lon[:,i], bounds_error=False, fill_value=np.NaN)
FLT.MAPX.append(f(self.TIME))
f = interpolate.interp1d(FLT.TIME,FLT.lat[:,i], bounds_error=False, fill_value=np.NaN)
FLT.MAPY.append(f(self.TIME))
FLT.MAPX = np.array(FLT.MAPX).T.tolist()
FLT.MAPY = np.array(FLT.MAPY).T.tolist()
else:
FLT.Fx = interpolate.interp1d(FLT.TIME,FLT.lon, bounds_error=False, fill_value=np.NaN)
FLT.MAPX = FLT.Fx(self.TIME)
FLT.Fy = interpolate.interp1d(FLT.TIME,FLT.lat, bounds_error=False, fill_value=np.NaN)
FLT.MAPY = FLT.Fy(self.TIME)
self.nfloat += 1
self.FLOAT.append(FLT)
self.FLOAT_INDX.set(self.nfloat-1)
self.FLOAT_LIST = list(range(self.nfloat))
nt = len(FLT.TIME)
self.LAYERS.add(TYPE='FLOAT',Filename=FLT.FILENAME.get(),N=nt,wid=self.cons)
#self.nfiles += 1
#self.FILENAMES.append(FLT.FILENAME.get())
#self.FILETYPES.append('FLOAT')
#self.SEQUENCES.append(tk.BooleanVar(value=False))
#self.SEQLEADER.append(tk.BooleanVar(value=False))
#self.SEQNTIMES.append(0)
#self.FILEORDER.append(self.nfloat-1)
self.make_plot()
else:
messagebox.showinfo(message='COSMO Lagrangian Model failed')
def _help():
# ==========
options = ' --help'
_run(options)
def _run_single():
# ================
options = clm.Basic_options(self.CLM)
if empty(options):
return
_run(options)
def _run_ensemble():
# ==================
options = clm.Basic_options(self.CLM)
try:
aa = ' -random %s' % self.CLM.nfloats.get()
options += aa
except:
pass
try:
aa = ' -Rx %s' % self.CLM.Rx.get()
options += aa
except:
pass
try:
aa = ' -Ry %s' % self.CLM.Ry.get()
options += aa
except:
pass
_run(options)
# -------------------------
# Main CLM Window
# -------------------------
if self.nvec == 0:
messagebox.showinfo(message='No file with ocean currents has been opened yet')
return
if self.Window_clm is not None:
self.Window_clm.lift()
return
# Copy the VEC information to the CLM class
#
self.CLM.VEC = self.VEC
string = self.DATE[self.L.get()]
try:
self.CLM.do.set(string.replace(' ','T'))
except:
pass
self.CLM.to.set(self.TIME[self.L.get()]-self.TIME[0])
lini = self.L.get() + 1
if lini < 2:
lini = 2
self.CLM.Lini.set(lini)
self.CLM.Tini.set(self.TIME[lini])
self.CLM.Dini.set(self.DATE[lini])
self.Window_clm = tk.Toplevel(self.master)
self.Window_clm.title('COSMO Lagrangian Model options')
self.Window_clm.resizable(width=True,height=True)
self.Window_clm.protocol('WM_DELETE_WINDOW',_close)
clm.WinConfig(self.Window_clm,self.CLM,self.TIME,self.DATE)
F0 = ttk.Frame(self.Window_clm,padding=5)
#ttk.Checkbutton(F0,text='Reverse Run',variable=self.CLM.reverse). \
# grid(row=0,column=1,padx=5)
ttk.Checkbutton(F0,text='Save options',variable=self.CLM.script). \
grid(row=0,column=1,padx=5)
ttk.Button(F0,text='Run Single',command=_run_single).grid(row=0,column=2,padx=5)
ttk.Button(F0,text='Run Ensemble',command=_run_ensemble).grid(row=0,column=3,padx=5)
ttk.Button(F0,text='Run Help',command=_help).grid(row=0,column=4,padx=5)
F0.grid()
# ==================
def make_anim(self):
# ==================
''' Launch the matplotlib animation'''
# -----------
def _close():
# -----------
self.Window_anim.destroy()
self.Window_anim = None
def _done():
# ----------
L_Backup = self.L.get()
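# Remember the current time step so it can be restored once the movie has been written.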
FFMpegWriter = manimation.writers['ffmpeg']
metadata = dict(title=self.PLOT.VIDEO_TITLE.get(),
artist=self.PLOT.VIDEO_AUTHOR.get(),
comment=self.PLOT.VIDEO_COMMENT.get())
writer = FFMpegWriter(fps=self.PLOT.VIDEO_FPS.get(),metadata=metadata)
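# Loop over the selected frame range, updating each linked layer before grabbing the frame.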
with writer.saving(self.Mfig,self.PLOT.VIDEO_NAME.get(),self.PLOT.VIDEO_DPI.get()):
for L in range(self.PLOT.VIDEO_L1.get(),self.PLOT.VIDEO_L2.get()+1):
self.L.set(L)
self.PLOT.TLABEL.set(self.DATE[L])
print('L = ', L)
for i in range(self.nvec):
if self.VEC[i].LINK.get():
self.VEC[i].L.set(L)
self.VEC[i].read(update_lims=False,wid=self.cons)
for i in range(self.ncdf):
if self.CDF[i].LINK.get():
self.CDF[i].L.set(L)
self.CDF[i].read(update_lims=False,wid=self.cons)
self.make_Mplot()
writer.grab_frame()
messagebox.showinfo(parent=self.Window_anim,message='Movie has been saved')
self.L.set(L_Backup)
def _loadconf():
# -------------
'''Load ANIM configuration'''
toconsola('Retrieving VIDEO defaults.',wid=self.cons)
#print('Retrieving VIDEO defaults.')
with open(self.PLOT.FILECONF) as infile:
conf = json.load(infile)
self.PLOT.VIDEO_NAME.set(conf['VIDEO_NAME'])
self.PLOT.VIDEO_TITLE.set(conf['VIDEO_TITLE'])
self.PLOT.VIDEO_AUTHOR.set(conf['VIDEO_AUTHOR'])
self.PLOT.VIDEO_COMMENT.set(conf['VIDEO_COMMENT'])
self.PLOT.VIDEO_FPS.set(conf['VIDEO_FPS'])
self.PLOT.VIDEO_DPI.set(conf['VIDEO_DPI'])
def _saveconf():
# -------------
'''Save ANIM configuration'''
with open(self.PLOT.FILECONF) as infile:
conf = json.load(infile)
toconsola('Updating VIDEO defaults.',wid=self.cons)
#print('Updating VIDEO defaults.')
conf['VIDEO_NAME'] = self.PLOT.VIDEO_NAME.get()
conf['VIDEO_TITLE'] = self.PLOT.VIDEO_TITLE.get()
conf['VIDEO_AUTHOR'] = self.PLOT.VIDEO_AUTHOR.get()
conf['VIDEO_COMMENT'] = self.PLOT.VIDEO_COMMENT.get()
conf['VIDEO_FPS'] = self.PLOT.VIDEO_FPS.get()
conf['VIDEO_DPI'] = self.PLOT.VIDEO_DPI.get()
with io.open(self.PLOT.FILECONF,'w',encoding='utf8') as outfile:
str_ = json.dumps(conf,ensure_ascii=False, \
sort_keys=True, \
indent=2, \
separators=(',',': '))
outfile.write(to_unicode(str_)+'\n')
# Main
# ----
if self.LAYERS.n == 0:
messagebox.showinfo(message='No layers have been added')
return
if self.Window_anim is not None:
self.Window_anim.lift()
return
self.Window_anim = tk.Toplevel(self.master)
self.Window_anim.title('Animation creation')
self.Window_anim.resizable(width=True,height=True)
self.Window_anim.protocol('WM_DELETE_WINDOW',_close)
# Menu:
# AAA
menubar = tk.Menu(self.Window_anim)
menu = tk.Menu(menubar,tearoff=0)
menubar.add_cascade(label='Configuration',menu=menu)
menu.add_command(label='Restore',command=_loadconf)
menu.add_command(label='Save',command=_saveconf)
try:
self.Window_anim.config(menu=menubar)
except AttributeError:
# master is a toplevel window (Python 2.4/Tkinter 1.63)
self.Window_anim.tk.call(self.Window_anim, "config", "-menu", menubar)
# Widgets
#
F0 = ttk.Frame(self.Window_anim,borderwidth=5,padding=5)
ttk.Label(F0,text='Output filename : ').grid(row=0,column=0)
ttk.Entry(F0,textvariable=self.PLOT.VIDEO_NAME,width=40).grid(row=0,column=1,columnspan=4,sticky='w')
ttk.Label(F0,text='Video title : ').grid(row=1,column=0)
ttk.Entry(F0,textvariable=self.PLOT.VIDEO_TITLE,width=40).grid(row=1,column=1,columnspan=4,sticky='w')
ttk.Label(F0,text='Author : ').grid(row=2,column=0)
ttk.Entry(F0,textvariable=self.PLOT.VIDEO_AUTHOR,width=40).grid(row=2,column=1,columnspan=4,sticky='w')
ttk.Label(F0,text='Comment : ').grid(row=3,column=0)
ttk.Entry(F0,textvariable=self.PLOT.VIDEO_COMMENT,width=40).grid(row=3,column=1,columnspan=4,sticky='w')
ttk.Label(F0,text='Initial frame : ').grid(row=4,column=0)
ttk.Entry(F0,textvariable=self.PLOT.VIDEO_L1,width=7).grid(row=4,column=1,sticky='w')
ttk.Label(F0,text='Final frame : ').grid(row=5,column=0)
ttk.Entry(F0,textvariable=self.PLOT.VIDEO_L2,width=7).grid(row=5,column=1,sticky='w')
ttk.Label(F0,text='FPS : ').grid(row=6,column=0)
ttk.Entry(F0,textvariable=self.PLOT.VIDEO_FPS,width=7).grid(row=6,column=1,sticky='w')
ttk.Label(F0,text='DPI : ').grid(row=7,column=0)
ttk.Entry(F0,textvariable=self.PLOT.VIDEO_DPI,width=7).grid(row=7,column=1,sticky='w')
done = ttk.Button(F0,text='Do it',command=_done)
done.grid(row=8,column=3,padx=3)
done.bind("<Return>",lambda e:_done())
close = ttk.Button(F0,text='Close',command=_close)
close.grid(row=8,column=4,padx=3)
close.bind("<Return>",lambda e:_close())
F0.grid()
F1 = ttk.Frame(self.Window_anim,borderwidth=5,padding=5)
self.Mfig = Figure(figsize=self.PLOT.SIZE,dpi=self.PLOT.DPI.get())
#EG Projection
projection = self.PLOT.MAP_PROJECTION.get()
proj = map_proj(projection)
self.Max = self.Mfig.add_subplot(111, projection=proj['proj'])
self.Mcanvas = FigureCanvasTkAgg(self.Mfig, master=F1)
self.Mcanvas.draw()
self.Mcanvas.get_tk_widget().grid(row=0,column=0,columnspan=11,sticky='wn')
self.Mdrawmap = True
F1.grid()
self.make_Mplot(proj=proj['proj'])
# =========================
def DepthandDate(self,CDF):
# =========================
'''Fill the lists: K_LIST, L_LIST, Z_LIST, T_LIST and DATE'''
CDF.K.set(0) # Default layer
CDF.L.set(0) # Default time step
CDF.K_LIST = list(range(CDF.FLD.icdf.nz))
CDF.L_LIST = list(range(CDF.FLD.icdf.nt))
# Depth selector
if CDF.FLD.icdf.idk > -1:
if self.PLOT.GEOMAP.get():
wrk = CDF.FLD.nc.variables[CDF.FLD.icdf.zname][:]
CDF.Z_LIST = list(wrk)
toconsola(str(CDF.Z_LIST),wid=self.cons)
#print(CDF.Z_LIST)
else:
CDF.Z_LIST = np.arange(CDF.FLD.icdf.nz)
else:
CDF.Z_LIST = []
# Time selector and TIME and DATE values
CDF.DATE = []
if CDF.FLD.icdf.idl > -1:
wrk = CDF.FLD.nc.variables[CDF.FLD.icdf.tname][:]
CDF.T_LIST = list(wrk)
try:
for i in range(CDF.FLD.icdf.nt):
CDF.DATE.append(num2date(CDF.T_LIST[i], \
units=CDF.FLD.icdf.time_units, \
calendar=CDF.FLD.icdf.time_calendar))
except:
for i in range(CDF.FLD.icdf.nt):
CDF.DATE.append(i)
try:
CDF.TIME = np.array([(CDF.DATE[i]-CDF.DATE[0]).total_seconds() \
for i in range(CDF.FLD.icdf.nt)])
except:
CDF.TIME = np.array([(CDF.DATE[i]-CDF.DATE[0]) \
for i in range(CDF.FLD.icdf.nt)])
else:
CDF.T_LIST = []
CDF.DATE = [' ']
CDF.TIME = np.array([0])
# # ====================================
# def read_lonlat(self,CDF,xname,yname):
# # ====================================
# '''Read 1D/2D lon lat grid '''
#
# if CDF.icdf.georef:
# vlon = CDF.ncid.variables[xname]
# vlat = CDF.ncid.variables[yname]
# toconsola(str(vlon),wid=self.cons)
# toconsola(str(vlat),wid=self.cons)
# else:
# toconsola('Georef is False',wid=self.cons)
# #print('Georef is False')
# self.PLOT.GEOMAP.set(False)
# vlon = np.arange(CDF.icdf.nx)
# vlat = np.arange(CDF.icdf.ny)
#
# CDF.lon = vlon[:].copy()
# CDF.lat = vlat[:].copy()
# if len(vlon.shape) == 1:
# CDF.xx,CDF.yy = np.meshgrid(CDF.lon,CDF.lat)
# else:
# CDF.xx = vlon[:].copy()
# CDF.yy = vlat[:].copy()
# # ====================
# def read_UV(self,VEC):
# # ====================
# '''Read 2D velocity data according to user selections'''
# #K = self.K.get()
# #L = self.L.get()
# K = VEC.K.get()
# L = VEC.L.get()
# uname = '%s' % VEC.uname.get()
# vname = '%s' % VEC.vname.get()
# ndim = VEC.icdf.ndims[VEC.uid]
#
# #VEC.K.set(K)
# #VEC.L.set(L)
#
# if ndim == 2:
# VEC.VEL.u = VEC.ncid.variables[uname][:,:]
# VEC.VEL.v = VEC.ncid.variables[vname][:,:]
# elif ndim == 3:
# if VEC.icdf.ppl[VEC.uid] > -1:
# VEC.VEL.u = VEC.ncid.variables[uname][L,:,:].squeeze()
# VEC.VEL.v = VEC.ncid.variables[vname][L,:,:].squeeze()
# elif VEC.icdf.ppk[VEC.uid] > -1:
# VEC.VEL.u = VEC.ncid.variables[uname][K,:,:].squeeze()
# VEC.VEL.v = VEC.ncid.variables[vname][K,:,:].squeeze()
# else:
# toconsola('Invalid file!',wid=self.cons)
# print('Invalid file!')
# return
# elif ndim == 4:
# VEC.VEL.u = VEC.ncid.variables[uname][L,K,:,:].squeeze()
# VEC.VEL.v = VEC.ncid.variables[vname][L,K,:,:].squeeze()
# else:
# toconsola("Invalid number of dimensions, "+str(ndim),wid=self.cons)
# #print('Invalid number of dimensions, '+str(ndim))
#
# _u = VEC.VEL.u.copy()
# _v = VEC.VEL.v.copy()
# msku = ma.getmask(VEC.VEL.u)
# mskv = ma.getmask(VEC.VEL.v)
# msk = ma.mask_or(msku,mskv)
# VEC.VEL.u = ma.array(_u,mask=msk).copy()
# VEC.VEL.v = ma.array(_v,mask=msk).copy()
# #VEC.VEL.speed = np.sqrt(VEC.VEL.u**2+VEC.VEL.v**2)
# #VEC.VEL.F = interpolate.interp2d(VEC.lon, \
# # VEC.lat, \
# # VEC.VEL.speed)
#
# # ===========================================
# #def read_Field(self,FIELD,ncid,icdf,sid,K,L):
# # ===========================================
# # ===========================================
# def read_CDF(self,CDF,update_lims=True):
# # ===========================================
# '''Read 2D data according to user selections'''
#
# # self.read_Field(self.CDF[ii].FIELD, \
# # self.CDF[ii].ncid, \
# # self.CDF[ii].icdf, \
# # self.CDF[ii].varid, \
# # self.CDF[ii].K.get(), \
# # self.CDF[ii].L.get())
# if CDF.varid < 0:
# CDF.FLD.data = None
# return
#
# K = CDF.K.get()
# L = CDF.L.get()
#
# vname = '%s' % CDF.varname.get()
# toconsola('READ_FIELD Reading Var, Level and Time:'+str(CDF.varid)+
# ", "+str(CDF.K.get())+
# ", "+str(CDF.L.get()),wid=self.cons)
# #print('READ_FIELD Reading Var, Level and Time:'+str(CDF.varid)+
# # ", "+str(CDF.K.get())+
# # ", "+str(CDF.L.get()))
#
# ndim = CDF.icdf.ndims[CDF.varid]
# if ndim == 2:
# CDF.FLD.data = CDF.ncid.variables[vname][:,:]
# elif ndim == 3:
# if CDF.icdf.ppl[CDF.varid] > -1:
# CDF.FLD.data = CDF.ncid.variables[vname][L,:,:].squeeze()
# elif CDF.icdf.ppk[CDF.varid] > -1:
# CDF.FLD.data = CDF.ncid.variables[vname][K,:,:].squeeze()
# else:
# messagebox.showinfo(message='Invalid variable dimensions')
# CDF.FLD.data = None
# elif ndim == 4:
# CDF.FLD.data = CDF.ncid.variables[vname][L,K,:,:].squeeze()
#
# CDF.FLD.missing_value = None
#
# if CDF.FLD.data is not None:
# CDF.FLD.varname = vname
# try:
# CDF.FLD.units = CDF.ncid.variables[vname].getncattr('units')
# except:
# CDF.FLD.units = ''
#
# try:
# CDF.FLD.missing_value = CDF.ncid.variables[vname].getncattr('_FillValue')
# except:
# try:
# CDF.FLD.missing_value = CDF.ncid.variables[vname].getncattr('missing_value')
# except:
# CDF.FLD.missing_value = None
#
# if CDF.FLD.missing_value is not None:
# CDF.FLD.mask = ma.getmask(CDF.FLD.data)
# CDF.FLD.data[CDF.FLD.data==CDF.FLD.missing_value] = np.nan
#
# # Contour intervals
# CDF.FLD.minval = float(CDF.FLD.data.min())
# CDF.FLD.maxval = float(CDF.FLD.data.max())
# toconsola('Min val = '+str(CDF.FLD.minval),wid=self.cons)
# toconsola('Max val = '+str(CDF.FLD.maxval),wid=self.cons)
# #print('Min val = '+str(CDF.FIELD.minval))
# #print('Max val = '+str(CDF.FIELD.maxval))
#
# print('Here: ', update_lims)
# print(CDF.FLD.minval)
# print(CDF.FLD.maxval)
#
# if update_lims:
# try:
# CDF.PLOT.CONTOUR_MIN.set(myround(CDF.FLD.minval))
# except:
# CDF.PLOT.CONTOUR_MIN.set(CDF.FLD.minval)
# try:
# CDF.PLOT.CONTOUR_MAX.set(myround(CDF.FLD.maxval))
# except:
# CDF.PLOT.CONTOUR_MAX.set(CDF.FLD.maxval)
#
# dd = CDF.PLOT.CONTOUR_MAX.get() \
# - CDF.PLOT.CONTOUR_MIN.get()
# try:
# CDF.PLOT.CONTOUR_INTERVAL.set(myround(0.1*dd,0))
# except:
# CDF.PLOT.CONTOUR_INTERVAL.set(0.1*dd)
#
# ===================
def get_contour(self):
# ==================
'''Widget to read Netcdf files'''
self.CSOURCE = tk.StringVar()
self.CSOURCE.set('Local Dataset')
self.DATETIME = ''
def _close():
# ===========
self.Window_ncdf.destroy()
self.Window_ncdf = None
return
def _done():
# ===========
ii = self.CDF_INDX.get()
if self.CDF[ii].SOURCE == 'FILE':
self.CDF[ii].read(wid=self.cons)
# # The date of the data
# try:
# nodate = empty(self.DATE[0])
# except:
# nodate = False
# try:
# nodatetime = empty(self.DATETIME)
# except:
# nodatetime = False
#
# if not nodatetime:
# if nodate:
# self.DATE[0] = self.DATETIME
# else:
# if len(self.DATE[0]) == 1:
# a = self.DATE[0].__str__()
# b = self.CDF[ii].DATE[0].__str__()
# if a == b:
# self.DATE[0] = self.DATETIME
# self.CDF[ii].DATE[0] = self.DATETIME
_close()
self.make_plot()
if self.Window_contourconfig is not None:
self.Window_contourconfig.destroy()
self.Window_contourconfig = None
self.contour_config()
def _clear():
# ===========
if self.ncdf == 0:
return
# When erasing, two kinds of information must be removed: the
# entry in the LAYERS structure and the contour (CDF) information.
# Note: if the SEQUENCE leader is erased, the DATE and TIME values
# of the SEQUENCE must be updated.
ii = self.CDF_INDX.get()
self.LAYERS.erase('FLD',ii,wid=self.cons)
self.LAYERS.print()
toconsola('Erasing data field '+str(ii),wid=self.cons)
#print('Erasing record '+str(ii))
del self.CDF[ii]
self.ncdf -= 1
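# Keep the selected index within range after the deletion.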
ii = self.ncdf-1 if ii >= self.ncdf else ii
self.CDF_INDX.set(ii)
_refill(ii)
if self.LAYERS.update:
toconsola('Updating TIME and DATE values of SEQUENCE',wid=self.cons)
LEADER_TYPE = self.LAYERS.TYPE[self.LAYERS.leader]
jj = self.LAYERS.TYPE_INDEX[self.LAYERS.leader]
if LEADER_TYPE == 'VEC':
self.DATE = self.VEC[jj].DATE.copy()
self.TIME = self.VEC[jj].TIME.copy()
elif LEADER_TYPE == 'FLD':
self.DATE = self.FLD[jj].DATE.copy()
self.TIME = self.FLD[jj].TIME.copy()
self.PLOT.TLABEL.set(self.DATE[self.L.get()])
self.make_plot()
def _reget():
# ===========
self.CDF_INDX.set(_wsel.get())
ii = self.CDF_INDX.get()
_refill(ii)
def _refill(ii):
# ==============
if ii >= 0:
self.CDF_LIST = list(range(self.ncdf))
_wsel.configure(state='!disabled')
_wsel['values'] = self.CDF_LIST
_went['textvariable'] = self.CDF[ii].FILENAME
_wvar.configure(state='!disabled')
_wvar['textvariable'] = self.CDF[ii].varname
_wvar['values'] = self.CDF[ii].FLD.icdf.VAR_MENU
_kbox.configure(state='!disabled')
_kbox['textvariable'] = self.CDF[ii].K
_kbox['values'] = self.CDF[ii].K_LIST
_lbox.configure(state='!disabled')
_lbox['textvariable'] = self.CDF[ii].L
_lbox['values'] = self.CDF[ii].L_LIST
_aent.configure(state='!disabled')
_aent['textvariable'] = self.CDF[ii].ALIAS
if self.CDF[ii].FLD.icdf.idk < 0:
_kbox.configure(state='disabled')
_zbox['text']='--'
else:
_zbox['text']=self.CDF[ii].Z_LIST[self.CDF[ii].K.get()]
if self.CDF[ii].FLD.icdf.idl < 0:
_lbox.configure(state='disabled')
_dbox['text']='--'
else:
_lbox['textvariable'] = self.CDF[ii].L
_lbox['values'] = self.CDF[ii].L_LIST
_dbox['text'] = self.CDF[ii].DATE[self.CDF[ii].L.get()]
_show['variable'] = self.CDF[ii].show
else:
self.CDF = []
self.CDF_LIST = [None]
self.CDF_INDX = tk.IntVar()
self.CDF_INDX.set(0)
_wsel.configure(state='disabled')
_wvar.configure(state='disabled')
_kbox.configure(state='disabled')
_lbox.configure(state='disabled')
_aent.configure(state='disabled')
_wsel['values'] = self.CDF_LIST
_went['textvariable'] = ''
_wvar['textvariable'] = ''
_wvar['values'] = ['']
_wvar.configure(state='disabled')
_kbox['textvariable'] = ''
_kbox['values'] = ['']
_zbox['text'] = '--'
_lbox['textvariable'] = ''
_lbox['values'] = ['']
_dbox['text'] = '--'
_wsav.configure(state='disabled')
def _add(SOURCE):
# ===============
global Window_select
CDF = CONTOUR()
def _cancel():
# ============
global Window_select
Window_select.destroy()
Window_select = None
def _done():
# ==========
global Window_select
global _wvar
if empty(CDF.varname.get()):
messagebox.showinfo(parent=Window_select,message='Select variable')
return
toconsola('2D-grid axes : '+'%s'%CDF.FLD.icdf.grid2d,wid=self.cons)
# Seems the suitable place where to put this:
CDF.FLD.varname = CDF.varname.get()
CDF.FLD.varid = CDF.FLD.icdf.vname.index(CDF.FLD.varname)
CDF.FLD.ndims = CDF.FLD.icdf.ndims[CDF.FLD.varid]
CDF.FLD.get_info(wid=self.cons)
CDF.FLD.get_grid()
#self.read_lonlat(CDF,CDF.FLD.icdf.xname,CDF.FLD.icdf.yname)
CDF.K_LIST = list(range(CDF.FLD.icdf.nz))
CDF.L_LIST = list(range(CDF.FLD.icdf.nt))
CDF.Z_LIST = CDF.FLD.get_zlist()
CDF.T_LIST, CDF.DATE, CDF.TIME = CDF.FLD.get_tlist()
#self.DepthandDate(CDF)
CDF.show.set(True)
#if empty(CDF.DATE[0].__str__()):
# _dsel.configure(state='enabled')
# Adding the CONTOUR to the Drawing class
#
nt = CDF.FLD.icdf.nt
self.LAYERS.add(TYPE='FLD',Filename=CDF.FILENAME.get(),N=nt,wid=self.cons)
self.ncdf += 1
self.CDF.append(CDF)
self.CDF_INDX.set(self.ncdf-1)
self.CDF_LIST = list(range(self.ncdf))
n = self.LAYERS.n
#self.nfiles += 1
#self.FILENAMES.append(CDF.FILENAME.get())
#self.FILETYPES.append('FLD')
#self.FILEORDER.append(self.ncdf-1)
#self.SEQUENCES.append(tk.BooleanVar(value=False)) #By default, not attached
#self.SEQLEADER.append(tk.BooleanVar(value=False))
#self.SEQNTIMES.append(CDF.FLD.icdf.nt)
ii = self.CDF_INDX.get()
#if not empty(self.DATETIME):
# self.CDF[ii].DATE.append(self.DATETIME)
if self.first:
if self.drawmap is None:
self.PLOT.WEST.set(self.CDF[ii].FLD.xmin)
self.PLOT.EAST.set(self.CDF[ii].FLD.xmax)
self.PLOT.SOUTH.set(self.CDF[ii].FLD.ymin)
self.PLOT.NORTH.set(self.CDF[ii].FLD.ymax)
self.plot_initialize()
#try:
# self.PLOT.XLABEL.set(self.CDF[ii].FLD.icdf.xname)
#except:
# self.PLOT.XLABEL.set('Longitude')
#try:
# self.PLOT.YLABEL.set(self.CDF[ii].FLD.icdf.yname)
#except:
# self.PLOT.YLABEL.set('Latitude')
self.DATE = self.CDF[ii].DATE.copy()
self.TIME = self.CDF[ii].TIME.copy()
self.PLOT.TLABEL.set(self.CDF[ii].DATE[self.CDF[ii].L.get()])
self.PLOT.VIDEO_L2.set(len(self.DATE)-1)
self.first = False
# Is this the field member of the SEQUENCE?
# CAROUSEL MANAGEMENT - CONTOUR
if nt > 1:
if self.NL == 0:
toconsola('Contour initiates Time axis',wid=self.cons)
self.CDF[ii].LINK.set(True)
self.TIME = self.CDF[ii].TIME.copy()
self.DATE = self.CDF[ii].DATE.copy()
self.NL = nt
self.L.set(self.CDF[ii].L.get())
self.L_LIST = list(range(nt))
self.lbox.configure(state='normal')
self.lbox['values'] = self.L_LIST
if self.L.get() < self.NL-1:
self.bnext.configure(state='normal')
if self.L.get() > 0:
self.bprev.configure(state='normal')
elif self.NL == nt:
toconsola('Linking Contour to Time axis',wid=self.cons)
self.CDF[ii].LINK.set(True)
self.CDF[ii].L.set(self.L.get()) #Synchronize records
# if self.LAYERS.nsequence == 0:
# toconsola('Contour initiates SEQUENCE list',wid=self.cons)
# self.LAYERS.nsequence = 1
# self.LAYERS.INSEQUENCE[n-1].set(True)
# self.LAYERS.SEQUENCER[n-1].set(True)
# self.LAYERS.leader = n-1
# self.LAYERS.seqlen = nt
## self.SEQUENCES[-1].set(True)
## self.SEQLEADER[-1].set(True) # Is the first field
## self.SEQLEADER_INDX = self.nfiles
# self.DATE = self.CDF[ii].DATE.copy()
# self.TIME = self.CDF[ii].TIME.copy()
# self.L.set(self.CDF[ii].L.get())
# self.L_LIST = list(range(self.CDF[ii].FLD.icdf.nt))
# self.NL = len(self.L_LIST)
# self.lbox.configure(state='normal')
# self.lbox['values'] = self.L_LIST
# if self.L.get() < self.NL-1:
# self.bnext.configure(state='normal')
# if self.L.get() > 0:
# self.bprev.configure(state='normal')
# else:
# if nt == self.LAYERS.seqlen:
# toconsola('Adding Contour to SEQUENCE list',wid=self.cons)
# self.LAYERS.nsequence += 1
# self.LAYERS.INSEQUENCE[n-1].set(True)
# self.LAYERS.SEQUENCER[n-1].set(False)
# self.CDF[ii].L.set(self.L.get()) #Synchronize records
_refill(ii)
Window_select.destroy()
Window_select = None
self.DATETIME = ''
self.LAYERS.print()
ISOURCE = self.CONTOUR_OPTIONS.index(SOURCE)
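# Dispatch on the selected contour source: OPeNDAP selector, Copernicus selector, local file dialog, a remote sample dataset, or the file of the currently selected vector field.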
if ISOURCE == 0:
filename = self.get_opendap_filename()
elif ISOURCE == 1:
filename = self.get_copernicus_filename()
elif ISOURCE == 2:
nn = filedialog.askopenfilename(parent=self.Window_ncdf, \
filetypes=[('Netcdf','*.nc'), \
('CDF','*.cdf'), \
('ALL','*')])
if len(nn) == 0:
return
else:
filename = '%s' % nn
elif ISOURCE == 3:
#aa = get_remote()
#filename2 = aa.filename()
#filename = filename2.decode('utf-8')
filename = 'https://cosmo.icm.csic.es/MEDSEA_100.nc'
toconsola('filename: '+filename,wid=self.cons)
else:
if self.nvec <= 0:
messagebox.showinfo(message='No Trajectory file opened yet')
return
else:
jj = self.VEC_INDX.get()
filename = self.VEC[jj].UFILENAME.get()
if empty(filename):
return
# Initialize contour class:
CDF.FILENAME.set(filename)
CDF.FLD.open(filename,wid=self.cons)
# Not empty filename:
#CDF = cdf_parameters()
#CDF.FIELD = fld_parameters()
#CDF.FILENAME.set(filename)
#CDF.ncid = Dataset(filename)
#CDF.icdf = tools.geocdf(filename, wid=self.cons)
##self.read_lonlat(CDF,CDF.icdf.xname,CDF.icdf.yname)
##self.DepthandDate(CDF)
##CDF.FIELD.show.set(True)
##if empty(CDF.DATE[0].__str__()):
## _dsel.configure(state='enabled')
if Window_select is None:
Window_select = tk.Toplevel(self.master)
Window_select.title('SELECT VARIABLE')
Window_select.protocol('WM_DELETE_WINDOW',Window_select.destroy)
else:
Window_select.lift()
return
#axesid = tools.WinGeoaxes(CDF.icdf,CDF.ncid,Window_select)
axesid = tools.WinGeoaxes(CDF.FLD.icdf,CDF.FLD.nc,Window_select)
font_bold = tkfont.Font(font='TkDefaultFont').copy()
font_bold['weight']='bold'
F0 = ttk.Frame(Window_select,padding=5,borderwidth=5)
ttk.Label(F0,text='Select variable',borderwidth=3,font=font_bold) \
.grid(row=0,column=0)
dvar = ttk.Combobox(F0,textvariable=CDF.varname, \
values=CDF.FLD.icdf.VAR_MENU, \
width=20)
dvar.grid(row=0,column=1,columnspan=2)
dvar.bind('<<ComboboxSelected>>',lambda e: axesid.selected_var(CDF.FLD.icdf,CDF.FLD.nc,dvar))
F0.grid()
#CDF.icdf.nx = -9999
F1 = ttk.Frame(Window_select,padding=5)
cancel = ttk.Button(F1,text='Cancel',command=_cancel)
cancel.grid(row=0,column=3,sticky='e',padx=10)
cancel.bind("<Return>",lambda e:_cancel())
done = ttk.Button(F1,text='Done',command=_done)
done.grid(row=0,column=4,sticky='e',padx=10)
done.bind("<Return>",lambda e:_done())
F1.grid(sticky='we')
Window_select.wait_window(Window_select)
def _lselection():
# ================
ii = self.CDF_INDX.get()
_dbox['text'] = self.CDF[ii].DATE[self.CDF[ii].L.get()]
def _kselection():
# ================
ii = self.CDF_INDX.get()
_zbox['text'] = self.CDF[ii].Z_LIST[self.CDF[ii].K.get()]
def _vselection():
# ================
ii = self.CDF_INDX.get()
try:
self.CDF[ii].FLD.varname = self.CDF[ii].varname.get()
self.CDF[ii].FLD.varid = self.CDF[ii].FLD.icdf.vname.index( \
self.CDF[ii].varname.get())
except:
self.CDF[ii].FLD.varid = -1
def _save():
# ================
ii = self.CDF_INDX.get()
toconsola('Saving '+str(ii),wid=self.cons)
self.CDF[ii].save()
def _date():
# ==========
''' Manually select a date'''
aa = get_Date()
self.DATETIME = aa.date
_dbox['text'] = self.DATETIME
# Main window:
# ============
if self.Window_ncdf is None:
self.Window_ncdf = tk.Toplevel(self.master)
self.Window_ncdf.title("Contour files")
self.Window_ncdf.protocol('WM_DELETE_WINDOW',_close)
else:
self.Window_ncdf.lift()
if self.ncdf > 0:
ii = self.CDF_INDX.get()
else:
ii = -1
global Window_select
global _wvar
Window_select = None
F0 = ttk.Frame(self.Window_ncdf,padding=5)
# Add
ttk.Button(F0,text='Import', \
command=lambda:_add(self.CSOURCE.get())).grid(row=1, \
column=0,padx=3)
_source = ttk.Combobox(F0,textvariable=self.CSOURCE, \
values=self.CONTOUR_OPTIONS)
_source.grid(row=0,column=0,padx=3)
# Filename:
ttk.Label(F0,text='Netcdf file').grid(row=0,column=1,padx=3)
_wsel = ttk.Combobox(F0,textvariable=self.CDF_INDX, \
values=self.CDF_LIST,width=5)
_wsel.grid(row=0,column=2)
_wsel.bind('<<ComboboxSelected>>',lambda e: _reget())
_went = ttk.Entry(F0,justify='left',width=50,state='readonly')
_went.grid(row=0,column=3,columnspan=5,padx=3,sticky='w')
# Variable:
ttk.Label(F0,text='Variable').grid(row=1,column=1,padx=3,pady=3)
_wvar = ttk.Combobox(F0,width=15)
_wvar.grid(row=1,column=2,columnspan=2,sticky='w')
_wvar.bind('<<ComboboxSelected>>',lambda e: _vselection())
# Depth:
ttk.Label(F0,text='Depth').grid(row=2,column=1,padx=3,pady=3)
_kbox = ttk.Combobox(F0,values=['0'],width=5)
_kbox.grid(row=2,column=2)
_kbox.bind('<<ComboboxSelected>>',lambda e: _kselection())
_zbox = ttk.Label(F0,width=20)
_zbox.grid(row=2,column=3,columnspan=2,sticky='w')
# Time:
ttk.Label(F0,text='Time').grid(row=3,column=1,padx=3,pady=3)
_lbox = ttk.Combobox(F0,width=5)
_lbox.grid(row=3,column=2)
_lbox.bind('<<ComboboxSelected>>',lambda e: _lselection())
_dbox = ttk.Label(F0,width=20)
_dbox.grid(row=3,column=3,columnspan=2,sticky='w')
_dsel = ttk.Button(F0,text='Select date',command=_date)
_dsel.grid(row=3,column=5,sticky='w')
# Alias
ttk.Label(F0,text='Alias').grid(row=4,column=1,padx=3,pady=3)
_aent = ttk.Entry(F0,width=15,justify='left')
_aent.grid(row=4,column=2,columnspan=2,sticky='w')
if ii == -1:
_wsel.configure(state='disabled')
_wvar.configure(state='disabled')
_kbox.configure(state='disabled')
_lbox.configure(state='disabled')
_dsel.configure(state='disabled')
_aent.configure(state='disabled')
else:
_went['textvariable'] = self.CDF[ii].FILENAME
_wvar['textvariable'] = self.CDF[ii].varname
_wvar['values'] = self.CDF[ii].FLD.icdf.VAR_MENU
_kbox['textvariable'] = self.CDF[ii].K
_kbox['values'] = self.CDF[ii].K_LIST
_aent['textvariable'] = self.CDF[ii].ALIAS
if self.CDF[ii].FLD.icdf.idk < 0:
_kbox.configure(state='disabled')
_zbox['text']='--'
else:
_zbox['text']=self.CDF[ii].Z_LIST[self.CDF[ii].K.get()]
if self.CDF[ii].FLD.icdf.idl < 0:
_lbox.configure(state='disabled')
_dsel.configure(state='enabled')
try:
nodate = empty(self.CDF[ii].DATE[0])
except:
nodate = False
if nodate:
_dbox['text']='--'
else:
_dbox['text']=self.CDF[ii].DATE[0]
else:
_lbox['textvariable'] = self.CDF[ii].L
_lbox['values'] = self.CDF[ii].L_LIST
_dbox['text'] = self.CDF[ii].DATE[self.CDF[ii].L.get()]
_dsel.configure(state='disabled')
F0.grid(row=0,column=0)
F1 = ttk.Frame(self.Window_ncdf,padding=5)
_wsav = ttk.Button(F1,text='Save data',command=_save)
_wsav.grid(row=1,column=0,padx=3,sticky='w')
if ii == -1:
_show = ttk.Checkbutton(F1,text='Show')
_show.configure(state='disabled')
_wsav.configure(state='disabled')
else:
_show = ttk.Checkbutton(F1,text='Show',command=self.make_plot)
_show['variable']=self.CDF[ii].show
_wsav.configure(state='normal')
_show.grid(row=1,column=5)
ttk.Button(F1,text='Cancel',command=_close).grid(row=1,column=6,padx=3)
ttk.Button(F1,text='Clear',command=_clear).grid(row=1,column=7,padx=3)
ttk.Button(F1,text='Plot',command=_done).grid(row=1,column=8,padx=3)
ttk.Label(F1,text=' ',width=8).grid(row=1,column=1,padx=3,sticky='w')
ttk.Label(F1,text=' ',width=8).grid(row=1,column=2,padx=3,sticky='w')
F1.grid(row=1,column=0)
#====================
def get_saidin(self):
#====================
'''Function to retrieve the SAIDIN data'''
def _close():
self.Window_saidin.destroy()
self.Window_saidin = None
def _selector():
name = saidin.saidin_selector(parent=self.master, wid=self.cons)
if not empty(name):
self.SAIDIN.FILENAME.set(name)
def _done():
if (empty(self.SAIDIN.FILENAME.get())):
messagebox.showinfo(message='No image selected')
return
self.SAIDIN.FLD.nc = Dataset('[FillMismatch]'+self.SAIDIN.FILENAME.get(),'r')
self.SAIDIN.FLD.icdf = tools.geocdf(self.SAIDIN.FILENAME.get(), wid=self.cons)
self.SAIDIN.varname.set('mcsst')
self.SAIDIN.FLD.varname = 'mcsst'
self.SAIDIN.FLD.x = self.SAIDIN.FLD.nc.variables['lon'][:]
self.SAIDIN.FLD.y = self.SAIDIN.FLD.nc.variables['lat'][:]
self.SAIDIN.FLD.data = self.SAIDIN.FLD.nc.variables[self.SAIDIN.FLD.varname][0,:,:].squeeze()
self.SAIDIN.FLD.xx,self.SAIDIN.FLD.yy = np.meshgrid(self.SAIDIN.FLD.x,self.SAIDIN.FLD.y)
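# Build 2-D coordinate grids from the 1-D lon/lat axes for plotting.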
self.DepthandDate(self.SAIDIN)
self.LAYERS.add(TYPE='SAIDIN',Filename=self.SAIDIN.FILENAME.get(),N=1,wid=self.cons)
#self.nfiles += 1
#self.FILENAMES.append(self.SAIDIN.FILENAME.get())
#self.FILETYPES.append('SAIDIN')
#self.FILEORDER.append(0)
#self.SEQUENCES.append(tk.BooleanVar(value=False))
#self.SEQLEADER.append(tk.BooleanVar(value=False))
#self.SEQNTIMES.append(1)
if self.first:
if self.drawmap is None:
self.PLOT.WEST.set(np.min(self.SAIDIN.FLD.x))
self.PLOT.EAST.set(np.max(self.SAIDIN.FLD.x))
self.PLOT.SOUTH.set(np.min(self.SAIDIN.FLD.y))
self.PLOT.NORTH.set(np.max(self.SAIDIN.FLD.y))
self.plot_initialize()
self.L.set(self.SAIDIN.L.get())
self.DATE = self.SAIDIN.DATE.copy()
self.TIME = self.SAIDIN.TIME.copy()
#self.PLOT.XLABEL.set('Longitude')
#self.PLOT.YLABEL.set('Latitude')
self.first = False
self.SAIDIN.FLD.get_info(wid=self.cons)
#try:
# self.SAIDIN.FLD.units = self.SAIDIN.ncid.variables[self.SAIDIN.FIELD.varname] \
# .getncattr('units')
#except:
# self.SAIDIN.FLD.units = ''
#
# try:
# self.SAIDIN.FLD.missing_value = self.SAIDIN.ncid.variables[self.SAIDIN.FIELD.varname] \
# .getncattr('_FillValue')
# except:
# try:
# self.SAIDIN.FLD.missing_value = self.SAIDIN.ncid.variables[self.SAIDIN.FIELD.varname] \
# .getncattr('missing_value')
# except:
# self.SAIDIN.FIELD.missing_value = None
toconsola(str(self.SAIDIN.FLD.minval),wid=self.cons)
toconsola(str(self.SAIDIN.FLD.maxval),wid=self.cons)
if self.SAIDIN.landmask.get():
toconsola('Applying land/sea mask ...',wid=self.cons)
_a = self.SAIDIN.FLD.data.copy()
tmp = self.SAIDIN.FLD.nc.variables['lsmask'][0,:,:].squeeze()
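        # Mask the SST field wherever lsmask equals 1 (assumed to flag land points)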
msk = ma.masked_where(tmp==1,tmp)
self.SAIDIN.FLD.data = ma.array(_a,mask=msk).copy()
self.SAIDIN.FLD.mask = ma.getmask(self.SAIDIN.FLD.data)
# Contour intervals
self.SAIDIN.FLD.minval = self.SAIDIN.FLD.data.min()
self.SAIDIN.FLD.maxval = self.SAIDIN.FLD.data.max()
try:
self.SAIDIN.PLOT.CONTOUR_MIN.set(myround(self.SAIDIN.FLD.minval))
except:
self.SAIDIN.PLOT.CONTOUR_MIN.set(self.SAIDIN.FLD.minval)
try:
self.SAIDIN.PLOT.CONTOUR_MAX.set(myround(self.SAIDIN.FLD.maxval))
except:
self.SAIDIN.PLOT.CONTOUR_MAX.set(self.SAIDIN.FLD.maxval)
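      # Default contour interval: one tenth of the data range, rounded when possible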
dd = self.SAIDIN.PLOT.CONTOUR_MAX.get() - self.SAIDIN.PLOT.CONTOUR_MIN.get()
try:
self.SAIDIN.PLOT.CONTOUR_INTERVAL.set(myround(0.1*dd))
except:
self.SAIDIN.PLOT.CONTOUR_INTERVAL.set(0.1*dd)
#self.SAIDIN.FIELD.F = interpolate.interp2d(self.SAIDIN.lon, \
# self.SAIDIN.lat, \
# self.SAIDIN.FIELD.data)
_close()
self.make_plot()
def _clear():
self.SAIDIN.FILENAME.set('')
self.SAIDIN.FLD.x = None
self.SAIDIN.FLD.y = None
self.SAIDIN.FLD.xx = None
self.SAIDIN.FLD.yy = None
self.SAIDIN.FLD.data = None
_close()
if self.Window_saidin is None:
self.Window_saidin = tk.Toplevel(self.master)
self.Window_saidin.title("Satellite Sea surface temperature (SAIDIN)")
self.Window_saidin.protocol('WM_DELETE_WINDOW',_close)
else:
self.Window_saidin.lift()
F0 = ttk.Frame(self.Window_saidin,padding=5)
ttk.Entry(F0,textvariable=self.SAIDIN.FILENAME,justify='left', \
width=80).grid(row=0,column=0,columnspan=8,padx=3)
ttk.Button(F0,text='Select',command=_selector).grid(row=0,column=8,padx=3)
ttk.Checkbutton(F0,text='Mask land data',variable=self.SAIDIN.landmask).grid(row=1,column=5,padx=3)
ttk.Button(F0,text='Cancel',command=_clear).grid(row=1,column=6,padx=3)
ttk.Button(F0,text='Clear',command=_clear).grid(row=1,column=7,padx=3)
ttk.Button(F0,text='Plot',command=_done).grid(row=1,column=8,padx=3)
F0.grid()
# ===================
def get_marker(self):
# ===================
'''Widget to read Markers'''
def _close():
# ===========
self.Window_marker.destroy()
self.Window_marker = None
def _done():
# ===========
ii = self.MARKER_INDX.get()
if ii >= 0:
self.MARKER[ii].LABEL.set(_wlab.get())
self.make_plot()
self.Window_marker.destroy()
self.Window_marker = None
if self.Window_dotconfig is not None:
self.Window_dotconfig.destroy()
self.Window_dotconfig = None
self.marker_config()
def _clear():
# ===========
'''Note that markers have no time axis'''
if self.nmarker == 0:
return
ii = self.MARKER_INDX.get()
self.LAYERS.erase('MARKER',ii,wid=self.cons)
self.LAYERS.print()
toconsola('Erasing marker '+str(ii),wid=self.cons)
del self.MARKER[ii]
self.nmarker -= 1
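      # Clamp the selected index if the removed marker was the last one in the list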
ii = self.nmarker-1 if ii >= self.nmarker else ii
toconsola('New marker = '+str(self.nmarker),wid=self.cons)
self.MARKER_INDX.set(ii)
_refill(ii)
self.make_plot()
_close()
def _reget():
# ===========
self.MARKER_INDX.set(_wsel.get())
ii = self.MARKER_INDX.get()
_refill(ii)
def _refill(ii):
# ==============
if ii >= 0:
self.MARKER_LIST = list(range(self.nmarker))
_wsel['values'] = self.MARKER_LIST
_went['textvariable'] = self.MARKER[ii].FILENAME
_wstat['text'] = ' N = '+str(self.MARKER[ii].n)
_wsel.configure(state='!disabled')
_wlab['state'] = '!disabled'
_wlab['textvariable'] = self.MARKER[ii].LABEL
_show['variable'] = self.MARKER[ii].show
_aent.configure(state='normal')
_aent['textvariable'] = self.MARKER[ii].ALIAS
else:
self.MARKER = []
self.MARKER_LIST = ['0']
self.MARKER_INDX = tk.IntVar()
self.MARKER_INDX.set(0)
_wsel['values'] = self.MARKER_LIST
_went['textvariable'] = ''
_wstat['text'] = ''
_wsel.configure(state='disabled')
_wlab['textvariable'] = ''
_wlab.configure(state='disabled')
_aent.configure(state='disabled')
_show.configure(state='disabled')
def _add():
# ========
nn = filedialog.askopenfilename(filetypes=[('CSV','*.csv'),
('TXT','*.txt'),
('ALL','*')],
initialdir='./',
parent=self.Window_marker)
if len(nn) == 0:
return
else:
filename = '%s' % nn
# Not empty filename:
MARKER = geomarker.parameters()
toconsola(MARKER.MESSAGE,wid=self.cons)
MARKER.Read(filename)
if MARKER.n == 0:
return
self.nmarker += 1
self.MARKER.append(MARKER)
self.MARKER_INDX.set(self.nmarker-1)
self.MARKER_LIST = list(range(self.nmarker))
self.LAYERS.add(TYPE='MARKER',Filename=MARKER.FILENAME.get(),N=len(MARKER.lon),wid=self.cons)
self.LAYERS.print()
ii = self.MARKER_INDX.get()
_refill(ii)
#self.make_plot()
# Main window:
# ============
if self.Window_marker is not None:
self.Window_marker.lift()
return
self.Window_marker = tk.Toplevel(self.master)
self.Window_marker.title('Geomarkers')
self.Window_marker.protocol('WM_DELETE_WINDOW',_close)
if self.nmarker > 0:
ii = self.MARKER_INDX.get()
else:
ii = -1
F0 = ttk.Frame(self.Window_marker,padding=5)
# Add
ttk.Button(F0,text='Add',command=_add).grid(row=0,column=0,padx=3)
# Filename:
ttk.Label(F0,text='Marker file').grid(row=0,column=1,padx=3)
_wsel = ttk.Combobox(F0,textvariable=self.MARKER_INDX, \
values=self.MARKER_LIST,width=5)
_wsel.grid(row=0,column=2)
_wsel.bind('<<ComboboxSelected>>',lambda e: _reget())
_went = ttk.Entry(F0,justify='left',width=50,state='readonly')
_went.grid(row=0,column=3,columnspan=5,padx=3,sticky='w')
# AAA
if ii == -1:
_wstat = ttk.Label(F0,text='',width=50,justify='left')
_wsel.configure(state='disabled')
else:
_wstat = ttk.Label(F0,text=' N = '+str(self.MARKER[ii].n),width=50,justify='left')
_went['textvariable'] = self.MARKER[ii].FILENAME
_wstat.grid(row=1,column=3,columnspan=5,padx=3,sticky='w')
ttk.Label(F0,text='Marker Label').grid(row=2,column=1,padx=3)
_wlab = ttk.Entry(F0,justify='left',width=18)
_wlab.grid(row=2,column=2,columnspan=2,padx=3,sticky='w')
if ii == -1:
_wlab['state'] = 'disabled'
else:
_wlab['textvariable'] = self.MARKER[ii].LABEL
#Alias
ttk.Label(F0,text='Alias').grid(row=3,column=1,padx=3,pady=3)
_aent = ttk.Entry(F0,width=15,justify='left')
_aent.grid(row=3,column=2,columnspan=2,sticky='w')
F0.grid(row=0,column=0)
F1 = ttk.Frame(self.Window_marker,padding=5)
if ii == -1:
_show = ttk.Checkbutton(F1,text='Show')
_aent.configure(state='disabled')
else:
_show = ttk.Checkbutton(F1,text='Show',command=self.make_plot)
_show['variable']=self.MARKER[ii].show
_aent['textvariable'] = self.MARKER[ii].ALIAS
_show.grid(row=1,column=5,padx=3)
ttk.Button(F1,text='Cancel',command=_close).grid(row=1,column=6,padx=3)
ttk.Button(F1,text='Clear',command=_clear).grid(row=1,column=7,padx=3)
ttk.Button(F1,text='Done',command=_done).grid(row=1,column=8,padx=3)
F1.grid(row=1,column=0)
# ======================
def get_shapefile(self):
# ==========================
SHAPE = shape.parameters()
toconsola(SHAPE.MESSAGE,wid=self.cons)
def _close():
self.Window_shapefile.destroy()
self.Window_shapefile = None
def _done():
ii = self.SHAPE_INDX.get()
if ii >= 0:
if self.SHAPE[ii].CROP.get():
toconsola('Cropping shapefile',wid=self.cons)
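          # Crop the geometries to the current map extent shrunk by the CROP_PAD margin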
xmin = self.PLOT.WEST.get() + self.PLOT.CROP_PAD.get()
xmax = self.PLOT.EAST.get() - self.PLOT.CROP_PAD.get()
ymin = self.PLOT.SOUTH.get() + self.PLOT.CROP_PAD.get()
ymax = self.PLOT.NORTH.get() - self.PLOT.CROP_PAD.get()
bbox = [xmin, xmax, ymin, ymax]
self.SHAPE[ii].Crop(bbox)
self.SHAPE[ii].LABEL.set(_wlab.get())
self.make_plot()
self.Window_shapefile.destroy()
self.Window_shapefile = None
if self.Window_geoconfig is not None:
self.Window_geoconfig.destroy()
self.Window_geoconfig = None
        self.shape_config()
def _clear():
# ===========
'''Note that shape geometries have no time axis in principle'''
if self.nshape == 0:
return
ii = self.SHAPE_INDX.get()
self.LAYERS.erase('SHAPE',ii,wid=self.cons)
self.LAYERS.print()
      toconsola('Erasing shape file '+str(ii),wid=self.cons)
      del self.SHAPE[ii]
      self.nshape -= 1
      ii = self.nshape-1 if ii >= self.nshape else ii
      toconsola('Remaining shape files = '+str(self.nshape),wid=self.cons)
      self.SHAPE_INDX.set(ii)
_refill(ii)
self.make_plot()
_close()
def _reget():
# ===========
self.SHAPE_INDX.set(_wsel.get())
ii = self.SHAPE_INDX.get()
_refill(ii)
def _refill(ii):
# ==============
if ii >= 0:
self.SHAPE_LIST = list(range(self.nshape))
_wsel['values'] = self.SHAPE_LIST
_went['textvariable'] = self.SHAPE[ii].FILENAME
_wstat['text'] = ' N = '+str(self.SHAPE[ii].n)+' geometries'
_wsel.configure(state='!disabled')
_wlab['state'] = '!disabled'
_wlab['textvariable'] = self.SHAPE[ii].LABEL
_show['variable'] = self.SHAPE[ii].show
_aent.configure(state='normal')
_aent['textvariable'] = self.SHAPE[ii].ALIAS
_wcrp['variable'] = self.SHAPE[ii].CROP
_wcrp.configure(state='normal')
_wpad.configure(state='normal')
else:
self.SHAPE = []
self.SHAPE_LIST = ['0']
self.SHAPE_INDX = tk.IntVar()
self.SHAPE_INDX.set(0)
_wsel['values'] = self.SHAPE_LIST
_went['textvariable'] = ''
_wstat['text'] = ''
_wsel.configure(state='disabled')
_wlab['textvariable'] = ''
_wlab.configure(state='disabled')
_aent.configure(state='disabled')
_show.configure(state='disabled')
_wcrp.configure(state='disabled')
_wpad.configure(state='disabled')
def _add():
# ========
nn = filedialog.askopenfilename(filetypes=[('shp','*.shp')],
initialdir='./',
parent=self.Window_shapefile)
if len(nn) == 0:
return
else:
filename = '%s' % nn
# Not empty filename:
SHAPE.Read(filename)
if SHAPE.n == 0:
return
self.nshape += 1
self.SHAPE.append(SHAPE)
self.SHAPE_INDX.set(self.nshape-1)
self.SHAPE_LIST = list(range(self.nshape))
self.LAYERS.add(TYPE='SHAPE',Filename=SHAPE.FILENAME.get(),N=SHAPE.n,wid=self.cons)
self.LAYERS.print()
ii = self.SHAPE_INDX.get()
_refill(ii)
# Main window:
# ============
if self.Window_shapefile is not None:
self.Window_shapefile.lift()
return
self.Window_shapefile = tk.Toplevel(self.master)
self.Window_shapefile.title('Shape file')
self.Window_shapefile.protocol('WM_DELETE_WINDOW',_close)
if self.nshape > 0:
ii = self.SHAPE_INDX.get()
else:
ii = -1
F0 = ttk.Frame(self.Window_shapefile,padding=5)
# Add
ttk.Button(F0,text='Add',command=_add).grid(row=0,column=0,padx=3)
# Filename:
ttk.Label(F0,text='Shape file').grid(row=0,column=1,padx=3)
_wsel = ttk.Combobox(F0,textvariable=self.SHAPE_INDX, \
values=self.SHAPE_LIST,width=5)
_wsel.grid(row=0,column=2)
_wsel.bind('<<ComboboxSelected>>',lambda e: _reget())
_went = ttk.Entry(F0,justify='left',width=50,state='readonly')
_went.grid(row=0,column=3,columnspan=5,padx=3,sticky='w')
# AAA
if ii == -1:
_wstat = ttk.Label(F0,text='',width=50,justify='left')
_wsel.configure(state='disabled')
else:
_wstat = ttk.Label(F0,text=' N = '+str(self.SHAPE[ii].n),width=50,justify='left')
_went['textvariable'] = self.SHAPE[ii].FILENAME
_wstat.grid(row=1,column=3,columnspan=5,padx=3,sticky='w')
ttk.Label(F0,text='Shape Label').grid(row=2,column=1,padx=3)
_wlab = ttk.Entry(F0,justify='left',width=18)
_wlab.grid(row=2,column=2,columnspan=2,padx=3,sticky='w')
if ii == -1:
_wlab['state'] = 'disabled'
else:
_wlab['textvariable'] = self.SHAPE[ii].LABEL
ttk.Label(F0,text='Alias').grid(row=3,column=1,padx=3,pady=3)
_aent = ttk.Entry(F0,width=18,justify='left')
_aent.grid(row=3,column=2,columnspan=2,padx=3,sticky='w')
ttk.Label(F0,text='Crop').grid(row=4,column=1,padx=3)
_wpad = ttk.Entry(F0,textvariable=self.PLOT.CROP_PAD,width=9,justify='left')
_wpad.grid(row=4,column=2,sticky='w',padx=3)
_wcrp = ttk.Checkbutton(F0)
_wcrp.grid(row=4,column=3,sticky='w')
F0.grid(row=0,column=0)
F1 = ttk.Frame(self.Window_shapefile,padding=5)
if ii == -1:
_show = ttk.Checkbutton(F1,text='Show')
_aent.configure(state='disabled')
_wcrp.configure(state='disabled')
_wpad.configure(state='disabled')
else:
_show = ttk.Checkbutton(F1,text='Show',command=self.make_plot)
_show['variable']=self.SHAPE[ii].show
_aent['textvariable'] = self.SHAPE[ii].ALIAS
_wcrp['variable'] = self.SHAPE[ii].CROP
_wpad.configure(state='normal')
_wcrp.configure(state='normal')
_show.grid(row=1,column=5,padx=3)
ttk.Button(F1,text='Clear',command=_clear).grid(row=1,column=6,padx=3)
ttk.Button(F1,text='Done',command=_done).grid(row=1,column=7,padx=3)
F1.grid(row=1,column=0)
# ================
def get_wms(self):
# ==========================
pass
# ======================
def marker_config(self):
# =======================
'''Widget to configure Markers'''
#self.dot_config(self.MARKER[self.MARKER_INDX.get()])
global ishow
if self.nmarker == 0:
messagebox.showinfo(message='No Marker file opened yet')
return
def _cancel():
# ============
self.Window_dotconfig.destroy()
self.Window_dotconfig = None
def _apply():
# ============
self.make_plot()
def _done():
# ============
self.make_plot()
self.Window_dotconfig.destroy()
self.Window_dotconfig = None
def _selected():
# ==============
global ishow
itab = self.Mnb.index('current')
ishow.destroy()
# The usual configuration:
ii = self.MARKER_INDX.get()
_went['textvariable'] = self.MARKER[ii].FILENAME
ishow = ttk.Frame(self.Window_dotconfig,padding=10)
# Define tabs:
self.Mnb = ttk.Notebook(ishow)
page0 = ttk.Frame(self.Mnb)
page1 = ttk.Frame(self.Mnb)
page2 = ttk.Frame(self.Mnb)
page3 = ttk.Frame(self.Mnb)
self.Mnb.add(page0,text='Label Aspect')
self.Mnb.add(page1,text='Marker Aspect')
self.Mnb.add(page2,text='Label Text')
self.Mnb.add(page3,text='Marker coordinates')
self.Mnb.grid()
self.Mnb.select(itab)
# Page0
ttk.Label(page0,
text='Show as text',
padding=3).grid(row=0,column=0,padx=3,sticky='e')
ttk.Checkbutton(page0,
variable=self.MARKER[ii].textmode).grid(row=0,
column=1,
padx=3,
sticky='w')
ttk.Label(page0,
text='Generic label',
padding=3).grid(row=1,column=0,padx=3,sticky='e')
ttk.Entry(page0,
textvariable=self.MARKER[ii].LABEL).grid(row=1,
column=1,
padx=3,
sticky='w')
# Page 1
dotplot.Configuration(page1,self.MARKER[ii].PLOT)
# Page 2
geomarker.TextConfigure(page2,self.MARKER[ii].PLOT)
# Page 3
geomarker.ShowData(page3,self.MARKER[ii])
f0 = ttk.Frame(ishow,padding=5)
ttk.Button(f0,text='Cancel',command=_cancel,padding=5). \
grid(row=0,column=0,padx=3)
ttk.Button(f0,text='Apply',command=_apply,padding=5). \
grid(row=0,column=1,padx=3)
ttk.Button(f0,text='Done',command=_done,padding=5). \
grid(row=0,column=2,padx=3)
f0.grid(sticky='ew',columnspan=3)
ishow.grid()
def _loadconf():
# =============
'''Load dot configuration'''
ii = self.MARKER_INDX.get()
toconsola('Restoring dot configuration',wid=self.cons)
try:
self.MARKER[ii].PLOT.load(self.MARKER[ii].PLOT.FILECONF)
except:
toconsola('Error: Unable to load file '+self.MARKER[ii].PLOT.FILECONF,wid=self.cons)
self.make_plot()
def _saveconf():
# =============
      '''Save dot configuration'''
ii = self.MARKER_INDX.get()
toconsola('Saving dot configuration',wid=self.cons)
try:
self.MARKER[ii].PLOT.save(self.MARKER[ii].PLOT.FILECONF)
except:
toconsola('Error: Unable to write file '+self.MARKER[ii].PLOT.FILECONF,wid=self.cons)
def _loadfromconf():
# ==================
'''Load dot configuration from a file'''
ii = self.MARKER_INDX.get()
nn = filedialog.askopenfilename(title='Load dot configuration',
parent=self.Window_dotconfig,
initialdir=COSMO_CONF)
if len(nn) == 0:
return
self.MARKER[ii].PLOT.FILECONF = '%s' % nn
toconsola('Restoring dot configuration from '+
self.MARKER[ii].PLOT.FILECONF,wid=self.cons)
try:
self.MARKER[ii].PLOT.load(self.MARKER[ii].PLOT.FILECONF)
except:
toconsola('Error: Unable to load file '+self.MARKER[ii].PLOT.FILECONF,wid=self.cons)
self.make_plot()
def _saveasconf():
# ================
'''Save dot configuration to a file'''
ii = self.MARKER_INDX.get()
nn = filedialog.asksaveasfilename(title='Save dot configuration',
parent=self.Window_dotconfig,
initialdir=COSMO_CONF,
confirmoverwrite=True)
if nn is None or len(nn) == 0:
return
self.MARKER[ii].PLOT.FILECONF = '%s' % nn
toconsola('Saving dot configuration to '+self.MARKER[ii].PLOT.FILECONF,wid=self.cons)
try:
self.MARKER[ii].PLOT.save(self.MARKER[ii].PLOT.FILECONF)
except:
toconsola('Error: Unable to write file '+self.MARKER[ii].PLOT.FILECONF,wid=self.cons)
if self.Window_dotconfig is not None:
self.Window_dotconfig.lift()
return
self.Window_dotconfig = tk.Toplevel(self.master)
self.Window_dotconfig.title('Marker plot configuration')
self.Window_dotconfig.resizable(width=True,height=True)
self.Window_dotconfig.protocol('WM_DELETE_WINDOW',_cancel)
menubar = tk.Menu(self.Window_dotconfig)
menu = tk.Menu(menubar,tearoff=0)
menubar.add_cascade(label='Configuration',menu=menu)
menu.add_command(label='Restore',command=_loadconf)
menu.add_command(label='Restore from',command=_loadfromconf)
menu.add_command(label='Save',command=_saveconf)
menu.add_command(label='Save as',command=_saveasconf)
try:
self.Window_dotconfig.config(menu=menubar)
except AttributeError:
# master is a toplevel window (Python 2.4/Tkinter 1.63)
      self.master.tk.call(self.Window_dotconfig, "config", "-menu", menubar)
fsel = ttk.Frame(self.Window_dotconfig,padding=10)
ttk.Label(fsel,text="File: ").grid(row=0,column=0,sticky='e',padx=3)
_wsel = ttk.Combobox(fsel,textvariable=self.MARKER_INDX,
values=self.MARKER_LIST,width=5)
_wsel.grid(row=0,column=1,sticky='w',padx=3)
_wsel.bind('<<ComboboxSelected>>',lambda e:_selected())
    _went = ttk.Entry(fsel,justify='left',width=80,state='readonly')
    _went.grid(row=0,column=2,columnspan=8,padx=3,sticky='w')
fsel.grid()
# The usual configuration:
ii = self.MARKER_INDX.get()
_went['textvariable'] = self.MARKER[ii].FILENAME
ishow = ttk.Frame(self.Window_dotconfig,padding=10)
# Define tabs:
self.Mnb = ttk.Notebook(ishow)
page0 = ttk.Frame(self.Mnb)
page1 = ttk.Frame(self.Mnb)
page2 = ttk.Frame(self.Mnb)
page3 = ttk.Frame(self.Mnb)
self.Mnb.add(page0,text='Label Aspect')
self.Mnb.add(page1,text='Marker Aspect')
self.Mnb.add(page2,text='Label Text')
self.Mnb.add(page3,text='Marker coordinates')
self.Mnb.grid()
# Page0
ttk.Label(page0,
text='Show as text',
padding=3).grid(row=0,column=0,padx=3,sticky='e')
ttk.Checkbutton(page0,
variable=self.MARKER[ii].textmode).grid(row=0,
column=1,
padx=3,
sticky='w')
ttk.Label(page0,
text='Generic label',
padding=3).grid(row=1,column=0,padx=3,sticky='e')
ttk.Entry(page0,
textvariable=self.MARKER[ii].LABEL).grid(row=1,
column=1,
padx=3,
sticky='w')
# Page 1
dotplot.Configuration(page1,self.MARKER[ii].PLOT)
# Page 2
geomarker.TextConfigure(page2,self.MARKER[ii].PLOT)
# Page 3
geomarker.ShowData(page3,self.MARKER[ii])
f0 = ttk.Frame(ishow,padding=5)
ttk.Button(f0,text='Cancel',command=_cancel,padding=5). \
grid(row=0,column=0,padx=3)
ttk.Button(f0,text='Apply',command=_apply,padding=5). \
grid(row=0,column=1,padx=3)
ttk.Button(f0,text='Done',command=_done,padding=5). \
grid(row=0,column=2,padx=3)
f0.grid(sticky='ew')
ishow.grid()
# ======================
def shape_config(self):
# =======================
    '''Widget to configure shape geometries'''
#self.dot_config(self.MARKER[self.MARKER_INDX.get()])
global ishow
if self.nshape == 0:
messagebox.showinfo(message='No Shape file opened yet')
return
def _cancel():
# ============
self.Window_geoconfig.destroy()
self.Window_geoconfig = None
def _apply():
# ============
self.make_plot()
def _done():
# ============
self.make_plot()
self.Window_geoconfig.destroy()
self.Window_geoconfig = None
def _selected():
# ==============
global ishow
# ?????
itab = self.Mnb.index('current')
ishow.destroy()
# The usual configuration:
ii = self.SHAPE_INDX.get()
_went['textvariable'] = self.SHAPE[ii].FILENAME
ishow = ttk.Frame(self.Window_geoconfig,padding=10)
# Define tabs:
self.Mnb = ttk.Notebook(ishow)
page0 = ttk.Frame(self.Mnb)
page1 = ttk.Frame(self.Mnb)
page2 = ttk.Frame(self.Mnb)
page3 = ttk.Frame(self.Mnb)
self.Mnb.add(page0,text='Label Aspect')
self.Mnb.add(page1,text='Geometry Aspect')
self.Mnb.add(page2,text='Text Aspect')
self.Mnb.add(page3,text='Hide Features')
self.Mnb.grid()
self.Mnb.select(itab)
# Page0
ttk.Label(page0, text='Show as text',padding=3). \
grid(row=0,column=0,padx=3,sticky='e')
ttk.Checkbutton(page0,variable=self.SHAPE[ii].textmode). \
grid(row=0,column=1,padx=3,sticky='w')
ttk.Label(page0, text='Generic label',padding=3). \
grid(row=1,column=0,padx=3,sticky='e')
ttk.Entry(page0,textvariable=self.SHAPE[ii].LABEL). \
grid(row=1,column=1,padx=3,sticky='w')
ttk.Label(page0, text='Label key',padding=3). \
grid(row=2,column=0,padx=3,sticky='e')
_ksel = ttk.Combobox(page0,textvariable=self.SHAPE[ii].LABEL_KEY,
values=self.SHAPE[ii].KEY_LIST,width=12)
_ksel.grid(row=2,column=1,sticky='w',padx=3)
_ksel.bind('<<ComboboxSelected>>',lambda e:self.SHAPE[ii].get_name())
# Page 1
geoplot.Configuration(page1,self.SHAPE[ii].PLOT)
# Page 2
shape.TextConfigure(page2,self.SHAPE[ii].PLOT)
# Page 3
shape.HideData(page3,self.SHAPE[ii])
f0 = ttk.Frame(ishow,padding=5)
ttk.Button(f0,text='Cancel',command=_cancel,padding=5). \
grid(row=0,column=0,padx=3)
ttk.Button(f0,text='Apply',command=_apply,padding=5). \
grid(row=0,column=1,padx=3)
ttk.Button(f0,text='Done',command=_done,padding=5). \
grid(row=0,column=2,padx=3)
f0.grid(sticky='ew',columnspan=3)
ishow.grid()
def _loadconf():
# =============
      '''Load geometry plot configuration'''
      ii = self.SHAPE_INDX.get()
      toconsola('Restoring geometry plot configuration',wid=self.cons)
try:
self.SHAPE[ii].PLOT.load(self.SHAPE[ii].PLOT.FILECONF)
except:
toconsola('Error: Unable to load file '+self.SHAPE[ii].PLOT.FILECONF,wid=self.cons)
self.make_plot()
def _saveconf():
# =============
      '''Save geometry plot configuration'''
      ii = self.SHAPE_INDX.get()
      toconsola('Saving geometry plot configuration',wid=self.cons)
try:
self.SHAPE[ii].PLOT.save(self.SHAPE[ii].PLOT.FILECONF)
except:
toconsola('Error: Unable to write file '+self.SHAPE[ii].PLOT.FILECONF,wid=self.cons)
def _loadfromconf():
# ==================
      '''Load geometry plot configuration from a file'''
ii = self.SHAPE_INDX.get()
nn = filedialog.askopenfilename(title='Load geometry configuration',
                                      parent=self.Window_geoconfig,
initialdir=COSMO_CONF)
if len(nn) == 0:
return
self.SHAPE[ii].PLOT.FILECONF = '%s' % nn
      toconsola('Restoring geometry plot configuration from '+
self.SHAPE[ii].PLOT.FILECONF,wid=self.cons)
try:
self.SHAPE[ii].PLOT.load(self.SHAPE[ii].PLOT.FILECONF)
except:
toconsola('Error: Unable to load file '+self.SHAPE[ii].PLOT.FILECONF,wid=self.cons)
self.make_plot()
def _saveasconf():
# ================
      '''Save geometry plot configuration to a file'''
ii = self.SHAPE_INDX.get()
nn = filedialog.asksaveasfilename(title='Save geometry configuration',
                                        parent=self.Window_geoconfig,
initialdir=COSMO_CONF,
confirmoverwrite=True)
if nn is None or len(nn) == 0:
return
self.SHAPE[ii].PLOT.FILECONF = '%s' % nn
      toconsola('Saving geometry plot configuration to '+self.SHAPE[ii].PLOT.FILECONF,wid=self.cons)
try:
self.SHAPE[ii].PLOT.save(self.SHAPE[ii].PLOT.FILECONF)
except:
toconsola('Error: Unable to write file '+self.SHAPE[ii].PLOT.FILECONF,wid=self.cons)
if self.Window_geoconfig is not None:
self.Window_geoconfig.lift()
return
self.Window_geoconfig = tk.Toplevel(self.master)
self.Window_geoconfig.title('Shape geometry plot configuration')
self.Window_geoconfig.resizable(width=True,height=True)
self.Window_geoconfig.protocol('WM_DELETE_WINDOW',_cancel)
menubar = tk.Menu(self.Window_geoconfig)
menu = tk.Menu(menubar,tearoff=0)
menubar.add_cascade(label='Configuration',menu=menu)
menu.add_command(label='Restore',command=_loadconf)
menu.add_command(label='Restore from',command=_loadfromconf)
menu.add_command(label='Save',command=_saveconf)
menu.add_command(label='Save as',command=_saveasconf)
try:
self.Window_geoconfig.config(menu=menubar)
except AttributeError:
# master is a toplevel window (Python 2.4/Tkinter 1.63)
      self.master.tk.call(self.Window_geoconfig, "config", "-menu", menubar)
fsel = ttk.Frame(self.Window_geoconfig,padding=10)
ttk.Label(fsel,text="File: ").grid(row=0,column=0,sticky='e',padx=3)
_wsel = ttk.Combobox(fsel,textvariable=self.SHAPE_INDX,
values=self.SHAPE_LIST,width=5)
_wsel.grid(row=0,column=1,sticky='w',padx=3)
_wsel.bind('<<ComboboxSelected>>',lambda e:_selected())
    _went = ttk.Entry(fsel,justify='left',width=80,state='readonly')
    _went.grid(row=0,column=2,columnspan=8,padx=3,sticky='w')
fsel.grid()
# The usual configuration:
ii = self.SHAPE_INDX.get()
_went['textvariable'] = self.SHAPE[ii].FILENAME
ishow = ttk.Frame(self.Window_geoconfig,padding=10)
# Define tabs:
self.Mnb = ttk.Notebook(ishow)
page0 = ttk.Frame(self.Mnb)
page1 = ttk.Frame(self.Mnb)
page2 = ttk.Frame(self.Mnb)
page3 = ttk.Frame(self.Mnb)
self.Mnb.add(page0,text='Label Aspect')
self.Mnb.add(page1,text='Geometry Aspect')
self.Mnb.add(page2,text='Text Aspect')
    self.Mnb.add(page3,text='Hide Features')
self.Mnb.grid()
# Page0
ttk.Label(page0,text='Show as text',padding=3). \
grid(row=0,column=0,padx=3,sticky='e')
ttk.Checkbutton(page0,variable=self.SHAPE[ii].textmode). \
grid(row=0, column=1,padx=3, sticky='w')
ttk.Label(page0,text='Generic label',padding=3). \
grid(row=1,column=0,padx=3,sticky='e')
ttk.Entry(page0,textvariable=self.SHAPE[ii].LABEL).\
grid(row=1, column=1,padx=3, sticky='w')
ttk.Label(page0, text='Label key',padding=3). \
grid(row=2,column=0,padx=3,sticky='e')
_ksel = ttk.Combobox(page0,textvariable=self.SHAPE[ii].LABEL_KEY,
values=self.SHAPE[ii].KEY_LIST,width=12)
_ksel.grid(row=2,column=1,sticky='w',padx=3)
_ksel.bind('<<ComboboxSelected>>',lambda e:self.SHAPE[ii].get_name())
# Page 1
geoplot.Configuration(page1,self.SHAPE[ii].PLOT)
# Page 2
shape.TextConfigure(page2,self.SHAPE[ii].PLOT)
    # Page 3: unmanageable if there are many geometries
shape.HideData(page3,self.SHAPE[ii])
f0 = ttk.Frame(ishow,padding=5)
ttk.Button(f0,text='Cancel',command=_cancel,padding=5). \
grid(row=0,column=0,padx=3)
ttk.Button(f0,text='Apply',command=_apply,padding=5). \
grid(row=0,column=1,padx=3)
ttk.Button(f0,text='Done',command=_done,padding=5). \
grid(row=0,column=2,padx=3)
f0.grid(sticky='ew')
ishow.grid()
# =======================
def get_lagrangian(self):
# =======================
'''Widget to retrieve Lagrangian trajectory data'''
self.LSOURCE = tk.StringVar()
self.LSOURCE.set(self.FLOAT_OPTIONS[0])
#EG
self.COUNT=[]
def _cancel():
# ===========
self.Window_float.destroy()
self.Window_float = None
def _close():
# ===========
ii = self.FLOAT_INDX.get()
if self.FLOAT[ii].CROP.get():
nt = self.FLOAT[ii].nrecords
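        # Keep only the records whose dates fall within the current time axis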
ppi = [i for i in range(nt) if self.FLOAT[ii].DATE[i] >= self.DATE[0]]
ppf = [i for i in range(nt) if self.FLOAT[ii].DATE[i] > self.DATE[-1]]
        pi = ppi[0] if len(ppi) > 0 else 0
        pf = ppf[0] - 1 if len(ppf) > 0 else nt - 1
#print('Initial index : ', pi)
#print('Final index : ', pf)
#print(self.FLOAT[ii].nfloats)
#print(self.FLOAT[ii].nrecords)
if self.FLOAT[ii].nfloats > 1:
lon = self.FLOAT[ii].lon[pi:pf+1,:]
lat = self.FLOAT[ii].lat[pi:pf+1,:]
date = self.FLOAT[ii].DATE[pi:pf+1]
TIME = self.FLOAT[ii].TIME[pi:pf+1]
self.FLOAT[ii].lon = lon
self.FLOAT[ii].lat = lat
self.FLOAT[ii].DATE = date
self.FLOAT[ii].TIME = TIME
else:
lon = self.FLOAT[ii].lon[pi:pf+1]
lat = self.FLOAT[ii].lat[pi:pf+1]
date = self.FLOAT[ii].DATE[pi:pf+1]
TIME = self.FLOAT[ii].TIME[pi:pf+1]
self.FLOAT[ii].lon = lon
self.FLOAT[ii].lat = lat
self.FLOAT[ii].DATE = date
self.FLOAT[ii].TIME = TIME
self.FLOAT[ii].nrecords = len(date)
#print('DATE[0] = ',self.FLOAT[ii].DATE[0])
#print('DATE[n] = ',self.FLOAT[ii].DATE[-1])
#print('TIME[0] = ',datetime.datetime.fromtimestamp(self.FLOAT[ii].TIME[0]))
#print('TIME[n] = ',datetime.datetime.fromtimestamp(self.FLOAT[ii].TIME[-1]))
#print(self.FLOAT[ii].lon)
#print(self.FLOAT[ii].lat)
#print(self.FLOAT[ii].date)
self.Window_float.destroy()
self.Window_float = None
self.make_plot()
if self.Window_lineconfig is not None:
self.Window_lineconfig.destroy()
self.Window_lineconfig = None
self.lagrangian_config()
def _clear():
# ===========
if self.nfloat == 0:
return
ii = self.FLOAT_INDX.get()
self.LAYERS.erase('FLOAT',ii,wid=self.cons)
self.LAYERS.print()
#for i in range(self.nfiles):
# if self.FILETYPES[i] == 'FLOAT' and self.FILEORDER[i] == ii:
# del self.FILENAMES[i]
# del self.FILETYPES[i]
# del self.FILEORDER[i]
# del self.SEQUENCES[i]
# del self.SEQLEADER[i]
# del self.SEQNTIMES[i]
#self.nfiles -= 1
if self.LAYERS.n == 0:
self.TIME = []
self.DATE = []
self.L.set(0)
self.L_LIST = []
self.NL = 0
self.bnext.configure(state='disabled')
self.bprev.configure(state='disabled')
self.PLOT.TLABEL.set('')
self.lbox['values'] = self.L_LIST
self.lbox.configure(state='disabled')
self.first = True
toconsola('Erasing record '+str(ii),wid=self.cons)
del self.FLOAT[ii]
self.nfloat -= 1
ii = self.nfloat-1 if ii >= self.nfloat else ii
toconsola('new nfloat = '+str(self.nfloat),wid=self.cons)
self.FLOAT_INDX.set(ii)
_refill(ii)
#_close()
def _reget():
# ===========
self.FLOAT_INDX.set(_wsel.get())
ii = self.FLOAT_INDX.get()
_refill(ii)
def _refill(ii):
# ==============
#print("entro refill",ii)
if ii >= 0:
self.COUNT.append(tk.StringVar())
self.COUNT[-1].set(str(ii))
self.FLOAT_LIST = list(range(self.nfloat))
ttk.Label(self.F1,textvariable=self.COUNT[-1],anchor='center', \
background="#fff",foreground="#000000",width=5).grid(row=ii+1,column=0)
ttk.Label(self.F1,textvariable=self.FLOAT[ii].FILENAME,\
background="#fff",foreground="#000000",justify='left').grid(row=ii+1,column=1,padx=3,sticky='w')
ttk.Entry(self.F1,textvariable=self.FLOAT[ii].ALIAS,width=15).grid(row=ii+1,column=2,sticky='w')
        tk.Checkbutton(self.F1,variable=self.FLOAT[ii].CROP).grid(row=ii+1,column=3,sticky='we')
        tk.Checkbutton(self.F1,variable=self.FLOAT[ii].show,command=self.make_plot).grid(row=ii+1,column=4,sticky='we')
#EG _wsel['values'] = self.FLOAT_LIST
#EG _went['textvariable'] = self.FLOAT[ii].FILENAME
#EG _wstat['text'] = ' Nfloats = '+str(self.FLOAT[ii].nfloats)
#EG _wsel.configure(state='normal')
#EG _show.configure(state='normal')
#EG _show['variable']=self.FLOAT[ii].show
#EG _aent.configure(state='normal')
#EG _aent['textvariable'] = self.FLOAT[ii].ALIAS
#EG _wcrp.configure(state='normal')
#EG _wcrp['variable']=self.FLOAT[ii].CROP
else:
self.FLOAT = []
self.FLOAT_LIST = ['0']
self.FLOAT_INDX = tk.IntVar()
self.FLOAT_INDX.set(0)
#EG _wsel['values'] = self.FLOAT_LIST
#EG _went['textvariable'] = ''
#EG _wstat['text'] = ''
#EG _wsel.configure(state='disabled')
#EG _aent.configure(state='disabled')
#EG _show.configure(state='disabled')
#EG _wcrp.configure(state='disabled')
def _add():
# ========
ISOURCE = self.FLOAT_OPTIONS.index(self.LSOURCE.get())
if ISOURCE == 0:
# self.Lagrangian_types=[('Netcdf','*.nc'),('JSON','*.json'),('GEOJSON','*.geojson'),('ALL','*')]
#EG OLD code
# ''' nn = filedialog.askopenfile(parent=self.Window_float, \
# filetypes=types)
# try:
# if empty(nn.name):
# return
# except:
# return
# _load_trajectory(nn.name)
# '''
#EG New code
nn = filedialog.askopenfilenames(parent=self.Window_float,\
filetypes=self.Lagrangian_types)
if len(nn) == 0: return
toconsola("======= Trajectories ======",tag="o",wid=self.cons)
for filename in nn:
_load_trajectory(filename)
# Remember the selected extension and use as default for the next call
# Consider the last filename and retrieve its extension:
selected_basename,selected_extension = os.path.splitext(filename)
indx = -1
iii = -1
all = -1
for filetype in self.Lagrangian_types:
indx = indx + 1
if selected_extension in filetype[1]: iii = indx
if '*' in filetype[1]: all = indx
# If no extension has been found, we assume that it was the ALL:
if iii == -1: iii = all
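          # Move the matched file type to the front so it becomes the default filter next time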
self.Lagrangian_types.insert(0,self.Lagrangian_types.pop(iii))
toconsola("=====================",tag="o", wid=self.cons)
elif ISOURCE == 1:
path = '%s' % filedialog.askdirectory(parent=self.Window_float, \
title='Select local trajectory folder')
if empty(path):
return
filelist = folderList(path,'geojson')
if len(filelist) > 0:
for f in filelist:
filename = join(path,f)
toconsola('Loading file: '+filename,wid=self.cons)
_load_trajectory(filename)
elif ISOURCE == 2:
url = simple_form('Select remote trajectory folder','url')
if empty(url):
return
filelist = urlList(url,'geojson')
if len(filelist) > 0:
for filename in filelist:
toconsola('Loading file: '+filename,wid=self.cons)
_load_trajectory(filename)
elif ISOURCE == 3:
filelist = db.select_exp()
if len(filelist) > 0:
for filename in filelist:
_load_trajectory(filename)
def _load_trajectory(filename):
# ==================================
FLT = lagrangian.parameters(wid=self.cons)
FLT.Read(filename)
if FLT.nfloats is None or FLT.nfloats==0 or FLT.nrecords==0:
return
if self.NL > 0:
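        # A time axis already exists: interpolate the trajectory onto the common TIME values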
if FLT.nfloats > 1:
MAPX = []
MAPY = []
for i in range(FLT.nfloats):
f = interpolate.interp1d(FLT.TIME,FLT.lon[:,i],
bounds_error=False, fill_value=np.NaN)
MAPX.append(f(self.TIME))
f = interpolate.interp1d(FLT.TIME,FLT.lat[:,i],
bounds_error=False, fill_value=np.NaN)
MAPY.append(list(f(self.TIME)))
FLT.MAPX = np.array(MAPX).T
FLT.MAPY = np.array(MAPY).T
else:
FLT.Fx = interpolate.interp1d(FLT.TIME,FLT.lon,
bounds_error=False, fill_value=np.NaN)
FLT.MAPX = FLT.Fx(self.TIME)
FLT.Fy = interpolate.interp1d(FLT.TIME,FLT.lat,
bounds_error=False, fill_value=np.NaN)
FLT.MAPY = FLT.Fy(self.TIME)
self.nfloat += 1
self.FLOAT.append(FLT)
self.FLOAT_INDX.set(self.nfloat-1)
self.FLOAT_LIST = list(range(self.nfloat))
n = self.LAYERS.n
# Adding a FLOAT in the Drawing class
#
nt = len(FLT.lon)
self.LAYERS.add(TYPE='FLOAT',Filename=FLT.FILENAME.get(),N=nt,wid=self.cons)
#self.nfiles += 1
#self.FILENAMES.append(FLT.FILENAME.get())
#self.FILETYPES.append('FLOAT')
#self.SEQUENCES.append(tk.BooleanVar(value=False))
#self.SEQLEADER.append(tk.BooleanVar(value=False))
#self.SEQNTIMES.append(1)
#self.FILEORDER.append(self.nfloat-1)
ii = self.FLOAT_INDX.get()
if self.first:
        # Set the plot limits according to the data
if self.drawmap is None:
self.PLOT.WEST.set(np.nanmin(FLT.lon)-1)
self.PLOT.EAST.set(np.nanmax(FLT.lon)+1)
self.PLOT.SOUTH.set(np.nanmin(FLT.lat)-1)
self.PLOT.NORTH.set(np.nanmax(FLT.lat)+1)
self.plot_initialize()
#self.PLOT.XLABEL.set('Longitude')
#self.PLOT.YLABEL.set('Latitude')
self.DATE = FLT.DATE.copy()
self.TIME = FLT.TIME.copy()
self.PLOT.TLABEL.set(self.DATE[self.L.get()])
self.PLOT.VIDEO_L2.set(len(self.DATE)-1)
self.first = False
if nt > 1:
if self.NL == 0:
toconsola('FLOAT initiates Time axis',wid=self.cons)
#self.LAYERS.nsequence = 1
#self.LAYERS.INSEQUENCE[n-1].set(True)
#self.LAYERS.SEQUENCER[n-1].set(True)
#self.LAYERS.leader = n-1
#self.LAYERS.seqlen = nt
# self.SEQUENCES[-1].set(True)
# self.SEQLEADER[-1].set(True)
# self.SEQLEADER_INDX = self.nfiles
self.FLOAT[ii].LINK.set(True)
self.FLOAT[ii].MAPX = self.FLOAT[ii].lon.copy()
self.FLOAT[ii].MAPY = self.FLOAT[ii].lat.copy()
self.DATE = self.FLOAT[ii].DATE.copy()
self.TIME = self.FLOAT[ii].TIME.copy()
self.L.set(self.FLOAT[ii].L.get())
self.L_LIST = list(range(nt))
self.NL = nt
self.lbox.configure(state='normal')
self.lbox['values'] = self.L_LIST
if self.L.get() < self.NL-1:
self.bnext.configure(state='normal')
if self.L.get() > 0:
self.bprev.configure(state='normal')
else:
if nt == self.NL:
toconsola('Linking trajectory to TIME axis',wid=self.cons)
# self.LAYERS.nsequence += 1
# self.LAYERS.INSEQUENCE[n-1].set(True)
# self.LAYERS.SEQUENCER[n-1].set(False)
# self.nsequence += 1
# self.SEQUENCES[-1].set(True)
# self.SEQLEADER[-1].set(False)
self.FLOAT[ii].LINK.set(True)
self.FLOAT[ii].L.set(self.L.get()) #Synchronize records
#print("load self.LAYERS.nsequence",self.LAYERS.nsequence)
#print("abasn refill",self.FLOAT_LIST)
_refill(ii)
# Main window:
# ============
if self.Window_float is None:
self.Window_float = tk.Toplevel(self.master)
self.Window_float.title("Lagrangian Trajectories")
self.Window_float.protocol('WM_DELETE_WINDOW',_cancel)
else:
self.Window_float.lift()
if self.nfloat > 0:
ii = self.FLOAT_INDX.get()
else:
ii = -1
F0 = ttk.Frame(self.Window_float,padding=5)
    #EG New interface
# Add
#EG ttk.Combobox(F0,textvariable=self.LSOURCE, \
#EG values=self.FLOAT_OPTIONS).grid(row=0,column=0,padx=3)
ttk.Button(F0,text='Import',command=_add).grid(row=0,column=0,padx=3)
ttk.Combobox(F0,textvariable=self.LSOURCE, \
values=self.FLOAT_OPTIONS).grid(row=0,column=1)
F0.grid(row=0,column=0,sticky="w")
# Filename:
ttk.Separator(self.Window_float, orient='horizontal').grid(row=1,column=0,sticky="nesw")
#EG F1 = ttk.Frame(self.Window_float,padding=5)
#EG ttk.Label(F0,text='Float file').grid(row=0,column=1,padx=3)
#EG _wsel = ttk.Combobox(F0,textvariable=self.FLOAT_INDX, \
#EG values=self.FLOAT_LIST,width=5)
#EG _wsel.grid(row=0,column=2)
#EG _wsel.bind('<<ComboboxSelected>>',lambda e: _reget())
#EG _went = ttk.Entry(F0,justify='left',width=50,state='readonly')
#EG _went.grid(row=0,column=3,columnspan=5,padx=3,sticky='w')
#EG
self.F1 = ttk.Frame(self.Window_float,padding=5)
ttk.Label(self.F1,text='Nfloat',width=5).grid(row=0,column=0)
ttk.Label(self.F1,text='Float file',anchor="center",width=50).grid(row=0,column=1,sticky='we')
ttk.Label(self.F1,text='Alias',anchor="center",width=15).grid(row=0,column=2)
ttk.Label(self.F1,text='Crop').grid(row=0,column=3)
ttk.Label(self.F1,text='Show').grid(row=0,column=4)
self.F1.grid(row=2,column=0)
#EGttk.Label(F1,text='Float file').grid(row=0,column=1,padx=3)
#_wsel = ttk.Combobox(F1,textvariable=self.FLOAT_INDX, \
# values=self.FLOAT_LIST,width=5)
#_wsel.grid(row=0,column=2)
#_wsel.bind('<<ComboboxSelected>>',lambda e: _reget())
#_went = ttk.Entry(F1,justify='left',width=50,state='readonly')
#_went.grid(row=0,column=3,columnspan=5,padx=3,sticky='w')
# AAA
#if ii == -1:
# _wstat = ttk.Label(F1,text='',width=50,justify='left')
# _wsel.configure(state='disabled')
#else:
# _wstat = ttk.Label(F1,text=' Floats in the file= '+str(self.FLOAT[ii].nfloats),width=50,justify='left')
# _went['textvariable'] = self.FLOAT[ii].FILENAME
#_wstat.grid(row=1,column=3,columnspan=5,padx=3,sticky='w')
#Alias
#ttk.Label(F1,text='Alias').grid(row=2,column=1,padx=3,pady=3)
#_aent = ttk.Entry(F0,width=15,justify='left')
#_aent.grid(row=2,column=2,columnspan=2,sticky='w')
#_wcrp = ttk.Checkbutton(F0,text='Crop')
#_wcrp.grid(row=3,column=1,sticky='w')
#EGF0.grid(row=0,column=0)
ttk.Separator(self.Window_float, orient='horizontal').grid(row=3,column=0,sticky="nesw")
F2 = ttk.Frame(self.Window_float,padding=5)
#_show.grid(row=1,column=5,padx=3)
ttk.Button(F2,text='Cancel',command=_cancel).grid(row=0,column=0,padx=3)
ttk.Button(F2,text='Clear',command=_clear).grid(row=0,column=1,padx=3)
ttk.Button(F2,text='Plot',command=_close).grid(row=0,column=2,padx=3)
F2.grid(row=4,column=0)
# ========================
def currents_config(self):
# ========================
global fshow
if self.nvec == 0:
messagebox.showinfo(message='No currents file opened yet')
return
#else:
# self.vector_config(self.VEC[self.VEC_INDX.get()].VEL)
def _cancel():
# ============
self.Window_vectorconfig.destroy()
self.Window_vectorconfig = None
def _apply():
# ============
self.make_plot()
def _done():
# ============
self.make_plot()
self.Window_vectorconfig.destroy()
self.Window_vectorconfig = None
def _selected():
# ==============
global fshow
fshow.destroy()
# The usual configuration:
ii = self.VEC_INDX.get()
_went['textvariable'] = self.VEC[ii].UFILENAME
fshow = ttk.Frame(self.Window_vectorconfig,padding=10)
vectorplot.Configuration(parent=fshow,
PLOT=self.VEC[ii].PLOT)
f0 = ttk.Frame(fshow,padding=5)
ttk.Button(f0,text='Cancel',command=_cancel,padding=5). \
grid(row=0,column=0,padx=3)
ttk.Button(f0,text='Apply',command=_apply,padding=5). \
grid(row=0,column=1,padx=3)
ttk.Button(f0,text='Done',command=_done,padding=5). \
grid(row=0,column=2,padx=3)
f0.grid(sticky='ew',columnspan=3)
fshow.grid()
def _loadconf():
# =============
'''Load vector configuration'''
toconsola('Restoring vector configuration from '+
self.VEC[ii].PLOT.FILECONF,wid=self.cons)
try:
        self.VEC[ii].PLOT.load(self.VEC[ii].PLOT.FILECONF)
self.make_plot()
except:
toconsola('Error: Unable to load file '+
self.VEC[ii].PLOT.FILECONF,wid=self.cons)
def _saveconf():
# =============
      '''Save vector configuration'''
toconsola('Saving vector configuration to '+
self.VEC[ii].PLOT.FILECONF,wid=self.cons)
try:
self.VEC[ii].PLOT.save(self.VEC[ii].PLOT.FILECONF)
self.make_plot()
except:
toconsola('Error: Unable to write file '+
self.VEC[ii].PLOT.FILECONF,wid=self.cons)
def _loadfromconf():
# ==================
'''Load vector configuration from a file'''
nn = filedialog.askopenfilename(title='Load vector configuration',
parent=self.Window_vectorconfig,
initialdir=COSMO_CONF)
if len(nn) == 0:
return
self.VEC[ii].PLOT.FILECONF = '%s' % nn
toconsola('Restoring vector configuration from '+
self.VEC[ii].PLOT.FILECONF,wid=self.cons)
try:
self.VEC[ii].PLOT.load(self.VEC[ii].PLOT.FILECONF)
self.make_plot()
except:
toconsola('Error: Unable to load file '+
self.VEC[ii].PLOT.FILECONF,wid=self.cons)
def _saveasconf():
# ================
      '''Save vector configuration to a file'''
nn = filedialog.asksaveasfilename(title='Save vector configuration',
parent=self.Window_vectorconfig,
initialdir=COSMO_CONF,
confirmoverwrite=True)
if nn is None or len(nn) == 0:
return
self.VEC[ii].PLOT.FILECONF = '%s' % nn
toconsola('Saving vector configuration to '+self.VEC[ii].PLOT.FILECONF,wid=self.cons)
try:
self.VEC[ii].PLOT.save(self.VEC[ii].PLOT.FILECONF)
except:
toconsola('Error: Unable to write file '+
self.VEC[ii].PLOT.FILECONF,wid=self.cons)
if self.Window_vectorconfig is not None:
self.Window_vectorconfig.lift()
return
self.Window_vectorconfig = tk.Toplevel(self.master)
self.Window_vectorconfig.title('Vector plot configuration')
self.Window_vectorconfig.resizable(width=True,height=True)
self.Window_vectorconfig.protocol('WM_DELETE_WINDOW',_cancel)
menubar = tk.Menu(self.Window_vectorconfig)
menu = tk.Menu(menubar,tearoff=0)
menubar.add_cascade(label='Configuration',menu=menu)
menu.add_command(label='Restore',command=_loadconf)
menu.add_command(label='Restore from',command=_loadfromconf)
menu.add_command(label='Save',command=_saveconf)
menu.add_command(label='Save as',command=_saveasconf)
self.Window_vectorconfig.config(menu=menubar)
#try:
# self.Window_vectorconfig.config(menu=menubar)
#except AttributeError:
# # master is a toplevel window (Python 2.4/Tkinter 1.63)
# master.tk.call(self.Window_vectorconfig, "config", "-menu", menubar)
fsel = ttk.Frame(self.Window_vectorconfig,padding=10)
ttk.Label(fsel,text="File: ").grid(row=0,column=0,sticky='e',padx=3)
_wsel = ttk.Combobox(fsel,textvariable=self.VEC_INDX,
values=self.VEC_LIST,width=5)
_wsel.grid(row=0,column=1,sticky='w',padx=3)
_wsel.bind('<<ComboboxSelected>>',lambda e:_selected())
    _went = ttk.Entry(fsel,justify='left',width=80,state='readonly')
    _went.grid(row=0,column=2,columnspan=8,padx=3,sticky='w')
fsel.grid()
# The usual configuration:
ii = self.VEC_INDX.get()
_went['textvariable'] = self.VEC[ii].UFILENAME
fshow = ttk.Frame(self.Window_vectorconfig,padding=10)
vectorplot.Configuration(parent=fshow,
PLOT=self.VEC[ii].PLOT)
f0 = ttk.Frame(fshow,padding=5)
ttk.Button(f0,text='Cancel',command=_cancel,padding=5). \
grid(row=0,column=0,padx=3)
ttk.Button(f0,text='Apply',command=_apply,padding=5). \
grid(row=0,column=1,padx=3)
ttk.Button(f0,text='Done',command=_done,padding=5). \
grid(row=0,column=2,padx=3)
f0.grid(sticky='ew',columnspan=3)
fshow.grid()
# ======================
def saidin_config(self):
# ======================
if empty(self.SAIDIN.FILENAME.get()):
messagebox.showinfo(message='No SST image opened yet')
return
def _apply():
# ===========
self.make_plot()
def _done():
# ==========
self.make_plot()
self.Window_saidinconfig.destroy()
self.Window_saidinconfig = None
def _loadconf():
# =============
'''Load contour configuration'''
toconsola('Restoring contour configuration from '+
self.SAIDIN.PLOT.FILECONF,wid=self.cons)
try:
self.SAIDIN.PLOT.load(self.SAIDIN.PLOT.FILECONF)
self.make_plot()
except:
toconsola('Error: Unable to load file '+
self.SAIDIN.PLOT.FILECONF,wid=self.cons)
def _saveconf():
# =============
      '''Save contour configuration'''
toconsola('Saving contour configuration to '+
self.SAIDIN.PLOT.FILECONF,wid=self.cons)
try:
        self.SAIDIN.PLOT.save(self.SAIDIN.PLOT.FILECONF)
except:
toconsola('Error: Unable to write file '+
self.SAIDIN.PLOT.FILECONF,wid=self.cons)
def _loadfromconf():
# ==================
'''Load contour configuration from a file'''
nn = filedialog.askopenfilename(title='Load contour configuration',
parent=self.Window_saidinconfig,
initialdir=COSMO_CONF)
if len(nn) == 0:
return
self.SAIDIN.PLOT.FILECONF = '%s' % nn
toconsola('Restoring contour configuration from '+
self.SAIDIN.PLOT.FILECONF,wid=self.cons)
try:
self.SAIDIN.PLOT.load(self.SAIDIN.PLOT.FILECONF)
self.make_plot()
except:
toconsola('Error: Unable to load file '+
self.SAIDIN.PLOT.FILECONF,wid=self.cons)
def _saveasconf():
# ================
      '''Save contour configuration to a file'''
nn = filedialog.asksaveasfilename(title='Save contour configuration',
parent=self.Window_saidinconfig,
initialdir=COSMO_CONF,
confirmoverwrite=True)
if nn is None or len(nn) == 0:
return
self.SAIDIN.PLOT.FILECONF = '%s' % nn
toconsola('Saving contour configuration to '+
self.SAIDIN.PLOT.FILECONF,wid=self.cons)
try:
self.SAIDIN.PLOT.save(self.SAIDIN.PLOT.FILECONF)
except:
toconsola('Error: Unable to write file '+
self.SAIDIN.PLOT.FILECONF,wid=self.cons)
if self.Window_saidinconfig is not None:
self.Window_saidinconfig.lift()
return
self.Window_saidinconfig = tk.Toplevel(self.master)
self.Window_saidinconfig.title('SST image configuration')
self.Window_saidinconfig.resizable(width=True,height=True)
self.Window_saidinconfig.protocol('WM_DELETE_WINDOW',_done)
menubar = tk.Menu(self.Window_saidinconfig)
menu = tk.Menu(menubar,tearoff=0)
menubar.add_cascade(label='Configuration',menu=menu)
menu.add_command(label='Restore',command=_loadconf)
menu.add_command(label='Restore from',command=_loadfromconf)
menu.add_command(label='Save',command=_saveconf)
menu.add_command(label='Save as',command=_saveasconf)
try:
self.Window_saidinconfig.config(menu=menubar)
except AttributeError:
# master is a toplevel window (Python 2.4/Tkinter 1.63)
      self.master.tk.call(self.Window_saidinconfig, "config", "-menu", menubar)
gshow = ttk.Frame(self.Window_saidinconfig,padding=10)
contourplot.Configuration(parent=gshow,
varname=self.SAIDIN.FLD.varname,
units=self.SAIDIN.FLD.units,
missing=self.SAIDIN.FLD.missing,
minval=self.SAIDIN.FLD.minval,
maxval=self.SAIDIN.FLD.maxval,
PLOT=self.SAIDIN.PLOT)
f0 = ttk.Frame(gshow,padding=5)
ttk.Button(f0,text='Apply',command=_apply,padding=5). \
grid(row=0,column=1,padx=3)
ttk.Button(f0,text='Close',command=_done,padding=5). \
grid(row=0,column=2,padx=3)
f0.grid(sticky='ew',columnspan=3)
gshow.grid()
# =======================
def contour_config(self):
# =======================
global gshow
if self.ncdf == 0:
messagebox.showinfo(message='No Netcdf file opened yet')
return
def _cancel():
# ============
self.Window_contourconfig.destroy()
self.Window_contourconfig = None
def _apply():
# ===========
self.make_plot()
def _done():
# ==========
self.Window_contourconfig.destroy()
self.Window_contourconfig = None
self.make_plot()
def _selected():
# ==============
global gshow
gshow.destroy()
# The usual configuration
ii = self.CDF_INDX.get()
_went['textvariable'] = self.CDF[ii].FILENAME
gshow = ttk.Frame(self.Window_contourconfig,padding=10)
contourplot.Configuration(parent=gshow,
varname=self.CDF[ii].FLD.varname,
units=self.CDF[ii].FLD.units,
missing=self.CDF[ii].FLD.missing,
minval=self.CDF[ii].FLD.minval,
maxval=self.CDF[ii].FLD.maxval,
PLOT=self.CDF[ii].PLOT)
f0 = ttk.Frame(gshow,padding=5)
ttk.Button(f0,text='Cancel',command=_cancel,padding=5). \
grid(row=0,column=0,padx=3)
ttk.Button(f0,text='Apply',command=_apply,padding=5). \
grid(row=0,column=1,padx=3)
ttk.Button(f0,text='Done',command=_done,padding=5). \
grid(row=0,column=2,padx=3)
f0.grid(sticky='ew',columnspan=3)
gshow.grid()
def _loadconf():
# =============
'''Load contour configuration'''
toconsola('Restoring contour configuration from '+
self.CDF[ii].PLOT.FILECONF,wid=self.cons)
try:
self.CDF[ii].PLOT.load(self.CDF[ii].PLOT.FILECONF)
self.make_plot()
except:
toconsola('Error: Unable to load file '+
self.CDF[ii].PLOT.FILECONF,wid=self.cons)
def _saveconf():
# =============
      '''Save contour configuration'''
toconsola('Saving contour configuration to '+
self.CDF[ii].PLOT.FILECONF,wid=self.cons)
try:
        self.CDF[ii].PLOT.save(self.CDF[ii].PLOT.FILECONF)
except:
toconsola('Error: Unable to write file '+
self.CDF[ii].PLOT.FILECONF,wid=self.cons)
def _loadfromconf():
# ==================
'''Load contour configuration from a file'''
nn = filedialog.askopenfilename(title='Load contour configuration',
parent=self.Window_contourconfig,
initialdir=COSMO_CONF)
if len(nn) == 0:
return
self.CDF[ii].PLOT.FILECONF = '%s' % nn
toconsola('Restoring contour configuration from '+
self.CDF[ii].PLOT.FILECONF,wid=self.cons)
try:
self.CDF[ii].PLOT.load(self.CDF[ii].PLOT.FILECONF)
self.make_plot()
except:
toconsola('Error: Unable to load file '+
self.CDF[ii].PLOT.FILECONF,wid=self.cons)
def _saveasconf():
# ================
      '''Save contour configuration to a file'''
nn = filedialog.asksaveasfilename(title='Save contour configuration',
parent=self.Window_contourconfig,
initialdir=COSMO_CONF,
confirmoverwrite=True)
if nn is None or len(nn) == 0:
return
self.CDF[ii].PLOT.FILECONF = '%s' % nn
toconsola('Saving contour configuration to '+
self.CDF[ii].PLOT.FILECONF,wid=self.cons)
try:
self.CDF[ii].PLOT.save(self.CDF[ii].PLOT.FILECONF)
except:
toconsola('Error: Unable to write file '+
self.CDF[ii].PLOT.FILECONF,wid=self.cons)
if self.Window_contourconfig is not None:
self.Window_contourconfig.lift()
return
self.Window_contourconfig = tk.Toplevel(self.master)
self.Window_contourconfig.title('Contour plot configuration')
self.Window_contourconfig.resizable(width=True,height=True)
self.Window_contourconfig.protocol('WM_DELETE_WINDOW',_cancel)
menubar = tk.Menu(self.Window_contourconfig)
menu = tk.Menu(menubar,tearoff=0)
menubar.add_cascade(label='Configuration',menu=menu)
menu.add_command(label='Restore',command=_loadconf)
menu.add_command(label='Restore from',command=_loadfromconf)
menu.add_command(label='Save',command=_saveconf)
menu.add_command(label='Save as',command=_saveasconf)
try:
self.Window_contourconfig.config(menu=menubar)
except AttributeError:
# master is a toplevel window (Python 2.4/Tkinter 1.63)
      self.master.tk.call(self.Window_contourconfig, "config", "-menu", menubar)
fsel = ttk.Frame(self.Window_contourconfig,padding=10)
ttk.Label(fsel,text="File: ").grid(row=0,column=0,sticky='e',padx=3)
_wsel = ttk.Combobox(fsel,textvariable=self.CDF_INDX,
values=self.CDF_LIST,width=5)
_wsel.grid(row=0,column=1,sticky='w',padx=3)
_wsel.bind('<<ComboboxSelected>>',lambda e:_selected())
    _went = ttk.Entry(fsel,justify='left',width=80,state='readonly')
    _went.grid(row=0,column=2,columnspan=8,padx=3,sticky='w')
fsel.grid()
# The usual configuration:
ii = self.CDF_INDX.get()
_went ['textvariable'] = self.CDF[ii].FILENAME
gshow = ttk.Frame(self.Window_contourconfig,padding=10)
contourplot.Configuration(parent=gshow,
varname=self.CDF[ii].FLD.varname,
units=self.CDF[ii].FLD.units,
missing=self.CDF[ii].FLD.missing,
minval=self.CDF[ii].FLD.minval,
maxval=self.CDF[ii].FLD.maxval,
PLOT=self.CDF[ii].PLOT)
f0 = ttk.Frame(gshow,padding=5)
ttk.Button(f0,text='Apply',command=_apply,padding=5). \
grid(row=0,column=1,padx=3)
ttk.Button(f0,text='Close',command=_done,padding=5). \
grid(row=0,column=2,padx=3)
f0.grid(sticky='ew',columnspan=3)
gshow.grid()
# ==========================
def lagrangian_config(self):
# ==========================
global hshow
if self.nfloat == 0:
messagebox.showinfo(message='No Trajectory file opened yet')
return
def _cancel():
# ============
self.Window_lineconfig.destroy()
self.Window_lineconfig = None
def _apply():
# ============
self.make_plot()
def _done():
# ============
self.make_plot()
self.Window_lineconfig.destroy()
self.Window_lineconfig = None
def _selected():
# ==============
global hshow
hshow.destroy()
# The usual configuration:
ii = self.FLOAT_INDX.get()
_went['textvariable'] = self.FLOAT[ii].FILENAME
hshow = ttk.Frame(self.Window_lineconfig,padding=10)
# Define tabs:
nb = ttk.Notebook(hshow)
page1 = ttk.Frame(nb)
page2 = ttk.Frame(nb)
page3 = ttk.Frame(nb)
nb.add(page1,text='Line Configuration')
nb.add(page2,text='Trajectory options')
nb.add(page3,text='Trajectory data')
nb.grid()
# Page 1
#lineplot.WinConfig(self.Window_lineconfig,LL)
lineplot.Configuration(page1,self.FLOAT[ii].PLOT)
# Page 2
lineplot.Configuration_OnMap(page2,self.FLOAT[ii].PLOT,self.FLOAT[ii])
# Page 3
lagrangian.ShowData(page3,self.FLOAT[ii])
f0 = ttk.Frame(hshow,padding=5)
ttk.Button(f0,text='Cancel',command=_cancel,padding=5). \
grid(row=0,column=0,padx=3)
ttk.Button(f0,text='Apply',command=_apply,padding=5). \
grid(row=0,column=1,padx=3)
ttk.Button(f0,text='Done',command=_done,padding=5). \
grid(row=0,column=2,padx=3)
f0.grid(sticky='ew',columnspan=3)
hshow.grid()
def _loadconf():
# =============
'''Load line configuration'''
toconsola('Restoring line configuration from '+
self.FLOAT[ii].PLOT.FILECONF,wid=self.cons)
try:
self.FLOAT[ii].PLOT.load(self.FLOAT[ii].PLOT.FILECONF)
self.make_plot()
except:
        toconsola('Error: Unable to load file '+self.FLOAT[ii].PLOT.FILECONF,wid=self.cons)
def _saveconf():
# =============
      '''Save line configuration'''
toconsola('Saving line configuration to '+
self.FLOAT[ii].PLOT.FILECONF,wid=self.cons)
try:
self.FLOAT[ii].PLOT.save(self.FLOAT[ii].PLOT.FILECONF)
except:
toconsola('Error: Unable to write file '+self.FLOAT[ii].PLOT.FILECONF,wid=self.cons)
def _loadfromconf():
# ==================
'''Load line configuration from a file'''
nn = filedialog.askopenfilename(title='Load line configuration',
parent=self.Window_lineconfig,
initialdir=COSMO_CONF)
if len(nn) == 0:
return
self.FLOAT[ii].PLOT.FILECONF = '%s' % nn
toconsola('Restoring line configuration from '+
self.FLOAT[ii].PLOT.FILECONF,wid=self.cons)
try:
self.FLOAT[ii].PLOT.load(self.FLOAT[ii].PLOT.FILECONF)
self.make_plot()
except:
toconsola('Error: Unable to load file '+
self.FLOAT[ii].PLOT.FILECONF,wid=self.cons)
def _saveasconf():
# ================
      '''Save line configuration to a file'''
nn = filedialog.asksaveasfilename(title='Save line configuration',
parent=self.Window_lineconfig,
initialdir=COSMO_CONF,
confirmoverwrite=True)
if nn is None or len(nn) == 0:
return
self.FLOAT[ii].PLOT.FILECONF = '%s' % nn
toconsola('Saving line configuration to '+self.FLOAT[ii].PLOT.FILECONF,wid=self.cons)
try:
self.FLOAT[ii].PLOT.save(self.FLOAT[ii].PLOT.FILECONF)
except:
toconsola('Error: Unable to write file '+self.FLOAT[ii].PLOT.FILECONF,wid=self.cons)
if self.Window_lineconfig is not None:
self.Window_lineconfig.lift()
return
self.Window_lineconfig = tk.Toplevel(self.master)
self.Window_lineconfig.title('Trajectory plot configuration')
self.Window_lineconfig.resizable(width=False,height=False)
self.Window_lineconfig.protocol('WM_DELETE_WINDOW',_cancel)
menubar = tk.Menu(self.Window_lineconfig)
menu = tk.Menu(menubar,tearoff=0)
menubar.add_cascade(label='Configuration',menu=menu)
menu.add_command(label='Restore',command=_loadconf)
menu.add_command(label='Restore from',command=_loadfromconf)
menu.add_command(label='Save',command=_saveconf)
menu.add_command(label='Save as',command=_saveasconf)
try:
self.Window_lineconfig.config(menu=menubar)
except AttributeError:
# master is a toplevel window (Python 2.4/Tkinter 1.63)
      self.master.tk.call(self.Window_lineconfig, "config", "-menu", menubar)
fsel = ttk.Frame(self.Window_lineconfig,padding=10)
ttk.Label(fsel,text="File: ").grid(row=0,column=0,sticky='e',padx=3)
_wsel = ttk.Combobox(fsel,textvariable=self.FLOAT_INDX,
values=self.FLOAT_LIST,width=5)
_wsel.grid(row=0,column=1,sticky='w',padx=3)
_wsel.bind('<<ComboboxSelected>>',lambda e:_selected())
    _went = ttk.Entry(fsel,justify='left',width=80,state='readonly')
    _went.grid(row=0,column=2,columnspan=8,padx=3,sticky='w')
fsel.grid()
# The usual configuration
ii = self.FLOAT_INDX.get()
_went['textvariable'] = self.FLOAT[ii].FILENAME
hshow = ttk.Frame(self.Window_lineconfig,padding=10)
# Define tabs:
nb = ttk.Notebook(hshow)
page1 = ttk.Frame(nb)
page2 = ttk.Frame(nb)
page3 = ttk.Frame(nb)
nb.add(page1,text='Line Configuration')
nb.add(page2,text='Trajectory options')
nb.add(page3,text='Trajectory data')
nb.grid()
# Page 1
#lineplot.WinConfig(self.Window_lineconfig,LL)
lineplot.Configuration(page1,self.FLOAT[ii].PLOT)
# Page 2
lineplot.Configuration_OnMap(page2,self.FLOAT[ii].PLOT,self.FLOAT[ii])
# Page 3
lagrangian.ShowData(page3,self.FLOAT[ii])
f0 = ttk.Frame(hshow,padding=5)
ttk.Button(f0,text='Cancel',command=_cancel,padding=5). \
grid(row=0,column=0,padx=3)
ttk.Button(f0,text='Apply',command=_apply,padding=5). \
grid(row=0,column=1,padx=3)
ttk.Button(f0,text='Done',command=_done,padding=5). \
grid(row=0,column=2,padx=3)
f0.grid(sticky='ew',columnspan=3)
hshow.grid()
# ===================
def lselection(self):
# ===================
'''Sets all files in the SEQUENCE list to the same time step'''
self.L.set(int(self.lbox.get()))
self.PLOT.TLABEL.set(self.DATE[self.L.get()])
L = self.L.get()
if L == 0:
self.bprev.configure(state='disabled')
else:
self.bprev.configure(state='normal')
if L == self.NL - 1:
self.bnext.configure(state='disabled')
else:
self.bnext.configure(state='normal')
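# Propagate the selected time step to every linked vector and contour layer and re-read their data before redrawing.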
for i in range(self.nvec):
if self.VEC[i].LINK.get():
self.VEC[i].L.set(L)
self.VEC[i].read(wid=self.cons)
for i in range(self.ncdf):
if self.CDF[i].LINK.get():
self.CDF[i].L.set(L)
self.CDF[i].read(wid=self.cons)
# for i in range(self.LAYERS.n):
# if self.LAYERS.INSEQUENCE[i].get():
# jj = self.LAYERS.TYPE_INDEX[i]
# if self.LAYERS.TYPE[i] == 'VEC':
# self.VEC[jj].L.set(L)
# self.VEC[jj].read(wid=self.cons)
# elif self.LAYERS.TYPE[i] == 'FLD':
# self.CDF[jj].L.set(L)
# self.CDF[jj].read(update_lims=False,wid=self.cons)
self.make_plot()
# ==============
def tprev(self):
# ==============
'''Points to the previous time step'''
if self.L.get() > 0:
self.L.set(self.L.get() - 1)
self.PLOT.TLABEL.set(self.DATE[self.L.get()])
if self.L.get() == 0:
self.bprev.configure(state='disabled')
if self.L.get() < self.NL - 1:
self.bnext.configure(state='normal')
for i in range(self.nvec):
if self.VEC[i].LINK.get():
Lm = self.VEC[i].L.get() - 1
self.VEC[i].L.set(Lm)
self.VEC[i].read(wid=self.cons)
for i in range(self.ncdf):
if self.CDF[i].LINK.get():
Lm = self.CDF[i].L.get() - 1
self.CDF[i].L.set(Lm)
self.CDF[i].read(wid=self.cons)
# for i in range(self.LAYERS.n):
# if self.LAYERS.INSEQUENCE[i].get():
# jj = self.LAYERS.TYPE_INDEX[i]
# if self.LAYERS.TYPE[i] == 'VEC':
# L = self.VEC[jj].L.get()
# Lm = self.VEC[jj].L.get() - 1
# self.VEC[jj].L.set(Lm)
# self.VEC[jj].read(wid=self.cons)
# elif self.LAYERS.TYPE[i] == 'FLD':
# L = self.CDF[jj].L.get()
# Lm = self.CDF[jj].L.get() - 1
# self.CDF[jj].L.set(Lm)
# self.CDF[jj].read(update_lims=False,wid=self.cons)
self.make_plot()
else:
return
# ==============
def tnext(self):
# ==============
'''Points to the next time step'''
if self.L.get() < self.NL - 1:
self.L.set(self.L.get() + 1)
self.PLOT.TLABEL.set(self.DATE[self.L.get()])
if self.L.get() == self.NL - 1:
self.bnext.configure(state='disabled')
if self.L.get() > 0:
self.bprev.configure(state='normal')
for i in range(self.nvec):
if self.VEC[i].LINK.get():
Lp = self.VEC[i].L.get() + 1
self.VEC[i].L.set(Lp)
self.VEC[i].read(wid=self.cons)
for i in range(self.ncdf):
if self.CDF[i].LINK.get():
Lp = self.CDF[i].L.get() + 1
self.CDF[i].L.set(Lp)
self.CDF[i].read(wid=self.cons)
# for i in range(self.LAYERS.n):
# if self.LAYERS.INSEQUENCE[i].get():
# jj = self.LAYERS.TYPE_INDEX[i]
# if self.LAYERS.TYPE[i] == 'VEC':
# L = self.VEC[jj].L.get()
# Lp = self.VEC[jj].L.get() + 1
# self.VEC[jj].L.set(Lp)
# self.VEC[jj].read(wid=self.cons)
# elif self.LAYERS.TYPE[i] == 'FLD':
# L = self.CDF[jj].L.get()
# Lp = self.CDF[jj].L.get() + 1
# self.CDF[jj].L.set(Lp)
# self.CDF[jj].read(update_lims=False,wid=self.cons)
#toconsola("EG Drawing next.................",wid=self.cons)
self.make_plot()
#toconsola("EG next DOne",wid=self.cons)
else:
return
# # ====================
# def data_update(self):
# # ====================
# '''Makes the new plot according to the user selections. It calls self.read to get the new data'''
# self.read_UV(self.FLD.ncid,self.FLD.icdf,self.FLD.uid,self.FLD.vid)
# self.read_S(self.FLD.ncid,self.FLD.icdf,self.FLD.sid)
# self.make_plot()
# ===========================
def get_date(self,ncid,icdf):
# ===========================
self.T_LIST = []
if icdf.idl > -1:
wrk = ncid.variables[icdf.tname][:]
self.T_LIST = list(wrk)
else:
self.T_LIST = []
self.DATE = []
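# Convert the raw time values to datetime objects with num2date, using the units and calendar stored in icdf.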
for i in range(icdf.nt):
self.DATE.append(num2date(self.T_LIST[i], \
units=icdf.time_units, \
calendar=icdf.time_calendar))
# ========================
def plot_initialize(self):
# ========================
# Meridian and parallel range and intervals:
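# The grid interval is roughly a quarter of the longitude/latitude span (truncated to two decimals);
# spans wider than four degrees are rounded to whole degrees, and the first/last gridlines extend
# two intervals beyond the map limits.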
tmp1 = np.trunc(100*(self.PLOT.EAST.get()-self.PLOT.WEST.get())/4)/100
if tmp1 > 1:
tmp1 = np.rint(tmp1)
self.PLOT.MERIDIAN_INT.set(tmp1)
self.PLOT.MERIDIAN_INI.set(np.trunc(self.PLOT.WEST.get()/tmp1 - 2)*tmp1)
self.PLOT.MERIDIAN_FIN.set(np.trunc(self.PLOT.EAST.get()/tmp1 + 2)*tmp1)
tmp1 = None
tmp2 = np.trunc(100*(self.PLOT.NORTH.get() - self.PLOT.SOUTH.get())/4)/100
if tmp2 > 1:
tmp2 = np.rint(tmp2)
self.PLOT.PARALLEL_INT.set(tmp2)
self.PLOT.PARALLEL_INI.set(np.trunc(self.PLOT.SOUTH.get()/tmp2 - 2)*tmp2)
self.PLOT.PARALLEL_FIN.set(np.trunc(self.PLOT.NORTH.get()/tmp2 + 2)*tmp2)
tmp2 = None
# =================================
def make_plot(self):
# =================================
#toconsola("EG make_plot:\n PLOT.OUTPUT_FIGURE: "+str(self.PLOT.OUTPUT_FIGURE.get()),
# wid=self.cons)
if self.PLOT.OUTPUT_FIGURE.get():
if self.fig is None:
#toconsola("\n EGL creation", wid=self.cons)
self.Window_mapa = tk.Toplevel(self.master)
self.Window_mapa.title("COSMO-VIEW plotting tool")
self.Window_mapa.resizable(width=True,height=True)
self.Window_mapa.grid_columnconfigure(0, weight=1)
self.Window_mapa.grid_rowconfigure(0, weight=1)
#self.Window_mapa.wm_geometry("1900x1200")
#self.canvas = None # canvas
# Frame container
topframe = tk.Frame(self.Window_mapa)
topframe.grid_rowconfigure(0, weight=1)
topframe.grid(sticky='swen')
topframe.grid_columnconfigure(0, weight=1)
# Two panels: we use pack for the canvas and grid for the console
# Add the canvas
top_panel = tk.Frame(topframe, pady = 20)
# Initialize figure, canvas and plot panel
#self.ax=None
self.fig = Figure(figsize=self.PLOT.SIZE, \
facecolor=self.PLOT.FIGURE_COLOR.get(),dpi=self.PLOT.DPI.get())
#toconsola(" MAP_PLOT: Set projection parameters",wid=self.cons)
proj = map_proj(self.PLOT.MAP_PROJECTION.get(), params=self.params)
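# map_proj (from tools) returns a dict; its 'proj' entry is the Cartopy CRS used for the axes projection and, later, for set_extent.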
self.ax = self.fig.add_subplot(111, projection=proj['proj'])
self.canvas = FigureCanvasTkAgg(self.fig, master=top_panel)
#EG We draw with self.draw_figure
#EG self.canvas.draw()
self.canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)
toolbar = NavigationToolbar2Tk(self.canvas, top_panel)
toolbar.update()
self.canvas.get_tk_widget().pack(side=tk.BOTTOM, fill=tk.BOTH, expand=1)
#EG event controllers
self.CANVAS_CLICK = self.canvas.mpl_connect('button_press_event',self.canvas_click)
self.canvas.mpl_connect('close_event',self.canvas_closing)
self.canvas.mpl_connect('resize_event',self.canvas_resizing)
top_panel.grid(row=0, column=0, sticky='swen')
self.drawmap = True
else: toconsola(" EG ojo fig existe",wid=self.cons)
self.draw_figure()
# ========================
def setmap(self,target=0):
# ========================
#'''EG new setmap routine, focused on setting the projection.
# We implement a function to manage the projection with Cartopy:
# map_proj(name,list). See the tools module.
#'''
#projection = self.PLOT.MAP_PROJECTION.get()
#EG self.toconsola("EG Set map Projection")
#proj = map_proj(projection)
#self.ax = self.fig.add_subplot(111, projection=proj['proj'])
#self.ax.set_extent([ float(self.PLOT.WEST.get()), \
#float(self.PLOT.EAST.get()), float(self.PLOT.SOUTH.get()), \
#float(self.PLOT.NORTH.get())],proj['proj'])
#EG self.ax.coastlines()
#EG Projection
'''
if proj is None:
proj = map_proj(self.PLOT.MAP_PROJECTION.get())
self.ax = self.fig.add_subplot(111, projection=proj['proj'])
'''
return
# =================================
def draw_figure(self):
# ==================================
global CONSOLA
toconsola("EG draw_figure:",wid=self.cons)
toconsola((" EG Configuration:\n"+ \
"\t Projection: "+str(self.PLOT.MAP_PROJECTION.get())+ \
"\n\t Domain:\t \t West - East: "+str(float(self.PLOT.WEST.get()))+ \
" - "+str(float(self.PLOT.EAST.get()))+ \
"\n\t \t South - North: "+str(float(self.PLOT.SOUTH.get()))+ \
" - "+str(float(self.PLOT.NORTH.get()))),wid=self.cons)
try:
self.scbar.remove()
except: pass
for bar in self.cdfbar:
try:
bar.remove()
except: pass
self.cdfbar = []
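# Colorbars from the previous draw are removed above so they are not duplicated when the figure is rebuilt.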
proj = map_proj(self.PLOT.MAP_PROJECTION.get())
self.ax.clear()
font_family = self.PLOT.MAP_FONT_TYPE.get() # Let's see ...
font_size = self.PLOT.LABEL_SIZE.get()
# EPSG
# EG Not necessary
# epsg = int(self.PLOT.EPSG.get())
# This block could be reworked now
# Temporarily deprecated: self.PLOT.GEOMAP.get()
self.ax.set_extent([float(self.PLOT.WEST.get()) ,float(self.PLOT.EAST.get()),\
float(self.PLOT.SOUTH.get()),float(self.PLOT.NORTH.get())],\
crs=proj['proj'])
#EG projection tests: self.ax.coastlines()
toconsola(" EG self.PLOT.GEOMAP: "+str(self.PLOT.GEOMAP.get()),wid=self.cons)
if self.drawmap:
toconsola(" EG draw_figure: call setmap no more needed !",wid=self.cons)
self.drawmap = False
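# With Cartopy the projection is fixed when the axes are created, so no explicit basemap call is needed; drawmap only flags the first draw.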
#EG We implement GEBCO+EMODNET Tiles services
toconsola(" EG: RELIEF tiles"+str(self.PLOT.RELIEF_SHOW.get()),wid=self.cons)
if self.PLOT.RELIEF_SHOW.get():
if self.PLOT.RELIEF.get() == 1:
gebco ="GEBCO_2019_Grid"
try:
toconsola("\t EG: GEBCO tiles",wid=self.cons)
self.ax.add_wms(wms='https://www.gebco.net/data_and_products/gebco_web_services/2019/mapserv?request=getmap&service=wms&BBOX=-90,-180,90,360&crs=EPSG:4326&format=image/jpeg&layers=gebco_2019_grid&width=1200&height=600&version=1.3.0',layers=gebco,zorder=0)
except:
toconsola("\t WARNING: GEBCO server failed !, it is disabled......",wid=self.cons)
elif self.PLOT.RELIEF.get() == 2:
emod_land="emodnet:mean_atlas_land"
toconsola("\t EG: EMODNET tiles",wid=self.cons)
try:
self.ax.add_wms(wms='http://ows.emodnet-bathymetry.eu/wms',layers=emod_land,zorder=0)
except:
toconsola("\t WARNING: EMODNET server failed !, it is disabled......",wid=self.cons)
else:
#EG Sometimes this situation is possible (i.e. manual edition of conf files)
self.PLOT.RELIEF_SHOW.set(False)
if self.PLOT.EMODNET_ISO.get():
emodnet="emodnet:contours"
toconsola("\t EG: EMODNET contours",wid=self.cons)
try:
self.ax.add_wms(wms='http://ows.emodnet-bathymetry.eu/wms',layers=emodnet,zorder=0)
except:
toconsola("\t WARNING: EMODNET contours failed !, it is disabled......",wid=self.cons)
# Draw SAIDIN:
#
if not empty(self.SAIDIN.FILENAME.get()):
if self.SAIDIN.show.get():
toconsola("EG plot SAIDIN",wid=self.cons)
#EG Added projection argument, map reference dropped
self.scbar = contourplot.drawing(self.fig,self.ax,proj['proj'],
self.SAIDIN.FLD.xx, self.SAIDIN.FLD.yy,
self.SAIDIN.FLD.data,
self.SAIDIN.FLD.data.mask,
self.SAIDIN.PLOT)
# Draw fields:
#
if self.ncdf > 0:
#EG Added projection argument, map reference dropped
toconsola("EG: plot netcdf",wid=self.cons)
for ii in range(self.ncdf):
if self.CDF[ii].show.get():
self.cdfbar.append(contourplot.drawing(self.fig,
self.ax, proj['proj'],
self.CDF[ii].FLD.xx, self.CDF[ii].FLD.yy,
self.CDF[ii].FLD.data,
self.CDF[ii].FLD.data.mask,
self.CDF[ii].PLOT))
# Draw currents:
#
if self.nvec > 0:
toconsola("EG plot currents",wid=self.cons)
for ii in range(self.nvec):
if self.VEC[ii].show.get():
vectorplot.drawing(self.ax, proj['proj'], self.VEC[ii])
# Draw floats:
#
if self.nfloat > 0:
toconsola("EG plot floats",wid=self.cons)
for ii in range(self.nfloat):
self.FLOAT[ii].L.set(self.L.get())
lagrangian.drawing(self.ax, proj['proj'], self.FLOAT[ii])
# Draw markers:
#
mrklines = []
mrklabls = []
if self.nmarker > 0:
toconsola("EG plot markers",wid=self.cons)
for ii in range(self.nmarker):
#EG Added projection argument, reference map and fig dropped
lmrk = geomarker.drawing(self.ax, proj['proj'], self.MARKER[ii])
mrklines.append(lmrk)
mrklabls.append(self.MARKER[ii].LABEL.get())
# Draw SHAPES:
#
if self.nshape > 0:
toconsola("EG plot shapes",wid=self.cons)
for ii in range(self.nshape):
toconsola("\tSHAPE"+str(ii),wid=self.cons)
#EG Added projection argument, reference map and fig
lmrk = shape.drawing(self.ax, proj['proj'], self.SHAPE[ii])
if lmrk is not None:
mrklines.append(lmrk)
mrklabls.append(self.SHAPE[ii].LABEL.get())
# Draw Ellipses:
#
if self.nellipse > 0:
for ii in range(self.nellipse):
ellipse.drawing(self.ax, proj['proj'], self.ELLIPSE[ii])
# Draw Patches:
#
if self.npatch > 0:
for ii in range(self.npatch):
patch.drawing(self.ax, proj['proj'], self.PATCH[ii])
# Draw Features:
#
if self.FEATURE.n > 0:
for ii in range(self.FEATURE.n):
self.FEATURE.DATA[ii].drawing(self.ax, proj['proj'])
#EG Coastlines
#toconsola("EG: COASTLINES"+str(self.PLOT.COASTLINE_SHOW.get()),wid=self.cons)
if self.PLOT.COASTLINE_SHOW.get():
if self.PLOT.COASTLINE_SOURCE.get() == 2:
emodnet="coastlines"
try:
self.ax.add_wms(wms='http://ows.emodnet-bathymetry.eu/wms',layers=emodnet,zorder=0)
except:
toconsola("\t WARNING: EMODNET coastlines !, it is disabled......",wid=self.cons)
else:
toconsola("\t EG COASTLINE: Natural_Earth (50m by default) or EMODNET wms",wid=self.cons)
self.ax.coastlines(self.PLOT.MAP_RESOLUTION.get(),color=self.PLOT.COASTLINE_COLOR.get(),
linewidth=self.PLOT.COASTLINE_WIDTH.get(),
zorder=self.PLOT.COASTLINE_ZORDER.get())
if self.PLOT.ISOBAT_NPLOT > 0:
toconsola("EG plot Custom ISOBATHS",wid=self.cons)
# Plot isobaths and its legend:
lines, labels = [], []
toconsola("\t lABEL_SHOW"+str(self.PLOT.ISOBAT_LABEL_SHOW.get()),wid=self.cons)
for ii in range(self.PLOT.nisobat):
label = None
if self.PLOT.ISOBAT_LABEL_SHOW.get():
label = self.PLOT.ISOBAT_LABEL[ii]
try:
color = eval(self.PLOT.ISOBAT_COLOR[ii].get())
except:
color = self.PLOT.ISOBAT_COLOR[ii].get()
if self.PLOT.ISOBAT_SHOW[ii]:
toconsola("\t EG ISOBATA:"+str(self.PLOT.ISOBAT_LABEL[ii]),wid=self.cons)
z = self.PLOT.ISOBAT_DATA[ii]
isox,isoy = z['lon'],z['lat']
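# Values above 1e29 are presumably fill values separating isobath segments; turning them into NaN makes matplotlib break the line there.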
for i in range(len(isox)):
if isox[i] > 1e29:
isox[i], isoy[i] = np.nan, np.nan
isbt, = self.ax.plot(isox,isoy,marker=None,
linestyle=self.PLOT.ISOBAT_STYLE[ii].get(),
linewidth=self.PLOT.ISOBAT_WIDTH[ii].get(),
#transform=proj['proj'],
transform=ccrs.PlateCarree(),
color=color)
lines.append(isbt)
labels.append(label)
if self.PLOT.ISOBAT_LEGEND.SHOW.get():
toconsola("\t self.PLOT.ISOBAT_LEGEND.SHOW"+str(self.PLOT.ISOBAT_LEGEND.SHOW.get()),wid=self.cons)
fontsize = self.PLOT.ISOBAT_LEGEND.FONTSIZE.get()
mode = None
if self.PLOT.ISOBAT_LEGEND.FONTSIZE.get() < 1:
fontsize = None
if self.PLOT.ISOBAT_LEGEND.MODE.get() == 1:
mode = 'expand'
if not empty(self.PLOT.ISOBAT_LEGEND.TITLE.get()):
try: pass
except: pass
# Anchor BBOX:
if self.PLOT.ISOBAT_LEGEND.USE_BB.get():
bb = [self.PLOT.ISOBAT_LEGEND.BBx.get(),
self.PLOT.ISOBAT_LEGEND.BBy.get()]
else:
bb = None
Ilegend = self.ax.legend(lines,labels, \
#title=self.PLOT.ISOBAT_LEGEND.TITLE.get(),
#title_fontsize=24,
loc=self.PLOT.ISOBAT_LEGEND.LOC.get(),
ncol=self.PLOT.ISOBAT_LEGEND.NCOL.get(),
fontsize=fontsize,
frameon=self.PLOT.ISOBAT_LEGEND.FRAMEON.get(),
fancybox=self.PLOT.ISOBAT_LEGEND.FANCYBOX.get(),
shadow=self.PLOT.ISOBAT_LEGEND.SHADOW.get(),
framealpha=self.PLOT.ISOBAT_LEGEND.ALPHA.get(),
mode=mode,
bbox_to_anchor=bb,
facecolor=self.PLOT.ISOBAT_LEGEND.COLOR.get(),
edgecolor=self.PLOT.ISOBAT_LEGEND.EDGECOLOR.get(),
markerscale=self.PLOT.ISOBAT_LEGEND.MARKERSCALE.get(),
borderpad=self.PLOT.ISOBAT_LEGEND.BORDERPAD.get(),
handletextpad=self.PLOT.ISOBAT_LEGEND.HANDLETEXTPAD.get(),
borderaxespad=self.PLOT.ISOBAT_LEGEND.BORDERAXESPAD.get(),
labelspacing=self.PLOT.ISOBAT_LEGEND.LABELSPACING.get())
if not empty(self.PLOT.ISOBAT_LEGEND.TITLE.get()):
Ilegend.set_title(self.PLOT.ISOBAT_LEGEND.TITLE.get(),
prop=self.PLOT.ISOBAT_LEGEND.TITLEFONT)
if self.PLOT.WATER_COLOR.get() != 'None':
#toconsola("PLOT.WATER_COLOR por defecto 50m",wid=self.cons)
self.ax.add_feature(cfeat.NaturalEarthFeature('physical', 'ocean', \
self.PLOT.MAP_RESOLUTION.get(), \
facecolor=self.PLOT.WATER_COLOR.get()),zorder=self.PLOT.WATER_ZORDER.get())
if self.PLOT.LAND_COLOR.get() != 'None':
#toconsola("PLOT.LAND_COLOR por defecto 50m",wid=self.cons)
self.ax.add_feature(cfeat.NaturalEarthFeature('physical', 'land', \
self.PLOT.MAP_RESOLUTION.get(), \
facecolor=self.PLOT.LAND_COLOR.get()),zorder=self.PLOT.LAND_ZORDER.get())
if self.PLOT.COUNTRYLINE_SHOW.get():
#toconsola("PLOT.COUNTRYLINE",wid=self.cons)
self.ax.add_feature(cfeat.BORDERS,edgecolor=self.PLOT.COUNTRYLINE_COLOR.get(),
linewidth=self.PLOT.COUNTRYLINE_WIDTH.get(),
zorder=self.PLOT.LAND_ZORDER.get()+1)
if self.PLOT.RIVERS_SHOW.get():
#toconsola("PLOT.RIVERS",wid=self.cons)
self.ax.add_feature(cfeat.NaturalEarthFeature('physical','rivers_lake_centerlines', \
self.PLOT.MAP_RESOLUTION.get(), \
linewidth=self.PLOT.RIVERS_WIDTH.get(),
edgecolor=self.PLOT.RIVERS_COLOR.get(),zorder=self.PLOT.LAND_ZORDER.get()+1))
#self.ax.coastlines(resolution='110m')
#self.ax.gridlines()
if self.PLOT.GRID_SHOW.get():
toconsola("EG PLOT.GRID"+self.PLOT.GRID_LINESTYLE.get(),wid=self.cons)
#EG to adapt, still needs checking
#def setcolor(x,color):
# for m in x:
# for t in x[m][1]:
# t.set_color(color)
vmeridians = np.arange(self.PLOT.MERIDIAN_INI.get(), \
self.PLOT.MERIDIAN_FIN.get(), \
self.PLOT.MERIDIAN_INT.get())
vparallels = np.arange(self.PLOT.PARALLEL_INI.get(), \
self.PLOT.PARALLEL_FIN.get(), \
self.PLOT.PARALLEL_INT.get())
lstyle = {'size':self.PLOT.GRID_SIZE.get(),'color':self.PLOT.GRID_COLOR.get()}
#gl = self.ax.gridlines(crs=proj['proj'],draw_labels=True,
gl = self.ax.gridlines(crs=ccrs.PlateCarree(),draw_labels=True,
linewidth=self.PLOT.GRID_LINEWIDTH.get(),
color=self.PLOT.GRID_FONTCOLOR.get(),
alpha=self.PLOT.GRID_ALPHA.get(),
linestyle=self.PLOT.GRID_LINESTYLE.get(),
zorder=self.PLOT.GRID_ZORDER.get())
# Lines visibility
gl.xlines, gl.ylines = True, True
if self.PLOT.GRID_LINESTYLE.get() == "None":
gl.xlines, gl.ylines = False, False
# xy labels visibility
if CARTOPY_VERSION < '0.18':
# Works with 0.17
gl.xlabels_top = self.PLOT.GRID_NORTH.get()
gl.xlabels_bottom = self.PLOT.GRID_SOUTH.get()
gl.ylabels_left = self.PLOT.GRID_WEST.get()
gl.ylabels_right = self.PLOT.GRID_EAST.get()
else:
# Works with 0.18 and later
gl.top_labels = self.PLOT.GRID_NORTH.get()
gl.bottom_labels = self.PLOT.GRID_SOUTH.get()
gl.left_labels = self.PLOT.GRID_WEST.get()
gl.right_labels = self.PLOT.GRID_EAST.get()
gl.xlocator = mticker.FixedLocator(vmeridians)
gl.ylocator = mticker.FixedLocator(vparallels)
gl.xlabel_style, gl.ylabel_style = lstyle, lstyle
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
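# FixedLocator pins the gridlines to the meridians/parallels computed above; the Cartopy formatters add the degree and hemisphere labels.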
#gl.xpadding , gl.ypadding = self.PLOT.LABEL_PAD.get(), self.PLOT.LABEL_PAD.get()
#else:
# # Default: no labels, no grid just Latitude and Longitude
# toconsola("EG XYLabels ..\n\t"+self.PLOT.XLABEL.get()+" - "+self.PLOT.YLABEL.get(),wid=self.cons)
# font_family = self.PLOT.MAP_FONT_TYPE.get()
# font_size = self.PLOT.LABEL_SIZE.get()
font_weight = 'normal'
font = {'family' : font_family, 'weight' : font_weight,
'color' : self.PLOT.TEXT_COLOR.get(),
'size' : font_size}
# -0.07
self.ax.text(-self.PLOT.YLABEL_PAD.get(), 0.55, self.PLOT.YLABEL.get(), va="bottom", \
ha="center", rotation="vertical", rotation_mode="anchor",
transform=self.ax.transAxes,fontdict=font)
# -0.2
self.ax.text(0.5, -self.PLOT.XLABEL_PAD.get(), self.PLOT.XLABEL.get(), va="bottom", \
ha="center", rotation="horizontal", rotation_mode="anchor",
transform=self.ax.transAxes,fontdict=font)
# Title
toconsola("Plot Title: "+self.PLOT.TITLE.get(),wid=self.cons)
self.ax.set_title(self.PLOT.TITLE.get(),fontproperties=self.PLOT.TITLEFONT)
px,py = self.ax.title.get_position()
dy = self.PLOT.TITLE_PAD.get()/self.fig.get_dpi()
self.ax.title.set_position((px,py+dy))
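# Nudge the title upwards by TITLE_PAD expressed relative to the figure DPI.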
if self.PLOT.GEOMAP.get():
#toconsola("EG PLOT.GEOMAP 2 scale: Not yet implemented",wid=self.cons)
if self.PLOT.SCALE_SHOW.get():
try:
YOFFSET = float(self.PLOT.SCALE_YOFFSET.get())
except: YOFFSET = None
try:
LINEWIDTH = float(self.PLOT.SCALE_LINEWIDTH.get())
except: LINEWIDTH = None
#EG careful: scale_bar from tools does not seem to work
toconsola("EG bar scale", wid=self.cons)
scale_bar(self.ax,proj=ccrs.PlateCarree(),
location=[self.PLOT.SCALE_XO.get(),self.PLOT.SCALE_YO.get()],
length=self.PLOT.SCALE_LENGTH.get(),
linecolor=self.PLOT.SCALE_LINECOLOR.get(),
fontcolor=self.PLOT.SCALE_FONTCOLOR.get(),
fontsize=self.PLOT.SCALE_FONTSIZE.get(),
zorder=self.PLOT.SCALE_ZORDER.get(),
linewidth=LINEWIDTH)
#scale_bar(self.ax, self.PLOT.SCALE_LENGTH.get(), \
# [self.PLOT.SCALE_XO.get(),self.PLOT.SCALE_YO.get()],
# linewidth=LINEWIDTH)
'''EG To be implemented with Cartopy
print("EG PLOT.GEOMAP 2 drawmapscale")
self.m.drawmapscale(self.PLOT.SCALE_X.get(),
self.PLOT.SCALE_Y.get(),
self.PLOT.SCALE_XO.get(),
self.PLOT.SCALE_YO.get(),
length=self.PLOT.SCALE_LENGTH.get(),
units=self.PLOT.SCALE_UNITS.get(),
barstyle=self.PLOT.SCALE_STYLE.get(),
fontsize=self.PLOT.SCALE_FONTSIZE.get(),
yoffset=YOFFSET,
labelstyle=self.PLOT.SCALE_LABELSTYLE.get(),
fontcolor=self.PLOT.SCALE_FONTCOLOR.get(),
fillcolor1=self.PLOT.SCALE_FILLCOLOR1.get(),
fillcolor2=self.PLOT.SCALE_FILLCOLOR2.get(),
format=self.PLOT.SCALE_FORMAT.get(),
linecolor=self.PLOT.SCALE_LINECOLOR.get(),
linewidth=LINEWIDTH)
'''
# Time stamp
try:
self.time_stamp.remove()
except: pass
if len(self.DATE) > 0:
toconsola("EG Time stamp: len(self.DATE) > 0", wid=self.cons)
if self.PLOT.TIMESTAMP_SHOW.get():
toconsola("EG Time stamp: "+str(self.DATE[self.L.get()]), wid=self.cons)
font_weight = 'normal'
if self.PLOT.TIMESTAMP_BOLD.get(): font_weight = 'bold'
self.ax.annotate(str(self.DATE[self.L.get()]), \
xy=(self.PLOT.TIMESTAMP_X.get(), \
self.PLOT.TIMESTAMP_Y.get()), \
xycoords='figure fraction', \
color=self.PLOT.TIMESTAMP_COLOR.get(), \
fontsize=self.PLOT.TIMESTAMP_SIZE.get(), \
fontfamily=font_family, \
fontweight=font_weight, \
annotation_clip=False)
if self.PLOT.LOGO_DISPLAY.get() == 1: self.plot_logo()
self.ax.callbacks.connect('xlim_changed', self.on_xlims_change)
self.ax.callbacks.connect('ylim_changed', self.on_ylims_change)
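# The axes limit callbacks (on_xlims_change/on_ylims_change) presumably keep the map extent settings in sync after interactive zoom or pan.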
if len(mrklines) > 0 and self.PLOT.LEGEND.SHOW.get():
toconsola("EG self.nmarker ?",wid=self.cons)
fontsize = self.PLOT.LEGEND.FONTSIZE.get()
mode = None
if self.PLOT.LEGEND.FONTSIZE.get() < 1: fontsize = None
if self.PLOT.LEGEND.MODE.get() == 1: mode = 'expand'
# Anchor BBOX:
if self.PLOT.LEGEND.USE_BB.get():
bb = [self.PLOT.LEGEND.BBx.get(),
self.PLOT.LEGEND.BBy.get()]
else:
bb = None
#try:
toconsola("EG ax.legend",wid=self.cons)
legend = self.ax.legend(mrklines,mrklabls,
loc=self.PLOT.LEGEND.LOC.get(),
ncol=self.PLOT.LEGEND.NCOL.get(),
fontsize=fontsize,
frameon=self.PLOT.LEGEND.FRAMEON.get(),
fancybox=self.PLOT.LEGEND.FANCYBOX.get(),
shadow=self.PLOT.LEGEND.SHADOW.get(),
framealpha=self.PLOT.LEGEND.ALPHA.get(),
mode=mode,
bbox_to_anchor=bb,
facecolor=self.PLOT.LEGEND.COLOR.get(),
edgecolor=self.PLOT.LEGEND.EDGECOLOR.get(),
markerscale=self.PLOT.LEGEND.MARKERSCALE.get(),
borderpad=self.PLOT.LEGEND.BORDERPAD.get(),
handletextpad=self.PLOT.LEGEND.HANDLETEXTPAD.get(),
borderaxespad=self.PLOT.LEGEND.BORDERAXESPAD.get(),
labelspacing=self.PLOT.LEGEND.LABELSPACING.get())
#except: pass
try:
self.ax.add_artist(Ilegend)
except:
pass
if not empty(self.PLOT.LEGEND.TITLE.get()):
try:
legend.set_title(self.PLOT.LEGEND.TITLE.get(),
prop=self.PLOT.LEGEND.TITLEFONT)
except: pass
self.canvas.draw()
toconsola("End draw_figure:",wid=self.cons)
return
# ============================
def make_Mplot(self,proj=None):
# ============================
'''Plotting the maps using CARTOPY,
output directed to Movie window'''
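# make_Mplot mirrors draw_figure, but renders into the Movie window's axes (self.Max), figure (self.Mfig) and canvas (self.Mcanvas).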
try:
self.SAIDIN.Mcbar.remove()
except: pass
try:
self.Mscbar.remove()
except: pass
for bar in self.Mcdfbar:
try:
bar.remove()
except: pass
#EG recover the cartopy projection
if proj is None:
rproj = map_proj(self.PLOT.MAP_PROJECTION.get())
proj = rproj['proj']
self.Mcdfbar = []
self.Max.clear()
font_family = self.PLOT.MAP_FONT_TYPE.get() # Let's see ...
font_size = self.PLOT.LABEL_SIZE.get()
# EPSG temporarily disabled
# epsg = int(self.PLOT.EPSG.get())
# SOUTH = float(self.PLOT.SOUTH.get())
# NORTH = float(self.PLOT.NORTH.get())
# WEST = float(self.PLOT.WEST.get())
# EAST = float(self.PLOT.EAST.get())
self.Max.set_extent([float(self.PLOT.WEST.get()) ,float(self.PLOT.EAST.get()),\
float(self.PLOT.SOUTH.get()),float(self.PLOT.NORTH.get())],\
crs=proj)
if self.Mdrawmap:
#EG no longer needed: self.setmap(self.Max,1)
self.Mdrawmap = False
#toconsola("EG: RELIEF tiles",wid=self.cons)
if self.PLOT.RELIEF_SHOW.get():
if self.PLOT.RELIEF.get() == 1:
gebco ="GEBCO_2019_Grid"
try:
#toconsola("\tEG: GEBCO tiles",wid=self.cons)
self.Max.add_wms(wms='https://www.gebco.net/data_and_products/gebco_web_services/2019/mapserv?request=getmap&service=wms&BBOX=-90,-180,90,360&crs=EPSG:4326&format=image/jpeg&layers=gebco_2019_grid&width=1200&height=600&version=1.3.0',layers=gebco,zorder=0)
except:
toconsola("\tWARNING: GEBCO server failed !, it is disabled......",wid=self.cons)
elif self.PLOT.RELIEF.get() == 2:
emod_land="emodnet:mean_atlas_land"
#toconsola("\tEG: EMODNET tiles",wid=self.cons)
try:
self.Max.add_wms(wms='http://ows.emodnet-bathymetry.eu/wms',layers=emod_land,zorder=0)
except:
toconsola("\tWARNING: EMODNET server failed !, it is disabled......",wid=self.cons)
else:
#EG Sometimes this situation is possible (i.e. manual edition of conf files)
self.PLOT.RELIEF_SHOW.set(False)
if self.PLOT.EMODNET_ISO.get():
emodnet="emodnet:contours"
#toconsola("EG: EMODNET contours",wid=self.cons)
try:
self.Max.add_wms(wms='http://ows.emodnet-bathymetry.eu/wms',layers=emodnet,zorder=0)
except:
toconsola("\t WARNING: EMODNET contours failed !, it is disabled......",wid=self.cons)
# Draw SAIDIN:
if not empty(self.SAIDIN.FILENAME.get()):
if self.SAIDIN.show.get():
self.Mscbar = contourplot.drawing(self.Mfig,self.Max, proj,\
self.SAIDIN.FLD.xx,self.SAIDIN.FLD.yy, \
self.SAIDIN.FLD.data, \
self.SAIDIN.FLD.data.mask, \
self.SAIDIN.PLOT)
# Draw fields:
if self.ncdf > 0:
for ii in range(self.ncdf):
if self.CDF[ii].show.get():
self.Mcdfbar.append(contourplot.drawing(self.Mfig,self.Max, proj,\
self.CDF[ii].FLD.xx, \
self.CDF[ii].FLD.yy, \
self.CDF[ii].FLD.data, \
self.CDF[ii].FLD.data.mask, \
self.CDF[ii].PLOT))
# Draw currents:
if self.nvec > 0:
for ii in range(self.nvec):
if self.VEC[ii].show.get():
vectorplot.drawing(self.Max,proj,self.VEC[ii])
# Draw floats:
if self.nfloat > 0:
for ii in range(self.nfloat):
self.FLOAT[ii].L.set(self.L.get())
lagrangian.drawing(self.Max,proj,self.FLOAT[ii])
# Draw markers:
mrklines = []
mrklabls = []
if self.nmarker > 0:
for ii in range(self.nmarker):
lmrk = geomarker.drawing(self.Max,proj,self.MARKER[ii])
mrklines.append(lmrk)
mrklabls.append(self.MARKER[ii].LABEL.get())
# Draw shapes:
if self.nshape > 0:
for ii in range(self.nshape):
#toconsola("\tSHAPE"+str(ii),wid=self.cons)
#EG Added projection argument, reference map and fig
lmrk = shape.drawing(self.Max,proj,self.SHAPE[ii])
if lmrk is not None:
mrklines.append(lmrk)
mrklabls.append(self.SHAPE[ii].LABEL.get())
# Draw Ellipses:
if self.nellipse > 0:
for ii in range(self.nellipse):
ellipse.drawing(self.Max,proj,self.ELLIPSE[ii])
# Draw patches:
#
if self.npatch > 0:
for ii in range(self.npatch):
patch.drawing(self.Max,proj,self.PATCH[ii])
# Draw Features:
#
if self.FEATURE.n > 0:
for ii in range(self.FEATURE.n):
self.FEATURE.DATA[ii].drawing(self.Max,proj)
#EG Coastlines
#toconsola("EG: COASTLINES",wid=self.cons)
if self.PLOT.COASTLINE_SHOW.get():
if self.PLOT.COASTLINE_SOURCE.get() == 2:
emodnet="coastlines"
try:
self.Max.add_wms(wms='http://ows.emodnet-bathymetry.eu/wms',
layers=emodnet,
color=self.PLOT.COASTLINE_COLOR.get(),
linewidth=self.PLOT.COASTLINE_WIDTH.get(),
zorder=self.PLOT.COASTLINE_ZORDER.get())
except:
toconsola("WARNING: EMODNET coastlines !, it is disabled......",wid=self.cons)
else:
#toconsola("EG COASTLINE: Natural_Earth (50m by default) or EMODNET wms",wid=self.cons)
self.Max.coastlines(self.PLOT.MAP_RESOLUTION.get(),
color=self.PLOT.COASTLINE_COLOR.get(),
linewidth=self.PLOT.COASTLINE_WIDTH.get(),
zorder=self.PLOT.COASTLINE_ZORDER.get())
if self.PLOT.ISOBAT_NPLOT > 0:
#toconsola("EG Custom ISOBATHS",wid=self.cons)
# Plot isobaths and its legend:
lines, labels = [], []
toconsola("\t lABEL_SHOW",self.PLOT.ISOBAT_LABEL_SHOW.get(),wid=self.cons)
for ii in range(self.PLOT.nisobat):
label = None
if self.PLOT.ISOBAT_LABEL_SHOW.get():
label = self.PLOT.ISOBAT_LABEL[ii]
try:
color = eval(self.PLOT.ISOBAT_COLOR[ii].get())
except:
color = self.PLOT.ISOBAT_COLOR[ii].get()
if self.PLOT.ISOBAT_SHOW[ii]:
#toconsola("\t EG ISOBATA:",self.PLOT.ISOBAT_LABEL[ii],wid=self.cons)
z = self.PLOT.ISOBAT_DATA[ii]
isox,isoy = z['lon'],z['lat']
for i in range(len(isox)):
if isox[i] > 1e29:
isox[i], isoy[i] = np.nan, np.nan
isbt, = self.Max.plot(isox,isoy,marker=None,
linestyle=self.PLOT.ISOBAT_STYLE[ii].get(),
linewidth=self.PLOT.ISOBAT_WIDTH[ii].get(),
transform=ccrs.PlateCarree(),
color=color)
lines.append(isbt)
labels.append(label)
if self.PLOT.ISOBAT_LEGEND.SHOW.get():
#toconsola("\t EG self.PLOT.ISOBAT_LEGEND.SHOW",wid=self.cons)
fontsize = self.PLOT.ISOBAT_LEGEND.FONTSIZE.get()
mode = None
if self.PLOT.ISOBAT_LEGEND.FONTSIZE.get() < 1:
fontsize = None
if self.PLOT.ISOBAT_LEGEND.MODE.get() == 1:
mode = 'expand'
if not empty(self.PLOT.ISOBAT_LEGEND.TITLE.get()):
try: pass
except: pass
# Anchor BBOX:
if self.PLOT.ISOBAT_LEGEND.USE_BB.get():
bb = [self.PLOT.ISOBAT_LEGEND.BBx.get(),
self.PLOT.ISOBAT_LEGEND.BBy.get()]
else:
bb = None
Ilegend = self.Max.legend(lines,labels, \
#title=self.PLOT.ISOBAT_LEGEND.TITLE.get(),
#title_fontsize=24,
loc=self.PLOT.ISOBAT_LEGEND.LOC.get(),
ncol=self.PLOT.ISOBAT_LEGEND.NCOL.get(),
fontsize=fontsize,
frameon=self.PLOT.ISOBAT_LEGEND.FRAMEON.get(),
fancybox=self.PLOT.ISOBAT_LEGEND.FANCYBOX.get(),
shadow=self.PLOT.ISOBAT_LEGEND.SHADOW.get(),
framealpha=self.PLOT.ISOBAT_LEGEND.ALPHA.get(),
mode=mode,
facecolor=self.PLOT.ISOBAT_LEGEND.COLOR.get(),
edgecolor=self.PLOT.ISOBAT_LEGEND.EDGECOLOR.get(),
markerscale=self.PLOT.ISOBAT_LEGEND.MARKERSCALE.get(),
borderpad=self.PLOT.ISOBAT_LEGEND.BORDERPAD.get(),
handletextpad=self.PLOT.ISOBAT_LEGEND.HANDLETEXTPAD.get(),
borderaxespad=self.PLOT.ISOBAT_LEGEND.BORDERAXESPAD.get(),
labelspacing=self.PLOT.ISOBAT_LEGEND.LABELSPACING.get())
if not empty(self.PLOT.ISOBAT_LEGEND.TITLE.get()):
Ilegend.set_title(self.PLOT.ISOBAT_LEGEND.TITLE.get(),
prop=self.PLOT.ISOBAT_LEGEND.TITLEFONT)
if self.PLOT.WATER_COLOR.get() != 'None':
#toconsola("EG PLOT.WATER_COLOR por defecto 50m",wid=self.cons)
self.Max.add_feature(cfeat.NaturalEarthFeature('physical', 'ocean', \
self.PLOT.MAP_RESOLUTION.get(), \
facecolor=self.PLOT.WATER_COLOR.get()),zorder=self.PLOT.WATER_ZORDER.get())
if self.PLOT.LAND_COLOR.get() != 'None':
#toconsola("EG PLOT.LAND_COLOR por defecto 50m",wid=self.cons)
self.Max.add_feature(cfeat.NaturalEarthFeature('physical', 'land', \
self.PLOT.MAP_RESOLUTION.get(), \
facecolor=self.PLOT.LAND_COLOR.get()),zorder=self.PLOT.LAND_ZORDER.get())
if self.PLOT.COUNTRYLINE_SHOW.get():
#toconsola("EG PLOT.COUNTRYLINE",wid=self.cons)
self.Max.add_feature(cfeat.BORDERS,edgecolor=self.PLOT.COUNTRYLINE_COLOR.get(),
linewidth=self.PLOT.COUNTRYLINE_WIDTH.get(),
zorder=self.PLOT.LAND_ZORDER.get()+1)
if self.PLOT.RIVERS_SHOW.get():
toconsola("EG PLOT.RIVERS",wid=self.cons)
#print("EG PLOT.RIVERS")
self.Max.add_feature(cfeat.NaturalEarthFeature('physical','rivers_lake_centerlines', \
self.PLOT.MAP_RESOLUTION.get(), \
linewidth=self.PLOT.RIVERS_WIDTH.get(),
edgecolor=self.PLOT.RIVERS_COLOR.get(),
zorder=self.PLOT.LAND_ZORDER.get()+1))
if self.PLOT.GRID_SHOW.get():
#toconsola("EG PLOT.GRID"+str(self.PLOT.GRID_LINESTYLE.get()),wid=self.cons)
vmeridians = np.arange(self.PLOT.MERIDIAN_INI.get(), \
self.PLOT.MERIDIAN_FIN.get(), \
self.PLOT.MERIDIAN_INT.get())
vparallels = np.arange(self.PLOT.PARALLEL_INI.get(), \
self.PLOT.PARALLEL_FIN.get(), \
self.PLOT.PARALLEL_INT.get())
lstyle = {'size':self.PLOT.GRID_SIZE.get(),'color':self.PLOT.GRID_COLOR.get()}
gl = self.Max.gridlines(crs=ccrs.PlateCarree(),draw_labels=True,
linewidth=self.PLOT.GRID_LINEWIDTH.get(),
color=self.PLOT.GRID_FONTCOLOR.get(),
alpha=self.PLOT.GRID_ALPHA.get(),
linestyle=self.PLOT.GRID_LINESTYLE.get(),
zorder=self.PLOT.GRID_ZORDER.get())
# Lines visibility
gl.xlines, gl.ylines = True, True
if self.PLOT.GRID_LINESTYLE.get() == "None":
gl.xlines, gl.ylines = False, False
# xy labels visibility
if CARTOPY_VERSION < '0.18':
# Works with 0.17
gl.xlabels_top = self.PLOT.GRID_NORTH.get()
gl.xlabels_bottom = self.PLOT.GRID_SOUTH.get()
gl.ylabels_left = self.PLOT.GRID_WEST.get()
gl.ylabels_right = self.PLOT.GRID_EAST.get()
else:
# Works with 0.18 and later
gl.top_labels = self.PLOT.GRID_NORTH.get()
gl.bottom_labels = self.PLOT.GRID_SOUTH.get()
gl.left_labels = self.PLOT.GRID_WEST.get()
gl.right_labels = self.PLOT.GRID_EAST.get()
gl.xlocator = mticker.FixedLocator(vmeridians)
gl.ylocator = mticker.FixedLocator(vparallels)
gl.xlabel_style, gl.ylabel_style = lstyle, lstyle
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
#gl.xpadding , gl.ypadding = self.PLOT.LABEL_PAD.get(), self.PLOT.LABEL_PAD.get()
else:
# Default: no labels, no grid just Latitude and Longitude
#toconsola("EG XYLabels ..\n\t"+self.PLOT.XLABEL.get()+self.PLOT.YLABEL.get(),wid=self.cons)
#print("EG XYLabels ..\n\t",self.PLOT.XLABEL.get(),self.PLOT.YLABEL.get())
#font_family = self.PLOT.MAP_FONT_TYPE.get()
#font_size = self.PLOT.LABEL_SIZE.get()
font_weight = 'normal'
font = {'family' : font_family, 'weight' : font_weight,
'color' : self.PLOT.TEXT_COLOR.get(),
'size' : font_size}
self.Max.text(-self.PLOT.YLABEL_PAD.get(), 0.55, self.PLOT.YLABEL.get(), va="bottom", \
ha="center", rotation="vertical", rotation_mode="anchor",
transform=self.Max.transAxes,fontdict=font)
self.Max.text(0.5, -self.PLOT.XLABEL_PAD.get(), self.PLOT.XLABEL.get(), va="bottom", \
ha="center", rotation="horizontal", rotation_mode="anchor",
transform=self.Max.transAxes,fontdict=font)
# Title
#toconsola("Title:\n"+self.PLOT.TITLE.get(),wid=self.cons)
self.Max.set_title(self.PLOT.TITLE.get(),fontproperties=self.PLOT.TITLEFONT)
px,py = self.Max.title.get_position()
dy = self.PLOT.TITLE_PAD.get()/self.fig.get_dpi()
self.Max.title.set_position((px,py+dy))
if self.PLOT.GEOMAP.get():
#toconsola("EG PLOT.GEOMAP 2 scale: Not yet implemented",wid=self.cons)
#print("EG PLOT.GEOMAP 2 scale: Not yet implemented")
if self.PLOT.SCALE_SHOW.get():
try:
YOFFSET = float(self.PLOT.SCALE_YOFFSET.get())
except: YOFFSET = None
try:
LINEWIDTH = float(self.PLOT.SCALE_LINEWIDTH.get())
except: LINEWIDTH = None
#EG careful: scale_bar from tools does not seem to work
#toconsola("EG bar scale",wid=self.cons)
#scale_bar(self.Max, 1)
scale_bar(self.Max,proj=ccrs.PlateCarree(),
location=[self.PLOT.SCALE_XO.get(),self.PLOT.SCALE_YO.get()],
length=self.PLOT.SCALE_LENGTH.get(),
linecolor=self.PLOT.SCALE_LINECOLOR.get(),
fontcolor=self.PLOT.SCALE_FONTCOLOR.get(),
fontsize=self.PLOT.SCALE_FONTSIZE.get(),
zorder=self.PLOT.SCALE_ZORDER.get(),
linewidth=LINEWIDTH)
# Time stamp
try:
self.Mtime_stamp.remove()
except: pass
if len(self.DATE) > 0:
if self.PLOT.TIMESTAMP_SHOW.get():
#toconsola("EG Time stamp: "+str(self.DATE[self.L.get()]),wid=self.cons)
font_weight = 'normal'
if self.PLOT.TIMESTAMP_BOLD.get(): font_weight = 'bold'
self.Max.annotate(str(self.DATE[self.L.get()]), \
xy=(self.PLOT.TIMESTAMP_X.get(), \
self.PLOT.TIMESTAMP_Y.get()), \
xycoords='figure fraction', \
color=self.PLOT.TIMESTAMP_COLOR.get(), \
fontsize=self.PLOT.TIMESTAMP_SIZE.get(), \
fontfamily=font_family, fontweight=font_weight, \
annotation_clip=False)
if self.PLOT.LOGO_DISPLAY.get() == 1: self.plot_logo()
if len(mrklines) > 0 and self.PLOT.LEGEND.SHOW.get():
fontsize = self.PLOT.LEGEND.FONTSIZE.get()
mode = None
if self.PLOT.LEGEND.FONTSIZE.get() < 1: fontsize = None
if self.PLOT.LEGEND.MODE.get() == 1: mode = 'expand'
# Anchor BBOX:
if self.PLOT.LEGEND.USE_BB.get():
bb = [self.PLOT.LEGEND.BBx.get(),
self.PLOT.LEGEND.BBy.get()]
else:
bb = None
#try:
#toconsola("EG ax.legend",wid=self.cons)
legend = self.Max.legend(mrklines,mrklabls,
loc=self.PLOT.LEGEND.LOC.get(),
ncol=self.PLOT.LEGEND.NCOL.get(),
fontsize=fontsize,
frameon=self.PLOT.LEGEND.FRAMEON.get(),
fancybox=self.PLOT.LEGEND.FANCYBOX.get(),
shadow=self.PLOT.LEGEND.SHADOW.get(),
framealpha=self.PLOT.LEGEND.ALPHA.get(),
mode=mode,
bbox_to_anchor=bb,
facecolor=self.PLOT.LEGEND.COLOR.get(),
edgecolor=self.PLOT.LEGEND.EDGECOLOR.get(),
markerscale=self.PLOT.LEGEND.MARKERSCALE.get(),
borderpad=self.PLOT.LEGEND.BORDERPAD.get(),
handletextpad=self.PLOT.LEGEND.HANDLETEXTPAD.get(),
borderaxespad=self.PLOT.LEGEND.BORDERAXESPAD.get(),
labelspacing=self.PLOT.LEGEND.LABELSPACING.get())
#except: pass
try:
self.Max.add_artist(Ilegend)
except:
pass
if not empty(self.PLOT.LEGEND.TITLE.get()):
try:
legend.set_title(self.PLOT.LEGEND.TITLE.get(),
prop=self.PLOT.LEGEND.TITLEFONT)
except:
pass
# self.Max.set_extent([float(self.PLOT.WEST.get()) ,float(self.PLOT.EAST.get()),\
# float(self.PLOT.SOUTH.get()),float(self.PLOT.NORTH.get())],\
# crs=proj)
self.Mcanvas.draw()
return
def trajectory_editor(self):
# ==========================
''' Launch the editor of a trajectory '''
def _close():
# ===========
self.Window_editor.destroy()
self.Window_editor = None
# Check if the window was closed by EDITOR !!
if self.Window_editor is None:
pass
else:
try:
self.Window_editor.lift()
except:
self.Window_editor = None
if self.Window_editor is None:
self.Window_editor = tk.Toplevel(self.master)
self.Window_editor.title('GEOJSON EDITOR')
self.Window_editor.resizable(width=False,height=False)
self.Window_editor.protocol('WM_DELETE_WINDOW',_close)
else:
self.Window_editor.lift()
return
if self.nfloat == 0:
jeditor.EDITOR(self.Window_editor,wid=self.cons)
else:
jeditor.EDITOR(self.Window_editor, \
self.FLOAT[self.FLOAT_INDX.get()].FILENAME.get(),\
wid=self.cons)
def contour_mean(self):
# ==========================
''' Calculates the long term mean of a contour field '''
if self.ncdf == 0:
messagebox.showinfo(message='No Netcdf file opened yet')
return
ii = self.CDF_INDX.get()
if self.CDF[ii].PARENT is None:
toconsola('Calculating mean of current CONTOUR field',wid=self.cons)
else:
ii = self.CDF[ii].PARENT
toconsola('Calculating mean of PARENT CONTOUR field, ii='+str(ii),wid=self.cons)
K = self.CDF[ii].K.get()
L = self.CDF[ii].L.get()
nt = self.CDF[ii].FLD.icdf.nt
for L in range(0,nt):
data = self.CDF[ii].FLD.read(K=K,L=L,wid=self.cons)
if L==0:
num = data.copy()
else:
num = num + data
CDF = CONTOUR()
CDF.SOURCE = 'MEAN'
CDF.PARENT = ii # The index to PARENT data
CDF.FLD.data = num / nt
CDF.FLD.minval = np.nanmin(CDF.FLD.data)
CDF.FLD.maxval = np.nanmax(CDF.FLD.data)
toconsola('Min val = '+str(CDF.FLD.minval),wid=self.cons)
toconsola('Max val = '+str(CDF.FLD.maxval),wid=self.cons)
CDF.K.set(K)
if len(self.CDF[ii].Z_LIST) > 0:
CDF.K_LIST = [K]
CDF.Z_LIST = [self.CDF[ii].Z_LIST[K]]
# Middle of the time segment
t2 = 0.5*(self.CDF[ii].T_LIST[0]+self.CDF[ii].T_LIST[-1])
CDF.L.set(0)
CDF.L_LIST = [0]
CDF.T_LIST = [t2]
try:
CDF.DATE = [num2date(t2, \
units=self.CDF[ii].FLD.icdf.time_units, \
calendar=self.CDF[ii].FLD.icdf.time_calendar)]
except:
CDF.DATE = [0.5*(self.CDF[ii].FLD.icdf.nt-1)]
CDF.ALIAS.set('Average')
CDF.FLD.x = self.CDF[ii].FLD.x
CDF.FLD.y = self.CDF[ii].FLD.y
CDF.FLD.xx = self.CDF[ii].FLD.xx
CDF.FLD.yy = self.CDF[ii].FLD.yy
CDF.FLD.ndims = self.CDF[ii].FLD.ndims
CDF.FLD.with_axes = self.CDF[ii].FLD.with_axes
CDF.FLD.units = self.CDF[ii].FLD.units
CDF.FLD.missing = self.CDF[ii].FLD.missing
CDF.FLD.varname = self.CDF[ii].FLD.varname
CDF.FLD.varid = self.CDF[ii].FLD.varid
CDF.FLD.xmin = self.CDF[ii].FLD.xmin
CDF.FLD.xmax = self.CDF[ii].FLD.xmax
CDF.FLD.ymin = self.CDF[ii].FLD.ymin
CDF.FLD.ymax = self.CDF[ii].FLD.ymax
CDF.FILENAME.set(self.CDF[ii].FILENAME.get())
CDF.varname.set(CDF.FLD.varname)
CDF.FLD.nc = Dataset(self.CDF[ii].FILENAME.get())
CDF.FLD.icdf = tools.geocdf(wid=self.cons)
# We copy the original icdf information
conf = self.CDF[ii].FLD.icdf.conf_get()
CDF.FLD.icdf.conf_set(conf)
CDF.FLD.icdf.VAR_MENU = [CDF.FLD.varname]
# Add the appropriate changes
CDF.FLD.icdf.nt = 1
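# The averaged field carries a single time record, so nt is forced to 1 and the variable menu is reduced to this field.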
conf = self.CDF[ii].PLOT.conf_get()
CDF.PLOT.conf_set(conf)
CDF.show.set(True)
self.CDF[ii].show.set(False)
self.ncdf += 1
self.CDF.append(CDF)
self.CDF_INDX.set(self.ncdf-1)
self.CDF_LIST = list(range(self.ncdf))
# Adding a VECTOR in the Drawing class
#
self.LAYERS.add(TYPE='FLD',Filename=self.CDF[ii].FILENAME.get(),N=1,wid=self.cons)
#self.nfiles += 1
#self.FILENAMES.append(self.CDF[ii].FILENAME.get())
#self.FILETYPES.append('FLD')
#self.FILEORDER.append(self.ncdf-1)
#self.SEQUENCES.append(tk.BooleanVar(value=False))
#self.SEQLEADER.append(tk.BooleanVar(value=False))
#self.SEQNTIMES.append(1)
self.make_plot()
def contour_var(self):
# ==========================
''' Calculates the long term variance of a contour field '''
if self.ncdf == 0:
messagebox.showinfo(message='No Netcdf file opened yet')
return
ii = self.CDF_INDX.get()
if self.CDF[ii].PARENT is None:
toconsola('Calculating variance of current CONTOUR field',wid=self.cons)
else:
ii = self.CDF[ii].PARENT
toconsola('Calculating variance of PARENT CONTOUR field, ii='+str(ii),wid=self.cons)
K = self.CDF[ii].K.get()
L = self.CDF[ii].L.get()
nt = self.CDF[ii].FLD.icdf.nt
if nt <= 1:
messagebox.showinfo(message='Variance requires more than one time record')
return
for L in range(0,nt):
data = self.CDF[ii].FLD.read(K=K,L=L,wid=self.cons)
if L==0:
num1 = data.copy()
num2 = np.square(data)
else:
num1 += data
num2 += np.square(data)
#data = num2/nt - np.square(num1/nt)
data = num2/(nt-1) - np.square(num1)/(nt*(nt-1))
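# One-pass unbiased sample variance: s^2 = (sum(x^2) - (sum(x))^2 / n) / (n - 1), with n = nt time records.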
CDF = CONTOUR()
CDF.SOURCE = 'VARIANCE'
CDF.PARENT = ii
CDF.FLD.data = data.copy()
CDF.FLD.minval = float(data.min())
CDF.FLD.maxval = float(data.max())
toconsola('Variance Min val = '+str(CDF.FLD.minval),wid=self.cons)
toconsola('Variance Max val = '+str(CDF.FLD.maxval),wid=self.cons)
# Middle of the time segment
t2 = 0.5*(self.CDF[ii].T_LIST[0]+self.CDF[ii].T_LIST[-1])
CDF.K.set(K)
CDF.L.set(0)
CDF.K_LIST = [K]
CDF.L_LIST = [0]
CDF.Z_LIST = [self.CDF[ii].Z_LIST[K]]
CDF.T_LIST = [t2]
try:
CDF.DATE = [num2date(t2, \
units=self.CDF[ii].FLD.icdf.time_units, \
calendar=self.CDF[ii].FLD.icdf.time_calendar)]
except:
CDF.DATE = [0.5*(self.CDF[ii].FLD.icdf.nt-1)]
CDF.ALIAS.set('Variance')
CDF.FLD.x = self.CDF[ii].FLD.x
CDF.FLD.y = self.CDF[ii].FLD.y
CDF.FLD.xx = self.CDF[ii].FLD.xx
CDF.FLD.yy = self.CDF[ii].FLD.yy
CDF.FLD.ndims = self.CDF[ii].FLD.ndims
CDF.FLD.with_axes = self.CDF[ii].FLD.with_axes
CDF.FLD.units = self.CDF[ii].FLD.units
CDF.FLD.missing = self.CDF[ii].FLD.missing
CDF.FLD.varname = self.CDF[ii].FLD.varname
CDF.FLD.varid = self.CDF[ii].FLD.varid
CDF.FLD.xmin = self.CDF[ii].FLD.xmin
CDF.FLD.xmax = self.CDF[ii].FLD.xmax
CDF.FLD.ymin = self.CDF[ii].FLD.ymin
CDF.FLD.ymax = self.CDF[ii].FLD.ymax
CDF.FILENAME.set(self.CDF[ii].FILENAME.get())
CDF.varname.set(CDF.FLD.varname)
CDF.FLD.nc = Dataset(self.CDF[ii].FILENAME.get())
CDF.FLD.icdf = tools.geocdf(wid=self.cons)
# We copy the original icdf information
conf = self.CDF[ii].FLD.icdf.conf_get()
CDF.FLD.icdf.conf_set(conf)
# Add the appropriate changes
CDF.FLD.icdf.VAR_MENU = [CDF.FLD.varname]
CDF.FLD.icdf.nt = 1
conf = self.CDF[ii].PLOT.conf_get()
CDF.PLOT.conf_set(conf)
toconsola('Setting contour intervals ...',wid=self.cons)
try:
CDF.PLOT.CONTOUR_MIN.set(myround(CDF.FLD.minval))
except:
CDF.PLOT.CONTOUR_MIN.set(CDF.FLD.minval)
try:
CDF.PLOT.CONTOUR_MAX.set(myround(CDF.FLD.maxval))
except:
CDF.PLOT.CONTOUR_MAX.set(CDF.FLD.maxval)
dd = CDF.PLOT.CONTOUR_MAX.get() - CDF.PLOT.CONTOUR_MIN.get()
try:
CDF.PLOT.CONTOUR_INTERVAL.set(myround(0.1*dd,0))
except:
CDF.PLOT.CONTOUR_INTERVAL.set(0.1*dd)
CDF.show.set(True)
self.CDF[ii].show.set(False)
self.ncdf += 1
self.CDF.append(CDF)
self.CDF_INDX.set(self.ncdf-1)
self.CDF_LIST = list(range(self.ncdf))
self.LAYERS.add(TYPE='FLD',Filename=self.CDF[ii].FILENAME.get(),N=1,wid=self.cons)
#self.nfiles += 1
#self.FILENAMES.append(self.CDF[ii].FILENAME.get())
#self.FILETYPES.append('FLD')
#self.FILEORDER.append(self.ncdf-1)
#self.SEQUENCES.append(tk.BooleanVar(value=False))
#self.SEQLEADER.append(tk.BooleanVar(value=False))
#self.SEQNTIMES.append(1)
self.make_plot()
def get_map_coords(self):
# ====================
def _close():
# -----------
self.CAPTURE_POINT = False
self.Window_xysel.destroy()
self.Window_xysel = None
def _done():
# -----------
_close()
if self.Window_xysel is None:
self.CAPTURE_POINT = True
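# While CAPTURE_POINT is set, clicks on the map are expected to fill self.pxo/self.pyo (handled in canvas_click).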
self.Window_xysel = tk.Toplevel(self.master)
self.Window_xysel.title('Select point')
self.Window_xysel.resizable(width=False,height=False)
self.Window_xysel.protocol('WM_DELETE_WINDOW',_close)
F0 = ttk.Frame(self.Window_xysel,padding=5,borderwidth=5)
ttk.Label(F0,text='Enter or select a point in the map ...').grid(row=0,column=0,columnspan=6,sticky='we',pady=10)
ttk.Label(F0,text='x = ',width=5).grid(row=1,column=0,sticky='e')
ttk.Entry(F0,textvariable=self.pxo,width=15).grid(row=1,column=1,columnspan=3,sticky='ew',pady=5)
ttk.Label(F0,text='y = ',width=5).grid(row=1,column=4,sticky='e')
ttk.Entry(F0,textvariable=self.pyo,width=15).grid(row=1,column=5,columnspan=3,sticky='ew',pady=5)
ttk.Button(F0,text='Cancel',command=_close).grid(row=2,column=4,sticky='e',padx=5)
ttk.Button(F0,text='Done',command=_done).grid(row=2,column=5,sticky='e',padx=5)
F0.grid()
self.Window_xysel.wait_window()
return [self.pxo.get(), self.pyo.get()]
def vector_series(self):
# ==========================
''' Opens a figure and shows the time series of the velocity.
The user has selected a point. '''
if self.nvec == 0:
messagebox.showinfo(message='No currents file opened yet')
return
ii = self.VEC_INDX.get()
K = self.VEC[ii].K.get()
nt = self.VEC[ii].U.icdf.nt
ndims = self.VEC[ii].U.ndims
if nt == 1:
messagebox.showinfo(message='Single time step. No time series')
return
yy = self.get_map_coords()
xo = yy[0]; yo = yy[1]
dis = (xo-self.VEC[ii].U.xx)**2 + (yo-self.VEC[ii].U.yy)**2
ind = np.unravel_index(dis.argmin(), dis.shape)
io = ind[1]
jo = ind[0]
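# (jo, io) is the grid node closest to the selected point (minimum squared distance on the lon/lat grid).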
self.VEC[ii].jo.set(jo)
self.VEC[ii].io.set(io)
toconsola('Vector selected point: '+str(io)+', '+str(jo),wid=self.cons)
if ndims == 3:
if self.VEC[ii].U.icdf.ppl[self.VEC[ii].U.varid] > -1:
u = self.VEC[ii].U.nc.variables[self.VEC[ii].U.varname][:,jo,io].squeeze()
v = self.VEC[ii].V.nc.variables[self.VEC[ii].V.varname][:,jo,io].squeeze()
else:
toconsola('Invalid file!',wid=self.cons)
return
elif ndims == 4:
u = self.VEC[ii].U.nc.variables[self.VEC[ii].U.varname][:,K,jo,io].squeeze()
v = self.VEC[ii].V.nc.variables[self.VEC[ii].V.varname][:,K,jo,io].squeeze()
else:
toconsola("Invalid number of dimensions, "+str(ndims),wid=wid)
_u = u.filled(fill_value=np.nan)
_v = v.filled(fill_value=np.nan)
u = np.ma.masked_invalid(_u); del _u
v = np.ma.masked_invalid(_v); del _v
t = []
for i in range(nt):
t.append(datetime.datetime.strptime(str(self.VEC[ii].DATE[i]),'%Y-%m-%d %H:%M:%S'))
Window = tk.Toplevel(self.master)
Window.title('PLOTXY')
Window.resizable(width=False,height=False)
#Window.protocol('WM_DELETE_WINDOW',_close)
plotxy.PLOTXY(Window,t=t,u=u,v=v)
def vector_mean(self):
# ==========================
''' Calculates the long term mean of a vector field '''
if self.nvec == 0:
messagebox.showinfo(message='No currents file opened yet')
return
ii = self.VEC_INDX.get()
if self.VEC[ii].PARENT is None:
pass
else:
ii = self.VEC[ii].PARENT
K = self.VEC[ii].K.get()
L = self.VEC[ii].L.get()
nt = self.VEC[ii].U.icdf.nt
for L in range(0,nt):
print('L = ', L)
udata = self.VEC[ii].U.read(K=K,L=L,wid=self.cons)
vdata = self.VEC[ii].V.read(K=K,L=L,wid=self.cons)
#ny, nx = udata.shape
#udata = udata.reshape((1,ny,nx))
#vdata = vdata.reshape((1,ny,nx))
#if L==0:
# unum = udata.copy()
# vnum = vdata.copy()
#else:
# unum = np.ma.concatenate([unum,udata])
# vnum = np.ma.concatenate([vnum,vdata])
if L==0:
unum = udata.copy()
vnum = vdata.copy()
else:
unum = unum + udata
vnum = vnum + vdata
VEC = VECTOR()
# Make sure that the missing value is NaN:
#udata = unum.mean(axis=0)
#vdata = vnum.mean(axis=0)
VEC.SOURCE = 'MEAN'
VEC.PARENT = ii
udata = unum / nt
vdata = vnum / nt
_u = udata.filled(fill_value=np.nan)
_v = vdata.filled(fill_value=np.nan)
udata = np.ma.masked_invalid(_u); del _u
vdata = np.ma.masked_invalid(_v); del _v
VEC.U.data = udata
VEC.V.data = vdata
VEC.K.set(K)
if len(self.VEC[ii].Z_LIST) > 0:
VEC.K_LIST = [K]
VEC.Z_LIST = [self.VEC[ii].Z_LIST[K]]
VEC.L.set(0)
VEC.L_LIST = [0]
# Middle of the time segment
t2 = 0.5*(self.VEC[ii].T_LIST[0]+self.VEC[ii].T_LIST[-1])
VEC.T_LIST = [t2]
try:
VEC.DATE = [num2date(t2, \
units=self.VEC[ii].U.icdf.time_units, \
calendar=self.VEC[ii].U.icdf.time_calendar)]
except:
VEC.DATE = [0.5*(self.VEC[ii].U.icdf.nt-1)]
VEC.grid_type.set(self.VEC[ii].grid_type.get())
VEC.ALIAS.set('Average')
VEC.U.x = self.VEC[ii].U.x
VEC.U.y = self.VEC[ii].U.y
VEC.U.xx = self.VEC[ii].U.xx
VEC.U.yy = self.VEC[ii].U.yy
VEC.U.ndims = self.VEC[ii].U.ndims
VEC.U.with_axes = self.VEC[ii].U.with_axes
VEC.U.units = self.VEC[ii].U.units
VEC.U.missing = self.VEC[ii].U.missing
VEC.U.varname = self.VEC[ii].U.varname
VEC.U.varid = self.VEC[ii].U.varid
VEC.U.xmin = self.VEC[ii].U.xmin
VEC.U.xmax = self.VEC[ii].U.xmax
VEC.V.x = self.VEC[ii].V.x
VEC.V.y = self.VEC[ii].V.y
VEC.V.xx = self.VEC[ii].V.xx
VEC.V.yy = self.VEC[ii].V.yy
VEC.V.ndims = self.VEC[ii].V.ndims
VEC.V.with_axes = self.VEC[ii].V.with_axes
VEC.V.units = self.VEC[ii].V.units
VEC.V.missing = self.VEC[ii].V.missing
VEC.V.varname = self.VEC[ii].V.varname
VEC.V.varid = self.VEC[ii].V.varid
VEC.V.xmin = self.VEC[ii].V.xmin
VEC.V.xmax = self.VEC[ii].V.xmax
VEC.UFILENAME.set(self.VEC[ii].UFILENAME.get())
VEC.VFILENAME.set(self.VEC[ii].VFILENAME.get())
VEC.uname.set(VEC.U.varname)
VEC.vname.set(VEC.V.varname)
VEC.U.nc = Dataset(self.VEC[ii].UFILENAME.get())
VEC.V.nc = Dataset(self.VEC[ii].VFILENAME.get())
VEC.U.icdf = tools.geocdf(wid=self.cons)
VEC.V.icdf = tools.geocdf(wid=self.cons)
conf = self.VEC[ii].U.icdf.conf_get()
VEC.U.icdf.conf_set(conf)
VEC.U.icdf.VAR_MENU = [VEC.U.varname]
VEC.U.icdf.nt = 1
conf = self.VEC[ii].V.icdf.conf_get()
VEC.V.icdf.conf_set(conf)
VEC.V.icdf.VAR_MENU = [VEC.V.varname]
VEC.V.icdf.nt = 1
conf = self.VEC[ii].PLOT.conf_get()
VEC.PLOT.conf_set(conf)
VEC.show.set(True)
self.VEC[ii].show.set(False)
self.nvec += 1
self.VEC.append(VEC)
self.VEC_INDX.set(self.nvec-1)
self.VEC_LIST = list(range(self.nvec))
self.LAYERS.add(TYPE='VEC',Filename=self.VEC[ii].UFILENAME.get(),N=1,wid=self.cons)
#self.nfiles += 1
#self.FILENAMES.append(self.VEC[ii].UFILENAME.get())
#self.FILETYPES.append('VEC')
#self.FILEORDER.append(self.nvec-1)
#self.SEQUENCES.append(tk.BooleanVar(value=False))
#self.SEQLEADER.append(tk.BooleanVar(value=False))
#self.SEQNTIMES.append(1)
self.make_plot()
def marker_editor(self):
# ====================
MARKER = geomarker.parameters()
marklabel = tk.StringVar()
# Map projection
#
proj = map_proj(self.PLOT.MAP_PROJECTION.get())
def _close():
# -----------
self.CAPTURE_POINT = False
self.Window_markered.destroy()
self.Window_markered = None
def _done():
# -----------
_close()
MARKER.SOURCE = 'VIEWER'
MARKER.FILENAME.set(None)
self.nmarker += 1
self.MARKER.append(MARKER)
self.MARKER_INDX.set(self.nmarker-1)
self.MARKER_LIST = list(range(self.nmarker))
self.LAYERS.add(TYPE='MARKER',Filename=None,N=len(MARKER.lon),wid=self.cons)
self.LAYERS.print()
ii = self.MARKER_INDX.get()
self.make_plot()
def _clear():
# -----------
global log
log.delete('1.0','end')
marklabel.set('')
def _add():
# ---------
''' Add the new mark '''
#string = '\t {} {} {} \n'.format(self.pxo.get(),self.pyo.get(),marklabel.get())
string = '%9.4f, %9.4f, %s\n' %(self.pxo.get(),self.pyo.get(),marklabel.get())
print('string = ', string)
log.insert('end',string)
MARKER.lon.append(self.pxo.get())
MARKER.lat.append(self.pyo.get())
MARKER.label.append(marklabel.get())
MARKER.n = len(MARKER.lon)
geomarker.drawing(self.ax, proj['proj'], MARKER)
self.canvas.draw()
marklabel.set('')
def _load():
# ---------
global log
''' Load an existing marker file '''
nn = filedialog.askopenfilename(filetypes=[('CSV','*.csv'),
('TXT','*.txt'),
('ALL','*')],
initialdir='./',
parent=self.Window_marker)
if len(nn) == 0:
return
else:
filename = '%s' % nn
# Not empty filename:
MARKER.Read(filename)
if MARKER.n == 0:
return
for l in range(MARKER.n):
string = '%9.4f, %9.4f, %s\n' %(MARKER.lon[l], \
MARKER.lat[l],
MARKER.label[l])
log.insert('end',string)
def _save():
# ---------
global log
aa = log.get("1.0","end-1c")
''' Save markers onto file '''
filetypes = [('Text file','.txt')]
nn = filedialog.asksaveasfilename(title='Save marker file',
initialdir='./',
filetypes=filetypes,
confirmoverwrite=True)
if nn is None or len(nn) == 0:
return
filename = '%s' %nn
toconsola('Saving entries to file ' +filename,wid=self.cons)
f = open(filename,'w')
f.write(aa)
f.close()
if self.Window_markered is None:
self.CAPTURE_POINT = True
self.Window_markered = tk.Toplevel(self.master)
self.Window_markered.title('Marker editor')
self.Window_markered.resizable(width=False,height=False)
self.Window_markered.protocol('WM_DELETE_WINDOW',_close)
F0 = ttk.Frame(self.Window_markered,padding=5,borderwidth=5)
ttk.Label(F0,text='Enter or select a point in the map ...').grid(row=0,column=0,columnspan=6,sticky='we',pady=10)
ttk.Label(F0,text='x',width=12).grid(row=1,column=0,columnspan=6,sticky='we',pady=10)
ttk.Label(F0,text='y').grid(row=1,column=1,columnspan=6,sticky='we',pady=10)
ttk.Label(F0,text='Label').grid(row=1,column=2,columnspan=6,sticky='we',pady=10)
ttk.Entry(F0,textvariable=self.pxo,width=12).grid(row=2,column=0,columnspan=1,sticky='ew',pady=5)
ttk.Entry(F0,textvariable=self.pyo,width=12).grid(row=2,column=1,columnspan=1,sticky='ew',pady=5)
ttk.Entry(F0,textvariable=marklabel,width=12).grid(row=2,column=2,columnspan=1,sticky='ew',pady=5)
ttk.Button(F0,text='Add',command=_add).grid(row=2,column=3,sticky='ew',pady=5)
global log
log = tk.Text(F0,height=5)
log.grid(row=3,column=0,columnspan=4,padx=10,pady=10,sticky='nsew')
#log.configure(state='disabled')
# Scrollbar
scrollb = tk.Scrollbar(F0,command=log.yview)
scrollb.grid(row=3,column=4,sticky='nsew',padx=2,pady=2)
log['yscrollcommand'] = scrollb.set
ttk.Button(F0,text='Clear',command=_clear).grid(row=4,column=0,sticky='e',padx=5)
ttk.Button(F0,text='Load',command=_load).grid(row=4,column=1,sticky='e',padx=5)
ttk.Button(F0,text='Save',command=_save).grid(row=4,column=2,sticky='e',padx=5)
ttk.Button(F0,text='Done',command=_done).grid(row=4,column=3,sticky='e',padx=5)
F0.grid()
# ====================
def get_ellipse(self):
# ====================
''' Widget to read Ellipses '''
self.ESOURCE = tk.StringVar()
ELLIPSE = ellipse.ELLIPSE()
self.ESOURCE.set(self.ELLIPSE_OPTIONS[0])
def _cancel():
# ===========
self.Window_gellipse.destroy()
self.Window_gellipse = None
def _close():
# ===========
self.Window_gellipse.destroy()
self.Window_gellipse = None
self.make_plot()
if self.Window_cellipse is not None:
self.Window_cellipse.destroy()
self.Window_cellipse = None
def _done():
# ===========
_close()
def _clear():
# ===========
if self.nellipse == 0:
return
ii = self.ELLIPSE_INDX.get()
self.LAYERS.erase('ELLIPSE',ii,wid=self.cons)
self.LAYERS.print()
#for i in range(self.nfiles):
# if self.FILETYPES[i] == 'ELLIPSE' and self.FILEORDER[i] == ii:
# del self.FILENAMES[i]
# del self.FILETYPES[i]
# del self.FILEORDER[i]
# del self.SEQUENCES[i]
# del self.SEQLEADER[i]
# del self.SEQNTIMES[i]
# self.nfiles -= 1
if self.LAYERS.n == 0:
self.TIME = []
self.DATE = []
self.L.set(0)
self.L_LIST = []
self.NL = 0
self.bnext.configure(state='disabled')
self.bprev.configure(state='disabled')
self.PLOT.TLABEL.set('')
self.lbox['values'] = self.L_LIST
self.lbox.configure(state='disabled')
self.first = True
toconsola('Erasing record '+str(ii),wid=self.cons)
del self.ELLIPSE[ii]
self.nellipse -= 1
ii = self.nellipse-1 if ii >= self.nellipse else ii
toconsola('New ellipse = '+str(ii),wid=self.cons)
self.ELLIPSE_INDX.set(ii)
_refill(ii)
def _reget():
# ===========
self.ELLIPSE_INDX.set(_wsel.get())
ii = self.ELLIPSE_INDX.get()
_refill(ii)
def _refill(ii):
# ============
if ii >= 0:
self.ELLIPSE_LIST = list(range(self.nellipse))
_wsel['values'] = self.ELLIPSE_LIST
_went['textvariable'] = self.ELLIPSE[ii].FILENAME
_wstat['text'] = 'Number of ellipses = '+str(self.ELLIPSE[ii].n)
_wsel.configure(state='normal')
_show['variable'] = self.ELLIPSE[ii].show
_aent.configure(state='normal')
_aent['textvariable'] = self.ELLIPSE[ii].ALIAS
else:
self.ELLIPSE = []
self.ELLIPSE_LIST = ['0']
self.ELLIPSE_INDX.set(0)
#_wsel['values'] = self.ELLIPSE_LIST
_wsel['values'] = None
_went['textvariable'] = None
_wstat['text'] = ''
_wsel.configure(state='disabled')
_aent.configure(state='disabled')
_show.configure(state='disabled')
self.make_plot()
def _add():
# ===========
ISOURCE = self.ELLIPSE_OPTIONS.index(self.ESOURCE.get())
types=[('TXT','*.txt'),('ALL','*')]
nn = filedialog.askopenfilename(parent=self.Window_gellipse, \
filetypes=types)
if len(nn) == 0:
return
filename = '%s' % nn
toconsola('Reading ELLIPSE file '+filename,wid=self.cons)
ELLIPSE.Read(filename)
if ELLIPSE.n == 0:
return
self.nellipse += 1
self.ELLIPSE.append(ELLIPSE)
self.ELLIPSE_INDX.set(self.nellipse-1)
self.ELLIPSE_LIST = list(range(self.nellipse))
self.LAYERS.add(TYPE='ELLIPSE',Filename=filename,N=ELLIPSE.n,wid=self.cons)
self.LAYERS.print()
ii = self.ELLIPSE_INDX.get()
_refill(ii)
# Main Window ...
# ================
if self.Window_gellipse is None:
self.Window_gellipse = tk.Toplevel(self.master)
self.Window_gellipse.title('Variance ellipses')
self.Window_gellipse.protocol('WM_DELETE_WINDOW',_close)
else:
self.Window_gellipse.lift()
if self.nellipse > 0:
ii = self.ELLIPSE_INDX.get()
else:
ii = -1
F0 = ttk.Frame(self.Window_gellipse,padding=5)
#Add
ttk.Combobox(F0,textvariable=self.ESOURCE, \
values=self.ELLIPSE_OPTIONS).grid(row=0,column=0,padx=3)
ttk.Button(F0,text='Import',command=_add).grid(row=1,column=0,padx=3)
# Filename:
ttk.Label(F0,text='Ellipse file').grid(row=0,column=1,padx=3)
_wsel = ttk.Combobox(F0,textvariable=self.ELLIPSE_INDX, \
values=self.ELLIPSE_LIST,width=5)
_wsel.grid(row=0,column=2)
_wsel.bind('<<ComboboxSelected>>',lambda e: _reget())
_went = ttk.Entry(F0,justify='left',width=50,state='readonly')
_went.grid(row=0,column=3,columnspan=5,padx=3,sticky='w')
if ii == -1:
_wstat = ttk.Label(F0,text='',width=50,justify='left')
_wsel.configure(state='disabled')
else:
_wstat = ttk.Label(F0,text=' Ellipses in the file= '+str(self.ELLIPSE[ii].n),width=50,justify='left')
_went['textvariable'] = self.ELLIPSE[ii].FILENAME
_wstat.grid(row=1,column=3,columnspan=5,padx=3,sticky='w')
#Alias
ttk.Label(F0,text='Alias').grid(row=2,column=1,padx=3,pady=3)
_aent = ttk.Entry(F0,width=15,justify='left')
_aent.grid(row=2,column=2,columnspan=2,sticky='w')
F0.grid(row=0,column=0)
F1 = ttk.Frame(self.Window_gellipse,padding=5)
if ii == -1:
_show = ttk.Checkbutton(F1,text='Show')
_aent.configure(state='disabled')
else:
_show = ttk.Checkbutton(F1,text='Show',command=self.make_plot)
_show['variable']=self.ELLIPSE[ii].show
_aent['textvariable'] = self.ELLIPSE[ii].ALIAS
_show.grid(row=1,column=5,padx=3)
ttk.Button(F1,text='Cancel',command=_cancel).grid(row=1,column=6,padx=3)
ttk.Button(F1,text='Clear',command=_clear).grid(row=1,column=7,padx=3)
ttk.Button(F1,text='Plot',command=_close).grid(row=1,column=8,padx=3)
F1.grid(row=1,column=0)
# ====================
def calc_ellipse(self):
# ====================
''' Widget to calculate ellipse from velocity field '''
if self.nvec == 0:
messagebox.showinfo(message='No currents file opened yet')
return
ii = self.VEC_INDX.get()
if self.VEC[ii].PARENT is None:
toconsola('Calculating mean of current VECTOR field',wid=self.cons)
else:
ii = self.VEC[ii].PARENT
toconsola('Calculating mean of PARENT VECTOR field, ii='+str(ii),wid=self.cons)
K = self.VEC[ii].K.get()
nt = self.VEC[ii].U.icdf.nt
ndims = self.VEC[ii].U.ndims
ELLIPSE = ellipse.ELLIPSE()
ELLIPSE.SOURCE = 'VIEWER'
ELLIPSE.FILENAME.set(None)
# Map projection
#
proj = map_proj(self.PLOT.MAP_PROJECTION.get())
try:
self.pzo.set(self.VEC[ii].Z_LIST[K])
except:
self.pzo.set(0)
SUM = tk.DoubleVar()
SVM = tk.DoubleVar()
SPM = tk.DoubleVar()
SAA = tk.DoubleVar()
SBB = tk.DoubleVar()
SPP = tk.DoubleVar()
SXO = tk.DoubleVar()
SYO = tk.DoubleVar()
if nt == 1:
messagebox.showinfo(message='Single time step. No variance ellipses')
return
def _close():
# -----------
self.CAPTURE_POINT = False
self.Window_cellipse.destroy()
self.Window_cellipse = None
def _cancel():
# -----------
global log
ELLIPSE = ellipse.ELLIPSE()
log.delete('1.0','end')
self.make_plot()
def _done():
# -----------
filename = self.VEC[ii].UFILENAME.get()
ELLIPSE.SOURCE = 'VIEWER'
ELLIPSE.PARENT = ii
self.nellipse += 1
self.ELLIPSE.append(ELLIPSE)
self.ELLIPSE_INDX.set(self.nellipse-1)
self.ELLIPSE_LIST = list(range(self.nellipse))
self.LAYERS.add(TYPE='ELLIPSE',Filename=filename,N=len(ELLIPSE.xo),wid=self.cons)
self.LAYERS.print()
_close()
self.make_plot()
def _calc():
# ---------
dis = (self.pxo.get()-self.VEC[ii].U.xx)**2 + (self.pyo.get()-self.VEC[ii].U.yy)**2
ind = np.unravel_index(dis.argmin(), dis.shape)
io = ind[1]
jo = ind[0]
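# Note: assuming U.xx/U.yy are 2-D coordinate meshes of shape (ny, nx), dis has
# that same shape, so np.unravel_index(dis.argmin(), dis.shape) yields the
# (row, column) pair of the nearest grid node; row -> jo (y index) and
# column -> io (x index), hence ind[0] is jo and ind[1] is io above.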
self.VEC[ii].jo.set(jo)
self.VEC[ii].io.set(io)
toconsola('Vector selected point: '+str(io)+', '+str(jo),wid=self.cons)
if ndims == 3:
if self.VEC[ii].U.icdf.ppl[self.VEC[ii].U.varid] > -1:
u = self.VEC[ii].U.nc.variables[self.VEC[ii].U.varname][:,jo,io].squeeze()
v = self.VEC[ii].V.nc.variables[self.VEC[ii].V.varname][:,jo,io].squeeze()
else:
toconsola('Invalid file!',wid=self.cons)
return
elif ndims == 4:
u = self.VEC[ii].U.nc.variables[self.VEC[ii].U.varname][:,K,jo,io].squeeze()
v = self.VEC[ii].V.nc.variables[self.VEC[ii].V.varname][:,K,jo,io].squeeze()
else:
toconsola("Invalid number of dimensions, "+str(ndims),wid=self.cons)
return
_u = u.filled(fill_value=np.nan)
_v = v.filled(fill_value=np.nan)
u = np.ma.masked_invalid(_u); del _u   # mask NaNs (masked_equal does not match NaN)
v = np.ma.masked_invalid(_v); del _v
mu = np.mean(u)
mv = np.mean(v)
mphi = np.angle(mu+1j*mv)
print('Angle mean current = ', mphi, 180*mphi/np.pi)
u = u - np.mean(u)
v = v - np.mean(v)
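# Explanatory note: suu, svv and suv below are the (un-normalized) second
# moments of the velocity anomalies, i.e. the elements of the 2x2 variance
# matrix C = [[suu, suv], [suv, svv]].  For a symmetric 2x2 matrix the
# eigenvalues are 0.5*(Tra +/- sqrt(Tra^2 - 4*Det)), so a2 and b2 are the
# variances along the principal axes; a = sqrt(a2), b = sqrt(b2) give the
# ellipse semi-axes and aphi = 0.5*atan2(2*suv, suu-svv) the orientation of
# the major axis.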
suu = np.dot(u,u)
svv = np.dot(v,v)
suv = np.dot(u,v)
Tra = suu + svv
Det = suu*svv - suv*suv
a2 = 0.5*(Tra + np.sqrt(Tra*Tra - 4*Det))
b2 = 0.5*(Tra - np.sqrt(Tra*Tra - 4*Det))
aphi = 0.5*np.arctan2(2*suv,suu-svv)
print('Test: ',2*suv/(suu-svv), np.tan(2*aphi))
print('Eddy kinetic energy: ', 0.5*Tra)
print('Total eddy variance: ', a2 + b2, Tra)
print('Directional eddy variance: ', a2 - b2)
print('Isotropic eddy variance: ', 2*b2)
print('Polarization factor: ', (a2-b2)/(a2+b2))
print('Variance angle: ', aphi, 180*aphi/np.pi)
SXO.set(self.pxo.get())
SYO.set(self.pyo.get())
SUM.set(mu)
SVM.set(mv)
SPM.set(180*mphi/np.pi)
SAA.set(np.sqrt(a2))
SBB.set(np.sqrt(b2))
SPP.set(180*aphi/np.pi)
def _add():
# ---------
global log
ELLIPSE.n += 1
ELLIPSE.xo.append(SXO.get())
ELLIPSE.yo.append(SYO.get())
ELLIPSE.zo.append(self.pzo.get())
ELLIPSE.a.append(SAA.get())
ELLIPSE.b.append(SBB.get())
ELLIPSE.phim.append(SPM.get())
ELLIPSE.phia.append(SPP.get())
_wnn['text'] = 'n = %d' % ELLIPSE.n
i = -1
string = '%8.4f, %8.4f, %8.4f, %8.4f, %8.4f, %8.4f, %8.4f\n' % (ELLIPSE.xo[i], \
ELLIPSE.yo[i], \
ELLIPSE.zo[i], \
ELLIPSE.phim[i], \
ELLIPSE.phia[i], \
ELLIPSE.a[i], \
ELLIPSE.b[i])
log.insert('end',string)
SUM.set(None)
SVM.set(None)
SPM.set(None)
SAA.set(None)
SBB.set(None)
SPP.set(None)
ellipse.drawing(self.ax, proj['proj'], ELLIPSE)
self.canvas.draw()
def _save():
# ---------
''' Save ellipses onto file '''
filetypes = [('Text file','.txt')]
nn = filedialog.asksaveasfilename(title='Save Ellipse file',
initialdir='./',
filetypes=filetypes,
confirmoverwrite=True)
if nn is None or len(nn) == 0:
return
filename = '%s' %nn
toconsola('Saving entries to file ' +filename,wid=self.cons)
f = open(filename,'w')
for i in range(len(ELLIPSE.xo)):
string = '%8.4f, %8.4f, %8.4f, %8.4f, %8.4f, %8.4f, %8.4f\n' % (ELLIPSE.xo[i], \
ELLIPSE.yo[i], \
ELLIPSE.zo[i], \
ELLIPSE.phim[i], \
ELLIPSE.phia[i], \
ELLIPSE.a[i], \
ELLIPSE.b[i])
f.write(string)
f.close()
def _load():
# ---------
''' Loads ellipse positions from a file and calculates the ellipse
parameters using the current vector data '''
global log
nn = filedialog.askopenfilename(title='Load ellipse data',
parent=self.Window_cellipse,
initialdir='./')
if len(nn) == 0:
return
filename = '%s' % nn
with open(filename) as datafile:
for line in datafile.readlines():
line = line.strip()
columns = line.split(',')
ELLIPSE.xo.append(float(columns[0]))
ELLIPSE.yo.append(float(columns[1]))
ELLIPSE.zo.append(float(columns[2]))
dis = (ELLIPSE.xo[-1]-self.VEC[ii].U.xx)**2 + (ELLIPSE.yo[-1]-self.VEC[ii].U.yy)**2
ind = np.unravel_index(dis.argmin(), dis.shape)
io = ind[1]
jo = ind[0]
# Read the data
if ndims == 3:
if self.VEC[ii].U.icdf.ppl[self.VEC[ii].U.varid] > -1:
u = self.VEC[ii].U.nc.variables[self.VEC[ii].U.varname][:,jo,io].squeeze()
v = self.VEC[ii].V.nc.variables[self.VEC[ii].V.varname][:,jo,io].squeeze()
else:
toconsola('Invalid file!',wid=self.cons)
return
elif ndims == 4:
u = self.VEC[ii].U.nc.variables[self.VEC[ii].U.varname][:,K,jo,io].squeeze()
v = self.VEC[ii].V.nc.variables[self.VEC[ii].V.varname][:,K,jo,io].squeeze()
else:
toconsola("Invalid number of dimensions, "+str(ndims),wid=self.cons)
return
mu = np.mean(u)
mv = np.mean(v)
mphi = np.angle(mu+1j*mv)
u = u - np.mean(u)
v = v - np.mean(v)
suu = np.dot(u,u)
svv = np.dot(v,v)
suv = np.dot(u,v)
Tra = suu + svv
Det = suu*svv - suv*suv
a2 = 0.5*(Tra + np.sqrt(Tra*Tra - 4*Det))
b2 = 0.5*(Tra - np.sqrt(Tra*Tra - 4*Det))
aphi = 0.5*np.arctan2(2*suv,suu-svv)
ELLIPSE.phim.append(180*mphi/np.pi)
ELLIPSE.a.append(np.sqrt(a2))
ELLIPSE.b.append(np.sqrt(b2))
ELLIPSE.phia.append(180*aphi/np.pi)
ELLIPSE.n += 1
_wnn['text'] = 'n = %d' % ELLIPSE.n
i = -1
string = '%8.4f, %8.4f, %8.4f, %8.4f, %8.4f, %8.4f, %8.4f\n' % (ELLIPSE.xo[i], \
ELLIPSE.yo[i], ELLIPSE.zo[i], ELLIPSE.phim[i], ELLIPSE.phia[i], \
ELLIPSE.a[i], ELLIPSE.b[i])
log.insert('end',string)
SUM.set(None)
SVM.set(None)
SPM.set(None)
SAA.set(None)
SBB.set(None)
SPP.set(None)
ellipse.drawing(self.ax, proj['proj'], ELLIPSE)
self.canvas.draw()
# Main widget
# -----------
if self.Window_cellipse is not None:
self.Window_cellipse.lift()
return
self.CAPTURE_POINT = True
self.Window_cellipse = tk.Toplevel(self.master)
self.Window_cellipse.title('Ellipse calculator')
self.Window_cellipse.resizable(width=False,height=False)
self.Window_cellipse.protocol('WM_DELETE_WINDOW',_close)
Fm = ttk.Frame(self.Window_cellipse,padding=5,borderwidth=5)
ttk.Label(Fm,text='Enter or select a point in the map ...').grid(row=0,column=0,columnspan=6,sticky='we')
Fm.grid()
F0 = ttk.Frame(self.Window_cellipse,padding=5,borderwidth=5)
ttk.Label(F0,text='x').grid(row=1,column=0,sticky='we',padx=6)
_wsx = ttk.Entry(F0,textvariable=self.pxo,width=15,justify='left')
_wsx.grid(row=1,column=1,sticky='ew',padx=6)
ttk.Label(F0,text='y').grid(row=1,column=2,sticky='we',padx=6)
_wsy = ttk.Entry(F0,textvariable=self.pyo,width=15,justify='left')
_wsy.grid(row=1,column=3,sticky='ew',padx=6)
ttk.Label(F0,text='z').grid(row=1,column=4,sticky='we',padx=6)
_wsz = ttk.Entry(F0,textvariable=self.pzo,width=15,justify='left',state='readonly')
_wsz.grid(row=1,column=5,sticky='ew',padx=6)
ttk.Button(F0,text='Get Ellipse',command=_calc).grid(row=1,column=6,sticky='ew',pady=5,padx=6)
F0.grid()
F1 = ttk.Frame(self.Window_cellipse,padding=5,borderwidth=5)
ttk.Label(F1,text='U mean',width=9).grid(row=1,column=0,sticky='e',padx=3)
_wum = ttk.Entry(F1,textvariable=SUM,width=15,justify='left',state='readonly')
_wum.grid(row=1,column=1,sticky='ew',padx=3)
ttk.Label(F1,text='V mean',width=9).grid(row=1,column=2,sticky='e',padx=3)
_wvm = ttk.Entry(F1,textvariable=SVM,width=15,justify='left',state='readonly')
_wvm.grid(row=1,column=3,sticky='ew',padx=3)
ttk.Label(F1,text='Mean angle',width=9).grid(row=1,column=4,sticky='e',padx=3)
_wpm = ttk.Entry(F1,textvariable=SPM,width=15,justify='left',state='readonly')
_wpm.grid(row=1,column=5,sticky='ew',padx=3)
ttk.Label(F1,text='a',width=9,justify='right').grid(row=2,column=0,sticky='e',padx=3)
_waa = ttk.Entry(F1,textvariable=SAA,width=15,justify='left',state='readonly')
_waa.grid(row=2,column=1,sticky='ew',padx=3)
ttk.Label(F1,text='b',width=9).grid(row=2,column=2,sticky='e',padx=3)
_wbb = ttk.Entry(F1,textvariable=SBB,width=15,justify='left',state='readonly')
_wbb.grid(row=2,column=3,sticky='ew',padx=3)
ttk.Label(F1,text='Anom angle',width=9).grid(row=2,column=4,sticky='e',padx=3)
_wpp = ttk.Entry(F1,textvariable=SPP,width=15,justify='left',state='readonly')
_wpp.grid(row=2,column=5,sticky='ew',padx=3)
F1.grid()
F2 = ttk.Frame(self.Window_cellipse,padding=5,borderwidth=5)
global log
_wnn = ttk.Label(F2,text='n = 0',width=6)
_wnn.grid(row=0,column=0,sticky='ew',padx=3)
log = tk.Text(F2,height=5)
log.grid(row=3,column=0,columnspan=5,padx=10,pady=10,sticky='nsew')
# Scrollbar
scrollb = tk.Scrollbar(F2,command=log.yview)
scrollb.grid(row=3,column=5,sticky='nsew',padx=2,pady=2)
log['yscrollcommand'] = scrollb.set
F2.grid()
F3 = ttk.Frame(self.Window_cellipse,padding=5,borderwidth=5)
ttk.Button(F3,text='Clear',command=_cancel).grid(row=4,column=0,sticky='e',padx=5)
ttk.Button(F3,text='Load',command=_load).grid(row=4,column=1,sticky='e',padx=5)
ttk.Button(F3,text='Save',command=_save).grid(row=4,column=2,sticky='e',padx=5)
ttk.Button(F3,text='Add',command=_add).grid(row=4,column=3,sticky='e',padx=5)
ttk.Button(F3,text='Done',command=_done).grid(row=4,column=4,sticky='e',padx=5)
F3.grid()
def ellipse_config(self):
# =======================
if self.nellipse == 0:
messagebox.showinfo(message='No variance ellipse specified yet')
return
ii = self.ELLIPSE_INDX.get()
global eshow
def _cancel():
# ============
self.Window_ellipseconfig.destroy()
self.Window_ellipseconfig = None
def _apply():
# ===========
self.make_plot()
def _done():
# ==========
self.Window_ellipseconfig.destroy()
self.Window_ellipseconfig = None
self.make_plot()
def _loadconf():
# =============
'''Load ellipse configuration'''
toconsola('Restoring ellipse configuration from '+
self.ELLIPSE[ii].PLOT.FILECONF,wid=self.cons)
try:
self.ELLIPSE[ii].PLOT.load(self.ELLIPSE[ii].PLOT.FILECONF)
self.make_plot()
except:
toconsola('Error: Unable to load file '+
self.ELLIPSE[ii].PLOT.FILECONF,wid=self.cons)
def _saveconf():
# =============
'''Save ellipse configuration'''
toconsola('Saving ellipse configuration to '+
self.ELLIPSE[ii].PLOT.FILECONF,wid=self.cons)
try:
self.ELLIPSE[ii].PLOT.save(self.ELLIPSE[ii].PLOT.FILECONF)
except:
toconsola('Error: Unable to write file '+
self.ELLIPSE[ii].PLOT.FILECONF,wid=self.cons)
def _loadfromconf():
# ==================
'''Load ellipse configuration from a file'''
nn = filedialog.askopenfilename(title='Load ellipse configuration',
parent=self.Window_ellipseconfig,
initialdir=COSMO_CONF)
if len(nn) == 0:
return
self.ELLIPSE[ii].PLOT.FILECONF = '%s' % nn
toconsola('Restoring ellipse configuration from '+
self.ELLIPSE[ii].PLOT.FILECONF,wid=self.cons)
try:
self.ELLIPSE[ii].PLOT.load(self.ELLIPSE[ii].PLOT.FILECONF)
self.make_plot()
except:
toconsola('Error: Unable to load file '+
self.ELLIPSE[ii].PLOT.FILECONF,wid=self.cons)
def _saveasconf():
# ================
'''Save ellipse configuration to a new file'''
nn = filedialog.asksaveasfilename(title='Save ellipse configuration',
parent=self.Window_ellipseconfig,
initialdir=COSMO_CONF,
confirmoverwrite=True)
if nn is None or len(nn) == 0:
return
self.ELLIPSE[ii].PLOT.FILECONF = '%s' % nn
toconsola('Saving ellipse configuration to '+
self.ELLIPSE[ii].PLOT.FILECONF,wid=self.cons)
try:
self.ELLIPSE[ii].PLOT.save(self.ELLIPSE[ii].PLOT.FILECONF)
except:
toconsola('Error: Unable to write file '+
self.ELLIPSE[ii].PLOT.FILECONF,wid=self.cons)
if self.Window_ellipseconfig is not None:
self.Window_ellipseconfig.lift()
return
def _selected():
# ===============
global eshow
eshow.destroy()
ii = self.ELLIPSE_INDX.get()
eshow = ttk.Frame(self.Window_ellipseconfig,padding=10)
ellipse.Configuration2(eshow,self.ELLIPSE[ii])
f0 = ttk.Frame(eshow,padding=5)
ttk.Button(f0,text='Apply',command=_apply,padding=5). \
grid(row=0,column=1,padx=3)
ttk.Button(f0,text='Close',command=_done,padding=5). \
grid(row=0,column=2,padx=3)
f0.grid(sticky='ew',columnspan=3)
eshow.grid()
# Main window
# ============
self.Window_ellipseconfig = tk.Toplevel(self.master)
self.Window_ellipseconfig.title('Ellipse plot configuration')
self.Window_ellipseconfig.resizable(width=True,height=True)
self.Window_ellipseconfig.protocol('WM_DELETE_WINDOW',_cancel)
menubar = tk.Menu(self.Window_ellipseconfig)
menu = tk.Menu(menubar,tearoff=0)
menubar.add_cascade(label='Configuration',menu=menu)
menu.add_command(label='Restore',command=_loadconf)
menu.add_command(label='Restore from',command=_loadfromconf)
menu.add_command(label='Save',command=_saveconf)
menu.add_command(label='Save as',command=_saveasconf)
try:
self.Window_ellipseconfig.config(menu=menubar)
except AttributeError:
# master is a toplevel window (Python 2.4/Tkinter 1.63)
self.master.tk.call(self.Window_ellipseconfig, "config", "-menu", menubar)
fsel = ttk.Frame(self.Window_ellipseconfig,padding=10)
ttk.Label(fsel,text="File: ").grid(row=0,column=0,sticky='e',padx=3)
_wsel = ttk.Combobox(fsel,textvariable=self.ELLIPSE_INDX,
values=self.ELLIPSE_LIST,width=5)
_wsel.grid(row=0,column=1,sticky='w',padx=3)
_wsel.bind('<<ComboboxSelected>>',lambda e:_selected())
_went = ttk.Entry(fsel,justify='left',width=80,state='readonly')
_went.grid(row=0,column=2,columnspan=8,padx=3,sticky='w')
fsel.grid()
_went['textvariable'] = self.ELLIPSE[ii].FILENAME
eshow = ttk.Frame(self.Window_ellipseconfig,padding=10)
ellipse.Configuration2(eshow,self.ELLIPSE[ii])
f0 = ttk.Frame(eshow,padding=5)
ttk.Button(f0,text='Apply',command=_apply,padding=5). \
grid(row=0,column=1,padx=3)
ttk.Button(f0,text='Close',command=_done,padding=5). \
grid(row=0,column=2,padx=3)
f0.grid(sticky='ew',columnspan=3)
eshow.grid()
# ====================
def get_patch(self):
# ====================
''' Widget to add patches '''
global _wcx,_wcy,_wcr,_wxw,_wxe,_wys,_wyn
PATCH = patch.PATCH()
PATCH.SOURCE = 'VIEWER'
def _close():
# ===========
self.Window_patch.destroy()
self.Window_patch = None
def _cancel():
# ===========
PATCH = patch.PATCH()
_close()
def _done():
# ===========
if PATCH.TYPE.get() == 'Rectangle':
xo = float(_wxw.get())
x1 = float(_wxe.get())
yo = float(_wys.get())
y1 = float(_wyn.get())
PATCH.xo.set(xo)
PATCH.yo.set(yo)
PATCH.dx.set(x1-xo)
PATCH.dy.set(y1-yo)
PATCH.show.set(True)
if PATCH.TYPE.get() == 'Circle':
xo = float(_wcx.get())
yo = float(_wcy.get())
rr = float(_wcr.get())
PATCH.xo.set(xo)
PATCH.yo.set(yo)
PATCH.dx.set(np.abs(rr))
PATCH.dy.set(np.abs(rr))
PATCH.show.set(True)
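# Note: as used in this widget, PATCH.dx/PATCH.dy hold the rectangle width
# and height (east-west and south-north extents) for 'Rectangle' patches,
# and the same radius value in both fields for 'Circle' patches; the drawing
# routine is assumed to interpret them accordingly.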
self.npatch += 1
self.PATCH.append(PATCH)
self.PATCH_INDX.set(self.npatch-1)
self.PATCH_LIST = list(range(self.npatch))
self.LAYERS.add(TYPE='PATCH',Filename=None,N=1,wid=self.cons)
self.LAYERS.print()
_close()
self.make_plot()
def _sel():
# =========
if PATCH.TYPE.get() == 'Rectangle':
_wxw.configure(state='normal')
_wxe.configure(state='normal')
_wys.configure(state='normal')
_wyn.configure(state='normal')
_wcx.configure(state='disabled')
_wcy.configure(state='disabled')
_wcr.configure(state='disabled')
elif PATCH.TYPE.get() == 'Circle':
_wxw.configure(state='disabled')
_wxe.configure(state='disabled')
_wys.configure(state='disabled')
_wyn.configure(state='disabled')
_wcx.configure(state='normal')
_wcy.configure(state='normal')
_wcr.configure(state='normal')
# Main Window ...
# ================
if self.Window_patch is None:
self.Window_patch = tk.Toplevel(self.master)
self.Window_patch.title('Add/configure Patch')
self.Window_patch.protocol('WM_DELETE_WINDOW',_close)
else:
self.Window_patch.lift()
F0 = ttk.Frame(self.Window_patch,padding=5)
# Add
ttk.Radiobutton(F0,text='Rectangle',variable=PATCH.TYPE,value='Rectangle',command=_sel).grid(row=0,column=0,sticky='w',padx=3)
ttk.Radiobutton(F0,text='Circle',variable=PATCH.TYPE,value='Circle',command=_sel).grid(row=1,column=0,sticky='w',padx=3)
_wxw = ttk.Entry(F0,width=8,justify='left')
_wxw.grid(row=0,column=1,padx=3)
_wxe = ttk.Entry(F0,width=8,justify='left')
_wxe.grid(row=0,column=2,padx=3)
_wys = ttk.Entry(F0,width=8,justify='left')
_wys.grid(row=0,column=3,padx=3)
_wyn = ttk.Entry(F0,width=8,justify='left')
_wyn.grid(row=0,column=4,padx=3)
ttk.Label(F0,text='West, East, South, North').grid(row=0,column=5,padx=3,sticky='w')
_wcx = ttk.Entry(F0,width=8,justify='left')
_wcx.grid(row=1,column=1,padx=3)
_wcy = ttk.Entry(F0,width=8,justify='left')
_wcy.grid(row=1,column=2,padx=3)
_wcr = ttk.Entry(F0,width=8,justify='left')
_wcr.grid(row=1,column=3,padx=3)
ttk.Label(F0,text='X, Y, Radius').grid(row=1,column=5,padx=3,sticky='w')
_wxw.configure(state='disabled')
_wxe.configure(state='disabled')
_wys.configure(state='disabled')
_wyn.configure(state='disabled')
_wcx.configure(state='disabled')
_wcy.configure(state='disabled')
_wcr.configure(state='disabled')
#Alias
ttk.Label(F0,text='Alias').grid(row=2,column=0,padx=3,pady=3)
_aent = ttk.Entry(F0,textvariable=PATCH.ALIAS,width=17,justify='left')
_aent.grid(row=2,column=1,columnspan=2,sticky='w')
#
F0.grid(row=0,column=0)
if self.nellipse > 0:
ii = self.ELLIPSE_INDX.get()
else:
ii = -1
F1 = ttk.Frame(self.Window_patch,padding=5)
ttk.Button(F1,text='Cancel',command=_cancel).grid(row=1,column=2,padx=3)
ttk.Button(F1,text='Done',command=_done).grid(row=1,column=3,padx=3)
F1.grid(row=1,column=0)
# =======================
def patch_config(self):
# =======================
if self.npatch == 0:
messagebox.showinfo(message='No patch added yet')
return
ii = self.PATCH_INDX.get()
global pshow
def _cancel():
# ============
self.Window_patchconfig.destroy()
self.Window_patchconfig = None
def _apply():
# ===========
self.make_plot()
def _done():
# ==========
self.Window_patchconfig.destroy()
self.Window_patchconfig = None
self.make_plot()
def _loadconf():
# =============
'''Load patch configuration'''
toconsola('Restoring patch configuration from '+
self.PATCH[ii].PLOT.FILECONF,wid=self.cons)
try:
self.PATCH[ii].PLOT.load(self.PATCH[ii].PLOT.FILECONF)
self.make_plot()
except:
toconsola('Error: Unable to load file '+
self.PATCH[ii].PLOT.FILECONF,wid=self.cons)
def _saveconf():
# =============
'''Save patch configuration'''
toconsola('Saving patch configuration to '+
self.PATCH[ii].PLOT.FILECONF,wid=self.cons)
try:
self.PATCH[ii].PLOT.save(self.PATCH[ii].PLOT.FILECONF)
except:
toconsola('Error: Unable to write file '+
self.PATCH[ii].PLOT.FILECONF,wid=self.cons)
def _loadfromconf():
# ==================
'''Load patch configuration from a file'''
nn = filedialog.askopenfilename(title='Load patch configuration',
parent=self.Window_patchconfig,
initialdir=COSMO_CONF)
if len(nn) == 0:
return
self.PATCH[ii].PLOT.FILECONF = '%s' % nn
toconsola('Restoring patch configuration from '+
self.PATCH[ii].PLOT.FILECONF,wid=self.cons)
try:
self.PATCH[ii].PLOT.load(self.PATCH[ii].PLOT.FILECONF)
self.make_plot()
except:
toconsola('Error: Unable to load file '+
self.PATCH[ii].PLOT.FILECONF,wid=self.cons)
def _saveasconf():
# ================
'''Save patch configuration to a new file'''
nn = filedialog.asksaveasfilename(title='Save patch configuration',
parent=self.Window_patchconfig,
initialdir=COSMO_CONF,
confirmoverwrite=True)
if nn is None or len(nn) == 0:
return
self.PATCH[ii].PLOT.FILECONF = '%s' % nn
toconsola('Saving patch configuration to '+
self.PATCH[ii].PLOT.FILECONF,wid=self.cons)
try:
self.PATCH[ii].PLOT.save(self.PATCH[ii].PLOT.FILECONF)
except:
toconsola('Error: Unable to write file '+
self.PATCH[ii].PLOT.FILECONF,wid=self.cons)
if self.Window_patchconfig is not None:
self.Window_patchconfig.lift()
return
def _selected():
# ==============
ii = self.PATCH_INDX.get()
global pshow
pshow.destroy()
pshow = ttk.Frame(self.Window_patchconfig,padding=10)
patch.Configuration(pshow,self.PATCH[ii])
f0 = ttk.Frame(pshow,padding=5)
ttk.Button(f0,text='Apply',command=_apply,padding=5). \
grid(row=0,column=1,padx=3)
ttk.Button(f0,text='Close',command=_done,padding=5). \
grid(row=0,column=2,padx=3)
f0.grid(sticky='ew',columnspan=3)
pshow.grid()
# Main window
# ============
self.Window_patchconfig = tk.Toplevel(self.master)
self.Window_patchconfig.title('Patch configuration')
self.Window_patchconfig.resizable(width=True,height=True)
self.Window_patchconfig.protocol('WM_DELETE_WINDOW',_cancel)
menubar = tk.Menu(self.Window_patchconfig)
menu = tk.Menu(menubar,tearoff=0)
menubar.add_cascade(label='Configuration',menu=menu)
menu.add_command(label='Restore',command=_loadconf)
menu.add_command(label='Restore from',command=_loadfromconf)
menu.add_command(label='Save',command=_saveconf)
menu.add_command(label='Save as',command=_saveasconf)
try:
self.Window_patchconfig.config(menu=menubar)
except AttributeError:
# master is a toplevel window (Python 2.4/Tkinter 1.63)
self.master.tk.call(self.Window_patchconfig, "config", "-menu", menubar)
fsel = ttk.Frame(self.Window_patchconfig,padding=10)
ttk.Label(fsel,text="Patch: ").grid(row=0,column=0,sticky='e',padx=3)
_wsel = ttk.Combobox(fsel,textvariable=self.PATCH_INDX,
values=self.PATCH_LIST,width=5)
_wsel.grid(row=0,column=1,sticky='w',padx=3)
_wsel.bind('<<ComboboxSelected>>',lambda e:_selected())
#_went = ttk.Entry(fsel,justify='left',width=50,state='readonly')
#_went.grid(row=0,column=2,columnspan=5,padx=3,sticky='w')
#_went = ttk.Entry(fsel,justify='left',width=80,state='readonly')
#_went.grid(row=0,column=2,columnspan=8,padx=3,sticky='w')
fsel.grid()
#_went ['textvariable'] = self.PATCH[ii].FILENAME
pshow = ttk.Frame(self.Window_patchconfig,padding=10)
patch.Configuration(pshow,self.PATCH[ii])
f0 = ttk.Frame(pshow,padding=5)
ttk.Button(f0,text='Apply',command=_apply,padding=5). \
grid(row=0,column=1,padx=3)
ttk.Button(f0,text='Close',command=_done,padding=5). \
grid(row=0,column=2,padx=3)
f0.grid(sticky='ew',columnspan=3)
pshow.grid()
# =======================================================
def skill_Liu(self):
# =======================================================
if self.nvec == 0:
messagebox.showinfo(message='No currents file opened yet')
return
if self.nfloat == 0:
messagebox.showinfo(message='No Lagrangian file opened yet')
return
def _get_release():
# =================
if self.time_ini.get() == 0:
# The release point is defined as the buoy position
# at the earliest model time step. There may be a mismatch
# between the actual release of the buoy and the location
# of the buoy at that model time step.
#
for i in range(len(T)):
if np.isnan(X[i]) or np.isnan(Y[i]):
pass
else:
ko = i
break
txo.set(X[ko])
tyo.set(Y[ko])
try:
tzo.set(self.VEC[ii].Z[0])
except:
tzo.set(0)
tdo.set(self.VEC[ii].DATE[ko])
tto.set(T[ko])
tdt.set(T[ko] - T[0])
else:
res = tools.initial_position(self.VEC[ii],self.FLOAT[jj],wid=self.cons)
if res is None:
messagebox.showinfo(message='No initial position has been found')
return
ko = res[0][self.Fp.get()]
txo.set(res[1][self.Fp.get()][0])
tyo.set(res[2][self.Fp.get()][0])
tzo.set(res[3][self.Fp.get()][0])
tdo.set(res[4][self.Fp.get()])
tto.set(res[5][self.Fp.get()][0])
tdt.set(res[6][self.Fp.get()][0])
ii = self.VEC_INDX.get()
jj = self.FLOAT_INDX.get()
NFLOATS = self.FLOAT[jj].nfloats
FLOAT_LIST = []
for i in range(NFLOATS):
FLOAT_LIST.append(i)
T = self.VEC[ii].TIME
X = self.FLOAT[jj].Fx(T)
Y = self.FLOAT[jj].Fy(T)
txo = tk.DoubleVar()
tyo = tk.DoubleVar()
tzo = tk.DoubleVar()
tdo = tk.DoubleVar()
tto = tk.DoubleVar()
tdt = tk.DoubleVar()
_get_release()
RELEASE_TIME = tto.get()
PERIOD = tk.IntVar()
PERIOD_LIST = []
_wlst = None
global have_run, NT
have_run = False
NT = None
FFx = None
FFy = None
global velocity_model
global velocity_buoy
# ===================
class buoy():
# ===================
def __init__(self):
# =================
# Define the structure
self.lon = []
self.lat = []
self.date = [] # Date as datetime structure
self.time = [] # Date as a number
def _close():
# ===========
self.Window_skill.destroy()
self.Window_skill = None
def _done():
# ==========
global have_run, NT
global separation_mod_obs,displacement_buoy,displacement_model,B,M,model_color
if not have_run:
with open(self.release_file.get(),'w') as f:
ss = "%9.3f, %9.3f, %9.3f, %9.0f\n" % (txo.get(), tyo.get(), tzo.get(), tdt.get())
f.write(ss)
CLM = clm.parameters()
command = CLM.PATH.get() + CLM.BIN.get()
options = ' -U file='+self.VEC[ii].UFILENAME.get()
options += ' x='+self.VEC[ii].U.icdf.xname
options += ' y='+self.VEC[ii].U.icdf.yname
if self.VEC[ii].U.icdf.idz >= 0:
options += ' z='+self.VEC[ii].U.icdf.zname
options += ' t='+self.VEC[ii].U.icdf.tname
options += ' u='+self.VEC[ii].uname.get()
options += ' -V file='+self.VEC[ii].VFILENAME.get()
options += ' x='+self.VEC[ii].V.icdf.xname
options += ' y='+self.VEC[ii].V.icdf.yname
if self.VEC[ii].V.icdf.idz >= 0:
options += ' z='+self.VEC[ii].V.icdf.zname
options += ' t='+self.VEC[ii].V.icdf.tname
options += ' v='+self.VEC[ii].vname.get()
options += ' -release ' + self.release_file.get()
options += ' -idt %d ' % self.clm_idt.get()
options += ' -out '+ self.out_file.get()
command += options
toconsola(command,wid=self.cons)
os.system(command)
if os.path.isfile(self.out_file.get()):
FLT = lagrangian.parameters()
toconsola(FLT.MESSAGE,wid=self.cons)
FLT.Read(self.out_file.get())
if FLT is None:
return
have_run = True
else:
have_run = False
if not have_run:
return
# Add the Lagrangian simulation to the cosmo-view layers !!!
FLT.Fx = interpolate.interp1d(FLT.TIME,FLT.lon, bounds_error=False, fill_value=np.NaN)
FLT.MAPX = FLT.Fx(self.TIME)
FLT.Fy = interpolate.interp1d(FLT.TIME,FLT.lat, bounds_error=False, fill_value=np.NaN)
FLT.MAPY = FLT.Fy(self.TIME)
FLT.SOURCE = 'clm'
FLT.PLOT.LINE_COLOR.set(self.VEC[ii].PLOT.CURRENT_COLOR.get())
self.nfloat += 1
self.FLOAT.append(FLT)
self.FLOAT_INDX.set(self.nfloat-1)
self.FLOAT_LIST = list(range(self.nfloat))
model_color = self.FLOAT[-1].PLOT.LINE_COLOR.get()
nt = len(FLT.TIME)
self.LAYERS.add(TYPE='FLOAT',Filename=FLT.FILENAME.get(),N=nt,wid=self.cons)
self.make_plot()
# Clean model and buoy:
ind = []
for i in range(len(FLT.lon)):
if np.isnan(FLT.lon[i]) or np.isnan(FLT.lat[i]):
ind.append(i)
if len(ind)>0:
aa = np.delete(FLT.lon,ind)
FLT.lon = aa
aa = np.delete(FLT.lat,ind)
FLT.lat = aa
aa = np.delete(FLT.DATE,ind)
FLT.DATE = aa
aa = np.delete(FLT.TIME,ind)
FLT.TIME = aa
FLT.nrecords = len(FLT.DATE)
print('LON From the model: ', FLT.lon[0:5])
print('DATE From the model: ', FLT.DATE[0:5])
# Once cropped, check if the model has been able to explicitly save the
# RELEASE point:
#
if FLT.TIME[0] > RELEASE_TIME:
FLT.lon = np.insert(FLT.lon,0,txo.get())
FLT.lat = np.insert(FLT.lat,0,tyo.get())
FLT.DATE = np.insert(FLT.DATE,0,tdo.get())
FLT.TIME = np.insert(FLT.TIME,0,tto.get())
# New interpolation function:
#
FFx = interpolate.interp1d(FLT.TIME,FLT.lon, bounds_error=False, fill_value=np.NaN)
FFy = interpolate.interp1d(FLT.TIME,FLT.lat, bounds_error=False, fill_value=np.NaN)
# Subsampled time axis:
#
duration_hours = (FLT.TIME[-1] - FLT.TIME[0])/3600
nt = int(duration_hours/self.time_sampling.get())
if FLT.TIME[0] > RELEASE_TIME:
print('Inserting release position at cropped model solution ...')
dd = [tdo.get()]
tt = [tto.get()]
else:
dd = []
tt = []
rr = FLT.DATE[0] + datetime.timedelta(hours=-self.time_sampling.get())
for i in range(nt):
rr += datetime.timedelta(hours=self.time_sampling.get())
dd.append(rr) # Date, every time_sampling hours
tt.append(rr.timestamp()) # Ordinal time, every time_sampling hours
dd = np.array(dd)
tt = np.array(tt)
nt = len(dd)
# Interpolate the geojson onto the constructed time axis
#
B = buoy()
B.lon = self.FLOAT[jj].Fx(tt)
B.lat = self.FLOAT[jj].Fy(tt)
B.date = dd[:]
B.time = tt[:]
# Interpolate the model
#
M = buoy()
M.lon = FFx(tt)
M.lat = FFy(tt)
M.date = dd[:]
M.time = tt[:]
d = len(M.lon)
a = np.arange(12,d,12)
if np.remainder(d-1,12) == 0:
PERIOD_LIST = list(a)
else:
PERIOD_LIST = list(np.append(a,d))
_wlst['values'] = PERIOD_LIST
_wlst.configure(state='normal')
_wlst.set(PERIOD_LIST[-1])
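# Note: the values offered as "Target prediction" are numbers of sampling
# steps in multiples of 12 (i.e. every 12*time_sampling hours), with the full
# record length d appended when d-1 is not itself a multiple of 12; the
# selection sets NT = PERIOD + 1 points in _lselection below.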
global velocity_model
global velocity_buoy
# Displacement of the model (not used in any calculation):
dl = []
uu = []
for i in range(1,len(M.lon)):
dl.append(tools.haversine((M.lon[i-1],M.lat[i-1]),(M.lon[i],M.lat[i])))
dt = M.time[i] - M.time[i-1] #Difference in seconds
if dt > 1:
uu.append(dl[i-1]/dt)
else:
uu.append(0)
displacement_model = np.array(dl)
velocity_model = np.array(uu)
#print('Model displacement: ', displacement_model)
print('Model velocity:', velocity_model)
# Displacement of the buoy:
dl = []
uu = []
for i in range(1,len(B.lon)):
dl.append(tools.haversine((B.lon[i-1],B.lat[i-1]),(B.lon[i],B.lat[i])))
dt = B.time[i] - B.time[i-1] #Difference in seconds
if dt > 1:
uu.append(dl[i-1]/dt)
else:
uu.append(0)
displacement_buoy = np.array(dl)
velocity_buoy = np.array(uu)
#print('Buoy displacement: ', displacement_buoy)
print('Buoy velocity:', velocity_buoy)
# Separation buoy - model:
dl = []
for i in range(len(B.lon)):
dl.append(tools.haversine((B.lon[i],B.lat[i]),(M.lon[i],M.lat[i])))
separation_mod_obs = np.array(dl)
#print('Separation : ', separation_mod_obs)
NT = len(B.lon)
final_separation = separation_mod_obs[NT-1]
print('final separation ', final_separation)
index_s = []
num = np.sum(separation_mod_obs[1:NT]) # d_1 + d_2 + d_3 + ...
l_n = []
for i in range(NT-1):
l_n.append(np.sum(displacement_buoy[0:i+1]))
den = np.sum(np.array(l_n)) # Denominator: l_1 + l_2 + l_3 + ...
index_s = num/den
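# Explanatory note: index_s is the trajectory separation index of Liu &
# Weisberg (2011, "LW2011"),
#   s = sum_i d_i / sum_i l_i,
# where d_i is the model-buoy separation at step i and l_i the cumulative
# distance travelled by the buoy up to step i.  The skill score computed
# below is ss = 1 - s/n when s < n, and 0 otherwise, with n the
# normalization factor self.index_n.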
# Histogram of velocitities:
#
#tools.dhist(velocity_buoy[:NT-1],velocity_model[:NT-1])
# LIU index:
#
if index_s < self.index_n.get():
ss = 1 - index_s/self.index_n.get()
else:
ss = 0
print('Index s : ', index_s)
print('Skill score, ss = ', ss)
# COSMO index:
# We define it as a product of a geometric index and an arithmetic index to
# account both for the direction and the normalized closeness of the
# predicted and observed positions:
# Buoy bearing:
buoy_bearing = tools.initial_bearing((B.lon[0],B.lat[0]),(B.lon[NT-1],B.lat[NT-1]))
model_bearing = tools.initial_bearing((M.lon[0],M.lat[0]),(M.lon[NT-1],M.lat[NT-1]))
print('Buoy bearing: ', buoy_bearing)
print('Model bearing: ', model_bearing)
theta = tools.angle_diff(buoy_bearing,model_bearing)
direction_factor = np.cos(np.pi*theta/180)
if direction_factor < 0:
direction_factor = 0
distance_factor = 1/(1+final_separation/l_n[NT-2])
print('Bearing angle difference: ', theta)
print('Direction factor, max(0,cos(theta)): ', direction_factor)
print('Buoy-model prediction distance: ', final_separation)
print('Buoy travelled distance: ', l_n[-1])
print('Distance factor : ', distance_factor)
cosmo_index = direction_factor * distance_factor
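# Worked example (illustrative numbers only): if the bearing difference is
# theta = 60 deg and the final separation equals half the buoy's travelled
# distance, then
#   direction_factor = cos(60 deg)   = 0.5
#   distance_factor  = 1/(1 + 0.5)   = 0.667
#   cosmo_index      = 0.5 * 0.667  ~= 0.33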
print('COSMO index : ', cosmo_index)
fig = plt.figure(2)
ax = plt.axes([0.15,0.10,0.80,0.66])
ax.set_xlabel('Longitude')
ax.set_ylabel('Latitude')
ax.grid(True)
ax.plot(B.lon[0],B.lat[0],'or',ms=8)
ax.plot(B.lon[0:NT],B.lat[0:NT],'+r',ms=3)
ax.plot(B.lon[0:NT],B.lat[0:NT],'-',color=self.FLOAT[jj].PLOT.LINE_COLOR.get(),linewidth=2,label='Buoy')
ax.plot(M.lon[0:NT],M.lat[0:NT],'+b',ms=3)
ax.plot(M.lon[0:NT],M.lat[0:NT],'-',color=model_color,linewidth=2,label='Model')
#for k in range(len(B.lon)):
for k in range(NT):
ax.plot([B.lon[k],M.lon[k]], \
[B.lat[k],M.lat[k]], \
'--k',linewidth=0.5)
ax.legend()
string = 'Initial point and date: (%.3f,%.3f) %s ' % (B.lon[0], B.lat[0], B.date[0])
ax.text(0.2,0.98,string,ha='left',va='top',transform=fig.transFigure)
string = 'Simulation length %.1f hours, i.e. %.2f days ' % ((M.time[NT-1]-M.time[0])/3600 ,(M.time[NT-1]-M.time[0])/86400 )
ax.text(0.2,0.95,string,ha='left',va='top',transform=fig.transFigure)
string = 'Displacement: Buoy = %d km; Model = %d km' % (np.sum(displacement_buoy[0:NT-1])/1000, \
np.sum(displacement_model[0:NT-1])/1000)
ax.text(0.2,0.92,string,ha='left',va='top',transform=fig.transFigure)
string = 'Final distance between model and buoy = %d km' % (final_separation/1000)
ax.text(0.2,0.89,string,ha='left',va='top',transform=fig.transFigure)
string = 'LW2011 Index s = %.2f, n = %.2f. Skill score = %.2f' % (index_s, self.index_n.get(), ss)
ax.text(0.2,0.86,string,ha='left',va='top',transform=fig.transFigure)
string = 'Bearing: Buoy = %d; Model = %d. Difference = %d ' % (buoy_bearing, model_bearing, theta)
ax.text(0.2,0.83,string,ha='left',va='top',transform=fig.transFigure)
string = 'Factor: Direction = %.2f; Distance = %.2f. Combined = %.2f ' % (direction_factor, distance_factor, cosmo_index)
ax.text(0.2,0.80,string,ha='left',va='top',transform=fig.transFigure)
plt.show()
def _floatselect():
# =================
print(self.Fp.get())
def _releasechange():
# =================
if 'model' in self.skill_release.get():
self.time_ini.set(0)
else:
self.time_ini.set(1)
_get_release()
def _lselection():
# =================
global NT
NT = PERIOD.get() + 1
print('NT = ', NT)
# Main window:
if self.Window_skill is None:
self.Window_skill = tk.Toplevel(self.master)
self.Window_skill.title('Trajectory model evaluation')
self.Window_skill.protocol('WM_DELETE_WINDOW',_close)
else:
self.Window_skill.lift()
FF = ttk.Frame(self.Window_skill,padding=5)
tk.Label(FF,text='Float:').grid(row=0,column=0,padx=3,sticky='e')
_wsf = ttk.Combobox(FF,textvariable=self.Fp,values=FLOAT_LIST,width=3)
_wsf.grid(row=0,column=1,sticky='w')
_wsf.bind('<<ComboboxSelected>>',lambda e: _floatselect())
if NFLOATS == 1:
_wsf.configure(state='disabled')
FF.grid()
F0 = ttk.Frame(self.Window_skill,padding=5)
#ttk.Label(F0,text='Model initial date : '+str(self.VEC[ii].DATE[0])).grid(row=0,column=0,columnspan=3,padx=3,stick='w')
#ttk.Label(F0,text='Trajectory initial date: '+str(self.FLOAT[jj].date[0])).grid(row=1,column=0,columnspan=3,padx=3,stick='w')
tk.Label(F0,text='Model initial date:').grid(row=0,column=0,padx=3,sticky='w')
e = tk.Entry(F0)
e.grid(row=0,column=1,padx=3,sticky='w')
e.insert(0,str(self.VEC[ii].DATE[0]))
e.configure(state='readonly')
tk.Label(F0,text='Buoy initial date:').grid(row=1,column=0,padx=3,sticky='w')
e = tk.Entry(F0)
e.grid(row=1,column=1,padx=3,sticky='w')
e.insert(0,str(self.FLOAT[jj].DATE[0]))
e.configure(state='readonly')
ttk.Label(F0,text='Release at: ').grid(row=2,column=0,padx=3,sticky='w')
_wrl = ttk.Combobox(F0,textvariable=self.skill_release,values=self.skill_release_VALUES)
_wrl.grid(row=2,column=1,padx=3,sticky='w')
_wrl.bind('<<ComboboxSelected>>',lambda e: _releasechange())
tk.Label(F0,text='Release date:').grid(row=3,column=0,padx=3,sticky='e')
tk.Entry(F0,textvariable=tdo,justify='left',width=12,state='readonly').grid(row=3,column=1,padx=3,sticky='ew')
ttk.Label(F0,text='Initial point (xo, yo, zo, to): ').grid(row=4,column=0,columnspan=3,padx=3,sticky='w')
F0.grid()
F2 = ttk.Frame(self.Window_skill)
#txo.set(xo[0])
#tyo.set(yo[0])
#tzo.set(zo[0])
#tdo.set(do)
#tto.set(Dt[0])
tk.Entry(F2,textvariable=txo,justify='left',width=12,state='readonly').grid(row=0,column=0,padx=3,sticky='ew')
tk.Entry(F2,textvariable=tyo,justify='left',width=12,state='readonly').grid(row=0,column=1,padx=3,sticky='ew')
tk.Entry(F2,textvariable=tzo,justify='left',width=12,state='readonly').grid(row=0,column=2,padx=3,sticky='ew')
tk.Entry(F2,textvariable=tdt,justify='left',width=12,state='readonly').grid(row=0,column=3,padx=3,sticky='ew')
F2.grid()
F3 = ttk.Frame(self.Window_skill)
tk.Label(F3,text='Release filename:').grid(row=0,column=0,padx=3,sticky='w')
tk.Entry(F3,textvariable=self.release_file,justify='left',width=40).grid(row=0,column=1,padx=3,sticky='w')
tk.Label(F3,text='Trajectory filename:').grid(row=1,column=0,padx=3,sticky='w')
tk.Entry(F3,textvariable=self.out_file,justify='left',width=40).grid(row=1,column=1,padx=3,sticky='w')
tk.Label(F3,text='clm option -idt:').grid(row=2,column=0,padx=3,sticky='w')
tk.Entry(F3,textvariable=self.clm_idt,justify='left',width=40).grid(row=2,column=1,padx=3,sticky='w')
tk.Label(F3,text='Normalization factor, n:').grid(row=3,column=0,padx=3,sticky='w')
tk.Entry(F3,textvariable=self.index_n,justify='left',width=40).grid(row=3,column=1,padx=3,sticky='w')
tk.Label(F3,text='Target prediction:').grid(row=4,column=0,padx=3,sticky='w')
_wlst = ttk.Combobox(F3,textvariable=PERIOD,width=5)
_wlst.grid(row=4,column=1,padx=3,sticky='w')
_wlst.configure(state='disabled')
_wlst.bind('<<ComboboxSelected>>',lambda e: _lselection())
#aa = self.ax.plot(xo[0],yo[0],'o',
# ms=9.0,linestyle='dotted',
# color='red',
# mfc='none',
# zorder=100,
# transform=ccrs.PlateCarree())
#
# self.canvas.draw()
F3.grid()
F1 = ttk.Frame(self.Window_skill,padding=5)
ttk.Button(F1,text='Cancel',command=_close).grid(row=1,column=2,padx=3)
ttk.Button(F1,text='Done',command=_done).grid(row=1,column=3,padx=3)
F1.grid()
# =======================================================
def converter(self):
# =======================================================
NS = tk.StringVar()
EW = tk.StringVar()
D1x = tk.DoubleVar()
D1y = tk.DoubleVar()
D2xd = tk.IntVar()
D2yd = tk.IntVar()
D2xm = tk.DoubleVar()
D2ym = tk.DoubleVar()
D3xd = tk.IntVar()
D3yd = tk.IntVar()
D3xm = tk.IntVar()
D3ym = tk.IntVar()
D3xs = tk.DoubleVar()
D3ys = tk.DoubleVar()
EWL = ['E','W']
NSL = ['N','S']
EW.set(EWL[0])
NS.set(NSL[0])
def _close():
# ===========
self.Window_converter.destroy()
self.Window_converter = None
if self.Window_converter is None:
self.Window_converter = tk.Toplevel(self.master)
self.Window_converter.title('Coordinate Converter')
self.Window_converter.protocol('WM_DELETE_WINDOW',_close)
else:
self.Window_converter.lift()
def _DD():
# =========
print('Converting from Decimal Degrees')
dd = D1x.get()
negative = dd < 0
dd = abs(dd)
minutes,seconds = divmod(dd*3600,60)
degrees,minutes = divmod(minutes,60)
if negative:
if degrees > 0:
degrees = -degrees
elif minutes > 0:
minutes = -minutes
else:
seconds = -seconds
D2xd.set(int(degrees))
D2xm.set(minutes+seconds/60)
D3xd.set(int(degrees))
D3xm.set(int(minutes))
D3xs.set(seconds)
dd = D1y.get()
negative = dd < 0
dd = abs(dd)
minutes,seconds = divmod(dd*3600,60)
degrees,minutes = divmod(minutes,60)
if negative:
if degrees > 0:
degrees = -degrees
elif minutes > 0:
minutes = -minutes
else:
seconds = -seconds
D2yd.set(int(degrees))
D2ym.set(minutes+seconds/60)
D3yd.set(int(degrees))
D3ym.set(int(minutes))
D3ys.set(seconds)
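# Worked example: for D1x = -3.7175 decimal degrees,
#   abs(dd)*3600 = 13383.0 seconds of arc
#   divmod(13383.0, 60) -> minutes = 223.0, seconds = 3.0
#   divmod(223.0, 60)   -> degrees = 3.0,   minutes = 43.0
# so the DDM form is -3 deg 43.05' and the DMS form is -3 deg 43' 3.0",
# with the sign carried by the degrees field (or by the minutes/seconds
# when the degrees are zero).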
def _DDM():
# =========
print('Converting from Degrees Decimal Minutes')
D2xm.set(abs(D2xm.get()))
D2ym.set(abs(D2ym.get()))
if D2xd.get() > 0:
factor = 1
else:
factor = -1
dd = abs(D2xd.get())
d3 = dd*3600 + D2xm.get()*60
D1x.set(factor*d3/3600)
minutes,seconds = divmod(d3,60)
degrees,minutes = divmod(minutes,60)
D3xd.set(int(factor*degrees))
D3xm.set(int(minutes))
D3xs.set(seconds)
D2xd.set(int(factor*degrees))
D2xm.set(minutes+seconds/60)
if D2yd.get() > 0:
factor = 1
else:
factor = -1
dd = abs(D2yd.get())
d3 = dd*3600 + D2ym.get()*60
minutes,seconds = divmod(d3,60)
degrees,minutes = divmod(minutes,60)
D1y.set(factor*d3/3600)
D2yd.set(int(factor*degrees))
D2ym.set(minutes+seconds/60)
D3yd.set(int(factor*degrees))
D3ym.set(int(minutes))
D3ys.set(seconds)
def _DMS():
# =========
print('Converting from Degrees Minutes Seconds')
D3xm.set(abs(D3xm.get()))
D3xs.set(abs(D3xs.get()))
D3ym.set(abs(D3ym.get()))
D3ys.set(abs(D3ys.get()))
if D3xd.get() > 0:
factor = 1
else:
factor = -1
dd = abs(D3xd.get())
d3 = dd*3600 + D3xm.get()*60 + D3xs.get()
D1x.set(factor*d3/3600)
d2 = int(np.floor(d3/3600))
m2 = (d3 - d2*3600)/60
D2xd.set(int(factor*d2))
D2xm.set(m2)
if D3yd.get() > 0:
factor = 1
else:
factor = -1
dd = abs(D3yd.get())
d3 = dd*3600 + D3ym.get()*60 + D3ys.get()
D1y.set(factor*d3/3600)
d2 = int(np.floor(d3/3600))
m2 = (d3 - d2*3600)/60
D2yd.set(int(factor*d2))
D2ym.set(m2)
# Styles
tpad = ttk.Style()
tpad.configure("tpad.TLabelframe",padding=[20,5,5,10])
# DD
F0 =ttk.LabelFrame(self.Window_converter,text='Decimal Degrees (DD)',borderwidth=5,style='tpad.TLabelframe')
ttk.Label(F0,text='Longitude').grid(row=0,column=0,sticky='w')
_d1x = ttk.Entry(F0,textvariable=D1x,width=20)
_d1x.grid(row=0,column=1,sticky='ew')
ttk.Label(F0,text='\u00b0',width=1).grid(row=0,column=2,sticky='w')
_d1xh = ttk.Combobox(F0,textvariable=EW,values=EWL,width=3)
EW.set('E')
_d1xh.grid(row=0,column=3,sticky='ew')
ttk.Label(F0,text='Latitude').grid(row=1,column=0,sticky='w')
_d1y = ttk.Entry(F0,textvariable=D1y,width=20)
_d1y.grid(row=1,column=1,sticky='ew')
ttk.Label(F0,text='\u00b0',width=1).grid(row=1,column=2,sticky='w')
_d1yh = ttk.Combobox(F0,textvariable=NS,values=NSL,width=3)
_d1yh.grid(row=1,column=3,sticky='ew')
ttk.Button(F0,text='Ok',width=4,command=_DD).grid(row=1,column=4,sticky='ew',padx=3)
F0.grid(row=0,column=0,padx=5,pady=10,sticky='ewsn')
# DDM
F1 =ttk.LabelFrame(self.Window_converter,text='Degrees Decimal Minutes (DDM)',borderwidth=5,style='tpad.TLabelframe')
ttk.Label(F1,text='Longitude').grid(row=0,column=0,sticky='w')
_d2xd = ttk.Entry(F1,textvariable=D2xd,width=4)
_d2xd.grid(row=0,column=1,sticky='ew')
ttk.Label(F1,text='\u00b0',width=1).grid(row=0,column=2,sticky='w')
_d2xm = ttk.Entry(F1,textvariable=D2xm,width=14)
_d2xm.grid(row=0,column=3,sticky='ew')
ttk.Label(F1,text="'",width=1).grid(row=0,column=4,sticky='w')
_d2xh = ttk.Combobox(F1,textvariable=EW,values=EWL,width=3)
_d2xh.grid(row=0,column=5,sticky='w')
ttk.Label(F1,text='Latitude').grid(row=1,column=0,sticky='w')
_d2yd = ttk.Entry(F1,textvariable=D2yd,width=4)
_d2yd.grid(row=1,column=1,sticky='ew')
ttk.Label(F1,text='\u00b0',width=1).grid(row=1,column=2,sticky='w')
_d2ym = ttk.Entry(F1,textvariable=D2ym,width=14)
_d2ym.grid(row=1,column=3,sticky='ew')
ttk.Label(F1,text="'",width=1).grid(row=1,column=4,sticky='w')
_d2yh = ttk.Combobox(F1,textvariable=NS,values=NSL,width=3)
_d2yh.grid(row=1,column=5,sticky='ew')
ttk.Button(F1,text='Ok',width=4,command=_DDM).grid(row=1,column=6,sticky='ew',padx=3)
F1.grid(row=1,column=0,padx=5,pady=10,sticky='ewsn')
# DMS
F2 =ttk.LabelFrame(self.Window_converter,text='Degrees Minutes Seconds (DMS)',borderwidth=5,style='tpad.TLabelframe')
ttk.Label(F2,text='Longitude').grid(row=0,column=0,sticky='w')
_d3xd = ttk.Entry(F2,textvariable=D3xd,width=4)
_d3xd.grid(row=0,column=1,sticky='ew')
ttk.Label(F2,text='\u00b0',width=1).grid(row=0,column=2,sticky='w')
_d3xm = ttk.Entry(F2,textvariable=D3xm,width=3)
_d3xm.grid(row=0,column=3,sticky='ew')
ttk.Label(F2,text="'",width=1).grid(row=0,column=4,sticky='w')
_d3xs = ttk.Entry(F2,textvariable=D3xs,width=9)
_d3xs.grid(row=0,column=5,sticky='ew')
ttk.Label(F2,text='"',width=1).grid(row=0,column=6,sticky='w')
_d3xh = ttk.Combobox(F2,textvariable=EW,values=EWL,width=3)
_d3xh.grid(row=0,column=7,sticky='w')
ttk.Label(F2,text='Latitude').grid(row=1,column=0,sticky='w')
_d3yd = ttk.Entry(F2,textvariable=D3yd,width=4)
_d3yd.grid(row=1,column=1,sticky='ew')
ttk.Label(F2,text='\u00b0',width=1).grid(row=1,column=2,sticky='w')
_d3ym = ttk.Entry(F2,textvariable=D3ym,width=3)
_d3ym.grid(row=1,column=3,sticky='ew')
ttk.Label(F2,text="'",width=1).grid(row=1,column=4,sticky='w')
_d3ys = ttk.Entry(F2,textvariable=D3ys,width=9)
_d3ys.grid(row=1,column=5,sticky='ew')
ttk.Label(F2,text='"',width=1).grid(row=1,column=6,sticky='w')
_d3yh = ttk.Combobox(F2,textvariable=NS,values=NSL,width=3)
_d3yh.grid(row=1,column=7,sticky='ew')
ttk.Button(F2,text='Ok',width=4,command=_DMS).grid(row=1,column=8,sticky='ew',padx=3)
F2.grid(row=2,column=0,padx=5,pady=10,sticky='ewsn')
# =======================
def set_time(self):
# =======================
global initial_DATE
global final_DATE
global time_updated
global time_layer
TSELECTION = tk.StringVar()
initial_date = tk.StringVar()
final_date = tk.StringVar()
time_interval= tk.DoubleVar()
time_updated = False
time_layer = -1
try:
backup_TIME = self.TIME.copy()
backup_DATE = self.DATE.copy()
backup_NL = self.NL
initial_date.set(self.DATE[0])
final_date.set(self.DATE[self.NL-1])
time_interval.set(self.TIME[2] - self.TIME[1])
initial_DATE = self.DATE[0]
final_DATE = self.DATE[self.NL-1]
except:
backup_NL = 0
now = datetime.datetime.now().date()
now = datetime.datetime.combine(now,datetime.datetime.min.time())
initial_DATE = now
final_DATE = now + datetime.timedelta(1)
initial_date.set(initial_DATE)
final_date.set(final_DATE)
time_interval.set(0)
def _cancel():
# ============
print("In _cancel: ",initial_date.get())
if backup_NL > 0:
self.TIME = backup_TIME.copy()
self.DATE = backup_DATE.copy()
self.NL = backup_NL
self.Window_settime.destroy()
self.Window_settime = None
def _done():
# ==========
global time_updated
global time_layer
# Get the initial and final date stamps:
#
initial_DATE = datetime.datetime.strptime(initial_date.get(),'%Y-%m-%d %H:%M:%S')
final_DATE = datetime.datetime.strptime(final_date.get(),'%Y-%m-%d %H:%M:%S')
initial_TIME = initial_DATE.timestamp()
final_TIME = final_DATE.timestamp()
if final_TIME < initial_TIME:
time_updated = False
_cancel()
return
if time_interval.get() == 0:
time_updated = False
_cancel()
return
if time_updated:
print("Updating drawing TIME and DATE ...")
# Unlink all the layers except the one selected
for i in range(self.LAYERS.n):
TYPE = self.LAYERS.TYPE[i]
ii = self.LAYERS.TYPE_INDEX[i]
if i == time_layer:
linked = True
else:
linked = False
print(i, TYPE, ii, linked)
if TYPE == 'VEC':
self.VEC[ii].LINK.set(linked)
elif TYPE == 'FLD':
self.CDF[ii].LINK.set(linked)
elif TYPE == 'FLT':
self.FLOAT[ii].LINK.set(linked)
self.NL = int((final_TIME - initial_TIME) / time_interval.get() + 1)
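# Example (illustrative values): for a 2-day span with time_interval = 3600 s,
# NL = (172800/3600) + 1 = 49 time layers, and the loop below generates one
# TIME/DATE entry per layer.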
print(initial_TIME,final_TIME,time_interval.get(), self.NL)
self.TIME = []
self.DATE = []
for i in range(self.NL):
self.TIME.append(initial_TIME + i*time_interval.get())
self.DATE.append(datetime.datetime.fromtimestamp(self.TIME[i]))
print(self.DATE[i])
# Interpolate Lagrangian trajectories
#
for ii in range(self.nfloat):
if self.FLOAT[ii].nfloats > 1:
MAPX = []
MAPY = []
for i in range(self.FLOAT[ii].nfloats):
f = interpolate.interp1d(self.FLOAT[ii].TIME,self.FLOAT[ii].lon[:,i],
bounds_error=False, fill_value=np.NaN)
MAPX.append(f(self.TIME))
f = interpolate.interp1d(self.FLOAT[ii].TIME,self.FLOAT[ii].lat[:,i],
bounds_error=False, fill_value=np.NaN)
MAPY.append(list(f(self.TIME)))
self.FLOAT[ii].MAPX = np.array(MAPX).T
self.FLOAT[ii].MAPY = np.array(MAPY).T
else:
self.FLOAT[ii].Fx = interpolate.interp1d(self.FLOAT[ii].TIME,self.FLOAT[ii].lon,
bounds_error=False, fill_value=np.NaN)
self.FLOAT[ii].MAPX = self.FLOAT[ii].Fx(self.TIME)
self.FLOAT[ii].Fy = interpolate.interp1d(self.FLOAT[ii].TIME,self.FLOAT[ii].lat,
bounds_error=False, fill_value=np.NaN)
self.FLOAT[ii].MAPY = self.FLOAT[ii].Fy(self.TIME)
# Update time widgets
self.L.set(0)
self.L_LIST = list(range(self.NL))
self.lbox['values'] = self.L_LIST
self.PLOT.TLABEL.set(self.DATE[self.L.get()])
self.PLOT.VIDEO_L2.set(len(self.DATE)-1)
self.PLOT.SFRAME_L2.set(len(self.DATE)-1)
self.Window_settime.destroy()
self.Window_settime = None
self.make_plot()
def _autotime():
# ==============
global time_updated
global time_layer
print('In autotime')
layer_selected = TSELECTION.get()
print(layer_selected)
if empty(layer_selected):
return
for i in range(self.LAYERS.n):
layer_name = os.path.basename(self.LAYERS.FILENAME[i])
TYPE = self.LAYERS.TYPE[i]
ii = self.LAYERS.TYPE_INDEX[i]
print(layer_name,TYPE,ii)
if TYPE == 'VEC':
layer_ref = self.VEC[ii].ALIAS.get()
if layer_ref == layer_selected or layer_name == layer_selected:
print('Found it !!!!!')
time_updated = True
time_layer = i
self.TIME = self.VEC[ii].TIME.copy()
self.DATE = self.VEC[ii].DATE.copy()
self.NL = self.LAYERS.NREC[i]
elif TYPE == 'FLD':
layer_ref = self.CDF[ii].ALIAS.get()
if layer_ref == layer_selected or layer_name == layer_selected:
print('Found it !!!!!')
time_updated = True
time_layer = i
self.TIME = self.CDF[ii].TIME.copy()
self.DATE = self.CDF[ii].DATE.copy()
self.NL = self.LAYERS.NREC[i]
elif TYPE == 'FLT':
layer_ref = self.FLOAT[ii].ALIAS.get()
if layer_ref == layer_selected or layer_name == layer_selected:
time_updated = True
time_layer = i
self.TIME = self.FLOAT[ii].TIME.copy()
self.DATE = self.FLOAT[ii].DATE.copy()
self.NL = self.LAYERS.NREC[i]
if time_updated:
initial_date.set(self.DATE[0])
final_date.set(self.DATE[self.NL-1])
time_interval.set(self.TIME[2] - self.TIME[1])
def _initime():
# ==============
global time_updated
global initial_DATE
initial_DATE = datetime.datetime.strptime(initial_date.get(),'%Y-%m-%d %H:%M:%S')
time_updated = True
def _initime2():
# ==============
global time_updated
global initial_DATE
global cal
global top
top.destroy()
top = None
aa = cal.selection_get()
initial_DATE = initial_DATE.replace(year=aa.year,month=aa.month,day=aa.day)
initial_date.set(initial_DATE)
time_updated = True
def _inical():
# =============
global time_updated
global final_DATE
global cal
global top
top = tk.Toplevel(self.master)
cal = Calendar(top, font="Arial 14", selectmode='day', locale='en_US',
disabledforeground='red',cursor="hand1",
year=initial_DATE.year,month=initial_DATE.month,day=initial_DATE.day)
cal.grid()
ttk.Button(top, text="ok", command=_initime2).grid()
time_updated = True
def _fintime():
# ==============
global time_updated
global final_DATE
final_DATE = datetime.datetime.strptime(final_date.get(),'%Y-%m-%d %H:%M:%S')
time_updated = True
def _fintime2():
# ==============
global time_updated
global final_DATE
global cal
global top
top.destroy()
top = None
aa = cal.selection_get()
final_DATE = final_DATE.replace(year=aa.year,month=aa.month,day=aa.day)
final_date.set(final_DATE)
time_updated = True
def _fincal():
# =============
global time_updated
global final_DATE
global cal
global top
top = tk.Toplevel(self.master)
cal = Calendar(top, font="Arial 14", selectmode='day', locale='en_US',
disabledforeground='red',cursor="hand1",
year=final_DATE.year,month=final_DATE.month,day=final_DATE.day)
cal.grid()
ttk.Button(top, text="ok", command=_fintime2).grid()
time_updated = True
def _dt():
# =============
global time_updated
#time_updated = True
if time_interval.get() == 0:
messagebox.showinfo(message='Error: Time interval cannot be zero')
time_updated = False
else:
time_updated = True
def _reverse():
# =============
global time_updated
global initial_DATE
global final_DATE
if time_interval.get() != 0:
time_interval.set(-time_interval.get())
tmpDATE = final_DATE
final_DATE = initial_DATE
initial_DATE = tmpDATE
del tmpDATE
final_date.set(final_DATE)
initial_date.set(initial_DATE)
time_updated = True
# Main window
# ============
self.Window_settime = tk.Toplevel(self.master)
self.Window_settime.title('Set time axis')
self.Window_settime.resizable(width=True,height=True)
self.Window_settime.protocol('WM_DELETE_WINDOW',_cancel)
tpad = ttk.Style()
tpad.configure("tpad.TLabelframe",padding=[20,5,5,10])
# Make a list of all potential files to define the Time Axis:
#
tlist = []
layer_ref = ''
for i in range(self.LAYERS.n):
TYPE = self.LAYERS.TYPE[i]
ii = self.LAYERS.TYPE_INDEX[i]
if TYPE == 'VEC':
layer_ref = self.VEC[ii].ALIAS.get()
elif TYPE == 'FLD':
layer_ref = self.CDF[ii].ALIAS.get()
elif TYPE == 'FLT':
layer_ref = self.FLOAT[ii].ALIAS.get()
else:
print('Unknown file type in time axis')
if empty(layer_ref):
layer_ref = os.path.basename(self.LAYERS.FILENAME[i])
tlist.append(layer_ref)
F0 = ttk.Frame(self.Window_settime,padding=10)
F1=ttk.LabelFrame(F0,text='Automatic time selection',borderwidth=5,style='tpad.TLabelframe')
ttk.Label(F1,text="Select field: ").grid(row=1,column=0,sticky='e',padx=3)
_was = ttk.Combobox(F1,textvariable=TSELECTION,values=tlist,width=14)
_was.grid(row=1,column=1,sticky='w',padx=3)
_was.bind('<<ComboboxSelected>>',lambda e: _autotime())
if len(tlist) == 0:
_was.configure(state='disabled')
F1.grid(row=0,column=0,columnspan=3)
F2=ttk.LabelFrame(F0,text='Manual time selection',borderwidth=5,style='tpad.TLabelframe')
ttk.Label(F2,text="Initial time: ").grid(row=0,column=0,sticky='e',padx=3)
_wini = tk.Entry(F2,textvariable=initial_date,width=18)
_wini.bind('<Return>',lambda e: _initime())
_wini.grid(row=0,column=1,sticky='w',padx=3)
tk.Button(F2,text='Select',command=_inical).grid(row=0,column=2,sticky='w',padx=3)
ttk.Label(F2,text="Final time: ").grid(row=1,column=0,sticky='e',padx=3)
_wfin = tk.Entry(F2,textvariable=final_date,width=18)
_wfin.bind('<Return>',lambda e: _fintime())
_wfin.grid(row=1,column=1,sticky='w',padx=3)
tk.Button(F2,text='Select',command=_fincal).grid(row=1,column=2,sticky='w',padx=3)
ttk.Label(F2,text="Time interval (seconds): ").grid(row=2,column=0,sticky='e',padx=3)
_wtdt = tk.Entry(F2,textvariable=time_interval,width=18)
_wtdt.bind('<Return>',lambda e: _dt())
_wtdt.grid(row=2,column=1,sticky='w',padx=3)
F2.grid(row=1,column=0,columnspan=3)
F0.grid()
F1 = ttk.Frame(self.Window_settime,padding=5)
ttk.Button(F1,text='Cancel',command=_cancel,padding=5). \
grid(row=0,column=1,padx=3)
ttk.Button(F1,text='Reverse',command=_reverse,padding=5). \
grid(row=0,column=2,padx=3)
ttk.Button(F1,text='Done',command=_done,padding=5). \
grid(row=0,column=3,padx=3)
F1.grid(sticky='ew',columnspan=2)
# =======================
def ruler(self):
# =======================
global first
global cross
global _cc
global _kk
global _ll
global xo
global yo
try:
self.canvas.mpl_disconnect(self.CANVAS_CLICK)
except:
self.make_plot()
self.canvas.mpl_disconnect(self.CANVAS_CLICK)
first = True
cross = None
_cc = None
_kk = None
_ll = None
def _done():
# ==========
global _cc
global _kk
global _ll
global cross
global line
global annotation
cross.clear()
line.clear()
annotation.remove()
self.make_plot()
self.canvas.mpl_disconnect(_cc)
self.canvas.mpl_disconnect(_kk)
self.canvas.mpl_disconnect(_ll)
self.master.unbind('<Key>')
self.CANVAS_CLICK = self.canvas.mpl_connect('button_press_event',self.canvas_click)
def _canvas_click(event):
# ============================
global first
global cross
global xo
global yo
global _ll
global line
global annotation
if first:
first = False
xo = event.xdata
yo = event.ydata
cross = self.ax.plot(xo,yo,'+',ms=20,transform=ccrs.PlateCarree())
self.canvas.draw()
messagebox.showinfo(message='Use left mouse to select the second point. ESC to quit')
string = 'Calculating distances from point ({0:.3f},{1:.3f})'.format(xo,yo)
line = self.ax.plot(xo,yo,color='k',lw=0.8,ls='--',zorder=100,transform=ccrs.PlateCarree())
annotation = self.ax.annotate('',xy=(xo,yo), \
ha='right', \
va='bottom', \
xycoords='data',
bbox=dict(boxstyle='round,pad=0.5',fc='yellow',alpha=0.75))
else:
line[0].set_visible(False)
if event.inaxes:
dist = haversine((xo,yo),(event.xdata,event.ydata)) / 1000.
string = 'Distance to ({0:8.3f},{1:8.3f}): {2:7.1f} km (ESC to quit)'.format(event.xdata,event.ydata,dist)
line[0].set_data([xo,event.xdata],[yo,event.ydata])
line[0].set_visible(True)
annotation.xytext = event.xdata, event.ydata
annotation.set_text('{0:7.1f} km'.format(dist))
self.canvas.draw()
toconsola(string,wid=self.cons)
def _key_handler(event):
# ============================
      # On X11/Linux, Tk reports keycode 9 for the Escape key
      if event.keycode == 9:
_done()
def _key_handler2(event):
# ============================
if event.key == 'escape':
_done()
# Main window
# ============
_cc = self.canvas.mpl_connect('button_press_event',_canvas_click)
_kk = self.canvas.mpl_connect('key_press_event',_key_handler2)
self.master.bind('<Key>',_key_handler)
messagebox.showinfo(message='Select a starting point with the left mouse button. ESC to quit')
# =======================
def atlas(self):
# =======================
def _cancel():
# ============
self.Window_atlas.destroy()
# Main window
# ============
self.Window_atlas = tk.Toplevel(self.master)
    self.Window_atlas.title('Atlas')
self.Window_atlas.resizable(width=False,height=False)
self.Window_atlas.protocol('WM_DELETE_WINDOW',_cancel)
climatology.winClim(self.Window_atlas,wid=self.cons)
# ====================
def save_frames(self):
# ====================
    ''' Save (PNG) a series of frames '''
# -----------
def _close():
# -----------
self.Window_sframe.destroy()
self.Window_sframe = None
def _done():
# ----------
L_Backup = self.L.get()
      d = int(np.ceil(np.log10(self.NL)))
fmt = '0%d' % d
for L in range(self.PLOT.SFRAME_L1.get(),self.PLOT.SFRAME_L2.get()+1,self.PLOT.SFRAME_LSTEP.get()):
self.L.set(L)
self.PLOT.TLABEL.set(self.DATE[L])
ofile = self.PLOT.SFRAME_PREFIX.get()
if (self.PLOT.SFRAME_POSTFIX_MODE.get() == 0):
ofile = ofile + format(L,fmt) + '.png'
else:
postfix = self.DATE[L].isoformat(sep='T')
postfix = postfix.replace(":","")
postfix = postfix.replace("-","")
ofile = ofile + postfix + '.png'
print('saving frame in ', ofile)
for i in range(self.nvec):
if self.VEC[i].LINK.get():
self.VEC[i].L.set(L)
self.VEC[i].read(update_lims=False,wid=self.cons)
for i in range(self.ncdf):
if self.CDF[i].LINK.get():
self.CDF[i].L.set(L)
self.CDF[i].read(update_lims=False,wid=self.cons)
self.make_plot()
self.fig.savefig(ofile,
dpi=self.PLOT.DPI.get(),
bbox_inches='tight')
messagebox.showinfo(parent=self.Window_sframe,message='Frames have been saved')
self.L.set(L_Backup)
def _loadconf():
# -------------
'''Load SAVE FRAME configuration'''
toconsola('Retrieving SAVING FRAME defaults.',wid=self.cons)
#print('Retrieving VIDEO defaults.')
with open(self.PLOT.FILECONF) as infile:
conf = json.load(infile)
self.PLOT.SFRAME_PREFIX.set(conf['FRAME_PREFIX'])
self.PLOT.SFRAME_POSTFIX_MODE.set(conf['FRAME_POSTFIX_MODE'])
self.PLOT.SFRAME_LSTEP.set(conf['FRAME_STEP'])
def _saveconf():
# -------------
'''Save SAVE FRAME configuration'''
with open(self.PLOT.FILECONF) as infile:
conf = json.load(infile)
toconsola('Updating SAVING FRAME defaults.',wid=self.cons)
#print('Updating VIDEO defaults.')
conf['FRAME_PREFIX'] = self.PLOT.SFRAME_PREFIX.get()
conf['FRAME_POSTFIX_MODE'] = self.PLOT.SFRAME_POSTFIX_MODE.get()
conf['FRAME_STEP'] = self.PLOT.SFRAME_LSTEP.get()
with io.open(self.PLOT.FILECONF,'w',encoding='utf8') as outfile:
str_ = json.dumps(conf,ensure_ascii=False, \
sort_keys=True, \
indent=2, \
separators=(',',': '))
outfile.write(to_unicode(str_)+'\n')
# Main
# ----
if self.LAYERS.n == 0:
messagebox.showinfo(message='No layers have been added')
return
if self.Window_sframe is not None:
self.Window_sframe.lift()
return
self.Window_sframe = tk.Toplevel(self.master)
self.Window_sframe.title('Save frames')
self.Window_sframe.resizable(width=True,height=True)
self.Window_sframe.protocol('WM_DELETE_WINDOW',_close)
# Menu:
menubar = tk.Menu(self.Window_sframe)
menu = tk.Menu(menubar,tearoff=0)
menubar.add_cascade(label='Configuration',menu=menu)
menu.add_command(label='Restore',command=_loadconf)
menu.add_command(label='Save',command=_saveconf)
try:
self.Window_sframe.config(menu=menubar)
except AttributeError:
# master is a toplevel window (Python 2.4/Tkinter 1.63)
self.Window_sframe.tk.call(self.Window_sframe, "config", "-menu", menubar)
# Widgets
#
F0 = ttk.Frame(self.Window_sframe,borderwidth=5,padding=5)
ttk.Label(F0,text='Output prefix : ').grid(row=0,column=0)
ttk.Entry(F0,textvariable=self.PLOT.SFRAME_PREFIX,width=40).grid(row=0,column=1,columnspan=4,sticky='w')
ttk.Label(F0,text='Output postfix mode : ').grid(row=1,column=0)
ttk.Radiobutton(F0,text=' Frame number',variable=self.PLOT.SFRAME_POSTFIX_MODE,value=0).\
grid(row=1,column=1,padx=3)
ttk.Radiobutton(F0,text=' Date and time',variable=self.PLOT.SFRAME_POSTFIX_MODE,value=1).\
grid(row=2,column=1,padx=3)
ttk.Label(F0,text='Initial frame : ').grid(row=3,column=0)
ttk.Entry(F0,textvariable=self.PLOT.SFRAME_L1,width=7).grid(row=3,column=1,sticky='w')
ttk.Label(F0,text='Final frame : ').grid(row=4,column=0)
ttk.Entry(F0,textvariable=self.PLOT.SFRAME_L2,width=7).grid(row=4,column=1,sticky='w')
ttk.Label(F0,text='Frame step : ').grid(row=5,column=0)
ttk.Entry(F0,textvariable=self.PLOT.SFRAME_LSTEP,width=7).grid(row=5,column=1,sticky='w')
done = ttk.Button(F0,text='Do it',command=_done)
done.grid(row=6,column=3,padx=3)
done.bind("<Return>",lambda e:_done())
close = ttk.Button(F0,text='Close',command=_close)
close.grid(row=6,column=4,padx=3)
close.bind("<Return>",lambda e:_close())
F0.grid()
# ====================
def get_feature(self):
# ====================
''' Widget to read Features '''
self.FSOURCE = tk.StringVar()
FEATURE = feature.parameters()
self.FSOURCE.set(self.FEATURE.OPTIONS[0])
def _cancel():
# ===========
self.Window_feature.destroy()
self.Window_feature = None
def _close():
# ===========
self.Window_feature.destroy()
self.Window_feature = None
self.make_plot()
def _done():
# ===========
_close()
def _clear():
# ===========
if self.FEATURE.n == 0:
return
ii = self.FEATURE.INDX.get()
self.LAYERS.erase('FEATURE',ii,wid=self.cons)
self.LAYERS.print()
if self.LAYERS.n == 0:
self.TIME = []
self.DATE = []
self.L.set(0)
self.L_LIST = []
self.NL = 0
self.bnext.configure(state='disabled')
self.bprev.configure(state='disabled')
self.PLOT.TLABEL.set('')
self.lbox['values'] = self.L_LIST
self.lbox.configure(state='disabled')
self.first = True
toconsola('Erasing record '+str(ii),wid=self.cons)
del self.FEATURE.DATA[ii]
self.FEATURE.n -= 1
ii = self.FEATURE.n-1 if ii >= self.FEATURE.n else ii
toconsola('New feature = '+str(ii),wid=self.cons)
self.FEATURE.INDX.set(ii)
_refill(ii)
def _reget():
# ===========
      self.FEATURE.INDX.set(_wsel.get())
      ii = self.FEATURE.INDX.get()
_refill(ii)
def _refill(ii):
# ============
if ii >= 0:
self.FEATURE.LIST = list(range(self.FEATURE.n))
_wsel['values'] = self.FEATURE.LIST
_went['textvariable'] = self.FEATURE.DATA[ii].FILENAME
_wstat['text'] = 'Number feature = '+str(self.FEATURE.DATA[ii].n)
_wsel.configure(state='normal')
_show['variable'] = self.FEATURE.DATA[ii].show
_aent.configure(state='normal')
_aent['textvariable'] = self.FEATURE.DATA[ii].ALIAS
else:
self.FEATURE.DATA = []
self.FEATURE.LIST = ['0']
self.FEATURE.INDX.set(0)
#_wsel['values'] = self.FEATURE_LIST
_wsel['values'] = None
_went['textvariable'] = None
_wstat['text'] = ''
_wsel.configure(state='disabled')
_aent.configure(state='disabled')
_show.configure(state='disabled')
self.make_plot()
def _add():
# ===========
ISOURCE = self.FEATURE.OPTIONS.index(self.FSOURCE.get())
types=[('JSON','*.json'),('GEOJSON','*.geojson'),('ALL','*')]
nn = filedialog.askopenfilename(parent=self.Window_feature, \
filetypes=types)
if len(nn) == 0:
return
filename = '%s' % nn
toconsola('Reading FEATURE file '+filename,wid=self.cons)
FEATURE.Read(filename)
if FEATURE.n == 0:
return
self.FEATURE.n += 1
self.FEATURE.DATA.append(FEATURE)
self.FEATURE.INDX.set(self.FEATURE.n-1)
self.FEATURE.LIST = list(range(self.FEATURE.n))
self.LAYERS.add(TYPE='FEATURE',Filename=filename,N=FEATURE.n,wid=self.cons)
self.LAYERS.print()
ii = self.FEATURE.INDX.get()
_refill(ii)
# Main Window ...
# ================
if self.Window_feature is None:
self.Window_feature = tk.Toplevel(self.master)
self.Window_feature.title('JSON feature')
self.Window_feature.protocol('WM_DELETE_WINDOW',_close)
else:
self.Window_feature.lift()
if self.FEATURE.n > 0:
ii = self.FEATURE.INDX.get()
else:
ii = -1
F0 = ttk.Frame(self.Window_feature,padding=5)
#Add
ttk.Combobox(F0,textvariable=self.FSOURCE, \
values=self.FEATURE.OPTIONS).grid(row=0,column=0,padx=3)
ttk.Button(F0,text='Import',command=_add).grid(row=1,column=0,padx=3)
# Filename:
ttk.Label(F0,text='Feature file').grid(row=0,column=1,padx=3)
_wsel = ttk.Combobox(F0,textvariable=self.FEATURE.INDX, \
values=self.FEATURE.LIST,width=5)
_wsel.grid(row=0,column=2)
_wsel.bind('<<ComboboxSelected>>',lambda e: _reget())
_went = ttk.Entry(F0,justify='left',width=50,state='readonly')
_went.grid(row=0,column=3,columnspan=5,padx=3,sticky='w')
if ii == -1:
_wstat = ttk.Label(F0,text='',width=50,justify='left')
_wsel.configure(state='disabled')
else:
_wstat = ttk.Label(F0,text=' Features in the file= '+str(self.FEATURE.DATA[ii].n),width=50,justify='left')
_went['textvariable'] = self.FEATURE.DATA[ii].FILENAME
_wstat.grid(row=1,column=3,columnspan=5,padx=3,sticky='w')
#Alias
ttk.Label(F0,text='Alias').grid(row=2,column=1,padx=3,pady=3)
_aent = ttk.Entry(F0,width=15,justify='left')
_aent.grid(row=2,column=2,columnspan=2,sticky='w')
F0.grid(row=0,column=0)
F1 = ttk.Frame(self.Window_feature,padding=5)
if ii == -1:
_show = ttk.Checkbutton(F1,text='Show')
_aent.configure(state='disabled')
else:
_show = ttk.Checkbutton(F1,text='Show',command=self.make_plot)
_show['variable']=self.FEATURE.DATA[ii].show
_aent['textvariable'] = self.FEATURE.DATA[ii].ALIAS
_show.grid(row=1,column=5,padx=3)
ttk.Button(F1,text='Cancel',command=_cancel).grid(row=1,column=6,padx=3)
ttk.Button(F1,text='Clear',command=_clear).grid(row=1,column=7,padx=3)
ttk.Button(F1,text='Plot',command=_close).grid(row=1,column=8,padx=3)
F1.grid(row=1,column=0)
def feature_config(self):
# =======================
if self.FEATURE.n == 0:
messagebox.showinfo(message='No feature selected yet')
return
if self.Window_featureconfig is None:
feature.Configuration_Menu(self,self.FEATURE)
else:
self.Window_featureconfig.lift()
| {
"content_hash": "a3a44aef3495d6ee52efe3973f6c0049",
"timestamp": "",
"source": "github",
"line_count": 13185,
"max_line_length": 266,
"avg_line_length": 36.009859689040574,
"alnum_prop": 0.5715495271593757,
"repo_name": "quimbp/cosmo",
"id": "52c2c4a78f36ed7d30921663c4c64b1b19233a1a",
"size": "474792",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/cosmo/drawing.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Fortran",
"bytes": "290206"
},
{
"name": "Makefile",
"bytes": "6002"
},
{
"name": "NASL",
"bytes": "2848"
},
{
"name": "Python",
"bytes": "1024417"
},
{
"name": "Shell",
"bytes": "5521"
}
],
"symlink_target": ""
} |
from __future__ import division, print_function, unicode_literals
import os
DEBUG = True
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
},
}
LANGUAGE_CODE = 'ko-kr'
TIME_ZONE = 'Asia/Seoul'
| {
"content_hash": "16097deab778613b6867a2ec82458748",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 70,
"avg_line_length": 23.6,
"alnum_prop": 0.6412429378531074,
"repo_name": "theeluwin/mashiro",
"id": "05f1c6ba07b83cda8013f56cb5d37f571f65a103",
"size": "379",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project_name/local.sample.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2298"
},
{
"name": "HTML",
"bytes": "3622"
},
{
"name": "JavaScript",
"bytes": "4695"
},
{
"name": "Python",
"bytes": "14690"
}
],
"symlink_target": ""
} |
"""
Check to see if this program is a valid program for the iocc
2) The size of your program source must be <= 4096 bytes in length.
The number of characters excluding whitespace (tab, space,
newline, formfeed, return), and excluding any ; { or } immediately
followed by whitespace or end of file, must be <= 2048.
"""
import re
import sys
def check_size(txt):
size = len(txt)
if size <= 4096:
status = "OK"
else:
status = "BAD"
print "Total size %d: %s" % (size, status)
def check_whitespace(txt):
txt = re.sub(r"\s+", r"", txt)
size = len(txt)
if size <= 2048:
status = "OK"
else:
status = "BAD"
print "Non whitespace %d: %s" % (size, status)
def check_rules_whitespace(txt):
txt = re.sub("(?s)[;{}](\s|$)", r"\1", txt)
txt = re.sub(r"\s+", r"", txt)
size = len(txt)
if size <= 2048:
status = "OK"
else:
status = "BAD"
print "Rules whitespace %d: %s" % (size, status)
def main():
if len(sys.argv) != 2:
print "Syntax: %s file.c" % sys.argv[0]
raise SystemExit(1)
_, in_filename = sys.argv
txt = open(in_filename).read()
print len(in_filename)*"-"
print in_filename
print len(in_filename)*"-"
check_size(txt)
check_whitespace(txt)
print len(in_filename)*"-"
#check_rules_whitespace(txt)
if __name__ == "__main__":
main()
| {
"content_hash": "802edd98736baf37ffeed31a61de5aef",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 73,
"avg_line_length": 25.927272727272726,
"alnum_prop": 0.5638148667601683,
"repo_name": "ncw/ioccc2012",
"id": "89fae477bfabbb36df7a327b728effe7d96f5ca6",
"size": "1444",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "check.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "22264"
},
{
"name": "Python",
"bytes": "18043"
},
{
"name": "Shell",
"bytes": "39"
}
],
"symlink_target": ""
} |
from ajenti.api import plugin
from ajenti.plugins.dashboard.api import ConfigurableWidget
from ajenti.ui import on
from api import ServiceMultiplexor
@plugin
class ServiceWidget (ConfigurableWidget):
name = _('Service')
icon = 'play'
def on_prepare(self):
self.mgr = ServiceMultiplexor.get()
self.append(self.ui.inflate('services:widget'))
def on_start(self):
self.service = self.mgr.get_one(self.config['service'])
if not self.service:
return
self.find('name').text = self.service.name
self.find('icon').icon = self.service.icon
self.find('start').visible = not self.service.running
self.find('stop').visible = self.service.running
self.find('restart').visible = self.service.running
def create_config(self):
return {'service': ''}
def on_config_start(self):
service_list = self.dialog.find('service')
service_list.labels = service_list.values = [x.name for x in self.mgr.get_all()]
service_list.value = self.config['service']
def on_config_save(self):
self.config['service'] = self.dialog.find('service').value
@on('start', 'click')
def on_s_start(self):
self.service.start()
self.on_start()
@on('restart', 'click')
def on_s_restart(self):
self.service.restart()
self.on_start()
@on('stop', 'click')
def on_s_stop(self):
self.service.stop()
self.on_start()
| {
"content_hash": "96ca2f8b1a937e7f4d753ecd65922b8a",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 88,
"avg_line_length": 29.294117647058822,
"alnum_prop": 0.6224899598393574,
"repo_name": "lupyuen/RaspberryPiImage",
"id": "7f6e588b1f6c8224e273be9b286e2d8aeb81a2ef",
"size": "1494",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "usr/share/pyshared/ajenti/plugins/services/widget.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Arduino",
"bytes": "82308"
},
{
"name": "C",
"bytes": "3197439"
},
{
"name": "C#",
"bytes": "33056"
},
{
"name": "C++",
"bytes": "1020255"
},
{
"name": "CSS",
"bytes": "208338"
},
{
"name": "CoffeeScript",
"bytes": "87200"
},
{
"name": "Eagle",
"bytes": "1632170"
},
{
"name": "Go",
"bytes": "3646"
},
{
"name": "Groff",
"bytes": "286691"
},
{
"name": "HTML",
"bytes": "41527"
},
{
"name": "JavaScript",
"bytes": "403603"
},
{
"name": "Makefile",
"bytes": "33808"
},
{
"name": "Objective-C",
"bytes": "69457"
},
{
"name": "Perl",
"bytes": "96047"
},
{
"name": "Processing",
"bytes": "1304"
},
{
"name": "Python",
"bytes": "13358098"
},
{
"name": "Shell",
"bytes": "68795"
},
{
"name": "TeX",
"bytes": "4317"
}
],
"symlink_target": ""
} |
import sys
sys.path.insert(0, '../..')
import test_harness
test_harness.register_generic_assembly_tests(['memory_test.S'], ['fpga'])
test_harness.execute_tests()
| {
"content_hash": "430bb6c6f0afc7e0d4c564c4a5e1e246",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 73,
"avg_line_length": 23.428571428571427,
"alnum_prop": 0.7134146341463414,
"repo_name": "jbush001/NyuziProcessor",
"id": "9e8b39c1109f8a74e501cee464e2f19789807bbf",
"size": "765",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/fpga/memory_test/runtest.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "213194"
},
{
"name": "C",
"bytes": "2962697"
},
{
"name": "C++",
"bytes": "1644391"
},
{
"name": "CMake",
"bytes": "49462"
},
{
"name": "Dockerfile",
"bytes": "1642"
},
{
"name": "Java",
"bytes": "6570"
},
{
"name": "Perl",
"bytes": "4771"
},
{
"name": "Python",
"bytes": "203129"
},
{
"name": "Shell",
"bytes": "6099"
},
{
"name": "SystemVerilog",
"bytes": "966291"
},
{
"name": "Tcl",
"bytes": "471"
}
],
"symlink_target": ""
} |
"""
Title: Semi-supervised image classification using contrastive pretraining with SimCLR
Author: [András Béres](https://www.linkedin.com/in/andras-beres-789190210)
Date created: 2021/04/24
Last modified: 2021/04/24
Description: Contrastive pretraining with SimCLR for semi-supervised image classification on the STL-10 dataset.
"""
"""
## Introduction
### Semi-supervised learning
Semi-supervised learning is a machine learning paradigm that deals with
**partially labeled datasets**. When applying deep learning in the real world,
one usually has to gather a large dataset to make it work well. However, while
the cost of labeling scales linearly with the dataset size (labeling each
example takes a constant time), model performance only scales
[sublinearly](https://arxiv.org/abs/2001.08361) with it. This means that
labeling more and more samples becomes less and less cost-efficient, while
gathering unlabeled data is generally cheap, as it is usually readily available
in large quantities.
Semi-supervised learning offers to solve this problem by only requiring a
partially labeled dataset, and by being label-efficient by utilizing the
unlabeled examples for learning as well.
In this example, we will pretrain an encoder with contrastive learning on the
[STL-10](https://ai.stanford.edu/~acoates/stl10/) semi-supervised dataset using
no labels at all, and then fine-tune it using only its labeled subset.
### Contrastive learning
On the highest level, the main idea behind contrastive learning is to **learn
representations that are invariant to image augmentations** in a self-supervised
manner. One problem with this objective is that it has a trivial degenerate
solution: the case where the representations are constant, and do not depend at all on the
input images.
Contrastive learning avoids this trap by modifying the objective in the
following way: it pulls representations of augmented versions/views of the same
image closer to each other (contracting positives), while simultaneously pushing
different images away from each other (contrasting negatives) in representation
space.
One such contrastive approach is [SimCLR](https://arxiv.org/abs/2002.05709),
which essentially identifies the core components needed to optimize this
objective, and can achieve high performance by scaling this simple approach.
Another approach is [SimSiam](https://arxiv.org/abs/2011.10566)
([Keras example](https://keras.io/examples/vision/simsiam/)),
whose main difference from
SimCLR is that it does not use any negatives in its loss. Therefore, it does not
explicitly prevent the trivial solution, and, instead, avoids it implicitly by
architecture design (asymmetric encoding paths using a predictor network and
batch normalization (BatchNorm) are applied in the final layers).
For further reading about SimCLR, check out
[the official Google AI blog post](https://ai.googleblog.com/2020/04/advancing-self-supervised-and-semi.html),
and for an overview of self-supervised learning across both vision and language
check out
[this blog post](https://ai.facebook.com/blog/self-supervised-learning-the-dark-matter-of-intelligence/).
"""
"""
## Setup
"""
import math
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow import keras
from tensorflow.keras import layers
"""
## Hyperparameter setup
"""
# Dataset hyperparameters
unlabeled_dataset_size = 100000
labeled_dataset_size = 5000
image_size = 96
image_channels = 3
# Algorithm hyperparameters
num_epochs = 20
batch_size = 525 # Corresponds to 200 steps per epoch
width = 128
temperature = 0.1
# Stronger augmentations for contrastive, weaker ones for supervised training
contrastive_augmentation = {"min_area": 0.25, "brightness": 0.6, "jitter": 0.2}
classification_augmentation = {"min_area": 0.75, "brightness": 0.3, "jitter": 0.1}
"""
## Dataset
During training we will simultaneously load a large batch of unlabeled images along with a
smaller batch of labeled images.
"""
def prepare_dataset():
# Labeled and unlabeled samples are loaded synchronously
# with batch sizes selected accordingly
steps_per_epoch = (unlabeled_dataset_size + labeled_dataset_size) // batch_size
unlabeled_batch_size = unlabeled_dataset_size // steps_per_epoch
labeled_batch_size = labeled_dataset_size // steps_per_epoch
print(
f"batch size is {unlabeled_batch_size} (unlabeled) + {labeled_batch_size} (labeled)"
)
unlabeled_train_dataset = (
tfds.load("stl10", split="unlabelled", as_supervised=True, shuffle_files=True)
.shuffle(buffer_size=10 * unlabeled_batch_size)
.batch(unlabeled_batch_size)
)
labeled_train_dataset = (
tfds.load("stl10", split="train", as_supervised=True, shuffle_files=True)
.shuffle(buffer_size=10 * labeled_batch_size)
.batch(labeled_batch_size)
)
test_dataset = (
tfds.load("stl10", split="test", as_supervised=True)
.batch(batch_size)
.prefetch(buffer_size=tf.data.AUTOTUNE)
)
# Labeled and unlabeled datasets are zipped together
train_dataset = tf.data.Dataset.zip(
(unlabeled_train_dataset, labeled_train_dataset)
).prefetch(buffer_size=tf.data.AUTOTUNE)
return train_dataset, labeled_train_dataset, test_dataset
# Load STL10 dataset
train_dataset, labeled_train_dataset, test_dataset = prepare_dataset()
"""
## Image augmentations
The two most important image augmentations for contrastive learning are the
following:
- Cropping: forces the model to encode different parts of the same image
similarly; we implement it with the
[RandomTranslation](https://keras.io/api/layers/preprocessing_layers/image_preprocessing/random_translation/)
and
[RandomZoom](https://keras.io/api/layers/preprocessing_layers/image_preprocessing/random_zoom/)
layers
- Color jitter: prevents a trivial color histogram-based solution to the task by
distorting color histograms. A principled way to implement that is by affine
transformations in color space.
In this example we use random horizontal flips as well. Stronger augmentations
are applied for contrastive learning, along with weaker ones for supervised
classification to avoid overfitting on the few labeled examples.
We implement random color jitter as a custom preprocessing layer. Using
preprocessing layers for data augmentation has the following two advantages:
- The data augmentation will run on GPU in batches, so the training will not be
bottlenecked by the data pipeline in environments with constrained CPU
resources (such as a Colab Notebook, or a personal machine)
- Deployment is easier as the data preprocessing pipeline is encapsulated in the
model, and does not have to be reimplemented when deploying it
"""
# Distorts the color distributions of images
class RandomColorAffine(layers.Layer):
def __init__(self, brightness=0, jitter=0, **kwargs):
super().__init__(**kwargs)
self.brightness = brightness
self.jitter = jitter
def get_config(self):
config = super().get_config()
config.update({"brightness": self.brightness, "jitter": self.jitter})
return config
def call(self, images, training=True):
if training:
batch_size = tf.shape(images)[0]
# Same for all colors
brightness_scales = 1 + tf.random.uniform(
(batch_size, 1, 1, 1), minval=-self.brightness, maxval=self.brightness
)
# Different for all colors
jitter_matrices = tf.random.uniform(
(batch_size, 1, 3, 3), minval=-self.jitter, maxval=self.jitter
)
color_transforms = (
tf.eye(3, batch_shape=[batch_size, 1]) * brightness_scales
+ jitter_matrices
)
images = tf.clip_by_value(tf.matmul(images, color_transforms), 0, 1)
return images
# Image augmentation module
def get_augmenter(min_area, brightness, jitter):
zoom_factor = 1.0 - math.sqrt(min_area)
return keras.Sequential(
[
keras.Input(shape=(image_size, image_size, image_channels)),
layers.Rescaling(1 / 255),
layers.RandomFlip("horizontal"),
layers.RandomTranslation(zoom_factor / 2, zoom_factor / 2),
layers.RandomZoom((-zoom_factor, 0.0), (-zoom_factor, 0.0)),
RandomColorAffine(brightness, jitter),
]
)
def visualize_augmentations(num_images):
# Sample a batch from a dataset
images = next(iter(train_dataset))[0][0][:num_images]
# Apply augmentations
augmented_images = zip(
images,
get_augmenter(**classification_augmentation)(images),
get_augmenter(**contrastive_augmentation)(images),
get_augmenter(**contrastive_augmentation)(images),
)
row_titles = [
"Original:",
"Weakly augmented:",
"Strongly augmented:",
"Strongly augmented:",
]
plt.figure(figsize=(num_images * 2.2, 4 * 2.2), dpi=100)
for column, image_row in enumerate(augmented_images):
for row, image in enumerate(image_row):
plt.subplot(4, num_images, row * num_images + column + 1)
plt.imshow(image)
if column == 0:
plt.title(row_titles[row], loc="left")
plt.axis("off")
plt.tight_layout()
visualize_augmentations(num_images=8)
"""
## Encoder architecture
"""
# Define the encoder architecture
def get_encoder():
return keras.Sequential(
[
keras.Input(shape=(image_size, image_size, image_channels)),
layers.Conv2D(width, kernel_size=3, strides=2, activation="relu"),
layers.Conv2D(width, kernel_size=3, strides=2, activation="relu"),
layers.Conv2D(width, kernel_size=3, strides=2, activation="relu"),
layers.Conv2D(width, kernel_size=3, strides=2, activation="relu"),
layers.Flatten(),
layers.Dense(width, activation="relu"),
],
name="encoder",
)
"""
## Supervised baseline model
A baseline supervised model is trained using random initialization.
"""
# Baseline supervised training with random initialization
baseline_model = keras.Sequential(
[
keras.Input(shape=(image_size, image_size, image_channels)),
get_augmenter(**classification_augmentation),
get_encoder(),
layers.Dense(10),
],
name="baseline_model",
)
baseline_model.compile(
optimizer=keras.optimizers.Adam(),
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=[keras.metrics.SparseCategoricalAccuracy(name="acc")],
)
baseline_history = baseline_model.fit(
labeled_train_dataset, epochs=num_epochs, validation_data=test_dataset
)
print(
"Maximal validation accuracy: {:.2f}%".format(
max(baseline_history.history["val_acc"]) * 100
)
)
"""
## Self-supervised model for contrastive pretraining
We pretrain an encoder on unlabeled images with a contrastive loss.
A nonlinear projection head is attached to the top of the encoder, as it
improves the quality of representations of the encoder.
We use the InfoNCE/NT-Xent/N-pairs loss, which can be interpreted in the
following way:
1. We treat each image in the batch as if it had its own class.
2. Then, we have two examples (a pair of augmented views) for each "class".
3. Each image's representation from one augmented view is compared with the
representations of all images from the other augmented view (and vice versa).
4. We use the temperature-scaled cosine similarity of compared representations as
logits.
5. Finally, we use categorical cross-entropy as the "classification" loss.
The following two metrics are used for monitoring the pretraining performance:
- [Contrastive accuracy (SimCLR Table 5)](https://arxiv.org/abs/2002.05709):
Self-supervised metric, the ratio of cases in which the representation of an
image is more similar to its differently augmented version's one, than to the
representation of any other image in the current batch. Self-supervised
metrics can be used for hyperparameter tuning even in the case when there are
no labeled examples.
- [Linear probing accuracy](https://arxiv.org/abs/1603.08511): Linear probing is
a popular metric to evaluate self-supervised classifiers. It is computed as
the accuracy of a logistic regression classifier trained on top of the
encoder's features. In our case, this is done by training a single dense layer
on top of the frozen encoder. Note that, contrary to the traditional approach where
the classifier is trained after the pretraining phase, in this example we
train it during pretraining. This might slightly decrease its accuracy, but
that way we can monitor its value during training, which helps with
experimentation and debugging.
Another widely used supervised metric is the
[KNN accuracy](https://arxiv.org/abs/1805.01978), which is the accuracy of a KNN
classifier trained on top of the encoder's features; it is not implemented in
this example (a hedged sketch is given right after this section).
"""
# Define the contrastive model with model-subclassing
class ContrastiveModel(keras.Model):
def __init__(self):
super().__init__()
self.temperature = temperature
self.contrastive_augmenter = get_augmenter(**contrastive_augmentation)
self.classification_augmenter = get_augmenter(**classification_augmentation)
self.encoder = get_encoder()
# Non-linear MLP as projection head
self.projection_head = keras.Sequential(
[
keras.Input(shape=(width,)),
layers.Dense(width, activation="relu"),
layers.Dense(width),
],
name="projection_head",
)
# Single dense layer for linear probing
self.linear_probe = keras.Sequential(
[layers.Input(shape=(width,)), layers.Dense(10)], name="linear_probe"
)
self.encoder.summary()
self.projection_head.summary()
self.linear_probe.summary()
def compile(self, contrastive_optimizer, probe_optimizer, **kwargs):
super().compile(**kwargs)
self.contrastive_optimizer = contrastive_optimizer
self.probe_optimizer = probe_optimizer
# self.contrastive_loss will be defined as a method
self.probe_loss = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
self.contrastive_loss_tracker = keras.metrics.Mean(name="c_loss")
self.contrastive_accuracy = keras.metrics.SparseCategoricalAccuracy(
name="c_acc"
)
self.probe_loss_tracker = keras.metrics.Mean(name="p_loss")
self.probe_accuracy = keras.metrics.SparseCategoricalAccuracy(name="p_acc")
@property
def metrics(self):
return [
self.contrastive_loss_tracker,
self.contrastive_accuracy,
self.probe_loss_tracker,
self.probe_accuracy,
]
def contrastive_loss(self, projections_1, projections_2):
# InfoNCE loss (information noise-contrastive estimation)
# NT-Xent loss (normalized temperature-scaled cross entropy)
# Cosine similarity: the dot product of the l2-normalized feature vectors
projections_1 = tf.math.l2_normalize(projections_1, axis=1)
projections_2 = tf.math.l2_normalize(projections_2, axis=1)
similarities = (
tf.matmul(projections_1, projections_2, transpose_b=True) / self.temperature
)
# The similarity between the representations of two augmented views of the
# same image should be higher than their similarity with other views
batch_size = tf.shape(projections_1)[0]
contrastive_labels = tf.range(batch_size)
self.contrastive_accuracy.update_state(contrastive_labels, similarities)
self.contrastive_accuracy.update_state(
contrastive_labels, tf.transpose(similarities)
)
# The temperature-scaled similarities are used as logits for cross-entropy
# a symmetrized version of the loss is used here
loss_1_2 = keras.losses.sparse_categorical_crossentropy(
contrastive_labels, similarities, from_logits=True
)
loss_2_1 = keras.losses.sparse_categorical_crossentropy(
contrastive_labels, tf.transpose(similarities), from_logits=True
)
return (loss_1_2 + loss_2_1) / 2
def train_step(self, data):
(unlabeled_images, _), (labeled_images, labels) = data
# Both labeled and unlabeled images are used, without labels
images = tf.concat((unlabeled_images, labeled_images), axis=0)
# Each image is augmented twice, differently
augmented_images_1 = self.contrastive_augmenter(images, training=True)
augmented_images_2 = self.contrastive_augmenter(images, training=True)
with tf.GradientTape() as tape:
features_1 = self.encoder(augmented_images_1, training=True)
features_2 = self.encoder(augmented_images_2, training=True)
# The representations are passed through a projection mlp
projections_1 = self.projection_head(features_1, training=True)
projections_2 = self.projection_head(features_2, training=True)
contrastive_loss = self.contrastive_loss(projections_1, projections_2)
gradients = tape.gradient(
contrastive_loss,
self.encoder.trainable_weights + self.projection_head.trainable_weights,
)
self.contrastive_optimizer.apply_gradients(
zip(
gradients,
self.encoder.trainable_weights + self.projection_head.trainable_weights,
)
)
self.contrastive_loss_tracker.update_state(contrastive_loss)
        # Labels are only used in evaluation for an on-the-fly logistic regression
preprocessed_images = self.classification_augmenter(
labeled_images, training=True
)
with tf.GradientTape() as tape:
# the encoder is used in inference mode here to avoid regularization
            # and updating the batch normalization parameters if they are used
features = self.encoder(preprocessed_images, training=False)
class_logits = self.linear_probe(features, training=True)
probe_loss = self.probe_loss(labels, class_logits)
gradients = tape.gradient(probe_loss, self.linear_probe.trainable_weights)
self.probe_optimizer.apply_gradients(
zip(gradients, self.linear_probe.trainable_weights)
)
self.probe_loss_tracker.update_state(probe_loss)
self.probe_accuracy.update_state(labels, class_logits)
return {m.name: m.result() for m in self.metrics}
def test_step(self, data):
labeled_images, labels = data
# For testing the components are used with a training=False flag
preprocessed_images = self.classification_augmenter(
labeled_images, training=False
)
features = self.encoder(preprocessed_images, training=False)
class_logits = self.linear_probe(features, training=False)
probe_loss = self.probe_loss(labels, class_logits)
self.probe_loss_tracker.update_state(probe_loss)
self.probe_accuracy.update_state(labels, class_logits)
# Only the probe metrics are logged at test time
return {m.name: m.result() for m in self.metrics[2:]}
# Contrastive pretraining
pretraining_model = ContrastiveModel()
pretraining_model.compile(
contrastive_optimizer=keras.optimizers.Adam(),
probe_optimizer=keras.optimizers.Adam(),
)
pretraining_history = pretraining_model.fit(
train_dataset, epochs=num_epochs, validation_data=test_dataset
)
print(
"Maximal validation accuracy: {:.2f}%".format(
max(pretraining_history.history["val_p_acc"]) * 100
)
)
"""
## Supervised finetuning of the pretrained encoder
We then finetune the encoder on the labeled examples, by attaching
a single randomly initialized fully connected classification layer on top of it.
"""
# Supervised finetuning of the pretrained encoder
finetuning_model = keras.Sequential(
[
layers.Input(shape=(image_size, image_size, image_channels)),
get_augmenter(**classification_augmentation),
pretraining_model.encoder,
layers.Dense(10),
],
name="finetuning_model",
)
finetuning_model.compile(
optimizer=keras.optimizers.Adam(),
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=[keras.metrics.SparseCategoricalAccuracy(name="acc")],
)
finetuning_history = finetuning_model.fit(
labeled_train_dataset, epochs=num_epochs, validation_data=test_dataset
)
print(
"Maximal validation accuracy: {:.2f}%".format(
max(finetuning_history.history["val_acc"]) * 100
)
)
"""
## Comparison against the baseline
"""
# The classification accuracies of the baseline and the pretraining + finetuning process:
def plot_training_curves(pretraining_history, finetuning_history, baseline_history):
for metric_key, metric_name in zip(["acc", "loss"], ["accuracy", "loss"]):
plt.figure(figsize=(8, 5), dpi=100)
plt.plot(
baseline_history.history[f"val_{metric_key}"], label="supervised baseline"
)
plt.plot(
pretraining_history.history[f"val_p_{metric_key}"],
label="self-supervised pretraining",
)
plt.plot(
finetuning_history.history[f"val_{metric_key}"],
label="supervised finetuning",
)
plt.legend()
plt.title(f"Classification {metric_name} during training")
plt.xlabel("epochs")
plt.ylabel(f"validation {metric_name}")
plot_training_curves(pretraining_history, finetuning_history, baseline_history)
"""
By comparing the training curves, we can see that when using contrastive
pretraining, a higher validation accuracy can be reached, paired with a lower
validation loss, which means that the pretrained network was able to generalize
better when seeing only a small amount of labeled examples.
"""
"""
## Improving further
### Architecture
The experiment in the original paper demonstrated that increasing the width and depth of the
models improves performance at a higher rate than for supervised learning. Also,
using a [ResNet-50](https://keras.io/api/applications/resnet/#resnet50-function)
encoder is quite standard in the literature. However, keep in mind that more
powerful models will not only increase training time but will also require more
memory and will limit the maximal batch size you can use.
It has [been](https://arxiv.org/abs/1905.09272)
[reported](https://arxiv.org/abs/1911.05722) that the usage of BatchNorm layers
could sometimes degrade performance, as it introduces an intra-batch dependency
between samples, which is why I have not used them in this example. In my
experiments, however, using BatchNorm, especially in the projection head,
improves performance.
### Hyperparameters
The hyperparameters used in this example have been tuned manually for this task and
architecture. Therefore, without changing them, only marginal gains can be expected
from further hyperparameter tuning.
However for a different task or model architecture these would need tuning, so
here are my notes on the most important ones:
- **Batch size**: since the objective can be interpreted as a classification
over a batch of images (loosely speaking), the batch size is actually a more
important hyperparameter than usual. The higher, the better.
- **Temperature**: the temperature defines the "softness" of the softmax
distribution that is used in the cross-entropy loss, and is an important
hyperparameter. Lower values generally lead to a higher contrastive accuracy.
A recent trick (in [ALIGN](https://arxiv.org/abs/2102.05918)) is to learn
the temperature's value as well (which can be done by defining it as a
tf.Variable and applying gradients to it; a short sketch follows this section). Even though this provides a good baseline
value, in my experiments the learned temperature was somewhat lower
than optimal, as it is optimized with respect to the contrastive loss, which is not a
perfect proxy for representation quality.
- **Image augmentation strength**: during pretraining stronger augmentations
increase the difficulty of the task, however after a point too strong
augmentations will degrade performance. During finetuning stronger
augmentations reduce overfitting while in my experience too strong
augmentations decrease the performance gains from pretraining. The whole data
augmentation pipeline can be seen as an important hyperparameter of the
algorithm, implementations of other custom image augmentation layers in Keras
can be found in
[this repository](https://github.com/beresandras/image-augmentation-layers-keras).
- **Learning rate schedule**: a constant schedule is used here, but it is
quite common in the literature to use a
[cosine decay schedule](https://www.tensorflow.org/api_docs/python/tf/keras/experimental/CosineDecay),
which can further improve performance.
- **Optimizer**: Adam is used in this example, as it provides good performance
with default parameters. SGD with momentum requires more tuning, however it
could slightly increase performance.
"""
"""
## Related works
Other instance-level (image-level) contrastive learning methods:
- [MoCo](https://arxiv.org/abs/1911.05722)
([v2](https://arxiv.org/abs/2003.04297),
[v3](https://arxiv.org/abs/2104.02057)): uses a momentum-encoder as well,
whose weights are an exponential moving average of the target encoder
- [SwAV](https://arxiv.org/abs/2006.09882): uses clustering instead of pairwise
comparison
- [BarlowTwins](https://arxiv.org/abs/2103.03230): uses a cross
correlation-based objective instead of pairwise comparison
Keras implementations of **MoCo** and **BarlowTwins** can be found in
[this repository](https://github.com/beresandras/contrastive-classification-keras),
which includes a Colab notebook.
There is also a newer line of work that optimizes a similar objective, but
without the use of any negatives:
- [BYOL](https://arxiv.org/abs/2006.07733): momentum-encoder + no negatives
- [SimSiam](https://arxiv.org/abs/2011.10566)
([Keras example](https://keras.io/examples/vision/simsiam/)):
no momentum-encoder + no negatives
In my experience, these methods are more brittle (they can collapse to a constant
representation; I could not get them to work using this encoder architecture).
Even though they are generally more dependent on the
[model](https://generallyintelligent.ai/understanding-self-supervised-contrastive-learning.html)
[architecture](https://arxiv.org/abs/2010.10241), they can improve
performance at smaller batch sizes.
You can use the trained model hosted on [Hugging Face Hub](https://huggingface.co/keras-io/semi-supervised-classification-simclr)
and try the demo on [Hugging Face Spaces](https://huggingface.co/spaces/keras-io/semi-supervised-classification).
"""
| {
"content_hash": "6b2b789271ee30343c6f613005a2105d",
"timestamp": "",
"source": "github",
"line_count": 665,
"max_line_length": 129,
"avg_line_length": 41.65263157894737,
"alnum_prop": 0.7004585003068703,
"repo_name": "keras-team/keras-io",
"id": "c6d48337523219cf375d6ac8192b9b92b6a3af73",
"size": "27701",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/vision/semisupervised_simclr.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "15929"
},
{
"name": "Dockerfile",
"bytes": "188"
},
{
"name": "HTML",
"bytes": "21968"
},
{
"name": "Jupyter Notebook",
"bytes": "718942"
},
{
"name": "Makefile",
"bytes": "193"
},
{
"name": "Python",
"bytes": "680865"
}
],
"symlink_target": ""
} |
import io
import os
from os.path import join as pjoin
from setuptools import setup, find_packages
def get_version(file, name='__version__'):
"""Get the version of the package from the given file by
executing it and extracting the given `name`.
"""
path = os.path.realpath(file)
version_ns = {}
with io.open(path, encoding="utf8") as f:
exec(f.read(), {}, version_ns)
return version_ns[name]
with open('README.md') as f:
README = f.read()
with open('LICENSE') as f:
LICENSE = f.read()
NAME = 'regulus'
VERSION = get_version(pjoin(NAME, '_version.py'))
setup(
name=NAME,
version=VERSION,
description='Regulus',
long_description=README,
author='Yarden Livnat',
author_email='yarden@sci.utah.edu',
url='https://github.com/yarden_livnat/regulus',
license=LICENSE,
zip_safe=False,
packages=find_packages(exclude=('tests', 'docs')),
install_requires=[
        'topopy==0.1', 'numpy', 'sklearn', 'pandas'
],
test_suite='nose.collector',
tests_require=['nose'],
entry_points={
'console_scripts': [
'regulus=regulus.command_line:main'
],
}
)
| {
"content_hash": "9cb17377ceef3dcea8bcfe73cc1bd709",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 60,
"avg_line_length": 23.52,
"alnum_prop": 0.6224489795918368,
"repo_name": "yarden-livnat/regulus",
"id": "b7c966edaccd17f3578cb626995becb01cd04620",
"size": "1198",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "70587"
}
],
"symlink_target": ""
} |
import os
from webtest import TestApp
import tg
import tests
from tg.util import DottedFileNameFinder
from tg.configuration import AppConfig
class TestConfig(AppConfig):
def __init__(self):
AppConfig.__init__(self)
self.renderers = ['json', 'genshi', 'mako']
tg.config['renderers'] = ['json', 'genshi', 'mako']
self.render_functions = tg.util.Bunch()
self.package = tests
#self.default_renderer = 'genshi'
self.globals = self
self.helpers = {}
self.auth_backend = None
self.auto_reload_templates = False
self.use_legacy_renderer = False
self.use_dotted_templatenames = False
self.use_sqlalchemy=True
self.serve_static = False
self.prefer_toscawidgets2 = True
self['templating.genshi.name_constant_patch'] = True
root = os.path.dirname(os.path.dirname(tests.__file__))
test_base_path = os.path.join(root, 'tests')
test_config_path = test_base_path #os.path.join(test_base_path, folder)
self.paths=tg.util.Bunch(
root=test_base_path,
controllers=os.path.join(test_config_path, 'controllers'),
static_files=os.path.join(test_config_path, 'public'),
templates=[os.path.join(test_config_path, 'templates')],
i18n=os.path.join(test_config_path, 'i18n')
)
#xxx: why not use memory db?
values = {
'sqlalchemy.url':'sqlite:///'+root+'/test.db',
'session':tests.model.DBSession,
'model':tests.model,
'use_dotted_templatenames': True,
'renderers': ['json', 'genshi', 'mako']
}
        #Then we override those values with what was passed in
for key, value in values.items():
setattr(self, key, value)
_app = None
def app_from_config(base_config, deployment_config=None):
global _app
if _app is None:
if not deployment_config:
deployment_config = {'debug': 'true',
'error_email_from': 'paste@localhost',
'smtp_server': 'localhost',
}
env_loader = base_config.make_load_environment()
app_maker = base_config.setup_tg_wsgi_app(env_loader)
app = TestApp(app_maker(deployment_config, full_stack=True))
_app = app
return _app
| {
"content_hash": "a4bee8ecf18cb023fbe02bca07126239",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 79,
"avg_line_length": 36.608695652173914,
"alnum_prop": 0.5542359461599367,
"repo_name": "TurboGears/tgext.admin",
"id": "b1ddecf958033cedbafc249c6315271cdebc2ee0",
"size": "2526",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/t_config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "27956"
},
{
"name": "Makefile",
"bytes": "6778"
},
{
"name": "Python",
"bytes": "54025"
}
],
"symlink_target": ""
} |
"""
WSGI config for rwf project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
import sys
from django.core.wsgi import get_wsgi_application
# This allows easy placement of apps within the interior
# reddit directory.
#app_path = os.path.dirname(os.path.abspath(__file__)).replace('/config', '')
#sys.path.append(os.path.join(app_path, 'core'))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "backend.config.settings.local")
application = get_wsgi_application()
| {
"content_hash": "8c3a993cec57b3645b28e7a2f707bedd",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 80,
"avg_line_length": 28.045454545454547,
"alnum_prop": 0.7504051863857374,
"repo_name": "ianmilliken/rwf",
"id": "6bf97ec05b155ab14c88278aedeacf8dc3ffacf9",
"size": "617",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/config/wsgi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "441558"
},
{
"name": "HTML",
"bytes": "13521"
},
{
"name": "JavaScript",
"bytes": "1036656"
},
{
"name": "Python",
"bytes": "28122"
}
],
"symlink_target": ""
} |
from django.conf.urls import url
#from discounts.views import SolicitarDescuentoView
from discounts.views import CrearSolicitudView
from discounts.views import TipoDescuentoCreateView
from discounts.views import AprobarDescuentoView
from discounts.views import DetalleDescuentoView
from discounts.views import TipoDescuentoListView
from discounts.views import TipoDescuentoUpdateView
from discounts.views import TipoDescuentoUpdateEndView
urlpatterns = [
# URL para tipos de servicios
#url(r'^solicitar/$', SolicitarDescuentoView.as_view(), name="solicitar_descuento"),
url(r'^solicitar/create$', CrearSolicitudView.as_view(), name="crear_solicitud"),
url(r'^tipodescuento/create$', TipoDescuentoCreateView.as_view(), name="tipo_descuento_create"),
url(r'^approve/$', AprobarDescuentoView.as_view(), name="aprobar_descuentos"),
url(r'^detail/$', DetalleDescuentoView.as_view(), name="detalle_descuentos"),
url(r'^tipodescuento/list/$', TipoDescuentoListView.as_view(), name="tipo_descuento_list"),
url(r'^tipodescuento/update/$', TipoDescuentoUpdateView.as_view(), name="tipo_descuento_update"),
url(r'^tipodescuento/update/end/$', TipoDescuentoUpdateEndView.as_view(), name="tipo_descuento_update_end"),
]
| {
"content_hash": "23787714cb99afe8baf51091812115d0",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 112,
"avg_line_length": 59.23809523809524,
"alnum_prop": 0.7781350482315113,
"repo_name": "furthz/colegio",
"id": "f8fab0a9c0678f4291730fffa7d90476cd2c0d90",
"size": "1244",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/discounts/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1252299"
},
{
"name": "CoffeeScript",
"bytes": "83631"
},
{
"name": "HTML",
"bytes": "2381690"
},
{
"name": "JavaScript",
"bytes": "6252620"
},
{
"name": "PHP",
"bytes": "2157"
},
{
"name": "PowerShell",
"bytes": "471"
},
{
"name": "Python",
"bytes": "774970"
},
{
"name": "Roff",
"bytes": "130"
},
{
"name": "Ruby",
"bytes": "1030"
},
{
"name": "Shell",
"bytes": "1723"
}
],
"symlink_target": ""
} |
from rpython.jit.metainterp.test.test_recursive import RecursiveTests
from rpython.jit.backend.ppc.test.support import JitPPCMixin
class TestRecursive(JitPPCMixin, RecursiveTests):
# for the individual tests see
# ====> ../../../metainterp/test/test_recursive.py
pass
| {
"content_hash": "48fcaa913fd15ab6e7a4293b626c9948",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 69,
"avg_line_length": 40.142857142857146,
"alnum_prop": 0.7580071174377224,
"repo_name": "jptomo/rpython-lang-scheme",
"id": "84fa0baeaec92be3893a748e979f05d718fb56ed",
"size": "282",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "rpython/jit/backend/ppc/test/test_recursive.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "161293"
},
{
"name": "Batchfile",
"bytes": "5289"
},
{
"name": "C",
"bytes": "335765"
},
{
"name": "C++",
"bytes": "12638"
},
{
"name": "Emacs Lisp",
"bytes": "3149"
},
{
"name": "HCL",
"bytes": "155"
},
{
"name": "Makefile",
"bytes": "6988"
},
{
"name": "Objective-C",
"bytes": "1907"
},
{
"name": "Python",
"bytes": "16129160"
},
{
"name": "Scheme",
"bytes": "3"
},
{
"name": "Shell",
"bytes": "721"
},
{
"name": "VimL",
"bytes": "1107"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'AuthProfile'
db.create_table(u'fastapp_authprofile', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.OneToOneField')(related_name='authprofile', unique=True, to=orm['auth.User'])),
('access_token', self.gf('django.db.models.fields.CharField')(max_length=72)),
))
db.send_create_signal(u'fastapp', ['AuthProfile'])
# Adding model 'Base'
db.create_table(u'fastapp_base', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=32)),
('uuid', self.gf('django.db.models.fields.CharField')(max_length=36, blank=True)),
('content', self.gf('django.db.models.fields.CharField')(max_length=8192)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(default=0, related_name='+', blank=True, to=orm['auth.User'])),
))
db.send_create_signal(u'fastapp', ['Base'])
# Adding model 'Exec'
db.create_table(u'fastapp_exec', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=64)),
('module', self.gf('django.db.models.fields.CharField')(max_length=8192)),
('base', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='execs', null=True, to=orm['fastapp.Base'])),
))
db.send_create_signal(u'fastapp', ['Exec'])
def backwards(self, orm):
# Deleting model 'AuthProfile'
db.delete_table(u'fastapp_authprofile')
# Deleting model 'Base'
db.delete_table(u'fastapp_base')
# Deleting model 'Exec'
db.delete_table(u'fastapp_exec')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'fastapp.authprofile': {
'Meta': {'object_name': 'AuthProfile'},
'access_token': ('django.db.models.fields.CharField', [], {'max_length': '72'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'authprofile'", 'unique': 'True', 'to': u"orm['auth.User']"})
},
u'fastapp.base': {
'Meta': {'object_name': 'Base'},
'content': ('django.db.models.fields.CharField', [], {'max_length': '8192'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'default': '0', 'related_name': "'+'", 'blank': 'True', 'to': u"orm['auth.User']"}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
u'fastapp.exec': {
'Meta': {'object_name': 'Exec'},
'base': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'execs'", 'null': 'True', 'to': u"orm['fastapp.Base']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'module': ('django.db.models.fields.CharField', [], {'max_length': '8192'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'})
}
}
complete_apps = ['fastapp'] | {
"content_hash": "a9e2c14cf9f59a3e721d23155d43067d",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 187,
"avg_line_length": 62.211009174311926,
"alnum_prop": 0.5617165609792066,
"repo_name": "fatrix/django-fastapp",
"id": "86c21629f0dddfab6dd71f691a5fbacdf24c303d",
"size": "6805",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "fastapp/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "16762"
},
{
"name": "JavaScript",
"bytes": "639204"
},
{
"name": "Python",
"bytes": "262234"
},
{
"name": "Shell",
"bytes": "78"
}
],
"symlink_target": ""
} |
import sys
import webbrowser
import BaseHTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
HandlerClass = SimpleHTTPRequestHandler
ServerClass = BaseHTTPServer.HTTPServer
Protocol = "HTTP/1.0"
if sys.argv[1:]:
port = int(sys.argv[1])
else:
port = 10960
server_address = ('127.0.0.1', port)
HandlerClass.protocol_version = Protocol
httpd = ServerClass(server_address, HandlerClass)
sa = httpd.socket.getsockname()
print "Serving HTTP on", sa[0], "port", sa[1], "..."
webbrowser.open_new_tab('http://127.0.0.1:%s' % port)
httpd.serve_forever()
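# Usage sketch (added note; the port value below is hypothetical):
#   python runserver.py 8000
# serves the current directory over HTTP on 127.0.0.1:8000 and opens a
# browser tab; with no argument the script falls back to port 10960.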
| {
"content_hash": "72a21d42441cfcbea2fceba1909bcb5a",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 53,
"avg_line_length": 27,
"alnum_prop": 0.7425044091710759,
"repo_name": "Revolution1/ID_generator",
"id": "9bf6bf9193c600c3957d46339562396fc25df2f0",
"size": "567",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "runserver.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "4572"
},
{
"name": "Python",
"bytes": "5442"
}
],
"symlink_target": ""
} |
"""
Cloud Controller: Implementation of EC2 REST API calls, which are
dispatched to other nodes via AMQP RPC. State is via distributed
datastore.
"""
import base64
import time
from oslo.config import cfg
from nova.api.ec2 import ec2utils
from nova.api.ec2 import inst_state
from nova.api.metadata import password
from nova.api import validator
from nova import availability_zones
from nova import block_device
from nova.cloudpipe import pipelib
from nova import compute
from nova.compute import api as compute_api
from nova.compute import instance_types
from nova.compute import vm_states
from nova import db
from nova import exception
from nova.image import s3
from nova import network
from nova.network.security_group import quantum_driver
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import quota
from nova import servicegroup
from nova import utils
from nova import volume
ec2_opts = [
cfg.StrOpt('ec2_host',
default='$my_ip',
help='the ip of the ec2 api server'),
cfg.StrOpt('ec2_dmz_host',
default='$my_ip',
help='the internal ip of the ec2 api server'),
cfg.IntOpt('ec2_port',
default=8773,
help='the port of the ec2 api server'),
cfg.StrOpt('ec2_scheme',
default='http',
help='the protocol to use when connecting to the ec2 api '
'server (http, https)'),
cfg.StrOpt('ec2_path',
default='/services/Cloud',
help='the path prefix used to call the ec2 api server'),
cfg.ListOpt('region_list',
default=[],
help='list of region=fqdn pairs separated by commas'),
]
CONF = cfg.CONF
CONF.register_opts(ec2_opts)
CONF.import_opt('my_ip', 'nova.netconf')
CONF.import_opt('vpn_key_suffix', 'nova.cloudpipe.pipelib')
CONF.import_opt('internal_service_availability_zone',
'nova.availability_zones')
LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
def validate_ec2_id(val):
if not validator.validate_str()(val):
raise exception.InvalidInstanceIDMalformed(val=val)
try:
ec2utils.ec2_id_to_id(val)
except exception.InvalidEc2Id:
raise exception.InvalidInstanceIDMalformed(val=val)
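# Illustrative note (added; the id is hypothetical): a well-formed id such as
# 'i-0000001e' passes validate_ec2_id() silently, while a value that
# ec2utils.ec2_id_to_id() cannot parse raises InvalidInstanceIDMalformed.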
# EC2 API can return the following values as documented in the EC2 API
# http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/
# ApiReference-ItemType-InstanceStateType.html
# pending 0 | running 16 | shutting-down 32 | terminated 48 | stopping 64 |
# stopped 80
_STATE_DESCRIPTION_MAP = {
None: inst_state.PENDING,
vm_states.ACTIVE: inst_state.RUNNING,
vm_states.BUILDING: inst_state.PENDING,
vm_states.DELETED: inst_state.TERMINATED,
vm_states.SOFT_DELETED: inst_state.TERMINATED,
vm_states.STOPPED: inst_state.STOPPED,
vm_states.PAUSED: inst_state.PAUSE,
vm_states.SUSPENDED: inst_state.SUSPEND,
vm_states.RESCUED: inst_state.RESCUE,
vm_states.RESIZED: inst_state.RESIZE,
}
def _state_description(vm_state, _shutdown_terminate):
"""Map the vm state to the server status string."""
# Note(maoy): We do not provide EC2 compatibility
# in shutdown_terminate flag behavior. So we ignore
# it here.
name = _STATE_DESCRIPTION_MAP.get(vm_state, vm_state)
return {'code': inst_state.name_to_code(name),
'name': name}
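# Illustrative example (added; assumes inst_state.name_to_code follows the
# EC2 code table above): _state_description(vm_states.ACTIVE, False) would
# return {'code': 16, 'name': 'running'}.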
def _parse_block_device_mapping(bdm):
"""Parse BlockDeviceMappingItemType into flat hash
    BlockDeviceMapping.<N>.DeviceName
    BlockDeviceMapping.<N>.Ebs.SnapshotId
    BlockDeviceMapping.<N>.Ebs.VolumeSize
    BlockDeviceMapping.<N>.Ebs.DeleteOnTermination
    BlockDeviceMapping.<N>.Ebs.NoDevice
    BlockDeviceMapping.<N>.VirtualName
=> remove .Ebs and allow volume id in SnapshotId
"""
ebs = bdm.pop('ebs', None)
if ebs:
ec2_id = ebs.pop('snapshot_id', None)
if ec2_id:
if ec2_id.startswith('snap-'):
bdm['snapshot_id'] = ec2utils.ec2_snap_id_to_uuid(ec2_id)
elif ec2_id.startswith('vol-'):
bdm['volume_id'] = ec2utils.ec2_vol_id_to_uuid(ec2_id)
ebs.setdefault('delete_on_termination', True)
bdm.update(ebs)
return bdm
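# Illustrative example (added; the snapshot id is hypothetical): an input of
#   {'device_name': '/dev/sdb', 'ebs': {'snapshot_id': 'snap-12345678',
#                                       'volume_size': 10}}
# is flattened in place to roughly
#   {'device_name': '/dev/sdb', 'snapshot_id': <internal uuid>,
#    'volume_size': 10, 'delete_on_termination': True}
# since the 'ebs' sub-dict is popped, its snapshot id is translated via
# ec2utils.ec2_snap_id_to_uuid(), delete_on_termination defaults to True and
# the remaining keys are merged into the top-level mapping.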
def _properties_get_mappings(properties):
return block_device.mappings_prepend_dev(properties.get('mappings', []))
def _format_block_device_mapping(bdm):
"""Construct BlockDeviceMappingItemType
{'device_name': '...', 'snapshot_id': , ...}
=> BlockDeviceMappingItemType
"""
keys = (('deviceName', 'device_name'),
('virtualName', 'virtual_name'))
item = {}
for name, k in keys:
if k in bdm:
item[name] = bdm[k]
if bdm.get('no_device'):
item['noDevice'] = True
if ('snapshot_id' in bdm) or ('volume_id' in bdm):
ebs_keys = (('snapshotId', 'snapshot_id'),
('snapshotId', 'volume_id'), # snapshotId is abused
('volumeSize', 'volume_size'),
('deleteOnTermination', 'delete_on_termination'))
ebs = {}
for name, k in ebs_keys:
if k in bdm:
if k == 'snapshot_id':
ebs[name] = ec2utils.id_to_ec2_snap_id(bdm[k])
elif k == 'volume_id':
ebs[name] = ec2utils.id_to_ec2_vol_id(bdm[k])
else:
ebs[name] = bdm[k]
assert 'snapshotId' in ebs
item['ebs'] = ebs
return item
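# Illustrative example (added; ids and sizes are hypothetical): a BDM row like
#   {'device_name': '/dev/sdb1', 'snapshot_id': <uuid>, 'volume_size': 10,
#    'delete_on_termination': False}
# is formatted as
#   {'deviceName': '/dev/sdb1',
#    'ebs': {'snapshotId': 'snap-xxxxxxxx', 'volumeSize': 10,
#            'deleteOnTermination': False}}
# with ec2utils.id_to_ec2_snap_id() supplying the EC2-style snapshot id.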
def _format_mappings(properties, result):
"""Format multiple BlockDeviceMappingItemType."""
mappings = [{'virtualName': m['virtual'], 'deviceName': m['device']}
for m in _properties_get_mappings(properties)
if block_device.is_swap_or_ephemeral(m['virtual'])]
block_device_mapping = [_format_block_device_mapping(bdm) for bdm in
properties.get('block_device_mapping', [])]
# NOTE(yamahata): overwrite mappings with block_device_mapping
for bdm in block_device_mapping:
for i in range(len(mappings)):
if bdm['deviceName'] == mappings[i]['deviceName']:
del mappings[i]
break
mappings.append(bdm)
# NOTE(yamahata): trim ebs.no_device == true. Is this necessary?
mappings = [bdm for bdm in mappings if not (bdm.get('noDevice', False))]
if mappings:
result['blockDeviceMapping'] = mappings
class CloudController(object):
"""CloudController provides the critical dispatch between
inbound API calls through the endpoint and messages
sent to the other nodes.
"""
def __init__(self):
self.image_service = s3.S3ImageService()
self.network_api = network.API()
self.volume_api = volume.API()
self.security_group_api = get_cloud_security_group_api()
self.compute_api = compute.API(network_api=self.network_api,
volume_api=self.volume_api,
security_group_api=self.security_group_api)
self.keypair_api = compute_api.KeypairAPI()
self.servicegroup_api = servicegroup.API()
def __str__(self):
return 'CloudController'
def _enforce_valid_instance_ids(self, context, instance_ids):
# NOTE(mikal): Amazon's implementation of the EC2 API requires that
# _all_ instance ids passed in be valid.
instances = {}
if instance_ids:
for ec2_id in instance_ids:
instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, ec2_id)
instance = self.compute_api.get(context, instance_uuid)
instances[ec2_id] = instance
return instances
def _get_image_state(self, image):
# NOTE(vish): fallback status if image_state isn't set
state = image.get('status')
if state == 'active':
state = 'available'
return image['properties'].get('image_state', state)
def describe_availability_zones(self, context, **kwargs):
if ('zone_name' in kwargs and
'verbose' in kwargs['zone_name'] and
context.is_admin):
return self._describe_availability_zones_verbose(context,
**kwargs)
else:
return self._describe_availability_zones(context, **kwargs)
def _describe_availability_zones(self, context, **kwargs):
ctxt = context.elevated()
available_zones, not_available_zones = \
availability_zones.get_availability_zones(ctxt)
result = []
for zone in available_zones:
# Hide internal_service_availability_zone
if zone == CONF.internal_service_availability_zone:
continue
result.append({'zoneName': zone,
'zoneState': "available"})
for zone in not_available_zones:
result.append({'zoneName': zone,
'zoneState': "not available"})
return {'availabilityZoneInfo': result}
def _describe_availability_zones_verbose(self, context, **kwargs):
ctxt = context.elevated()
available_zones, not_available_zones = \
availability_zones.get_availability_zones(ctxt)
# Available services
enabled_services = db.service_get_all(context, False)
enabled_services = availability_zones.set_availability_zones(context,
enabled_services)
zone_hosts = {}
host_services = {}
for service in enabled_services:
zone_hosts.setdefault(service['availability_zone'], [])
if service['host'] not in zone_hosts[service['availability_zone']]:
zone_hosts[service['availability_zone']].append(
service['host'])
host_services.setdefault(service['availability_zone'] +
service['host'], [])
host_services[service['availability_zone'] + service['host']].\
append(service)
result = []
for zone in available_zones:
result.append({'zoneName': zone,
'zoneState': "available"})
for host in zone_hosts[zone]:
result.append({'zoneName': '|- %s' % host,
'zoneState': ''})
for service in host_services[zone + host]:
alive = self.servicegroup_api.service_is_up(service)
art = (alive and ":-)") or "XXX"
active = 'enabled'
if service['disabled']:
active = 'disabled'
result.append({'zoneName': '| |- %s' % service['binary'],
'zoneState': ('%s %s %s'
% (active, art,
service['updated_at']))})
for zone in not_available_zones:
result.append({'zoneName': zone,
'zoneState': "not available"})
return {'availabilityZoneInfo': result}
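    # Illustrative sketch of the verbose listing built above (host and
    # service names are hypothetical):
    #   {'zoneName': 'nova',              'zoneState': 'available'}
    #   {'zoneName': '|- compute1',       'zoneState': ''}
    #   {'zoneName': '| |- nova-compute', 'zoneState': 'enabled :-) <updated_at>'}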
def describe_regions(self, context, region_name=None, **kwargs):
if CONF.region_list:
regions = []
for region in CONF.region_list:
name, _sep, host = region.partition('=')
endpoint = '%s://%s:%s%s' % (CONF.ec2_scheme,
host,
CONF.ec2_port,
CONF.ec2_path)
regions.append({'regionName': name,
'regionEndpoint': endpoint})
else:
regions = [{'regionName': 'nova',
'regionEndpoint': '%s://%s:%s%s' % (CONF.ec2_scheme,
CONF.ec2_host,
CONF.ec2_port,
CONF.ec2_path)}]
return {'regionInfo': regions}
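    # Illustrative example (added; region name and host are hypothetical):
    # with region_list = ['east=ec2.east.example.com'] and the default scheme,
    # port and path, the entry returned is
    #   {'regionName': 'east',
    #    'regionEndpoint': 'http://ec2.east.example.com:8773/services/Cloud'}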
def describe_snapshots(self,
context,
snapshot_id=None,
owner=None,
restorable_by=None,
**kwargs):
if snapshot_id:
snapshots = []
for ec2_id in snapshot_id:
internal_id = ec2utils.ec2_snap_id_to_uuid(ec2_id)
snapshot = self.volume_api.get_snapshot(
context,
snapshot_id=internal_id)
snapshots.append(snapshot)
else:
snapshots = self.volume_api.get_all_snapshots(context)
formatted_snapshots = []
for s in snapshots:
formatted = self._format_snapshot(context, s)
if formatted:
formatted_snapshots.append(formatted)
return {'snapshotSet': formatted_snapshots}
def _format_snapshot(self, context, snapshot):
# NOTE(mikal): this is just a set of strings in cinder. If they
# implement an enum, then we should move this code to use it. The
# valid ec2 statuses are "pending", "completed", and "error".
status_map = {'new': 'pending',
'creating': 'pending',
'available': 'completed',
'active': 'completed',
'deleting': 'pending',
'deleted': None,
'error': 'error'}
mapped_status = status_map.get(snapshot['status'], snapshot['status'])
if not mapped_status:
return None
s = {}
s['snapshotId'] = ec2utils.id_to_ec2_snap_id(snapshot['id'])
s['volumeId'] = ec2utils.id_to_ec2_vol_id(snapshot['volume_id'])
s['status'] = mapped_status
s['startTime'] = snapshot['created_at']
s['progress'] = snapshot['progress']
s['ownerId'] = snapshot['project_id']
s['volumeSize'] = snapshot['volume_size']
s['description'] = snapshot['display_description']
return s
def create_snapshot(self, context, volume_id, **kwargs):
validate_ec2_id(volume_id)
LOG.audit(_("Create snapshot of volume %s"), volume_id,
context=context)
volume_id = ec2utils.ec2_vol_id_to_uuid(volume_id)
volume = self.volume_api.get(context, volume_id)
args = (context, volume, kwargs.get('name'), kwargs.get('description'))
if kwargs.get('force', False):
snapshot = self.volume_api.create_snapshot_force(*args)
else:
snapshot = self.volume_api.create_snapshot(*args)
db.ec2_snapshot_create(context, snapshot['id'])
return self._format_snapshot(context, snapshot)
def delete_snapshot(self, context, snapshot_id, **kwargs):
snapshot_id = ec2utils.ec2_snap_id_to_uuid(snapshot_id)
snapshot = self.volume_api.get_snapshot(context, snapshot_id)
self.volume_api.delete_snapshot(context, snapshot)
return True
def describe_key_pairs(self, context, key_name=None, **kwargs):
key_pairs = self.keypair_api.get_key_pairs(context, context.user_id)
if key_name is not None:
key_pairs = [x for x in key_pairs if x['name'] in key_name]
        # If looking for a non-existent key pair
if key_name is not None and not key_pairs:
msg = _('Could not find key pair(s): %s') % ','.join(key_name)
raise exception.KeypairNotFound(msg,
code="InvalidKeyPair.Duplicate")
result = []
for key_pair in key_pairs:
# filter out the vpn keys
suffix = CONF.vpn_key_suffix
if context.is_admin or not key_pair['name'].endswith(suffix):
result.append({
'keyName': key_pair['name'],
'keyFingerprint': key_pair['fingerprint'],
})
return {'keySet': result}
def create_key_pair(self, context, key_name, **kwargs):
LOG.audit(_("Create key pair %s"), key_name, context=context)
try:
keypair = self.keypair_api.create_key_pair(context,
context.user_id,
key_name)
except exception.KeypairLimitExceeded:
msg = _("Quota exceeded, too many key pairs.")
raise exception.EC2APIError(msg, code='ResourceLimitExceeded')
return {'keyName': key_name,
'keyFingerprint': keypair['fingerprint'],
'keyMaterial': keypair['private_key']}
# TODO(vish): when context is no longer an object, pass it here
def import_key_pair(self, context, key_name, public_key_material,
**kwargs):
LOG.audit(_("Import key %s"), key_name, context=context)
public_key = base64.b64decode(public_key_material)
try:
keypair = self.keypair_api.import_key_pair(context,
context.user_id,
key_name,
public_key)
except exception.KeypairLimitExceeded:
msg = _("Quota exceeded, too many key pairs.")
raise exception.EC2APIError(msg)
except exception.InvalidKeypair:
msg = _("Keypair data is invalid")
raise exception.EC2APIError(msg)
return {'keyName': key_name,
'keyFingerprint': keypair['fingerprint']}
def delete_key_pair(self, context, key_name, **kwargs):
LOG.audit(_("Delete key pair %s"), key_name, context=context)
try:
self.keypair_api.delete_key_pair(context, context.user_id,
key_name)
except exception.NotFound:
# aws returns true even if the key doesn't exist
pass
return True
def describe_security_groups(self, context, group_name=None, group_id=None,
**kwargs):
search_opts = ec2utils.search_opts_from_filters(kwargs.get('filter'))
raw_groups = self.security_group_api.list(context,
group_name,
group_id,
context.project_id,
search_opts=search_opts)
groups = [self._format_security_group(context, g) for g in raw_groups]
return {'securityGroupInfo':
list(sorted(groups,
key=lambda k: (k['ownerId'], k['groupName'])))}
def _format_security_group(self, context, group):
g = {}
g['groupDescription'] = group['description']
g['groupName'] = group['name']
g['ownerId'] = group['project_id']
g['ipPermissions'] = []
for rule in group['rules']:
r = {}
r['groups'] = []
r['ipRanges'] = []
if rule['group_id']:
if rule.get('grantee_group'):
source_group = rule['grantee_group']
r['groups'] += [{'groupName': source_group['name'],
'userId': source_group['project_id']}]
else:
# rule is not always joined with grantee_group
# for example when using quantum driver.
source_group = self.security_group_api.get(
context, id=rule['group_id'])
r['groups'] += [{'groupName': source_group.get('name'),
'userId': source_group.get('project_id')}]
if rule['protocol']:
r['ipProtocol'] = rule['protocol'].lower()
r['fromPort'] = rule['from_port']
r['toPort'] = rule['to_port']
g['ipPermissions'] += [dict(r)]
else:
for protocol, min_port, max_port in (('icmp', -1, -1),
('tcp', 1, 65535),
('udp', 1, 65535)):
r['ipProtocol'] = protocol
r['fromPort'] = min_port
r['toPort'] = max_port
g['ipPermissions'] += [dict(r)]
else:
r['ipProtocol'] = rule['protocol']
r['fromPort'] = rule['from_port']
r['toPort'] = rule['to_port']
r['ipRanges'] += [{'cidrIp': rule['cidr']}]
g['ipPermissions'] += [r]
return g
def _rule_args_to_dict(self, context, kwargs):
rules = []
if 'groups' not in kwargs and 'ip_ranges' not in kwargs:
rule = self._rule_dict_last_step(context, **kwargs)
if rule:
rules.append(rule)
return rules
if 'ip_ranges' in kwargs:
rules = self._cidr_args_split(kwargs)
else:
rules = [kwargs]
finalset = []
for rule in rules:
if 'groups' in rule:
groups_values = self._groups_args_split(rule)
for groups_value in groups_values:
final = self._rule_dict_last_step(context, **groups_value)
finalset.append(final)
else:
final = self._rule_dict_last_step(context, **rule)
finalset.append(final)
return finalset
def _cidr_args_split(self, kwargs):
cidr_args_split = []
cidrs = kwargs['ip_ranges']
for key, cidr in cidrs.iteritems():
mykwargs = kwargs.copy()
del mykwargs['ip_ranges']
mykwargs['cidr_ip'] = cidr['cidr_ip']
cidr_args_split.append(mykwargs)
return cidr_args_split
def _groups_args_split(self, kwargs):
groups_args_split = []
groups = kwargs['groups']
for key, group in groups.iteritems():
mykwargs = kwargs.copy()
del mykwargs['groups']
if 'group_name' in group:
mykwargs['source_security_group_name'] = group['group_name']
if 'user_id' in group:
mykwargs['source_security_group_owner_id'] = group['user_id']
if 'group_id' in group:
mykwargs['source_security_group_id'] = group['group_id']
groups_args_split.append(mykwargs)
return groups_args_split
def _rule_dict_last_step(self, context, to_port=None, from_port=None,
ip_protocol=None, cidr_ip=None, user_id=None,
source_security_group_name=None,
source_security_group_owner_id=None):
if source_security_group_name:
source_project_id = self._get_source_project_id(context,
source_security_group_owner_id)
source_security_group = db.security_group_get_by_name(
context.elevated(),
source_project_id,
source_security_group_name)
notfound = exception.SecurityGroupNotFound
if not source_security_group:
raise notfound(security_group_id=source_security_group_name)
group_id = source_security_group['id']
return self.security_group_api.new_group_ingress_rule(
group_id, ip_protocol, from_port, to_port)
else:
cidr = self.security_group_api.parse_cidr(cidr_ip)
return self.security_group_api.new_cidr_ingress_rule(
cidr, ip_protocol, from_port, to_port)
def _validate_group_identifier(self, group_name, group_id):
if not group_name and not group_id:
err = _("Not enough parameters, need group_name or group_id")
raise exception.EC2APIError(err)
def _validate_rulevalues(self, rulesvalues):
if not rulesvalues:
err = _("%s Not enough parameters to build a valid rule")
raise exception.EC2APIError(err % rulesvalues)
def _validate_security_group_protocol(self, values):
validprotocols = ['tcp', 'udp', 'icmp', '6', '17', '1']
if 'ip_protocol' in values and \
values['ip_protocol'] not in validprotocols:
protocol = values['ip_protocol']
err = _("Invalid IP protocol %(protocol)s.") % locals()
raise exception.EC2APIError(message=err, code="400")
def revoke_security_group_ingress(self, context, group_name=None,
group_id=None, **kwargs):
self._validate_group_identifier(group_name, group_id)
security_group = self.security_group_api.get(context, group_name,
group_id)
prevalues = kwargs.get('ip_permissions', [kwargs])
rule_ids = []
for values in prevalues:
rulesvalues = self._rule_args_to_dict(context, values)
self._validate_rulevalues(rulesvalues)
for values_for_rule in rulesvalues:
values_for_rule['parent_group_id'] = security_group['id']
rule_ids.append(self.security_group_api.rule_exists(
security_group, values_for_rule))
rule_ids = [id for id in rule_ids if id]
if rule_ids:
self.security_group_api.remove_rules(context, security_group,
rule_ids)
return True
raise exception.EC2APIError(_("No rule for the specified parameters."))
# TODO(soren): This has only been tested with Boto as the client.
# Unfortunately, it seems Boto is using an old API
# for these operations, so support for newer API versions
# is sketchy.
def authorize_security_group_ingress(self, context, group_name=None,
group_id=None, **kwargs):
self._validate_group_identifier(group_name, group_id)
security_group = self.security_group_api.get(context, group_name,
group_id)
prevalues = kwargs.get('ip_permissions', [kwargs])
postvalues = []
for values in prevalues:
self._validate_security_group_protocol(values)
rulesvalues = self._rule_args_to_dict(context, values)
self._validate_rulevalues(rulesvalues)
for values_for_rule in rulesvalues:
values_for_rule['parent_group_id'] = security_group['id']
if self.security_group_api.rule_exists(security_group,
values_for_rule):
err = _('%s - This rule already exists in group')
raise exception.EC2APIError(err % values_for_rule)
postvalues.append(values_for_rule)
if postvalues:
self.security_group_api.add_rules(context, security_group['id'],
security_group['name'], postvalues)
return True
raise exception.EC2APIError(_("No rule for the specified parameters."))
def _get_source_project_id(self, context, source_security_group_owner_id):
if source_security_group_owner_id:
# Parse user:project for source group.
source_parts = source_security_group_owner_id.split(':')
            # If no project name is given, assume it matches the user name.
# Since we're looking up by project name, the user name is not
# used here. It's only read for EC2 API compatibility.
if len(source_parts) == 2:
source_project_id = source_parts[1]
else:
source_project_id = source_parts[0]
else:
source_project_id = context.project_id
return source_project_id
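    # Illustrative example (added; names are hypothetical):
    # 'someuser:someproject' yields 'someproject', a bare 'someuser' yields
    # 'someuser', and None falls back to the caller's project id.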
def create_security_group(self, context, group_name, group_description):
if isinstance(group_name, unicode):
group_name = group_name.encode('utf-8')
if CONF.ec2_strict_validation:
# EC2 specification gives constraints for name and description:
# Accepts alphanumeric characters, spaces, dashes, and underscores
allowed = '^[a-zA-Z0-9_\- ]+$'
self.security_group_api.validate_property(group_name, 'name',
allowed)
self.security_group_api.validate_property(group_description,
'description', allowed)
else:
# Amazon accepts more symbols.
# So, allow POSIX [:print:] characters.
allowed = r'^[\x20-\x7E]+$'
self.security_group_api.validate_property(group_name, 'name',
allowed)
group_ref = self.security_group_api.create_security_group(
context, group_name, group_description)
return {'securityGroupSet': [self._format_security_group(context,
group_ref)]}
def delete_security_group(self, context, group_name=None, group_id=None,
**kwargs):
if not group_name and not group_id:
err = _("Not enough parameters, need group_name or group_id")
raise exception.EC2APIError(err)
security_group = self.security_group_api.get(context, group_name,
group_id)
self.security_group_api.destroy(context, security_group)
return True
def get_password_data(self, context, instance_id, **kwargs):
# instance_id may be passed in as a list of instances
if isinstance(instance_id, list):
ec2_id = instance_id[0]
else:
ec2_id = instance_id
validate_ec2_id(ec2_id)
instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, ec2_id)
instance = self.compute_api.get(context, instance_uuid)
output = password.extract_password(instance)
# NOTE(vish): this should be timestamp from the metadata fields
# but it isn't important enough to implement properly
now = timeutils.utcnow()
return {"InstanceId": ec2_id,
"Timestamp": now,
"passwordData": output}
def get_console_output(self, context, instance_id, **kwargs):
LOG.audit(_("Get console output for instance %s"), instance_id,
context=context)
# instance_id may be passed in as a list of instances
if isinstance(instance_id, list):
ec2_id = instance_id[0]
else:
ec2_id = instance_id
validate_ec2_id(ec2_id)
instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, ec2_id)
instance = self.compute_api.get(context, instance_uuid)
output = self.compute_api.get_console_output(context, instance)
now = timeutils.utcnow()
return {"InstanceId": ec2_id,
"Timestamp": now,
"output": base64.b64encode(output)}
def describe_volumes(self, context, volume_id=None, **kwargs):
if volume_id:
volumes = []
for ec2_id in volume_id:
validate_ec2_id(ec2_id)
internal_id = ec2utils.ec2_vol_id_to_uuid(ec2_id)
volume = self.volume_api.get(context, internal_id)
volumes.append(volume)
else:
volumes = self.volume_api.get_all(context)
volumes = [self._format_volume(context, v) for v in volumes]
return {'volumeSet': volumes}
def _format_volume(self, context, volume):
instance_ec2_id = None
instance_data = None
if volume.get('instance_uuid', None):
instance_uuid = volume['instance_uuid']
instance = db.instance_get_by_uuid(context.elevated(),
instance_uuid)
instance_ec2_id = ec2utils.id_to_ec2_inst_id(instance_uuid)
instance_data = '%s[%s]' % (instance_ec2_id,
instance['host'])
v = {}
v['volumeId'] = ec2utils.id_to_ec2_vol_id(volume['id'])
v['status'] = volume['status']
v['size'] = volume['size']
v['availabilityZone'] = volume['availability_zone']
v['createTime'] = volume['created_at']
if context.is_admin:
# NOTE(dprince): project_id and host_id are unset w/ Cinder
v['status'] = '%s (%s, %s, %s, %s)' % (
volume['status'],
volume.get('project_id', ''),
volume.get('host', ''),
instance_data,
volume['mountpoint'])
if volume['attach_status'] == 'attached':
v['attachmentSet'] = [{'attachTime': volume['attach_time'],
'deleteOnTermination': False,
'device': volume['mountpoint'],
'instanceId': instance_ec2_id,
'status': 'attached',
'volumeId': v['volumeId']}]
else:
v['attachmentSet'] = [{}]
if volume.get('snapshot_id') is not None:
v['snapshotId'] = ec2utils.id_to_ec2_snap_id(volume['snapshot_id'])
else:
v['snapshotId'] = None
return v
def create_volume(self, context, **kwargs):
snapshot_ec2id = kwargs.get('snapshot_id', None)
if snapshot_ec2id is not None:
snapshot_id = ec2utils.ec2_snap_id_to_uuid(kwargs['snapshot_id'])
snapshot = self.volume_api.get_snapshot(context, snapshot_id)
LOG.audit(_("Create volume from snapshot %s"), snapshot_ec2id,
context=context)
else:
snapshot = None
LOG.audit(_("Create volume of %s GB"),
kwargs.get('size'),
context=context)
create_kwargs = dict(snapshot=snapshot,
volume_type=kwargs.get('volume_type'),
metadata=kwargs.get('metadata'),
availability_zone=kwargs.get('availability_zone'))
volume = self.volume_api.create(context,
kwargs.get('size'),
kwargs.get('name'),
kwargs.get('description'),
**create_kwargs)
db.ec2_volume_create(context, volume['id'])
# TODO(vish): Instance should be None at db layer instead of
# trying to lazy load, but for now we turn it into
# a dict to avoid an error.
return self._format_volume(context, dict(volume))
def delete_volume(self, context, volume_id, **kwargs):
validate_ec2_id(volume_id)
volume_id = ec2utils.ec2_vol_id_to_uuid(volume_id)
try:
volume = self.volume_api.get(context, volume_id)
self.volume_api.delete(context, volume)
except exception.InvalidVolume:
raise exception.EC2APIError(_('Delete Failed'))
return True
def attach_volume(self, context,
volume_id,
instance_id,
device, **kwargs):
validate_ec2_id(instance_id)
validate_ec2_id(volume_id)
volume_id = ec2utils.ec2_vol_id_to_uuid(volume_id)
instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, instance_id)
instance = self.compute_api.get(context, instance_uuid)
msg = _("Attach volume %(volume_id)s to instance %(instance_id)s"
" at %(device)s") % locals()
LOG.audit(msg, context=context)
try:
self.compute_api.attach_volume(context, instance,
volume_id, device)
except exception.InvalidVolume:
raise exception.EC2APIError(_('Attach Failed.'))
volume = self.volume_api.get(context, volume_id)
return {'attachTime': volume['attach_time'],
'device': volume['mountpoint'],
'instanceId': ec2utils.id_to_ec2_inst_id(instance_uuid),
'requestId': context.request_id,
'status': volume['attach_status'],
'volumeId': ec2utils.id_to_ec2_vol_id(volume_id)}
def _get_instance_from_volume(self, context, volume):
if volume['instance_uuid']:
try:
return db.instance_get_by_uuid(context,
volume['instance_uuid'])
except exception.InstanceNotFound:
pass
raise exception.VolumeUnattached(volume_id=volume['id'])
def detach_volume(self, context, volume_id, **kwargs):
validate_ec2_id(volume_id)
volume_id = ec2utils.ec2_vol_id_to_uuid(volume_id)
LOG.audit(_("Detach volume %s"), volume_id, context=context)
volume = self.volume_api.get(context, volume_id)
instance = self._get_instance_from_volume(context, volume)
try:
self.compute_api.detach_volume(context, instance, volume)
except exception.InvalidVolume:
raise exception.EC2APIError(_('Detach Volume Failed.'))
return {'attachTime': volume['attach_time'],
'device': volume['mountpoint'],
'instanceId': ec2utils.id_to_ec2_inst_id(
volume['instance_uuid']),
'requestId': context.request_id,
'status': volume['attach_status'],
'volumeId': ec2utils.id_to_ec2_vol_id(volume_id)}
def _format_kernel_id(self, context, instance_ref, result, key):
kernel_uuid = instance_ref['kernel_id']
if kernel_uuid is None or kernel_uuid == '':
return
result[key] = ec2utils.glance_id_to_ec2_id(context, kernel_uuid, 'aki')
def _format_ramdisk_id(self, context, instance_ref, result, key):
ramdisk_uuid = instance_ref['ramdisk_id']
if ramdisk_uuid is None or ramdisk_uuid == '':
return
result[key] = ec2utils.glance_id_to_ec2_id(context, ramdisk_uuid,
'ari')
def describe_instance_attribute(self, context, instance_id, attribute,
**kwargs):
def _unsupported_attribute(instance, result):
raise exception.EC2APIError(_('attribute not supported: %s') %
attribute)
def _format_attr_block_device_mapping(instance, result):
tmp = {}
self._format_instance_root_device_name(instance, tmp)
self._format_instance_bdm(context, instance['uuid'],
tmp['rootDeviceName'], result)
def _format_attr_disable_api_termination(instance, result):
result['disableApiTermination'] = instance['disable_terminate']
def _format_attr_group_set(instance, result):
CloudController._format_group_set(instance, result)
def _format_attr_instance_initiated_shutdown_behavior(instance,
result):
if instance['shutdown_terminate']:
result['instanceInitiatedShutdownBehavior'] = 'terminate'
else:
result['instanceInitiatedShutdownBehavior'] = 'stop'
def _format_attr_instance_type(instance, result):
self._format_instance_type(instance, result)
def _format_attr_kernel(instance, result):
self._format_kernel_id(context, instance, result, 'kernel')
def _format_attr_ramdisk(instance, result):
self._format_ramdisk_id(context, instance, result, 'ramdisk')
def _format_attr_root_device_name(instance, result):
self._format_instance_root_device_name(instance, result)
def _format_attr_source_dest_check(instance, result):
_unsupported_attribute(instance, result)
def _format_attr_user_data(instance, result):
result['userData'] = base64.b64decode(instance['user_data'])
attribute_formatter = {
'blockDeviceMapping': _format_attr_block_device_mapping,
'disableApiTermination': _format_attr_disable_api_termination,
'groupSet': _format_attr_group_set,
'instanceInitiatedShutdownBehavior':
_format_attr_instance_initiated_shutdown_behavior,
'instanceType': _format_attr_instance_type,
'kernel': _format_attr_kernel,
'ramdisk': _format_attr_ramdisk,
'rootDeviceName': _format_attr_root_device_name,
'sourceDestCheck': _format_attr_source_dest_check,
'userData': _format_attr_user_data,
}
fn = attribute_formatter.get(attribute)
if fn is None:
raise exception.EC2APIError(
_('attribute not supported: %s') % attribute)
validate_ec2_id(instance_id)
instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, instance_id)
instance = self.compute_api.get(context, instance_uuid)
result = {'instance_id': instance_id}
fn(instance, result)
return result
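    # Illustrative example (added; the id and flavor are hypothetical):
    # describe_instance_attribute(ctx, 'i-0000001e', 'instanceType') would
    # return {'instance_id': 'i-0000001e', 'instanceType': 'm1.small'}.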
def describe_instances(self, context, **kwargs):
# Optional DescribeInstances argument
instance_id = kwargs.get('instance_id', None)
filters = kwargs.get('filter', None)
instances = self._enforce_valid_instance_ids(context, instance_id)
return self._format_describe_instances(context,
instance_id=instance_id,
                                               instances_cache=instances,
filter=filters)
def describe_instances_v6(self, context, **kwargs):
# Optional DescribeInstancesV6 argument
instance_id = kwargs.get('instance_id', None)
filters = kwargs.get('filter', None)
instances = self._enforce_valid_instance_ids(context, instance_id)
return self._format_describe_instances(context,
instance_id=instance_id,
                                               instances_cache=instances,
filter=filters,
use_v6=True)
def _format_describe_instances(self, context, **kwargs):
return {'reservationSet': self._format_instances(context, **kwargs)}
def _format_run_instances(self, context, reservation_id):
i = self._format_instances(context, reservation_id=reservation_id)
assert len(i) == 1
return i[0]
def _format_terminate_instances(self, context, instance_id,
previous_states):
instances_set = []
for (ec2_id, previous_state) in zip(instance_id, previous_states):
i = {}
i['instanceId'] = ec2_id
i['previousState'] = _state_description(previous_state['vm_state'],
previous_state['shutdown_terminate'])
try:
instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, ec2_id)
instance = self.compute_api.get(context, instance_uuid)
i['currentState'] = _state_description(instance['vm_state'],
instance['shutdown_terminate'])
except exception.NotFound:
i['currentState'] = _state_description(vm_states.DELETED,
True)
instances_set.append(i)
return {'instancesSet': instances_set}
def _format_instance_bdm(self, context, instance_uuid, root_device_name,
result):
"""Format InstanceBlockDeviceMappingResponseItemType."""
root_device_type = 'instance-store'
mapping = []
for bdm in db.block_device_mapping_get_all_by_instance(context,
instance_uuid):
volume_id = bdm['volume_id']
if (volume_id is None or bdm['no_device']):
continue
if (bdm['device_name'] == root_device_name and
(bdm['snapshot_id'] or bdm['volume_id'])):
assert not bdm['virtual_name']
root_device_type = 'ebs'
vol = self.volume_api.get(context, volume_id)
LOG.debug(_("vol = %s\n"), vol)
# TODO(yamahata): volume attach time
ebs = {'volumeId': volume_id,
'deleteOnTermination': bdm['delete_on_termination'],
'attachTime': vol['attach_time'] or '',
'status': vol['status'], }
res = {'deviceName': bdm['device_name'],
'ebs': ebs, }
mapping.append(res)
if mapping:
result['blockDeviceMapping'] = mapping
result['rootDeviceType'] = root_device_type
@staticmethod
def _format_instance_root_device_name(instance, result):
result['rootDeviceName'] = (instance.get('root_device_name') or
block_device.DEFAULT_ROOT_DEV_NAME)
@staticmethod
def _format_instance_type(instance, result):
instance_type = instance_types.extract_instance_type(instance)
result['instanceType'] = instance_type['name']
@staticmethod
def _format_group_set(instance, result):
security_group_names = []
if instance.get('security_groups'):
for security_group in instance['security_groups']:
security_group_names.append(security_group['name'])
result['groupSet'] = utils.convert_to_list_dict(
security_group_names, 'groupId')
def _format_instances(self, context, instance_id=None, use_v6=False,
instances_cache=None, **search_opts):
# TODO(termie): this method is poorly named as its name does not imply
# that it will be making a variety of database calls
# rather than simply formatting a bunch of instances that
# were handed to it
reservations = {}
if not instances_cache:
instances_cache = {}
# NOTE(vish): instance_id is an optional list of ids to filter by
if instance_id:
instances = []
for ec2_id in instance_id:
if ec2_id in instances_cache:
instances.append(instances_cache[ec2_id])
else:
try:
instance_uuid = ec2utils.ec2_inst_id_to_uuid(context,
ec2_id)
instance = self.compute_api.get(context, instance_uuid)
except exception.NotFound:
continue
instances.append(instance)
else:
try:
# always filter out deleted instances
search_opts['deleted'] = False
instances = self.compute_api.get_all(context,
search_opts=search_opts,
sort_dir='asc')
except exception.NotFound:
instances = []
for instance in instances:
if not context.is_admin:
if pipelib.is_vpn_image(instance['image_ref']):
continue
i = {}
instance_uuid = instance['uuid']
ec2_id = ec2utils.id_to_ec2_inst_id(instance_uuid)
i['instanceId'] = ec2_id
image_uuid = instance['image_ref']
i['imageId'] = ec2utils.glance_id_to_ec2_id(context, image_uuid)
self._format_kernel_id(context, instance, i, 'kernelId')
self._format_ramdisk_id(context, instance, i, 'ramdiskId')
i['instanceState'] = _state_description(
instance['vm_state'], instance['shutdown_terminate'])
fixed_ip = None
floating_ip = None
ip_info = ec2utils.get_ip_info_for_instance(context, instance)
if ip_info['fixed_ips']:
fixed_ip = ip_info['fixed_ips'][0]
if ip_info['floating_ips']:
floating_ip = ip_info['floating_ips'][0]
if ip_info['fixed_ip6s']:
i['dnsNameV6'] = ip_info['fixed_ip6s'][0]
if CONF.ec2_private_dns_show_ip:
i['privateDnsName'] = fixed_ip
else:
i['privateDnsName'] = instance['hostname']
i['privateIpAddress'] = fixed_ip
i['publicDnsName'] = floating_ip
i['ipAddress'] = floating_ip or fixed_ip
i['dnsName'] = i['publicDnsName'] or i['privateDnsName']
i['keyName'] = instance['key_name']
if context.is_admin:
i['keyName'] = '%s (%s, %s)' % (i['keyName'],
instance['project_id'],
instance['host'])
i['productCodesSet'] = utils.convert_to_list_dict([],
'product_codes')
self._format_instance_type(instance, i)
i['launchTime'] = instance['created_at']
i['amiLaunchIndex'] = instance['launch_index']
self._format_instance_root_device_name(instance, i)
self._format_instance_bdm(context, instance['uuid'],
i['rootDeviceName'], i)
host = instance['host']
zone = ec2utils.get_availability_zone_by_host(host)
i['placement'] = {'availabilityZone': zone}
if instance['reservation_id'] not in reservations:
r = {}
r['reservationId'] = instance['reservation_id']
r['ownerId'] = instance['project_id']
self._format_group_set(instance, r)
r['instancesSet'] = []
reservations[instance['reservation_id']] = r
reservations[instance['reservation_id']]['instancesSet'].append(i)
return list(reservations.values())
def describe_addresses(self, context, public_ip=None, **kwargs):
if public_ip:
floatings = []
for address in public_ip:
floating = self.network_api.get_floating_ip_by_address(context,
address)
floatings.append(floating)
else:
floatings = self.network_api.get_floating_ips_by_project(context)
addresses = [self._format_address(context, f) for f in floatings]
return {'addressesSet': addresses}
def _format_address(self, context, floating_ip):
ec2_id = None
if floating_ip['fixed_ip_id']:
fixed_id = floating_ip['fixed_ip_id']
fixed = self.network_api.get_fixed_ip(context, fixed_id)
if fixed['instance_uuid'] is not None:
ec2_id = ec2utils.id_to_ec2_inst_id(fixed['instance_uuid'])
address = {'public_ip': floating_ip['address'],
'instance_id': ec2_id}
if context.is_admin:
details = "%s (%s)" % (address['instance_id'],
floating_ip['project_id'])
address['instance_id'] = details
return address
def allocate_address(self, context, **kwargs):
LOG.audit(_("Allocate address"), context=context)
try:
public_ip = self.network_api.allocate_floating_ip(context)
except exception.FloatingIpLimitExceeded:
raise exception.EC2APIError(_('No more floating IPs available'))
return {'publicIp': public_ip}
def release_address(self, context, public_ip, **kwargs):
LOG.audit(_("Release address %s"), public_ip, context=context)
try:
self.network_api.release_floating_ip(context, address=public_ip)
return {'return': "true"}
except exception.FloatingIpNotFound:
raise exception.EC2APIError(_('Unable to release IP Address.'))
def associate_address(self, context, instance_id, public_ip, **kwargs):
LOG.audit(_("Associate address %(public_ip)s to"
" instance %(instance_id)s") % locals(), context=context)
instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, instance_id)
instance = self.compute_api.get(context, instance_uuid)
cached_ipinfo = ec2utils.get_ip_info_for_instance(context, instance)
fixed_ips = cached_ipinfo['fixed_ips'] + cached_ipinfo['fixed_ip6s']
if not fixed_ips:
msg = _('Unable to associate IP Address, no fixed_ips.')
raise exception.EC2APIError(msg)
# TODO(tr3buchet): this will associate the floating IP with the
# first fixed_ip an instance has. This should be
# changed to support specifying a particular fixed_ip if
# multiple exist but this may not apply to ec2..
if len(fixed_ips) > 1:
msg = _('multiple fixed_ips exist, using the first: %s')
LOG.warning(msg, fixed_ips[0])
try:
self.network_api.associate_floating_ip(context, instance,
floating_address=public_ip,
fixed_address=fixed_ips[0])
return {'return': 'true'}
except exception.FloatingIpAssociated:
msg = _('Floating ip is already associated.')
raise exception.EC2APIError(msg)
except exception.NoFloatingIpInterface:
msg = _('l3driver call to add floating ip failed.')
raise exception.EC2APIError(msg)
except Exception:
msg = _('Error, unable to associate floating ip.')
LOG.exception(msg)
raise exception.EC2APIError(msg)
def disassociate_address(self, context, public_ip, **kwargs):
instance_id = self.network_api.get_instance_id_by_floating_address(
context, public_ip)
instance = self.compute_api.get(context, instance_id)
LOG.audit(_("Disassociate address %s"), public_ip, context=context)
try:
self.network_api.disassociate_floating_ip(context, instance,
address=public_ip)
except exception.FloatingIpNotAssociated:
msg = _('Floating ip is not associated.')
raise exception.EC2APIError(msg)
except exception.CannotDisassociateAutoAssignedFloatingIP:
msg = _('Cannot disassociate auto assigned floating ip')
raise exception.EC2APIError(msg)
return {'return': "true"}
def run_instances(self, context, **kwargs):
min_count = int(kwargs.get('min_count', 1))
if kwargs.get('kernel_id'):
kernel = self._get_image(context, kwargs['kernel_id'])
kwargs['kernel_id'] = ec2utils.id_to_glance_id(context,
kernel['id'])
if kwargs.get('ramdisk_id'):
ramdisk = self._get_image(context, kwargs['ramdisk_id'])
kwargs['ramdisk_id'] = ec2utils.id_to_glance_id(context,
ramdisk['id'])
for bdm in kwargs.get('block_device_mapping', []):
_parse_block_device_mapping(bdm)
image = self._get_image(context, kwargs['image_id'])
image_uuid = ec2utils.id_to_glance_id(context, image['id'])
if image:
image_state = self._get_image_state(image)
else:
raise exception.ImageNotFoundEC2(image_id=kwargs['image_id'])
if image_state != 'available':
raise exception.EC2APIError(_('Image must be available'))
(instances, resv_id) = self.compute_api.create(context,
instance_type=instance_types.get_instance_type_by_name(
kwargs.get('instance_type', None)),
image_href=image_uuid,
max_count=int(kwargs.get('max_count', min_count)),
min_count=min_count,
kernel_id=kwargs.get('kernel_id'),
ramdisk_id=kwargs.get('ramdisk_id'),
key_name=kwargs.get('key_name'),
user_data=kwargs.get('user_data'),
security_group=kwargs.get('security_group'),
availability_zone=kwargs.get('placement', {}).get(
'availability_zone'),
block_device_mapping=kwargs.get('block_device_mapping', {}))
return self._format_run_instances(context, resv_id)
def _ec2_ids_to_instances(self, context, instance_id):
"""Get all instances first, to prevent partial executions."""
instances = []
for ec2_id in instance_id:
validate_ec2_id(ec2_id)
instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, ec2_id)
instance = self.compute_api.get(context, instance_uuid)
instances.append(instance)
return instances
def terminate_instances(self, context, instance_id, **kwargs):
"""Terminate each instance in instance_id, which is a list of ec2 ids.
instance_id is a kwarg so its name cannot be modified."""
previous_states = self._ec2_ids_to_instances(context, instance_id)
LOG.debug(_("Going to start terminating instances"))
for instance in previous_states:
self.compute_api.delete(context, instance)
return self._format_terminate_instances(context,
instance_id,
previous_states)
def reboot_instances(self, context, instance_id, **kwargs):
"""instance_id is a list of instance ids."""
instances = self._ec2_ids_to_instances(context, instance_id)
LOG.audit(_("Reboot instance %r"), instance_id, context=context)
for instance in instances:
self.compute_api.reboot(context, instance, 'HARD')
return True
def stop_instances(self, context, instance_id, **kwargs):
"""Stop each instances in instance_id.
Here instance_id is a list of instance ids"""
instances = self._ec2_ids_to_instances(context, instance_id)
LOG.debug(_("Going to stop instances"))
for instance in instances:
self.compute_api.stop(context, instance)
return True
def start_instances(self, context, instance_id, **kwargs):
"""Start each instances in instance_id.
Here instance_id is a list of instance ids"""
instances = self._ec2_ids_to_instances(context, instance_id)
LOG.debug(_("Going to start instances"))
for instance in instances:
self.compute_api.start(context, instance)
return True
def _get_image(self, context, ec2_id):
try:
internal_id = ec2utils.ec2_id_to_id(ec2_id)
image = self.image_service.show(context, internal_id)
except (exception.InvalidEc2Id, exception.ImageNotFound):
filters = {'name': ec2_id}
images = self.image_service.detail(context, filters=filters)
try:
return images[0]
except IndexError:
raise exception.ImageNotFound(image_id=ec2_id)
image_type = ec2_id.split('-')[0]
if ec2utils.image_type(image.get('container_format')) != image_type:
raise exception.ImageNotFound(image_id=ec2_id)
return image
def _format_image(self, image):
"""Convert from format defined by GlanceImageService to S3 format."""
i = {}
image_type = ec2utils.image_type(image.get('container_format'))
ec2_id = ec2utils.image_ec2_id(image.get('id'), image_type)
name = image.get('name')
i['imageId'] = ec2_id
kernel_id = image['properties'].get('kernel_id')
if kernel_id:
i['kernelId'] = ec2utils.image_ec2_id(kernel_id, 'aki')
ramdisk_id = image['properties'].get('ramdisk_id')
if ramdisk_id:
i['ramdiskId'] = ec2utils.image_ec2_id(ramdisk_id, 'ari')
i['imageOwnerId'] = image.get('owner')
img_loc = image['properties'].get('image_location')
if img_loc:
i['imageLocation'] = img_loc
else:
i['imageLocation'] = "%s (%s)" % (img_loc, name)
i['name'] = name
if not name and img_loc:
# This should only occur for images registered with ec2 api
# prior to that api populating the glance name
i['name'] = img_loc
i['imageState'] = self._get_image_state(image)
i['description'] = image.get('description')
display_mapping = {'aki': 'kernel',
'ari': 'ramdisk',
'ami': 'machine'}
i['imageType'] = display_mapping.get(image_type)
i['isPublic'] = not not image.get('is_public')
i['architecture'] = image['properties'].get('architecture')
properties = image['properties']
root_device_name = block_device.properties_root_device_name(properties)
root_device_type = 'instance-store'
for bdm in properties.get('block_device_mapping', []):
if (block_device.strip_dev(bdm.get('device_name')) ==
block_device.strip_dev(root_device_name) and
('snapshot_id' in bdm or 'volume_id' in bdm) and
not bdm.get('no_device')):
root_device_type = 'ebs'
i['rootDeviceName'] = (root_device_name or
block_device.DEFAULT_ROOT_DEV_NAME)
i['rootDeviceType'] = root_device_type
_format_mappings(properties, i)
return i
def describe_images(self, context, image_id=None, **kwargs):
# NOTE: image_id is a list!
if image_id:
images = []
for ec2_id in image_id:
try:
image = self._get_image(context, ec2_id)
except exception.NotFound:
raise exception.ImageNotFound(image_id=ec2_id)
images.append(image)
else:
images = self.image_service.detail(context)
images = [self._format_image(i) for i in images]
return {'imagesSet': images}
def deregister_image(self, context, image_id, **kwargs):
LOG.audit(_("De-registering image %s"), image_id, context=context)
image = self._get_image(context, image_id)
internal_id = image['id']
self.image_service.delete(context, internal_id)
return True
def _register_image(self, context, metadata):
image = self.image_service.create(context, metadata)
image_type = ec2utils.image_type(image.get('container_format'))
image_id = ec2utils.image_ec2_id(image['id'], image_type)
return image_id
def register_image(self, context, image_location=None, **kwargs):
if image_location is None and kwargs.get('name'):
image_location = kwargs['name']
if image_location is None:
raise exception.EC2APIError(_('imageLocation is required'))
metadata = {'properties': {'image_location': image_location}}
if kwargs.get('name'):
metadata['name'] = kwargs['name']
else:
metadata['name'] = image_location
if 'root_device_name' in kwargs:
metadata['properties']['root_device_name'] = kwargs.get(
'root_device_name')
mappings = [_parse_block_device_mapping(bdm) for bdm in
kwargs.get('block_device_mapping', [])]
if mappings:
metadata['properties']['block_device_mapping'] = mappings
image_id = self._register_image(context, metadata)
msg = _("Registered image %(image_location)s with"
" id %(image_id)s") % locals()
LOG.audit(msg, context=context)
return {'imageId': image_id}
def describe_image_attribute(self, context, image_id, attribute, **kwargs):
def _block_device_mapping_attribute(image, result):
_format_mappings(image['properties'], result)
def _launch_permission_attribute(image, result):
result['launchPermission'] = []
if image['is_public']:
result['launchPermission'].append({'group': 'all'})
def _root_device_name_attribute(image, result):
_prop_root_dev_name = block_device.properties_root_device_name
result['rootDeviceName'] = _prop_root_dev_name(image['properties'])
if result['rootDeviceName'] is None:
result['rootDeviceName'] = block_device.DEFAULT_ROOT_DEV_NAME
def _kernel_attribute(image, result):
kernel_id = image['properties'].get('kernel_id')
if kernel_id:
result['kernel'] = {
'value': ec2utils.image_ec2_id(kernel_id, 'aki')
}
def _ramdisk_attribute(image, result):
ramdisk_id = image['properties'].get('ramdisk_id')
if ramdisk_id:
result['ramdisk'] = {
'value': ec2utils.image_ec2_id(ramdisk_id, 'ari')
}
supported_attributes = {
'blockDeviceMapping': _block_device_mapping_attribute,
'launchPermission': _launch_permission_attribute,
'rootDeviceName': _root_device_name_attribute,
'kernel': _kernel_attribute,
'ramdisk': _ramdisk_attribute,
}
fn = supported_attributes.get(attribute)
if fn is None:
raise exception.EC2APIError(_('attribute not supported: %s')
% attribute)
try:
image = self._get_image(context, image_id)
except exception.NotFound:
raise exception.ImageNotFound(image_id=image_id)
result = {'imageId': image_id}
fn(image, result)
return result
def modify_image_attribute(self, context, image_id, attribute,
operation_type, **kwargs):
# TODO(devcamcar): Support users and groups other than 'all'.
if attribute != 'launchPermission':
raise exception.EC2APIError(_('attribute not supported: %s')
% attribute)
if 'user_group' not in kwargs:
raise exception.EC2APIError(_('user or group not specified'))
        if len(kwargs['user_group']) != 1 or kwargs['user_group'][0] != 'all':
raise exception.EC2APIError(_('only group "all" is supported'))
if operation_type not in ['add', 'remove']:
msg = _('operation_type must be add or remove')
raise exception.EC2APIError(msg)
LOG.audit(_("Updating image %s publicity"), image_id, context=context)
try:
image = self._get_image(context, image_id)
except exception.NotFound:
raise exception.ImageNotFound(image_id=image_id)
internal_id = image['id']
del(image['id'])
image['is_public'] = (operation_type == 'add')
try:
return self.image_service.update(context, internal_id, image)
except exception.ImageNotAuthorized:
msg = _('Not allowed to modify attributes for image %s')
raise exception.EC2APIError(msg % image_id)
def update_image(self, context, image_id, **kwargs):
internal_id = ec2utils.ec2_id_to_id(image_id)
result = self.image_service.update(context, internal_id, dict(kwargs))
return result
# TODO(yamahata): race condition
# At the moment there is no way to prevent others from
# manipulating instances/volumes/snapshots.
# As other code doesn't take it into consideration, here we don't
    # care about it for now (ostrich algorithm).
def create_image(self, context, instance_id, **kwargs):
# NOTE(yamahata): name/description are ignored by register_image(),
        #                 so handle them here
no_reboot = kwargs.get('no_reboot', False)
name = kwargs.get('name')
validate_ec2_id(instance_id)
ec2_instance_id = instance_id
instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, ec2_instance_id)
instance = self.compute_api.get(context, instance_uuid)
bdms = self.compute_api.get_instance_bdms(context, instance)
# CreateImage only supported for the analogue of EBS-backed instances
if not self.compute_api.is_volume_backed_instance(context, instance,
bdms):
root = instance['root_device_name']
msg = _("Invalid value '%(ec2_instance_id)s' for instanceId. "
"Instance does not have a volume attached at root "
"(%(root)s)") % locals()
raise exception.InvalidParameterValue(err=msg)
# stop the instance if necessary
restart_instance = False
if not no_reboot:
vm_state = instance['vm_state']
            # if the instance is in a transitional state, refuse to proceed.
if vm_state not in (vm_states.ACTIVE, vm_states.STOPPED):
raise exception.InstanceNotRunning(instance_id=ec2_instance_id)
if vm_state == vm_states.ACTIVE:
restart_instance = True
self.compute_api.stop(context, instance)
            # wait for the instance to be fully stopped
start_time = time.time()
while vm_state != vm_states.STOPPED:
time.sleep(1)
instance = self.compute_api.get(context, instance_uuid)
vm_state = instance['vm_state']
# NOTE(yamahata): timeout and error. 1 hour for now for safety.
# Is it too short/long?
# Or is there any better way?
timeout = 1 * 60 * 60
if time.time() > start_time + timeout:
raise exception.EC2APIError(
                        _('Couldn\'t stop instance within %d sec') % timeout)
glance_uuid = instance['image_ref']
ec2_image_id = ec2utils.glance_id_to_ec2_id(context, glance_uuid)
src_image = self._get_image(context, ec2_image_id)
image_meta = dict(src_image)
def _unmap_id_property(properties, name):
if properties[name]:
properties[name] = ec2utils.id_to_glance_id(context,
properties[name])
# ensure the ID properties are unmapped back to the glance UUID
_unmap_id_property(image_meta['properties'], 'kernel_id')
_unmap_id_property(image_meta['properties'], 'ramdisk_id')
# meaningful image name
name_map = dict(instance=instance['uuid'], now=timeutils.isotime())
name = name or _('image of %(instance)s at %(now)s') % name_map
new_image = self.compute_api.snapshot_volume_backed(context,
instance,
image_meta,
name)
ec2_id = ec2utils.glance_id_to_ec2_id(context, new_image['id'])
if restart_instance:
self.compute_api.start(context, instance)
return {'imageId': ec2_id}
class EC2SecurityGroupExceptions(object):
@staticmethod
def raise_invalid_property(msg):
raise exception.InvalidParameterValue(err=msg)
@staticmethod
def raise_group_already_exists(msg):
raise exception.EC2APIError(message=msg)
@staticmethod
def raise_invalid_group(msg):
raise exception.InvalidGroup(reason=msg)
@staticmethod
def raise_invalid_cidr(cidr, decoding_exception=None):
if decoding_exception:
raise decoding_exception
else:
raise exception.EC2APIError(_("Invalid CIDR"))
@staticmethod
def raise_over_quota(msg):
raise exception.EC2APIError(message=msg)
@staticmethod
def raise_not_found(msg):
pass
class CloudSecurityGroupNovaAPI(EC2SecurityGroupExceptions,
compute_api.SecurityGroupAPI):
pass
class CloudSecurityGroupQuantumAPI(EC2SecurityGroupExceptions,
quantum_driver.SecurityGroupAPI):
pass
def get_cloud_security_group_api():
if cfg.CONF.security_group_api.lower() == 'nova':
return CloudSecurityGroupNovaAPI()
elif cfg.CONF.security_group_api.lower() == 'quantum':
return CloudSecurityGroupQuantumAPI()
else:
raise NotImplementedError()
| {
"content_hash": "9d7dfa6f83a502999993e9c920e4576b",
"timestamp": "",
"source": "github",
"line_count": 1708,
"max_line_length": 79,
"avg_line_length": 43.290983606557376,
"alnum_prop": 0.5494515897810417,
"repo_name": "gspilio/nova",
"id": "0d06dde33db916e6c0563c087404b4b2f9063e12",
"size": "74718",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/api/ec2/cloud.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "9099328"
},
{
"name": "Shell",
"bytes": "17117"
}
],
"symlink_target": ""
} |
"""
========================================
Plot multi-class SGD on the iris dataset
========================================
Plot decision surface of multi-class SGD on iris dataset.
The hyperplanes corresponding to the three one-versus-all (OVA) classifiers
are represented by the dashed lines.
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.linear_model import SGDClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
colors = "bry"
# shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
h = .02 # step size in the mesh
clf = SGDClassifier(alpha=0.001, n_iter=100).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('tight')
# Plot also the training points
for i, color in zip(clf.classes_, colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.title("Decision surface of multi-class SGD")
plt.axis('tight')
# Plot the three one-against-all classifiers
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
coef = clf.coef_
intercept = clf.intercept_
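# Each OVA hyperplane is the set of points where
#   coef[c, 0] * x0 + coef[c, 1] * x1 + intercept[c] = 0;
# solving for x1 gives the dashed boundary line drawn by plot_hyperplane below.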
def plot_hyperplane(c, color):
def line(x0):
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
plt.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
for i, color in zip(clf.classes_, colors):
plot_hyperplane(i, color)
plt.legend()
plt.show()
| {
"content_hash": "e3cc12d11a5399624fe9e3fc9978ae06",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 75,
"avg_line_length": 26.925925925925927,
"alnum_prop": 0.6263182026593306,
"repo_name": "DailyActie/Surrogate-Model",
"id": "d9c64cdc37c1e64f7135543ef2eb9fe0db6e537e",
"size": "2181",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "01-codes/scikit-learn-master/examples/linear_model/plot_sgd_iris.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "345"
},
{
"name": "Batchfile",
"bytes": "18746"
},
{
"name": "C",
"bytes": "13004913"
},
{
"name": "C++",
"bytes": "14692003"
},
{
"name": "CMake",
"bytes": "72831"
},
{
"name": "CSS",
"bytes": "303488"
},
{
"name": "Fortran",
"bytes": "7339415"
},
{
"name": "HTML",
"bytes": "854774"
},
{
"name": "Java",
"bytes": "38854"
},
{
"name": "JavaScript",
"bytes": "2432846"
},
{
"name": "Jupyter Notebook",
"bytes": "829689"
},
{
"name": "M4",
"bytes": "1379"
},
{
"name": "Makefile",
"bytes": "48708"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "PHP",
"bytes": "93585"
},
{
"name": "Pascal",
"bytes": "1449"
},
{
"name": "Perl",
"bytes": "1152272"
},
{
"name": "PowerShell",
"bytes": "17042"
},
{
"name": "Python",
"bytes": "34668203"
},
{
"name": "Roff",
"bytes": "5925"
},
{
"name": "Ruby",
"bytes": "92498"
},
{
"name": "Shell",
"bytes": "94698"
},
{
"name": "TeX",
"bytes": "156540"
},
{
"name": "TypeScript",
"bytes": "41691"
}
],
"symlink_target": ""
} |
"""Support for Tasmota sensors."""
from __future__ import annotations
from datetime import datetime
from typing import Any
from hatasmota import const as hc, sensor as tasmota_sensor, status_sensor
from hatasmota.entity import TasmotaEntity as HATasmotaEntity
from hatasmota.models import DiscoveryHashType
from homeassistant.components import sensor
from homeassistant.components.sensor import (
SensorDeviceClass,
SensorEntity,
SensorStateClass,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
CONCENTRATION_PARTS_PER_BILLION,
CONCENTRATION_PARTS_PER_MILLION,
ELECTRIC_CURRENT_AMPERE,
ELECTRIC_POTENTIAL_VOLT,
ENERGY_KILO_WATT_HOUR,
FREQUENCY_HERTZ,
LENGTH_CENTIMETERS,
LIGHT_LUX,
MASS_KILOGRAMS,
PERCENTAGE,
POWER_VOLT_AMPERE,
POWER_WATT,
PRESSURE_HPA,
SIGNAL_STRENGTH_DECIBELS,
SIGNAL_STRENGTH_DECIBELS_MILLIWATT,
SPEED_KILOMETERS_PER_HOUR,
SPEED_METERS_PER_SECOND,
SPEED_MILES_PER_HOUR,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
TEMP_KELVIN,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import EntityCategory
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .const import DATA_REMOVE_DISCOVER_COMPONENT
from .discovery import TASMOTA_DISCOVERY_ENTITY_NEW
from .mixins import TasmotaAvailability, TasmotaDiscoveryUpdate
DEVICE_CLASS = "device_class"
STATE_CLASS = "state_class"
ICON = "icon"
# A Tasmota sensor type may be mapped to either a device class or an icon, not both
SENSOR_DEVICE_CLASS_ICON_MAP: dict[str, dict[str, Any]] = {
hc.SENSOR_AMBIENT: {
DEVICE_CLASS: SensorDeviceClass.ILLUMINANCE,
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_APPARENT_POWERUSAGE: {
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_BATTERY: {
DEVICE_CLASS: SensorDeviceClass.BATTERY,
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_CCT: {
ICON: "mdi:temperature-kelvin",
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_CO2: {
DEVICE_CLASS: SensorDeviceClass.CO2,
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_COLOR_BLUE: {ICON: "mdi:palette"},
hc.SENSOR_COLOR_GREEN: {ICON: "mdi:palette"},
hc.SENSOR_COLOR_RED: {ICON: "mdi:palette"},
hc.SENSOR_CURRENT: {
ICON: "mdi:alpha-a-circle-outline",
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_DEWPOINT: {
DEVICE_CLASS: SensorDeviceClass.TEMPERATURE,
ICON: "mdi:weather-rainy",
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_DISTANCE: {
ICON: "mdi:leak",
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_ECO2: {ICON: "mdi:molecule-co2"},
hc.SENSOR_FREQUENCY: {
DEVICE_CLASS: SensorDeviceClass.FREQUENCY,
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_HUMIDITY: {
DEVICE_CLASS: SensorDeviceClass.HUMIDITY,
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_ILLUMINANCE: {
DEVICE_CLASS: SensorDeviceClass.ILLUMINANCE,
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_STATUS_IP: {ICON: "mdi:ip-network"},
hc.SENSOR_STATUS_LINK_COUNT: {ICON: "mdi:counter"},
hc.SENSOR_MOISTURE: {ICON: "mdi:cup-water"},
hc.SENSOR_STATUS_MQTT_COUNT: {ICON: "mdi:counter"},
hc.SENSOR_PB0_3: {ICON: "mdi:flask"},
hc.SENSOR_PB0_5: {ICON: "mdi:flask"},
hc.SENSOR_PB10: {ICON: "mdi:flask"},
hc.SENSOR_PB1: {ICON: "mdi:flask"},
hc.SENSOR_PB2_5: {ICON: "mdi:flask"},
hc.SENSOR_PB5: {ICON: "mdi:flask"},
hc.SENSOR_PM10: {
DEVICE_CLASS: SensorDeviceClass.PM10,
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_PM1: {
DEVICE_CLASS: SensorDeviceClass.PM1,
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_PM2_5: {
DEVICE_CLASS: SensorDeviceClass.PM25,
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_POWERFACTOR: {
ICON: "mdi:alpha-f-circle-outline",
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_POWERUSAGE: {
DEVICE_CLASS: SensorDeviceClass.POWER,
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_PRESSURE: {
DEVICE_CLASS: SensorDeviceClass.PRESSURE,
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_PRESSUREATSEALEVEL: {
DEVICE_CLASS: SensorDeviceClass.PRESSURE,
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_PROXIMITY: {ICON: "mdi:ruler"},
hc.SENSOR_REACTIVE_POWERUSAGE: {
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_STATUS_LAST_RESTART_TIME: {DEVICE_CLASS: SensorDeviceClass.TIMESTAMP},
hc.SENSOR_STATUS_RESTART_REASON: {ICON: "mdi:information-outline"},
hc.SENSOR_STATUS_SIGNAL: {
DEVICE_CLASS: SensorDeviceClass.SIGNAL_STRENGTH,
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_STATUS_RSSI: {
ICON: "mdi:access-point",
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_STATUS_SSID: {ICON: "mdi:access-point-network"},
hc.SENSOR_TEMPERATURE: {
DEVICE_CLASS: SensorDeviceClass.TEMPERATURE,
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_TODAY: {DEVICE_CLASS: SensorDeviceClass.ENERGY},
hc.SENSOR_TOTAL: {
DEVICE_CLASS: SensorDeviceClass.ENERGY,
STATE_CLASS: SensorStateClass.TOTAL_INCREASING,
},
hc.SENSOR_TOTAL_START_TIME: {ICON: "mdi:progress-clock"},
hc.SENSOR_TVOC: {ICON: "mdi:air-filter"},
hc.SENSOR_VOLTAGE: {
ICON: "mdi:alpha-v-circle-outline",
STATE_CLASS: SensorStateClass.MEASUREMENT,
},
hc.SENSOR_WEIGHT: {ICON: "mdi:scale", STATE_CLASS: SensorStateClass.MEASUREMENT},
hc.SENSOR_YESTERDAY: {DEVICE_CLASS: SensorDeviceClass.ENERGY},
}
SENSOR_UNIT_MAP = {
hc.CONCENTRATION_MICROGRAMS_PER_CUBIC_METER: CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
hc.CONCENTRATION_PARTS_PER_BILLION: CONCENTRATION_PARTS_PER_BILLION,
hc.CONCENTRATION_PARTS_PER_MILLION: CONCENTRATION_PARTS_PER_MILLION,
hc.ELECTRICAL_CURRENT_AMPERE: ELECTRIC_CURRENT_AMPERE,
hc.ELECTRICAL_VOLT_AMPERE: POWER_VOLT_AMPERE,
hc.ENERGY_KILO_WATT_HOUR: ENERGY_KILO_WATT_HOUR,
hc.FREQUENCY_HERTZ: FREQUENCY_HERTZ,
hc.LENGTH_CENTIMETERS: LENGTH_CENTIMETERS,
hc.LIGHT_LUX: LIGHT_LUX,
hc.MASS_KILOGRAMS: MASS_KILOGRAMS,
hc.PERCENTAGE: PERCENTAGE,
hc.POWER_WATT: POWER_WATT,
hc.PRESSURE_HPA: PRESSURE_HPA,
hc.SIGNAL_STRENGTH_DECIBELS: SIGNAL_STRENGTH_DECIBELS,
hc.SIGNAL_STRENGTH_DECIBELS_MILLIWATT: SIGNAL_STRENGTH_DECIBELS_MILLIWATT,
hc.SPEED_KILOMETERS_PER_HOUR: SPEED_KILOMETERS_PER_HOUR,
hc.SPEED_METERS_PER_SECOND: SPEED_METERS_PER_SECOND,
hc.SPEED_MILES_PER_HOUR: SPEED_MILES_PER_HOUR,
hc.TEMP_CELSIUS: TEMP_CELSIUS,
hc.TEMP_FAHRENHEIT: TEMP_FAHRENHEIT,
hc.TEMP_KELVIN: TEMP_KELVIN,
hc.VOLT: ELECTRIC_POTENTIAL_VOLT,
}
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up Tasmota sensor dynamically through discovery."""
@callback
def async_discover(
tasmota_entity: HATasmotaEntity, discovery_hash: DiscoveryHashType
) -> None:
"""Discover and add a Tasmota sensor."""
async_add_entities(
[
TasmotaSensor(
tasmota_entity=tasmota_entity, discovery_hash=discovery_hash
)
]
)
hass.data[
DATA_REMOVE_DISCOVER_COMPONENT.format(sensor.DOMAIN)
] = async_dispatcher_connect(
hass,
TASMOTA_DISCOVERY_ENTITY_NEW.format(sensor.DOMAIN),
async_discover,
)
class TasmotaSensor(TasmotaAvailability, TasmotaDiscoveryUpdate, SensorEntity):
"""Representation of a Tasmota sensor."""
_tasmota_entity: tasmota_sensor.TasmotaSensor
def __init__(self, **kwds: Any) -> None:
"""Initialize the Tasmota sensor."""
self._state: Any | None = None
self._state_timestamp: datetime | None = None
super().__init__(
**kwds,
)
async def async_added_to_hass(self) -> None:
"""Subscribe to MQTT events."""
self._tasmota_entity.set_on_state_callback(self.sensor_state_updated)
await super().async_added_to_hass()
@callback
def sensor_state_updated(self, state: Any, **kwargs: Any) -> None:
"""Handle state updates."""
if self.device_class == SensorDeviceClass.TIMESTAMP:
self._state_timestamp = state
else:
self._state = state
self.async_write_ha_state()
@property
def device_class(self) -> str | None:
"""Return the device class of the sensor."""
class_or_icon = SENSOR_DEVICE_CLASS_ICON_MAP.get(
self._tasmota_entity.quantity, {}
)
return class_or_icon.get(DEVICE_CLASS)
@property
def state_class(self) -> str | None:
"""Return the state class of the sensor."""
class_or_icon = SENSOR_DEVICE_CLASS_ICON_MAP.get(
self._tasmota_entity.quantity, {}
)
return class_or_icon.get(STATE_CLASS)
@property
def entity_category(self) -> str | None:
"""Return the category of the entity, if any."""
if self._tasmota_entity.quantity in status_sensor.SENSORS:
return EntityCategory.DIAGNOSTIC
return None
@property
def entity_registry_enabled_default(self) -> bool:
"""Return if the entity should be enabled when first added to the entity registry."""
# Hide fast changing status sensors
if self._tasmota_entity.quantity in (
hc.SENSOR_STATUS_IP,
hc.SENSOR_STATUS_RSSI,
hc.SENSOR_STATUS_SIGNAL,
hc.SENSOR_STATUS_VERSION,
):
return False
return True
@property
def icon(self) -> str | None:
"""Return the icon."""
class_or_icon = SENSOR_DEVICE_CLASS_ICON_MAP.get(
self._tasmota_entity.quantity, {}
)
return class_or_icon.get(ICON)
@property
def native_value(self) -> datetime | str | None:
"""Return the state of the entity."""
if self._state_timestamp and self.device_class == SensorDeviceClass.TIMESTAMP:
return self._state_timestamp
return self._state
@property
def force_update(self) -> bool:
"""Force update."""
return True
@property
def native_unit_of_measurement(self) -> str | None:
"""Return the unit this state is expressed in."""
return SENSOR_UNIT_MAP.get(self._tasmota_entity.unit, self._tasmota_entity.unit)
| {
"content_hash": "ba468a8f4499be197573d5dc23748c2b",
"timestamp": "",
"source": "github",
"line_count": 319,
"max_line_length": 93,
"avg_line_length": 34.59561128526646,
"alnum_prop": 0.6629213483146067,
"repo_name": "home-assistant/home-assistant",
"id": "ece45b9dfd75a1e501484cf5dfa0b457b689f138",
"size": "11036",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/tasmota/sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "20557383"
},
{
"name": "Shell",
"bytes": "6671"
}
],
"symlink_target": ""
} |
import ConfigParser
import os
class OccamConfig:
def __init__(self, fn=None):
self._cfg = ConfigParser.SafeConfigParser(allow_no_value=True)
if not (fn is None):
self._cfg.read(fn)
def getRoot(self):
if self._cfg.has_section('fs') and self._cfg.has_option('fs', 'root'):
return self._cfg.get('fs','root')
return "/"
def getPath(self):
if self._cfg.has_option('paths', 'path'):
return self._cfg.get('paths','path')
return os.getenv('PATH')
def getLlvmLibSearchPath(self):
if self._cfg.has_option('paths', 'llvm-lib-search'):
return self._cfg.get('paths','llvm-lib-search')
return os.getenv('LLVM_LIB_SEARCH_PATH')
def getLibSearchPath(self):
if self._cfg.has_option('paths', 'lib-search'):
return self._cfg.get('paths','lib-search')
return os.getenv('LIB_SEARCH')
def allVars(self):
# TODO
return []
CONFIG = None
FOUND_CONFIG = False
def foundConfig():
return FOUND_CONFIG
def defaultConfig():
return OccamConfig()
def getConfig():
    global CONFIG, FOUND_CONFIG
if not (CONFIG is None):
return CONFIG
FOUND_CONFIG = True
p = os.getcwd()
while p != '/':
if os.path.exists(os.path.join(p, '.occam')):
CONFIG = OccamConfig(os.path.join(p, '.occam'))
return CONFIG
p = os.path.dirname(p)
p = os.path.join(os.getenv('HOME'), '.occam')
if os.path.exists(p):
CONFIG = OccamConfig(p)
return CONFIG
CONFIG = defaultConfig()
FOUND_CONFIG = False
return CONFIG
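# getConfig() resolution order: walk up from the current directory looking for
# a ".occam" file, then fall back to "$HOME/.occam", then to defaultConfig().
# Minimal usage sketch (the import path below is an assumption, not taken from
# this file):
#
#   from occam import configuration
#   cfg = configuration.getConfig()
#   print(cfg.getRoot(), cfg.getPath(), configuration.foundConfig())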
| {
"content_hash": "c4e696298c1273f9fe3fc2649529c292",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 78,
"avg_line_length": 26.9672131147541,
"alnum_prop": 0.5781155015197569,
"repo_name": "Wajihulhassan/SelfContainedPrevirt",
"id": "1664c7e0bce2c4fbc70342fea7db33c2b280d6a7",
"size": "3366",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/occam/occam/configuration.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "7600"
},
{
"name": "C++",
"bytes": "250635"
},
{
"name": "Shell",
"bytes": "2442"
}
],
"symlink_target": ""
} |
"""
"""
from xml.sax import make_parser
from xml.sax.handler import ErrorHandler
from xml.sax.saxutils import handler, quoteattr, escape
from urlparse import urljoin, urldefrag
from rdflib.namespace import RDF
from rdflib.term import URIRef
from rdflib.term import BNode
from rdflib.term import Literal
from rdflib.exceptions import ParserError, Error
from rdflib.syntax.xml_names import is_ncname
from rdflib.syntax.parsers import Parser
RDFNS = RDF
# http://www.w3.org/TR/rdf-syntax-grammar/#eventterm-attribute-URI
# A mapping from unqualified terms to their qualified versions.
UNQUALIFIED = {"about" : RDF.about,
"ID" : RDF.ID,
"type" : RDF.type,
"resource": RDF.resource,
"parseType": RDF.parseType}
# http://www.w3.org/TR/rdf-syntax-grammar/#coreSyntaxTerms
CORE_SYNTAX_TERMS = [RDF.RDF, RDF.ID, RDF.about, RDF.parseType, RDF.resource, RDF.nodeID, RDF.datatype]
# http://www.w3.org/TR/rdf-syntax-grammar/#syntaxTerms
SYNTAX_TERMS = CORE_SYNTAX_TERMS + [RDF.Description, RDF.li]
# http://www.w3.org/TR/rdf-syntax-grammar/#oldTerms
OLD_TERMS = [
URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#aboutEach"),
URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#aboutEachPrefix"),
URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#bagID")]
NODE_ELEMENT_EXCEPTIONS = CORE_SYNTAX_TERMS + [RDF.li,] + OLD_TERMS
NODE_ELEMENT_ATTRIBUTES = [RDF.ID, RDF.nodeID, RDF.about]
PROPERTY_ELEMENT_EXCEPTIONS = CORE_SYNTAX_TERMS + [RDF.Description,] + OLD_TERMS
PROPERTY_ATTRIBUTE_EXCEPTIONS = CORE_SYNTAX_TERMS + [RDF.Description, RDF.li] + OLD_TERMS
PROPERTY_ELEMENT_ATTRIBUTES = [RDF.ID, RDF.resource, RDF.nodeID]
XMLNS = "http://www.w3.org/XML/1998/namespace"
BASE = (XMLNS, "base")
LANG = (XMLNS, "lang")
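# BagID and ElementHandler both expose next_li(), which yields the successive
# RDF container-membership properties (rdf:_1, rdf:_2, ...) used when the
# parser expands rdf:li property elements.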
class BagID(URIRef):
__slots__ = ['li']
def __init__(self, val):
super(URIRef, self).__init__(val)
self.li = 0
def next_li(self):
self.li += 1
return RDFNS[self.li]
class ElementHandler(object):
__slots__ = ['start', 'char', 'end', 'li', 'id',
'base', 'subject', 'predicate', 'object',
'list', 'language', 'datatype', 'declared', 'data']
def __init__(self):
self.start = None
self.char = None
self.end = None
self.li = 0
self.id = None
self.base = None
self.subject = None
self.object = None
self.list = None
self.language = None
self.datatype = None
self.declared = None
self.data = None
def next_li(self):
self.li += 1
return RDFNS[self.li]
class RDFXMLHandler(handler.ContentHandler):
def __init__(self, store):
self.store = store
self.preserve_bnode_ids = False
self.reset()
def reset(self):
document_element = ElementHandler()
document_element.start = self.document_element_start
document_element.end = lambda name, qname: None
self.stack = [None, document_element,]
self.ids = {} # remember IDs we have already seen
self.bnode = {}
self._ns_contexts = [{}] # contains uri -> prefix dicts
self._current_context = self._ns_contexts[-1]
# ContentHandler methods
def setDocumentLocator(self, locator):
self.locator = locator
def startDocument(self):
pass
def startPrefixMapping(self, prefix, namespace):
self._ns_contexts.append(self._current_context.copy())
self._current_context[namespace] = prefix
self.store.bind(prefix, URIRef(namespace), override=False)
def endPrefixMapping(self, prefix):
self._current_context = self._ns_contexts[-1]
del self._ns_contexts[-1]
def startElementNS(self, name, qname, attrs):
stack = self.stack
stack.append(ElementHandler())
current = self.current
parent = self.parent
base = attrs.get(BASE, None)
if base is not None:
base, frag = urldefrag(base)
if parent and parent.base:
base = urljoin(parent.base, base)
else:
systemId = self.locator.getPublicId() or self.locator.getSystemId()
if systemId:
base = urljoin(systemId, base)
else:
if parent:
base = parent.base
if base is None:
systemId = self.locator.getPublicId() or self.locator.getSystemId()
if systemId:
base, frag = urldefrag(systemId)
current.base = base
language = attrs.get(LANG, None)
if language is None:
if parent:
language = parent.language
current.language = language
current.start(name, qname, attrs)
def endElementNS(self, name, qname):
self.current.end(name, qname)
self.stack.pop()
def characters(self, content):
char = self.current.char
if char:
char(content)
def ignorableWhitespace(self, content):
pass
def processingInstruction(self, target, data):
pass
def add_reified(self, sid, (s, p, o)):
self.store.add((sid, RDF.type, RDF.Statement))
self.store.add((sid, RDF.subject, s))
self.store.add((sid, RDF.predicate, p))
self.store.add((sid, RDF.object, o))
def error(self, message):
locator = self.locator
info = "%s:%s:%s: " % (locator.getSystemId(),
locator.getLineNumber(), locator.getColumnNumber())
raise ParserError(info + message)
def get_current(self):
return self.stack[-2]
# Create a read only property called current so that self.current
# give the current element handler.
current = property(get_current)
def get_next(self):
return self.stack[-1]
# Create a read only property that gives the element handler to be
# used for the next element.
next = property(get_next)
def get_parent(self):
return self.stack[-3]
# Create a read only property that gives the current parent
# element handler
parent = property(get_parent)
def absolutize(self, uri):
result = urljoin(self.current.base, uri, allow_fragments=1)
if uri and uri[-1]=="#" and result[-1]!="#":
result = "%s#" % result
return URIRef(result)
def convert(self, name, qname, attrs):
if name[0] is None:
name = URIRef(name[1])
else:
name = URIRef("".join(name))
atts = {}
for (n, v) in attrs.items(): #attrs._attrs.iteritems(): #
if n[0] is None:
att = URIRef(n[1])
else:
att = URIRef("".join(n))
if att.startswith(XMLNS) or att[0:3].lower()=="xml":
pass
elif att in UNQUALIFIED:
#if not RDFNS[att] in atts:
atts[RDFNS[att]] = v
else:
atts[URIRef(att)] = v
return name, atts
def document_element_start(self, name, qname, attrs):
if name[0] and URIRef("".join(name)) == RDF.RDF:
next = self.next
next.start = self.node_element_start
next.end = self.node_element_end
else:
self.node_element_start(name, qname, attrs)
#self.current.end = self.node_element_end
# TODO... set end to something that sets start such that
# another element will cause error
def node_element_start(self, name, qname, attrs):
name, atts = self.convert(name, qname, attrs)
current = self.current
absolutize = self.absolutize
next = self.next
next.start = self.property_element_start
next.end = self.property_element_end
if name in NODE_ELEMENT_EXCEPTIONS:
self.error("Invalid node element URI: %s" % name)
if RDF.ID in atts:
if RDF.about in atts or RDF.nodeID in atts:
self.error("Can have at most one of rdf:ID, rdf:about, and rdf:nodeID")
id = atts[RDF.ID]
if not is_ncname(id):
self.error("rdf:ID value is not a valid NCName: %s" % id)
subject = absolutize("#%s" % id)
if subject in self.ids:
self.error("two elements cannot use the same ID: '%s'" % subject)
self.ids[subject] = 1 # IDs can only appear once within a document
elif RDF.nodeID in atts:
if RDF.ID in atts or RDF.about in atts:
self.error("Can have at most one of rdf:ID, rdf:about, and rdf:nodeID")
nodeID = atts[RDF.nodeID]
if not is_ncname(nodeID):
self.error("rdf:nodeID value is not a valid NCName: %s" % nodeID)
if self.preserve_bnode_ids is False:
if nodeID in self.bnode:
subject = self.bnode[nodeID]
else:
subject = BNode()
self.bnode[nodeID] = subject
else:
subject = BNode(nodeID)
elif RDF.about in atts:
if RDF.ID in atts or RDF.nodeID in atts:
self.error("Can have at most one of rdf:ID, rdf:about, and rdf:nodeID")
subject = absolutize(atts[RDF.about])
else:
subject = BNode()
if name!=RDF.Description: # S1
self.store.add((subject, RDF.type, absolutize(name)))
language = current.language
for att in atts:
if not att.startswith(str(RDFNS)):
predicate = absolutize(att)
try:
object = Literal(atts[att], language)
except Error, e:
self.error(e.msg)
elif att==RDF.type: #S2
predicate = RDF.type
object = absolutize(atts[RDF.type])
elif att in NODE_ELEMENT_ATTRIBUTES:
continue
elif att in PROPERTY_ATTRIBUTE_EXCEPTIONS: #S3
self.error("Invalid property attribute URI: %s" % att)
continue # for when error does not throw an exception
else:
predicate = absolutize(att)
try:
object = Literal(atts[att], language)
except Error, e:
self.error(e.msg)
self.store.add((subject, predicate, object))
current.subject = subject
def node_element_end(self, name, qname):
self.parent.object = self.current.subject
def property_element_start(self, name, qname, attrs):
name, atts = self.convert(name, qname, attrs)
current = self.current
absolutize = self.absolutize
next = self.next
object = None
current.data = None
current.list = None
if not name.startswith(str(RDFNS)):
current.predicate = absolutize(name)
elif name==RDF.li:
current.predicate = current.next_li()
elif name in PROPERTY_ELEMENT_EXCEPTIONS:
self.error("Invalid property element URI: %s" % name)
else:
current.predicate = absolutize(name)
id = atts.get(RDF.ID, None)
if id is not None:
if not is_ncname(id):
self.error("rdf:ID value is not a value NCName: %s" % id)
current.id = absolutize("#%s" % id)
else:
current.id = None
resource = atts.get(RDF.resource, None)
nodeID = atts.get(RDF.nodeID, None)
parse_type = atts.get(RDF.parseType, None)
if resource is not None and nodeID is not None:
self.error("Property element cannot have both rdf:nodeID and rdf:resource")
if resource is not None:
object = absolutize(resource)
next.start = self.node_element_start
next.end = self.node_element_end
elif nodeID is not None:
if not is_ncname(nodeID):
self.error("rdf:nodeID value is not a valid NCName: %s" % nodeID)
if self.preserve_bnode_ids is False:
if nodeID in self.bnode:
object = self.bnode[nodeID]
else:
subject = BNode()
self.bnode[nodeID] = subject
object = subject
else:
object = subject = BNode(nodeID)
next.start = self.node_element_start
next.end = self.node_element_end
else:
if parse_type is not None:
for att in atts:
if att!=RDF.parseType and att!=RDF.ID:
self.error("Property attr '%s' now allowed here" % att)
if parse_type=="Resource":
current.subject = object = BNode()
current.char = self.property_element_char
next.start = self.property_element_start
next.end = self.property_element_end
elif parse_type=="Collection":
current.char = None
object = current.list = RDF.nil #BNode()#self.parent.subject
next.start = self.node_element_start
next.end = self.list_node_element_end
else: #if parse_type=="Literal":
# All other values are treated as Literal
# See: http://www.w3.org/TR/rdf-syntax-grammar/#parseTypeOtherPropertyElt
object = Literal("", datatype=RDF.XMLLiteral)
current.char = self.literal_element_char
current.declared = {}
next.start = self.literal_element_start
next.char = self.literal_element_char
next.end = self.literal_element_end
current.object = object
return
else:
object = None
current.char = self.property_element_char
next.start = self.node_element_start
next.end = self.node_element_end
datatype = current.datatype = atts.get(RDF.datatype, None)
language = current.language
if datatype is not None:
# TODO: check that there are no atts other than datatype and id
datatype = absolutize(datatype)
else:
for att in atts:
if not att.startswith(str(RDFNS)):
predicate = absolutize(att)
elif att in PROPERTY_ELEMENT_ATTRIBUTES:
continue
elif att in PROPERTY_ATTRIBUTE_EXCEPTIONS:
self.error("""Invalid property attribute URI: %s""" % att)
else:
predicate = absolutize(att)
if att==RDF.type:
o = URIRef(atts[att])
else:
if datatype is not None:
language = None
o = Literal(atts[att], language, datatype)
if object is None:
object = BNode()
self.store.add((object, predicate, o))
if object is None:
current.data = ""
current.object = None
else:
current.data = None
current.object = object
def property_element_char(self, data):
current = self.current
if current.data is not None:
current.data += data
def property_element_end(self, name, qname):
current = self.current
if current.data is not None and current.object is None:
literalLang = current.language
if current.datatype is not None:
literalLang = None
current.object = Literal(current.data, literalLang, current.datatype)
current.data = None
if self.next.end==self.list_node_element_end:
if current.object!=RDF.nil:
self.store.add((current.list, RDF.rest, RDF.nil))
if current.object is not None:
self.store.add((self.parent.subject, current.predicate, current.object))
if current.id is not None:
self.add_reified(current.id, (self.parent.subject,
current.predicate, current.object))
current.subject = None
def list_node_element_end(self, name, qname):
current = self.current
if self.parent.list==RDF.nil:
list = BNode()
# Removed between 20030123 and 20030905
#self.store.add((list, RDF.type, LIST))
self.parent.list = list
self.store.add((self.parent.list, RDF.first, current.subject))
self.parent.object = list
self.parent.char = None
else:
list = BNode()
# Removed between 20030123 and 20030905
#self.store.add((list, RDF.type, LIST))
self.store.add((self.parent.list, RDF.rest, list))
self.store.add((list, RDF.first, current.subject))
self.parent.list = list
def literal_element_start(self, name, qname, attrs):
current = self.current
self.next.start = self.literal_element_start
self.next.char = self.literal_element_char
self.next.end = self.literal_element_end
current.declared = self.parent.declared.copy()
if name[0]:
prefix = self._current_context[name[0]]
if prefix:
current.object = "<%s:%s" % (prefix, name[1])
else:
current.object = "<%s" % name[1]
if not name[0] in current.declared:
current.declared[name[0]] = prefix
if prefix:
current.object += (' xmlns:%s="%s"' % (prefix, name[0]))
else:
current.object += (' xmlns="%s"' % name[0])
else:
current.object = "<%s" % name[1]
for (name, value) in attrs.items():
if name[0]:
if not name[0] in current.declared:
current.declared[name[0]] = self._current_context[name[0]]
name = current.declared[name[0]] + ":" + name[1]
else:
name = name[1]
current.object += (' %s=%s' % (name, quoteattr(value)))
current.object += ">"
def literal_element_char(self, data):
self.current.object += escape(data)
def literal_element_end(self, name, qname):
if name[0]:
prefix = self._current_context[name[0]]
if prefix:
end = u"</%s:%s>" % (prefix, name[1])
else:
end = u"</%s>" % name[1]
else:
end = u"</%s>" % name[1]
self.parent.object += self.current.object + end
def create_parser(target, store):
parser = make_parser()
try:
# Workaround for bug in expatreader.py. Needed when
# expatreader is trying to guess a prefix.
parser.start_namespace_decl("xml", "http://www.w3.org/XML/1998/namespace")
except AttributeError:
pass # Not present in Jython (at least)
parser.setFeature(handler.feature_namespaces, 1)
rdfxml = RDFXMLHandler(store)
rdfxml.setDocumentLocator(target)
#rdfxml.setDocumentLocator(_Locator(self.url, self.parser))
parser.setContentHandler(rdfxml)
parser.setErrorHandler(ErrorHandler())
return parser
class RDFXMLParser(Parser):
def __init__(self):
pass
def parse(self, source, sink, **args):
self._parser = create_parser(source, sink)
content_handler = self._parser.getContentHandler()
preserve_bnode_ids = args.get("preserve_bnode_ids", None)
if preserve_bnode_ids is not None:
content_handler.preserve_bnode_ids = preserve_bnode_ids
# We're only using it once now
#content_handler.reset()
#self._parser.reset()
self._parser.parse(source)
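# Typical use is indirect, through rdflib's parser plugin machinery rather than
# by instantiating RDFXMLParser directly (a sketch; the exact entry point
# depends on the rdflib version in use):
#
#   from rdflib import Graph
#   g = Graph()
#   g.parse("data.rdf", format="xml")   # dispatches to RDFXMLParser.parse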
| {
"content_hash": "40b54c779519269791ec14a6fc52d5b7",
"timestamp": "",
"source": "github",
"line_count": 543,
"max_line_length": 103,
"avg_line_length": 36.98526703499079,
"alnum_prop": 0.5575362246676293,
"repo_name": "alcides/rdflib",
"id": "6376f594119067c73ba98a3308faafbf419ff156",
"size": "21636",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rdflib/syntax/parsers/rdfxml.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "47529"
},
{
"name": "Python",
"bytes": "1477729"
}
],
"symlink_target": ""
} |
from django.http import HttpRequest, HttpResponse
from zerver.decorator import REQ
from zerver.models import UserProfile
from zerver.lib.validator import check_int
from zerver.lib.response import json_success
from zerver.lib.attachments import user_attachments, remove_attachment, \
access_attachment_by_id
def list_by_user(request, user_profile):
# type: (HttpRequest, UserProfile) -> HttpResponse
return json_success({"attachments": user_attachments(user_profile)})
def remove(request, user_profile, attachment_id=REQ(validator=check_int)):
# type: (HttpRequest, UserProfile, int) -> HttpResponse
attachment = access_attachment_by_id(user_profile, attachment_id,
needs_owner=True)
remove_attachment(user_profile, attachment)
return json_success()
| {
"content_hash": "832059d45f8ed1c26fc3ce456ba1ed8a",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 74,
"avg_line_length": 39.19047619047619,
"alnum_prop": 0.732685297691373,
"repo_name": "amanharitsh123/zulip",
"id": "c4f0d598d68d5ebe79ff29815aa699b6f5edb4e3",
"size": "823",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zerver/views/attachments.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "432211"
},
{
"name": "Emacs Lisp",
"bytes": "158"
},
{
"name": "HTML",
"bytes": "494378"
},
{
"name": "JavaScript",
"bytes": "2167185"
},
{
"name": "Nginx",
"bytes": "1485"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "401825"
},
{
"name": "Puppet",
"bytes": "86921"
},
{
"name": "Python",
"bytes": "3792729"
},
{
"name": "Ruby",
"bytes": "249744"
},
{
"name": "Shell",
"bytes": "61752"
}
],
"symlink_target": ""
} |
"""
The main QuerySet implementation. This provides the public API for the ORM.
"""
import copy
import sys
import warnings
from collections import OrderedDict, deque
from django.conf import settings
from django.core import exceptions
from django.db import (
DJANGO_VERSION_PICKLE_KEY, IntegrityError, connections, router,
transaction,
)
from django.db.models import DateField, DateTimeField, sql
from django.db.models.constants import LOOKUP_SEP
from django.db.models.deletion import Collector
from django.db.models.expressions import F
from django.db.models.fields import AutoField
from django.db.models.functions import Trunc
from django.db.models.query_utils import (
InvalidQuery, Q, check_rel_lookup_compatibility,
)
from django.db.models.sql.constants import CURSOR
from django.utils import six, timezone
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.functional import cached_property, partition
from django.utils.version import get_version
# The maximum number of items to display in a QuerySet.__repr__
REPR_OUTPUT_SIZE = 20
# Pull into this namespace for backwards compatibility.
EmptyResultSet = sql.EmptyResultSet
class BaseIterable(object):
def __init__(self, queryset):
self.queryset = queryset
class ModelIterable(BaseIterable):
"""
Iterable that yields a model instance for each row.
"""
def __iter__(self):
queryset = self.queryset
db = queryset.db
compiler = queryset.query.get_compiler(using=db)
# Execute the query. This will also fill compiler.select, klass_info,
# and annotations.
results = compiler.execute_sql()
select, klass_info, annotation_col_map = (compiler.select, compiler.klass_info,
compiler.annotation_col_map)
model_cls = klass_info['model']
select_fields = klass_info['select_fields']
model_fields_start, model_fields_end = select_fields[0], select_fields[-1] + 1
init_list = [f[0].target.attname
for f in select[model_fields_start:model_fields_end]]
related_populators = get_related_populators(klass_info, select, db)
for row in compiler.results_iter(results):
obj = model_cls.from_db(db, init_list, row[model_fields_start:model_fields_end])
if related_populators:
for rel_populator in related_populators:
rel_populator.populate(row, obj)
if annotation_col_map:
for attr_name, col_pos in annotation_col_map.items():
setattr(obj, attr_name, row[col_pos])
# Add the known related objects to the model, if there are any
if queryset._known_related_objects:
for field, rel_objs in queryset._known_related_objects.items():
# Avoid overwriting objects loaded e.g. by select_related
if hasattr(obj, field.get_cache_name()):
continue
pk = getattr(obj, field.get_attname())
try:
rel_obj = rel_objs[pk]
except KeyError:
pass # may happen in qs1 | qs2 scenarios
else:
setattr(obj, field.name, rel_obj)
yield obj
class ValuesIterable(BaseIterable):
"""
Iterable returned by QuerySet.values() that yields a dict
for each row.
"""
def __iter__(self):
queryset = self.queryset
query = queryset.query
compiler = query.get_compiler(queryset.db)
field_names = list(query.values_select)
extra_names = list(query.extra_select)
annotation_names = list(query.annotation_select)
# extra(select=...) cols are always at the start of the row.
names = extra_names + field_names + annotation_names
for row in compiler.results_iter():
yield dict(zip(names, row))
class ValuesListIterable(BaseIterable):
"""
Iterable returned by QuerySet.values_list(flat=False)
that yields a tuple for each row.
"""
def __iter__(self):
queryset = self.queryset
query = queryset.query
compiler = query.get_compiler(queryset.db)
if not query.extra_select and not query.annotation_select:
for row in compiler.results_iter():
yield tuple(row)
else:
field_names = list(query.values_select)
extra_names = list(query.extra_select)
annotation_names = list(query.annotation_select)
# extra(select=...) cols are always at the start of the row.
names = extra_names + field_names + annotation_names
if queryset._fields:
# Reorder according to fields.
fields = list(queryset._fields) + [f for f in annotation_names if f not in queryset._fields]
else:
fields = names
for row in compiler.results_iter():
data = dict(zip(names, row))
yield tuple(data[f] for f in fields)
class FlatValuesListIterable(BaseIterable):
"""
Iterable returned by QuerySet.values_list(flat=True) that
yields single values.
"""
def __iter__(self):
queryset = self.queryset
compiler = queryset.query.get_compiler(queryset.db)
for row in compiler.results_iter():
yield row[0]
class QuerySet(object):
"""
Represents a lazy database lookup for a set of objects.
"""
def __init__(self, model=None, query=None, using=None, hints=None):
self.model = model
self._db = using
self._hints = hints or {}
self.query = query or sql.Query(self.model)
self._result_cache = None
self._sticky_filter = False
self._for_write = False
self._prefetch_related_lookups = []
self._prefetch_done = False
self._known_related_objects = {} # {rel_field, {pk: rel_obj}}
self._iterable_class = ModelIterable
self._fields = None
def as_manager(cls):
# Address the circular dependency between `Queryset` and `Manager`.
from django.db.models.manager import Manager
manager = Manager.from_queryset(cls)()
manager._built_with_as_manager = True
return manager
as_manager.queryset_only = True
as_manager = classmethod(as_manager)
########################
# PYTHON MAGIC METHODS #
########################
def __deepcopy__(self, memo):
"""
Deep copy of a QuerySet doesn't populate the cache
"""
obj = self.__class__()
for k, v in self.__dict__.items():
if k == '_result_cache':
obj.__dict__[k] = None
else:
obj.__dict__[k] = copy.deepcopy(v, memo)
return obj
def __getstate__(self):
"""
Allows the QuerySet to be pickled.
"""
# Force the cache to be fully populated.
self._fetch_all()
obj_dict = self.__dict__.copy()
obj_dict[DJANGO_VERSION_PICKLE_KEY] = get_version()
return obj_dict
def __setstate__(self, state):
msg = None
pickled_version = state.get(DJANGO_VERSION_PICKLE_KEY)
if pickled_version:
current_version = get_version()
if current_version != pickled_version:
msg = (
"Pickled queryset instance's Django version %s does not "
"match the current version %s." % (pickled_version, current_version)
)
else:
msg = "Pickled queryset instance's Django version is not specified."
if msg:
warnings.warn(msg, RuntimeWarning, stacklevel=2)
self.__dict__.update(state)
def __repr__(self):
data = list(self[:REPR_OUTPUT_SIZE + 1])
if len(data) > REPR_OUTPUT_SIZE:
data[-1] = "...(remaining elements truncated)..."
return '<QuerySet %r>' % data
def __len__(self):
self._fetch_all()
return len(self._result_cache)
def __iter__(self):
"""
The queryset iterator protocol uses three nested iterators in the
default case:
1. sql.compiler:execute_sql()
               - Returns 100 rows at a time (constants.GET_ITERATOR_CHUNK_SIZE)
using cursor.fetchmany(). This part is responsible for
doing some column masking, and returning the rows in chunks.
2. sql/compiler.results_iter()
               - Returns one row at a time. At this point the rows are still just
tuples. In some cases the return values are converted to
Python values at this location.
3. self.iterator()
- Responsible for turning the rows into model objects.
"""
self._fetch_all()
return iter(self._result_cache)
def __bool__(self):
self._fetch_all()
return bool(self._result_cache)
def __nonzero__(self): # Python 2 compatibility
return type(self).__bool__(self)
def __getitem__(self, k):
"""
Retrieves an item or slice from the set of results.
"""
if not isinstance(k, (slice,) + six.integer_types):
raise TypeError
assert ((not isinstance(k, slice) and (k >= 0)) or
(isinstance(k, slice) and (k.start is None or k.start >= 0) and
(k.stop is None or k.stop >= 0))), \
"Negative indexing is not supported."
if self._result_cache is not None:
return self._result_cache[k]
if isinstance(k, slice):
qs = self._clone()
if k.start is not None:
start = int(k.start)
else:
start = None
if k.stop is not None:
stop = int(k.stop)
else:
stop = None
qs.query.set_limits(start, stop)
return list(qs)[::k.step] if k.step else qs
qs = self._clone()
qs.query.set_limits(k, k + 1)
return list(qs)[0]
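    # Slicing sketch (``Entry`` is a hypothetical model, not part of Django):
    #   Entry.objects.all()[:5]    # adds LIMIT 5 to the underlying query
    #   Entry.objects.all()[5:10]  # adds OFFSET 5 LIMIT 5
    #   Entry.objects.all()[0]     # fetches a single instance via LIMIT 1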
def __and__(self, other):
self._merge_sanity_check(other)
if isinstance(other, EmptyQuerySet):
return other
if isinstance(self, EmptyQuerySet):
return self
combined = self._clone()
combined._merge_known_related_objects(other)
combined.query.combine(other.query, sql.AND)
return combined
def __or__(self, other):
self._merge_sanity_check(other)
if isinstance(self, EmptyQuerySet):
return other
if isinstance(other, EmptyQuerySet):
return self
combined = self._clone()
combined._merge_known_related_objects(other)
combined.query.combine(other.query, sql.OR)
return combined
####################################
# METHODS THAT DO DATABASE QUERIES #
####################################
def iterator(self):
"""
An iterator over the results from applying this QuerySet to the
database.
"""
return iter(self._iterable_class(self))
def aggregate(self, *args, **kwargs):
"""
Returns a dictionary containing the calculations (aggregation)
over the current queryset
If args is present the expression is passed as a kwarg using
the Aggregate object's default alias.
"""
if self.query.distinct_fields:
raise NotImplementedError("aggregate() + distinct(fields) not implemented.")
for arg in args:
# The default_alias property may raise a TypeError, so we use
# a try/except construct rather than hasattr in order to remain
# consistent between PY2 and PY3 (hasattr would swallow
# the TypeError on PY2).
try:
arg.default_alias
except (AttributeError, TypeError):
raise TypeError("Complex aggregates require an alias")
kwargs[arg.default_alias] = arg
query = self.query.clone()
for (alias, aggregate_expr) in kwargs.items():
query.add_annotation(aggregate_expr, alias, is_summary=True)
if not query.annotations[alias].contains_aggregate:
raise TypeError("%s is not an aggregate expression" % alias)
return query.get_aggregation(self.db, kwargs.keys())
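    # Usage sketch (``Book`` and its ``price`` field are hypothetical):
    #   Book.objects.aggregate(Avg('price'))                 -> {'price__avg': ...}
    #   Book.objects.aggregate(average_price=Avg('price'))   -> {'average_price': ...}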
def count(self):
"""
Performs a SELECT COUNT() and returns the number of records as an
integer.
If the QuerySet is already fully cached this simply returns the length
of the cached results set to avoid multiple SELECT COUNT(*) calls.
"""
if self._result_cache is not None:
return len(self._result_cache)
return self.query.get_count(using=self.db)
def get(self, *args, **kwargs):
"""
Performs the query and returns a single object matching the given
keyword arguments.
"""
clone = self.filter(*args, **kwargs)
if self.query.can_filter() and not self.query.distinct_fields:
clone = clone.order_by()
num = len(clone)
if num == 1:
return clone._result_cache[0]
if not num:
raise self.model.DoesNotExist(
"%s matching query does not exist." %
self.model._meta.object_name
)
raise self.model.MultipleObjectsReturned(
"get() returned more than one %s -- it returned %s!" %
(self.model._meta.object_name, num)
)
def create(self, **kwargs):
"""
Creates a new object with the given kwargs, saving it to the database
and returning the created object.
"""
obj = self.model(**kwargs)
self._for_write = True
obj.save(force_insert=True, using=self.db)
return obj
def _populate_pk_values(self, objs):
for obj in objs:
if obj.pk is None:
obj.pk = obj._meta.pk.get_pk_value_on_save(obj)
def bulk_create(self, objs, batch_size=None):
"""
Inserts each of the instances into the database. This does *not* call
save() on each of the instances, does not send any pre/post save
signals, and does not set the primary key attribute if it is an
autoincrement field (except if features.can_return_ids_from_bulk_insert=True).
Multi-table models are not supported.
"""
# When you bulk insert you don't get the primary keys back (if it's an
# autoincrement, except if can_return_ids_from_bulk_insert=True), so
        # you can't insert into the child tables which reference this. There
# are two workarounds:
# 1) This could be implemented if you didn't have an autoincrement pk
# 2) You could do it by doing O(n) normal inserts into the parent
# tables to get the primary keys back and then doing a single bulk
# insert into the childmost table.
# We currently set the primary keys on the objects when using
# PostgreSQL via the RETURNING ID clause. It should be possible for
# Oracle as well, but the semantics for extracting the primary keys is
# trickier so it's not done yet.
assert batch_size is None or batch_size > 0
        # Check that the parents share the same concrete model with our
# model to detect the inheritance pattern ConcreteGrandParent ->
# MultiTableParent -> ProxyChild. Simply checking self.model._meta.proxy
# would not identify that case as involving multiple tables.
for parent in self.model._meta.get_parent_list():
if parent._meta.concrete_model is not self.model._meta.concrete_model:
raise ValueError("Can't bulk create a multi-table inherited model")
if not objs:
return objs
self._for_write = True
connection = connections[self.db]
fields = self.model._meta.concrete_fields
objs = list(objs)
self._populate_pk_values(objs)
with transaction.atomic(using=self.db, savepoint=False):
objs_with_pk, objs_without_pk = partition(lambda o: o.pk is None, objs)
if objs_with_pk:
self._batched_insert(objs_with_pk, fields, batch_size)
if objs_without_pk:
fields = [f for f in fields if not isinstance(f, AutoField)]
ids = self._batched_insert(objs_without_pk, fields, batch_size)
if connection.features.can_return_ids_from_bulk_insert:
assert len(ids) == len(objs_without_pk)
for obj_without_pk, pk in zip(objs_without_pk, ids):
obj_without_pk.pk = pk
obj_without_pk._state.adding = False
obj_without_pk._state.db = self.db
return objs
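    # Usage sketch (``Entry`` is a hypothetical model):
    #   Entry.objects.bulk_create([Entry(headline='A'), Entry(headline='B')])
    # inserts both rows without calling save() or sending pre/post save signals.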
def get_or_create(self, defaults=None, **kwargs):
"""
Looks up an object with the given kwargs, creating one if necessary.
Returns a tuple of (object, created), where created is a boolean
specifying whether an object was created.
"""
lookup, params = self._extract_model_params(defaults, **kwargs)
# The get() needs to be targeted at the write database in order
# to avoid potential transaction consistency problems.
self._for_write = True
try:
return self.get(**lookup), False
except self.model.DoesNotExist:
return self._create_object_from_params(lookup, params)
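    # Usage sketch (``Person`` is a hypothetical model):
    #   obj, created = Person.objects.get_or_create(
    #       first_name='John', last_name='Lennon',
    #       defaults={'birthday': date(1940, 10, 9)},
    #   )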
def update_or_create(self, defaults=None, **kwargs):
"""
Looks up an object with the given kwargs, updating one with defaults
if it exists, otherwise creates a new one.
Returns a tuple (object, created), where created is a boolean
specifying whether an object was created.
"""
defaults = defaults or {}
lookup, params = self._extract_model_params(defaults, **kwargs)
self._for_write = True
with transaction.atomic(using=self.db):
try:
obj = self.select_for_update().get(**lookup)
except self.model.DoesNotExist:
obj, created = self._create_object_from_params(lookup, params)
if created:
return obj, created
for k, v in six.iteritems(defaults):
setattr(obj, k, v() if callable(v) else v)
obj.save(using=self.db)
return obj, False
def _create_object_from_params(self, lookup, params):
"""
Tries to create an object using passed params.
Used by get_or_create and update_or_create
"""
try:
with transaction.atomic(using=self.db):
params = {k: v() if callable(v) else v for k, v in params.items()}
obj = self.create(**params)
return obj, True
except IntegrityError:
exc_info = sys.exc_info()
try:
return self.get(**lookup), False
except self.model.DoesNotExist:
pass
six.reraise(*exc_info)
def _extract_model_params(self, defaults, **kwargs):
"""
Prepares `lookup` (kwargs that are valid model attributes), `params`
(for creating a model instance) based on given kwargs; for use by
get_or_create and update_or_create.
"""
defaults = defaults or {}
lookup = kwargs.copy()
for f in self.model._meta.fields:
if f.attname in lookup:
lookup[f.name] = lookup.pop(f.attname)
params = {k: v for k, v in kwargs.items() if LOOKUP_SEP not in k}
params.update(defaults)
invalid_params = []
for param in params:
try:
self.model._meta.get_field(param)
except exceptions.FieldDoesNotExist:
if param != 'pk': # It's okay to use a model's pk property.
invalid_params.append(param)
if invalid_params:
raise exceptions.FieldError(
"Invalid field name(s) for model %s: '%s'." % (
self.model._meta.object_name,
"', '".join(sorted(invalid_params)),
))
return lookup, params
def _earliest_or_latest(self, field_name=None, direction="-"):
"""
Returns the latest object, according to the model's
'get_latest_by' option or optional given field_name.
"""
order_by = field_name or getattr(self.model._meta, 'get_latest_by')
assert bool(order_by), "earliest() and latest() require either a "\
"field_name parameter or 'get_latest_by' in the model"
assert self.query.can_filter(), \
"Cannot change a query once a slice has been taken."
obj = self._clone()
obj.query.set_limits(high=1)
obj.query.clear_ordering(force_empty=True)
obj.query.add_ordering('%s%s' % (direction, order_by))
return obj.get()
def earliest(self, field_name=None):
return self._earliest_or_latest(field_name=field_name, direction="")
def latest(self, field_name=None):
return self._earliest_or_latest(field_name=field_name, direction="-")
def first(self):
"""
Returns the first object of a query, returns None if no match is found.
"""
objects = list((self if self.ordered else self.order_by('pk'))[:1])
if objects:
return objects[0]
return None
def last(self):
"""
Returns the last object of a query, returns None if no match is found.
"""
objects = list((self.reverse() if self.ordered else self.order_by('-pk'))[:1])
if objects:
return objects[0]
return None
def in_bulk(self, id_list=None):
"""
Returns a dictionary mapping each of the given IDs to the object with
that ID. If `id_list` isn't provided, the entire QuerySet is evaluated.
"""
assert self.query.can_filter(), \
"Cannot use 'limit' or 'offset' with in_bulk"
if id_list is not None:
if not id_list:
return {}
qs = self.filter(pk__in=id_list).order_by()
else:
qs = self._clone()
return {obj._get_pk_val(): obj for obj in qs}
def delete(self):
"""
Deletes the records in the current QuerySet.
"""
assert self.query.can_filter(), \
"Cannot use 'limit' or 'offset' with delete."
if self._fields is not None:
raise TypeError("Cannot call delete() after .values() or .values_list()")
del_query = self._clone()
# The delete is actually 2 queries - one to find related objects,
# and one to delete. Make sure that the discovery of related
# objects is performed on the same database as the deletion.
del_query._for_write = True
# Disable non-supported fields.
del_query.query.select_for_update = False
del_query.query.select_related = False
del_query.query.clear_ordering(force_empty=True)
collector = Collector(using=del_query.db)
collector.collect(del_query)
deleted, _rows_count = collector.delete()
# Clear the result cache, in case this QuerySet gets reused.
self._result_cache = None
return deleted, _rows_count
delete.alters_data = True
delete.queryset_only = True
def _raw_delete(self, using):
"""
Deletes objects found from the given queryset in single direct SQL
query. No signals are sent, and there is no protection for cascades.
"""
return sql.DeleteQuery(self.model).delete_qs(self, using)
_raw_delete.alters_data = True
def update(self, **kwargs):
"""
Updates all elements in the current QuerySet, setting all the given
fields to the appropriate values.
"""
assert self.query.can_filter(), \
"Cannot update a query once a slice has been taken."
self._for_write = True
query = self.query.clone(sql.UpdateQuery)
query.add_update_values(kwargs)
# Clear any annotations so that they won't be present in subqueries.
query._annotations = None
with transaction.atomic(using=self.db, savepoint=False):
rows = query.get_compiler(self.db).execute_sql(CURSOR)
self._result_cache = None
return rows
update.alters_data = True
def _update(self, values):
"""
A version of update that accepts field objects instead of field names.
Used primarily for model saving and not intended for use by general
code (it requires too much poking around at model internals to be
useful at that level).
"""
assert self.query.can_filter(), \
"Cannot update a query once a slice has been taken."
query = self.query.clone(sql.UpdateQuery)
query.add_update_fields(values)
self._result_cache = None
return query.get_compiler(self.db).execute_sql(CURSOR)
_update.alters_data = True
_update.queryset_only = False
def exists(self):
if self._result_cache is None:
return self.query.has_results(using=self.db)
return bool(self._result_cache)
def _prefetch_related_objects(self):
# This method can only be called once the result cache has been filled.
prefetch_related_objects(self._result_cache, *self._prefetch_related_lookups)
self._prefetch_done = True
##################################################
# PUBLIC METHODS THAT RETURN A QUERYSET SUBCLASS #
##################################################
def raw(self, raw_query, params=None, translations=None, using=None):
if using is None:
using = self.db
return RawQuerySet(raw_query, model=self.model, params=params, translations=translations, using=using)
def _values(self, *fields, **expressions):
clone = self._clone()
if expressions:
clone = clone.annotate(**expressions)
clone._fields = fields
clone.query.set_values(fields)
return clone
def values(self, *fields, **expressions):
fields += tuple(expressions)
clone = self._values(*fields, **expressions)
clone._iterable_class = ValuesIterable
return clone
def values_list(self, *fields, **kwargs):
flat = kwargs.pop('flat', False)
if kwargs:
raise TypeError('Unexpected keyword arguments to values_list: %s' % (list(kwargs),))
if flat and len(fields) > 1:
raise TypeError("'flat' is not valid when values_list is called with more than one field.")
_fields = []
expressions = {}
for field in fields:
if hasattr(field, 'resolve_expression'):
field_id = str(id(field))
expressions[field_id] = field
_fields.append(field_id)
else:
_fields.append(field)
clone = self._values(*_fields, **expressions)
clone._iterable_class = FlatValuesListIterable if flat else ValuesListIterable
return clone
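    # Usage sketch (``Entry`` is a hypothetical model):
    #   Entry.objects.values_list('id', 'headline')  # yields (id, headline) tuples
    #   Entry.objects.values_list('id', flat=True)   # yields bare id values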
def dates(self, field_name, kind, order='ASC'):
"""
Returns a list of date objects representing all available dates for
the given field_name, scoped to 'kind'.
"""
assert kind in ("year", "month", "day"), \
"'kind' must be one of 'year', 'month' or 'day'."
assert order in ('ASC', 'DESC'), \
"'order' must be either 'ASC' or 'DESC'."
return self.annotate(
datefield=Trunc(field_name, kind, output_field=DateField()),
plain_field=F(field_name)
).values_list(
'datefield', flat=True
).distinct().filter(plain_field__isnull=False).order_by(('-' if order == 'DESC' else '') + 'datefield')
def datetimes(self, field_name, kind, order='ASC', tzinfo=None):
"""
Returns a list of datetime objects representing all available
datetimes for the given field_name, scoped to 'kind'.
"""
assert kind in ("year", "month", "day", "hour", "minute", "second"), \
"'kind' must be one of 'year', 'month', 'day', 'hour', 'minute' or 'second'."
assert order in ('ASC', 'DESC'), \
"'order' must be either 'ASC' or 'DESC'."
if settings.USE_TZ:
if tzinfo is None:
tzinfo = timezone.get_current_timezone()
else:
tzinfo = None
return self.annotate(
datetimefield=Trunc(field_name, kind, output_field=DateTimeField(), tzinfo=tzinfo),
plain_field=F(field_name)
).values_list(
'datetimefield', flat=True
).distinct().filter(plain_field__isnull=False).order_by(('-' if order == 'DESC' else '') + 'datetimefield')
def none(self):
"""
Returns an empty QuerySet.
"""
clone = self._clone()
clone.query.set_empty()
return clone
##################################################################
# PUBLIC METHODS THAT ALTER ATTRIBUTES AND RETURN A NEW QUERYSET #
##################################################################
def all(self):
"""
Returns a new QuerySet that is a copy of the current one. This allows a
QuerySet to proxy for a model manager in some cases.
"""
return self._clone()
def filter(self, *args, **kwargs):
"""
Returns a new QuerySet instance with the args ANDed to the existing
set.
"""
return self._filter_or_exclude(False, *args, **kwargs)
def exclude(self, *args, **kwargs):
"""
Returns a new QuerySet instance with NOT (args) ANDed to the existing
set.
"""
return self._filter_or_exclude(True, *args, **kwargs)
def _filter_or_exclude(self, negate, *args, **kwargs):
if args or kwargs:
assert self.query.can_filter(), \
"Cannot filter a query once a slice has been taken."
clone = self._clone()
if negate:
clone.query.add_q(~Q(*args, **kwargs))
else:
clone.query.add_q(Q(*args, **kwargs))
return clone
def complex_filter(self, filter_obj):
"""
Returns a new QuerySet instance with filter_obj added to the filters.
filter_obj can be a Q object (or anything with an add_to_query()
method) or a dictionary of keyword lookup arguments.
This exists to support framework features such as 'limit_choices_to',
and usually it will be more natural to use other methods.
"""
if isinstance(filter_obj, Q) or hasattr(filter_obj, 'add_to_query'):
clone = self._clone()
clone.query.add_q(filter_obj)
return clone
else:
return self._filter_or_exclude(None, **filter_obj)
def select_for_update(self, nowait=False, skip_locked=False):
"""
Returns a new QuerySet instance that will select objects with a
FOR UPDATE lock.
"""
if nowait and skip_locked:
raise ValueError('The nowait option cannot be used with skip_locked.')
obj = self._clone()
obj._for_write = True
obj.query.select_for_update = True
obj.query.select_for_update_nowait = nowait
obj.query.select_for_update_skip_locked = skip_locked
return obj
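    # Illustrative sketch (not part of the original source): select_for_update()
    # only takes effect inside a transaction, where the selected rows stay
    # locked until the transaction ends. Assuming a hypothetical ``Account``
    # model:
    #
    #   from django.db import transaction
    #   with transaction.atomic():
    #       account = Account.objects.select_for_update().get(pk=1)
    #       account.balance -= 10
    #       account.save()
    #
    #   # nowait=True raises an error instead of blocking on a locked row;
    #   # skip_locked=True silently skips locked rows.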
def select_related(self, *fields):
"""
Returns a new QuerySet instance that will select related objects.
If fields are specified, they must be ForeignKey fields and only those
related objects are included in the selection.
If select_related(None) is called, the list is cleared.
"""
if self._fields is not None:
raise TypeError("Cannot call select_related() after .values() or .values_list()")
obj = self._clone()
if fields == (None,):
obj.query.select_related = False
elif fields:
obj.query.add_select_related(fields)
else:
obj.query.select_related = True
return obj
def prefetch_related(self, *lookups):
"""
Returns a new QuerySet instance that will prefetch the specified
Many-To-One and Many-To-Many related objects when the QuerySet is
evaluated.
When prefetch_related() is called more than once, the list of lookups to
prefetch is appended to. If prefetch_related(None) is called, the list
is cleared.
"""
clone = self._clone()
if lookups == (None,):
clone._prefetch_related_lookups = []
else:
clone._prefetch_related_lookups.extend(lookups)
return clone
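    # Illustrative sketch (not part of the original source): assuming
    # hypothetical ``Pizza`` and ``Topping`` models related by a
    # many-to-many field ``toppings``:
    #
    #   Pizza.objects.prefetch_related('toppings')
    #   # one extra query fetches all related toppings and caches them on
    #   # each Pizza instance, so pizza.toppings.all() hits the cache.
    #
    #   Pizza.objects.prefetch_related(None)   # clears the lookup list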
def annotate(self, *args, **kwargs):
"""
Return a query set in which the returned objects have been annotated
with extra data or aggregations.
"""
annotations = OrderedDict() # To preserve ordering of args
for arg in args:
# The default_alias property may raise a TypeError, so we use
# a try/except construct rather than hasattr in order to remain
# consistent between PY2 and PY3 (hasattr would swallow
# the TypeError on PY2).
try:
if arg.default_alias in kwargs:
raise ValueError("The named annotation '%s' conflicts with the "
"default name for another annotation."
% arg.default_alias)
except (AttributeError, TypeError):
raise TypeError("Complex annotations require an alias")
annotations[arg.default_alias] = arg
annotations.update(kwargs)
clone = self._clone()
names = self._fields
if names is None:
names = {f.name for f in self.model._meta.get_fields()}
for alias, annotation in annotations.items():
if alias in names:
raise ValueError("The annotation '%s' conflicts with a field on "
"the model." % alias)
clone.query.add_annotation(annotation, alias, is_summary=False)
for alias, annotation in clone.query.annotations.items():
if alias in annotations and annotation.contains_aggregate:
if clone._fields is None:
clone.query.group_by = True
else:
clone.query.set_group_by()
break
return clone
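    # Illustrative sketch (not part of the original source): assuming a
    # hypothetical ``Book`` model with a many-to-many ``authors`` field:
    #
    #   from django.db.models import Count
    #   books = Book.objects.annotate(num_authors=Count('authors'))
    #   books[0].num_authors   # aggregate value attached to each instance
    #
    #   # A positional argument relies on its default_alias, so passing
    #   # Count('authors') positionally would be read back as
    #   # ``book.authors__count``.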
def order_by(self, *field_names):
"""
Returns a new QuerySet instance with the ordering changed.
"""
assert self.query.can_filter(), \
"Cannot reorder a query once a slice has been taken."
obj = self._clone()
obj.query.clear_ordering(force_empty=False)
obj.query.add_ordering(*field_names)
return obj
def distinct(self, *field_names):
"""
Returns a new QuerySet instance that will select only distinct results.
"""
assert self.query.can_filter(), \
"Cannot create distinct fields once a slice has been taken."
obj = self._clone()
obj.query.add_distinct_fields(*field_names)
return obj
def extra(self, select=None, where=None, params=None, tables=None,
order_by=None, select_params=None):
"""
Adds extra SQL fragments to the query.
"""
assert self.query.can_filter(), \
"Cannot change a query once a slice has been taken"
clone = self._clone()
clone.query.add_extra(select, select_params, where, params, tables, order_by)
return clone
def reverse(self):
"""
Reverses the ordering of the QuerySet.
"""
clone = self._clone()
clone.query.standard_ordering = not clone.query.standard_ordering
return clone
def defer(self, *fields):
"""
Defers the loading of data for certain fields until they are accessed.
The set of fields to defer is added to any existing set of deferred
fields. The only exception to this is if None is passed in as the only
parameter, in which case all deferrals are removed (None acts as a
reset option).
"""
if self._fields is not None:
raise TypeError("Cannot call defer() after .values() or .values_list()")
clone = self._clone()
if fields == (None,):
clone.query.clear_deferred_loading()
else:
clone.query.add_deferred_loading(fields)
return clone
def only(self, *fields):
"""
Essentially, the opposite of defer. Only the fields passed into this
method and that are not already specified as deferred are loaded
immediately when the queryset is evaluated.
"""
if self._fields is not None:
raise TypeError("Cannot call only() after .values() or .values_list()")
if fields == (None,):
# Can only pass None to defer(), not only(), as the rest option.
# That won't stop people trying to do this, so let's be explicit.
raise TypeError("Cannot pass None as an argument to only().")
clone = self._clone()
clone.query.add_immediate_loading(fields)
return clone
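    # Illustrative sketch (not part of the original source): assuming a
    # hypothetical ``Entry`` model with ``headline`` and ``body`` fields:
    #
    #   Entry.objects.defer('body')          # load everything except body
    #   Entry.objects.only('headline')       # load only headline (plus pk)
    #   Entry.objects.defer(None)            # remove all deferrals
    #
    # Accessing a deferred field later triggers a separate query for that
    # field on that instance.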
def using(self, alias):
"""
Selects which database this QuerySet should execute its query against.
"""
clone = self._clone()
clone._db = alias
return clone
###################################
# PUBLIC INTROSPECTION ATTRIBUTES #
###################################
@property
def ordered(self):
"""
Returns True if the QuerySet is ordered -- i.e. has an order_by()
clause or a default ordering on the model.
"""
if self.query.extra_order_by or self.query.order_by:
return True
elif self.query.default_ordering and self.query.get_meta().ordering:
return True
else:
return False
@property
def db(self):
"Return the database that will be used if this query is executed now"
if self._for_write:
return self._db or router.db_for_write(self.model, **self._hints)
return self._db or router.db_for_read(self.model, **self._hints)
###################
# PRIVATE METHODS #
###################
def _insert(self, objs, fields, return_id=False, raw=False, using=None):
"""
Inserts a new record for the given model. This provides an interface to
the InsertQuery class and is how Model.save() is implemented.
"""
self._for_write = True
if using is None:
using = self.db
query = sql.InsertQuery(self.model)
query.insert_values(fields, objs, raw=raw)
return query.get_compiler(using=using).execute_sql(return_id)
_insert.alters_data = True
_insert.queryset_only = False
def _batched_insert(self, objs, fields, batch_size):
"""
        Helper method for bulk_create() that inserts the given objects one
        batch at a time, splitting the list into slices of at most
        batch_size objects each.
"""
if not objs:
return
ops = connections[self.db].ops
batch_size = (batch_size or max(ops.bulk_batch_size(fields, objs), 1))
inserted_ids = []
for item in [objs[i:i + batch_size] for i in range(0, len(objs), batch_size)]:
if connections[self.db].features.can_return_ids_from_bulk_insert:
inserted_id = self._insert(item, fields=fields, using=self.db, return_id=True)
if len(objs) > 1:
inserted_ids.extend(inserted_id)
if len(objs) == 1:
inserted_ids.append(inserted_id)
else:
self._insert(item, fields=fields, using=self.db)
return inserted_ids
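    # Worked example (not part of the original source): with 5 objects and
    # batch_size=2, the slicing above yields batches objs[0:2], objs[2:4] and
    # objs[4:5], i.e. three INSERT statements. When the backend can return ids
    # from bulk inserts, the ids are collected across batches; for a single
    # object the backend returns a scalar id, hence the len(objs) == 1 branch.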
def _clone(self, **kwargs):
query = self.query.clone()
if self._sticky_filter:
query.filter_is_sticky = True
clone = self.__class__(model=self.model, query=query, using=self._db, hints=self._hints)
clone._for_write = self._for_write
clone._prefetch_related_lookups = self._prefetch_related_lookups[:]
clone._known_related_objects = self._known_related_objects
clone._iterable_class = self._iterable_class
clone._fields = self._fields
clone.__dict__.update(kwargs)
return clone
def _fetch_all(self):
if self._result_cache is None:
self._result_cache = list(self.iterator())
if self._prefetch_related_lookups and not self._prefetch_done:
self._prefetch_related_objects()
def _next_is_sticky(self):
"""
Indicates that the next filter call and the one following that should
be treated as a single filter. This is only important when it comes to
determining when to reuse tables for many-to-many filters. Required so
that we can filter naturally on the results of related managers.
This doesn't return a clone of the current QuerySet (it returns
"self"). The method is only used internally and should be immediately
followed by a filter() that does create a clone.
"""
self._sticky_filter = True
return self
def _merge_sanity_check(self, other):
"""
Checks that we are merging two comparable QuerySet classes.
"""
if self._fields is not None and (
set(self.query.values_select) != set(other.query.values_select) or
set(self.query.extra_select) != set(other.query.extra_select) or
set(self.query.annotation_select) != set(other.query.annotation_select)):
raise TypeError(
"Merging '%s' classes must involve the same values in each case."
% self.__class__.__name__
)
def _merge_known_related_objects(self, other):
"""
Keep track of all known related objects from either QuerySet instance.
"""
for field, objects in other._known_related_objects.items():
self._known_related_objects.setdefault(field, {}).update(objects)
def _prepare(self, field):
if self._fields is not None:
            # A values() queryset can only be used as a nested query
            # if it is set up to select only a single field.
if len(self._fields or self.model._meta.concrete_fields) > 1:
raise TypeError('Cannot use multi-field values as a filter value.')
elif self.model != field.model:
# If the query is used as a subquery for a ForeignKey with non-pk
# target field, make sure to select the target field in the subquery.
foreign_fields = getattr(field, 'foreign_related_fields', ())
if len(foreign_fields) == 1 and not foreign_fields[0].primary_key:
return self.values(foreign_fields[0].name)
return self
def _as_sql(self, connection):
"""
Returns the internal query's SQL and parameters (as a tuple).
"""
if self._fields is not None:
            # A values() queryset can only be used as a nested query
            # if it is set up to select only a single field.
if len(self._fields or self.model._meta.concrete_fields) > 1:
raise TypeError('Cannot use multi-field values as a filter value.')
clone = self._clone()
else:
clone = self.values('pk')
if clone._db is None or connection == connections[clone._db]:
return clone.query.get_compiler(connection=connection).as_nested_sql()
raise ValueError("Can't do subqueries with queries on different DBs.")
def _add_hints(self, **hints):
"""
Update hinting information for later use by Routers
"""
# If there is any hinting information, add it to what we already know.
# If we have a new hint for an existing key, overwrite with the new value.
self._hints.update(hints)
def _has_filters(self):
"""
        Checks if this QuerySet has any filtering going on. Note that this
        isn't equivalent to checking whether all objects are present in the
        results; for example, qs[1:]._has_filters() -> False.
"""
return self.query.has_filters()
def is_compatible_query_object_type(self, opts, field):
"""
Check that using this queryset as the rhs value for a lookup is
allowed. The opts are the options of the relation's target we are
querying against. For example in .filter(author__in=Author.objects.all())
the opts would be Author's (from the author field) and self.model would
be Author.objects.all() queryset's .model (Author also). The field is
the related field on the lhs side.
"""
# We trust that users of values() know what they are doing.
if self._fields is not None:
return True
return check_rel_lookup_compatibility(self.model, opts, field)
is_compatible_query_object_type.queryset_only = True
class InstanceCheckMeta(type):
def __instancecheck__(self, instance):
return isinstance(instance, QuerySet) and instance.query.is_empty()
class EmptyQuerySet(six.with_metaclass(InstanceCheckMeta)):
"""
Marker class usable for checking if a queryset is empty by .none():
isinstance(qs.none(), EmptyQuerySet) -> True
"""
def __init__(self, *args, **kwargs):
raise TypeError("EmptyQuerySet can't be instantiated")
class RawQuerySet(object):
"""
Provides an iterator which converts the results of raw SQL queries into
annotated model instances.
"""
def __init__(self, raw_query, model=None, query=None, params=None,
translations=None, using=None, hints=None):
self.raw_query = raw_query
self.model = model
self._db = using
self._hints = hints or {}
self.query = query or sql.RawQuery(sql=raw_query, using=self.db, params=params)
self.params = params or ()
self.translations = translations or {}
def resolve_model_init_order(self):
"""
Resolve the init field names and value positions
"""
model_init_fields = [f for f in self.model._meta.fields if f.column in self.columns]
annotation_fields = [(column, pos) for pos, column in enumerate(self.columns)
if column not in self.model_fields]
model_init_order = [self.columns.index(f.column) for f in model_init_fields]
model_init_names = [f.attname for f in model_init_fields]
return model_init_names, model_init_order, annotation_fields
def __iter__(self):
# Cache some things for performance reasons outside the loop.
db = self.db
compiler = connections[db].ops.compiler('SQLCompiler')(
self.query, connections[db], db
)
query = iter(self.query)
try:
model_init_names, model_init_pos, annotation_fields = self.resolve_model_init_order()
# Find out which model's fields are not present in the query.
skip = set()
for field in self.model._meta.fields:
if field.attname not in model_init_names:
skip.add(field.attname)
if skip:
if self.model._meta.pk.attname in skip:
raise InvalidQuery('Raw query must include the primary key')
model_cls = self.model
fields = [self.model_fields.get(c) for c in self.columns]
converters = compiler.get_converters([
f.get_col(f.model._meta.db_table) if f else None for f in fields
])
for values in query:
if converters:
values = compiler.apply_converters(values, converters)
# Associate fields to values
model_init_values = [values[pos] for pos in model_init_pos]
instance = model_cls.from_db(db, model_init_names, model_init_values)
if annotation_fields:
for column, pos in annotation_fields:
setattr(instance, column, values[pos])
yield instance
finally:
# Done iterating the Query. If it has its own cursor, close it.
if hasattr(self.query, 'cursor') and self.query.cursor:
self.query.cursor.close()
def __repr__(self):
return "<RawQuerySet: %s>" % self.query
def __getitem__(self, k):
return list(self)[k]
@property
def db(self):
"Return the database that will be used if this query is executed now"
return self._db or router.db_for_read(self.model, **self._hints)
def using(self, alias):
"""
Selects which database this Raw QuerySet should execute its query against.
"""
return RawQuerySet(
self.raw_query, model=self.model,
query=self.query.clone(using=alias),
params=self.params, translations=self.translations,
using=alias,
)
@property
def columns(self):
"""
A list of model field names in the order they'll appear in the
query results.
"""
if not hasattr(self, '_columns'):
self._columns = self.query.get_columns()
# Adjust any column names which don't match field names
for (query_name, model_name) in self.translations.items():
try:
index = self._columns.index(query_name)
self._columns[index] = model_name
except ValueError:
# Ignore translations for non-existent column names
pass
return self._columns
@property
def model_fields(self):
"""
A dict mapping column names to model field names.
"""
if not hasattr(self, '_model_fields'):
converter = connections[self.db].introspection.table_name_converter
self._model_fields = {}
for field in self.model._meta.fields:
name, column = field.get_attname_column()
self._model_fields[converter(column)] = field
return self._model_fields
class Prefetch(object):
def __init__(self, lookup, queryset=None, to_attr=None):
# `prefetch_through` is the path we traverse to perform the prefetch.
self.prefetch_through = lookup
# `prefetch_to` is the path to the attribute that stores the result.
self.prefetch_to = lookup
if queryset is not None and queryset._iterable_class is not ModelIterable:
raise ValueError('Prefetch querysets cannot use values().')
if to_attr:
self.prefetch_to = LOOKUP_SEP.join(lookup.split(LOOKUP_SEP)[:-1] + [to_attr])
self.queryset = queryset
self.to_attr = to_attr
def add_prefix(self, prefix):
self.prefetch_through = LOOKUP_SEP.join([prefix, self.prefetch_through])
self.prefetch_to = LOOKUP_SEP.join([prefix, self.prefetch_to])
def get_current_prefetch_to(self, level):
return LOOKUP_SEP.join(self.prefetch_to.split(LOOKUP_SEP)[:level + 1])
def get_current_to_attr(self, level):
parts = self.prefetch_to.split(LOOKUP_SEP)
to_attr = parts[level]
as_attr = self.to_attr and level == len(parts) - 1
return to_attr, as_attr
def get_current_queryset(self, level):
if self.get_current_prefetch_to(level) == self.prefetch_to:
return self.queryset
return None
def __eq__(self, other):
if isinstance(other, Prefetch):
return self.prefetch_to == other.prefetch_to
return False
def __hash__(self):
return hash(self.__class__) ^ hash(self.prefetch_to)
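# Illustrative sketch (not part of the original source): Prefetch objects let
# a custom queryset and/or a destination attribute be attached to a lookup.
# Assuming hypothetical ``Question`` and ``Choice`` models related by a
# foreign key (reverse accessor ``choice_set``):
#
#   Question.objects.prefetch_related(
#       Prefetch('choice_set',
#                queryset=Choice.objects.filter(votes__gt=0),
#                to_attr='popular_choices'))
#   # each Question instance then gets a ``popular_choices`` list attribute.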
def normalize_prefetch_lookups(lookups, prefix=None):
"""
    Helper function that normalizes lookups into Prefetch objects.
"""
ret = []
for lookup in lookups:
if not isinstance(lookup, Prefetch):
lookup = Prefetch(lookup)
if prefix:
lookup.add_prefix(prefix)
ret.append(lookup)
return ret
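# Worked example (not part of the original source): given
# lookups=['toppings', Prefetch('sizes')] and prefix='pizzas', this returns
# two Prefetch objects whose prefetch_through/prefetch_to paths are
# 'pizzas__toppings' and 'pizzas__sizes' respectively.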
def prefetch_related_objects(model_instances, *related_lookups):
"""
Populate prefetched object caches for a list of model instances based on
the lookups/Prefetch instances given.
"""
if len(model_instances) == 0:
return # nothing to do
related_lookups = normalize_prefetch_lookups(related_lookups)
# We need to be able to dynamically add to the list of prefetch_related
# lookups that we look up (see below). So we need some book keeping to
# ensure we don't do duplicate work.
done_queries = {} # dictionary of things like 'foo__bar': [results]
auto_lookups = set() # we add to this as we go through.
followed_descriptors = set() # recursion protection
all_lookups = deque(related_lookups)
while all_lookups:
lookup = all_lookups.popleft()
if lookup.prefetch_to in done_queries:
if lookup.queryset:
raise ValueError("'%s' lookup was already seen with a different queryset. "
"You may need to adjust the ordering of your lookups." % lookup.prefetch_to)
continue
# Top level, the list of objects to decorate is the result cache
# from the primary QuerySet. It won't be for deeper levels.
obj_list = model_instances
through_attrs = lookup.prefetch_through.split(LOOKUP_SEP)
for level, through_attr in enumerate(through_attrs):
# Prepare main instances
if len(obj_list) == 0:
break
prefetch_to = lookup.get_current_prefetch_to(level)
if prefetch_to in done_queries:
# Skip any prefetching, and any object preparation
obj_list = done_queries[prefetch_to]
continue
# Prepare objects:
good_objects = True
for obj in obj_list:
# Since prefetching can re-use instances, it is possible to have
# the same instance multiple times in obj_list, so obj might
# already be prepared.
if not hasattr(obj, '_prefetched_objects_cache'):
try:
obj._prefetched_objects_cache = {}
except (AttributeError, TypeError):
# Must be an immutable object from
# values_list(flat=True), for example (TypeError) or
# a QuerySet subclass that isn't returning Model
# instances (AttributeError), either in Django or a 3rd
# party. prefetch_related() doesn't make sense, so quit.
good_objects = False
break
if not good_objects:
break
# Descend down tree
# We assume that objects retrieved are homogeneous (which is the premise
# of prefetch_related), so what applies to first object applies to all.
first_obj = obj_list[0]
to_attr = lookup.get_current_to_attr(level)[0]
prefetcher, descriptor, attr_found, is_fetched = get_prefetcher(first_obj, through_attr, to_attr)
if not attr_found:
raise AttributeError("Cannot find '%s' on %s object, '%s' is an invalid "
"parameter to prefetch_related()" %
(through_attr, first_obj.__class__.__name__, lookup.prefetch_through))
if level == len(through_attrs) - 1 and prefetcher is None:
# Last one, this *must* resolve to something that supports
# prefetching, otherwise there is no point adding it and the
# developer asking for it has made a mistake.
raise ValueError("'%s' does not resolve to an item that supports "
"prefetching - this is an invalid parameter to "
"prefetch_related()." % lookup.prefetch_through)
if prefetcher is not None and not is_fetched:
obj_list, additional_lookups = prefetch_one_level(obj_list, prefetcher, lookup, level)
# We need to ensure we don't keep adding lookups from the
# same relationships to stop infinite recursion. So, if we
# are already on an automatically added lookup, don't add
# the new lookups from relationships we've seen already.
if not (lookup in auto_lookups and descriptor in followed_descriptors):
done_queries[prefetch_to] = obj_list
new_lookups = normalize_prefetch_lookups(additional_lookups, prefetch_to)
auto_lookups.update(new_lookups)
all_lookups.extendleft(new_lookups)
followed_descriptors.add(descriptor)
else:
# Either a singly related object that has already been fetched
# (e.g. via select_related), or hopefully some other property
# that doesn't support prefetching but needs to be traversed.
# We replace the current list of parent objects with the list
# of related objects, filtering out empty or missing values so
# that we can continue with nullable or reverse relations.
new_obj_list = []
for obj in obj_list:
try:
new_obj = getattr(obj, through_attr)
except exceptions.ObjectDoesNotExist:
continue
if new_obj is None:
continue
# We special-case `list` rather than something more generic
# like `Iterable` because we don't want to accidentally match
# user models that define __iter__.
if isinstance(new_obj, list):
new_obj_list.extend(new_obj)
else:
new_obj_list.append(new_obj)
obj_list = new_obj_list
def get_prefetcher(instance, through_attr, to_attr):
"""
For the attribute 'through_attr' on the given instance, finds
an object that has a get_prefetch_queryset().
Returns a 4 tuple containing:
(the object with get_prefetch_queryset (or None),
the descriptor object representing this relationship (or None),
a boolean that is False if the attribute was not found at all,
a boolean that is True if the attribute has already been fetched)
"""
prefetcher = None
is_fetched = False
# For singly related objects, we have to avoid getting the attribute
# from the object, as this will trigger the query. So we first try
# on the class, in order to get the descriptor object.
rel_obj_descriptor = getattr(instance.__class__, through_attr, None)
if rel_obj_descriptor is None:
attr_found = hasattr(instance, through_attr)
else:
attr_found = True
if rel_obj_descriptor:
# singly related object, descriptor object has the
# get_prefetch_queryset() method.
if hasattr(rel_obj_descriptor, 'get_prefetch_queryset'):
prefetcher = rel_obj_descriptor
if rel_obj_descriptor.is_cached(instance):
is_fetched = True
else:
# descriptor doesn't support prefetching, so we go ahead and get
# the attribute on the instance rather than the class to
# support many related managers
rel_obj = getattr(instance, through_attr)
if hasattr(rel_obj, 'get_prefetch_queryset'):
prefetcher = rel_obj
if through_attr != to_attr:
# Special case cached_property instances because hasattr
# triggers attribute computation and assignment.
if isinstance(getattr(instance.__class__, to_attr, None), cached_property):
is_fetched = to_attr in instance.__dict__
else:
is_fetched = hasattr(instance, to_attr)
else:
is_fetched = through_attr in instance._prefetched_objects_cache
return prefetcher, rel_obj_descriptor, attr_found, is_fetched
def prefetch_one_level(instances, prefetcher, lookup, level):
"""
    Helper function for prefetch_related_objects().
    Runs prefetches on all instances using the prefetcher object,
    assigning results to the relevant caches on each instance.
The prefetched objects are returned, along with any additional
prefetches that must be done due to prefetch_related lookups
found from default managers.
"""
# prefetcher must have a method get_prefetch_queryset() which takes a list
# of instances, and returns a tuple:
# (queryset of instances of self.model that are related to passed in instances,
# callable that gets value to be matched for returned instances,
# callable that gets value to be matched for passed in instances,
# boolean that is True for singly related objects,
# cache name to assign to).
# The 'values to be matched' must be hashable as they will be used
# in a dictionary.
rel_qs, rel_obj_attr, instance_attr, single, cache_name = (
prefetcher.get_prefetch_queryset(instances, lookup.get_current_queryset(level)))
# We have to handle the possibility that the QuerySet we just got back
# contains some prefetch_related lookups. We don't want to trigger the
# prefetch_related functionality by evaluating the query. Rather, we need
# to merge in the prefetch_related lookups.
# Copy the lookups in case it is a Prefetch object which could be reused
# later (happens in nested prefetch_related).
additional_lookups = [
copy.copy(additional_lookup) for additional_lookup
in getattr(rel_qs, '_prefetch_related_lookups', [])
]
if additional_lookups:
# Don't need to clone because the manager should have given us a fresh
# instance, so we access an internal instead of using public interface
# for performance reasons.
rel_qs._prefetch_related_lookups = []
all_related_objects = list(rel_qs)
rel_obj_cache = {}
for rel_obj in all_related_objects:
rel_attr_val = rel_obj_attr(rel_obj)
rel_obj_cache.setdefault(rel_attr_val, []).append(rel_obj)
to_attr, as_attr = lookup.get_current_to_attr(level)
# Make sure `to_attr` does not conflict with a field.
if as_attr and instances:
# We assume that objects retrieved are homogeneous (which is the premise
# of prefetch_related), so what applies to first object applies to all.
model = instances[0].__class__
try:
model._meta.get_field(to_attr)
except exceptions.FieldDoesNotExist:
pass
else:
msg = 'to_attr={} conflicts with a field on the {} model.'
raise ValueError(msg.format(to_attr, model.__name__))
# Whether or not we're prefetching the last part of the lookup.
leaf = len(lookup.prefetch_through.split(LOOKUP_SEP)) - 1 == level
for obj in instances:
instance_attr_val = instance_attr(obj)
vals = rel_obj_cache.get(instance_attr_val, [])
if single:
val = vals[0] if vals else None
to_attr = to_attr if as_attr else cache_name
setattr(obj, to_attr, val)
else:
if as_attr:
setattr(obj, to_attr, vals)
else:
manager = getattr(obj, to_attr)
if leaf and lookup.queryset is not None:
try:
apply_rel_filter = manager._apply_rel_filters
except AttributeError:
warnings.warn(
"The `%s.%s` class must implement a `_apply_rel_filters()` "
"method that accepts a `QuerySet` as its single "
"argument and returns an appropriately filtered version "
"of it." % (manager.__class__.__module__, manager.__class__.__name__),
RemovedInDjango20Warning,
)
qs = manager.get_queryset()
else:
qs = apply_rel_filter(lookup.queryset)
else:
qs = manager.get_queryset()
qs._result_cache = vals
# We don't want the individual qs doing prefetch_related now,
# since we have merged this into the current work.
qs._prefetch_done = True
obj._prefetched_objects_cache[cache_name] = qs
return all_related_objects, additional_lookups
class RelatedPopulator(object):
"""
RelatedPopulator is used for select_related() object instantiation.
The idea is that each select_related() model will be populated by a
different RelatedPopulator instance. The RelatedPopulator instances get
klass_info and select (computed in SQLCompiler) plus the used db as
input for initialization. That data is used to compute which columns
to use, how to instantiate the model, and how to populate the links
between the objects.
The actual creation of the objects is done in populate() method. This
method gets row and from_obj as input and populates the select_related()
model instance.
"""
def __init__(self, klass_info, select, db):
self.db = db
# Pre-compute needed attributes. The attributes are:
# - model_cls: the possibly deferred model class to instantiate
# - either:
# - cols_start, cols_end: usually the columns in the row are
# in the same order model_cls.__init__ expects them, so we
# can instantiate by model_cls(*row[cols_start:cols_end])
# - reorder_for_init: When select_related descends to a child
# class, then we want to reuse the already selected parent
# data. However, in this case the parent data isn't necessarily
# in the same order that Model.__init__ expects it to be, so
# we have to reorder the parent data. The reorder_for_init
# attribute contains a function used to reorder the field data
# in the order __init__ expects it.
# - pk_idx: the index of the primary key field in the reordered
# model data. Used to check if a related object exists at all.
# - init_list: the field attnames fetched from the database. For
# deferred models this isn't the same as all attnames of the
# model's fields.
# - related_populators: a list of RelatedPopulator instances if
# select_related() descends to related models from this model.
# - cache_name, reverse_cache_name: the names to use for setattr
# when assigning the fetched object to the from_obj. If the
# reverse_cache_name is set, then we also set the reverse link.
select_fields = klass_info['select_fields']
from_parent = klass_info['from_parent']
if not from_parent:
self.cols_start = select_fields[0]
self.cols_end = select_fields[-1] + 1
self.init_list = [
f[0].target.attname for f in select[self.cols_start:self.cols_end]
]
self.reorder_for_init = None
else:
model_init_attnames = [
f.attname for f in klass_info['model']._meta.concrete_fields
]
reorder_map = []
for idx in select_fields:
field = select[idx][0].target
init_pos = model_init_attnames.index(field.attname)
reorder_map.append((init_pos, field.attname, idx))
reorder_map.sort()
self.init_list = [v[1] for v in reorder_map]
pos_list = [row_pos for _, _, row_pos in reorder_map]
def reorder_for_init(row):
return [row[row_pos] for row_pos in pos_list]
self.reorder_for_init = reorder_for_init
self.model_cls = klass_info['model']
self.pk_idx = self.init_list.index(self.model_cls._meta.pk.attname)
self.related_populators = get_related_populators(klass_info, select, self.db)
field = klass_info['field']
reverse = klass_info['reverse']
self.reverse_cache_name = None
if reverse:
self.cache_name = field.remote_field.get_cache_name()
self.reverse_cache_name = field.get_cache_name()
else:
self.cache_name = field.get_cache_name()
if field.unique:
self.reverse_cache_name = field.remote_field.get_cache_name()
def populate(self, row, from_obj):
if self.reorder_for_init:
obj_data = self.reorder_for_init(row)
else:
obj_data = row[self.cols_start:self.cols_end]
if obj_data[self.pk_idx] is None:
obj = None
else:
obj = self.model_cls.from_db(self.db, self.init_list, obj_data)
if obj and self.related_populators:
for rel_iter in self.related_populators:
rel_iter.populate(row, obj)
setattr(from_obj, self.cache_name, obj)
if obj and self.reverse_cache_name:
setattr(obj, self.reverse_cache_name, from_obj)
def get_related_populators(klass_info, select, db):
iterators = []
related_klass_infos = klass_info.get('related_klass_infos', [])
for rel_klass_info in related_klass_infos:
rel_cls = RelatedPopulator(rel_klass_info, select, db)
iterators.append(rel_cls)
return iterators
from PySide import QtCore, QtGui
import webbrowser
import FloatGetter
from totp import generateAuthCode
import time
from gevent import sleep
from collections import OrderedDict
from decimal import Decimal
import sys
from errno import WSAEHOSTUNREACH
from socket import error as socket_error
import itemIndex
sys.setrecursionlimit(5000)
open('log.txt', 'w').close()  # truncate the log file at startup
def create_settings():
with open('settings.txt', 'w') as settings:
settings.write('defaultmarketcount=50\ndefaultcurrency=0\ndefaultdelay=0.7\nlogging=0\n//sharedsecret=')
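# Illustrative sketch (not part of the original source): the generated
# settings.txt is a plain key=value file, one option per line, e.g.:
#
#   defaultmarketcount=50
#   defaultcurrency=0
#   defaultdelay=0.7
#   logging=0
#   //sharedsecret=
#
# setupUi() below re-reads these keys on startup; a line such as
# '//sharedsecret=' is not matched by any of the startswith() checks in that
# loop, so it is skipped there.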
class Ui_MainWindow(QtCore.QObject):
processItems = QtCore.Signal(object)
getMarketData = QtCore.Signal(object)
_login = QtCore.Signal(bool)
_worker_login = QtCore.Signal(tuple)
_disconnect_user = QtCore.Signal(bool)
init_login = QtCore.Signal(bool)
_process_single = QtCore.Signal(bool)
def __init__(self):
QtCore.QObject.__init__(self)
self.WorkerThread = WorkerThread(self)
self.t = QtCore.QThread(self, objectName='workerThread')
self.WorkerThread.moveToThread(self.t)
self.t.start()
self.PassedHere = False
self.currency = None
self.soldcount = 0
self.start = 0
self.logging = 0
self.WorkerThread.progresscount = 0
self.WorkerThread.SetStatus.connect(lambda x: self.StatusLabel.setText(x))
self.WorkerThread.progressSignal.connect(lambda x: self.progressBar_2.setProperty("value", x))
self.WorkerThread.StartEn.connect(lambda x: self.StartButton.setEnabled(x))
self.WorkerThread.StartDis.connect(lambda x: self.StartButton.setDisabled(x))
self.WorkerThread.PauseEn.connect(lambda x: self.PauseButton.setEnabled(x))
self.WorkerThread.PauseDis.connect(lambda x: self.PauseButton.setDisabled(x))
self.WorkerThread.RetrieveEn.connect(lambda x: self.RetrieveButton.setEnabled(x))
self.WorkerThread.RetrieveDis.connect(lambda x: self.RetrieveButton.setDisabled(x))
self.WorkerThread.TableSorting.connect(lambda x: self.tableWidget.setSortingEnabled(x))
self.WorkerThread.NewRow.connect(lambda x: self.tableWidget.insertRow(x))
self.WorkerThread.SetTableItem.connect(self.SetTable)
self.WorkerThread.ShowError.connect(self.showError)
self.WorkerThread.ShowInfo.connect(self.showInfo)
self.WorkerThread.SetCurrHeader.connect(self.setCurrHeader)
self.WorkerThread.display_error.connect(lambda x: self.display_error(x))
self.WorkerThread.log_event.connect(lambda x: self.logEvent(x))
self.WorkerThread.MainLogin.connect(self.login)
self.init_login.emit(True)
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.setMinimumSize(QtCore.QSize(800, 600))
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("logo.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
MainWindow.setWindowIcon(icon)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.verticalLayout_2 = QtGui.QVBoxLayout(self.centralwidget)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.verticalLayout = QtGui.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.gridLayout = QtGui.QGridLayout()
self.gridLayout.setObjectName("gridLayout")
self.label_3 = QtGui.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setItalic(True)
self.label_3.setFont(font)
self.label_3.setObjectName("label_3")
self.gridLayout.addWidget(self.label_3, 3, 0, 1, 1)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.label_2 = QtGui.QLabel(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_2.sizePolicy().hasHeightForWidth())
self.label_2.setSizePolicy(sizePolicy)
self.label_2.setMinimumSize(QtCore.QSize(70, 0))
self.label_2.setMaximumSize(QtCore.QSize(70, 16777215))
self.label_2.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_2.setObjectName("label_2")
self.horizontalLayout.addWidget(self.label_2)
self.DelaySpinner = QtGui.QDoubleSpinBox(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.DelaySpinner.sizePolicy().hasHeightForWidth())
self.DelaySpinner.setSizePolicy(sizePolicy)
self.DelaySpinner.setMaximum(20.0)
self.DelaySpinner.setSingleStep(0.05)
self.DelaySpinner.setProperty("value", 0.7)
self.DelaySpinner.setObjectName("DelaySpinner")
self.horizontalLayout.addWidget(self.DelaySpinner)
spacerItem = QtGui.QSpacerItem(5, 20, QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.PauseButton = QtGui.QPushButton(self.centralwidget)
self.PauseButton.setEnabled(False)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.PauseButton.sizePolicy().hasHeightForWidth())
self.PauseButton.setSizePolicy(sizePolicy)
self.PauseButton.setMinimumSize(QtCore.QSize(150, 0))
self.PauseButton.setMaximumSize(QtCore.QSize(150, 16777215))
self.PauseButton.setObjectName("PauseButton")
self.horizontalLayout.addWidget(self.PauseButton)
self.StartButton = QtGui.QPushButton(self.centralwidget)
self.StartButton.setEnabled(False)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.StartButton.sizePolicy().hasHeightForWidth())
self.StartButton.setSizePolicy(sizePolicy)
self.StartButton.setMinimumSize(QtCore.QSize(150, 0))
self.StartButton.setMaximumSize(QtCore.QSize(150, 16777215))
self.StartButton.setObjectName("StartButton")
self.horizontalLayout.addWidget(self.StartButton)
spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem1)
self.ClearButton = QtGui.QPushButton(self.centralwidget)
self.ClearButton.setObjectName("ClearButton")
self.horizontalLayout.addWidget(self.ClearButton)
self.gridLayout.addLayout(self.horizontalLayout, 4, 0, 1, 1)
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.label = QtGui.QLabel(self.centralwidget)
self.label.setEnabled(True)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label.sizePolicy().hasHeightForWidth())
self.label.setSizePolicy(sizePolicy)
self.label.setMinimumSize(QtCore.QSize(70, 0))
self.label.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label.setObjectName("label")
self.horizontalLayout_2.addWidget(self.label)
self.URLBox = QtGui.QLineEdit(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.URLBox.sizePolicy().hasHeightForWidth())
self.URLBox.setSizePolicy(sizePolicy)
self.URLBox.setMinimumSize(QtCore.QSize(400, 0))
self.URLBox.setMaximumSize(QtCore.QSize(600, 16777215))
self.URLBox.setObjectName("URLBox")
self.horizontalLayout_2.addWidget(self.URLBox)
self.CountSpinner = QtGui.QSpinBox(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.CountSpinner.sizePolicy().hasHeightForWidth())
self.CountSpinner.setSizePolicy(sizePolicy)
self.CountSpinner.setMinimumSize(QtCore.QSize(50, 0))
self.CountSpinner.setMaximumSize(QtCore.QSize(100, 20))
self.CountSpinner.setMinimum(1)
self.CountSpinner.setMaximum(3000)
self.CountSpinner.setProperty("value", 20)
self.CountSpinner.setObjectName("CountSpinner")
self.horizontalLayout_2.addWidget(self.CountSpinner)
self.CurrencySelector = QtGui.QComboBox(self.centralwidget)
self.CurrencySelector.setObjectName("CurrencySelector")
for currency in FloatGetter.CURRENCY:
self.CurrencySelector.addItem("")
self.horizontalLayout_2.addWidget(self.CurrencySelector)
self.RetrieveButton = QtGui.QPushButton(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.RetrieveButton.sizePolicy().hasHeightForWidth())
self.RetrieveButton.setSizePolicy(sizePolicy)
self.RetrieveButton.setMinimumSize(QtCore.QSize(85, 0))
self.RetrieveButton.setMaximumSize(QtCore.QSize(85, 16777215))
self.RetrieveButton.setToolTip("")
self.RetrieveButton.setObjectName("RetrieveButton")
self.horizontalLayout_2.addWidget(self.RetrieveButton)
spacerItem2 = QtGui.QSpacerItem(5, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem2)
self.gridLayout.addLayout(self.horizontalLayout_2, 1, 0, 1, 1)
spacerItem3 = QtGui.QSpacerItem(20, 10, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
self.gridLayout.addItem(spacerItem3, 2, 0, 1, 1)
self.label_4 = QtGui.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setItalic(True)
self.label_4.setFont(font)
self.label_4.setObjectName("label_4")
self.gridLayout.addWidget(self.label_4, 0, 0, 1, 1)
self.verticalLayout.addLayout(self.gridLayout)
self.progressBar_2 = QtGui.QProgressBar(self.centralwidget)
self.progressBar_2.setEnabled(True)
self.progressBar_2.setMaximumSize(QtCore.QSize(16777215, 15))
self.progressBar_2.setProperty("value", 0)
self.progressBar_2.setTextVisible(False)
self.progressBar_2.setInvertedAppearance(False)
self.progressBar_2.setObjectName("progressBar_2")
self.verticalLayout.addWidget(self.progressBar_2)
self.tableWidget = QtGui.QTableWidget(self.centralwidget)
self.tableWidget.setEnabled(True)
self.tableWidget.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)
self.tableWidget.setAlternatingRowColors(True)
self.tableWidget.setIconSize(QtCore.QSize(0, 0))
self.tableWidget.setHorizontalScrollMode(QtGui.QAbstractItemView.ScrollPerPixel)
self.tableWidget.setCornerButtonEnabled(True)
self.tableWidget.setColumnCount(8)
self.tableWidget.setObjectName("tableWidget")
self.tableWidget.verticalHeader().hide()
item = QtGui.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(0, item)
item = QtGui.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(1, item)
item = QtGui.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(2, item)
item = QtGui.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(3, item)
item = QtGui.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(4, item)
item = QtGui.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(5, item)
item = QtGui.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(6, item)
item = QtGui.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(7, item)
self.tableWidget.horizontalHeader().setDefaultSectionSize(155)
self.tableWidget.horizontalHeader().setMinimumSectionSize(50)
self.tableWidget.horizontalHeader().setStretchLastSection(True)
self.verticalLayout.addWidget(self.tableWidget)
self.verticalLayout_2.addLayout(self.verticalLayout)
self.StatusLabel = QtGui.QLabel(self.centralwidget)
self.StatusLabel.setObjectName("label_5")
self.verticalLayout_2.addWidget(self.StatusLabel)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 867, 21))
self.menubar.setObjectName("menubar")
self.menuHelp = QtGui.QMenu(self.menubar)
self.menuHelp.setObjectName("menuHelp")
self.menuFile = QtGui.QMenu(self.menubar)
self.menuFile.setObjectName("menuFile")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.actionOption1 = QtGui.QAction(MainWindow)
self.actionOption1.setObjectName("actionOption1")
self.actionAbout = QtGui.QAction(MainWindow)
self.actionAbout.setObjectName("actionAbout")
self.actionExport_results_as_csv = QtGui.QAction(MainWindow)
self.actionExport_results_as_csv.setObjectName("actionExport_results_as_csv")
self.actionParseSingle = QtGui.QAction(MainWindow)
self.actionParseSingle.setObjectName("actionParseSingle")
self.changesettings = QtGui.QAction(MainWindow)
self.changesettings.setObjectName("Settings")
self.actionQuit = QtGui.QAction(MainWindow)
self.actionQuit.setObjectName("actionQuit")
self.menuHelp.addAction(self.actionAbout)
self.menuFile.addAction(self.actionExport_results_as_csv)
self.menuFile.addAction(self.actionParseSingle)
self.menuFile.addAction(self.changesettings)
self.menuFile.addSeparator()
self.menuFile.addAction(self.actionQuit)
self.menubar.addAction(self.menuFile.menuAction())
self.menubar.addAction(self.menuHelp.menuAction())
while True:
try:
with open('settings.txt', 'r') as settings:
for line in settings.readlines():
if line.startswith('defaultcurrency='):
self.CurrencySelector.setCurrentIndex(int(line.replace('defaultcurrency=', '')))
continue
if line.startswith('defaultmarketcount='):
self.CountSpinner.setValue(int(line.replace('defaultmarketcount=', '')))
continue
if line.startswith('defaultdelay='):
                            self.DelaySpinner.setValue(float(line.replace('defaultdelay=', '')))  # parse as a float rather than eval() the raw text
continue
if line.startswith('logging='):
self.logging = int(line.replace('logging=', ''))
break
except IOError:
self.logEvent('No settings file found!', forced=True)
create_settings()
continue
self.retranslateUi(MainWindow)
QtCore.QObject.connect(self.actionQuit, QtCore.SIGNAL("activated()"), MainWindow.close)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
QtCore.QObject.connect(self.actionAbout, QtCore.SIGNAL("activated()"), self.ReadMe)
QtCore.QObject.connect(self.changesettings, QtCore.SIGNAL("activated()"), self.ChangeSettings)
QtCore.QObject.connect(self.actionExport_results_as_csv, QtCore.SIGNAL("activated()"), self.ExportCSV)
QtCore.QObject.connect(self.actionParseSingle, QtCore.SIGNAL("activated()"), self.ParseSingle)
QtCore.QObject.connect(self.RetrieveButton, QtCore.SIGNAL("clicked()"), self.RetrieveItems)
QtCore.QObject.connect(self.StartButton, QtCore.SIGNAL("clicked()"), self.ProcessItems)
QtCore.QObject.connect(self.PauseButton, QtCore.SIGNAL("clicked()"), self.Pause)
QtCore.QObject.connect(self.ClearButton, QtCore.SIGNAL("clicked()"), self.ClearTable)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(QtGui.QApplication.translate("MainWindow", "CS:GO Market Float Finder", None, QtGui.QApplication.UnicodeUTF8))
self.label_3.setText(QtGui.QApplication.translate("MainWindow", " Set a time delay (seconds) between getting each skin\'s float value. See README.txt for more info.", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setText(QtGui.QApplication.translate("MainWindow", "Time Delay:", None, QtGui.QApplication.UnicodeUTF8))
self.DelaySpinner.setToolTip(QtGui.QApplication.translate("MainWindow", "Delay between loading float value (seconds)", None, QtGui.QApplication.UnicodeUTF8))
self.PauseButton.setToolTip(QtGui.QApplication.translate("MainWindow", "Pause processing skin floats", None, QtGui.QApplication.UnicodeUTF8))
self.PauseButton.setText(QtGui.QApplication.translate("MainWindow", "Pause", None, QtGui.QApplication.UnicodeUTF8))
self.StartButton.setToolTip(QtGui.QApplication.translate("MainWindow", "Start processing skin floats", None, QtGui.QApplication.UnicodeUTF8))
self.StartButton.setText(QtGui.QApplication.translate("MainWindow", "Start", None, QtGui.QApplication.UnicodeUTF8))
self.ClearButton.setText(QtGui.QApplication.translate("MainWindow", "Clear Table", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("MainWindow", "Market URL:", None, QtGui.QApplication.UnicodeUTF8))
self.URLBox.setToolTip(QtGui.QApplication.translate("MainWindow", "Steam Market URL", None, QtGui.QApplication.UnicodeUTF8))
self.CountSpinner.setToolTip(QtGui.QApplication.translate("MainWindow", "Number of skins to retrieve (1 - 100)", None, QtGui.QApplication.UnicodeUTF8))
self.CurrencySelector.setToolTip(QtGui.QApplication.translate("MainWindow", "Desired currency of skins", None, QtGui.QApplication.UnicodeUTF8))
i = 0
for currency in FloatGetter.CURRENCY:
self.CurrencySelector.setItemText(i, QtGui.QApplication.translate("MainWindow", currency, None, QtGui.QApplication.UnicodeUTF8))
i += 1
self.RetrieveButton.setText(QtGui.QApplication.translate("MainWindow", "Retrieve Items", None, QtGui.QApplication.UnicodeUTF8))
self.label_4.setText(QtGui.QApplication.translate("MainWindow", " Paste Steam Market item URL, then choose number of results to grab. Finally, choose a currency.", None, QtGui.QApplication.UnicodeUTF8))
self.tableWidget.setSortingEnabled(True)
self.tableWidget.horizontalHeaderItem(0).setText(QtGui.QApplication.translate("MainWindow", "Position", None, QtGui.QApplication.UnicodeUTF8))
self.tableWidget.horizontalHeaderItem(1).setText(QtGui.QApplication.translate("MainWindow", "Float Value", None, QtGui.QApplication.UnicodeUTF8))
self.tableWidget.horizontalHeaderItem(2).setText(QtGui.QApplication.translate("MainWindow", "Price", None, QtGui.QApplication.UnicodeUTF8))
self.tableWidget.horizontalHeaderItem(3).setText(QtGui.QApplication.translate("MainWindow", "Listing ID", None, QtGui.QApplication.UnicodeUTF8))
self.tableWidget.horizontalHeaderItem(4).setText(QtGui.QApplication.translate("MainWindow", "Skin Type/Index", None, QtGui.QApplication.UnicodeUTF8))
self.tableWidget.horizontalHeaderItem(5).setText(QtGui.QApplication.translate("MainWindow", "Skin Seed", None, QtGui.QApplication.UnicodeUTF8))
self.tableWidget.horizontalHeaderItem(6).setText(QtGui.QApplication.translate("MainWindow", "Javascript Market Link", None, QtGui.QApplication.UnicodeUTF8))
self.tableWidget.horizontalHeaderItem(7).setText(QtGui.QApplication.translate("MainWindow", "Inspect Link", None, QtGui.QApplication.UnicodeUTF8))
self.StatusLabel.setText(QtGui.QApplication.translate("MainWindow", "", None, QtGui.QApplication.UnicodeUTF8))
self.menuHelp.setTitle(QtGui.QApplication.translate("MainWindow", "Help", None, QtGui.QApplication.UnicodeUTF8))
self.menuFile.setTitle(QtGui.QApplication.translate("MainWindow", "File", None, QtGui.QApplication.UnicodeUTF8))
self.actionOption1.setText(QtGui.QApplication.translate("MainWindow", "Option1", None, QtGui.QApplication.UnicodeUTF8))
self.actionAbout.setText(QtGui.QApplication.translate("MainWindow", "Readme", None, QtGui.QApplication.UnicodeUTF8))
self.actionExport_results_as_csv.setText(QtGui.QApplication.translate("MainWindow", "Export results as .csv", None, QtGui.QApplication.UnicodeUTF8))
self.actionParseSingle.setText(QtGui.QApplication.translate("MainWindow", "Parse Single Item", None, QtGui.QApplication.UnicodeUTF8))
self.changesettings.setText(QtGui.QApplication.translate("MainWindow", "Settings...", None, QtGui.QApplication.UnicodeUTF8))
self.actionQuit.setText(QtGui.QApplication.translate("MainWindow", "Quit", None, QtGui.QApplication.UnicodeUTF8))
def ReadMe(self):
webbrowser.open('README.txt')
def ChangeSettings(self):
webbrowser.open('settings.txt')
def ParseSingle(self):
self.popup = PopupDialog(self)
self.popup.setupUi(self.popup)
self.popup._get_single.connect(self.process_single)
self.popup.exec_()
def process_single(self):
self.WorkerThread.idle = False
self._process_single.emit(True)
def ExportCSV(self):
outname, _ = QtGui.QFileDialog.getSaveFileName(MainWindow, 'Open file', '', 'Comma Separated Values (*.csv)')
with open(outname, 'w') as outfile:
            outfile.write('Position,Float Value,Price (%s),Listing ID,Skin Type/Index,Skin Seed,Javascript Market Link,Inspect Link\n' % self.currency)
for row in xrange(0, self.tableWidget.rowCount()):
col0 = self.tableWidget.item(row, 0)
col1 = self.tableWidget.item(row, 1)
col2 = self.tableWidget.item(row, 2)
col3 = self.tableWidget.item(row, 3)
col4 = self.tableWidget.item(row, 4)
col5 = self.tableWidget.item(row, 5)
col6 = self.tableWidget.item(row, 6)
col7 = self.tableWidget.item(row, 7)
outfile.write('%s,%s,%s,%s,%s,%s,"%s",%s\n' % (col0.text(), col1.text(), col2.text(), col3.text(), col4.text(), col5.text(), col6.text(), col7.text()))
def logEvent(self, text, forced=False):
if self.logging > 0 or forced:
t = time.strftime("%H:%M:%S")
with open('log.txt', 'a') as logfile:
logfile.write('%s: %s\n' % (t, text))
def ClearTable(self):
caution = QtGui.QMessageBox.warning(MainWindow, 'Are you sure?', 'Clearing the table will remove all table data and cancel the skin processing. \nAre you sure you wish to continue?', QtGui.QMessageBox.Yes|QtGui.QMessageBox.No)
if caution == QtGui.QMessageBox.StandardButton.Yes:
self.WorkerThread.idle = False
self.WorkerThread.pause = True
time.sleep(2.5)
while self.tableWidget.rowCount() > 0:
self.tableWidget.removeRow(0)
self.PauseButton.setDisabled(True)
self.StartButton.setEnabled(True)
self.StatusLabel.setText("Table Cleared. Ready to process.")
self.WorkerThread.progresscount = 0
self.WorkerThread.pause = False
def RetrieveItems(self):
url = self.URLBox.displayText()
self.WorkerThread.idle = False
self.WorkerThread.marketdata = None
self.WorkerThread.soldcount = 0
self.WorkerThread.currency = self.CurrencySelector.currentText()
self.WorkerThread.currencysym = FloatGetter.CURRENCY[self.WorkerThread.currency][1]
self.WorkerThread.count = self.CountSpinner.value()
self.WorkerThread.delay = self.DelaySpinner.value()
if url == '':
QtGui.QMessageBox.warning(MainWindow, 'Error', "Please enter a market URL", QtGui.QMessageBox.Close)
else:
self.getMarketData.emit(url)
def SetTable(self, data):
self.tableWidget.setItem(self.tableWidget.rowCount()-1, data[0], QCustomTableWidgetItem(data[1]))
def ProcessItems(self):
self.WorkerThread.idle = False
self.WorkerThread.delay = self.DelaySpinner.value()
self.StartButton.setDisabled(True)
self.PauseButton.setEnabled(True)
self.RetrieveButton.setDisabled(True)
self.StatusLabel.setText("Processing skins...")
self.processItems.emit(object)
def Pause(self):
self.WorkerThread.idle = False
self.StatusLabel.setText("Pausing...")
self.WorkerThread.pause = True
def showError(self, message):
QtGui.QMessageBox.warning(MainWindow, 'Warning!', message, QtGui.QMessageBox.Close)
def showInfo(self, NameMessage):
QtGui.QMessageBox.information(MainWindow, NameMessage[0], NameMessage[1], QtGui.QMessageBox.Close)
def setCurrHeader(self, header):
self.tableWidget.horizontalHeaderItem(2).setText('Price (%s)' % header)
def login(self):
loginPopup = LoginUI(self)
loginPopup._login.connect(self.worker_login)
loginPopup._disconnect_user.connect(self.disconnect_user)
loginPopup.setupUi(loginPopup)
loginPopup.exec_()
def worker_login(self):
self.WorkerThread.idle = False
self._worker_login.emit(True)
def disconnect_user(self):
self.WorkerThread.idle = False
self._disconnect_user.emit(True)
def display_error(self, msg):
QtGui.QMessageBox.warning(QtGui.QWidget(), 'Error', msg, QtGui.QMessageBox.Close)
class WorkerThread(QtCore.QObject):
taskDone = QtCore.Signal(str)
progressSignal = QtCore.Signal(int)
StartEn = QtCore.Signal(bool)
PauseEn = QtCore.Signal(bool)
RetrieveEn = QtCore.Signal(bool)
StartDis = QtCore.Signal(bool)
PauseDis = QtCore.Signal(bool)
RetrieveDis = QtCore.Signal(bool)
NewRow = QtCore.Signal(int)
SetTableItem = QtCore.Signal(list)
TableSorting = QtCore.Signal(bool)
SetStatus = QtCore.Signal(str)
ShowError = QtCore.Signal(str)
ShowInfo = QtCore.Signal(tuple)
SetCurrHeader = QtCore.Signal(str)
display_error = QtCore.Signal(str)
log_event = QtCore.Signal(str)
MainLogin = QtCore.Signal(bool)
single_post_float = QtCore.Signal(str)
single_post_type = QtCore.Signal(str)
single_post_seed = QtCore.Signal(str)
def __init__(self, parent):
QtCore.QObject.__init__(self)
self.parent = parent
self.parent.processItems.connect(self.ProcessItems)
self.parent.getMarketData.connect(self.GetMarketData)
self.parent._process_single.connect(self.process_single)
self.parent._worker_login.connect(self.login)
self.parent._disconnect_user.connect(self.disconnect_user)
self.parent.init_login.connect(self.init_login)
self.progresscount = None
self.marketdata = None
self.singlelink = None
self.delay = None
self.pause = False
self.soldcount = 0
self.currency = None
self.currencysym = None
self.count = 0
self.UserObject = None
self.username = None
self.password = None
self.sharedsecret = None
self.auth_code = None
self.auth_type = None
self.loggedin = False
self.loginstatus = None
self.idle = False
def init_login(self):
self.MainLogin.emit(True)
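    # Spin in a lightweight loop between jobs; callers break it by setting
    # self.idle = False before emitting the signal for the next task.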
def run_idle(self):
self.idle = True
self.log_event.emit('running idle... ')
while self.idle:
            time.sleep(0.00001)
def login(self):
self.log_event.emit('Attempting login...')
if self.UserObject:
self.UserObject.disconnect()
self.UserObject = FloatGetter.User()
if self.auth_type == '2fa':
self.loginstatus = self.UserObject.login(self.username, self.password, two_factor_code=self.auth_code)
elif self.auth_type == 'email':
self.loginstatus = self.UserObject.login(self.username, self.password, authcode=self.auth_code)
else:
self.loginstatus = self.UserObject.login(self.username, self.password)
if self.loginstatus:
self.loggedin = True
if self.loggedin and self.UserObject.client.connection.connected:
self.log_event.emit('Attempting to launch CSGO...')
self.UserObject.csgo.launch()
self.run_idle()
def disconnect_user(self):
self.UserObject.disconnect()
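    # Walk the scraped market data from the current progress position, request
    # econ data for each inspect link through the CSGO game coordinator, and
    # stream the results into the table row by row.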
def ProcessItems(self):
self.log_event.emit('Processing items...')
if not self.username and not self.password:
self.display_error.emit('You must sign in first. Please restart the program.')
return
if not self.UserObject.client.connection.connected:
self.display_error.emit('You are not connected to Steam. Please restart the program.')
self.log_event.emit('Not connected to Steam ' + str(self.UserObject.client.connection.socket))
return
for n in range(self.progresscount, len(self.marketdata)):
time.sleep(self.delay)
if not self.pause:
self.SetStatus.emit('Processing... %s/%s' % (self.progresscount+1, len(self.marketdata)))
skininfo = self.marketdata.items()[n]
inspectlink = skininfo[1][1]
                itemcode = inspectlink.replace('steam://rungame/730/76561202255233023/+csgo_econ_action_preview%20', '').split('A')
# If Market item or Inventory item
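                # The encoded id looks like 'S<owner steamid>A<asset id>D<d param>'
                # for inventory items or 'M<listing id>A<asset id>D<d param>' for
                # market listings, e.g. 'S76561198000000000A123D456' (illustrative).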
if itemcode[0].startswith('S'):
param_s = int(itemcode[0].replace('S',''))
param_m = None
else:
param_m = int(itemcode[0].replace('M',''))
param_s = None
itemAD = itemcode[1].split('D')
param_a = int(itemAD[0])
param_d = int(itemAD[1])
pos = n+1
try:
if param_s:
data = self.UserObject.csgo.requestEconData(param_a, param_d, param_s=param_s)
elif param_m:
data = self.UserObject.csgo.requestEconData(param_a, param_d, param_m=param_m)
else:
self.display_error.emit('Bad inspect link ' + inspectlink)
self.log_event.emit('Bad inspect link ' + inspectlink)
self.run_idle()
return
if type(data) == str:
self.pause = True
self.display_error.emit(data)
self.log_event.emit('EconData response ' + str(data))
continue
paintseed = data.iteminfo.paintseed
paintindex = data.iteminfo.paintindex
paintwear = data.iteminfo.paintwear
skinFloat = FloatGetter.getfloat(paintwear)
floatvalue = Decimal(skinFloat).quantize(Decimal('1.000000000000'))
try:
skinid = 'ID' + str(paintindex)
paintindex = itemIndex.index[skinid]
except KeyError:
pass
except TypeError as e:
self.PauseDis.emit(True)
self.StartEn.emit(True)
self.RetrieveEn.emit(True)
self.log_event.emit('TypeError: ' + str(e.message))
self.run_idle()
return
price = Decimal(skininfo[1][2])
listingid = skininfo[1][0]
assetid = skininfo[0]
javascript = "javascript:BuyMarketListing('listing', '%s', 730, '2', '%s')" % (listingid, assetid)
self.TableSorting.emit(False)
self.NewRow.emit(n)
self.SetTableItem.emit([0, pos])
self.SetTableItem.emit([1, floatvalue])
self.SetTableItem.emit([2, price])
self.SetTableItem.emit([3, listingid])
self.SetTableItem.emit([4, paintindex])
self.SetTableItem.emit([5, paintseed])
self.SetTableItem.emit([6, javascript])
self.SetTableItem.emit([7, inspectlink])
self.TableSorting.emit(True)
self.progresscount += 1
self.progressSignal.emit(int(float(self.progresscount/float(len(self.marketdata)))*100))
else:
self.pause = False
self.StartEn.emit(True)
self.PauseDis.emit(True)
self.RetrieveEn.emit(True)
self.progressSignal.emit(0)
self.SetStatus.emit('Processing paused, press "Start" to continue. If you want to process a different set of data, clear the table first.')
self.run_idle()
return
self.SetStatus.emit('Processing Finished. Clear table before starting a new process.')
self.StartEn.emit(True)
self.PauseDis.emit(True)
self.RetrieveEn.emit(True)
self.log_event.emit('Finished processing')
self.run_idle()
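    # Scrape market listings in pages of up to 100 items, merging each page into
    # self.marketdata (skipping duplicate keys) and tallying sold listings, then
    # report to the user how many skins were found.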
@QtCore.Slot(str)
def GetMarketData(self, url):
initialcount = self.count
start = 0
iteration = 1
self.SetStatus.emit('Gathering Data...')
self.log_event.emit('Gathering Market Data from %s' % url)
while self.count > 100:
self.count -= 100
tempdata, tempsold = FloatGetter.getMarketItems(url, 100, self.currency, start)
if type(tempdata) != str:
if self.marketdata:
newtempdict = OrderedDict()
for k, e in self.marketdata.items()+tempdata.items():
if k in tempdata.keys() and k in self.marketdata.keys():
pass
else:
newtempdict.setdefault(k, e)
self.marketdata = newtempdict
else:
self.marketdata = tempdata
self.soldcount += int(tempsold)
iteration += 1
start += 100
if initialcount > 2000:
time.sleep(1)
else:
tempdata, tempsold = FloatGetter.getMarketItems(url, self.count, self.currency, start)
if type(tempdata) != str:
if self.marketdata:
newtempdict = OrderedDict()
for k, e in self.marketdata.items()+tempdata.items():
if k in tempdata.keys() and k in self.marketdata.keys():
pass
else:
newtempdict.setdefault(k, e)
self.marketdata = newtempdict
else:
self.marketdata = tempdata
self.soldcount += int(tempsold)
if type(self.marketdata) != OrderedDict:
self.ShowError.emit(tempdata)
self.log_event.emit('Error gathering Market Data: ' + str(tempdata))
self.run_idle()
return
else:
if len(self.marketdata) + self.soldcount < initialcount:
                message = "Found %s available skins and %s sold skins. \nThe other %s skins did not exist; you may retry, or process the current data (%s skins)." % (len(self.marketdata), self.soldcount, initialcount-(len(self.marketdata)+self.soldcount), len(self.marketdata))
self.ShowInfo.emit(('Information', message))
self.StartEn.emit(True)
                self.SetStatus.emit("%s skins retrieved. Ready to start processing. Estimated processing time, with a %ss delay, is %s seconds." % (len(self.marketdata), self.delay, self.delay*len(self.marketdata)))
self.SetCurrHeader.emit(self.currencysym)
elif len(self.marketdata) < 1:
                message = "Found %s available skins and %s sold skins. \nNo skins to process; please check that the market listing has available items." % (len(self.marketdata), self.soldcount)
self.ShowError.emit(message)
self.SetStatus.emit('Ready')
else:
message = "Successfully found %s available skins and %s sold skins. Any sold skins will not be processed. \nClose this message and press 'Start'." % (len(self.marketdata), self.soldcount)
self.ShowInfo.emit(('Success!', message))
self.StartEn.emit(True)
self.SetCurrHeader.emit(self.currencysym)
                self.SetStatus.emit("%s skins retrieved. Ready to start processing. Estimated processing time, with a %ss delay, is %s seconds." % (len(self.marketdata), self.delay, self.delay*len(self.marketdata)))
self.log_event.emit('Successfully retrieved %s skins.' % len(self.marketdata))
self.run_idle()
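    # Resolve a single inspect link (from the "Parse Single Item" dialog) and
    # push its float value, paint index and seed back to the dialog via signals.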
def process_single(self):
self.log_event.emit('Processing item...')
if str(self.singlelink).startswith('steam://rungame/730/'):
if not self.username and not self.password:
self.display_error.emit('You must sign in first. Please restart the program.')
return
if not self.UserObject.client.connection.connected:
self.display_error.emit('You are not connected to Steam. Please restart the program.')
self.log_event.emit('Not connected to Steam ' + str(self.UserObject.client.connection.socket))
return
            itemcode = self.singlelink.replace('steam://rungame/730/76561202255233023/+csgo_econ_action_preview%20', '').split('A')
# If Market item or Inventory item
if itemcode[0].startswith('S'):
param_s = int(itemcode[0].replace('S',''))
param_m = None
else:
param_m = int(itemcode[0].replace('M',''))
param_s = None
itemAD = itemcode[1].split('D')
param_a = int(itemAD[0])
param_d = int(itemAD[1])
try:
if param_s:
data = self.UserObject.csgo.requestEconData(param_a, param_d, param_s=param_s)
elif param_m:
data = self.UserObject.csgo.requestEconData(param_a, param_d, param_m=param_m)
else:
self.display_error.emit('Bad inspect link ' + self.singlelink)
self.log_event.emit('Bad inspect link ' + self.singlelink)
self.run_idle()
return
                if type(data) == str:
                    # A string response is an error message; bail out before touching iteminfo
                    self.display_error.emit(data)
                    self.log_event.emit('EconData response ' + str(data))
                    self.run_idle()
                    return
paintseed = data.iteminfo.paintseed
paintindex = data.iteminfo.paintindex
paintwear = data.iteminfo.paintwear
skinFloat = FloatGetter.getfloat(paintwear)
floatvalue = Decimal(skinFloat).quantize(Decimal('1.000000000000'))
try:
skinid = 'ID' + str(paintindex)
paintindex = itemIndex.index[skinid]
except KeyError:
pass
self.single_post_float.emit(str(floatvalue))
                # Emit the (possibly name-mapped) paint index as the skin type
                self.single_post_type.emit(str(paintindex))
self.single_post_seed.emit(str(paintseed))
except TypeError as e:
self.log_event.emit('TypeError: ' + str(e.message))
self.run_idle()
return
else:
            self.display_error.emit('Please enter a valid in-game inspect link.')
self.run_idle()
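# Table item that sorts numerically whenever its text parses as a float, so
# columns such as price and float value order correctly instead of lexically.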
class QCustomTableWidgetItem(QtGui.QTableWidgetItem):
def __init__(self, value):
super(QCustomTableWidgetItem, self).__init__(str('%s' % value))
def __lt__(self, other):
if isinstance(other, QCustomTableWidgetItem):
try:
selfDataValue = float(self.data(QtCore.Qt.EditRole))
otherDataValue = float(other.data(QtCore.Qt.EditRole))
return selfDataValue < otherDataValue
except ValueError:
                # Value cannot be converted to float, so fall back to comparing as str/unicode
selfDataValue = self.data(QtCore.Qt.EditRole)
otherDataValue = other.data(QtCore.Qt.EditRole)
return selfDataValue < otherDataValue
else:
return QtGui.QTableWidgetItem.__lt__(self, other)
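# "Parse Single Item" dialog: takes one inspect link and displays the float
# value, skin type and seed returned by the worker thread.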
class PopupDialog(QtGui.QDialog):
_get_single = QtCore.Signal(bool)
def __init__(self, parent=None):
super(PopupDialog, self).__init__()
self.callback = parent
self.setWindowFlags(QtCore.Qt.WindowSystemMenuHint)
self.callback.WorkerThread.single_post_float.connect(lambda x: self.post_float(x))
self.callback.WorkerThread.single_post_type.connect(lambda x: self.post_type(x))
self.callback.WorkerThread.single_post_seed.connect(lambda x: self.post_seed(x))
def setupUi(self, Form):
Form.setObjectName("Parse Single Item")
Form.resize(600, 71)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("logo.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
Form.setWindowIcon(icon)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(Form.sizePolicy().hasHeightForWidth())
Form.setSizePolicy(sizePolicy)
Form.setMaximumSize(QtCore.QSize(800, 71))
self.verticalLayout = QtGui.QVBoxLayout(Form)
self.verticalLayout.setObjectName("verticalLayout")
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.label = QtGui.QLabel(Form)
self.label.setObjectName("label")
self.horizontalLayout.addWidget(self.label)
self.InspectLinkBox = QtGui.QLineEdit(Form)
self.InspectLinkBox.setObjectName("InspectLinkBox")
self.horizontalLayout.addWidget(self.InspectLinkBox)
self.GetValue = QtGui.QPushButton(Form)
self.GetValue.setObjectName("GetValue")
self.horizontalLayout.addWidget(self.GetValue)
self.verticalLayout.addLayout(self.horizontalLayout)
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem)
self.label_2 = QtGui.QLabel(Form)
self.label_2.setObjectName("label_2")
self.horizontalLayout_2.addWidget(self.label_2)
self.ResultBox = QtGui.QLineEdit(Form)
self.ResultBox.setText("")
self.ResultBox.setAlignment(QtCore.Qt.AlignCenter)
self.ResultBox.setReadOnly(True)
self.ResultBox.setObjectName("ResultBox")
self.horizontalLayout_2.addWidget(self.ResultBox)
self.label_3 = QtGui.QLabel(Form)
self.label_3.setObjectName("label_3")
self.horizontalLayout_2.addWidget(self.label_3)
self.SkinTypeBox = QtGui.QLineEdit(Form)
self.SkinTypeBox.setText("")
self.SkinTypeBox.setAlignment(QtCore.Qt.AlignCenter)
self.SkinTypeBox.setReadOnly(True)
self.SkinTypeBox.setObjectName("SkinTypeBox")
self.horizontalLayout_2.addWidget(self.SkinTypeBox)
self.label_4 = QtGui.QLabel(Form)
self.label_4.setObjectName("label_4")
self.horizontalLayout_2.addWidget(self.label_4)
self.SkinSeedBox = QtGui.QLineEdit(Form)
self.SkinSeedBox.setText("")
self.SkinSeedBox.setAlignment(QtCore.Qt.AlignCenter)
self.SkinSeedBox.setReadOnly(True)
self.SkinSeedBox.setObjectName("SkinSeedBox")
self.horizontalLayout_2.addWidget(self.SkinSeedBox)
spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem1)
self.verticalLayout.addLayout(self.horizontalLayout_2)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
QtCore.QObject.connect(self.GetValue, QtCore.SIGNAL("clicked()"), self.GetSingle)
def retranslateUi(self, Form):
Form.setWindowTitle(QtGui.QApplication.translate("Form", "Parse Single Item", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("Form", "Inspect Link", None, QtGui.QApplication.UnicodeUTF8))
self.GetValue.setText(QtGui.QApplication.translate("Form", "Get Value", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setText(QtGui.QApplication.translate("Form", "Skin Float Value:", None, QtGui.QApplication.UnicodeUTF8))
self.label_3.setText(QtGui.QApplication.translate("Form", "Skin Type:", None, QtGui.QApplication.UnicodeUTF8))
self.label_4.setText(QtGui.QApplication.translate("Form", "Skin Seed:", None, QtGui.QApplication.UnicodeUTF8))
def GetSingle(self):
self.callback.WorkerThread.singlelink = self.InspectLinkBox.displayText()
self._get_single.emit(True)
    def post_float(self, floatvalue):
        self.ResultBox.setText(floatvalue)
    def post_type(self, skintype):
        self.SkinTypeBox.setText(skintype)
def post_seed(self, seed):
self.SkinSeedBox.setText(seed)
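# Steam login dialog. Credentials (and an optional shared secret used to
# generate 2FA codes) can be pre-filled from and persisted to settings.txt.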
class LoginUI(QtGui.QDialog):
_login = QtCore.Signal(bool)
_disconnect_user = QtCore.Signal(bool)
def __init__(self, parent=None):
super(LoginUI, self).__init__()
self.callback = parent
self.setWindowFlags(QtCore.Qt.WindowSystemMenuHint)
self.overwritten = False
self.sharedsecret = None
def setupUi(self, Form):
Form.setObjectName("Login to Steam")
Form.resize(255, 150)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("logo.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
Form.setWindowIcon(icon)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(Form.sizePolicy().hasHeightForWidth())
Form.setSizePolicy(sizePolicy)
Form.setMinimumSize(QtCore.QSize(255, 150))
Form.setMaximumSize(QtCore.QSize(255, 150))
self.verticalLayout = QtGui.QVBoxLayout(Form)
self.verticalLayout.setObjectName("verticalLayout")
self.label_3 = QtGui.QLabel(Form)
self.label_3.setAlignment(QtCore.Qt.AlignCenter)
self.label_3.setObjectName("label_3")
self.verticalLayout.addWidget(self.label_3)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.label = QtGui.QLabel(Form)
self.label.setObjectName("label")
self.horizontalLayout.addWidget(self.label)
self.UsernameBox = QtGui.QLineEdit(Form)
self.UsernameBox.setObjectName("UsernameBox")
self.horizontalLayout.addWidget(self.UsernameBox)
self.verticalLayout.addLayout(self.horizontalLayout)
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.label_2 = QtGui.QLabel(Form)
self.label_2.setMargin(1)
self.label_2.setObjectName("label_2")
self.horizontalLayout_2.addWidget(self.label_2)
self.PasswordBox = QtGui.QLineEdit(Form)
self.PasswordBox.setInputMask("")
self.PasswordBox.setText("")
self.PasswordBox.setFrame(True)
self.PasswordBox.setEchoMode(QtGui.QLineEdit.Password)
self.PasswordBox.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.PasswordBox.setReadOnly(False)
self.PasswordBox.setPlaceholderText("")
self.PasswordBox.setObjectName("PasswordBox")
self.horizontalLayout_2.addWidget(self.PasswordBox)
self.verticalLayout.addLayout(self.horizontalLayout_2)
self.horizontalLayout_3 = QtGui.QHBoxLayout()
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
spacerItem = QtGui.QSpacerItem(59, 20, QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_3.addItem(spacerItem)
self.RememberBox = QtGui.QCheckBox(Form)
self.RememberBox.setObjectName("RememberBox")
self.horizontalLayout_3.addWidget(self.RememberBox)
self.verticalLayout.addLayout(self.horizontalLayout_3)
self.horizontalLayout_4 = QtGui.QHBoxLayout()
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_4.addItem(spacerItem1)
self.LoginButton = QtGui.QPushButton(Form)
self.LoginButton.setObjectName("LoginButton")
self.horizontalLayout_4.addWidget(self.LoginButton)
spacerItem2 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_4.addItem(spacerItem2)
self.verticalLayout.addLayout(self.horizontalLayout_4)
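        # settings.txt is expected to contain simple key=value lines, e.g.
        #   username=<steam username>
        #   password=<steam password>
        #   sharedsecret=<optional Steam Guard shared secret>
        # If the file is missing it is created by create_settings() and re-read.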
while True:
try:
with open('settings.txt', 'r') as settings:
for line in settings.readlines():
if line.startswith('username='):
self.UsernameBox.setText(line.replace('username=', '').replace('\n',''))
self.RememberBox.setChecked(True)
if line.startswith('password='):
self.PasswordBox.setText(line.replace('password=', ''))
if line.startswith('sharedsecret='):
self.sharedsecret = line.replace('sharedsecret=', '')
self.callback.sharedsecret = self.sharedsecret
break
except IOError:
self.callback.logEvent('No settings file found!', forced=True)
create_settings()
continue
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
QtCore.QObject.connect(self.LoginButton, QtCore.SIGNAL("clicked()"), self.login)
def retranslateUi(self, Form):
Form.setWindowTitle(QtGui.QApplication.translate("Form", "Login to Steam", None, QtGui.QApplication.UnicodeUTF8))
self.label_3.setText(QtGui.QApplication.translate("Form", "Sign in to Steam", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("Form", "Username:", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setText(QtGui.QApplication.translate("Form", "Password:", None, QtGui.QApplication.UnicodeUTF8))
self.RememberBox.setText(QtGui.QApplication.translate("Form", "Remember details", None, QtGui.QApplication.UnicodeUTF8))
self.LoginButton.setText(QtGui.QApplication.translate("Form", "Login", None, QtGui.QApplication.UnicodeUTF8))
def callback_login(self):
self._login.emit(True)
def login(self):
self.callback.logEvent('Attempting initial login...')
username = self.UsernameBox.text().encode('ascii')
password = self.PasswordBox.text().encode('ascii')
remember = self.RememberBox.isChecked()
if remember:
while True:
try:
with open('settings.txt', 'r') as settings:
data = settings.readlines()
for num, line in enumerate(data):
if line.startswith('username='):
data[num] = line.replace(line, 'username='+username+'\n')
if line.startswith('password='):
data[num] = line.replace(line, 'password='+password)
self.overwritten = True
with open('settings.txt', 'w') as settings:
settings.writelines(data)
                        if not self.overwritten:
                            # Credentials were not already present in the file; append them
                            with open('settings.txt', 'a') as settings:
                                settings.writelines(['\nusername='+username, '\npassword='+password])
break
except IOError:
self.callback.logEvent('No settings file found!', forced=True)
create_settings()
if username and password:
try:
if self.sharedsecret:
self.callback.WorkerThread.username = username
self.callback.WorkerThread.password = password
self.callback.WorkerThread.auth_code = generateAuthCode(self.sharedsecret)
self.callback.WorkerThread.auth_type = '2fa'
self._login.emit(True)
else:
self.callback.WorkerThread.username = username
self.callback.WorkerThread.password = password
self.callback.WorkerThread.auth_code = None
self.callback.WorkerThread.auth_type = None
self._login.emit(True)
time.sleep(1.7)
loginstatus = self.callback.WorkerThread.loginstatus
self.callback.logEvent('Login attempt response ' + str(loginstatus))
if loginstatus != True:
if loginstatus == 5:
QtGui.QMessageBox.warning(self, 'Error', 'Incorrect password or username, or too many login attempts.', QtGui.QMessageBox.Close)
elif loginstatus == 63 or loginstatus == 85 or loginstatus == 88:
self._disconnect_user.emit(True)
authPopup = AuthUI(parent=self, authstatus=loginstatus)
authPopup.setupUi(authPopup)
authPopup._login.connect(self.callback_login)
authPopup.exec_()
self.close()
else:
self.close()
QtGui.QMessageBox.information(self, 'Success!', 'Signed in to Steam.', QtGui.QMessageBox.Close)
except socket_error as serr:
self.callback.logEvent('Login socket error ' + str(serr.errno))
if serr.errno == WSAEHOSTUNREACH:
QtGui.QMessageBox.warning(self, 'Error', 'Could not connect to Steam.', QtGui.QMessageBox.Close)
else:
QtGui.QMessageBox.warning(self, 'Error', 'Socket error ' + str(serr.errno), QtGui.QMessageBox.Close)
else:
QtGui.QMessageBox.warning(self, 'Error', 'Please enter your username and password.', QtGui.QMessageBox.Close)
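# Dialog shown when Steam asks for a second factor: an e-mailed auth code
# (status 63) or a mobile Steam Guard 2FA code (status 85/88).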
class AuthUI(QtGui.QDialog):
_login = QtCore.Signal(bool)
def __init__(self, parent=None, authstatus=0):
self.callback = parent
        self.authstatus = authstatus  # 63 = email auth code required, 85 = 2FA code required, 88 = 2FA code incorrect
super(AuthUI, self).__init__()
self.setWindowFlags(QtCore.Qt.WindowSystemMenuHint)
def setupUi(self, Form):
Form.setObjectName("Authenticate")
Form.resize(220, 105)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("logo.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
Form.setWindowIcon(icon)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(Form.sizePolicy().hasHeightForWidth())
Form.setSizePolicy(sizePolicy)
Form.setMinimumSize(QtCore.QSize(220, 105))
Form.setMaximumSize(QtCore.QSize(220, 105))
self.verticalLayout = QtGui.QVBoxLayout(Form)
self.verticalLayout.setObjectName("verticalLayout")
self.label_3 = QtGui.QLabel(Form)
self.label_3.setAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignTop)
self.label_3.setWordWrap(True)
self.label_3.setObjectName("label_3")
self.verticalLayout.addWidget(self.label_3)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.label = QtGui.QLabel(Form)
self.label.setObjectName("label")
self.horizontalLayout.addWidget(self.label)
self.AuthBox = QtGui.QLineEdit(Form)
self.AuthBox.setObjectName("AuthBox")
self.horizontalLayout.addWidget(self.AuthBox)
self.verticalLayout.addLayout(self.horizontalLayout)
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem)
self.AuthButton = QtGui.QPushButton(Form)
self.AuthButton.setObjectName("AuthButton")
self.horizontalLayout_2.addWidget(self.AuthButton)
spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem1)
self.verticalLayout.addLayout(self.horizontalLayout_2)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
QtCore.QObject.connect(self.AuthButton, QtCore.SIGNAL("clicked()"), self.auth)
def retranslateUi(self, Form):
Form.setWindowTitle(QtGui.QApplication.translate("Form", "Authenticate", None, QtGui.QApplication.UnicodeUTF8))
if self.authstatus == 63:
self.label_3.setText(QtGui.QApplication.translate("Form", "Steam has sent an authentication code to your email, please enter it below.", None, QtGui.QApplication.UnicodeUTF8))
elif self.authstatus == 85:
self.label_3.setText(QtGui.QApplication.translate("Form", "Please enter your Steam Guard mobile 2FA code.", None, QtGui.QApplication.UnicodeUTF8))
elif self.authstatus == 88:
self.label_3.setText(QtGui.QApplication.translate("Form", "2FA code incorrect, try again.", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("Form", "Auth Code:", None, QtGui.QApplication.UnicodeUTF8))
self.AuthButton.setText(QtGui.QApplication.translate("Form", "OK", None, QtGui.QApplication.UnicodeUTF8))
def auth(self):
authcode = self.AuthBox.text().encode('ascii')
self.callback.callback.logEvent('Authentication response ' + str(self.authstatus))
if self.authstatus == 85 or self.authstatus == 88:
self.callback.callback.WorkerThread.auth_code = authcode
self.callback.callback.WorkerThread.auth_type = '2fa'
self._login.emit(True)
else:
self.callback.callback.WorkerThread.auth_code = authcode
self.callback.callback.WorkerThread.auth_type = 'email'
self._login.emit(True)
time.sleep(1.7)
loginstatus = self.callback.callback.WorkerThread.loginstatus
self.callback.callback.logEvent('Post-auth login attempt response ' + str(loginstatus))
if loginstatus == True:
self.close()
QtGui.QMessageBox.information(self, 'Success!', 'Signed in to Steam.', QtGui.QMessageBox.Close)
elif loginstatus == 65 or loginstatus == 88:
QtGui.QMessageBox.warning(self, 'Error', 'Incorrect auth code.', QtGui.QMessageBox.Close)
elif loginstatus == 63:
QtGui.QMessageBox.warning(self, 'Error', 'Please enter auth code.', QtGui.QMessageBox.Close)
elif loginstatus == 85:
QtGui.QMessageBox.warning(self, 'Error', 'Please enter your mobile 2FA code.', QtGui.QMessageBox.Close)
elif loginstatus == 5:
QtGui.QMessageBox.warning(self, 'Error', 'Auth failed: InvalidPassword. Maybe too many login attempts recently, try later.', QtGui.QMessageBox.Close)
else:
QtGui.QMessageBox.warning(self, 'Error', 'Auth failed with error %s.' % str(loginstatus), QtGui.QMessageBox.Close)
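# Application entry point: build the Qt application, construct the main window
# UI and hand control to the Qt event loop.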
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
MainWindow = QtGui.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
QtCore.QThread.currentThread().setObjectName('main')
MainWindow.show()
sys.exit(app.exec_())