repo_name (string, 5-92 chars) | path (string, 4-232 chars) | copies (18 classes) | size (string, 4-7 chars) | content (string, 736-1.04M chars) | license (15 classes) | hash (int64, -9,222,983,980,000,580,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
enixdark/rabbitmq-py | ria/monitor/rb_ping.py | 1 | 1206 |
import sys
from urlparse import urlparse
from optparse import OptionParser
from config import *
from rb import AMQP
if __name__ == "__main__":
opt_parse = OptionParser()
opt_parse.add_option('-m', '--message', dest = 'message',
help = "message to send to rabbit server")
    opt_parse.add_option('-b', '--host', dest = 'hostname',
                         help = "rabbit server hostname")
    opt_parse.add_option('-p', '--port', dest = 'port',
                         help = "rabbit server port")
opt_parse.add_option('-u', '--user', dest = 'username',
help = "username")
    opt_parse.add_option('-P', '--pass', dest = 'password',
                         help = "password")
params = opt_parse.parse_args()[0]
if params.hostname == None and len(sys.argv) > 1:
params = urlparse(sys.argv[1])
try:
        USERNAME = params.username if params.username != None else USERNAME
        PASSWORD = params.password if params.password != None else PASSWORD
except:
pass
AMQP_HOST = params.hostname
AMQP_PORT = int(params.port)
try:
amqp = AMQP(USERNAME,PASSWORD,AMQP_HOST,AMQP_PORT,PATH)
amqp.connect()
except Exception, e:
print "CRITICAL: Could not connect to %s:%s!" % (AMQP_HOST, AMQP_PORT)
exit(EXIT_CRITICAL)
print "OK: Connect to %s:%s successful." % (AMQP_HOST, AMQP_PORT)
exit(EXIT_OK)
| mit | -1,848,245,385,236,411,400 | 32.5 | 72 | 0.665008 | false |
LittleBun/Personal | ics632/tutorial_sim_grid/topic2/generate_xml_bintree_and_hostfile.py | 1 | 5081 |
#!/usr/bin/env python2.7
import sys
import os
import math
# Link parameters
link_latency = "10us"
link_bandwidth = 10
link_bandwidth_unit = "Gbps"
# Convenient math wrappers
def floor(x):
return int(math.floor(x))
def ceil(x):
return int(math.ceil(x))
def pow2(x):
return int(math.pow(2,x))
# XML generation functions
def issueHead():
head = ("<?xml version='1.0'?>\n"
"<!DOCTYPE platform SYSTEM \"http://simgrid.gforge.inria.fr/simgrid/simgrid.dtd\">\n"
"<platform version=\"4\">\n\n")
config_clause = ("<!-- WARNING: This <config></config> clause below\n"
"makes it so that NO COMPUTATION TIME is simulated. This is because\n"
"in this module, for pedagogic purposes, we don't want to muddy the\n"
"(simulation) waters with computational times. As a results, this\n"
"XML platform file may not be suitable for running other\n"
"simulations, unless you remove the <config></config> clause.\n"
"-->\n"
"<config>\n"
"<prop id=\"smpi/simulate-computation\" value=\"0\"></prop>\n"
"<prop id=\"smpi/running-power\" value=\"200000000000\"></prop>\n"
"</config>\n\n")
AS_head = "<AS id=\"AS0\" routing=\"Full\">\n"
return head + config_clause + AS_head
def issueTail():
return "</AS>\n</platform>\n"
def issueLink1(x):
return " <link id=\"link-"+str(x)+"\" latency=\""+str(link_latency)+"\" bandwidth=\""+str(link_bandwidth)+link_bandwidth_unit+"\"/>\n"
def issueLink2(x,y):
return " <link id=\"link-"+str(x)+"-"+str(y)+"\" latency=\""+str(link_latency)+"\" bandwidth=\""+str(link_bandwidth)+link_bandwidth_unit+"\"/>\n"
def issueLink3(x,y,bw):
return " <link id=\"link-"+str(x)+"-"+str(y)+"\" latency=\""+str(link_latency)+"\" bandwidth=\""+str(bw)+link_bandwidth_unit+"\"/>\n"
def issueHost(index):
return " <host id=\"host-"+str(index)+".hawaii.edu\" speed=\"200Gf\"/>\n"
def issueRouteHead(index1, index2):
return " <route src=\"host-"+str(index1)+".hawaii.edu\" dst=\"host-"+str(index2)+".hawaii.edu\">\n"
def issueRouteTail():
return " </route>\n"
def issueRouteLink1(x):
return "\t<link_ctn id=\"link-"+str(x)+"\"/>\n"
def issueRouteLink2(x,y):
return "\t<link_ctn id=\"link-"+str(x)+"-"+str(y)+"\"/>\n"
######################################################################
# Parse command-line arguments
if (len(sys.argv) != 2):
    print >> sys.stderr, "Usage: "+sys.argv[0]+" <num hosts>\n"
    print >> sys.stderr, "  Will generate a bintree_<num hosts>.xml and hostfile_<num hosts>.txt file\n"
    exit(1)
num_hosts = int(sys.argv[1])
###############################################################
# Generate Binary Tree XML file
filename = "./bintree_"+str(num_hosts)+".xml"
fh = open(filename, 'w')
fh.write(issueHead())
# Create all hosts and links
for i in range(0,num_hosts):
fh.write(issueHost(i))
if (i*2+1 < num_hosts):
fh.write(issueLink2(i,i*2+1))
if (i*2+2 < num_hosts):
fh.write(issueLink2(i,i*2+2))
# Create all routes
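# Illustrative note (added for clarity, not generated into the XML): with the
# parent of host k at floor((k-1)/2), each route below is built by walking host j
# up to host i's level, then walking both up to their common ancestor, and finally
# descending back down to host i. For example, with num_hosts >= 7 the route from
# host 6 to host 1 is issued as link-2-6 (6 up to 2), link-0-2 (2 up to the common
# ancestor 0) and link-0-1 (0 down to 1).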
for i in range(0,num_hosts):
level_i = floor(math.log(1+i,2))
for j in range(i+1,num_hosts):
fh.write(issueRouteHead(j,i))
        # Host j is at the same or a lower (deeper) level than host i
level_j = floor(math.log(1+j,2))
current_host_path_j = j
# Go up to the same level of that of host i
for l in range(level_j,level_i,-1):
parent_host = floor(float(current_host_path_j-1)/2)
fh.write(issueRouteLink2(min(current_host_path_j,parent_host),max(current_host_path_j,parent_host)))
current_host_path_j = parent_host
# Find the common ancestor
current_host_path_i = i
while (current_host_path_j != current_host_path_i):
fh.write(issueRouteLink2(min(current_host_path_j,floor(float(current_host_path_j-1)/2)), max(current_host_path_j,floor(float(current_host_path_j-1)/2))))
current_host_path_i = floor(float(current_host_path_i-1)/2)
current_host_path_j = floor(float(current_host_path_j-1)/2)
common_ancestor = current_host_path_j
# Go back from i to the common ancestor
current_host_path_i = i
sequence = []
sequence.append(current_host_path_i)
while (current_host_path_i != common_ancestor):
parent_host = floor(float(current_host_path_i-1)/2)
sequence.append(parent_host)
current_host_path_i = parent_host
# Issue links in the common ancestor -> i order
sequence = sequence[::-1]
for k in range(0,len(sequence)-1):
fh.write(issueRouteLink2(min(sequence[k],sequence[k+1]),max(sequence[k],sequence[k+1])))
fh.write(issueRouteTail())
fh.write(issueTail())
fh.close()
print >> sys.stderr, "BinTree XML platform description file created: "+filename
###############################################################
## Generate host file
filename = "./hostfile_"+str(num_hosts)+".txt"
fh = open(filename, 'w')
for i in range(0,num_hosts):
fh.write("host-"+str(i)+".hawaii.edu\n")
fh.close()
print >> sys.stderr, "Hostfile created: "+filename
| unlicense | -6,806,824,665,409,877,000 | 34.78169 | 156 | 0.604212 | false |
Lao-liu/mist.io | src/mist/io/model.py | 1 | 7013 |
"""Mist Io Model
Here we define the schema of our data structure in an object oriented
way.
Simple, low level, helper functions can also be added to the
following classes. (eg user.get_num_mon_machines(), user.keys.unused()).
It is recommended that only pure functions (no side-effects) are used
as class methods.
How this works:
The basic class is the OODict. This defines a dict to object mapper.
When we need a new data structure, we define a new subclass of OODict.
Class properties that are instances of Field subclasses are considered to be
OODict fields. These are the keys in the underlying dict that will be used.
There is a large variety of standard type fields.
One can create an OODict that has a field which is also parsed by some OODict.
To do so, you define a field on the outer OODict that is created by make_field.
Finally, list or dict like collections can be created by subclassing FieldsList
and FieldsDict. The items of these collections will be parsed according to
the field type defined in the class. This collection can be used as a field
in some OODict by use of make_field. If it sounds too complicated, just look
the code below, it should be pretty self-explanatory.
"""
import os
import logging
from Crypto.PublicKey import RSA
from hashlib import sha1
from mist.io.dal import StrField, HtmlSafeStrField
from mist.io.dal import IntField, FloatField, BoolField
from mist.io.dal import ListField, DictField
from mist.io.dal import OODict, FieldsDict, FieldsList, make_field
try:
from mist.core.dal import User as DalUser
from mist.core.dal import FieldsDict # escapes dots in keys (for mongo)
except ImportError:
from mist.io.dal import User as DalUser
from mist.io import exceptions
try:
from mist.core import config
except ImportError:
from mist.io import config
import logging
logging.basicConfig(level=config.PY_LOG_LEVEL,
format=config.PY_LOG_FORMAT,
datefmt=config.PY_LOG_FORMAT_DATE)
log = logging.getLogger(__name__)
class Machine(OODict):
"""A saved machine in the machines list of some cloud.
For the time being, only bare metal machines are saved, for API clouds
we get the machine list from the provider.
"""
## hasMonitoring = BoolField()
uuid = StrField()
## monitor_server = make_field(MonitorServer)()
dns_name = HtmlSafeStrField()
public_ips = ListField()
## collectd_password = StrField()
name = HtmlSafeStrField()
ssh_port = IntField(22)
os_type = StrField('unix')
remote_desktop_port = IntField(3389)
class Machines(FieldsDict):
"""Collection of machines of a certain cloud.
For the time being, only bare metal machines are saved, for API clouds
we get the machine list from the provider.
"""
_item_type = make_field(Machine)
_key_error = exceptions.MachineNotFoundError
class Cloud(OODict):
"""A cloud vm provider cloud"""
enabled = BoolField()
machine_count = IntField()
apiurl = StrField()
apikey = HtmlSafeStrField()
apisecret = StrField()
title = HtmlSafeStrField()
tenant_name = HtmlSafeStrField()
auth_version = HtmlSafeStrField()
region = HtmlSafeStrField()
poll_interval = IntField(10000)
provider = HtmlSafeStrField()
## datacenter = StrField()
compute_endpoint = StrField()
key_file = StrField()
cert_file = StrField()
ca_cert_file = StrField()
ssh_port = IntField(22)
docker_port = IntField(4243)
machines = make_field(Machines)()
starred = ListField()
unstarred = ListField()
images_location = StrField()
def __repr__(self):
print_fields = ['title', 'provider', 'region']
return super(Cloud, self).__repr__(print_fields)
def get_id(self):
from mist.io.helpers import b58_encode
if self.provider == 'docker':
concat = '%s%s%s' % (self.provider, self.title, self.apiurl)
elif self.provider in ['bare_metal', 'coreos']:
name = self.machines.values()[0].name
concat = '%s%s%s' % (self.provider, '', name)
elif self.provider == 'openstack' or 'hpcloud' in self.provider:
concat = "%s%s%s%s%s" % (self.provider, self.region, self.apikey, self.apiurl, self.tenant_name)
elif self.provider == 'libvirt':
concat = "%s%s" % (self.provider, self.apiurl)
elif self.provider in ['vcloud', 'indonesian_vcloud', 'vsphere']:
concat = "%s%s%s%s" % (self.provider, self.apikey, self.apisecret, self.apiurl)
else:
concat = '%s%s%s' % (self.provider, self.region, self.apikey)
return b58_encode(int(sha1(concat).hexdigest(), 16))
class Clouds(FieldsDict):
_item_type = make_field(Cloud)
_key_error = exceptions.CloudNotFoundError
class Keypair(OODict):
"""An ssh keypair."""
public = StrField()
private = StrField()
default = BoolField()
machines = ListField()
def generate(self):
"""Generates a new RSA keypair and assignes to self."""
from Crypto import Random
Random.atfork()
key = RSA.generate(2048)
self.private = key.exportKey()
self.public = key.exportKey('OpenSSH')
def isvalid(self):
"""Checks if self is a valid RSA keypair."""
from Crypto import Random
Random.atfork()
message = 'Message 1234567890'
if 'ssh-rsa' in self.public:
public_key_container = RSA.importKey(self.public)
private_key_container = RSA.importKey(self.private)
encr_message = public_key_container.encrypt(message, 0)
decr_message = private_key_container.decrypt(encr_message)
if message == decr_message:
return True
return False
def construct_public_from_private(self):
"""Constructs pub key from self.private and assignes to self.public.
Only works for RSA.
"""
from Crypto import Random
Random.atfork()
if 'RSA' in self.private:
try:
key = RSA.importKey(self.private)
public = key.publickey().exportKey('OpenSSH')
self.public = public
return True
except:
pass
return False
def __repr__(self):
return super(Keypair, self).__repr__(['default', 'machines'])
class Keypairs(FieldsDict):
_item_type = make_field(Keypair)
_key_error = exceptions.KeypairNotFoundError
class User(DalUser):
"""The basic model class is User. It contains all the methods
necessary to find and save users in memcache and in mongo.
It transforms the user dict into an object with consistent
attributes instead of inconsistent dict with missing keys.
"""
email = StrField()
mist_api_token = StrField()
clouds = make_field(Clouds)()
keypairs = make_field(Keypairs)()
def __repr__(self):
return super(User, self).__repr__(['email'])
| agpl-3.0 | -2,193,338,107,679,193,000 | 31.618605 | 108 | 0.65949 | false |
LinDA-tools/LindaWorkbench | linda/linda_app/management/commands/update_datasources.py | 1 | 1077 |
__author__ = 'dimitris'
import time
from datetime import datetime
from django.db.models import Q
from django.core.management import BaseCommand
from linda_app.models import DatasourceDescription
class Command(BaseCommand):
help = 'Update existing datasources'
def handle(self, *args, **options):
while True:
# only update RSS datasources
for datasource in DatasourceDescription.objects.filter(~Q(rss_info=None)):
diff = datetime.now() - datasource.rss_info.lastDataFetchOn.replace(tzinfo=None)
if diff.total_seconds() >= datasource.rss_info.interval:
# update rss
datasource.update_rss()
# update info & save
datasource.rss_info.lastDataFetchOn = datetime.now()
datasource.rss_info.save()
print(datasource.rss_info.lastDataFetchOn.strftime("%Y-%m-%d %H:%M:%S") + ' updated ' +
datasource.title)
# sleep for a minute
time.sleep(60) | mit | 272,534,954,458,373,500 | 37.5 | 107 | 0.597029 | false |
oss/shrunk | backend/shrunk/client/search.py | 1 | 6400 |
"""Implements the :py:class:`SearchClient` class."""
from typing import Any, List
from datetime import datetime, timezone
from pymongo.collation import Collation
import pymongo
__all__ = ['SearchClient']
class SearchClient:
"""This class executes search queries."""
def __init__(self, *, db: pymongo.database.Database, client: Any):
self.db = db
self.client = client
def execute(self, user_netid: str, query: Any) -> Any: # pylint: disable=too-many-branches,too-many-statements
"""Execute a search query
:param user_netid: The NetID of the user performing the search
:param query: The search query. See :py:mod:`shrunk.api.search` for
the search query format
"""
# We're going to build up an aggregation pipeline based on the submitted query.
# This pipeline will be executed on the organizations collection if set.set == 'org',
# or on the urls collection otherwise.
pipeline: List[Any] = []
# Filter the appropriate links set.
if query['set']['set'] == 'user': # search within `user_netid`'s links
pipeline.append({'$match': {'netid': user_netid}})
elif query['set']['set'] == 'shared':
# If the set is 'shared', the pipeline will be executed against the 'organizations'
# collection instead of the 'urls' collection.
pipeline += [
{'$match': {'members.netid': user_netid} },
{'$lookup': {
'from': 'urls',
'localField': '_id',
'foreignField': 'viewers._id',
'as': 'shared_urls',
}},
{'$unwind': '$shared_urls'},
{'$replaceRoot': {'newRoot': '$shared_urls'}},
{'$unionWith': {
'coll': 'urls',
'pipeline': [{'$match': {'viewers._id': user_netid}}],
}},
]
elif query['set']['set'] == 'org': # search within the given org
pipeline.append({'$match': {'viewers.type': 'org', 'viewers._id': query['set']['org']}})
# Filter based on search string, if provided.
if 'query' in query:
pipeline += [{
'$match': {
'$text': {
'$search': query['query'],
},
},
},
{
'$addFields': {
'text_search_score': {'$meta': 'textScore'},
},
}]
# Sort results.
sort_order = 1 if query['sort']['order'] == 'ascending' else -1
if query['sort']['key'] == 'created_time':
sort_key = 'timeCreated'
elif query['sort']['key'] == 'title':
sort_key = 'title'
elif query['sort']['key'] == 'visits':
sort_key = 'visits'
elif query['sort']['key'] == 'relevance':
sort_key = 'text_search_score'
else:
# This should never happen
raise RuntimeError(f'Bad sort key {query["sort"]["key"]}')
pipeline.append({'$sort': {sort_key: sort_order}})
# Add is_expired field
now = datetime.now(timezone.utc)
pipeline.append({
'$addFields': {
'is_expired': {
'$and': [
{'$toBool': '$expiration_time'},
{'$gte': [now, '$expiration_time']},
],
},
},
})
if not query.get('show_deleted_links', False):
pipeline.append({'$match': {'deleted': {'$ne': True}}})
if not query.get('show_expired_links', False):
pipeline.append({'$match': {'is_expired': False}})
if 'begin_time' in query:
pipeline.append({'$match': {'timeCreated': {'$gte': query['begin_time']}}})
if 'end_time' in query:
pipeline.append({'$match': {'timeCreated': {'$lte': query['end_time']}}})
# Pagination.
facet = {
'count': [{'$count': 'count'}],
'result': [{'$skip': 0}],
}
if 'pagination' in query:
facet['result'] = [
{'$skip': query['pagination']['skip']},
{'$limit': query['pagination']['limit']},
]
pipeline.append({'$facet': facet})
# Execute the query. Make sure we use the 'en' collation so strings
# are sorted properly (e.g. wrt case and punctuation).
if query['set']['set'] == 'shared':
cursor = self.db.organizations.aggregate(pipeline, collation=Collation('en'))
else:
cursor = self.db.urls.aggregate(pipeline, collation=Collation('en'))
def prepare_result(res: Any) -> Any:
"""Turn a result from the DB into something than can be JSON-serialized."""
def is_alias_visible(alias: Any) -> bool:
if query.get('show_deleted_links', False):
return True
return not alias['deleted']
if res.get('expiration_time'):
expiration_time = res['expiration_time']
else:
expiration_time = None
prepared = {
'id': res['_id'],
'title': res['title'],
'long_url': res['long_url'],
'created_time': res['timeCreated'],
'expiration_time': expiration_time,
'visits': res['visits'],
'unique_visits': res.get('unique_visits', 0),
'owner': res['netid'],
'aliases': [alias for alias in res['aliases'] if is_alias_visible(alias)],
'is_expired': res['is_expired'],
'may_edit': self.client.links.may_edit(res['_id'], user_netid),
}
if res.get('deleted'):
prepared['deletion_info'] = {
'deleted_by': res['deleted_by'],
'deleted_time': res['deleted_time'],
}
return prepared
result = next(cursor)
count = result['count'][0]['count'] if result['count'] else 0
results = [prepare_result(res) for res in result['result']]
return {
'count': count,
'results': results,
}
| mit | 5,381,660,293,846,415,000 | 36.426901 | 115 | 0.483125 | false |
eggplantbren/NSwMCMC | python/straightline2.py | 1 | 2303 |
import copy
import numpy as np
import numpy.random as rng
import scipy.special
from utils import randh
from numba import jit
# How many parameters are there?
num_params = 4
# Some data
data = np.loadtxt("road.txt")
N = data.shape[0] # Number of data points
# Plot the data
import matplotlib.pyplot as plt
plt.plot(data[:,0], data[:,1], "o")
plt.xlabel("Age of person (years)")
plt.ylabel("Maximum vision distance (feet)")
plt.show()
# Some idea of how big the Metropolis proposals should be
jump_sizes = np.array([1000.0, 1000.0, 20.0, 5.0])
@jit
def from_prior():
"""
A function to generate parameter values from the prior.
Returns a numpy array of parameter values.
"""
m = 1000.0*rng.randn()
b = 1000.0*rng.randn()
log_sigma = -10.0 + 20.0*rng.rand()
log_nu = 5.0*rng.rand()
return np.array([m, b, log_sigma, log_nu])
@jit
def log_prior(params):
"""
Evaluate the (log of the) prior distribution
"""
# Rename the parameters
m, b, log_sigma, log_nu = params
logp = 0.0
# Normal prior for m and b
# Metropolis only needs the ratio, so I've left out the 2pi bits
logp += -0.5*(m/1000.0)**2
logp += -0.5*(b/1000.0)**2
if log_sigma < -10.0 or log_sigma > 10.0:
return -np.Inf
if log_nu < 0.0 or log_nu > 5.0:
return -np.Inf
return logp
@jit
def log_likelihood(params):
"""
Evaluate the (log of the) likelihood function
"""
# Rename the parameters
m, b, log_sigma, log_nu = params
# Get sigma and nu
sigma = np.exp(log_sigma)
nu = np.exp(log_nu)
# First calculate the straight line
line = m*data[:,0] + b
# t distribution (compare with the pdf on wikipedia, under
# Non-standardized Student's t-distribution)
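    # For reference (added note): the density whose log is summed below is
    #   p(y | m, b, sigma, nu) = Gamma((nu+1)/2) / (Gamma(nu/2) * sqrt(nu*pi) * sigma)
    #                            * (1 + ((y - (m*x + b))/sigma)**2 / nu)**(-(nu+1)/2)
    # so the return value is the log-likelihood of all N points under a
    # non-standardized Student's t centred on the line.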
return N*scipy.special.gammaln(0.5*(nu+1.0))\
-N*0.5*np.log(nu*np.pi) - N*scipy.special.gammaln(0.5*nu) - N*np.log(sigma)\
-0.5*(nu+1.0)*np.sum(np.log(1.0 + ((data[:,1] - line)**2/sigma**2)/nu))
@jit
def proposal(params):
"""
Generate new values for the parameters, for the Metropolis algorithm.
"""
# Copy the parameters
new = copy.deepcopy(params)
# Which one should we change?
which = rng.randint(num_params)
new[which] += jump_sizes[which]*randh()
return new
| gpl-2.0 | 5,760,810,357,822,276,000 | 22.989583 | 87 | 0.6231 | false |
dmizverev/robot-framework-library | library/RabbitMqManager.py | 1 | 16205 |
# -*- coding: utf-8 -*-
from robot.api import logger
from robot.utils import ConnectionCache
import httplib
import base64
import json
import socket
import urlparse
import urllib
class RabbitMqManager(object):
"""
    Library for managing a RabbitMq server.
    Implemented on the basis of:
    - [ http://hg.rabbitmq.com/rabbitmq-management/raw-file/3646dee55e02/priv/www-api/help.html | RabbitMQ Management HTTP API ]
    - [ https://github.com/rabbitmq/rabbitmq-management/blob/master/bin/rabbitmqadmin | rabbitmqadmin ]
    == Dependencies ==
    | robot framework | http://robotframework.org |
== Example ==
| *Settings* | *Value* |
| Library | RabbitMqManager |
| Library | Collections |
| *Test Cases* | *Action* | *Argument* | *Argument* | *Argument* | *Argument* | *Argument* |
| Simple |
| | Connect To Rabbitmq | my_host_name | 15672 | guest | guest | alias=rmq |
| | ${overview}= | Overview |
| | Log Dictionary | ${overview} |
| | Close All Rabbitmq Connections |
"""
ROBOT_LIBRARY_SCOPE='GLOBAL'
def __init__(self):
self._connection=None
self.headers=None
self._cache=ConnectionCache()
def connect_to_rabbitmq (self, host, port, username = 'guest', password = 'guest', timeout = 15, alias = None):
"""
        Connect to a RabbitMq server.
        *Args:*\n
        _host_ - server host name;\n
        _port_ - port number;\n
        _username_ - user name;\n
        _password_ - user password;\n
        _timeout_ - connection timeout;\n
        _alias_ - connection alias;\n
        *Returns:*\n
        Index of the current connection.
        *Raises:*\n
        socket.error if the connection cannot be created.
*Example:*\n
| Connect To Rabbitmq | my_host_name | 15672 | guest | guest | alias=rmq |
"""
port=int (port)
timeout=int (timeout)
logger.debug ('Connecting using : host=%s, port=%d, username=%s, password=%s, timeout=%d, alias=%s '%(host, port, username, password, timeout, alias))
self.headers={"Authorization":"Basic "+base64.b64encode(username+":"+password)}
try:
self._connection=httplib.HTTPConnection (host, port, timeout)
self._connection.connect()
return self._cache.register(self._connection, alias)
except socket.error, e:
raise Exception ("Could not connect to RabbitMq", str(e))
def switch_rabbitmq_connection (self, index_or_alias):
"""
        Switch between active RabbitMq connections using their index or alias.
        The alias is set in the keyword [#Connect To Rabbitmq|Connect To Rabbitmq], which also returns the connection index.
        *Args:*\n
        _index_or_alias_ - connection index or its alias;
        *Returns:*\n
        Index of the previous connection.
*Example:*\n
| Connect To Rabbitmq | my_host_name_1 | 15672 | guest | guest | alias=rmq1 |
| Connect To Rabbitmq | my_host_name_2 | 15672 | guest | guest | alias=rmq2 |
| Switch Rabbitmq Connection | rmq1 |
| ${live}= | Is alive |
| Switch Rabbitmq Connection | rmq2 |
| ${live}= | Is alive |
| Close All Rabbitmq Connections |
"""
old_index=self._cache.current_index
self._connection=self._cache.switch(index_or_alias)
return old_index
def disconnect_from_rabbitmq(self):
"""
        Close the current RabbitMq connection.
*Example:*\n
| Connect To Rabbitmq | my_host_name | 15672 | guest | guest | alias=rmq |
| Disconnect From Rabbitmq |
"""
logger.debug ('Close connection with : host=%s, port=%d '%(self._connection.host, self._connection.port))
self._connection.close()
def close_all_rabbitmq_connections (self):
"""
        Close all RabbitMq connections.
        This keyword is used to close all connections in case several of them were opened.
        [#Disconnect From Rabbitmq|Disconnect From Rabbitmq] and [#Close All Rabbitmq Connections|Close All Rabbitmq Connections]
        must not be used together.
        After this keyword is executed, the index returned by [#Connect To Rabbitmq|Connect To Rabbitmq] starts over from 1.
*Example:*\n
| Connect To Rabbitmq | my_host_name | 15672 | guest | guest | alias=rmq |
| Close All Rabbitmq Connections |
"""
self._connection=self._cache.close_all()
def _http_request (self, method, path, body):
"""
        Perform a request to RabbitMq.
        *Args:*\n
        _method_ - request method;\n
        _path_ - request uri;\n
        _body_ - POST request body;\n
"""
if body!="":
self.headers["Content-Type"]="application/json"
        logger.debug ('Prepared request with method '+method+' to '+'http://'+self._connection.host+':'+str(self._connection.port)+path+' and body\n'+body)
try:
self._connection.request(method, path, body, self.headers)
except socket.error, e:
raise Exception("Could not send request: {0}".format(e))
resp=self._connection.getresponse()
if resp.status==400:
raise Exception (json.loads(resp.read())['reason'])
if resp.status==401:
raise Exception("Access refused: {0}".format('http://'+self._connection.host+':'+str(self._connection.port)+path))
if resp.status==404:
raise Exception("Not found: {0}".format('http://'+self._connection.host+':'+str(self._connection.port)+path))
if resp.status==301:
url=urlparse.urlparse(resp.getheader('location'))
[host, port]=url.netloc.split(':')
self.options.hostname=host
self.options.port=int(port)
return self.http(method, url.path+'?'+url.query, body)
if resp.status<200 or resp.status>400:
raise Exception("Received %d %s for request %s\n%s"
%(resp.status, resp.reason, 'http://'+self._connection.host+':'+str(self._connection.port)+path, resp.read()))
return resp.read()
def _get (self, path):
return self._http_request('GET', '/api%s'%path, '')
def _put (self, path, body):
return self._http_request("PUT", "/api%s"%path, body)
def _post (self, path, body):
return self._http_request("POST", "/api%s"%path, body)
def _delete (self, path):
return self._http_request("DELETE", "/api%s"%path, "")
def _quote_vhost (self, vhost):
"""
        URL-quote the vhost name.
"""
if vhost=='/':
vhost='%2F'
if vhost!='%2F':
vhost=urllib.quote(vhost)
return vhost
def is_alive(self):
"""
        Check that RabbitMq is alive.
        A GET request of the form 'http://<host>:<port>/api/' is sent and the return code is checked.
        *Returns:*\n
        bool True if the return code is 200.\n
        bool False in all other cases.
        *Raises:*\n
        socket.error if the GET request cannot be sent.
*Example:*\n
| ${live}= | Is Alive |
=>\n
True
"""
try:
self._connection.request('GET', '/api/')
except socket.error, e:
raise Exception("Could not send request: {0}".format(e))
resp=self._connection.getresponse()
resp.read()
logger.debug ('Response status=%d'%resp.status)
if resp.status==200 :
return True
else:
return False
def overview (self):
"""
        Information about the RabbitMq server.
        *Returns:*\n
        Dictionary with server information.
*Example:*\n
| ${overview}= | Overview |
| Log Dictionary | ${overview} |
| ${version}= | Get From Dictionary | ${overview} | rabbitmq_version |
=>\n
Dictionary size is 13 and it contains following items:
| erlang_full_version | Erlang R16B02 (erts-5.10.3) [source] [64-bit] [smp:2:2] [async-threads:30] [hipe] [kernel-poll:true] |
| erlang_version | R16B02 |
| listeners | [{u'node': u'rabbit@srv2-rs582b-m', u'ip_address': u'0.0.0.0', u'protocol': u'amqp', u'port': 5672}] |
| management_version | 3.1.0 |
| message_stats | [] |
${version} = 3.1.0
"""
return json.loads(self._get ('/overview'))
def connections (self):
"""
        List of open connections.
"""
return json.loads(self._get ('/connections'))
def get_name_of_all_connections (self):
"""
        List of names of all open connections.
"""
names=[]
data=self.connections ()
for item in data :
names.append(item['name'])
return names
def channels (self):
"""
        List of open channels.
"""
return json.loads(self._get ('/channels'))
def exchanges (self):
"""
        List of exchanges.
*Example:*\n
| ${exchanges}= | Exchanges |
| Log List | ${exchanges} |
| ${item}= | Get From list | ${exchanges} | 1 |
| ${name}= | Get From Dictionary | ${q} | name |
=>\n
List length is 8 and it contains following items:
| 0 | {u'name': u'', u'durable': True, u'vhost': u'/', u'internal': False, u'message_stats': [], u'arguments': {}, u'type': u'direct', u'auto_delete': False} |
| 1 | {u'name': u'amq.direct', u'durable': True, u'vhost': u'/', u'internal': False, u'message_stats': [], u'arguments': {}, u'type': u'direct', u'auto_delete': False} |
...\n
${name} = amq.direct
"""
return json.loads(self._get ('/exchanges'))
def get_names_of_all_exchanges (self):
"""
        List of names of all exchanges.
*Example:*\n
| ${names}= | Get Names Of All Exchanges |
| Log List | ${names} |
=>\n
| List has one item:
| amq.direct
"""
names=[]
data=self.exchanges ()
for item in data :
names.append(item['name'])
return names
def queues (self):
"""
        List of queues.
"""
return json.loads(self._get ('/queues'))
def get_queues_on_vhost (self, vhost = '%2F'):
"""
        List of queues for a virtual host.
        *Args:*\n
        _vhost_ - virtual host name (quoted with urllib.quote);
"""
return json.loads(self._get ('/queues/'+self._quote_vhost(vhost)))
def get_names_of_queues_on_vhost (self, vhost = '%2F'):
"""
        List of queue names on a virtual host.
        *Args:*\n
        - vhost: virtual host name (quoted with urllib.quote)
*Example:*\n
| ${names}= | Get Names Of Queues On Vhost |
| Log List | ${names} |
=>\n
| List has one item:
| federation: ex2 -> rabbit@server.net.ru
"""
names=[]
data=self.get_queues_on_vhost (vhost)
for item in data :
names.append(item['name'])
return names
def queue_exists(self, queue, vhost='%2F'):
"""
        Verify that the given queue exists on the virtual host.
"""
        names = self.get_names_of_queues_on_vhost(vhost)
if queue in names:
return True
else:
return False
def delete_queues_by_name (self, name, vhost = '%2F'):
"""
        Delete a queue from a virtual host.
        *Args:*\n
        _name_ - queue name (quoted with urllib.quote);\n
        _vhost_ - virtual host name (quoted with urllib.quote);\n
"""
return self._delete('/queues/'+self._quote_vhost(vhost)+'/'+urllib.quote(name))
def vhosts (self):
"""
        List of virtual hosts.
"""
return json.loads(self._get ('/vhosts'))
def nodes(self):
"""
List of nodes in the RabbitMQ cluster
"""
return json.loads(self._get('/nodes'))
@property
def _cluster_name(self):
"""
Name identifying this RabbitMQ cluster.
"""
return json.loads(self._get('/cluster-name'))
def create_queues_by_name(self, name, auto_delete=False, durable=True, arguments={}, vhost='%2F'):
"""
Create an individual queue.
"""
node = self._cluster_name['name']
body = json.dumps({
"auto_delete": auto_delete,
"durable": durable,
"arguments": arguments,
"node": node
})
return self._put('/queues/' + self._quote_vhost(vhost) + '/' + urllib.quote(name), body=body)
def publish_message_by_name(self, queue, msg, properties, vhost='%2F'):
"""
Publish a message to a given exchange
"""
name = "amq.default"
body = json.dumps({
"properties": properties,
"routing_key": queue,
"payload": msg,
"payload_encoding": "string"
})
routed = self._post('/exchanges/' + self._quote_vhost(vhost) +
'/' + urllib.quote(name) + '/publish', body=body)
return json.loads(routed)
def get_messages_by_queue(self, queue, count=5, requeue=False, encoding="auto", truncate=50000, vhost='%2F'):
"""
Get messages from a queue.
"""
body = json.dumps({
"count": count,
"requeue": requeue,
"encoding": encoding,
"truncate": truncate
})
messages = self._post('/queues/' + self._quote_vhost(vhost) +
'/' + urllib.quote(queue) + '/get', body=body)
return json.loads(messages)
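    # Usage sketch (added illustration; the queue name and payload are hypothetical,
    # and the properties dictionary is assumed to be built with Create Dictionary):
    # | ${props}= | Create Dictionary | delivery_mode=${2} |
    # | ${routed}= | Publish Message By Name | test_queue | hello | ${props} |
    # | ${msgs}= | Get Messages By Queue | test_queue | count=${1} |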
def purge_messages_by_queue(self, name, vhost='%2F'):
"""
Purge contents of a queue.
"""
return self._delete('/queues/' + self._quote_vhost(vhost) + '/' + urllib.quote(name) + '/contents')
| apache-2.0 | -8,961,411,459,969,314,000 | 32.004556 | 177 | 0.531886 | false |
Valloric/ycmd | ycmd/tests/go/debug_info_test.py | 1 | 3586 |
# Copyright (C) 2016-2017 ycmd contributors
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
# Not installing aliases from python-future; it's unreliable and slow.
from builtins import * # noqa
from hamcrest import ( assert_that,
contains,
has_entries,
has_entry,
instance_of )
from ycmd.tests.go import ( IsolatedYcmd,
PathToTestFile,
SharedYcmd,
StartGoCompleterServerInDirectory )
from ycmd.tests.test_utils import BuildRequest
@SharedYcmd
def DebugInfo_test( app ):
request_data = BuildRequest( filetype = 'go' )
assert_that(
app.post_json( '/debug_info', request_data ).json,
has_entry( 'completer', has_entries( {
'name': 'Go',
'servers': contains( has_entries( {
'name': 'gopls',
'is_running': instance_of( bool ),
'executable': contains( instance_of( str ),
instance_of( str ),
instance_of( str ),
instance_of( str ) ),
'address': None,
'port': None,
'pid': instance_of( int ),
'logfiles': contains( instance_of( str ) ),
'extras': contains(
has_entries( {
'key': 'Server State',
'value': instance_of( str ),
} ),
has_entries( {
'key': 'Project Directory',
'value': PathToTestFile(),
} ),
has_entries( {
'key': 'Settings',
'value': '{}'
} ),
)
} ) ),
} ) )
)
@IsolatedYcmd
def DebugInfo_ProjectDirectory_test( app ):
project_dir = PathToTestFile( 'td' )
StartGoCompleterServerInDirectory( app, project_dir )
assert_that(
app.post_json( '/debug_info', BuildRequest( filetype = 'go' ) ).json,
has_entry( 'completer', has_entries( {
'name': 'Go',
'servers': contains( has_entries( {
'name': 'gopls',
'is_running': instance_of( bool ),
'executable': contains( instance_of( str ),
instance_of( str ),
instance_of( str ),
instance_of( str ) ),
'address': None,
'port': None,
'pid': instance_of( int ),
'logfiles': contains( instance_of( str ) ),
'extras': contains(
has_entries( {
'key': 'Server State',
'value': instance_of( str ),
} ),
has_entries( {
'key': 'Project Directory',
'value': PathToTestFile(),
} ),
has_entries( {
'key': 'Settings',
'value': '{}'
} ),
)
} ) ),
} ) )
)
| gpl-3.0 | -7,875,975,041,765,725,000 | 31.6 | 73 | 0.528444 | false |
ioam/paramtk | paramtk/odict.py | 1 | 46083 |
from __future__ import generators
# odict.py
# An Ordered Dictionary object
# Copyright (C) 2005 Nicola Larosa, Michael Foord
# E-mail: nico AT tekNico DOT net, fuzzyman AT voidspace DOT org DOT uk
# This software is licensed under the terms of the BSD license.
# http://www.voidspace.org.uk/python/license.shtml
# Basically you're free to copy, modify, distribute and relicense it,
# So long as you keep a copy of the license with it.
# Documentation at http://www.voidspace.org.uk/python/odict.html
# For information about bugfixes, updates and support, please join the
# Pythonutils mailing list:
# http://groups.google.com/group/pythonutils/
# Comments, suggestions and bug reports welcome.
"""A dict that keeps keys in insertion order"""
__author__ = ('Nicola Larosa <nico-NoSp@m-tekNico.net>,'
'Michael Foord <fuzzyman AT voidspace DOT org DOT uk>')
__docformat__ = "restructuredtext en"
__revision__ = '$Id: external.py 12024 2012-05-02 21:13:18Z ceball $'
__version__ = '0.2.2'
__all__ = ['OrderedDict', 'SequenceOrderedDict']
import sys
INTP_VER = sys.version_info[:2]
if INTP_VER < (2, 2):
raise RuntimeError("Python v.2.2 or later required")
import types, warnings
class OrderedDict(dict):
"""
A class of dictionary that keeps the insertion order of keys.
All appropriate methods return keys, items, or values in an ordered way.
All normal dictionary methods are available. Update and comparison is
restricted to other OrderedDict objects.
Various sequence methods are available, including the ability to explicitly
mutate the key ordering.
__contains__ tests:
>>> d = OrderedDict(((1, 3),))
>>> 1 in d
1
>>> 4 in d
0
__getitem__ tests:
>>> OrderedDict(((1, 3), (3, 2), (2, 1)))[2]
1
>>> OrderedDict(((1, 3), (3, 2), (2, 1)))[4]
Traceback (most recent call last):
KeyError: 4
__len__ tests:
>>> len(OrderedDict())
0
>>> len(OrderedDict(((1, 3), (3, 2), (2, 1))))
3
get tests:
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.get(1)
3
>>> d.get(4) is None
1
>>> d.get(4, 5)
5
>>> d
OrderedDict([(1, 3), (3, 2), (2, 1)])
has_key tests:
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.has_key(1)
1
>>> d.has_key(4)
0
"""
def __init__(self, init_val=(), strict=False):
"""
Create a new ordered dictionary. Cannot init from a normal dict,
nor from kwargs, since items order is undefined in those cases.
If the ``strict`` keyword argument is ``True`` (``False`` is the
default) then when doing slice assignment - the ``OrderedDict`` you are
assigning from *must not* contain any keys in the remaining dict.
>>> OrderedDict()
OrderedDict([])
>>> OrderedDict({1: 1})
Traceback (most recent call last):
TypeError: undefined order, cannot get items from dict
>>> OrderedDict({1: 1}.items())
OrderedDict([(1, 1)])
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d
OrderedDict([(1, 3), (3, 2), (2, 1)])
>>> OrderedDict(d)
OrderedDict([(1, 3), (3, 2), (2, 1)])
"""
self.strict = strict
dict.__init__(self)
if isinstance(init_val, OrderedDict):
self._sequence = init_val.keys()
dict.update(self, init_val)
elif isinstance(init_val, dict):
# we lose compatibility with other ordered dict types this way
raise TypeError('undefined order, cannot get items from dict')
else:
self._sequence = []
self.update(init_val)
### Special methods ###
def __delitem__(self, key):
"""
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> del d[3]
>>> d
OrderedDict([(1, 3), (2, 1)])
>>> del d[3]
Traceback (most recent call last):
KeyError: 3
>>> d[3] = 2
>>> d
OrderedDict([(1, 3), (2, 1), (3, 2)])
>>> del d[0:1]
>>> d
OrderedDict([(2, 1), (3, 2)])
"""
if isinstance(key, types.SliceType):
# FIXME: efficiency?
keys = self._sequence[key]
for entry in keys:
dict.__delitem__(self, entry)
del self._sequence[key]
else:
# do the dict.__delitem__ *first* as it raises
# the more appropriate error
dict.__delitem__(self, key)
self._sequence.remove(key)
def __eq__(self, other):
"""
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d == OrderedDict(d)
True
>>> d == OrderedDict(((1, 3), (2, 1), (3, 2)))
False
>>> d == OrderedDict(((1, 0), (3, 2), (2, 1)))
False
>>> d == OrderedDict(((0, 3), (3, 2), (2, 1)))
False
>>> d == dict(d)
False
>>> d == False
False
"""
if isinstance(other, OrderedDict):
# FIXME: efficiency?
# Generate both item lists for each compare
return (self.items() == other.items())
else:
return False
def __lt__(self, other):
"""
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> c = OrderedDict(((0, 3), (3, 2), (2, 1)))
>>> c < d
True
>>> d < c
False
>>> d < dict(c)
Traceback (most recent call last):
TypeError: Can only compare with other OrderedDicts
"""
if not isinstance(other, OrderedDict):
raise TypeError('Can only compare with other OrderedDicts')
# FIXME: efficiency?
# Generate both item lists for each compare
return (self.items() < other.items())
def __le__(self, other):
"""
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> c = OrderedDict(((0, 3), (3, 2), (2, 1)))
>>> e = OrderedDict(d)
>>> c <= d
True
>>> d <= c
False
>>> d <= dict(c)
Traceback (most recent call last):
TypeError: Can only compare with other OrderedDicts
>>> d <= e
True
"""
if not isinstance(other, OrderedDict):
raise TypeError('Can only compare with other OrderedDicts')
# FIXME: efficiency?
# Generate both item lists for each compare
return (self.items() <= other.items())
def __ne__(self, other):
"""
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d != OrderedDict(d)
False
>>> d != OrderedDict(((1, 3), (2, 1), (3, 2)))
True
>>> d != OrderedDict(((1, 0), (3, 2), (2, 1)))
True
>>> d == OrderedDict(((0, 3), (3, 2), (2, 1)))
False
>>> d != dict(d)
True
>>> d != False
True
"""
if isinstance(other, OrderedDict):
# FIXME: efficiency?
# Generate both item lists for each compare
return not (self.items() == other.items())
else:
return True
def __gt__(self, other):
"""
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> c = OrderedDict(((0, 3), (3, 2), (2, 1)))
>>> d > c
True
>>> c > d
False
>>> d > dict(c)
Traceback (most recent call last):
TypeError: Can only compare with other OrderedDicts
"""
if not isinstance(other, OrderedDict):
raise TypeError('Can only compare with other OrderedDicts')
# FIXME: efficiency?
# Generate both item lists for each compare
return (self.items() > other.items())
def __ge__(self, other):
"""
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> c = OrderedDict(((0, 3), (3, 2), (2, 1)))
>>> e = OrderedDict(d)
>>> c >= d
False
>>> d >= c
True
>>> d >= dict(c)
Traceback (most recent call last):
TypeError: Can only compare with other OrderedDicts
>>> e >= d
True
"""
if not isinstance(other, OrderedDict):
raise TypeError('Can only compare with other OrderedDicts')
# FIXME: efficiency?
# Generate both item lists for each compare
return (self.items() >= other.items())
def __repr__(self):
"""
Used for __repr__ and __str__
>>> r1 = repr(OrderedDict((('a', 'b'), ('c', 'd'), ('e', 'f'))))
>>> r1
"OrderedDict([('a', 'b'), ('c', 'd'), ('e', 'f')])"
>>> r2 = repr(OrderedDict((('a', 'b'), ('e', 'f'), ('c', 'd'))))
>>> r2
"OrderedDict([('a', 'b'), ('e', 'f'), ('c', 'd')])"
>>> r1 == str(OrderedDict((('a', 'b'), ('c', 'd'), ('e', 'f'))))
True
>>> r2 == str(OrderedDict((('a', 'b'), ('e', 'f'), ('c', 'd'))))
True
"""
return '%s([%s])' % (self.__class__.__name__, ', '.join(
['(%r, %r)' % (key, self[key]) for key in self._sequence]))
def __setitem__(self, key, val):
"""
Allows slice assignment, so long as the slice is an OrderedDict
>>> d = OrderedDict()
>>> d['a'] = 'b'
>>> d['b'] = 'a'
>>> d[3] = 12
>>> d
OrderedDict([('a', 'b'), ('b', 'a'), (3, 12)])
>>> d[:] = OrderedDict(((1, 2), (2, 3), (3, 4)))
>>> d
OrderedDict([(1, 2), (2, 3), (3, 4)])
>>> d[::2] = OrderedDict(((7, 8), (9, 10)))
>>> d
OrderedDict([(7, 8), (2, 3), (9, 10)])
>>> d = OrderedDict(((0, 1), (1, 2), (2, 3), (3, 4)))
>>> d[1:3] = OrderedDict(((1, 2), (5, 6), (7, 8)))
>>> d
OrderedDict([(0, 1), (1, 2), (5, 6), (7, 8), (3, 4)])
>>> d = OrderedDict(((0, 1), (1, 2), (2, 3), (3, 4)), strict=True)
>>> d[1:3] = OrderedDict(((1, 2), (5, 6), (7, 8)))
>>> d
OrderedDict([(0, 1), (1, 2), (5, 6), (7, 8), (3, 4)])
>>> a = OrderedDict(((0, 1), (1, 2), (2, 3)), strict=True)
>>> a[3] = 4
>>> a
OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> a[::1] = OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> a
OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> a[:2] = OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4), (4, 5)])
Traceback (most recent call last):
ValueError: slice assignment must be from unique keys
>>> a = OrderedDict(((0, 1), (1, 2), (2, 3)))
>>> a[3] = 4
>>> a
OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> a[::1] = OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> a
OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> a[:2] = OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> a
OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> a[::-1] = OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> a
OrderedDict([(3, 4), (2, 3), (1, 2), (0, 1)])
>>> d = OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> d[:1] = 3
Traceback (most recent call last):
TypeError: slice assignment requires an OrderedDict
>>> d = OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> d[:1] = OrderedDict([(9, 8)])
>>> d
OrderedDict([(9, 8), (1, 2), (2, 3), (3, 4)])
"""
if isinstance(key, types.SliceType):
if not isinstance(val, OrderedDict):
# FIXME: allow a list of tuples?
raise TypeError('slice assignment requires an OrderedDict')
keys = self._sequence[key]
# NOTE: Could use ``range(*key.indices(len(self._sequence)))``
indexes = range(len(self._sequence))[key]
if key.step is None:
# NOTE: new slice may not be the same size as the one being
# overwritten !
# NOTE: What is the algorithm for an impossible slice?
# e.g. d[5:3]
pos = key.start or 0
del self[key]
newkeys = val.keys()
for k in newkeys:
if k in self:
if self.strict:
raise ValueError('slice assignment must be from '
'unique keys')
else:
# NOTE: This removes duplicate keys *first*
# so start position might have changed?
del self[k]
self._sequence = (self._sequence[:pos] + newkeys +
self._sequence[pos:])
dict.update(self, val)
else:
# extended slice - length of new slice must be the same
# as the one being replaced
if len(keys) != len(val):
raise ValueError('attempt to assign sequence of size %s '
'to extended slice of size %s' % (len(val), len(keys)))
# FIXME: efficiency?
del self[key]
item_list = zip(indexes, val.items())
# smallest indexes first - higher indexes not guaranteed to
# exist
item_list.sort()
for pos, (newkey, newval) in item_list:
if self.strict and newkey in self:
raise ValueError('slice assignment must be from unique'
' keys')
self.insert(pos, newkey, newval)
else:
if key not in self:
self._sequence.append(key)
dict.__setitem__(self, key, val)
def __getitem__(self, key):
"""
Allows slicing. Returns an OrderedDict if you slice.
>>> b = OrderedDict([(7, 0), (6, 1), (5, 2), (4, 3), (3, 4), (2, 5), (1, 6)])
>>> b[::-1]
OrderedDict([(1, 6), (2, 5), (3, 4), (4, 3), (5, 2), (6, 1), (7, 0)])
>>> b[2:5]
OrderedDict([(5, 2), (4, 3), (3, 4)])
"""
if isinstance(key, types.SliceType):
# FIXME: does this raise the error we want?
keys = self._sequence[key]
# FIXME: efficiency?
return OrderedDict([(entry, self[entry]) for entry in keys])
else:
return dict.__getitem__(self, key)
__str__ = __repr__
def __setattr__(self, name, value):
"""
Implemented so that accesses to ``sequence`` raise a warning and are
diverted to the new ``setkeys`` method.
"""
if name == 'sequence':
warnings.warn('Use of the sequence attribute is deprecated.'
' Use the keys method instead.', DeprecationWarning)
# NOTE: doesn't return anything
self.setkeys(value)
else:
# FIXME: do we want to allow arbitrary setting of attributes?
# Or do we want to manage it?
object.__setattr__(self, name, value)
def __getattr__(self, name):
"""
Implemented so that access to ``sequence`` raises a warning.
>>> d = OrderedDict()
>>> d.sequence
[]
"""
if name == 'sequence':
warnings.warn('Use of the sequence attribute is deprecated.'
' Use the keys method instead.', DeprecationWarning)
# NOTE: Still (currently) returns a direct reference. Need to
# because code that uses sequence will expect to be able to
# mutate it in place.
return self._sequence
else:
# raise the appropriate error
raise AttributeError("OrderedDict has no '%s' attribute" % name)
def __deepcopy__(self, memo):
"""
To allow deepcopy to work with OrderedDict.
>>> from copy import deepcopy
>>> a = OrderedDict([(1, 1), (2, 2), (3, 3)])
>>> a['test'] = {}
>>> b = deepcopy(a)
>>> b == a
True
>>> b is a
False
>>> a['test'] is b['test']
False
"""
from copy import deepcopy
return self.__class__(deepcopy(self.items(), memo), self.strict)
### Read-only methods ###
def copy(self):
"""
>>> OrderedDict(((1, 3), (3, 2), (2, 1))).copy()
OrderedDict([(1, 3), (3, 2), (2, 1)])
"""
return OrderedDict(self)
def items(self):
"""
``items`` returns a list of tuples representing all the
``(key, value)`` pairs in the dictionary.
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.items()
[(1, 3), (3, 2), (2, 1)]
>>> d.clear()
>>> d.items()
[]
"""
return zip(self._sequence, self.values())
def keys(self):
"""
Return a list of keys in the ``OrderedDict``.
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.keys()
[1, 3, 2]
"""
return self._sequence[:]
def values(self, values=None):
"""
Return a list of all the values in the OrderedDict.
Optionally you can pass in a list of values, which will replace the
current list. The value list must be the same len as the OrderedDict.
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.values()
[3, 2, 1]
"""
return [self[key] for key in self._sequence]
def iteritems(self):
"""
>>> ii = OrderedDict(((1, 3), (3, 2), (2, 1))).iteritems()
>>> ii.next()
(1, 3)
>>> ii.next()
(3, 2)
>>> ii.next()
(2, 1)
>>> ii.next()
Traceback (most recent call last):
StopIteration
"""
def make_iter(self=self):
keys = self.iterkeys()
while True:
key = keys.next()
yield (key, self[key])
return make_iter()
def iterkeys(self):
"""
>>> ii = OrderedDict(((1, 3), (3, 2), (2, 1))).iterkeys()
>>> ii.next()
1
>>> ii.next()
3
>>> ii.next()
2
>>> ii.next()
Traceback (most recent call last):
StopIteration
"""
return iter(self._sequence)
__iter__ = iterkeys
def itervalues(self):
"""
>>> iv = OrderedDict(((1, 3), (3, 2), (2, 1))).itervalues()
>>> iv.next()
3
>>> iv.next()
2
>>> iv.next()
1
>>> iv.next()
Traceback (most recent call last):
StopIteration
"""
def make_iter(self=self):
keys = self.iterkeys()
while True:
yield self[keys.next()]
return make_iter()
### Read-write methods ###
def clear(self):
"""
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.clear()
>>> d
OrderedDict([])
"""
dict.clear(self)
self._sequence = []
def pop(self, key, *args):
"""
No dict.pop in Python 2.2, gotta reimplement it
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.pop(3)
2
>>> d
OrderedDict([(1, 3), (2, 1)])
>>> d.pop(4)
Traceback (most recent call last):
KeyError: 4
>>> d.pop(4, 0)
0
>>> d.pop(4, 0, 1)
Traceback (most recent call last):
TypeError: pop expected at most 2 arguments, got 3
"""
if len(args) > 1:
raise TypeError, ('pop expected at most 2 arguments, got %s' %
(len(args) + 1))
if key in self:
val = self[key]
del self[key]
else:
try:
val = args[0]
except IndexError:
raise KeyError(key)
return val
def popitem(self, i=-1):
"""
Delete and return an item specified by index, not a random one as in
dict. The index is -1 by default (the last item).
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.popitem()
(2, 1)
>>> d
OrderedDict([(1, 3), (3, 2)])
>>> d.popitem(0)
(1, 3)
>>> OrderedDict().popitem()
Traceback (most recent call last):
KeyError: 'popitem(): dictionary is empty'
>>> d.popitem(2)
Traceback (most recent call last):
IndexError: popitem(): index 2 not valid
"""
if not self._sequence:
raise KeyError('popitem(): dictionary is empty')
try:
key = self._sequence[i]
except IndexError:
raise IndexError('popitem(): index %s not valid' % i)
return (key, self.pop(key))
def setdefault(self, key, defval = None):
"""
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.setdefault(1)
3
>>> d.setdefault(4) is None
True
>>> d
OrderedDict([(1, 3), (3, 2), (2, 1), (4, None)])
>>> d.setdefault(5, 0)
0
>>> d
OrderedDict([(1, 3), (3, 2), (2, 1), (4, None), (5, 0)])
"""
if key in self:
return self[key]
else:
self[key] = defval
return defval
def update(self, from_od):
"""
Update from another OrderedDict or sequence of (key, value) pairs
>>> d = OrderedDict(((1, 0), (0, 1)))
>>> d.update(OrderedDict(((1, 3), (3, 2), (2, 1))))
>>> d
OrderedDict([(1, 3), (0, 1), (3, 2), (2, 1)])
>>> d.update({4: 4})
Traceback (most recent call last):
TypeError: undefined order, cannot get items from dict
>>> d.update((4, 4))
Traceback (most recent call last):
TypeError: cannot convert dictionary update sequence element "4" to a 2-item sequence
"""
if isinstance(from_od, OrderedDict):
for key, val in from_od.items():
self[key] = val
elif isinstance(from_od, dict):
# we lose compatibility with other ordered dict types this way
raise TypeError('undefined order, cannot get items from dict')
else:
# FIXME: efficiency?
# sequence of 2-item sequences, or error
for item in from_od:
try:
key, val = item
except TypeError:
raise TypeError('cannot convert dictionary update'
' sequence element "%s" to a 2-item sequence' % item)
self[key] = val
def rename(self, old_key, new_key):
"""
Rename the key for a given value, without modifying sequence order.
        For the case where new_key already exists this raises an exception,
since if new_key exists, it is ambiguous as to what happens to the
associated values, and the position of new_key in the sequence.
>>> od = OrderedDict()
>>> od['a'] = 1
>>> od['b'] = 2
>>> od.items()
[('a', 1), ('b', 2)]
>>> od.rename('b', 'c')
>>> od.items()
[('a', 1), ('c', 2)]
>>> od.rename('c', 'a')
Traceback (most recent call last):
ValueError: New key already exists: 'a'
>>> od.rename('d', 'b')
Traceback (most recent call last):
KeyError: 'd'
"""
if new_key == old_key:
# no-op
return
if new_key in self:
raise ValueError("New key already exists: %r" % new_key)
# rename sequence entry
value = self[old_key]
old_idx = self._sequence.index(old_key)
self._sequence[old_idx] = new_key
# rename internal dict entry
dict.__delitem__(self, old_key)
dict.__setitem__(self, new_key, value)
def setitems(self, items):
"""
This method allows you to set the items in the dict.
It takes a list of tuples - of the same sort returned by the ``items``
method.
>>> d = OrderedDict()
>>> d.setitems(((3, 1), (2, 3), (1, 2)))
>>> d
OrderedDict([(3, 1), (2, 3), (1, 2)])
"""
self.clear()
# FIXME: this allows you to pass in an OrderedDict as well :-)
self.update(items)
def setkeys(self, keys):
"""
        ``setkeys`` allows you to pass in a new list of keys which will
replace the current set. This must contain the same set of keys, but
need not be in the same order.
If you pass in new keys that don't match, a ``KeyError`` will be
raised.
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.keys()
[1, 3, 2]
>>> d.setkeys((1, 2, 3))
>>> d
OrderedDict([(1, 3), (2, 1), (3, 2)])
>>> d.setkeys(['a', 'b', 'c'])
Traceback (most recent call last):
KeyError: 'Keylist is not the same as current keylist.'
"""
# FIXME: Efficiency? (use set for Python 2.4 :-)
# NOTE: list(keys) rather than keys[:] because keys[:] returns
# a tuple, if keys is a tuple.
kcopy = list(keys)
kcopy.sort()
self._sequence.sort()
if kcopy != self._sequence:
raise KeyError('Keylist is not the same as current keylist.')
# NOTE: This makes the _sequence attribute a new object, instead
# of changing it in place.
# FIXME: efficiency?
self._sequence = list(keys)
def setvalues(self, values):
"""
You can pass in a list of values, which will replace the
current list. The value list must be the same len as the OrderedDict.
(Or a ``ValueError`` is raised.)
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.setvalues((1, 2, 3))
>>> d
OrderedDict([(1, 1), (3, 2), (2, 3)])
>>> d.setvalues([6])
Traceback (most recent call last):
ValueError: Value list is not the same length as the OrderedDict.
"""
if len(values) != len(self):
# FIXME: correct error to raise?
raise ValueError('Value list is not the same length as the '
'OrderedDict.')
self.update(zip(self, values))
### Sequence Methods ###
def index(self, key):
"""
Return the position of the specified key in the OrderedDict.
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.index(3)
1
>>> d.index(4)
Traceback (most recent call last):
...
ValueError: 4 is not in list
"""
return self._sequence.index(key)
def insert(self, index, key, value):
"""
Takes ``index``, ``key``, and ``value`` as arguments.
Sets ``key`` to ``value``, so that ``key`` is at position ``index`` in
the OrderedDict.
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.insert(0, 4, 0)
>>> d
OrderedDict([(4, 0), (1, 3), (3, 2), (2, 1)])
>>> d.insert(0, 2, 1)
>>> d
OrderedDict([(2, 1), (4, 0), (1, 3), (3, 2)])
>>> d.insert(8, 8, 1)
>>> d
OrderedDict([(2, 1), (4, 0), (1, 3), (3, 2), (8, 1)])
"""
if key in self:
# FIXME: efficiency?
del self[key]
self._sequence.insert(index, key)
dict.__setitem__(self, key, value)
def reverse(self):
"""
Reverse the order of the OrderedDict.
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.reverse()
>>> d
OrderedDict([(2, 1), (3, 2), (1, 3)])
"""
self._sequence.reverse()
def sort(self, *args, **kwargs):
"""
Sort the key order in the OrderedDict.
This method takes the same arguments as the ``list.sort`` method on
your version of Python.
>>> d = OrderedDict(((4, 1), (2, 2), (3, 3), (1, 4)))
>>> d.sort()
>>> d
OrderedDict([(1, 4), (2, 2), (3, 3), (4, 1)])
"""
self._sequence.sort(*args, **kwargs)
class Keys(object):
# FIXME: should this object be a subclass of list?
"""
Custom object for accessing the keys of an OrderedDict.
Can be called like the normal ``OrderedDict.keys`` method, but also
supports indexing and sequence methods.
"""
def __init__(self, main):
self._main = main
def __call__(self):
"""Pretend to be the keys method."""
return self._main._keys()
def __getitem__(self, index):
"""Fetch the key at position i."""
# NOTE: this automatically supports slicing :-)
return self._main._sequence[index]
def __setitem__(self, index, name):
"""
You cannot assign to keys, but you can do slice assignment to re-order
them.
You can only do slice assignment if the new set of keys is a reordering
of the original set.
"""
if isinstance(index, types.SliceType):
# FIXME: efficiency?
# check length is the same
indexes = range(len(self._main._sequence))[index]
if len(indexes) != len(name):
raise ValueError('attempt to assign sequence of size %s '
'to slice of size %s' % (len(name), len(indexes)))
# check they are the same keys
# FIXME: Use set
old_keys = self._main._sequence[index]
new_keys = list(name)
old_keys.sort()
new_keys.sort()
if old_keys != new_keys:
raise KeyError('Keylist is not the same as current keylist.')
orig_vals = [self._main[k] for k in name]
del self._main[index]
vals = zip(indexes, name, orig_vals)
vals.sort()
for i, k, v in vals:
if self._main.strict and k in self._main:
raise ValueError('slice assignment must be from '
'unique keys')
self._main.insert(i, k, v)
else:
raise ValueError('Cannot assign to keys')
### following methods pinched from UserList and adapted ###
def __repr__(self): return repr(self._main._sequence)
# FIXME: do we need to check if we are comparing with another ``Keys``
# object? (like the __cast method of UserList)
def __lt__(self, other): return self._main._sequence < other
def __le__(self, other): return self._main._sequence <= other
def __eq__(self, other): return self._main._sequence == other
def __ne__(self, other): return self._main._sequence != other
def __gt__(self, other): return self._main._sequence > other
def __ge__(self, other): return self._main._sequence >= other
# FIXME: do we need __cmp__ as well as rich comparisons?
def __cmp__(self, other): return cmp(self._main._sequence, other)
def __contains__(self, item): return item in self._main._sequence
def __len__(self): return len(self._main._sequence)
def __iter__(self): return self._main.iterkeys()
def count(self, item): return self._main._sequence.count(item)
def index(self, item, *args): return self._main._sequence.index(item, *args)
def reverse(self): self._main._sequence.reverse()
def sort(self, *args, **kwds): self._main._sequence.sort(*args, **kwds)
def __mul__(self, n): return self._main._sequence*n
__rmul__ = __mul__
def __add__(self, other): return self._main._sequence + other
def __radd__(self, other): return other + self._main._sequence
## following methods not implemented for keys ##
def __delitem__(self, i): raise TypeError('Can\'t delete items from keys')
def __iadd__(self, other): raise TypeError('Can\'t add in place to keys')
def __imul__(self, n): raise TypeError('Can\'t multiply keys in place')
def append(self, item): raise TypeError('Can\'t append items to keys')
def insert(self, i, item): raise TypeError('Can\'t insert items into keys')
def pop(self, i=-1): raise TypeError('Can\'t pop items from keys')
def remove(self, item): raise TypeError('Can\'t remove items from keys')
def extend(self, other): raise TypeError('Can\'t extend keys')
class Items(object):
"""
Custom object for accessing the items of an OrderedDict.
Can be called like the normal ``OrderedDict.items`` method, but also
supports indexing and sequence methods.
"""
def __init__(self, main):
self._main = main
def __call__(self):
"""Pretend to be the items method."""
return self._main._items()
def __getitem__(self, index):
"""Fetch the item at position i."""
if isinstance(index, types.SliceType):
# fetching a slice returns an OrderedDict
return self._main[index].items()
key = self._main._sequence[index]
return (key, self._main[key])
def __setitem__(self, index, item):
"""Set item at position i to item."""
if isinstance(index, types.SliceType):
# NOTE: item must be an iterable (list of tuples)
self._main[index] = OrderedDict(item)
else:
# FIXME: Does this raise a sensible error?
orig = self._main.keys[index]
key, value = item
if self._main.strict and key in self and (key != orig):
raise ValueError('slice assignment must be from '
'unique keys')
# delete the current one
del self._main[self._main._sequence[index]]
self._main.insert(index, key, value)
def __delitem__(self, i):
"""Delete the item at position i."""
key = self._main._sequence[i]
if isinstance(i, types.SliceType):
for k in key:
# FIXME: efficiency?
del self._main[k]
else:
del self._main[key]
### following methods pinched from UserList and adapted ###
def __repr__(self): return repr(self._main.items())
# FIXME: do we need to check if we are comparing with another ``Items``
# object? (like the __cast method of UserList)
def __lt__(self, other): return self._main.items() < other
def __le__(self, other): return self._main.items() <= other
def __eq__(self, other): return self._main.items() == other
def __ne__(self, other): return self._main.items() != other
def __gt__(self, other): return self._main.items() > other
def __ge__(self, other): return self._main.items() >= other
def __cmp__(self, other): return cmp(self._main.items(), other)
def __contains__(self, item): return item in self._main.items()
def __len__(self): return len(self._main._sequence) # easier :-)
def __iter__(self): return self._main.iteritems()
def count(self, item): return self._main.items().count(item)
def index(self, item, *args): return self._main.items().index(item, *args)
def reverse(self): self._main.reverse()
def sort(self, *args, **kwds): self._main.sort(*args, **kwds)
def __mul__(self, n): return self._main.items()*n
__rmul__ = __mul__
def __add__(self, other): return self._main.items() + other
def __radd__(self, other): return other + self._main.items()
def append(self, item):
"""Add an item to the end."""
# FIXME: this is only append if the key isn't already present
key, value = item
self._main[key] = value
def insert(self, i, item):
key, value = item
self._main.insert(i, key, value)
def pop(self, i=-1):
key = self._main._sequence[i]
return (key, self._main.pop(key))
def remove(self, item):
key, value = item
try:
assert value == self._main[key]
except (KeyError, AssertionError):
raise ValueError('ValueError: list.remove(x): x not in list')
else:
del self._main[key]
def extend(self, other):
# FIXME: is only a true extend if none of the keys already present
for item in other:
key, value = item
self._main[key] = value
def __iadd__(self, other):
self.extend(other)
## following methods not implemented for items ##
def __imul__(self, n): raise TypeError('Can\'t multiply items in place')
class Values(object):
"""
Custom object for accessing the values of an OrderedDict.
Can be called like the normal ``OrderedDict.values`` method, but also
supports indexing and sequence methods.
"""
def __init__(self, main):
self._main = main
def __call__(self):
"""Pretend to be the values method."""
return self._main._values()
def __getitem__(self, index):
"""Fetch the value at position i."""
if isinstance(index, types.SliceType):
return [self._main[key] for key in self._main._sequence[index]]
else:
return self._main[self._main._sequence[index]]
def __setitem__(self, index, value):
"""
Set the value at position i to value.
You can only do slice assignment to values if you supply a sequence of
equal length to the slice you are replacing.
"""
if isinstance(index, types.SliceType):
keys = self._main._sequence[index]
if len(keys) != len(value):
raise ValueError('attempt to assign sequence of size %s '
'to slice of size %s' % (len(value), len(keys)))
# FIXME: efficiency? Would be better to calculate the indexes
# directly from the slice object
# NOTE: the new keys can collide with existing keys (or even
# contain duplicates) - these will overwrite
for key, val in zip(keys, value):
self._main[key] = val
else:
self._main[self._main._sequence[index]] = value
### following methods pinched from UserList and adapted ###
def __repr__(self): return repr(self._main.values())
# FIXME: do we need to check if we are comparing with another ``Values``
# object? (like the __cast method of UserList)
def __lt__(self, other): return self._main.values() < other
def __le__(self, other): return self._main.values() <= other
def __eq__(self, other): return self._main.values() == other
def __ne__(self, other): return self._main.values() != other
def __gt__(self, other): return self._main.values() > other
def __ge__(self, other): return self._main.values() >= other
def __cmp__(self, other): return cmp(self._main.values(), other)
def __contains__(self, item): return item in self._main.values()
def __len__(self): return len(self._main._sequence) # easier :-)
def __iter__(self): return self._main.itervalues()
def count(self, item): return self._main.values().count(item)
def index(self, item, *args): return self._main.values().index(item, *args)
def reverse(self):
"""Reverse the values"""
vals = self._main.values()
vals.reverse()
# FIXME: efficiency
self[:] = vals
def sort(self, *args, **kwds):
"""Sort the values."""
vals = self._main.values()
vals.sort(*args, **kwds)
self[:] = vals
def __mul__(self, n): return self._main.values()*n
__rmul__ = __mul__
def __add__(self, other): return self._main.values() + other
def __radd__(self, other): return other + self._main.values()
## following methods not implemented for values ##
def __delitem__(self, i): raise TypeError('Can\'t delete items from values')
def __iadd__(self, other): raise TypeError('Can\'t add in place to values')
def __imul__(self, n): raise TypeError('Can\'t multiply values in place')
def append(self, item): raise TypeError('Can\'t append items to values')
def insert(self, i, item): raise TypeError('Can\'t insert items into values')
def pop(self, i=-1): raise TypeError('Can\'t pop items from values')
def remove(self, item): raise TypeError('Can\'t remove items from values')
def extend(self, other): raise TypeError('Can\'t extend values')
class SequenceOrderedDict(OrderedDict):
"""
Experimental version of OrderedDict that has a custom object for ``keys``,
``values``, and ``items``.
These are callable sequence objects that work as methods, or can be
manipulated directly as sequences.
Test for ``keys``, ``items`` and ``values``.
>>> d = SequenceOrderedDict(((1, 2), (2, 3), (3, 4)))
>>> d
SequenceOrderedDict([(1, 2), (2, 3), (3, 4)])
>>> d.keys
[1, 2, 3]
>>> d.keys()
[1, 2, 3]
>>> d.setkeys((3, 2, 1))
>>> d
SequenceOrderedDict([(3, 4), (2, 3), (1, 2)])
>>> d.setkeys((1, 2, 3))
>>> d.keys[0]
1
>>> d.keys[:]
[1, 2, 3]
>>> d.keys[-1]
3
>>> d.keys[-2]
2
>>> d.keys[0:2] = [2, 1]
>>> d
SequenceOrderedDict([(2, 3), (1, 2), (3, 4)])
>>> d.keys.reverse()
>>> d.keys
[3, 1, 2]
>>> d.keys = [1, 2, 3]
>>> d
SequenceOrderedDict([(1, 2), (2, 3), (3, 4)])
>>> d.keys = [3, 1, 2]
>>> d
SequenceOrderedDict([(3, 4), (1, 2), (2, 3)])
>>> a = SequenceOrderedDict()
>>> b = SequenceOrderedDict()
>>> a.keys == b.keys
1
>>> a['a'] = 3
>>> a.keys == b.keys
0
>>> b['a'] = 3
>>> a.keys == b.keys
1
>>> b['b'] = 3
>>> a.keys == b.keys
0
>>> a.keys > b.keys
0
>>> a.keys < b.keys
1
>>> 'a' in a.keys
1
>>> len(b.keys)
2
>>> 'c' in d.keys
0
>>> 1 in d.keys
1
>>> [v for v in d.keys]
[3, 1, 2]
>>> d.keys.sort()
>>> d.keys
[1, 2, 3]
>>> d = SequenceOrderedDict(((1, 2), (2, 3), (3, 4)), strict=True)
>>> d.keys[::-1] = [1, 2, 3]
>>> d
SequenceOrderedDict([(3, 4), (2, 3), (1, 2)])
>>> d.keys[:2]
[3, 2]
>>> d.keys[:2] = [1, 3]
Traceback (most recent call last):
KeyError: 'Keylist is not the same as current keylist.'
>>> d = SequenceOrderedDict(((1, 2), (2, 3), (3, 4)))
>>> d
SequenceOrderedDict([(1, 2), (2, 3), (3, 4)])
>>> d.values
[2, 3, 4]
>>> d.values()
[2, 3, 4]
>>> d.setvalues((4, 3, 2))
>>> d
SequenceOrderedDict([(1, 4), (2, 3), (3, 2)])
>>> d.values[::-1]
[2, 3, 4]
>>> d.values[0]
4
>>> d.values[-2]
3
>>> del d.values[0]
Traceback (most recent call last):
TypeError: Can't delete items from values
>>> d.values[::2] = [2, 4]
>>> d
SequenceOrderedDict([(1, 2), (2, 3), (3, 4)])
>>> 7 in d.values
0
>>> len(d.values)
3
>>> [val for val in d.values]
[2, 3, 4]
>>> d.values[-1] = 2
>>> d.values.count(2)
2
>>> d.values.index(2)
0
>>> d.values[-1] = 7
>>> d.values
[2, 3, 7]
>>> d.values.reverse()
>>> d.values
[7, 3, 2]
>>> d.values.sort()
>>> d.values
[2, 3, 7]
>>> d.values.append('anything')
Traceback (most recent call last):
TypeError: Can't append items to values
>>> d.values = (1, 2, 3)
>>> d
SequenceOrderedDict([(1, 1), (2, 2), (3, 3)])
>>> d = SequenceOrderedDict(((1, 2), (2, 3), (3, 4)))
>>> d
SequenceOrderedDict([(1, 2), (2, 3), (3, 4)])
>>> d.items()
[(1, 2), (2, 3), (3, 4)]
>>> d.setitems([(3, 4), (2 ,3), (1, 2)])
>>> d
SequenceOrderedDict([(3, 4), (2, 3), (1, 2)])
>>> d.items[0]
(3, 4)
>>> d.items[:-1]
[(3, 4), (2, 3)]
>>> d.items[1] = (6, 3)
>>> d.items
[(3, 4), (6, 3), (1, 2)]
>>> d.items[1:2] = [(9, 9)]
>>> d
SequenceOrderedDict([(3, 4), (9, 9), (1, 2)])
>>> del d.items[1:2]
>>> d
SequenceOrderedDict([(3, 4), (1, 2)])
>>> (3, 4) in d.items
1
>>> (4, 3) in d.items
0
>>> len(d.items)
2
>>> [v for v in d.items]
[(3, 4), (1, 2)]
>>> d.items.count((3, 4))
1
>>> d.items.index((1, 2))
1
>>> d.items.index((2, 1))
Traceback (most recent call last):
...
ValueError: (2, 1) is not in list
>>> d.items.reverse()
>>> d.items
[(1, 2), (3, 4)]
>>> d.items.reverse()
>>> d.items.sort()
>>> d.items
[(1, 2), (3, 4)]
>>> d.items.append((5, 6))
>>> d.items
[(1, 2), (3, 4), (5, 6)]
>>> d.items.insert(0, (0, 0))
>>> d.items
[(0, 0), (1, 2), (3, 4), (5, 6)]
>>> d.items.insert(-1, (7, 8))
>>> d.items
[(0, 0), (1, 2), (3, 4), (7, 8), (5, 6)]
>>> d.items.pop()
(5, 6)
>>> d.items
[(0, 0), (1, 2), (3, 4), (7, 8)]
>>> d.items.remove((1, 2))
>>> d.items
[(0, 0), (3, 4), (7, 8)]
>>> d.items.extend([(1, 2), (5, 6)])
>>> d.items
[(0, 0), (3, 4), (7, 8), (1, 2), (5, 6)]
"""
def __init__(self, init_val=(), strict=True):
OrderedDict.__init__(self, init_val, strict=strict)
self._keys = self.keys
self._values = self.values
self._items = self.items
self.keys = Keys(self)
self.values = Values(self)
self.items = Items(self)
self._att_dict = {
'keys': self.setkeys,
'items': self.setitems,
'values': self.setvalues,
}
def __setattr__(self, name, value):
"""Protect keys, items, and values."""
if not '_att_dict' in self.__dict__:
object.__setattr__(self, name, value)
else:
try:
fun = self._att_dict[name]
except KeyError:
OrderedDict.__setattr__(self, name, value)
else:
fun(value)
| bsd-3-clause | 3,860,535,775,826,466,300 | 32.248918 | 93 | 0.493306 | false |
timj/scons | test/Configure/Builder-call.py | 1 | 1983 | #!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Verify that calling normal Builders from an actual Configure
context environment works correctly.
"""
import TestSCons
_python_ = TestSCons._python_
test = TestSCons.TestSCons()
test.write('mycommand.py', r"""
import sys
sys.stderr.write( 'Hello World on stderr\n' )
sys.stdout.write( 'Hello World on stdout\n' )
open(sys.argv[1], 'w').write( 'Hello World\n' )
""")
test.write('SConstruct', """\
env = Environment()
def CustomTest(*args):
return 0
conf = env.Configure(custom_tests = {'MyTest' : CustomTest})
if not conf.MyTest():
env.Command("hello", [], r'%(_python_)s mycommand.py $TARGET')
env = conf.Finish()
""" % locals())
test.run(stderr="Hello World on stderr\n")
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mit | 4,421,151,058,988,959,000 | 30.47619 | 73 | 0.728694 | false |
IntegerMan/Pi-MFD | PiMFD/Applications/Navigation/MapLocations.py | 1 | 9253 | # coding=utf-8
"""
This file contains map locations information
"""
from PiMFD.Applications.MFDPage import MFDPage
from PiMFD.UI.Button import MFDButton
from PiMFD.UI.TextBoxes import TextBox
from PiMFD.UI.Widgets.MenuItem import TextMenuItem
__author__ = 'Matt Eland'
class MapLocation(object):
"""
Represents a location on the map
:param name: The name of the location
:type name: basestring
:param lat: The latitude
:type lat: float
:param lng: The longitude
:type lng: float
"""
name = None
lat = None
lng = None
tags = {}
id = None
def __init__(self, name, lat, lng):
super(MapLocation, self).__init__()
self.name = name
self.lat = lat
self.lng = lng
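# Illustrative sketch (not part of the original module): constructing a
# location by hand. The name and coordinates below are made-up example values.
#
#     home = MapLocation('Home', 21.3069, -157.8583)
#     home.id = 42   # optional identifier, e.g. an OSM node id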
class MapLocationAddPage(MFDPage):
id = None
def __init__(self, controller, application, back_page):
super(MapLocationAddPage, self).__init__(controller, application)
self.btn_back = MFDButton("BACK")
self.btn_add_location = MFDButton("ADD")
self.back_page = back_page
self.lbl_header = self.get_header_label('Add Location')
self.txt_name = TextBox(self.display, self, label='Name:', text_width=300)
self.txt_lat = TextBox(self.display, self, label=' Lat:', text_width=180)
self.txt_lng = TextBox(self.display, self, label='Long:', text_width=180)
self.txt_name.set_alphanumeric()
self.txt_name.max_length = 20
self.txt_lat.max_length = 12
self.txt_lng.max_length = 12
self.txt_lat.set_numeric(allow_decimal=True)
self.txt_lng.set_numeric(allow_decimal=True)
self.panel.children = [self.lbl_header, self.txt_name, self.txt_lat, self.txt_lng]
self.data_provider = application.data_provider
self.set_focus(self.txt_name)
def set_values_from_context(self, context):
if context:
self.txt_lat.text = str(context.lat)
self.txt_lng.text = str(context.lng)
self.txt_name.text = context.get_display_name()
self.id = context.id
def get_lower_buttons(self):
return [self.btn_back, self.btn_add_location]
def handle_lower_button(self, index):
if index == 0: # Back
self.application.select_page(self.back_page)
return True
elif index == 1: # Add
# Actually add the thing
location = MapLocation(self.txt_name.text, self.txt_lat.text, self.txt_lng.text)
location.id = self.id
self.data_provider.add_location(location)
self.application.select_page(self.back_page)
return True
return super(MapLocationAddPage, self).handle_lower_button(index)
def arrange(self):
# Update the valid state of the add button
if self.txt_lng.has_text() and self.txt_lat.has_text() and self.txt_name.has_text():
self.btn_add_location.enabled = True
else:
self.btn_add_location.enabled = False
return super(MapLocationAddPage, self).arrange()
def render(self):
return super(MapLocationAddPage, self).render()
class MapLocationDetailsPage(MFDPage):
def __init__(self, controller, application, location, back_page):
super(MapLocationDetailsPage, self).__init__(controller, application)
self.location = location
self.btn_back = MFDButton("BACK")
self.btn_save = MFDButton("SAVE")
self.btn_home = MFDButton("HOME")
self.btn_delete = MFDButton("DEL")
self.back_page = back_page
self.lbl_header = self.get_header_label('Edit Location')
self.txt_name = TextBox(self.display, self, label='Name:', text_width=300, text=location.name)
self.txt_lat = TextBox(self.display, self, label=' Lat:', text_width=180, text=location.lat)
self.txt_lng = TextBox(self.display, self, label='Long:', text_width=180, text=location.lng)
self.txt_name.set_alphanumeric()
self.txt_name.max_length = 20
self.txt_lat.max_length = 12
self.txt_lng.max_length = 12
self.txt_lat.set_numeric(allow_decimal=True)
self.txt_lng.set_numeric(allow_decimal=True)
self.panel.children = [self.lbl_header, self.txt_name, self.txt_lat, self.txt_lng]
self.set_focus(self.txt_name)
def get_lower_buttons(self):
return [self.btn_back, self.btn_save, self.btn_home, None, self.btn_delete]
def handle_lower_button(self, index):
if index == 0: # Back
self.application.select_page(self.back_page)
return True
elif index == 1: # Save
# Actually add the thing
self.location.name = self.txt_name.text
self.location.lat = self.txt_lat.text
self.location.lng = self.txt_lng.text
self.application.data_provider.save_locations()
self.application.select_page(self.back_page)
return True
elif index == 2: # Home
# Set this as home
self.controller.options.lat = float(self.txt_lat.text)
self.controller.options.lng = float(self.txt_lng.text)
return True
elif index == 4: # Delete
# TODO: Once my UI framework has grown a bit more, add a confirm functionality.
self.application.delete_location(self.location)
self.application.select_page(self.back_page)
return True
return super(MapLocationDetailsPage, self).handle_lower_button(index)
def arrange(self):
# Update the valid state of the add button
if self.txt_lng.has_text() and self.txt_lat.has_text() and self.txt_name.has_text():
self.btn_save.enabled = True
else:
self.btn_save.enabled = False
# Mark as home if it's your home location
try:
if float(self.txt_lat.text) == self.controller.options.lat and \
float(self.txt_lng.text) == self.controller.options.lng:
self.btn_home.selected = True
else:
self.btn_home.selected = False
except:
self.btn_home.selected = False
return super(MapLocationDetailsPage, self).arrange()
def render(self):
return super(MapLocationDetailsPage, self).render()
class MapLocationsPage(MFDPage):
"""
Lists map locations the user has saved
:param controller: The controller
:param application: The navigation application
:param map_context: The map context
"""
def __init__(self, controller, application, map_context, back_page):
super(MapLocationsPage, self).__init__(controller, application)
self.map_context = map_context
self.data_provider = application.data_provider
self.btn_back = MFDButton("BACK")
self.btn_edit_location = MFDButton("EDIT")
self.btn_add_location = MFDButton("NEW")
self.back_page = back_page
def handle_selected(self):
is_first = True
self.clear_focusables()
if self.data_provider.locations and len(self.data_provider.locations) > 0:
self.panel.children = [self.get_header_label('Locations ({})'.format(len(self.data_provider.locations)))]
for l in self.data_provider.locations:
item = TextMenuItem(self.display, self, '{}: {}, {}'.format(l.name, l.lat, l.lng))
item.font = self.display.fonts.list
item.data_context = l
self.panel.children.append(item)
if is_first:
self.set_focus(item)
is_first = False
super(MapLocationsPage, self).handle_selected()
def handle_control_state_changed(self, widget):
location = widget.data_context
if location:
self.application.show_map(location.lat, location.lng)
super(MapLocationsPage, self).handle_control_state_changed(widget)
def get_lower_buttons(self):
return [self.btn_back, self.btn_edit_location, self.btn_add_location]
def handle_lower_button(self, index):
if index == 0: # Back
self.application.select_page(self.back_page)
return True
elif index == 1: # Edit
if self.focus:
loc = self.focus.data_context
if loc:
self.application.select_page(MapLocationDetailsPage(self.controller, self.application, loc, self))
return True
elif index == 2: # Add
self.application.select_page(MapLocationAddPage(self.controller, self.application, self))
return True
return super(MapLocationsPage, self).handle_lower_button(index)
def get_button_text(self):
return "GOTO"
def arrange(self):
return super(MapLocationsPage, self).arrange()
def render(self):
        if not self.data_provider.locations or len(self.data_provider.locations) == 0:
self.center_text("NO LOCATIONS DEFINED")
else:
return super(MapLocationsPage, self).render() | gpl-2.0 | -5,634,889,265,219,760,000 | 31.584507 | 118 | 0.608451 | false |
boundlessgeo/qgis-geogig-plugin | geogig/gui/dialogs/remotesdialog.py | 1 | 7621 | # -*- coding: utf-8 -*-
#
# (c) 2016 Boundless, http://boundlessgeo.com
# This code is licensed under the GPL 2.0 license.
#
from PyQt4 import QtCore, QtGui
from PyQt4.QtCore import *
from PyQt4.QtGui import *
class RemotesDialog(QtGui.QDialog):
def __init__(self, parent, repo):
QtGui.QDialog.__init__(self, parent, QtCore.Qt.WindowSystemMenuHint | QtCore.Qt.WindowTitleHint)
self.changed = False
self.repo = repo
self.remotes = repo.remotes
self.setupUi()
def setupUi(self):
self.resize(500, 350)
self.setWindowTitle("Remotes manager")
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setSpacing(2)
self.horizontalLayout.setMargin(0)
self.buttonBox = QtGui.QDialogButtonBox()
self.buttonBox.setOrientation(QtCore.Qt.Vertical)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Close)
self.table = QtGui.QTableWidget()
self.table.verticalHeader().setVisible(False)
self.table.setSelectionMode(QtGui.QAbstractItemView.SingleSelection)
self.table.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
self.addRowButton = QtGui.QPushButton()
self.addRowButton.setText("Add remote")
self.editRowButton = QtGui.QPushButton()
self.editRowButton.setText("Edit remote")
self.removeRowButton = QtGui.QPushButton()
self.removeRowButton.setText("Remove remote")
self.buttonBox.addButton(self.addRowButton, QtGui.QDialogButtonBox.ActionRole)
self.buttonBox.addButton(self.editRowButton, QtGui.QDialogButtonBox.ActionRole)
self.buttonBox.addButton(self.removeRowButton, QtGui.QDialogButtonBox.ActionRole)
self.setTableContent()
self.horizontalLayout.addWidget(self.table)
self.horizontalLayout.addWidget(self.buttonBox)
self.setLayout(self.horizontalLayout)
self.buttonBox.rejected.connect(self.close)
self.editRowButton.clicked.connect(self.editRow)
self.addRowButton.clicked.connect(self.addRow)
self.removeRowButton.clicked.connect(self.removeRow)
QtCore.QMetaObject.connectSlotsByName(self)
self.editRowButton.setEnabled(False)
self.removeRowButton.setEnabled(False)
def setTableContent(self):
self.table.clear()
self.table.setColumnCount(2)
self.table.setColumnWidth(0, 200)
self.table.setColumnWidth(1, 200)
self.table.setHorizontalHeaderLabels(["Name", "URL"])
self.table.horizontalHeader().setResizeMode(QtGui.QHeaderView.Stretch)
self.table.setRowCount(len(self.remotes))
for i, name in enumerate(self.remotes):
url = self.remotes[name]
self.table.setRowHeight(i, 22)
item = QtGui.QTableWidgetItem(name, 0)
item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
self.table.setItem(i, 0, item)
item = QtGui.QTableWidgetItem(url, 0)
item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
self.table.setItem(i, 1, item)
self.table.itemSelectionChanged.connect(self.selectionChanged)
def selectionChanged(self):
enabled = len(self.table.selectedItems()) > 0
self.editRowButton.setEnabled(enabled)
self.removeRowButton.setEnabled(enabled)
def editRow(self):
item = self.table.item(self.table.currentRow(), 0)
if item is not None:
name = item.text()
url = self.table.item(self.table.currentRow(), 1).text()
dlg = NewRemoteDialog(name, url, self)
dlg.exec_()
if dlg.ok:
self.repo.removeremote(name)
self.repo.addremote(dlg.name, dlg.url, dlg.username, dlg.password)
del self.remotes[name]
self.remotes[dlg.name] = dlg.url
self.setTableContent()
self.changed = True
def removeRow(self):
item = self.table.item(self.table.currentRow(), 0)
if item is not None:
name = item.text()
self.repo.removeremote(name)
del self.remotes[name]
self.setTableContent()
self.changed = True
def addRow(self):
dlg = NewRemoteDialog(parent = self)
dlg.exec_()
if dlg.ok:
self.repo.addremote(dlg.name, dlg.url, dlg.username, dlg.password)
self.remotes[dlg.name] = dlg.url
self.setTableContent()
self.changed = True
class NewRemoteDialog(QtGui.QDialog):
def __init__(self, name = None, url = None, parent = None):
super(NewRemoteDialog, self).__init__(parent)
self.ok = False
self.name = name
self.url = url
self.initGui()
def initGui(self):
self.setWindowTitle('New remote')
layout = QtGui.QVBoxLayout()
buttonBox = QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Ok | QtGui.QDialogButtonBox.Close)
horizontalLayout = QtGui.QHBoxLayout()
horizontalLayout.setSpacing(30)
horizontalLayout.setMargin(0)
nameLabel = QtGui.QLabel('Name')
nameLabel.setMinimumWidth(120)
nameLabel.setMaximumWidth(120)
self.nameBox = QtGui.QLineEdit()
if self.name is not None:
self.nameBox.setText(self.name)
horizontalLayout.addWidget(nameLabel)
horizontalLayout.addWidget(self.nameBox)
layout.addLayout(horizontalLayout)
horizontalLayout = QtGui.QHBoxLayout()
horizontalLayout.setSpacing(30)
horizontalLayout.setMargin(0)
urlLabel = QtGui.QLabel('URL')
urlLabel.setMinimumWidth(120)
urlLabel.setMaximumWidth(120)
self.urlBox = QtGui.QLineEdit()
if self.url is not None:
self.urlBox.setText(self.url)
horizontalLayout.addWidget(urlLabel)
horizontalLayout.addWidget(self.urlBox)
layout.addLayout(horizontalLayout)
horizontalLayout = QtGui.QHBoxLayout()
horizontalLayout.setSpacing(30)
horizontalLayout.setMargin(0)
usernameLabel = QtGui.QLabel('Username')
usernameLabel.setMinimumWidth(120)
usernameLabel.setMaximumWidth(120)
self.usernameBox = QtGui.QLineEdit()
horizontalLayout.addWidget(usernameLabel)
horizontalLayout.addWidget(self.usernameBox)
layout.addLayout(horizontalLayout)
horizontalLayout = QtGui.QHBoxLayout()
horizontalLayout.setSpacing(30)
horizontalLayout.setMargin(0)
passwordLabel = QtGui.QLabel('Password')
passwordLabel.setMinimumWidth(120)
passwordLabel.setMaximumWidth(120)
self.passwordBox = QtGui.QLineEdit()
self.passwordBox.setEchoMode(QtGui.QLineEdit.Password)
horizontalLayout.addWidget(passwordLabel)
horizontalLayout.addWidget(self.passwordBox)
layout.addLayout(horizontalLayout)
layout.addWidget(buttonBox)
self.setLayout(layout)
buttonBox.accepted.connect(self.okPressed)
buttonBox.rejected.connect(self.cancelPressed)
self.resize(400, 200)
def okPressed(self):
self.name = unicode(self.nameBox.text())
self.url = unicode(self.urlBox.text())
self.username = unicode(self.usernameBox.text()).strip() or None
self.password = unicode(self.passwordBox.text()).strip() or None
self.ok = True
self.close()
def cancelPressed(self):
self.name = None
self.url = None
self.close()
| gpl-2.0 | -5,345,839,704,717,430,000 | 37.489899 | 104 | 0.656213 | false |
kartikshah1/Test | courseware/serializers.py | 1 | 2712 | """
Serializers for the courseware API
"""
from rest_framework import serializers
from courseware import models
from video.serializers import VideoSerializer
from quiz.serializers import QuizSerializer
from document.serializers import DocumentSerializer
class AddGroupSerializer(serializers.ModelSerializer):
class Meta:
model = models.Group
exclude = ('pages', 'course')
class GroupSerializer(serializers.ModelSerializer):
class Meta:
model = models.Group
exclude = ('pages',)
class ConceptSerializer(serializers.ModelSerializer):
"""
Serializer for Concept
"""
videos = VideoSerializer(many=True)
quizzes = QuizSerializer(many=True)
pages = DocumentSerializer(many=True)
class Meta:
"""
Defining model
"""
model = models.Concept
fields = ('id', 'title', 'description', 'image', 'playlist', 'is_published')
#fields = ('id', 'group', 'title', 'image', 'playlist')
class ConceptDataPlaylistSerializer(serializers.Serializer):
"""
Serializer to create the playlist to send to the concept page
"""
id = serializers.IntegerField()
title = serializers.CharField(default='title not specified')
seen_status = serializers.BooleanField(default=False)
toc = serializers.CharField()
url = serializers.CharField()
class GroupPlaylistSerializer(serializers.Serializer):
"""
    Serializer for an entry in the group/course playlist
"""
id = serializers.IntegerField()
title = serializers.CharField()
class ConceptDataSerializer(serializers.Serializer):
"""
    Serializer to send the data required for the
concept page
"""
id = serializers.IntegerField()
title = serializers.CharField(default='title_not_specified')
description = serializers.CharField(default='description_not_provided')
group = serializers.IntegerField(default=0)
group_title = serializers.CharField(default='group_not_spefified')
course = serializers.IntegerField(default=0)
course_title = serializers.CharField(default='course_not_specified')
playlist = ConceptDataPlaylistSerializer(many=True)
current_video = serializers.IntegerField(default=-1)
group_playlist = GroupPlaylistSerializer(many=True)
course_playlist = GroupPlaylistSerializer(many=True)
title_document = DocumentSerializer()
class ConceptHistorySerializer(serializers.ModelSerializer):
"""
Serializer for ConceptHistory
"""
class Meta:
"""
Defining model
"""
model = models.ConceptHistory
class AddQuizSerializer(serializers.Serializer):
title = serializers.CharField(max_length=models.SHORT_TEXT)
| mit | -2,709,606,381,594,734,600 | 28.16129 | 84 | 0.707965 | false |
jumpinjackie/fdo-swig | Lang/Python/UnitTest/Src/ClientServicesTest.py | 1 | 2448 | #
# Copyright (C) 2004-2007 Autodesk, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser
# General Public License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
import traceback
import string
import os.path
import os
from FDO import *
import unittest
class ClientServicesTest(unittest.TestCase):
"""
Unit test for the ClientServices classes. The Provider Registry and
the FdoIConnections are tested.
"""
def testClientServices(self):
"""FeatureAccessManager accessor functions should return the correct type"""
manager = FdoFeatureAccessManager.GetConnectionManager()
registry = FdoFeatureAccessManager.GetProviderRegistry()
providerCollection = registry.GetProviders()
# Verify the instance classnames
self.assert_(manager.__class__.__name__ == "IConnectionManager")
self.assert_(registry.__class__.__name__ == "IProviderRegistry")
self.assert_(providerCollection.__class__.__name__ == "FdoProviderCollection")
def testConnectionCreation(self):
"""Check that FdoIConnections can be created correctly"""
manager = FdoFeatureAccessManager.GetConnectionManager()
registry = FdoFeatureAccessManager.GetProviderRegistry()
providerCollection = registry.GetProviders()
# Iterate through each provider; instantiate the provider
for index in range(providerCollection.Count):
provider = providerCollection.GetItem(index)
name = provider.Name
self.assert_( provider.__class__.__name__ == 'FdoProvider')
# Unable to load the SDF provider for some reason.
if name == "OSGeo.SDF.3.9":
connection = manager.CreateConnection(name)
self.assert_(connection.__class__.__name__ == 'FdoIConnection')
# Check if the library exists in the path
path = provider.LibraryPath
if "SDFProvider.dll" not in path and "libSDFProvider.so" not in path:
self.fail("Invalid provider.GetLibraryPath(). Path was: " + path)
| lgpl-2.1 | -477,665,189,705,039,550 | 35.537313 | 80 | 0.743056 | false |
passuf/WunderHabit | wunderhabit/urls.py | 1 | 1109 | from django.conf.urls import include, url
from django.contrib import admin
from django.views.generic import TemplateView
from . import views
from wunderlist import urls as wunderlist_urls
from wh_habitica import urls as habitica_urls
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^$', views.index, name='index'),
url(r'^contact/$', TemplateView.as_view(template_name='wunderhabit/contact.html'), name='contact'),
url(r'^privacy/$', TemplateView.as_view(template_name='wunderhabit/privacy.html'), name='privacy'),
url(r'^dashboard/$', views.dashboard, name='dashboard'),
url(r'^logout/$', views.logout_view, name='logout'),
url(r'^account/delete/$', views.delete_account, name='delete_account'),
url(r'^account/test/$', views.test_authentication, name='test_authentication'),
url(r'^add/$', views.add_connection, name='add'),
url(r'^delete/(?P<connection_id>\d+)/$', views.delete_connection, name='delete'),
url(r'^habitica/', include(habitica_urls, namespace='habitica')),
url(r'^wunderlist/', include(wunderlist_urls, namespace='wunderlist')),
]
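# Illustrative sketch (not part of the original module): the named routes above
# can be reversed in the usual Django 1.x way, e.g.
#
#     from django.core.urlresolvers import reverse
#     reverse('dashboard')         # -> '/dashboard/'
#     reverse('delete', args=[3])  # -> '/delete/3/'
#
# Names inside the 'habitica' and 'wunderlist' namespaces depend on the
# included wh_habitica.urls and wunderlist.urls modules.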
| mit | -6,684,033,268,098,599,000 | 45.208333 | 103 | 0.698828 | false |
blueshed/blueshed-micro | blueshed/micro/web/rpc_handler.py | 1 | 4761 | from pkg_resources import resource_filename # @UnresolvedImport
from tornado import web
from tornado.escape import json_decode
from tornado.web import asynchronous, RequestHandler
import tornado.concurrent
from blueshed.micro.utils.json_utils import dumps
from blueshed.micro.web.context_mixin import ContextMixin
from blueshed.micro.web.cors_mixin import CorsMixin, cors
import functools
import logging
acceptable_form_mime_types = [
"application/x-www-form-urlencoded; charset=UTF-8",
"application/x-www-form-urlencoded"
]
acceptable_json_mime_types = [
"application/json; charset=UTF-8",
"application/json;"
]
class RpcHandler(ContextMixin, CorsMixin, RequestHandler):
'''
Calls services in application.settings['services']
get:
returns the meta data about a service
or all services
suffix .js returns a client control
javascript object for websocket support
suffix <service name>.html returns
an html form to run the service
post:
form-encoded or json-encoded input
result is always json
'''
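    # Illustrative sketch (not part of the original class); the mount point
    # '/rpc/' and the 'echo' service below are hypothetical examples.
    #
    #   GET  /rpc/            -> JSON metadata for all registered services
    #   GET  /rpc/api.js      -> rendered javascript client (js_template)
    #   GET  /rpc/echo.html   -> rendered html form for the 'echo' service
    #   POST /rpc/echo        with 'Content-Type: application/json' and body
    #                         '{"message": "hi"}' -> JSON-encoded result
    #
    # Form-encoded and multipart posts are also accepted; for those, argument
    # names starting with '_' are ignored by post() below.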
def initialize(self,
html_template=None,
js_template=None,
http_origins=None,
ws_url=None):
RequestHandler.initialize(self)
self.set_cors_methods("OPTIONS,GET,POST")
if http_origins:
self.set_cors_whitelist(http_origins)
self._html_template = html_template
self._js_template = js_template
self._ws_url = ws_url if ws_url else ''
def get_template_path(self):
''' overrides the template path to use this module '''
if self._html_template is None and self._js_template is None:
return resource_filename('blueshed.micro.web', "templates")
return RequestHandler.get_template_path(self)
def write_error(self, *args, **kwargs):
''' Must override base write error to stop uncaught HTTP errors from clearing CORS headers '''
self.write_cors_headers()
RequestHandler.write_error(self, *args, **kwargs)
def options(self, *args, **kwargs):
self.cors_options()
@cors
def get(self, path=None):
services = self.get_service(path)
if services is None:
services = self.settings['services']
if path is not None and path.endswith(".js"):
self.set_header('content-type', 'text/javascript')
self.render(self._js_template or "api-tmpl.js",
services=services.values(),
ws_url=self._ws_url)
return
elif path is not None and path.endswith(".html"):
self.render(self._html_template or "service.html",
service=services,
error=None,
result=None)
return
self.set_header('content-type', 'application/json; charset=UTF-8')
self.write(dumps(services, indent=4))
@asynchronous
@cors
def post(self, path):
content_type = self.request.headers['content-type']
if content_type in acceptable_json_mime_types:
kwargs = json_decode(self.request.body)
elif content_type in acceptable_form_mime_types:
kwargs = dict([(k, self.get_argument(k))
for k in self.request.body_arguments.keys()
if k[0] != "_"])
elif content_type and content_type.startswith("multipart/form-data"):
kwargs = dict([(k, self.get_argument(k))
for k in self.request.body_arguments.keys()
if k[0] != "_"])
else:
raise web.HTTPError(415, 'content type not supported {}'.format(
self.request.headers['content-type']))
service = self.get_service(path)
service.parse_http_kwargs(kwargs)
context = self.settings['micro_context'](
-1, -1, service.name, {"current_user": self.current_user},
self)
try:
logging.info("%s(%r)", service.name, kwargs)
result = service.perform(context, **kwargs)
if tornado.concurrent.is_future(result):
result.add_done_callback(
functools.partial(self.handle_future,
service,
context,
True))
else:
self.handle_result(service, context, result)
self.finish()
except Exception as ex:
self.write_err(context, ex)
self.finish()
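# Illustrative sketch (not part of the original module): wiring the handler
# into a tornado Application. The route, service dict and context class are
# assumptions for the example, not part of blueshed.micro itself.
#
#     import tornado.web
#
#     app = tornado.web.Application(
#         [(r"/rpc/(.*)", RpcHandler, dict(http_origins=["https://example.com"],
#                                          ws_url="wss://example.com/ws"))],
#         services=my_services,        # name -> service objects with .perform()
#         micro_context=MyContext)     # class used to build per-call contexts
#
# The handler reads application.settings['services'] in get() and
# settings['micro_context'] in post(), as seen above.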
| mit | -7,662,630,218,613,734,000 | 37.088 | 102 | 0.572989 | false |
WestpointLtd/pytls | fallback.py | 1 | 2729 | #!/usr/bin/python
import sys
import socket
import logging
from optparse import OptionParser
from tls import *
def make_hello():
hello = ClientHelloMessage.create(TLSRecord.TLS1_0,
'01234567890123456789012345678901',
[TLS_RSA_WITH_RC4_128_MD5,
TLS_RSA_WITH_RC4_128_SHA,
TLS_RSA_WITH_3DES_EDE_CBC_SHA,
TLS_RSA_WITH_AES_128_CBC_SHA,
TLS_RSA_WITH_AES_256_CBC_SHA,
TLS_RSA_WITH_AES_128_CBC_SHA256,
TLS_RSA_WITH_AES_256_CBC_SHA256,
TLS_FALLBACK_SCSV])
record = TLSRecord.create(content_type=TLSRecord.Handshake,
version=TLSRecord.TLS1_0,
message=hello.bytes)
#hexdump(record.bytes)
return record.bytes
def fallback(f):
print 'Sending Client Hello...'
f.write(make_hello())
print 'Waiting for Server Hello Done...'
while True:
record = read_tls_record(f)
# Look for server hello done message.
if record.content_type() == TLSRecord.Handshake:
message = HandshakeMessage.from_bytes(record.message())
if message.message_type() == HandshakeMessage.ServerHelloDone:
print 'Exchange completed without error - oh dear'
elif record.content_type() == TLSRecord.Alert:
alert = AlertMessage.from_bytes(record.message())
print alert
if alert.alert_level() == AlertMessage.Fatal:
raise IOError('Server sent a fatal alert')
else:
print 'Record received type %d' % (record.content_type())
def main():
options = OptionParser(usage='%prog server [options]',
description='Test for Python SSL')
options.add_option('-p', '--port',
type='int', default=443,
help='TCP port to test (default: 443)')
options.add_option('-d', '--debug', action='store_true', dest='debug',
default=False,
help='Print debugging messages')
opts, args = options.parse_args()
if len(args) < 1:
options.print_help()
return
if opts.debug:
logging.basicConfig(level=logging.DEBUG)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print 'Connecting...'
s.connect((args[0], opts.port))
f = s.makefile('rw', 0)
f = LoggedFile(f)
fallback(f)
if __name__ == '__main__':
main()
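# Illustrative sketch (not part of the original script): typical invocation.
#
#     python fallback.py example.com -p 443 -d
#
# make_hello() only offers TLS 1.0 cipher suites plus TLS_FALLBACK_SCSV, so a
# server that implements RFC 7507 and supports a newer protocol version is
# expected to reply with a fatal 'inappropriate fallback' alert (reported via
# the IOError above); completing the handshake instead is the 'oh dear' case.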
| mit | -5,782,777,739,552,321,000 | 32.280488 | 74 | 0.522902 | false |
Puddim/ScottPilgrim | Scott Pilgrim v05.py | 1 | 6181 | # -----------------------
# Game Test
# Using Scott Pilgrim
# By Puddim
# https://github.com/Puddim
# -----------------------
# IMPORT
import pygame
import os
import time
import random
import math
# INIT
pygame.init()
pygame.display.set_caption("Scott Pilgrim")
w = 1200
h = 284
screensize = (w, h)
screen = pygame.display.set_mode(screensize)
running = True
black = (0, 0, 0)
clock = pygame.time.Clock()
ei = 0
eit = 0
# LOAD
icon = pygame.image.load(os.path.join("Images", "icon.jpg"))
backgroundsound = pygame.mixer.music.load(os.path.join("Sounds", "SBO.mp3"))
background = pygame.image.load(os.path.join("Images", "background.png"))
standl = []
for i in range(0, 8):
imglid = "stand" + str(i) + ".gif"
standl.append(pygame.image.load(os.path.join("Images", "stand", imglid)))
runl = []
for i in range(0, 8):
imglid = "run" + str(i) + ".gif"
runl.append(pygame.image.load(os.path.join("Images", "run", imglid)))
attackl = []
for i in range(0,7):
imglid = "attack" + str(i) + ".gif"
attackl.append(pygame.image.load(os.path.join("Images", "attack", imglid)))
evill = []
for i in range(0,5):
imglid = "evil" + str(i) + ".gif"
evill.append(pygame.image.load(os.path.join("Images", "evil", imglid)))
pygame.mixer.music.play(9999)
pygame.display.set_icon(icon)
# DEF
def updatescott():
global dit
moving = False
pressed = pygame.key.get_pressed()
if pressed[pygame.K_d]:
scott.right()
moving = True
if pressed[pygame.K_a]:
scott.left()
moving = True
if pressed[pygame.K_s]:
scott.down()
moving = True
if pressed[pygame.K_w]:
scott.up()
moving = True
if moving == False:
scott.stand()
elif moving == True:
scott.run()
scott.border()
renderscott()
def renderscott():
global exst
global dit
if scott.getstatus() == "r":
if exst == "r":
screen.blit(pygame.transform.flip(runl[int(dit)], scott.getdir(), 0), (scott.getpos()))
else:
exst = "r"
dit = 0
screen.blit(pygame.transform.flip(runl[int(dit)], scott.getdir(), 0), (scott.getpos()))
elif scott.getstatus() == "s":
if exst == "s":
screen.blit(pygame.transform.flip(standl[int(dit)], scott.getdir(), 0), (scott.getpos()))
else:
exst = "s"
dit = 0
screen.blit(pygame.transform.flip(standl[int(dit)], scott.getdir(), 0), (scott.getpos()))
def updateevil():
global ei
global eit
if eit == 0:
if ei == 4 or ei > 4:
if eit == 0:
eit = 1
ei = 2
else:
eit = 0
else:
if eit == 0:
ei += 0.1
else:
ei -= 0.1
elif eit == 1:
if ei == 0 or ei < 0:
if eit == 0:
eit = 1
ei = 2
else:
eit = 0
else:
if eit == 0:
ei += 0.1
else:
ei -= 0.1
nega.detecthit()
nega.detecthp()
rendernega()
def rendernega():
screen.blit(pygame.transform.flip(evill[int(ei)], 1, 0), (nega.x, nega.y))
# CLASSES
class player(object):
def __init__(self):
self.x = 400
self.y = 160
self.spr = "s"
self.dir = 0
def getpos(self):
return (int(self.x), int(self.y))
def getdir(self):
return self.dir
def right(self):
self.x += 2
self.dir = 0
def left(self):
self.x -= 2
self.dir = 1
def up(self):
self.y -= 2
self.dir = self.dir
def down(self):
self.y += 2
self.dir = self.dir
def stand(self):
self.spr = "s"
def run(self):
self.spr = "r"
def getstatus(self):
return self.spr
def border(self):
if self.y < 116:
self.y = 116
if self.y > 212:
self.y = 212
if self.x > 1154:
self.x = 1154
if self.x < -14:
self.x = -14
class evil(object):
def __init__(self):
self.hp = 100
self.x = 800
self.y = 160
self.status = 1
def detecthit(self):
sctp = scott.getpos()
dist = math.sqrt((self.x - sctp[0])**2 + (self.y - sctp[1])**2)
if scott.getstatus() == "a" and dist < 6:
            self.hp -= 10
def detecthp(self):
if self.hp < 0:
self.status = 0
def revive(self):
self.status = 1
self.hp = 100
# BASIC
nega = evil()
scott = player()
dit = 0
exst = "s"
lastcd = 0
print(len(standl))
# LOOP
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.display.quit()
screen.blit(background, (-15, -30))
updatescott()
updateevil()
# print (pygame.key.get_pressed())
pygame.display.update()
# time.sleep(0)
if scott.getstatus() == "s" or scott.getstatus() == "r":
if pygame.time.get_ticks() > lastcd + 100:
lastcd = pygame.time.get_ticks()
dit += 1
if dit == 8 or dit > 7.9:
dit = 0
elif scott.getstatus() == "a":
if pygame.time.get_ticks() > lastcd + 100:
lastcd = pygame.time.get_ticks()
dit += 1
if dit == 7 or dit > 6.9:
dit = 0
clock.tick(120)
if pygame.time.get_ticks() > lastcd + 3:
print(clock)
print(scott.getpos())
print(pygame.time.get_ticks())
print(lastcd)
print(scott.spr)
| unlicense | 2,857,754,772,491,994,600 | 24.757576 | 101 | 0.464164 | false |
lare-team/django-lare | django_lare/models.py | 1 | 2035 | from django_lare import VERSION
class Lare(object):
enabled = False
current_namespace = ""
previous_namespace = ""
version = VERSION
supported_version = "1.0.0"
def __init__(self, request):
super(Lare, self).__init__()
if 'HTTP_X_LARE' in request.META:
if 'HTTP_X_LARE_VERSION' in request.META:
frontend_version = request.META['HTTP_X_LARE_VERSION']
frontend_versions = frontend_version.split('.')
supported_versions = self.supported_version.split('.')
i = 0
while i < len(supported_versions):
if frontend_versions[i] < supported_versions[i]:
self.enabled = False
return
i += 1
self.enabled = True
self.previous_namespace = request.META['HTTP_X_LARE']
def set_current_namespace(self, namespace):
self.current_namespace = namespace
def get_current_namespace(self):
return self.current_namespace
def is_enabled(self):
return self.enabled
def get_matching_count(self, extension_namespace=None):
if not self.enabled:
return 0
if extension_namespace is None:
extension_namespace = self.current_namespace
matching_count = 0
previous_namespaces = self.previous_namespace.split('.')
extension_namespaces = extension_namespace.split('.')
while matching_count < len(previous_namespaces) and matching_count < len(extension_namespaces):
if previous_namespaces[matching_count] == extension_namespaces[matching_count]:
matching_count += 1
else:
break
return matching_count
def matches(self, extension_namespace=None):
if extension_namespace is None:
extension_namespace = self.current_namespace
return self.get_matching_count(extension_namespace) == len(extension_namespace.split('.'))
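# Illustrative sketch (not part of the original module): how the prefix
# matching behaves for a request that carried 'X-Lare: page.home.index'
# (plus a compatible 'X-Lare-Version' header, so the instance is enabled).
#
#     lare.set_current_namespace('page.home.detail')
#     lare.get_matching_count()          # -> 2, 'page' and 'home' match
#     lare.matches('page.home')          # -> True  (2 == 2 parts)
#     lare.matches('page.home.detail')   # -> False (only 2 of 3 parts match)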
| mit | 3,086,362,984,252,951,600 | 34.701754 | 103 | 0.594595 | false |
mirkobronzi/finance-analyzer | test/test_data_analyzer.py | 1 | 4608 | '''tests for data_analyser.py'''
from datetime import date
import unittest
from lib.data_analyzer import MonthData
from lib.data_analyzer import DataContainer
from lib.entries import Entries, Entry
class TestDataAnalyzer(unittest.TestCase):
def test_month_data__simple(self):
month_data = MonthData('name',
Entries(Entry(date.today(), 'entry1', 1, 2)))
self.assertEqual(month_data.month_name,'name')
self.assertEqual(month_data.money_out, 1)
self.assertEqual(month_data.money_in, 2)
def test_month_data__check_sum(self):
month_data = MonthData('name',
Entries(Entry(date.today(), 'entry1', 1, 10),
Entry(date.today(), 'entry2', 2, 20)))
self.assertEqual(month_data.money_out, 3)
self.assertEqual(month_data.money_in, 30)
def test_analyze_data_by_month_simple(self):
curr_date = date(2014, 10, 03)
exp = {'cat': {(2014, 10): MonthData((2014, 10),
Entries(Entry(curr_date, 'entry1', 1, 2)))}}
got = DataContainer.analyze_data_by_month(
{'cat': Entries(Entry(curr_date, 'entry1', 1, 2))})
self.assertEqual(exp, got[0])
def test_analyze_data_by_month_more_entries(self):
curr_date = date(2014, 10, 03)
exp = {'cat': {(2014, 10): MonthData((2014, 10),
Entries(Entry(curr_date, 'entry1', 1, 2),
Entry(curr_date, 'entry2', 10, 20)))}}
got = DataContainer.analyze_data_by_month(
{'cat': Entries(Entry(curr_date, 'entry1', 1, 2),
Entry(curr_date, 'entry2', 10, 20))})
self.assertEqual(got[0], exp)
def test_collapse_data_by_month_simple(self):
day1 = date(2014, 10, 03)
day2 = date(2014, 11, 05)
entries = {'cat1': {(2014, 10): MonthData((2014, 10),
Entries(Entry(day1, 'entry1', 1, 2))),
(2014, 11): MonthData((2014, 11),
Entries(Entry(day2, 'entry2', 100, 200)))},
'cat2': {(2014, 10): MonthData((2014, 10),
Entries(Entry(day1, 'entry3', 10, 20)))}}
sut = DataContainer(['cat1', 'cat2'], entries, [(2014, 10), (2014, 11)])
self.assertEqual([(11, 22), (100, 200)], sut.collapsed_data_by_month())
def test_data_container_get_year(self):
day1 = date(2014, 10, 03)
day2 = date(2015, 11, 05)
entries = {'cat1': {(2014, 10): MonthData((2014, 10),
Entries(Entry(day1, 'entry1', 1, 2))),
(2015, 11): MonthData((2015, 11),
Entries(Entry(day2, 'entry2', 100, 200)))},
'cat2': {(2014, 11): MonthData((2014, 10),
Entries(Entry(day1, 'entry3', 10, 20)))}}
sut = DataContainer(['cat1', 'cat2'], entries, [(2014, 10), (2015, 11)])
self.assertEqual({2014, 2015}, sut.get_years())
def test_organize_category_simple(self):
entries = Entries(Entry(date(2014, 10, 03), 'entry', 1, 2))
retrieved = DataContainer._organize_categories(entries, {'entry': 'cat'})
self.assertEqual({'cat': entries}, retrieved)
def test_organize_category_more_entries(self):
expected =\
{'firstcat' : Entries(Entry(date(2011, 11, 11), 'firstentry', 1, 1)),
'secondcat': Entries(Entry(date(2012, 12, 12), 'secondentry1', 2, 2),
Entry(date(2010, 10, 10), 'secondentry2', 0, 0))}
entries = Entries(Entry(date(2011, 11, 11), 'firstentry', 1, 1),
Entry(date(2012, 12, 12), 'secondentry1', 2, 2),
Entry(date(2010, 10, 10), 'secondentry2', 0, 0))
retrieved = DataContainer._organize_categories(entries,
{'firstentry': 'firstcat',
'secondentry': 'secondcat'})
self.assertEqual(expected, retrieved)
def test_organize_category_custom_category(self):
entries = Entries(
Entry(date(2014, 10, 03), 'entry', 1, 2, {'CAT': 'custom'}))
retrieved = DataContainer._organize_categories(entries, {'entry': 'cat'})
self.assertEqual({'custom': entries}, retrieved)
| gpl-3.0 | -191,709,844,971,507,300 | 48.548387 | 87 | 0.506293 | false |
won0089/oppia | core/controllers/home_test.py | 1 | 10797 | # Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the user notification dashboard and 'my explorations' pages."""
__author__ = 'Sean Lip'
from core.domain import feedback_services
from core.domain import rights_manager
from core.domain import user_jobs_continuous
from core.tests import test_utils
import feconf
class HomePageTest(test_utils.GenericTestBase):
def test_logged_out_homepage(self):
"""Test the logged-out version of the home page."""
response = self.testapp.get('/')
self.assertEqual(response.status_int, 200)
response.mustcontain(
'Your personal tutor',
'Oppia - Gallery', 'About', 'Login', no=['Logout'])
def test_notifications_dashboard_redirects_for_logged_out_users(self):
"""Test the logged-out view of the notifications dashboard."""
response = self.testapp.get('/notifications_dashboard')
self.assertEqual(response.status_int, 302)
# This should redirect to the login page.
self.assertIn('signup', response.headers['location'])
self.assertIn('notifications_dashboard', response.headers['location'])
self.login('reader@example.com')
response = self.testapp.get('/notifications_dashboard')
# This should redirect the user to complete signup.
self.assertEqual(response.status_int, 302)
self.logout()
def test_logged_in_notifications_dashboard(self):
"""Test the logged-in view of the notifications dashboard."""
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
self.login(self.EDITOR_EMAIL)
response = self.testapp.get('/notifications_dashboard')
self.assertEqual(response.status_int, 200)
response.mustcontain(
'Notifications', 'Logout',
self.get_expected_logout_url('/'),
no=['Login', 'Your personal tutor',
self.get_expected_login_url('/')])
self.logout()
class MyExplorationsHandlerTest(test_utils.GenericTestBase):
MY_EXPLORATIONS_DATA_URL = '/myexplorationshandler/data'
COLLABORATOR_EMAIL = 'collaborator@example.com'
COLLABORATOR_USERNAME = 'collaborator'
EXP_ID = 'exp_id'
EXP_TITLE = 'Exploration title'
def setUp(self):
super(MyExplorationsHandlerTest, self).setUp()
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.signup(self.COLLABORATOR_EMAIL, self.COLLABORATOR_USERNAME)
self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME)
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.collaborator_id = self.get_user_id_from_email(
self.COLLABORATOR_EMAIL)
self.viewer_id = self.get_user_id_from_email(self.VIEWER_EMAIL)
def test_no_explorations(self):
self.login(self.OWNER_EMAIL)
response = self.get_json(self.MY_EXPLORATIONS_DATA_URL)
self.assertEqual(response['explorations_list'], [])
self.logout()
def test_managers_can_see_explorations(self):
self.save_new_default_exploration(
self.EXP_ID, self.owner_id, title=self.EXP_TITLE)
self.set_admins([self.OWNER_EMAIL])
self.login(self.OWNER_EMAIL)
response = self.get_json(self.MY_EXPLORATIONS_DATA_URL)
self.assertEqual(len(response['explorations_list']), 1)
self.assertEqual(
response['explorations_list'][0]['status'],
rights_manager.ACTIVITY_STATUS_PRIVATE)
rights_manager.publish_exploration(self.owner_id, self.EXP_ID)
response = self.get_json(self.MY_EXPLORATIONS_DATA_URL)
self.assertEqual(len(response['explorations_list']), 1)
self.assertEqual(
response['explorations_list'][0]['status'],
rights_manager.ACTIVITY_STATUS_PUBLIC)
rights_manager.publicize_exploration(self.owner_id, self.EXP_ID)
response = self.get_json(self.MY_EXPLORATIONS_DATA_URL)
self.assertEqual(len(response['explorations_list']), 1)
self.assertEqual(
response['explorations_list'][0]['status'],
rights_manager.ACTIVITY_STATUS_PUBLICIZED)
self.logout()
def test_collaborators_can_see_explorations(self):
self.save_new_default_exploration(
self.EXP_ID, self.owner_id, title=self.EXP_TITLE)
rights_manager.assign_role_for_exploration(
self.owner_id, self.EXP_ID, self.collaborator_id,
rights_manager.ROLE_EDITOR)
self.set_admins([self.OWNER_EMAIL])
self.login(self.COLLABORATOR_EMAIL)
response = self.get_json(self.MY_EXPLORATIONS_DATA_URL)
self.assertEqual(len(response['explorations_list']), 1)
self.assertEqual(
response['explorations_list'][0]['status'],
rights_manager.ACTIVITY_STATUS_PRIVATE)
rights_manager.publish_exploration(self.owner_id, self.EXP_ID)
response = self.get_json(self.MY_EXPLORATIONS_DATA_URL)
self.assertEqual(len(response['explorations_list']), 1)
self.assertEqual(
response['explorations_list'][0]['status'],
rights_manager.ACTIVITY_STATUS_PUBLIC)
rights_manager.publicize_exploration(self.owner_id, self.EXP_ID)
response = self.get_json(self.MY_EXPLORATIONS_DATA_URL)
self.assertEqual(len(response['explorations_list']), 1)
self.assertEqual(
response['explorations_list'][0]['status'],
rights_manager.ACTIVITY_STATUS_PUBLICIZED)
self.logout()
def test_viewer_cannot_see_explorations(self):
self.save_new_default_exploration(
self.EXP_ID, self.owner_id, title=self.EXP_TITLE)
rights_manager.assign_role_for_exploration(
self.owner_id, self.EXP_ID, self.viewer_id,
rights_manager.ROLE_VIEWER)
self.set_admins([self.OWNER_EMAIL])
self.login(self.VIEWER_EMAIL)
response = self.get_json(self.MY_EXPLORATIONS_DATA_URL)
self.assertEqual(response['explorations_list'], [])
rights_manager.publish_exploration(self.owner_id, self.EXP_ID)
response = self.get_json(self.MY_EXPLORATIONS_DATA_URL)
self.assertEqual(response['explorations_list'], [])
rights_manager.publicize_exploration(self.owner_id, self.EXP_ID)
response = self.get_json(self.MY_EXPLORATIONS_DATA_URL)
self.assertEqual(response['explorations_list'], [])
self.logout()
def test_can_see_feedback_thread_counts(self):
self.save_new_default_exploration(
self.EXP_ID, self.owner_id, title=self.EXP_TITLE)
self.login(self.OWNER_EMAIL)
response = self.get_json(self.MY_EXPLORATIONS_DATA_URL)
self.assertEqual(len(response['explorations_list']), 1)
self.assertEqual(
response['explorations_list'][0]['num_open_threads'], 0)
self.assertEqual(
response['explorations_list'][0]['num_total_threads'], 0)
def mock_get_thread_analytics(exploration_id):
return {
'num_open_threads': 2,
'num_total_threads': 3,
}
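        # self.swap (from test_utils.GenericTestBase) is a context manager
        # that temporarily replaces feedback_services.get_thread_analytics
        # with the mock above and restores the real function on exit.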
with self.swap(
feedback_services, 'get_thread_analytics',
mock_get_thread_analytics):
response = self.get_json(self.MY_EXPLORATIONS_DATA_URL)
self.assertEqual(len(response['explorations_list']), 1)
self.assertEqual(
response['explorations_list'][0]['num_open_threads'], 2)
self.assertEqual(
response['explorations_list'][0]['num_total_threads'], 3)
self.logout()
class NotificationsDashboardHandlerTest(test_utils.GenericTestBase):
DASHBOARD_DATA_URL = '/notificationsdashboardhandler/data'
def setUp(self):
super(NotificationsDashboardHandlerTest, self).setUp()
self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME)
self.viewer_id = self.get_user_id_from_email(self.VIEWER_EMAIL)
def _get_recent_notifications_mock_by_viewer(self, unused_user_id):
"""Returns a single feedback thread by VIEWER_ID."""
return (100000, [{
'activity_id': 'exp_id',
'activity_title': 'exp_title',
'author_id': self.viewer_id,
'last_updated_ms': 100000,
'subject': 'Feedback Message Subject',
'type': feconf.UPDATE_TYPE_FEEDBACK_MESSAGE,
}])
def _get_recent_notifications_mock_by_anonymous_user(self, unused_user_id):
"""Returns a single feedback thread by an anonymous user."""
return (200000, [{
'activity_id': 'exp_id',
'activity_title': 'exp_title',
'author_id': None,
'last_updated_ms': 100000,
'subject': 'Feedback Message Subject',
'type': feconf.UPDATE_TYPE_FEEDBACK_MESSAGE,
}])
def test_author_ids_are_handled_correctly(self):
"""Test that author ids are converted into author usernames
and that anonymous authors are handled correctly.
"""
with self.swap(
user_jobs_continuous.DashboardRecentUpdatesAggregator,
'get_recent_notifications',
self._get_recent_notifications_mock_by_viewer):
self.login(self.VIEWER_EMAIL)
response = self.get_json(self.DASHBOARD_DATA_URL)
self.assertEqual(len(response['recent_notifications']), 1)
self.assertEqual(
response['recent_notifications'][0]['author_username'],
self.VIEWER_USERNAME)
self.assertNotIn('author_id', response['recent_notifications'][0])
with self.swap(
user_jobs_continuous.DashboardRecentUpdatesAggregator,
'get_recent_notifications',
self._get_recent_notifications_mock_by_anonymous_user):
self.login(self.VIEWER_EMAIL)
response = self.get_json(self.DASHBOARD_DATA_URL)
self.assertEqual(len(response['recent_notifications']), 1)
self.assertEqual(
response['recent_notifications'][0]['author_username'], '')
self.assertNotIn('author_id', response['recent_notifications'][0])
| apache-2.0 | -1,919,965,829,416,209,400 | 40.687259 | 79 | 0.642864 | false |
assefay/inasafe | safe/engine/test_engine.py | 1 | 122120 | # coding=utf-8
"""Tests for engine."""
import unittest
import cPickle
import numpy
import sys
import os
from os.path import join
# Import InaSAFE modules
from safe.engine.core import calculate_impact
from safe.engine.interpolation import (
interpolate_polygon_raster,
interpolate_raster_vector_points,
assign_hazard_values_to_exposure_data,
tag_polygons_by_grid)
from safe.storage.core import (
read_layer,
write_vector_data,
write_raster_data)
from safe.storage.vector import Vector
from safe.storage.utilities import DEFAULT_ATTRIBUTE
from safe.common.polygon import (
separate_points_by_polygon,
is_inside_polygon,
inside_polygon,
clip_lines_by_polygon,
clip_grid_by_polygons,
line_dictionary_to_geometry)
from safe.common.interpolation2d import interpolate_raster
from safe.common.numerics import (
normal_cdf,
log_normal_cdf,
erf,
ensure_numeric,
nan_allclose)
from safe.common.utilities import (
VerificationError,
unique_filename,
format_int)
from safe.common.testing import TESTDATA, HAZDATA, EXPDATA
from safe.common.exceptions import InaSAFEError
from safe.impact_functions import get_plugins, get_plugin
# These imports are needed for impact function registration - dont remove
# If any of these get reinstated as "official" public impact functions,
# remove from here and update test to use the real one.
# pylint: disable=W0611
# noinspection PyUnresolvedReferences
from safe.engine.impact_functions_for_testing import (
empirical_fatality_model,
allen_fatality_model,
unspecific_building_impact_model,
earthquake_impact_on_women,
NEXIS_building_impact_model,
HKV_flood_study,
BNPB_earthquake_guidelines,
general_ashload_impact,
flood_road_impact,
itb_fatality_model_org,
padang_building_impact_model)
# noinspection PyUnresolvedReferences
from safe.impact_functions.earthquake.pager_earthquake_fatality_model import (
PAGFatalityFunction)
# pylint: enable=W0611
def linear_function(x, y):
"""Auxiliary function for use with interpolation test
"""
return x + y / 2.0
def lembang_damage_function(x):
if x < 6.0:
value = 0.0
else:
value = (0.692 * (x ** 4) -
15.82 * (x ** 3) +
135.0 * (x ** 2) -
509.0 * x +
714.4)
return value
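# Note: lembang_damage_function above is a quartic damage curve returning the
# expected percentage loss for a given MMI; it is zero below MMI 6 and rises
# to roughly 1.6% at MMI 7, 17% at MMI 8 and 76% at MMI 9.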
def padang_check_results(mmi, building_class):
"""Check calculated results through a lookup table
returns False if the lookup fails and
an exception if more than one lookup returned"""
# Reference table established from plugin as of 28 July 2011
# It was then manually verified against an Excel table by Abbie Baca
# and Ted Dunstone. Format is
# MMI, Building class, impact [%]
padang_verified_results = [
[7.50352, 1, 50.17018],
[7.49936, 1, 49.96942],
[7.63961, 2, 20.35277],
[7.09855, 2, 5.895076],
[7.49990, 3, 7.307292],
[7.80284, 3, 13.71306],
[7.66337, 4, 3.320895],
[7.12665, 4, 0.050489],
[7.12665, 5, 1.013092],
[7.85400, 5, 7.521769],
[7.54040, 6, 4.657564],
[7.48122, 6, 4.167858],
[7.31694, 6, 3.008460],
[7.54057, 7, 1.349811],
[7.12753, 7, 0.177422],
[7.61912, 7, 1.866942],
[7.64828, 8, 1.518264],
[7.43644, 8, 0.513577],
[7.12665, 8, 0.075070],
[7.64828, 9, 1.731623],
[7.48122, 9, 1.191497],
[7.12665, 9, 0.488944]]
impact_array = [verified_impact
for verified_mmi, verified_building_class, verified_impact
in padang_verified_results
if numpy.allclose(verified_mmi, mmi, rtol=1.0e-6) and
numpy.allclose(verified_building_class, building_class,
rtol=1.0e-6)]
if len(impact_array) == 0:
return False
elif len(impact_array) == 1:
return impact_array[0]
msg = 'More than one lookup result returned. May be precision error.'
assert len(impact_array) < 2, msg
# FIXME (Ole): Count how many buildings were damaged in each category?
class Test_Engine(unittest.TestCase):
"""Tests for engine module."""
def setUp(self):
"""Run before each test."""
# ensure we are using english by default
os.environ['LANG'] = 'en'
def test_earthquake_fatality_estimation_allen(self):
"""Fatalities from ground shaking can be computed correctly 1
using aligned rasters
"""
# Name file names for hazard level, exposure and expected fatalities
hazard_filename = '%s/Earthquake_Ground_Shaking_clip.tif' % TESTDATA
exposure_filename = '%s/Population_2010_clip.tif' % TESTDATA
# Calculate impact using API
H = read_layer(hazard_filename)
E = read_layer(exposure_filename)
plugin_name = 'Earthquake Fatality Function'
plugin_list = get_plugins(plugin_name)
assert len(plugin_list) == 1
assert plugin_list[0].keys()[0] == plugin_name
IF = plugin_list[0][plugin_name]
# Call calculation engine
impact_layer = calculate_impact(
layers=[H, E], impact_fcn=IF)
# Do calculation manually and check result
hazard_raster = read_layer(hazard_filename)
H = hazard_raster.get_data(nan=0)
exposure_raster = read_layer(exposure_filename)
E = exposure_raster.get_data(nan=0)
# Calculate impact manually
a = 0.97429
b = 11.037
F = 10 ** (a * H - b) * E
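        # This is the Allen fatality model: 10 ** (a * MMI - b) gives the
        # fatality rate per cell, which is then multiplied by the exposed
        # population E in that cell.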
# Verify correctness of result
C = impact_layer.get_data(nan=0)
# Compare shape and extrema
msg = ('Shape of calculated raster differs from reference raster: '
'C=%s, F=%s' % (C.shape, F.shape))
assert numpy.allclose(C.shape, F.shape, rtol=1e-12, atol=1e-12), msg
msg = ('Minimum of calculated raster differs from reference raster: '
'C=%s, F=%s' % (numpy.min(C), numpy.min(F)))
assert numpy.allclose(numpy.min(C), numpy.min(F),
rtol=1e-12, atol=1e-12), msg
msg = ('Maximum of calculated raster differs from reference raster: '
'C=%s, F=%s' % (numpy.max(C), numpy.max(F)))
assert numpy.allclose(numpy.max(C), numpy.max(F),
rtol=1e-12, atol=1e-12), msg
# Compare every single value numerically
msg = 'Array values of written raster array were not as expected'
assert numpy.allclose(C, F, rtol=1e-12, atol=1e-12), msg
# Check that extrema are in range
xmin, xmax = impact_layer.get_extrema()
assert numpy.alltrue(C >= xmin)
assert numpy.alltrue(C <= xmax)
assert numpy.alltrue(C >= 0)
test_earthquake_fatality_estimation_allen.slow = True
def test_ITB_earthquake_fatality_estimation(self):
"""Fatalities from ground shaking can be computed correctly
using the ITB fatality model (Test data from Hadi Ghasemi).
"""
# Name file names for hazard level, exposure and expected fatalities
hazard_filename = '%s/itb_test_mmi.asc' % TESTDATA
exposure_filename = '%s/itb_test_pop.asc' % TESTDATA
#fatality_filename = '%s/itb_test_fat.asc' % TESTDATA
# Calculate impact using API
H = read_layer(hazard_filename)
E = read_layer(exposure_filename)
plugin_name = 'I T B Fatality Function'
plugin_list = get_plugins(plugin_name)
assert len(plugin_list) == 1
assert plugin_list[0].keys()[0] == plugin_name
IF = plugin_list[0][plugin_name]
# Call calculation engine
impact_layer = calculate_impact(layers=[H, E],
impact_fcn=IF)
impact_filename = impact_layer.get_filename()
I = read_layer(impact_filename)
#calculated_result = I.get_data()
#print calculated_result.shape
keywords = I.get_keywords()
# print "keywords", keywords
population = float(keywords['total_population'])
fatalities = float(keywords['total_fatalities'])
# Check aggregated values
expected_population = int(round(85424650. / 1000)) * 1000
msg = ('Expected population was %f, I got %f'
% (expected_population, population))
assert population == expected_population, msg
expected_fatalities = int(round(40871.3028 / 1000)) * 1000
msg = ('Expected fatalities was %f, I got %f'
% (expected_fatalities, fatalities))
assert numpy.allclose(fatalities, expected_fatalities,
rtol=1.0e-5), msg
        # Check that the aggregated number of fatalities is as expected
all_numbers = int(numpy.sum([31.8937368131,
2539.26369372,
1688.72362573,
17174.9261705,
19436.834531]))
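        # The five numbers above appear to be the reference fatality
        # estimates for the individual intensity bands (test data from
        # Hadi Ghasemi); their sum is compared with the aggregated total.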
msg = ('Aggregated number of fatalities not as expected: %i'
% all_numbers)
assert all_numbers == 40871, msg
x = int(round(float(all_numbers) / 1000)) * 1000
msg = ('Did not find expected fatality value %i in summary %s'
% (x, keywords['impact_summary']))
assert format_int(x) in keywords['impact_summary'], msg
def test_pager_earthquake_fatality_estimation(self):
"""Fatalities from ground shaking can be computed correctly
using the Pager fatality model.
"""
# Name file names for hazard level, exposure and expected fatalities
hazard_filename = '%s/itb_test_mmi.asc' % TESTDATA
exposure_filename = '%s/itb_test_pop.asc' % TESTDATA
# Calculate impact using API
H = read_layer(hazard_filename)
E = read_layer(exposure_filename)
plugin_name = 'P A G Fatality Function'
plugin_list = get_plugins(plugin_name)
assert len(plugin_list) == 1
assert plugin_list[0].keys()[0] == plugin_name
IF = plugin_list[0][plugin_name]
# Call calculation engine
impact_layer = calculate_impact(layers=[H, E],
impact_fcn=IF)
impact_filename = impact_layer.get_filename()
I = read_layer(impact_filename)
keywords = I.get_keywords()
population = float(keywords['total_population'])
fatalities = float(keywords['total_fatalities'])
# Check aggregated values
expected_population = 85425000.0
msg = ('Expected population was %f, I got %f'
% (expected_population, population))
assert population == expected_population, msg
expected_fatalities = 409000.0
msg = ('Expected fatalities was %f, I got %f'
% (expected_fatalities, fatalities))
assert numpy.allclose(fatalities, expected_fatalities,
rtol=1.0e-5), msg
def test_ITB_earthquake_fatality_estimation_org(self):
"""Fatalities from ground shaking can be computed correctly
using the ITB fatality model (Test data from Hadi Ghasemi).
This function is using the original implementation
"""
# Name file names for hazard level, exposure and expected fatalities
hazard_filename = '%s/itb_test_mmi.asc' % TESTDATA
exposure_filename = '%s/itb_test_pop.asc' % TESTDATA
fatality_filename = '%s/itb_test_fat.asc' % TESTDATA
# Calculate impact using API
H = read_layer(hazard_filename)
E = read_layer(exposure_filename)
plugin_name = 'I T B Fatality Function Org'
plugin_list = get_plugins(plugin_name)
assert len(plugin_list) == 1
assert plugin_list[0].keys()[0] == plugin_name
IF = plugin_list[0][plugin_name]
# Call calculation engine
impact_layer = calculate_impact(layers=[H, E],
impact_fcn=IF)
impact_filename = impact_layer.get_filename()
I = read_layer(impact_filename)
calculated_result = I.get_data()
# print calculated_result.shape
keywords = I.get_keywords()
# print "keywords", keywords
population = float(keywords['total_population'])
fatalities = float(keywords['total_fatalities'])
# Check aggregated values
expected_population = int(round(85424650. / 1000)) * 1000
msg = ('Expected population was %f, I got %f'
% (expected_population, population))
assert population == expected_population, msg
expected_fatalities = int(round(40871.3028 / 1000)) * 1000
msg = ('Expected fatalities was %f, I got %f'
% (expected_fatalities, fatalities))
assert numpy.allclose(fatalities, expected_fatalities,
rtol=1.0e-5), msg
# Compare with reference data
F = read_layer(fatality_filename)
fatality_result = F.get_data()
msg = ('Calculated fatality map did not match expected result: '
'I got %s\n'
'Expected %s' % (calculated_result, fatality_result))
assert nan_allclose(calculated_result, fatality_result,
rtol=1.0e-4), msg
# Check for expected numbers (from Hadi Ghasemi) in keywords
# NOTE: Commented out because function no longer needs to return
# individual exposure numbers.
for population_count in [2649040.0, 50273440.0, 7969610.0,
19320620.0, 5211940.0]:
assert str(int(population_count / 1000)) in \
keywords['impact_summary']
        # Check that the aggregated number of fatalities is as expected
all_numbers = int(numpy.sum([31.8937368131,
2539.26369372,
1688.72362573,
17174.9261705,
19436.834531]))
msg = ('Aggregated number of fatalities not as expected: %i'
% all_numbers)
assert all_numbers == 40871, msg
x = int(round(float(all_numbers) / 1000)) * 1000
msg = 'Did not find expected fatality value %i in summary' % x
assert str(x) in keywords['impact_summary'], msg
for fatality_count in [31.8937368131, 2539.26369372,
1688.72362573, 17174.9261705, 19436.834531]:
x = str(int(fatality_count))
summary = keywords['impact_summary']
msg = 'Expected %s in impact_summary: %s' % (x, summary)
assert x in summary, msg
def test_earthquake_fatality_estimation_ghasemi(self):
"""Fatalities from ground shaking can be computed correctly 2
using the Hadi Ghasemi function.
"""
# FIXME (Ole): Maybe this is no longer relevant (20120501)
# Name file names for hazard level, exposure and expected fatalities
hazard_filename = '%s/Earthquake_Ground_Shaking_clip.tif' % TESTDATA
exposure_filename = '%s/Population_2010_clip.tif' % TESTDATA
# Calculate impact using API
H = read_layer(hazard_filename)
E = read_layer(exposure_filename)
plugin_name = 'Empirical Fatality Function'
plugin_list = get_plugins(plugin_name)
assert len(plugin_list) == 1
assert plugin_list[0].keys()[0] == plugin_name
IF = plugin_list[0][plugin_name]
# Call calculation engine
impact_layer = calculate_impact(layers=[H, E],
impact_fcn=IF)
# Do calculation manually and check result
hazard_raster = read_layer(hazard_filename)
H = hazard_raster.get_data(nan=0)
exposure_raster = read_layer(exposure_filename)
E = exposure_raster.get_data(nan=0)
# Verify correctness of result
C = impact_layer.get_data(nan=0)
expected_shape = (263, 345)
msg = ('Shape of calculated raster not as expected: '
'C=%s, expected=%s' % (C.shape, expected_shape))
assert numpy.allclose(C.shape, expected_shape,
rtol=1e-12, atol=1e-12), msg
# Calculate impact manually
# FIXME (Ole): Jono will do this
# # Compare shape and extrema
# msg = ('Shape of calculated raster differs from reference raster: '
# 'C=%s, F=%s' % (C.shape, F.shape))
# assert numpy.allclose(C.shape, F.shape, rtol=1e-12, atol=1e-12), msg
# msg = ('Minimum of calculated raster differs from reference raster: '
# 'C=%s, F=%s' % (numpy.min(C), numpy.min(F)))
# assert numpy.allclose(numpy.min(C), numpy.min(F),
# rtol=1e-12, atol=1e-12), msg
# msg = ('Maximum of calculated raster differs from reference raster: '
# 'C=%s, F=%s' % (numpy.max(C), numpy.max(F)))
# assert numpy.allclose(numpy.max(C), numpy.max(F),
# rtol=1e-12, atol=1e-12), msg
# # Compare every single value numerically
# msg = 'Array values of written raster array were not as expected'
# assert numpy.allclose(C, F, rtol=1e-12, atol=1e-12), msg
# # Check that extrema are in range
# xmin, xmax = impact_layer.get_extrema()
# assert numpy.alltrue(C >= xmin)
# assert numpy.alltrue(C <= xmax)
# assert numpy.alltrue(C >= 0)
test_earthquake_fatality_estimation_ghasemi.slow = True
def test_earthquake_impact_on_women_example(self):
"""Earthquake impact on women example works
"""
# This only tests that the function runs and has the right
# strings in the output. No test of quantitative numbers
# (because we can't).
# Name file names for hazard level, exposure and expected fatalities
hazard_filename = '%s/Earthquake_Ground_Shaking_clip.tif' % TESTDATA
exposure_filename = '%s/Population_2010_clip.tif' % TESTDATA
# Calculate impact using API
H = read_layer(hazard_filename)
E = read_layer(exposure_filename)
plugin_name = 'Earthquake Women Impact Function'
plugin_list = get_plugins(plugin_name)
assert len(plugin_list) == 1
assert plugin_list[0].keys()[0] == plugin_name
IF = plugin_list[0][plugin_name]
# Call calculation engine
impact_layer = calculate_impact(layers=[H, E],
impact_fcn=IF)
impact_filename = impact_layer.get_filename()
read_layer(impact_filename) # Can read result
self.assertIn('women displaced', impact_layer.get_impact_summary())
self.assertIn('pregnant', impact_layer.get_impact_summary())
test_earthquake_impact_on_women_example.slow = True
def test_jakarta_flood_study(self):
"""HKV Jakarta flood study calculated correctly using aligned rasters
"""
# FIXME (Ole): Redo with population as shapefile later
# Name file names for hazard level, exposure and expected fatalities
population = 'Population_Jakarta_geographic.asc'
plugin_name = 'HKVtest'
# Expected values from HKV
expected_values = [2485442, 1537920]
expected_strings = ['<b>' + format_int(2480) + '</b>',
'<b>' + format_int(1533) + '</b>']
i = 0
for filename in ['Flood_Current_Depth_Jakarta_geographic.asc',
'Flood_Design_Depth_Jakarta_geographic.asc']:
hazard_filename = join(HAZDATA, filename)
exposure_filename = join(TESTDATA, population)
# Get layers using API
H = read_layer(hazard_filename)
E = read_layer(exposure_filename)
plugin_list = get_plugins(plugin_name)
assert len(plugin_list) == 1
assert plugin_list[0].keys()[0] == plugin_name
IF = plugin_list[0][plugin_name]
# Call impact calculation engine
impact_layer = calculate_impact(layers=[H, E],
impact_fcn=IF)
impact_filename = impact_layer.get_filename()
# Do calculation manually and check result
hazard_raster = read_layer(hazard_filename)
H = hazard_raster.get_data(nan=0)
exposure_raster = read_layer(exposure_filename)
P = exposure_raster.get_data(nan=0)
# Calculate impact manually
pixel_area = 2500
I = numpy.where(H > 0.1, P, 0) / 100000.0 * pixel_area
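            # Cells flooded deeper than 0.1 m count as affected. The scaling
            # converts the population grid values (apparently given per
            # 100,000 m2) into a people count per 2500 m2 pixel.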
# Verify correctness against results from HKV
res = sum(I.flat)
ref = expected_values[i]
#print filename, 'Result=%f' % res, ' Expected=%f' % ref
#print 'Pct relative error=%f' % (abs(res-ref)*100./ref)
msg = 'Got result %f but expected %f' % (res, ref)
assert numpy.allclose(res, ref, rtol=1.0e-2), msg
# Verify correctness of result
calculated_raster = read_layer(impact_filename)
C = calculated_raster.get_data(nan=0)
# Check impact_summary
impact_summary = calculated_raster.get_impact_summary()
expct = expected_strings[i] # Number of people affected (HTML)
msg = ('impact_summary %s did not contain expected '
'string %s' % (impact_summary, expct))
assert expct in impact_summary, msg
# Compare shape and extrema
msg = ('Shape of calculated raster differs from reference raster: '
'C=%s, I=%s' % (C.shape, I.shape))
assert numpy.allclose(C.shape, I.shape,
rtol=1e-12, atol=1e-12), msg
msg = ('Minimum of calculated raster differs from reference '
'raster: '
'C=%s, I=%s' % (numpy.min(C), numpy.min(I)))
assert numpy.allclose(numpy.min(C), numpy.min(I),
rtol=1e-12, atol=1e-12), msg
msg = ('Maximum of calculated raster differs from reference '
'raster: '
'C=%s, I=%s' % (numpy.max(C), numpy.max(I)))
assert numpy.allclose(numpy.max(C), numpy.max(I),
rtol=1e-12, atol=1e-12), msg
# Compare every single value numerically
msg = 'Array values of written raster array were not as expected'
assert numpy.allclose(C, I, rtol=1e-12, atol=1e-12), msg
# Check that extrema are in range
xmin, xmax = calculated_raster.get_extrema()
assert numpy.alltrue(C >= xmin)
assert numpy.alltrue(C <= xmax)
assert numpy.alltrue(C >= 0)
i += 1
test_jakarta_flood_study.slow = True
def test_volcano_population_evacuation_impact(self):
"""Population impact from volcanic hazard is computed correctly
"""
# Name file names for hazard level, exposure and expected fatalities
hazard_filename = '%s/donut.shp' % TESTDATA
exposure_filename = ('%s/pop_merapi_clip.tif' % TESTDATA)
# Slow
# FIXME (Ole): Results are different - check!
#exposure_filename = ('%s/population_indonesia_2010_BNPB_BPS.asc'
# % EXPDATA)
# Calculate impact using API
H = read_layer(hazard_filename)
E = read_layer(exposure_filename)
plugin_name = 'Volcano Polygon Hazard Population'
IF = get_plugin(plugin_name)
print 'Calculating'
# Call calculation engine
impact_layer = calculate_impact(layers=[H, E],
impact_fcn=IF)
impact_filename = impact_layer.get_filename()
I = read_layer(impact_filename)
keywords = I.get_keywords()
# Check for expected results:
#for value in ['Merapi', 192055, 56514, 68568, 66971]:
for value in ['Merapi', 190000, 56000, 66000, 68000]:
if isinstance(value, int):
x = format_int(value)
else:
x = value
summary = keywords['impact_summary']
msg = ('Did not find expected value %s in summary %s'
% (x, summary))
assert x in summary, msg
# FIXME (Ole): Should also have test for concentric circle
# evacuation zones
test_volcano_population_evacuation_impact.slow = True
    # This one currently fails because the clipped input data has a
    # different resolution from the full data. Issue #344
    #
    # This test is not finished, but must wait until #344 has been sorted
@unittest.expectedFailure
def test_polygon_hazard_raster_exposure_clipped_grids(self):
"""Rasters clipped by polygons irrespective of pre-clipping.
        Double check that a raster clipped by the QGIS front-end
        produces the same results as when the full raster is used.
"""
# Read test files
hazard_filename = '%s/donut.shp' % TESTDATA
exposure_filename_clip = ('%s/pop_merapi_clip.tif' % TESTDATA)
exposure_filename_full = ('%s/pop_merapi_prj_problem.asc'
% TESTDATA)
H = read_layer(hazard_filename)
E_clip = read_layer(exposure_filename_clip)
E_full = read_layer(exposure_filename_full)
# Establish whether full and clipped grids coincide
# in clipped area
gt_clip = E_clip.get_geotransform()
gt_full = E_full.get_geotransform()
msg = ('Resolutions were different. Geotransform full grid: %s, '
'clipped grid: %s' % (gt_full, gt_clip))
assert numpy.allclose(gt_clip[1], gt_full[1]), msg
assert numpy.allclose(gt_clip[5], gt_full[5]), msg
polygons = H.get_geometry(as_geometry_objects=True)
# Clip
res_clip = clip_grid_by_polygons(E_clip.get_data(),
E_clip.get_geotransform(),
polygons)
#print res_clip
#print len(res_clip)
res_full = clip_grid_by_polygons(E_full.get_data(),
E_full.get_geotransform(),
polygons)
assert len(res_clip) == len(res_full)
for i in range(len(res_clip)):
#print
x = res_clip[i][0]
y = res_full[i][0]
#print x
#print y
msg = ('Got len(x) == %i, len(y) == %i. Should be the same'
% (len(x), len(y)))
assert len(x) == len(y), msg
# Check that they are inside the respective polygon
P = polygons[i]
idx = inside_polygon(x, # pylint: disable=W0612
P.outer_ring,
holes=P.inner_rings)
#print idx
msg = ('Expected point locations to be the same in clipped '
'and full grids, Got %s and %s' % (x, y))
assert numpy.allclose(x, y)
def test_polygon_hazard_and_raster_exposure_big(self):
"""Rasters can be converted to points and clipped by polygons
This is a test for the basic machinery needed for issue #91
It uses over 400,000 gridpoints and 2704 complex polygons,
        each with 10-200 vertices, and serves as a test for optimising
the polygon clipping algorithm. With the optimisations requested
in https://github.com/AIFDR/inasafe/issues/222 it takes about 100
seconds on a good workstation while it takes over 2000 seconds
without it.
This test also runs the high level interpolation routine which assigns
        attributes to the new point layer. Its runtime is virtually the same
        as that of the underlying function.
"""
# Name input files
polyhazard = join(TESTDATA, 'rw_jakarta_singlepart.shp')
population = join(TESTDATA, 'Population_Jakarta_geographic.asc')
# Get layers using API
H = read_layer(polyhazard)
E = read_layer(population)
N = len(H)
assert N == 2704
# Run and test the fundamental clipping routine
#import time
#t0 = time.time()
res = clip_grid_by_polygons(E.get_data(),
E.get_geotransform(),
H.get_geometry(as_geometry_objects=True))
#print 'Engine took %i seconds' % (time.time() - t0)
assert len(res) == N
# Characterisation test
assert H.get_data()[0]['RW'] == 'RW 01'
assert H.get_data()[0]['KAB_NAME'] == 'JAKARTA UTARA'
assert H.get_data()[0]['KEC_NAME'] == 'TANJUNG PRIOK'
assert H.get_data()[0]['KEL_NAME'] == 'KEBON BAWANG'
geom = res[0][0]
vals = res[0][1]
assert numpy.allclose(vals[17], 1481.98)
assert numpy.allclose(geom[17][0], 106.88746869) # LON
assert numpy.allclose(geom[17][1], -6.11493812) # LAT
# Then run and test the high level interpolation function
#t0 = time.time()
P = interpolate_polygon_raster(H, E,
layer_name='poly2raster_test',
attribute_name='grid_value')
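        # interpolate_polygon_raster turns every grid point falling inside a
        # hazard polygon into a point feature carrying the polygon's
        # attributes, its 'polygon_id' and the raster value as 'grid_value'.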
#print 'High level function took %i seconds' % (time.time() - t0)
#P.write_to_file('polygon_raster_interpolation_example_big.shp')
# Characterisation tests (values verified using QGIS)
attributes = P.get_data()[17]
geometry = P.get_geometry()[17]
assert attributes['RW'] == 'RW 01'
assert attributes['KAB_NAME'] == 'JAKARTA UTARA'
assert attributes['KEC_NAME'] == 'TANJUNG PRIOK'
assert attributes['KEL_NAME'] == 'KEBON BAWANG'
assert attributes['polygon_id'] == 0
assert numpy.allclose(attributes['grid_value'], 1481.984)
assert numpy.allclose(geometry[0], 106.88746869) # LON
assert numpy.allclose(geometry[1], -6.11493812) # LAT
# A second characterisation test
attributes = P.get_data()[10000]
geometry = P.get_geometry()[10000]
assert attributes['RW'] == 'RW 06'
assert attributes['KAB_NAME'] == 'JAKARTA UTARA'
assert attributes['KEC_NAME'] == 'PENJARINGAN'
assert attributes['KEL_NAME'] == 'KAMAL MUARA'
assert attributes['polygon_id'] == 93
assert numpy.allclose(attributes['grid_value'], 715.6508)
assert numpy.allclose(geometry[0], 106.74092731) # LON
assert numpy.allclose(geometry[1], -6.1081538) # LAT
# A third characterisation test
attributes = P.get_data()[99000]
geometry = P.get_geometry()[99000]
assert attributes['RW'] == 'RW 08'
assert attributes['KAB_NAME'] == 'JAKARTA TIMUR'
assert attributes['KEC_NAME'] == 'CAKUNG'
assert attributes['KEL_NAME'] == 'CAKUNG TIMUR'
assert attributes['polygon_id'] == 927
assert numpy.allclose(attributes['grid_value'], 770.7628)
assert numpy.allclose(geometry[0], 106.9675237) # LON
assert numpy.allclose(geometry[1], -6.16966499) # LAT
test_polygon_hazard_and_raster_exposure_big.slow = True
def test_polygon_hazard_and_raster_exposure_small(self):
"""Exposure rasters can be clipped by polygon exposure
This is a test for the basic machinery needed for issue #91
"""
# Name input files
polyhazard = join(TESTDATA, 'test_polygon_on_test_grid.shp')
population = join(TESTDATA, 'test_grid.asc')
# Get layers using API
H = read_layer(polyhazard)
E = read_layer(population)
N = len(H)
assert N == 4
# Run underlying clipping routine
res0 = clip_grid_by_polygons(E.get_data(),
E.get_geotransform(),
H.get_geometry(as_geometry_objects=True))
assert len(res0) == N
# Run higher level interpolation routine
P = interpolate_polygon_raster(H, E,
layer_name='poly2raster_test',
attribute_name='grid_value')
# Verify result (numbers obtained from using QGIS)
#P.write_to_file('poly2raster_test.shp')
attributes = P.get_data()
geometry = P.get_geometry()
# Polygon 0
assert attributes[0]['id'] == 0
assert attributes[0]['name'] == 'A'
assert numpy.allclose(attributes[0]['number'], 31415)
assert numpy.allclose(attributes[0]['grid_value'], 50.8147)
assert attributes[0]['polygon_id'] == 0
assert attributes[1]['id'] == 0
assert attributes[1]['name'] == 'A'
assert numpy.allclose(geometry[1][0], 96.97137053) # Lon
assert numpy.allclose(geometry[1][1], -5.349657148) # Lat
assert numpy.allclose(attributes[1]['number'], 31415)
assert numpy.allclose(attributes[1]['grid_value'], 3)
assert attributes[1]['polygon_id'] == 0
assert attributes[3]['id'] == 0
assert attributes[3]['name'] == 'A'
assert numpy.allclose(attributes[3]['number'], 31415)
assert numpy.allclose(attributes[3]['grid_value'], 50.127)
assert attributes[3]['polygon_id'] == 0
# Polygon 1
assert attributes[6]['id'] == 1
assert attributes[6]['name'] == 'B'
assert numpy.allclose(attributes[6]['number'], 13)
assert numpy.allclose(attributes[6]['grid_value'], -15)
assert attributes[6]['polygon_id'] == 1
assert attributes[11]['id'] == 1
assert attributes[11]['name'] == 'B'
assert numpy.allclose(attributes[11]['number'], 13)
assert numpy.isnan(attributes[11]['grid_value'])
assert attributes[11]['polygon_id'] == 1
assert attributes[13]['id'] == 1
assert attributes[13]['name'] == 'B'
assert numpy.allclose(geometry[13][0], 97.063559372) # Lon
assert numpy.allclose(geometry[13][1], -5.472621404) # Lat
assert numpy.allclose(attributes[13]['number'], 13)
assert numpy.allclose(attributes[13]['grid_value'], 50.8258)
assert attributes[13]['polygon_id'] == 1
# Polygon 2 (overlapping)
assert attributes[16]['id'] == 2
assert attributes[16]['name'] == 'Intersecting'
assert numpy.allclose(attributes[16]['number'], 100)
assert numpy.allclose(attributes[16]['grid_value'], 50.9574)
assert attributes[16]['polygon_id'] == 2
assert attributes[21]['id'] == 2
assert attributes[21]['name'] == 'Intersecting'
assert numpy.allclose(attributes[21]['number'], 100)
assert numpy.allclose(attributes[21]['grid_value'], 50.2238)
# Polygon 3
assert attributes[23]['id'] == 3
assert attributes[23]['name'] == 'D'
assert numpy.allclose(geometry[23][0], 97.0021116) # Lon
assert numpy.allclose(geometry[23][1], -5.503362468) # Lat
assert numpy.allclose(attributes[23]['number'], -50)
assert numpy.allclose(attributes[23]['grid_value'], 50.0377)
assert attributes[23]['polygon_id'] == 3
def test_tagging_polygons_by_raster_values(self):
"""Polygons can be tagged by raster values
This is testing a simple application of clip_grid_by_polygons
"""
# Name input files
polygon = join(TESTDATA, 'test_polygon_on_test_grid.shp')
grid = join(TESTDATA, 'test_grid.asc')
# Get layers using API
G = read_layer(grid)
P = read_layer(polygon)
# Run tagging routine
R = tag_polygons_by_grid(P, G, threshold=50.85, tag='tag')
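        # Each polygon in R receives a boolean attribute 'tag' which is True
        # if at least one grid value inside the polygon exceeds the
        # threshold of 50.85 and False otherwise.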
assert len(R) == len(P)
data = R.get_data()
for d in data:
assert 'tag' in d
# Check against inspection with QGIS. Only polygon 1 and 2
# contain grid points with values greater than 50.85
assert data[0]['tag'] is False
assert data[1]['tag'] is True
assert data[2]['tag'] is True
assert data[3]['tag'] is False
def test_polygon_hazard_with_holes_and_raster_exposure(self):
"""Rasters can be clipped by polygons (with holes)
This is testing that a collection of polygons - some with holes -
can correctly clip and tag raster points.
"""
# Name input files
polyhazard = join(TESTDATA, 'donut.shp')
population = join(TESTDATA, 'pop_merapi_clip.tif')
# Get layers using API
H = read_layer(polyhazard)
E = read_layer(population)
N = len(H)
assert N == 10
# Characterisation test
assert H.get_data()[9]['KRB'] == 'Kawasan Rawan Bencana II'
# Then run and test the high level interpolation function
P = interpolate_polygon_raster(H, E,
layer_name='poly2raster_test',
attribute_name='grid_value')
# Possibly write result to file for visual inspection, e.g. with QGIS
#P.write_to_file('polygon_raster_interpolation_example_holes.shp')
# Characterisation tests (values verified using QGIS)
# In internal polygon
attributes = P.get_data()[43]
#geometry = P.get_geometry()[43]
assert attributes['KRB'] == 'Kawasan Rawan Bencana III'
assert attributes['polygon_id'] == 8
# In polygon ring
attributes = P.get_data()[222]
#geometry = P.get_geometry()[222]
assert attributes['KRB'] == 'Kawasan Rawan Bencana II'
assert attributes['polygon_id'] == 9
# In one of the outer polygons
attributes = P.get_data()[26]
#geometry = P.get_geometry()[26]
assert attributes['KRB'] == 'Kawasan Rawan Bencana I'
assert attributes['polygon_id'] == 4
test_polygon_hazard_with_holes_and_raster_exposure.slow = True
def test_flood_building_impact_function(self):
"""Flood building impact function works
This test also exercises interpolation of hazard level (raster) to
building locations (vector data).
"""
for haz_filename in ['Flood_Current_Depth_Jakarta_geographic.asc',
'Flood_Design_Depth_Jakarta_geographic.asc']:
# Name file names for hazard level and exposure
hazard_filename = '%s/%s' % (HAZDATA, haz_filename)
exposure_filename = ('%s/OSM_building_polygons_20110905.shp'
% TESTDATA)
# Calculate impact using API
H = read_layer(hazard_filename)
E = read_layer(exposure_filename)
plugin_name = 'FloodBuildingImpactFunction'
plugin_list = get_plugins(plugin_name)
assert len(plugin_list) == 1
assert plugin_list[0].keys()[0] == plugin_name
IF = plugin_list[0][plugin_name]
impact_vector = calculate_impact(layers=[H, E],
impact_fcn=IF)
# Extract calculated result
icoordinates = impact_vector.get_geometry()
iattributes = impact_vector.get_data()
# Check
assert len(icoordinates) == 34960
assert len(iattributes) == 34960
# FIXME (Ole): check more numbers
test_flood_building_impact_function.slow = True
def test_flood_building_impact_function_vector(self):
"""Flood building impact function works (flood is polygon)
"""
building = 'test_flood_building_impact_exposure.shp'
flood_data = 'test_flood_building_impact_hazard.shp'
plugin_name = 'FloodBuildingImpactFunction'
hazard_filename = join(TESTDATA, flood_data)
exposure_filename = join(TESTDATA, building)
# Calculate impact using API
H = read_layer(hazard_filename)
E = read_layer(exposure_filename)
plugin_list = get_plugins(plugin_name)
assert len(plugin_list) == 1
assert plugin_list[0].keys()[0] == plugin_name
IF = plugin_list[0][plugin_name]
# Call calculation engine
impact_layer = calculate_impact(layers=[H, E],
impact_fcn=IF)
impact_filename = impact_layer.get_filename()
I = read_layer(impact_filename)
keywords = I.get_keywords()
buildings_total = keywords['buildings_total']
buildings_affected = keywords['buildings_affected']
assert buildings_total == 67
assert buildings_affected == 41
def test_data_sources_are_carried_forward(self):
"""Data sources are carried forward to impact layer
"""
haz_filename = 'Flood_Current_Depth_Jakarta_geographic.asc'
# File names for hazard level and exposure
hazard_filename = '%s/%s' % (HAZDATA, haz_filename)
exposure_filename = ('%s/OSM_building_polygons_20110905.shp'
% TESTDATA)
# Calculate impact using API
H = read_layer(hazard_filename)
E = read_layer(exposure_filename)
H_tit = H.get_keywords()['title']
E_tit = E.get_keywords()['title']
H_src = H.get_keywords()['source']
E_src = E.get_keywords()['source']
plugin_name = 'FloodBuildingImpactFunction'
plugin_list = get_plugins(plugin_name)
assert len(plugin_list) == 1
assert plugin_list[0].keys()[0] == plugin_name
IF = plugin_list[0][plugin_name]
impact_vector = calculate_impact(layers=[H, E],
impact_fcn=IF)
assert impact_vector.get_keywords()['hazard_title'] == H_tit
assert impact_vector.get_keywords()['exposure_title'] == E_tit
assert impact_vector.get_keywords()['hazard_source'] == H_src
assert impact_vector.get_keywords()['exposure_source'] == E_src
test_data_sources_are_carried_forward.slow = True
def test_earthquake_damage_schools(self):
"""Lembang building damage from ground shaking works
This test also exercises interpolation of hazard level (raster) to
building locations (vector data).
"""
# Name file names for hazard level and exposure
exp_filename = '%s/test_buildings.shp' % TESTDATA
for haz_filename in [join(TESTDATA, 'lembang_mmi_hazmap.asc'),
join(TESTDATA, # NaN's
'Earthquake_Ground_Shaking_clip.tif'),
join(HAZDATA, 'Lembang_Earthquake_Scenario.asc')]:
# Calculate impact using API
H = read_layer(haz_filename)
E = read_layer(exp_filename)
plugin_name = 'Earthquake Building Damage Function'
plugin_list = get_plugins(plugin_name)
assert len(plugin_list) == 1
assert plugin_list[0].keys()[0] == plugin_name
IF = plugin_list[0][plugin_name]
impact_vector = calculate_impact(layers=[H, E],
impact_fcn=IF)
# Read input data
hazard_raster = read_layer(haz_filename)
mmi_min, mmi_max = hazard_raster.get_extrema()
# Extract calculated result
icoordinates = impact_vector.get_geometry()
iattributes = impact_vector.get_data()
# First check that interpolated MMI was done as expected
fid = open('%s/test_buildings_percentage_loss_and_mmi.txt'
% TESTDATA)
reference_points = []
MMI = []
DAM = []
for line in fid.readlines()[1:]:
fields = line.strip().split(',')
lon = float(fields[4][1:-1])
lat = float(fields[3][1:-1])
mmi = float(fields[-1][1:-1])
dam = float(fields[-2][1:-1])
reference_points.append((lon, lat))
MMI.append(mmi)
DAM.append(dam)
# Verify that coordinates are consistent
msg = 'Interpolated coordinates do not match those of test data'
assert numpy.allclose(icoordinates, reference_points), msg
# Verify interpolated MMI with test result
min_damage = sys.maxint
max_damage = -min_damage
for i in range(len(MMI)):
lon, lat = icoordinates[i][:]
calculated_mmi = iattributes[i]['MMI']
if numpy.isnan(calculated_mmi):
continue
# Check that interpolated points are within range
msg = ('Interpolated mmi %f from file %s was outside '
'extrema: [%f, %f] at location '
'[%f, %f].' % (calculated_mmi, haz_filename,
mmi_min, mmi_max, lon, lat))
assert mmi_min <= calculated_mmi <= mmi_max, msg
# Set up some tolerances for comparison with test set.
if 'Lembang_Earthquake' in haz_filename:
pct = 3
else:
pct = 2
# Check that interpolated result is within specified tolerance
msg = ('Calculated MMI %f deviated more than %.1f%% from '
'what was expected %f' % (calculated_mmi, pct, MMI[i]))
assert numpy.allclose(calculated_mmi, MMI[i],
rtol=float(pct) / 100), msg
calculated_dam = iattributes[i]['DAMAGE']
if calculated_dam > max_damage:
max_damage = calculated_dam
if calculated_dam < min_damage:
min_damage = calculated_dam
ref_dam = lembang_damage_function(calculated_mmi)
msg = ('Calculated damage was not as expected')
assert numpy.allclose(calculated_dam, ref_dam,
rtol=1.0e-12), msg
# Test that test data is correct by calculating damage based
# on reference MMI.
# FIXME (Ole): UNCOMMENT WHEN WE GET THE CORRECT DATASET
#expected_test_damage = lembang_damage_function(MMI[i])
#msg = ('Test data is inconsistent: i = %i, MMI = %f,'
# 'expected_test_damage = %f, '
# 'actual_test_damage = %f' % (i, MMI[i],
# expected_test_damage,
# DAM[i]))
#if not numpy.allclose(expected_test_damage,
# DAM[i], rtol=1.0e-12):
# print msg
# Note this test doesn't work, but the question is whether the
# independent test data is correct.
# Also small fluctuations in MMI can cause very large changes
# in computed damage for this example.
# print mmi, MMI[i], calculated_damage, DAM[i]
#msg = ('Calculated damage was not as expected for point %i:'
# 'Got %f, expected %f' % (i, calculated_dam, DAM[i]))
#assert numpy.allclose(calculated_dam, DAM[i], rtol=0.8), msg
assert min_damage >= 0
assert max_damage <= 100
#print 'Extrema', mmi_filename, min_damage, max_damage
#print len(MMI)
test_earthquake_damage_schools.slow = True
def test_earthquake_impact_OSM_data(self):
"""Earthquake layer interpolation to OSM building data works
The impact function used is based on the guidelines plugin
This test also exercises interpolation of hazard level (raster) to
building locations (vector data).
"""
# FIXME: Still needs some reference data to compare to
for mmi_filename in ['Shakemap_Padang_2009.asc',
# Time consuming
#'Earthquake_Ground_Shaking.asc',
'Lembang_Earthquake_Scenario.asc']:
# Name file names for hazard level and exposure
hazard_filename = '%s/%s' % (HAZDATA, mmi_filename)
exposure_filename = ('%s/OSM_building_polygons_20110905.shp'
% TESTDATA)
# Calculate impact using API
H = read_layer(hazard_filename)
E = read_layer(exposure_filename)
plugin_name = 'Earthquake Guidelines Function'
plugin_list = get_plugins(plugin_name)
assert len(plugin_list) == 1
assert plugin_list[0].keys()[0] == plugin_name
IF = plugin_list[0][plugin_name]
impact_vector = calculate_impact(layers=[H, E],
impact_fcn=IF)
# Read input data
hazard_raster = read_layer(hazard_filename)
mmi_min, mmi_max = hazard_raster.get_extrema()
# Extract calculated result
iattributes = impact_vector.get_data()
# Verify interpolated MMI with test result
for i in range(len(iattributes)):
calculated_mmi = iattributes[i]['MMI']
if numpy.isnan(calculated_mmi):
continue
# Check that interpolated points are within range
msg = ('Interpolated mmi %f from file %s was outside '
'extrema: [%f, %f] at point %i '
% (calculated_mmi, hazard_filename,
mmi_min, mmi_max, i))
assert mmi_min <= calculated_mmi <= mmi_max, msg
calculated_dam = iattributes[i]['DMGLEVEL']
assert calculated_dam in [1, 2, 3]
test_earthquake_impact_OSM_data.slow = True
def test_tsunami_loss_use_case(self):
"""Building loss from tsunami use case works
"""
# This test merely exercises the use case as there is
# no reference data. It does check the sanity of values as
# far as possible.
hazard_filename = ('%s/tsunami_max_inundation_depth_4326.tif'
% TESTDATA)
exposure_filename = ('%s/tsunami_building_exposure.shp' % TESTDATA)
exposure_with_depth_filename = ('%s/tsunami_building_exposure'
'.shp' % TESTDATA)
reference_impact_filename = ('%s/tsunami_building_assessment'
'.shp' % TESTDATA)
# Calculate impact using API
H = read_layer(hazard_filename)
E = read_layer(exposure_filename)
# Check hazard data
A = H.get_data()
assert len(H) == 20855
assert numpy.sum(numpy.isnan(A)) == 8547
# Do interpolation using underlying library
# This was to debug this test failing under Windows
key = 'Tsunami Ma'
I = interpolate_raster_vector_points(H, E, attribute_name=key)
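        # The attribute name is presumably kept to 10 characters
        # ('Tsunami Ma') because of the DBF field name limit that applies
        # when the result is written to a shapefile.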
for feature in I.get_data():
msg = ('%s not found in field list:\n%s'
% (key, str(feature.keys())))
assert key in feature.keys(), msg
if (feature['LONGITUDE'] == 150.1787 and
feature['LATITUDE'] == -35.70413):
msg = ''
assert numpy.isnan(feature[key])
elif (feature['LONGITUDE'] == 150.1793 and
feature['LATITUDE'] == -35.70632):
assert numpy.isnan(feature[key])
elif (feature['LONGITUDE'] == 150.18208 and
feature['LATITUDE'] == -35.70996):
assert numpy.isnan(feature[key])
elif (feature['LONGITUDE'] == 150.18664 and
feature['LATITUDE'] == -35.70253):
assert numpy.isnan(feature[key])
elif (feature['LONGITUDE'] == 150.18487 and
feature['LATITUDE'] == -35.70561):
assert numpy.isnan(feature[key])
else:
assert not numpy.isnan(feature[key])
# Run main test
plugin_name = 'Tsunami Building Loss Function'
plugin_list = get_plugins(plugin_name)
assert len(plugin_list) == 1
assert plugin_list[0].keys()[0] == plugin_name
IF = plugin_list[0][plugin_name]
impact_vector = calculate_impact(layers=[H, E],
impact_fcn=IF)
impact_filename = impact_vector.get_filename()
# Read calculated result
        # Read the result back in so that shapefile attribute name
        # truncation (10 character DBF limit) is reflected in the data
my_impact_vector = read_layer(impact_filename)
icoordinates = my_impact_vector.get_geometry()
iattributes = my_impact_vector.get_data()
N = len(icoordinates)
# Ensure that calculated point locations coincide with
# original exposure point locations
ref_exp = read_layer(exposure_filename)
refcoordinates = ref_exp.get_geometry()
assert N == len(refcoordinates)
msg = ('Coordinates of impact results do not match those of '
'exposure data')
assert numpy.allclose(icoordinates, refcoordinates), msg
# Ensure that calculated point locations coincide with
# original exposure point (with depth) locations
ref_depth = read_layer(exposure_with_depth_filename)
refdepth_coordinates = ref_depth.get_geometry()
#refdepth_attributes = ref_depth.get_data()
assert N == len(refdepth_coordinates)
msg = ('Coordinates of impact results do not match those of '
'exposure data (with depth)')
assert numpy.allclose(icoordinates, refdepth_coordinates), msg
# Read reference results
#hazard_raster = read_layer(hazard_filename)
#A = hazard_raster.get_data()
#depth_min, depth_max = hazard_raster.get_extrema()
ref_impact = read_layer(reference_impact_filename)
#refimpact_coordinates = ref_impact.get_geometry()
refimpact_attributes = ref_impact.get_data()
# Check for None
for i in range(N):
if refimpact_attributes[i] is None:
msg = 'Element %i was None' % i
raise Exception(msg)
# Check sanity of calculated attributes
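        # The assertions below encode the damage model thresholds: no loss
        # below 0.3 m of inundation, people counted as affected between
        # 0.01 m and 1.0 m, and as severely affected from 1.0 m upwards.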
for i in range(N):
#lon, lat = icoordinates[i]
depth = iattributes[i]['DEPTH']
# Ignore NaN's
if numpy.isnan(depth):
continue
structural_damage = iattributes[i]['STRUCT_DAM']
contents_damage = iattributes[i]['CONTENTS_D']
for imp in [structural_damage, contents_damage]:
msg = ('Percent damage was outside range [0,1] at depth %f: %f'
% (depth, imp))
assert 0 <= imp <= 1, msg
structural_loss = iattributes[i]['STRUCT_LOS']
contents_loss = iattributes[i]['CONTENTS_L']
if depth < 0.3:
assert structural_loss == 0.0
assert contents_loss == 0.0
else:
assert structural_loss > 0.0
assert contents_loss > 0.0
number_of_people = iattributes[i]['NEXIS_PEOP']
people_affected = iattributes[i]['PEOPLE_AFF']
people_severely_affected = iattributes[i]['PEOPLE_SEV']
if 0.01 < depth < 1.0:
assert people_affected == number_of_people
else:
assert people_affected == 0
if depth >= 1.0:
assert people_severely_affected == number_of_people
else:
assert people_severely_affected == 0
# Contents and structural damage is done according
# to different damage curves and should therefore be different
if depth > 0 and contents_damage > 0:
assert contents_damage != structural_damage
def test_raster_vector_interpolation_exception(self):
"""Exceptions are caught by interpolate_raster_points
"""
hazard_filename = ('%s/tsunami_max_inundation_depth_4326.tif'
% TESTDATA)
exposure_filename = ('%s/tsunami_building_exposure.shp' % TESTDATA)
# Calculate impact using API
H = read_layer(hazard_filename)
E = read_layer(exposure_filename)
try:
interpolate_raster_vector_points(H, E, mode='oexoeua')
except InaSAFEError:
pass
else:
msg = 'Should have raised InaSAFEError'
raise Exception(msg)
# FIXME (Ole): Try some other error conditions
def test_tephra_load_impact(self):
"""Hypothetical tephra load scenario can be computed
This test also exercises reprojection of UTM data
"""
# File names for hazard level and exposure
# FIXME - when we know how to reproject, replace hazard
# file with UTM version (i.e. without _geographic).
hazard_filename = join(TESTDATA,
'Ashload_Gede_VEI4_geographic.asc')
exposure_filename = join(TESTDATA, 'test_buildings.shp')
# Calculate impact using API
H = read_layer(hazard_filename)
E = read_layer(exposure_filename)
plugin_name = 'Tephra Building Impact Function'
plugin_list = get_plugins(plugin_name)
assert len(plugin_list) == 1
assert plugin_list[0].keys()[0] == plugin_name
IF = plugin_list[0][plugin_name]
impact_vector = calculate_impact(layers=[H, E],
impact_fcn=IF)
# Read input data
hazard_raster = read_layer(hazard_filename)
load_min, load_max = hazard_raster.get_extrema()
exposure_vector = read_layer(exposure_filename)
attributes = exposure_vector.get_data()
# Extract calculated result
attributes = impact_vector.get_data()
# Test that results are as expected
# FIXME: Change test when we decide what values should actually be
# calculated :-) :-) :-)
for a in attributes:
load = a['ASHLOAD']
impact = a['DAMAGE']
# Test interpolation
msg = 'Load %.15f was outside bounds [%f, %f]' % (load,
load_min,
load_max)
if not numpy.isnan(load):
assert load_min <= load <= load_max, msg
            # Test calculated values
#if 0.01 <= load < 90.0:
# assert impact == 1
#elif 90.0 <= load < 150.0:
# assert impact == 2
#elif 150.0 <= load < 300.0:
# assert impact == 3
#elif load >= 300.0:
# assert impact == 4
#else:
# assert impact == 0
if 0.01 <= load < 0.5:
assert impact == 0
elif 0.5 <= load < 2.0:
assert impact == 1
elif 2.0 <= load < 10.0:
assert impact == 2
elif load >= 10.0:
assert impact == 3
else:
assert impact == 0
def test_interpolation_wrapper(self):
"""Interpolation library works for linear function
"""
# Create test data
lon_ul = 100 # Longitude of upper left corner
lat_ul = 10 # Latitude of upper left corner
numlon = 8 # Number of longitudes
numlat = 5 # Number of latitudes
# Define array where latitudes are rows and longitude columns
A = numpy.zeros((numlat, numlon))
# Establish coordinates for lower left corner
lat_ll = lat_ul - numlat
lon_ll = lon_ul
# Define pixel centers along each direction
longitudes = numpy.linspace(lon_ll + 0.5,
lon_ll + numlon - 0.5, numlon)
latitudes = numpy.linspace(lat_ll + 0.5,
lat_ll + numlat - 0.5, numlat)
# Define raster with latitudes going bottom-up (south to north).
# Longitudes go left-right (west to east)
for i in range(numlat):
for j in range(numlon):
A[numlat - 1 - i, j] = linear_function(longitudes[j],
latitudes[i])
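        # Note the row flip above: row 0 of A holds the northernmost
        # latitude, matching the usual top-down raster storage convention.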
# Test first that original points are reproduced correctly
for i, eta in enumerate(latitudes):
for j, xi in enumerate(longitudes):
val = interpolate_raster(longitudes, latitudes, A,
[(xi, eta)], mode='linear')[0]
assert numpy.allclose(val,
linear_function(xi, eta),
rtol=1e-12, atol=1e-12)
        # Then test that genuinely interpolated points are correct
xis = numpy.linspace(lon_ll + 1, lon_ll + numlon - 1, 10 * numlon)
etas = numpy.linspace(lat_ll + 1, lat_ll + numlat - 1, 10 * numlat)
for xi in xis:
for eta in etas:
val = interpolate_raster(longitudes, latitudes, A,
[(xi, eta)], mode='linear')[0]
assert numpy.allclose(val,
linear_function(xi, eta),
rtol=1e-12, atol=1e-12)
test_interpolation_wrapper.slow = True
def test_interpolation_functions(self):
"""Interpolation using Raster and Vector objects
"""
# Create test data
lon_ul = 100 # Longitude of upper left corner
lat_ul = 10 # Latitude of upper left corner
numlon = 8 # Number of longitudes
numlat = 5 # Number of latitudes
dlon = 1
dlat = -1
# Define array where latitudes are rows and longitude columns
A = numpy.zeros((numlat, numlon))
# Establish coordinates for lower left corner
lat_ll = lat_ul - numlat
lon_ll = lon_ul
# Define pixel centers along each direction
longitudes = numpy.linspace(lon_ll + 0.5,
lon_ll + numlon - 0.5,
numlon)
latitudes = numpy.linspace(lat_ll + 0.5,
lat_ll + numlat - 0.5,
numlat)
# Define raster with latitudes going bottom-up (south to north).
# Longitudes go left-right (west to east)
for i in range(numlat):
for j in range(numlon):
A[numlat - 1 - i, j] = linear_function(longitudes[j],
latitudes[i])
# Write array to a raster file
geotransform = (lon_ul, dlon, 0, lat_ul, 0, dlat)
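        # GDAL-style geotransform: (top left x, pixel width, x rotation,
        # top left y, y rotation, pixel height). dlat is negative because
        # raster rows run from north to south.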
projection = ('GEOGCS["GCS_WGS_1984",'
'DATUM["WGS_1984",'
'SPHEROID["WGS_1984",6378137.0,298.257223563]],'
'PRIMEM["Greenwich",0.0],'
'UNIT["Degree",0.0174532925199433]]')
raster_filename = unique_filename(suffix='.tif')
write_raster_data(A,
projection,
geotransform,
raster_filename)
# Write test interpolation point to a vector file
coordinates = []
for xi in longitudes:
for eta in latitudes:
coordinates.append((xi, eta))
vector_filename = unique_filename(suffix='.shp')
write_vector_data(data=None,
projection=projection,
geometry=coordinates,
filename=vector_filename)
# Read both datasets back in
R = read_layer(raster_filename)
V = read_layer(vector_filename)
# Then test that axes and data returned by R are correct
x, y = R.get_geometry() # pylint: disable=W0633,W0632
msg = 'X axes was %s, should have been %s' % (longitudes, x)
assert numpy.allclose(longitudes, x), msg
msg = 'Y axes was %s, should have been %s' % (latitudes, y)
assert numpy.allclose(latitudes, y), msg
AA = R.get_data()
msg = 'Raster data was %s, should have been %s' % (AA, A)
assert numpy.allclose(AA, A), msg
# Test interpolation function with default layer_name
I = assign_hazard_values_to_exposure_data(R, V, attribute_name='value')
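        # With a raster hazard and point exposure this reduces to bilinear
        # interpolation of the grid at each point; the sampled value is
        # stored under the attribute name 'value'.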
#msg = 'Got name %s, expected %s' % (I.get_name(), V.get_name())
#assert V.get_name() == I.get_name(), msg
Icoordinates = I.get_geometry()
Iattributes = I.get_data()
assert numpy.allclose(Icoordinates, coordinates)
# Test that interpolated points are correct
for i, (xi, eta) in enumerate(Icoordinates):
z = Iattributes[i]['value']
#print xi, eta, z, linear_function(xi, eta)
assert numpy.allclose(z, linear_function(xi, eta),
rtol=1e-12)
# FIXME (Ole): Need test for values outside grid.
# They should be NaN or something
# Cleanup
# FIXME (Ole): Shape files are a collection of files. How to remove?
os.remove(vector_filename)
def test_interpolation_lembang(self):
"""Interpolation using Lembang data set
"""
# Name file names for hazard level, exposure and expected fatalities
hazard_filename = '%s/lembang_mmi_hazmap.asc' % TESTDATA
exposure_filename = '%s/test_buildings.shp' % TESTDATA
# Read input data
hazard_raster = read_layer(hazard_filename)
mmi_min, mmi_max = hazard_raster.get_extrema()
exposure_vector = read_layer(exposure_filename)
coordinates = exposure_vector.get_geometry()
attributes = exposure_vector.get_data()
# Test interpolation function
I = assign_hazard_values_to_exposure_data(hazard_raster,
exposure_vector,
attribute_name='MMI')
Icoordinates = I.get_geometry()
Iattributes = I.get_data()
assert numpy.allclose(Icoordinates, coordinates)
# Check that interpolated MMI was done as expected
fid = open('%s/test_buildings_percentage_loss_and_mmi.txt' % TESTDATA)
reference_points = []
MMI = []
for line in fid.readlines()[1:]:
fields = line.strip().split(',')
lon = float(fields[4][1:-1])
lat = float(fields[3][1:-1])
mmi = float(fields[-1][1:-1])
reference_points.append((lon, lat))
MMI.append(mmi)
# Verify that coordinates are consistent
msg = 'Interpolated coordinates do not match those of test data'
assert numpy.allclose(Icoordinates, reference_points), msg
# Verify interpolated MMI with test result
for i in range(len(MMI)):
calculated_mmi = Iattributes[i]['MMI']
# Check that interpolated points are within range
msg = ('Interpolated MMI %f was outside extrema: '
'[%f, %f]. ' % (calculated_mmi, mmi_min, mmi_max))
assert mmi_min <= calculated_mmi <= mmi_max, msg
            # Check that the result is within 2% - this is good enough
            # as the reference value was calculated using EQRM and thus
            # differs slightly.
assert numpy.allclose(calculated_mmi, MMI[i], rtol=0.02)
# Check that all original attributes were carried through
# according to issue #101
for key in attributes[i]:
msg = 'Expected key %s in interpolated attributes' % key
assert key in Iattributes[i], msg
Ival = Iattributes[i][key]
val = attributes[i][key]
msg = ('Interpolated attribute %s did not have the '
'expected value %s. I got %s' % (key, val, Ival))
try:
assert Ival == val, msg
except AssertionError:
assert numpy.allclose(Ival, val, rtol=1.0e-6), msg
test_interpolation_lembang.slow = True
def test_interpolation_tsunami(self):
"""Interpolation using tsunami data set works
        This is a test for issue #19 about interpolation overshoot
"""
# Name file names for hazard level, exposure and expected fatalities
hazard_filename = ('%s/tsunami_max_inundation_depth_4326.tif'
% TESTDATA)
exposure_filename = ('%s/tsunami_building_exposure.shp' % TESTDATA)
# Read input data
hazard_raster = read_layer(hazard_filename)
depth_min, depth_max = hazard_raster.get_extrema()
exposure_vector = read_layer(exposure_filename)
coordinates = exposure_vector.get_geometry()
# Test interpolation function
I = assign_hazard_values_to_exposure_data(hazard_raster,
exposure_vector,
attribute_name='depth')
Icoordinates = I.get_geometry()
Iattributes = I.get_data()
assert numpy.allclose(Icoordinates, coordinates)
# Verify interpolated values with test result
for i in range(len(Icoordinates)):
interpolated_depth = Iattributes[i]['depth']
# Check that interpolated points are within range
msg = ('Interpolated depth %f at point %i was outside extrema: '
'[%f, %f]. ' % (interpolated_depth, i,
depth_min, depth_max))
if not numpy.isnan(interpolated_depth):
assert depth_min <= interpolated_depth <= depth_max, msg
def test_interpolation_tsunami_maumere(self):
"""Interpolation using tsunami data set from Maumere
This is a test for interpolation (issue #19)
"""
# Name file names for hazard level, exposure and expected fatalities
hazard_filename = ('%s/maumere_aos_depth_20m_land_wgs84.asc'
% HAZDATA)
exposure_filename = ('%s/maumere_pop_prj.shp' % TESTDATA)
# Read input data
H = read_layer(hazard_filename)
depth_min, depth_max = H.get_extrema()
# Compare extrema to values read off QGIS for this layer
assert numpy.allclose([depth_min, depth_max], [0.0, 16.68],
rtol=1.0e-6, atol=1.0e-10)
E = read_layer(exposure_filename)
coordinates = E.get_geometry()
attributes = E.get_data()
# Test the interpolation function
I = assign_hazard_values_to_exposure_data(H, E, attribute_name='depth')
Icoordinates = I.get_geometry()
Iattributes = I.get_data()
assert numpy.allclose(Icoordinates, coordinates)
N = len(Icoordinates)
assert N == 891
# Verify interpolated values with test result
for i in range(N):
interpolated_depth = Iattributes[i]['depth']
pointid = attributes[i]['POINTID']
if pointid == 263:
#print i, pointid, attributes[i],
#print interpolated_depth, coordinates[i]
# Check that location is correct
assert numpy.allclose(coordinates[i],
[122.20367299, -8.61300358])
                # This is known to be outside the inundation area, so the
                # interpolated depth should be near zero
assert numpy.allclose(interpolated_depth, 0.0,
rtol=1.0e-12, atol=1.0e-12)
if pointid == 148:
# Check that location is correct
assert numpy.allclose(coordinates[i],
[122.2045912, -8.608483265])
# This is in an inundated area with a surrounding depths of
# 4.531, 3.911
# 2.675, 2.583
assert interpolated_depth < 4.531
assert interpolated_depth < 3.911
assert interpolated_depth > 2.583
assert interpolated_depth > 2.675
# This is a characterisation test for bilinear interpolation
assert numpy.allclose(interpolated_depth, 3.62477204455,
rtol=1.0e-12, atol=1.0e-12)
# Check that interpolated points are within range
msg = ('Interpolated depth %f at point %i was outside extrema: '
'[%f, %f]. ' % (interpolated_depth, i,
depth_min, depth_max))
if not numpy.isnan(interpolated_depth):
assert depth_min <= interpolated_depth <= depth_max, msg
test_interpolation_tsunami_maumere.slow = True
def test_polygon_clipping(self):
"""Clipping using real polygon and point data from Maumere
"""
# Test data
polygon_filename = ('%s/test_poly.txt' % TESTDATA) # Polygon 799
points_filename = ('%s/test_points.txt' % TESTDATA)
# Read
polygon = []
fid = open(polygon_filename)
for line in fid.readlines():
fields = line.strip().split(',')
polygon.append([float(fields[0]), float(fields[1])])
polygon = ensure_numeric(polygon)
points = []
fid = open(points_filename)
for line in fid.readlines():
fields = line.strip().split(',')
points.append([float(fields[0]), float(fields[1])])
points = ensure_numeric(points)
# Clip
inside, outside = separate_points_by_polygon(points, polygon)
# Expected number of points inside
assert len(inside) == 458
# First 10 inside
assert numpy.alltrue(inside[:10] == [2279, 2290, 2297, 2306, 2307,
2313, 2316, 2319, 2321, 2322])
# Last 10 outside
assert numpy.alltrue(outside[-10:] == [3519, 3520, 3521, 3522, 3523,
3524, 3525, 3526, 3527, 3528])
# Store for viewing in e.g. QGis
if False: # True:
Vector(geometry=[polygon]).write_to_file('test_poly.shp')
pts_inside = points[inside]
Vector(geometry=pts_inside).write_to_file('test_points_in.shp')
pts_outside = points[outside]
Vector(geometry=pts_outside).write_to_file('test_points_out.shp')
test_polygon_clipping.slow = True
def test_interpolation_from_polygons_one_poly(self):
"""Point interpolation using one polygon from Maumere works
This is a test for interpolation (issue #48)
"""
# Name file names for hazard level and exposure
hazard_filename = ('%s/tsunami_polygon_WGS84.shp' % TESTDATA)
exposure_filename = ('%s/building_Maumere.shp' % TESTDATA)
# Read input data
H = read_layer(hazard_filename)
H_attributes = H.get_data()
H_geometry = H.get_geometry()
# Cut down to make test quick
# Polygon #799 is the one used in separate test
H = Vector(data=H_attributes[799:800],
geometry=H_geometry[799:800],
projection=H.get_projection())
#H.write_to_file('MM_799.shp') # E.g. to view with QGis
E = read_layer(exposure_filename)
E_attributes = E.get_data()
# Test interpolation function
I = assign_hazard_values_to_exposure_data(H, E,
layer_name='depth')
I_attributes = I.get_data()
msg = 'Expected "depth", got %s' % I.get_name()
assert I.get_name() == 'depth', msg
N = len(I_attributes)
assert N == len(E_attributes)
# Assert that expected attribute names exist
I_names = I.get_attribute_names()
H_names = H.get_attribute_names()
E_names = E.get_attribute_names()
for name in H_names:
msg = 'Did not find hazard name "%s" in %s' % (name, I_names)
assert name in I_names, msg
for name in E_names:
msg = 'Did not find exposure name "%s" in %s' % (name, I_names)
assert name in I_names, msg
# Verify interpolated values with test result
count = 0
for i in range(N):
category = I_attributes[i]['Category']
if category is not None:
count += 1
msg = ('Expected 458 points tagged with category, '
'but got only %i' % count)
assert count == 458, msg
test_interpolation_from_polygons_one_poly.slow = True
def test_interpolation_from_polygons_multiple(self):
"""Point interpolation using multiple polygons from Maumere works
This is a test for interpolation (issue #48)
"""
# FIXME (Ole): Really should move this and subsequent tests to
# test_io.py
# Name file names for hazard and exposure
hazard_filename = ('%s/tsunami_polygon_WGS84.shp' % TESTDATA)
exposure_filename = ('%s/building_Maumere.shp' % TESTDATA)
# Read input data
H = read_layer(hazard_filename)
H_attributes = H.get_data()
H_geometry = H.get_geometry()
# Full version
H = Vector(data=H_attributes,
geometry=H_geometry,
projection=H.get_projection())
E = read_layer(exposure_filename)
E_attributes = E.get_data()
# Test interpolation function
I = assign_hazard_values_to_exposure_data(H, E,
layer_name='depth')
I_attributes = I.get_data()
N = len(I_attributes)
assert N == len(E_attributes)
# Assert that expected attribute names exist
I_names = I.get_attribute_names()
H_names = H.get_attribute_names()
E_names = E.get_attribute_names()
for name in H_names:
msg = 'Did not find hazard name "%s" in %s' % (name, I_names)
assert name in I_names, msg
for name in E_names:
msg = 'Did not find exposure name "%s" in %s' % (name, I_names)
assert name in I_names, msg
# Verify interpolated values with test result
counts = {}
for i in range(N):
attrs = I_attributes[i]
msg = ('Did not find default attribute %s in %s'
% (DEFAULT_ATTRIBUTE, attrs.keys()))
assert DEFAULT_ATTRIBUTE in attrs, msg
# Count items using default attribute
if DEFAULT_ATTRIBUTE not in counts:
counts[DEFAULT_ATTRIBUTE] = 0
counts['Not ' + DEFAULT_ATTRIBUTE] = 0
if attrs[DEFAULT_ATTRIBUTE]:
counts[DEFAULT_ATTRIBUTE] += 1
else:
counts['Not ' + DEFAULT_ATTRIBUTE] += 1
# Count items in each specific category
category = attrs['Category']
if category not in counts:
counts[category] = 0
counts[category] += 1
if len(H) == 192:
# In case we used cut down version
msg = ('Expected 100 points tagged with category "High", '
'but got only %i' % counts['High'])
assert counts['High'] == 100, msg
msg = ('Expected 739 points tagged with category "Very High", '
'but got only %i' % counts['Very High'])
assert counts['Very High'] == 739, msg
# Check default attribute too
msg = ('Expected 839 points tagged with default attribute "%s", '
'but got only %i' % (DEFAULT_ATTRIBUTE,
counts[DEFAULT_ATTRIBUTE]))
assert counts[DEFAULT_ATTRIBUTE] == 839, msg
            msg = 'Affected and not affected do not add up'
assert (counts[DEFAULT_ATTRIBUTE] +
counts['Not ' + DEFAULT_ATTRIBUTE]) == len(E), msg
if len(H) == 1032:
# The full version
msg = ('Expected 2258 points tagged with category "High", '
'but got only %i' % counts['High'])
assert counts['High'] == 2258, msg
msg = ('Expected 1190 points tagged with category "Very High", '
'but got only %i' % counts['Very High'])
assert counts['Very High'] == 1190, msg
# Check default attribute too
msg = ('Expected 3452 points tagged with default attribute '
'"%s = True", '
'but got only %i' % (DEFAULT_ATTRIBUTE,
counts[DEFAULT_ATTRIBUTE]))
assert counts[DEFAULT_ATTRIBUTE] == 3452, msg
msg = ('Expected 76 points tagged with default attribute '
'"%s = False", '
'but got only %i' % (DEFAULT_ATTRIBUTE,
counts['Not ' + DEFAULT_ATTRIBUTE]))
assert counts['Not ' + DEFAULT_ATTRIBUTE] == 76, msg
            msg = 'Affected and not affected do not add up'
assert (counts[DEFAULT_ATTRIBUTE] +
counts['Not ' + DEFAULT_ATTRIBUTE]) == len(E), msg
#for key in counts:
# print key, counts[key]
test_interpolation_from_polygons_multiple.slow = True
def test_interpolation_from_polygons_error_handling(self):
"""Interpolation using polygons handles input errors as expected
        This catches the situation where input data have different projections
This is a test for interpolation (issue #48)
"""
# Input data
hazard_filename = ('%s/tsunami_polygon.shp' % TESTDATA) # UTM
exposure_filename = ('%s/building_Maumere.shp' % TESTDATA) # GEO
# Read input data
H = read_layer(hazard_filename)
E = read_layer(exposure_filename)
# Check projection mismatch is caught
try:
assign_hazard_values_to_exposure_data(H, E)
except VerificationError, e:
msg = ('Projection mismatch should have been caught: %s'
% str(e))
assert 'Projections' in str(e), msg
else:
msg = 'Should have raised error about projection mismatch'
raise Exception(msg)
test_interpolation_from_polygons_error_handling.slow = True
def test_line_clipping_by_polygon(self):
"""Multiple lines are clipped correctly by complex polygon
"""
# Test data
# Polygon 799 (520 x 2)
polygon_filename = ('%s/test_poly.txt' % TESTDATA)
# 156 composite lines
lines_filename = ('%s/test_lines.pck' % TESTDATA)
# Read
test_polygon = []
fid = open(polygon_filename)
for line in fid.readlines():
fields = line.strip().split(',')
test_polygon.append([float(fields[0]), float(fields[1])])
test_polygon = ensure_numeric(test_polygon)
fid = open(lines_filename)
test_lines = cPickle.load(fid)
fid.close()
# Clip
inside_lines, outside_lines = clip_lines_by_polygon(test_lines,
test_polygon)
# Convert dictionaries to lists of lines (to fit test)
inside_line_geometry = line_dictionary_to_geometry(inside_lines)
outside_line_geometry = line_dictionary_to_geometry(outside_lines)
        # These lines have components both inside and outside
assert len(inside_line_geometry) == 14
assert len(outside_line_geometry) == 167
# Check that midpoints of each segment are correctly placed
inside_centroids = []
for line in inside_line_geometry:
for i in range(len(line) - 1):
seg0 = line[i]
seg1 = line[i + 1]
midpoint = (seg0 + seg1) / 2
inside_centroids.append(midpoint)
assert is_inside_polygon(midpoint, test_polygon)
outside_centroids = []
for line in outside_line_geometry:
for i in range(len(line) - 1):
seg0 = line[i]
seg1 = line[i + 1]
midpoint = (seg0 + seg1) / 2
outside_centroids.append(midpoint)
assert not is_inside_polygon(midpoint, test_polygon)
# Possibly generate files for visual inspection with e.g. QGis
if False: # True:
P = Vector(geometry=[test_polygon])
P.write_to_file('test_polygon.shp')
L = Vector(geometry=test_lines, geometry_type='line')
L.write_to_file('test_lines.shp')
L = Vector(geometry=inside_line_geometry, geometry_type='line')
L.write_to_file('inside_lines.shp')
L = Vector(geometry=outside_line_geometry, geometry_type='line')
L.write_to_file('outside_lines.shp')
L = Vector(geometry=inside_centroids, geometry_type='point')
L.write_to_file('inside_centroids.shp')
L = Vector(geometry=outside_centroids, geometry_type='point')
L.write_to_file('outside_centroids.shp')
        # Characterisation test based on visual inspection with QGIS
#print inside_line_geometry[6]
assert numpy.allclose(inside_line_geometry[6],
[[122.23438722, -8.6277337],
[122.23316953, -8.62733247],
[122.23162128, -8.62683715],
[122.23156661, -8.62681168]])
#print outside_line_geometry[5]
assert numpy.allclose(outside_line_geometry[5],
[[122.18321143, -8.58901526],
[122.18353015, -8.58890024],
[122.18370883, -8.58884135],
[122.18376524, -8.58881115],
[122.18381025, -8.58878405],
[122.1838646, -8.58875119],
[122.18389685, -8.58873165],
[122.18394329, -8.58869283],
[122.18401084, -8.58862284],
[122.18408657, -8.58853526],
[122.18414936, -8.58845887],
[122.18425204, -8.58832279],
[122.18449009, -8.58804974],
[122.18457453, -8.58798668],
[122.18466284, -8.5878697]])
test_line_clipping_by_polygon.slow = True
def test_line_interpolation_from_polygons_one_poly(self):
"""Line clipping and interpolation using one polygon works
This is a test for road interpolation (issue #55)
"""
# Name file names for hazard level and exposure
hazard_filename = ('%s/tsunami_polygon_WGS84.shp' % TESTDATA)
exposure_filename = ('%s/roads_Maumere.shp' % TESTDATA)
# Read input data
H = read_layer(hazard_filename)
H_attributes = H.get_data()
H_geometry = H.get_geometry()
# Cut down to polygon #799 to make test quick
H = Vector(data=H_attributes[799:800],
geometry=H_geometry[799:800],
projection=H.get_projection())
H_attributes = H.get_data()
H_geometry = H.get_geometry()
E = read_layer(exposure_filename)
# Test interpolation function
I = assign_hazard_values_to_exposure_data(H, E,
layer_name='depth')
I_geometry = I.get_geometry()
I_attributes = I.get_data()
assert I.get_name() == 'depth'
N = len(I_attributes)
# Possibly generate files for visual inspection with e.g. QGis
if False:
H.write_to_file('test_polygon.shp')
E.write_to_file('test_lines.shp')
I.write_to_file('interpolated_lines.shp')
# Assert that all expected attribute names exist
I_names = I.get_attribute_names()
H_names = H.get_attribute_names()
E_names = E.get_attribute_names()
# Attributes from polygons
for name in H_names:
msg = 'Did not find hazard name "%s" in %s' % (name, I_names)
assert name in I_names, msg
# Attributes from original lines
for name in E_names:
msg = 'Did not find exposure name "%s" in %s' % (name, I_names)
assert name in I_names, msg
# New attributes
for name in [DEFAULT_ATTRIBUTE, 'polygon_id', 'parent_line_id']:
msg = 'Did not find new attribute name "%s" in %s' % (name,
I_names)
# FIXME (Ole): Shapefiles cut name down to 10 characters.
assert name in I_names, msg
# Verify interpolated values with test result
count = 0
counts = {}
for i in range(N):
# Check that default attribute is present
attrs = I_attributes[i]
msg = ('Did not find default attribute %s in %s'
% (DEFAULT_ATTRIBUTE, attrs.keys()))
assert DEFAULT_ATTRIBUTE in attrs, msg
# Count items using default attribute
if DEFAULT_ATTRIBUTE not in counts:
counts[DEFAULT_ATTRIBUTE] = 0
counts['Not ' + DEFAULT_ATTRIBUTE] = 0
if attrs[DEFAULT_ATTRIBUTE]:
counts[DEFAULT_ATTRIBUTE] += 1
else:
counts['Not ' + DEFAULT_ATTRIBUTE] += 1
# Check specific attribute
category = I_attributes[i]['Category']
if category is not None:
assert category.lower() in ['high', 'very high']
count += 1
msg = ('Expected 14 lines tagged with category, '
'but got only %i' % count)
assert count == 14, msg
assert len(I_geometry) == 14
# Check default attribute too
msg = ('Expected 14 segments tagged with default attribute '
'"%s = True", '
'but got only %i' % (DEFAULT_ATTRIBUTE,
counts[DEFAULT_ATTRIBUTE]))
assert counts[DEFAULT_ATTRIBUTE] == 14, msg
# Check against correctness verified in QGIS
assert I_attributes[13]['highway'] == 'road'
assert I_attributes[13]['osm_id'] == 69372744
assert I_attributes[13]['polygon_id'] == 0
assert I_attributes[13]['parent_line_id'] == 131
test_line_interpolation_from_polygons_one_poly.slow = True
def test_line_interpolation_from_multiple_polygons(self):
"""Line interpolation using multiple polygons works
This is a test for road interpolation (issue #55)
"""
# Name file names for hazard level and exposure
hazard_filename = ('%s/tsunami_polygon_WGS84.shp' % TESTDATA)
exposure_filename = ('%s/roads_Maumere.shp' % TESTDATA)
# Read input data
H = read_layer(hazard_filename)
H_attributes = H.get_data()
H_geometry = H.get_geometry()
# Cut down to 500 polygons
# (some e.g. #657 have thousands of vertices, others just a few)
H = Vector(data=H_attributes[300:657] + H_attributes[658:800],
geometry=H_geometry[300:657] + H_geometry[658:800],
projection=H.get_projection())
H_attributes = H.get_data()
H_geometry = H.get_geometry()
E = read_layer(exposure_filename)
# Test interpolation function
#import time
#t0 = time.time()
I = assign_hazard_values_to_exposure_data(H, E,
layer_name='depth')
#print 'This took', time.time() - t0
I_geometry = I.get_geometry()
I_attributes = I.get_data()
assert I.get_name() == 'depth'
N = len(I_attributes)
# Possibly generate files for visual inspection with e.g. QGis
if False: # True:
H.write_to_file('test_polygon.shp')
E.write_to_file('test_lines.shp')
I.write_to_file('interpolated_lines.shp')
# Assert that all expected attribute names exist
I_names = I.get_attribute_names()
H_names = H.get_attribute_names()
E_names = E.get_attribute_names()
# Attributes from polygons
for name in H_names:
msg = 'Did not find hazard name "%s" in %s' % (name, I_names)
assert name in I_names, msg
# Attributes from original lines
for name in E_names:
msg = 'Did not find exposure name "%s" in %s' % (name, I_names)
assert name in I_names, msg
# New attributes
for name in [DEFAULT_ATTRIBUTE, 'polygon_id', 'parent_line_id']:
msg = 'Did not find new attribute name "%s" in %s' % (name,
I_names)
# FIXME (Ole): Shapefiles cut name down to 10 characters.
assert name in I_names, msg
# Verify interpolated values with test result
count = 0
counts = {}
for i in range(N):
# Check that default attribute is present
attrs = I_attributes[i]
msg = ('Did not find default attribute %s in %s'
% (DEFAULT_ATTRIBUTE, attrs.keys()))
assert DEFAULT_ATTRIBUTE in attrs, msg
# Count items using default attribute
if DEFAULT_ATTRIBUTE not in counts:
counts[DEFAULT_ATTRIBUTE] = 0
counts['Not ' + DEFAULT_ATTRIBUTE] = 0
if attrs[DEFAULT_ATTRIBUTE]:
counts[DEFAULT_ATTRIBUTE] += 1
else:
counts['Not ' + DEFAULT_ATTRIBUTE] += 1
# Check specific attribute
category = I_attributes[i]['Category']
if category is not None:
msg = 'category = %s' % category
assert category.lower() in ['low', 'medium',
'high', 'very high'], msg
count += 1
msg = ('Expected 103 lines tagged with category, '
'but got only %i' % count)
assert count == 103, msg
assert len(I_geometry) == 103
# Check default attribute too
msg = ('Expected 103 segments tagged with default attribute '
'"%s = True", '
'but got only %i' % (DEFAULT_ATTRIBUTE,
counts[DEFAULT_ATTRIBUTE]))
assert counts[DEFAULT_ATTRIBUTE] == 103, msg
# Check against correctness verified in QGIS
assert I_attributes[40]['highway'] == 'residential'
assert I_attributes[40]['osm_id'] == 69373107
assert I_attributes[40]['polygon_id'] == 111
assert I_attributes[40]['parent_line_id'] == 54
assert I_attributes[76]['highway'] == 'secondary'
assert I_attributes[76]['Category'] == 'High'
assert I_attributes[76]['osm_id'] == 69370718
assert I_attributes[76]['polygon_id'] == 374
assert I_attributes[76]['parent_line_id'] == 1
assert I_attributes[85]['highway'] == 'secondary'
assert I_attributes[85]['Category'] == 'Very High'
assert I_attributes[85]['osm_id'] == 69371482
assert I_attributes[85]['polygon_id'] == 453
assert I_attributes[85]['parent_line_id'] == 133
test_line_interpolation_from_multiple_polygons.slow = True
def test_polygon_to_roads_interpolation_flood_example(self):
"""Roads can be tagged with values from flood polygons
This is a test for road interpolation (issue #55)
# The dataset is large: 2704 complex polygons
and 108082 complex line features - so has been cut down
in this test.
The runtime for the whole set is in the order of more than
        1 hour. Cutting the number of lines down by a factor of 10
brings it down to about 10 minutes (500 seconds).
A factor of 100 gives about 1 minute.
"""
# Name file names for hazard level and exposure
hazard_filename = ('%s/rw_jakarta_singlepart.shp' % TESTDATA)
exposure_filename = ('%s/indonesia_highway.shp' % EXPDATA)
# Read all input data
H = read_layer(hazard_filename) # Polygons
H_attributes = H.get_data()
H_geometry = H.get_geometry()
assert len(H) == 2704
E = read_layer(exposure_filename) # Lines - this is slow to read
E_geometry = E.get_geometry()
E_attributes = E.get_data()
assert len(E) == 108082
# Cut number of road features down
# A factor of ten brings the runtime down to about 10 minutes.
        # A factor of one hundred brings the runtime down to less than 1 minute.
E = Vector(data=E_attributes[:-1:100],
geometry=E_geometry[:-1:100],
projection=E.get_projection(),
geometry_type=E.geometry_type)
# Test interpolation function
#import time
#t0 = time.time()
I = assign_hazard_values_to_exposure_data(H, E,
layer_name='depth')
#print 'That took %f seconds' % (time.time() - t0)
# TODO:
# Keep only those roads that are marked FLOODPRONE == 'YES'
I_geometry = I.get_geometry()
I_attributes = I.get_data()
# Possibly generate files for visual inspection with e.g. QGis
if False:
L = Vector(geometry=H_geometry, geometry_type='polygon',
data=H_attributes)
L.write_to_file('flood_polygon.shp')
L = Vector(geometry=I_geometry, geometry_type='line',
data=I_attributes)
L.write_to_file('flood_tagged_roads.shp')
# Assert that expected attribute names exist
I_names = I.get_attribute_names()
H_names = H.get_attribute_names()
E_names = E.get_attribute_names()
for name in H_names:
msg = 'Did not find hazard name "%s" in %s' % (name, I_names)
assert name in I_names, msg
for name in E_names:
msg = 'Did not find exposure name "%s" in %s' % (name, I_names)
assert name in I_names, msg
# FIXME (Ole): Finish this test
# Check that attributes have been carried through
#for i, attr in enumerate(I_attributes):
# pass
# # TODO
# Check against correctness verified in QGIS
#assert I_attributes[]['highway'] ==
#assert I_attributes[]['osm_id'] ==
#assert I_attributes[]['polygon_id'] ==
#assert I_attributes[]['parent_line_id'] ==
test_polygon_to_roads_interpolation_flood_example.slow = True
def Xtest_polygon_to_roads_interpolation_jakarta_flood_example1(self):
"""Roads can be tagged with values from flood polygons
This is a test for road interpolation (issue #55)
# The dataset is: 2704 complex polygons and 18574 complex line features
"""
# Name file names for hazard level and exposure
hazard_filename = ('%s/rw_jakarta_singlepart.shp' % TESTDATA)
exposure_filename = ('%s/jakarta_roads.shp' % EXPDATA)
# Read all input data
H = read_layer(hazard_filename) # Polygons
H_attributes = H.get_data()
H_geometries = H.get_geometry()
assert len(H) == 2704
# Use only polygons marked as flood prone
# to get the result quicker.
cut_attributes = []
cut_geometries = []
for i in range(len(H)):
val = H_attributes[i]['FLOODPRONE']
if val is not None and val.lower().startswith('yes'):
cut_attributes.append(H_attributes[i])
cut_geometries.append(H_geometries[i])
H = Vector(data=cut_attributes,
geometry=cut_geometries,
projection=H.get_projection(),
geometry_type=H.geometry_type)
assert len(H) == 1011
E = read_layer(exposure_filename)
E_geometries = E.get_geometry()
E_attributes = E.get_data()
assert len(E) == 18574
# Get statistics of road types
road_types = {}
E_attributes = E.get_data()
for i in range(len(E)):
roadtype = E_attributes[i]['TYPE']
if roadtype in road_types:
road_types[roadtype] += 1
else:
road_types[roadtype] = 0
#for att in road_types:
# print att, road_types[att]
assert road_types['residential'] == 14853
        # Remove residential roads
cut_attributes = []
cut_geometries = []
for i in range(len(E)):
val = E_attributes[i]['TYPE']
if val != 'residential':
cut_attributes.append(E_attributes[i])
cut_geometries.append(E_geometries[i])
# Cut even further for the purpose of testing
E = Vector(data=cut_attributes[:-1:5],
geometry=cut_geometries[:-1:5],
projection=E.get_projection(),
geometry_type=E.geometry_type)
assert len(E) == 744
# Test interpolation function
#import time
#t0 = time.time()
I = assign_hazard_values_to_exposure_data(H, E,
layer_name='depth')
#print ('Using 2704 individual polygons took %f seconds'
# % (time.time() - t0))
#I.write_to_file('flood_prone_roads_jakarta_individual.shp')
# Check against correctness verified in QGIS
I_attributes = I.get_data()
assert I_attributes[198]['TYPE'] == 'secondary'
assert I_attributes[198]['NAME'] == 'Lingkar Mega Kuningan'
assert I_attributes[198]['KEL_NAME'] == 'KUNINGAN TIMUR'
assert I_attributes[198]['polygon_id'] == 235
assert I_attributes[198]['parent_line_id'] == 333
Xtest_polygon_to_roads_interpolation_jakarta_flood_example1.slow = True
def Xtest_polygon_to_roads_interpolation_jakarta_flood_merged(self):
"""Roads can be tagged with values from flood polygons
This is a test for road interpolation (issue #55)
# The dataset is: 59 merged complex polygons and 18574
# complex line features
"""
# Name file names for hazard level and exposure
hazard_filename = ('%s/RW_2007_dissolve.shp' % TESTDATA)
exposure_filename = ('%s/jakarta_roads.shp' % EXPDATA)
# Read all input data
H = read_layer(hazard_filename) # Polygons
#H_attributes = H.get_data()
#H_geometries = H.get_geometry()
print len(H)
assert len(H) == 35
E = read_layer(exposure_filename)
E_geometries = E.get_geometry()
#E_attributes = E.get_data()
assert len(E) == 18574
# Get statistics of road types
road_types = {}
E_attributes = E.get_data()
for i in range(len(E)):
roadtype = E_attributes[i]['TYPE']
if roadtype in road_types:
road_types[roadtype] += 1
else:
road_types[roadtype] = 0
#for att in road_types:
# print att, road_types[att]
assert road_types['residential'] == 14853
        # Remove residential roads
cut_attributes = []
cut_geometries = []
for i in range(len(E)):
val = E_attributes[i]['TYPE']
if val != 'residential':
cut_attributes.append(E_attributes[i])
cut_geometries.append(E_geometries[i])
# Cut even further for the purpose of testing
E = Vector(data=cut_attributes[:-1:5],
geometry=cut_geometries[:-1:5],
projection=E.get_projection(),
geometry_type=E.geometry_type)
assert len(E) == 744
# Test interpolation function
import time
t0 = time.time()
print
print 'start'
I = assign_hazard_values_to_exposure_data(H, E,
layer_name='depth')
print 'Using merged polygon took %f seconds' % (time.time() - t0)
I.write_to_file('flood_prone_roads_jakarta_merged.shp')
# Check against correctness verified in QGIS
#I_attributes = I.get_data()
#assert I_attributes[198]['TYPE'] == 'secondary'
#assert I_attributes[198]['NAME'] == 'Lingkar Mega Kuningan'
#assert I_attributes[198]['KEL_NAME'] == 'KUNINGAN TIMUR'
#assert I_attributes[198]['polygon_id'] == 235
#assert I_attributes[198]['parent_line_id'] == 333
Xtest_polygon_to_roads_interpolation_jakarta_flood_merged.slow = True
def test_layer_integrity_raises_exception(self):
"""Layers without keywords raise exception
"""
population = 'Population_Jakarta_geographic.asc'
plugin_name = 'HKVtest'
hazard_layers = ['Flood_Current_Depth_Jakarta_geographic.asc',
'Flood_Design_Depth_Jakarta_geographic.asc']
for i, filename in enumerate(hazard_layers):
hazard_filename = join(HAZDATA, filename)
exposure_filename = join(TESTDATA, population)
# Get layers using API
H = read_layer(hazard_filename)
E = read_layer(exposure_filename)
plugin_list = get_plugins(plugin_name)
IF = plugin_list[0][plugin_name]
# Call impact calculation engine normally
calculate_impact(layers=[H, E],
impact_fcn=IF)
# Make keyword value empty and verify exception is raised
expected_category = E.keywords['category']
E.keywords['category'] = ''
try:
calculate_impact(layers=[H, E],
impact_fcn=IF)
except VerificationError, e:
# Check expected error message
assert 'No value found' in str(e)
else:
msg = 'Empty keyword value should have raised exception'
raise Exception(msg)
# Restore for next test
E.keywords['category'] = expected_category
# Remove critical keywords and verify exception is raised
if i == 0:
del H.keywords['category']
else:
del H.keywords['subcategory']
try:
calculate_impact(layers=[H, E],
impact_fcn=IF)
except VerificationError, e:
# Check expected error message
assert 'did not have required keyword' in str(e)
else:
msg = 'Missing keyword should have raised exception'
raise Exception(msg)
test_layer_integrity_raises_exception.slow = True
def test_padang_building_examples(self):
"""Padang building impact calculation works through the API
"""
plugin_name = 'Padang Earthquake Building Damage Function'
# Test for a range of hazard layers
for mmi_filename in ['Shakemap_Padang_2009.asc']:
#'Lembang_Earthquake_Scenario.asc']:
# Upload input data
hazard_filename = join(HAZDATA, mmi_filename)
exposure_filename = join(TESTDATA, 'Padang_WGS84.shp')
# Get layers using API
H = read_layer(hazard_filename)
E = read_layer(exposure_filename)
plugin_list = get_plugins(plugin_name)
assert len(plugin_list) == 1
assert plugin_list[0].keys()[0] == plugin_name
IF = plugin_list[0][plugin_name]
# Call impact calculation engine
impact_vector = calculate_impact(layers=[H, E],
impact_fcn=IF)
# Read hazard data for reference
hazard_raster = read_layer(hazard_filename)
mmi_min, mmi_max = hazard_raster.get_extrema()
# Extract calculated result
coordinates = impact_vector.get_geometry()
attributes = impact_vector.get_data()
# Verify calculated result
count = 0
verified_count = 0
for i in range(len(attributes)):
lon, lat = coordinates[i][:]
calculated_mmi = attributes[i]['MMI']
if calculated_mmi == 0.0:
# FIXME (Ole): Some points have MMI==0 here.
# Weird but not a show stopper
continue
# Check that interpolated points are within range
msg = ('Interpolated mmi %f was outside extrema: '
'[%f, %f] at location '
'[%f, %f]. ' % (calculated_mmi,
mmi_min, mmi_max,
lon, lat))
assert mmi_min <= calculated_mmi <= mmi_max, msg
building_class = attributes[i]['VCLASS']
# Check calculated damage
calculated_dam = attributes[i]['DAMAGE']
#print calculated_mmi
verified_dam = padang_check_results(calculated_mmi,
building_class)
#print calculated_mmi, building_class, calculated_dam
if verified_dam:
msg = ('Calculated damage was not as expected '
'for hazard layer %s. I got %f '
'but expected %f' % (hazard_filename,
calculated_dam,
verified_dam))
assert numpy.allclose(calculated_dam, verified_dam,
rtol=1.0e-4), msg
verified_count += 1
count += 1
            msg = ('No points were verified in output. Please create '
                   'a table with the reference data')
assert verified_count > 0, msg
            msg = 'Number of buildings was not 3896.'
assert count == 3896, msg
test_padang_building_examples.slow = True
def test_itb_building_function(self):
"""Damage ratio (estimated repair cost relative to replacement cost)
can be computed using the ITB building vulnerability model.
(Test data from Hyeuk Ryu).
As of July 4, 2012, the vulnerability model used to generate
        the reference values is a dummy one, and it will be updated with
the ITB's model later.
"""
# Name file names for hazard level, exposure and expected impact
hazard_filename = '%s/Shakemap_Padang_2009.asc' % HAZDATA
exposure_filename = '%s/Padang_WGS84.shp' % TESTDATA
damage_filename = '%s/reference_result_itb.csv' % TESTDATA
a = open(damage_filename).readlines()[1:]
ref_damage = []
for item in a:
b = item.strip('\n').split(',')
ref_damage.append(float(b[2]))
ref_damage = numpy.array(ref_damage)
# Calculate impact using API
H = read_layer(hazard_filename)
E = read_layer(exposure_filename)
plugin_name = 'I T B Earthquake Building Damage Function'
plugin_list = get_plugins(plugin_name)
assert len(plugin_list) == 1
assert plugin_list[0].keys()[0] == plugin_name
IF = plugin_list[0][plugin_name]
# Call impact calculation engine
impact_vector = calculate_impact(layers=[H, E], impact_fcn=IF)
attributes = impact_vector.get_data()
# calculated_damage = []
for i in range(len(attributes)):
calculated_damage = attributes[i]['DAMAGE']
bldg_class = attributes[i]['ITB_Class']
msg = ('Calculated damage did not match expected result: \n'
'I got %s\n'
'Expected %s for bldg type: %s' %
(calculated_damage,
ref_damage[i],
bldg_class))
assert nan_allclose(calculated_damage, ref_damage[i],
# Reference data is single precision
atol=1.0e-6), msg
# print calculated_damage.shape
# bldg_class = attributes[:]['VCLASS']
# impact_filename = impact_vector.get_filename()
# I = read_layer(impact_filename)
# calculated_result = I.get_data()
# keywords = I.get_keywords()
# print keywords
# print calculated_damage
test_itb_building_function.slow = True
def test_flood_on_roads(self):
"""Jakarta flood (raster) impact on roads calculated correctly
"""
floods = 'Flood_Current_Depth_Jakarta_geographic.asc'
roads = 'indonesia_highway_sample.shp'
plugin_name = 'Flood Road Impact Function'
hazard_filename = join(HAZDATA, floods)
exposure_filename = join(TESTDATA, roads)
# Get layers using API
H = read_layer(hazard_filename)
E = read_layer(exposure_filename)
plugin_list = get_plugins(plugin_name)
IF = plugin_list[0][plugin_name]
_ = calculate_impact(layers=[H, E],
impact_fcn=IF)
# FIXME (Ole): To do when road functionality is done
test_flood_on_roads.slow = True
def test_flood_population_evacuation(self):
"""Flood population evacuation
"""
population = 'people_jakarta_clip.tif'
flood_data = 'flood_jakarta_clip.tif'
plugin_name = 'FloodEvacuationFunction'
hazard_filename = join(TESTDATA, flood_data)
exposure_filename = join(TESTDATA, population)
# Calculate impact using API
H = read_layer(hazard_filename)
E = read_layer(exposure_filename)
plugin_list = get_plugins(plugin_name)
assert len(plugin_list) == 1
assert plugin_list[0].keys()[0] == plugin_name
IF = plugin_list[0][plugin_name]
# Call calculation engine
impact_layer = calculate_impact(layers=[H, E],
impact_fcn=IF)
impact_filename = impact_layer.get_filename()
I = read_layer(impact_filename)
keywords = I.get_keywords()
# print "keywords", keywords
evacuated = float(keywords['evacuated'])
total_needs = keywords['total_needs']
expected_evacuated = 63000
assert evacuated == expected_evacuated
assert total_needs['rice'] == 176400
assert total_needs['family_kits'] == 12600
assert total_needs['drinking_water'] == 1102500
assert total_needs['toilets'] == 3150
assert total_needs['water'] == 6615000
def test_flood_population_evacuation_polygon(self):
"""Flood population evacuation (flood is polygon)
"""
population = 'pop_clip_flood_test.tif'
flood_data = 'flood_poly_clip_flood_test.shp'
plugin_name = 'FloodEvacuationFunctionVectorHazard'
hazard_filename = join(TESTDATA, flood_data)
exposure_filename = join(TESTDATA, population)
# Calculate impact using API
H = read_layer(hazard_filename)
E = read_layer(exposure_filename)
plugin_list = get_plugins(plugin_name)
assert len(plugin_list) == 1
assert plugin_list[0].keys()[0] == plugin_name
IF = plugin_list[0][plugin_name]
# Call calculation engine
impact_layer = calculate_impact(layers=[H, E],
impact_fcn=IF)
impact_filename = impact_layer.get_filename()
I = read_layer(impact_filename)
keywords = I.get_keywords()
# print "keywords", keywords
affected_population = float(keywords['affected_population'])
total_population = keywords['total_population']
assert affected_population == 133000
assert total_population == 162000
def test_erf(self):
"""Test ERF approximation
Reference data obtained from scipy as follows:
A = (numpy.arange(20) - 10.) / 2
F = scipy.special.erf(A)
See also table at http://en.wikipedia.org/wiki/Error_function
"""
# Simple tests
assert numpy.allclose(erf(0), 0.0, rtol=1.0e-6, atol=1.0e-6)
x = erf(1)
r = 0.842700792949715
msg = 'Expected %.12f, but got %.12f' % (r, x)
assert numpy.allclose(x, r, rtol=1.0e-6, atol=1.0e-12), msg
x = erf(0.5)
r = 0.5204999
msg = 'Expected %.12f, but got %.12f' % (r, x)
assert numpy.allclose(x, r, rtol=1.0e-6, atol=1.0e-12), msg
x = erf(3)
r = 0.999977909503001
msg = 'Expected %.12f, but got %.12f' % (r, x)
assert numpy.allclose(x, r, rtol=1.0e-6, atol=1.0e-12), msg
# Reference data
R = [-1., -1., -0.99999998, -0.99999926, -0.99997791, -0.99959305,
-0.99532227, -0.96610515, -0.84270079, -0.52049988, 0.,
0.52049988, 0.84270079, 0.96610515, 0.99532227, 0.99959305,
0.99997791, 0.99999926, 0.99999998, 1.]
A = (numpy.arange(20) - 10.) / 2
X = erf(A)
msg = ('ERF was not correct. I got %s but expected %s' %
(str(X), str(R)))
assert numpy.allclose(X, R, atol=1.0e-6, rtol=1.0e-12), msg
def test_normal_cdf(self):
"""Test Normal Cumulative Distribution Function
Reference data obtained from scipy as follows:
A = (numpy.arange(20) - 10.) / 5
R = scipy.stats.norm.cdf(A)
"""
# Simple tests
x = normal_cdf(0.0)
r = 0.5
msg = 'Expected %.12f, but got %.12f' % (r, x)
assert numpy.allclose(x, r, rtol=1.0e-6, atol=1.0e-12), msg
x = normal_cdf(0.5)
r = 0.69146246127401312
msg = 'Expected %.12f, but got %.12f' % (r, x)
assert numpy.allclose(x, r, rtol=1.0e-6, atol=1.0e-12), msg
x = normal_cdf(3.50)
r = 0.99976737092096446
msg = 'Expected %.12f, but got %.12f' % (r, x)
assert numpy.allclose(x, r, rtol=1.0e-6, atol=1.0e-12), msg
# Out of bounds
x = normal_cdf(-6)
r = 0
msg = 'Expected %.12f, but got %.12f' % (r, x)
assert numpy.allclose(x, r, rtol=1.0e-6, atol=1.0e-6), msg
x = normal_cdf(10)
r = 1
msg = 'Expected %.12f, but got %.12f' % (r, x)
assert numpy.allclose(x, r, rtol=1.0e-6, atol=1.0e-12), msg
# Reference data
R = [0.02275013, 0.03593032, 0.05479929, 0.08075666, 0.11506967,
0.15865525, 0.2118554, 0.27425312, 0.34457826, 0.42074029, 0.5,
0.57925971, 0.65542174, 0.72574688, 0.7881446, 0.84134475,
0.88493033, 0.91924334, 0.94520071, 0.96406968]
A = (numpy.arange(20) - 10.) / 5
X = normal_cdf(A)
msg = ('CDF was not correct. I got %s but expected %s' %
(str(X), str(R)))
assert numpy.allclose(X, R, atol=1.0e-6, rtol=1.0e-12), msg
def test_lognormal_cdf(self):
"""Test Log-normal Cumulative Distribution Function
Reference data obtained from scipy as follows:
A = (numpy.arange(20) - 10.) / 5
R = scipy.stats.lognorm.cdf(A)
"""
# Suppress warnings about invalid value in multiply and divide zero
# http://comments.gmane.org/gmane.comp.python.numeric.general/43218
# http://docs.scipy.org/doc/numpy/reference/generated/numpy.seterr.html
old_numpy_setting = numpy.seterr(divide='ignore')
# Simple tests
x = log_normal_cdf(0.0)
r = normal_cdf(numpy.log(0.0))
msg = 'Expected %.12f, but got %.12f' % (r, x)
assert numpy.allclose(x, r, rtol=1.0e-6, atol=1.0e-12), msg
numpy.seterr(**old_numpy_setting)
x = log_normal_cdf(0.5)
r = normal_cdf(numpy.log(0.5))
msg = 'Expected %.12f, but got %.12f' % (r, x)
assert numpy.allclose(x, r, rtol=1.0e-6, atol=1.0e-12), msg
x = log_normal_cdf(3.50)
r = normal_cdf(numpy.log(3.5))
msg = 'Expected %.12f, but got %.12f' % (r, x)
assert numpy.allclose(x, r, rtol=1.0e-6, atol=1.0e-12), msg
# Out of bounds
x = log_normal_cdf(10)
r = normal_cdf(numpy.log(10))
msg = 'Expected %.12f, but got %.12f' % (r, x)
assert numpy.allclose(x, r, rtol=1.0e-6, atol=1.0e-6), msg
if __name__ == '__main__':
suite = unittest.makeSuite(Test_Engine, 'test')
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite)
| gpl-3.0 | -143,772,507,902,541,090 | 37.682293 | 79 | 0.55556 | false |
stormi/tsunami | src/primaires/salle/commandes/etendue/creer.py | 1 | 2728 | # -*-coding:Utf-8 -*
# Copyright (c) 2010 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant le paramètre 'créer' de la commande 'étendue'."""
from primaires.interpreteur.masque.parametre import Parametre
class PrmCreer(Parametre):
"""Commande 'etendue créer'.
"""
def __init__(self):
"""Constructeur du paramètre"""
Parametre.__init__(self, "créer", "create")
self.schema = "<cle>"
self.aide_courte = "crée une étendue d'eau"
self.aide_longue = \
"Permet de créer une nouvelle étendue d'eau. Cette commande " \
"prend en paramètre la clé de l'étendue à créer (ne doit pas " \
"déjà exister)."
def interpreter(self, personnage, dic_masques):
"""Interprétation du paramètre"""
cle = dic_masques["cle"].cle
        # Check that this expanse does not already exist
if cle in type(self).importeur.salle.etendues.keys():
personnage << "|err|Cette clé {} existe déjà.|ff|".format(
repr(cle))
return
type(self).importeur.salle.creer_etendue(cle)
personnage << "L'étendue {} a bien été créée.".format(repr(cle))
| bsd-3-clause | 886,097,803,153,249,500 | 43.245902 | 79 | 0.698036 | false |
sha-red/django-shared-utils | shared/utils/management/commands/fix_proxymodel_permissions.py | 1 | 1509 | # -*- coding: utf-8 -*-
"""Add permissions for proxy model.
This is needed because of the bug https://code.djangoproject.com/ticket/11154
in Django (as of 1.6, it's not fixed).
When a permission is created for a proxy model, it actually creates if for it's
base model app_label (eg: for "article" instead of "about", for the About proxy
model).
What we need, however, is that the permission be created for the proxy model
itself, in order to have the proper entries displayed in the admin.
Source: https://gist.github.com/magopian/7543724
"""
import sys
from django.apps import apps
from django.contrib.auth.management import _get_all_permissions
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = "Fix permissions for proxy models."
def handle(self, *args, **options):
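        # Create (or fetch) a ContentType keyed on each model's own app_label
        # and model name -- for proxy models this differs from the concrete
        # model's content type -- and attach the standard permissions to it.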
for model in apps.get_models():
opts = model._meta
ctype, created = ContentType.objects.get_or_create(
app_label=opts.app_label,
model=opts.object_name.lower())
for codename, name in _get_all_permissions(opts):
p, created = Permission.objects.get_or_create(
codename=codename,
content_type=ctype,
defaults={'name': name})
if created:
sys.stdout.write('Adding permission {}\n'.format(p))
| mit | 2,577,458,159,359,952,400 | 34.093023 | 79 | 0.664016 | false |
mlperf/training_results_v0.7 | NVIDIA/benchmarks/ssd/implementations/mxnet/mlperf_log_utils.py | 1 | 2395 | import collections
import os
import subprocess
import numpy as np
from mlperf_logging.mllog import constants as mlperf_constants
from mlperf_logging import mllog
class MPIWrapper(object):
def __init__(self):
self.comm = None
self.MPI = None
def get_comm(self):
if self.comm is None:
import mpi4py
from mpi4py import MPI
self.comm = MPI.COMM_WORLD
self.MPI = MPI
return self.comm
def barrier(self):
c = self.get_comm()
# NOTE: MPI_Barrier is *not* working reliably at scale. Using MPI_Allreduce instead.
#c.Barrier()
val = np.ones(1, dtype=np.int32)
result = np.zeros(1, dtype=np.int32)
c.Allreduce(val, result)
def allreduce(self, x):
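        # Element-wise sum of an int32 array across all ranks; MPI.SUM is the
        # default reduction op for Allreduce, so it is not passed explicitly.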
c = self.get_comm()
rank = c.Get_rank()
val = np.array(x, dtype=np.int32)
result = np.zeros_like(val, dtype=np.int32)
c.Allreduce([val, self.MPI.INT], [result, self.MPI.INT]) #, op=self.MPI.SUM)
return result
def rank(self):
c = self.get_comm()
return c.Get_rank()
mpiwrapper=MPIWrapper()
def all_reduce(v):
return mpiwrapper.allreduce(v)
mllogger = mllog.get_mllogger()
def log_start(*args, **kwargs):
_log_print(mllogger.start, *args, **kwargs)
def log_end(*args, **kwargs):
_log_print(mllogger.end, *args, **kwargs)
def log_event(*args, **kwargs):
_log_print(mllogger.event, *args, **kwargs)
def _log_print(logger, *args, **kwargs):
rank = mpiwrapper.rank()
uniq = kwargs.pop('uniq', True)
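    # With uniq=True (the default) only rank 0 emits the record, so
    # multi-process runs do not produce duplicate MLPerf log lines.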
if 'stack_offset' not in kwargs:
kwargs['stack_offset'] = 3
if 'value' not in kwargs:
kwargs['value'] = None
if (uniq and rank == 0) or (not uniq):
logger(*args, **kwargs)
return
def mlperf_submission_log(benchmark):
num_nodes = os.environ.get('SLURM_JOB_NUM_NODES', 1)
log_event(
key=mlperf_constants.SUBMISSION_BENCHMARK,
value=benchmark,
)
log_event(
key=mlperf_constants.SUBMISSION_ORG,
value='NVIDIA')
log_event(
key=mlperf_constants.SUBMISSION_DIVISION,
value='closed')
log_event(
key=mlperf_constants.SUBMISSION_STATUS,
value='onprem')
log_event(
key=mlperf_constants.SUBMISSION_PLATFORM,
value='{}xSUBMISSION_PLATFORM_PLACEHOLDER'.format(num_nodes))
| apache-2.0 | 3,036,545,671,648,611,000 | 25.032609 | 92 | 0.609186 | false |
jgrillo/zoonomia | test/test_solution.py | 1 | 1543 | import unittest
from zoonomia.solution import (
verify_closure_property, BasisOperator, TerminalOperator, OperatorSet,
Objective, Fitness, Solution
)
class TestVerifyClosureProperty(unittest.TestCase):
def test_verify_closure_property(self):
def add(left, right): return left + right
int_basis = BasisOperator(func=add, signature=(int, int), dtype=int)
float_basis = BasisOperator(
func=add, signature=(float, float), dtype=float
)
int_terminal = TerminalOperator(source=xrange(666), dtype=int)
float_terminal = TerminalOperator(
source=(float(i) for i in xrange(666)), dtype=float
)
terminal_set = OperatorSet(operators=(int_terminal, float_terminal))
result = verify_closure_property(
)
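        # FIXME: this test appears incomplete -- verify_closure_property() is
        # called without arguments (presumably the basis/terminal operators
        # built above) and `result` is never asserted.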
class TestBasisOperator(unittest.TestCase):
def test_basis_operator(self):
raise NotImplementedError() # FIXME
class TestTerminalOperator(unittest.TestCase):
def test_terminal_operator(self):
raise NotImplementedError() # FIXME
class TestOperatorSet(unittest.TestCase):
def test_operator_set(self):
raise NotImplementedError() # FIXME
class TestObjective(unittest.TestCase):
def test_evaluate(self):
raise NotImplementedError() # FIXME
class TestFitness(unittest.TestCase):
def test_equals(self):
raise NotImplementedError() # FIXME
class TestSolution(unittest.TestCase):
def test_solution(self):
raise NotImplementedError() # FIXME
| mit | -1,835,718,517,750,096,600 | 23.109375 | 76 | 0.685029 | false |
andrewyoung1991/abjad | abjad/tools/documentationtools/GraphvizNode.py | 1 | 3593 | # -*- encoding: utf-8 -*-
from abjad.tools.datastructuretools import TreeContainer
from abjad.tools.documentationtools.GraphvizObject import GraphvizObject
class GraphvizNode(TreeContainer, GraphvizObject):
r'''A Graphviz node.
'''
### INITIALIZER ###
def __init__(
self,
attributes=None,
children=None,
name=None,
):
self._children = []
TreeContainer.__init__(
self,
children=children,
name=name,
)
GraphvizObject.__init__(
self,
attributes=attributes,
)
self._edges = set([])
### SPECIAL METHODS ###
def __str__(self):
r'''Gets string representation of Graphviz node.
Returns string.
'''
result = self._graphviz_format_contributions
result = '\n'.join(result)
return result
### PRIVATE PROPERTIES ###
@property
def _graphviz_format_contributions(self):
from abjad.tools import documentationtools
node_def = self._format_value(self.canonical_name)
attributes = self.attributes
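        # A single child that is not a field or group is used verbatim as the
        # node label; otherwise the children are rendered as a record-style
        # struct label joined with '|' separators.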
if len(self) == 1 and not isinstance(self[0], (
documentationtools.GraphvizField,
documentationtools.GraphvizGroup,
)):
attributes['label'] = self[0]
elif len(self):
struct_format_contributions = self._struct_format_contributions
if struct_format_contributions:
attributes['label'] = struct_format_contributions
if len(attributes):
result = []
result.extend(self._format_attribute_list(attributes))
result[0] = '{} {}'.format(node_def, result[0])
return result
return [node_def + ';']
@property
def _node_class(self):
from abjad.tools import documentationtools
prototype = (
documentationtools.GraphvizField,
documentationtools.GraphvizGroup,
documentationtools.GraphvizTable,
)
return prototype
@property
def _struct_format_contributions(self):
result = []
for x in self:
part = x._struct_format_contributions
if part:
result.append(part)
result = ' | '.join(result)
return result
### PUBLIC PROPERTIES ###
@property
def all_edges(self):
r'''Gets edges of this node and those of any field in its field
subtree.
'''
from abjad.tools import documentationtools
edges = set(self.edges)
prototype = (
documentationtools.GraphvizGroup,
documentationtools.GraphvizTable,
documentationtools.GraphvizTableCell,
documentationtools.GraphvizTableRow,
documentationtools.GraphvizTableHorizontalRule,
documentationtools.GraphvizTableVerticalRule,
)
for node in self.nodes[1:]:
if isinstance(node, prototype):
continue
edges.update(node.edges)
return tuple(edges)
@property
def canonical_name(self):
r'''Canonical name of Graphviz node.
Returns string.
'''
if self.name is not None:
return self.name
if self.graph_order:
return 'node_' + '_'.join(str(x) for x in self.graph_order)
return 'node_0'
@property
def edges(self):
r'''Edges of Graphviz node.
Returns tuple.
'''
return tuple(self._edges) | gpl-3.0 | 760,710,467,650,906,200 | 26.860465 | 75 | 0.567214 | false |
DezerteR/PMK-Blender-Scripts | PMK_PBRMaterialProperties.py | 1 | 2250 | import os
import bpy
from bpy.props import *
bl_info = {
"name": "PMK PBR Materials",
"author": "Karol Wajs",
"version": (0, 0, 1),
"blender": (2, 7, 6),
"location": "Viewport",
"description": "Adds panel in material properties that allows editing material PBR properties.",
"category": "Material"
}
def register():
bpy.utils.register_class(OBJECT_PT_MaterialProps)
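    # Attach the property group to every Material datablock so the values are
    # stored in the .blend file and reachable as material.pmk.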
bpy.types.Material.pmk = PointerProperty(type=OBJECT_PT_MaterialProps)
bpy.utils.register_class(OBJECT_PT_MaterialPropsUI)
def unregister():
bpy.utils.unregister_class(OBJECT_PT_MaterialPropsUI)
del bpy.types.Material.pmk
bpy.utils.unregister_class(OBJECT_PT_MaterialProps)
if __name__ == "__main__":
register()
class OBJECT_PT_MaterialPropsUI(bpy.types.Panel):
bl_label = "PMK"
bl_idname = "OBJECT_PT_MaterialPropsUI"
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_options = {'DEFAULT_CLOSED'}
bl_context = "material"
def draw(self, context):
layout = self.layout
obj = context.object
layout.row().prop(obj.active_material.pmk, "roughness")
layout.row().prop(obj.active_material.pmk, "metallic")
layout.row().prop(obj.active_material.pmk, "reflectance")
layout.row().prop(obj.active_material.pmk, "clearCoat")
layout.row().prop(obj.active_material.pmk, "clearCoatRoughness")
layout.row().prop(obj.active_material.pmk, "anisotropy")
layout.row().prop(obj.active_material.pmk, "emissive")
class OBJECT_PT_MaterialProps(bpy.types.PropertyGroup):
    '''Common material properties'''
roughness = FloatProperty(default = 0.6, name = 'Roughness', min = 0.0, max = 1.0)
metallic = FloatProperty(default = 0, name = 'Metallic', min = 0.0, max = 1.0)
reflectance = FloatProperty(default = 0.5, name = 'Reflectance', min = 0.0, max = 1.0)
clearCoat = FloatProperty(default = 0, name = 'ClearCoat', min = 0.0, max = 1.0)
clearCoatRoughness = FloatProperty(default = 0.1, name = 'ClearCoat Roughness', min = 0.0, max = 1.0)
anisotropy = FloatProperty(default = 0.0, name = 'Anisotropy', min = 0.0, max = 1.0)
emissive = FloatProperty(default = 0.0, name = 'Emissive', min = 0.0, max = 1.0)
| mit | -4,388,128,225,458,695,000 | 39.909091 | 105 | 0.659111 | false |
hshindo/POS-Tagging-benchmark | Theano/lstm.py | 1 | 3354 | __author__ = 'hiroki'
import numpy as np
import theano
import theano.tensor as T
from nn_utils import sigmoid, tanh, sample_weights
class LSTM(object):
def __init__(self,
w,
d,
n_layer,
vocab_size,
n_in,
n_hidden = 50,
n_i = 50,
n_c = 50,
n_o = 50,
n_f = 50,
n_y = 45,
activation=tanh):
self.w = w
self.d = d
self.activation = activation
"""embeddings"""
self.emb = theano.shared(sample_weights(vocab_size, n_in))
"""input gate parameters"""
self.W_xi = theano.shared(sample_weights(n_in, n_i))
self.W_hi = theano.shared(sample_weights(n_hidden, n_i))
"""forget gate parameters"""
self.W_xf = theano.shared(sample_weights(n_in, n_f))
self.W_hf = theano.shared(sample_weights(n_hidden, n_f))
"""cell parameters"""
self.W_xc = theano.shared(sample_weights(n_in, n_c))
self.W_hc = theano.shared(sample_weights(n_hidden, n_c))
"""output gate parameters"""
self.W_xo = theano.shared(sample_weights(n_in, n_o))
self.W_ho = theano.shared(sample_weights(n_hidden, n_o))
"""output parameters"""
self.W_hy = theano.shared(sample_weights(n_hidden, n_y))
self.c0 = theano.shared(np.zeros(n_hidden, dtype=theano.config.floatX))
self.h0 = self.activation(self.c0)
self.params = [self.W_xi, self.W_hi, self.W_xf, self.W_hf, self.W_xc,
self.W_hc, self.W_xo, self.W_ho, self.W_hy, self.c0]
self.x = self.emb[self.w]
self.layer_output = self.layers(n_layers=n_layer)
self.y, _ = theano.scan(fn=self.output_forward,
sequences=self.layer_output[-1],
outputs_info=[None])
self.y = self.y[::-1]
self.p_y_given_x = self.y.reshape((self.y.shape[0], self.y.shape[2]))
self.nll = -T.mean(T.log(self.p_y_given_x)[T.arange(d.shape[0]), d])
self.y_pred = T.argmax(self.p_y_given_x, axis=1)
self.errors = T.neq(self.y_pred, d)
def layers(self, n_layers=2):
layer_output = []
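        # Stack the LSTM layers; every layer after the first consumes the
        # previous layer's hidden states in reverse order, so successive
        # layers scan the sequence in alternating directions.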
for i in xrange(n_layers):
if i == 0:
layer_input = self.x
else:
layer_input = layer_output[-1][::-1]
[h, c], _ = theano.scan(fn=self.forward,
sequences=layer_input,
outputs_info=[self.h0, self.c0])
layer_output.append(h)
return layer_output
def forward(self, x_t, h_tm1, c_tm1):
'''
sequences: x_t
prior results: h_tm1, c_tm1
'''
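        # Standard LSTM gate equations, except that the previous cell state
        # c_tm1 is added directly (unweighted) to the input and forget gate
        # pre-activations instead of using separate peephole weight matrices.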
i_t = sigmoid(T.dot(x_t, self.W_xi) + T.dot(h_tm1, self.W_hi) + c_tm1)
f_t = sigmoid(T.dot(x_t, self.W_xf) + T.dot(h_tm1, self.W_hf) + c_tm1)
c_t = f_t * c_tm1 + i_t * self.activation(T.dot(x_t, self.W_xc) + T.dot(h_tm1, self.W_hc))
o_t = sigmoid(T.dot(x_t, self.W_xo) + T.dot(h_tm1, self.W_ho) + c_t)
h_t = o_t * self.activation(c_t)
return h_t, c_t
def output_forward(self, h_t):
y_t = T.nnet.softmax(T.dot(h_t, self.W_hy))
return y_t
| mit | 4,942,429,985,262,088,000 | 32.878788 | 98 | 0.501491 | false |
TeamHG-Memex/hgprofiler | lib/cli/run_server.py | 1 | 1340 | import app
import cli
class RunServerCli(cli.BaseCli):
""" A tool for running a development server. """
def _get_args(self, arg_parser):
""" Customize arguments. """
arg_parser.add_argument(
'--debug',
action='store_true',
help='Enable debug mode: errors produce stack traces and' \
' the server auto reloads on source code changes.'
)
arg_parser.add_argument(
'--debug-db',
action='store_true',
help='Print database queries.'
)
arg_parser.add_argument(
'--ip',
default='127.0.0.1',
help='Specify an IP address to bind to. (Defaults to loopback.)'
)
arg_parser.add_argument(
'--latency',
type=float,
metavar='L',
help='Delay each request by <L> seconds.'
)
def _run(self, args, config):
""" Main entry point. """
flask_app = app.bootstrap(
debug=args.debug,
debug_db=args.debug_db,
latency=args.latency,
log_level=args.verbosity
)
# Disable secure cookies for the development server.
flask_app.config["SESSION_COOKIE_SECURE"] = False
flask_app.run(host=args.ip, threaded=True)
| apache-2.0 | 466,433,427,109,874,750 | 26.346939 | 76 | 0.526866 | false |
Yelp/synapse-tools | src/synapse_tools/haproxy/qdisc_tool.py | 1 | 4938 | # -*- coding: utf-8 -*-
""" Command line interface for working with qdiscs """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
import subprocess
import sys
import argparse
from synapse_tools.haproxy.qdisc_util import check_setup
from synapse_tools.haproxy.qdisc_util import clear
from synapse_tools.haproxy.qdisc_util import manage_plug
from synapse_tools.haproxy.qdisc_util import needs_setup
from synapse_tools.haproxy.qdisc_util import setup
from synapse_tools.haproxy.qdisc_util import stat
from pwd import getpwnam
log = logging.getLogger(__name__)
# We run haproxy on localhost (yocalhost shares the lo interface)
INTERFACE_NAME = 'lo'
# Traffic comes from the yocalhost IP
SOURCE_IP = '169.254.255.254'
# Log format for logging to console
CONSOLE_FORMAT = '%(asctime)s - %(name)-12s: %(levelname)-8s %(message)s'
def stat_cmd(
args: argparse.Namespace,
) -> int:
return stat(INTERFACE_NAME)
def check_setup_cmd(
args: argparse.Namespace,
) -> int:
return check_setup(INTERFACE_NAME)
def manage_plug_cmd(
args: argparse.Namespace,
) -> int:
if args.action == 'plug':
manage_plug(INTERFACE_NAME, enable_plug=True)
elif args.action == 'unplug':
manage_plug(INTERFACE_NAME, enable_plug=False)
else:
return 1
return 0
def needs_setup_cmd(
args: argparse.Namespace,
) -> int:
return needs_setup(INTERFACE_NAME)
def setup_cmd(
args: argparse.Namespace,
) -> int:
return setup(INTERFACE_NAME, SOURCE_IP)
def clear_cmd(
args: argparse.Namespace,
) -> int:
return clear(INTERFACE_NAME, SOURCE_IP)
def drop_perms() -> None:
user = getpwnam(os.environ.get('SUDO_USER', 'nobody'))
uid = user.pw_uid
gid = user.pw_gid
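    # Drop root: clear supplementary groups and switch gid first; setuid must come
    # last, otherwise we would lose the permission to change the other ids.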
os.setgroups([])
os.setgid(gid)
os.setuid(uid)
def protect_call_cmd(
args: argparse.Namespace,
) -> int:
if os.getuid() != 0:
print('Only root can execute protected binaries')
return 1
try:
try:
manage_plug(INTERFACE_NAME, enable_plug=True)
except Exception:
# If we fail to plug, it is no big deal, we might
# drop some traffic but let's not fail to run the
# command
log.exception('Failed to enable plug')
subprocess.check_call(
[args.cmd] + args.args,
preexec_fn=drop_perms
)
finally:
# Netlink comms can be unreliable according to the manpage,
# so do some retries to ensure we really turn off the plug
# It would be really bad if we do not turn off the plug
for i in range(3):
try:
manage_plug(INTERFACE_NAME, enable_plug=False)
break
except Exception:
log.exception('Failed to disable plug, try #%d' % i)
return 0
def parse_options() -> argparse.Namespace:
parser = argparse.ArgumentParser(epilog=(
'Setup QoS queueing disciplines for haproxy'
))
parser.add_argument('--verbose', '-v', action='store_true')
subparsers = parser.add_subparsers()
stat_parser = subparsers.add_parser(
'stat', help='Show current qdisc and iptables setup')
stat_parser.set_defaults(func=stat_cmd)
check_parser = subparsers.add_parser(
'check', help='Check qdisc and iptables are as expected')
check_parser.set_defaults(func=check_setup_cmd)
needs_setup_parser = subparsers.add_parser(
'needs_setup', help='Check if qdisc and iptables need setup')
needs_setup_parser.set_defaults(func=needs_setup_cmd)
setup_parser = subparsers.add_parser(
'setup', help='Setup the qdisc')
setup_parser.set_defaults(func=setup_cmd)
clear_parser = subparsers.add_parser(
'clear', help='Clear the qdisc and iptables')
clear_parser.set_defaults(func=clear_cmd)
plug_parser = subparsers.add_parser(
'manage_plug', help='Manage the plug lane')
plug_parser.add_argument(
'action', choices=('plug', 'unplug'),
help='Plug or unplug traffic on the plug qdisc')
plug_parser.set_defaults(func=manage_plug_cmd)
protect_parser = subparsers.add_parser(
'protect', help='Run a command while network traffic is blocked')
protect_parser.add_argument(
dest='cmd', help='Command to run while traffic is blocked')
protect_parser.add_argument(
'args', nargs=argparse.REMAINDER)
protect_parser.set_defaults(func=protect_call_cmd)
return parser.parse_args()
def setup_logging(
args: argparse.Namespace,
) -> None:
if args.verbose:
level = logging.DEBUG
else:
level = logging.INFO
logging.basicConfig(level=level, format=CONSOLE_FORMAT)
def main() -> None:
args = parse_options()
setup_logging(args)
sys.exit(args.func(args))
if __name__ == '__main__':
main()
| apache-2.0 | -6,004,412,169,259,848,000 | 26.281768 | 73 | 0.654516 | false |
veltri/DLV2 | tests/parser/bug.02.test.py | 1 | 1041 | input = """
colored(2,g) :- not diff_col(2,g).
colored(2,y) :- not diff_col(2,y).
colored(3,g) :- not diff_col(3,g).
colored(3,y) :- not diff_col(3,y).
diff_col(2,g) :- colored(2,y).
diff_col(3,g) :- colored(3,y).
diff_col(2,y) :- colored(2,g).
diff_col(3,y) :- colored(3,g).
no_stable :- colored(2,2), colored(3,2), not no_stable.
no_stable :- colored(2,3), colored(3,3), not no_stable.
no_stable :- colored(2,g), colored(3,g), not no_stable.
no_stable :- colored(2,y), colored(3,y), not no_stable.
"""
output = """
colored(2,g) :- not diff_col(2,g).
colored(2,y) :- not diff_col(2,y).
colored(3,g) :- not diff_col(3,g).
colored(3,y) :- not diff_col(3,y).
diff_col(2,g) :- colored(2,y).
diff_col(3,g) :- colored(3,y).
diff_col(2,y) :- colored(2,g).
diff_col(3,y) :- colored(3,g).
no_stable :- colored(2,2), colored(3,2), not no_stable.
no_stable :- colored(2,3), colored(3,3), not no_stable.
no_stable :- colored(2,g), colored(3,g), not no_stable.
no_stable :- colored(2,y), colored(3,y), not no_stable.
"""
| apache-2.0 | -6,610,001,634,986,753,000 | 30.65625 | 55 | 0.594621 | false |
iotaledger/iota.lib.py | iota/multisig/commands/prepare_multisig_transfer.py | 1 | 5102 | from typing import List, Optional
import filters as f
from iota import Address, ProposedTransaction
from iota.commands import FilterCommand, RequestFilter
from iota.commands.core import GetBalancesCommand
from iota.exceptions import with_context
from iota.filters import Trytes
from iota.multisig.transaction import ProposedMultisigBundle
from iota.multisig.types import MultisigAddress
__all__ = [
'PrepareMultisigTransferCommand',
]
class PrepareMultisigTransferCommand(FilterCommand):
"""
Implements `prepare_multisig_transfer` multisig API command.
References:
- :py:meth:`iota.multisig.api.MultisigIota.prepare_multisig_transfer`
"""
command = 'prepareMultisigTransfer'
def get_request_filter(self) -> 'PrepareMultisigTransferRequestFilter':
return PrepareMultisigTransferRequestFilter()
def get_response_filter(self):
pass
async def _execute(self, request: dict) -> dict:
change_address: Optional[Address] = request['changeAddress']
multisig_input: MultisigAddress = request['multisigInput']
transfers: List[ProposedTransaction] = request['transfers']
bundle = ProposedMultisigBundle(transfers)
want_to_spend = bundle.balance
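        # A positive bundle balance at this point means the transfers spend more
        # IOTAs than the inputs added so far (none yet) can cover, so the multisig
        # input's confirmed balance has to be checked first.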
if want_to_spend > 0:
gb_response = await GetBalancesCommand(self.adapter)(
addresses=[multisig_input],
)
multisig_input.balance = gb_response['balances'][0]
if multisig_input.balance < want_to_spend:
raise with_context(
exc=ValueError(
'Insufficient balance; found {found}, need {need} '
'(``exc.context`` has more info).'.format(
found=multisig_input.balance,
need=want_to_spend,
),
),
# The structure of this context object is intended
# to match the one from ``PrepareTransferCommand``.
context={
'available_to_spend': multisig_input.balance,
'confirmed_inputs': [multisig_input],
'request': request,
'want_to_spend': want_to_spend,
},
)
bundle.add_inputs([multisig_input])
if bundle.balance < 0:
if change_address:
bundle.send_unspent_inputs_to(change_address)
else:
#
# Unlike :py:meth:`iota.api.Iota.prepare_transfer`
# where all of the inputs are owned by the same
# seed, creating a multisig transfer usually
# involves multiple people.
#
# It would be unfair to the participants of the
# transaction if we were to automatically generate a
# change address using the seed of whoever happened
# to invoke the
# :py:meth:`MultisigIota.prepare_multisig_transfer`
# method!
#
raise with_context(
exc=ValueError(
'Bundle has unspent inputs, '
'but no change address specified.',
),
context={
'available_to_spend': multisig_input.balance,
'balance': bundle.balance,
'confirmed_inputs': [multisig_input],
'request': request,
'want_to_spend': want_to_spend,
},
)
else:
raise with_context(
exc=ValueError(
'Use ``prepare_transfer`` '
'to create a bundle without spending IOTAs.',
),
context={
'request': request,
},
)
bundle.finalize()
# Return the bundle with inputs unsigned.
return {
'trytes': bundle.as_tryte_strings(),
}
class PrepareMultisigTransferRequestFilter(RequestFilter):
def __init__(self) -> None:
super(PrepareMultisigTransferRequestFilter, self).__init__(
{
'changeAddress': Trytes(Address),
'multisigInput': f.Required | f.Type(MultisigAddress),
'transfers':
f.Required | f.Array | f.FilterRepeater(
f.Required | f.Type(ProposedTransaction),
),
},
allow_missing_keys={
'changeAddress',
},
)
| mit | -2,375,090,809,166,574,000 | 35.971014 | 83 | 0.488828 | false |
hsolbrig/SNOMEDToOWL | SNOMEDCTToOWL/RF2Files/Transitive.py | 1 | 2725 | from typing import Dict, Set
from SNOMEDCTToOWL.SNOMEDToOWLConstants import RelationshipFilePrefix
class Transitive:
relationship_prefix = RelationshipFilePrefix
def __init__(self):
self._children = {} # parent -> set(children) Dict[int, Set[int]]
self._parents = {} # child -> set(parents) Dict[int, Set[int]]
self.__desc_cache = {} # parent -> set(descendants)
self.__ancestor_cache = {} # child -> set(ancestors)
@classmethod
def filtr(cls, fname: str) -> bool:
"""
        Return true if this is a computed relationship file. Transitivity is always based on computed relationships.
:param fname: file name to test
:return: true if it should be processed
"""
return fname.startswith(cls.relationship_prefix)
def add(self, row: Dict) -> None:
"""
Add an RF2 relationship row to the Transitive file
:param row: row to add -- already tested for active
"""
child = int(row["sourceId"])
parent = int(row["destinationId"])
self._children.setdefault(parent, set()).add(child)
self._parents.setdefault(child, set()).add(parent)
def descendants_of(self, parent: int) -> Set[int]:
"""
Return all descendants of parent
:param parent: parent concept
:return: set of concepts
"""
return self._children.get(parent, set())\
.union(*[self.descendants_of(x) for x in self._children.get(parent, set())])
def is_descendant_of(self, desc: int, parent: int) -> bool:
"""
Determine whether desc is a descendant of parent
:param desc: descendant to test
:param parent: parent concept
:return: True or False
"""
if parent not in self.__desc_cache:
self.__desc_cache[parent] = self.descendants_of(parent)
return desc in self.__desc_cache[parent]
def is_descendant_or_self_of(self, desc: int, parent: int) -> bool:
"""
Determine whether desc is a descendant of the parent or is the parent itself
:param desc: descendant to test
:param parent: parent concept
:return: True or False
"""
return self.is_descendant_of(desc, parent) or desc == parent
def ancestors_of(self, child: int) -> Set[int]:
return self._parents.get(child, set())\
.union(*[self.ancestors_of(x) for x in self._parents.get(child, set())])
def is_ancestor_of(self, ancestor: int, child: int) -> bool:
if child not in self.__ancestor_cache:
self.__ancestor_cache[child] = self.ancestors_of(child)
return ancestor in self.__ancestor_cache[child]
| apache-2.0 | -6,966,169,800,768,780,000 | 38.492754 | 102 | 0.605872 | false |
testvidya11/ejrf | questionnaire/views/questions.py | 1 | 2642 | from django.contrib import messages
from django.core.urlresolvers import reverse, reverse_lazy
from django.http import HttpResponseRedirect
from django.views.generic import ListView, CreateView, DeleteView
from questionnaire.forms.questions import QuestionForm
from questionnaire.models import Question, Questionnaire
class QuestionList(ListView):
template_name = 'questions/index.html'
model = Question
object_list = Question.objects.all()
def get(self, *args, **kwargs):
finalized_questionnaire = Questionnaire.objects.filter(status=Questionnaire.FINALIZED)
active_questions = None
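        # Only questions belonging to the most recently finalized questionnaire
        # are treated as active.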
if finalized_questionnaire.exists():
active_questions = finalized_questionnaire.latest('created').get_all_questions()
context = {'request': self.request, 'questions': self.model.objects.all(), 'active_questions': active_questions}
return self.render_to_response(context)
class CreateQuestion(CreateView):
def __init__(self, **kwargs):
super(CreateQuestion, self).__init__(**kwargs)
self.template_name = 'questions/new.html'
self.object = Question
self.model = Question
self.form_class = QuestionForm
self.form = None
def get_context_data(self, **kwargs):
context = super(CreateQuestion, self).get_context_data(**kwargs)
context.update({'btn_label': 'CREATE', 'id': 'id-new-question-form'})
return context
def post(self, request, *args, **kwargs):
self.form = QuestionForm(data=request.POST)
if self.form.is_valid():
return self._form_valid()
return self._form_invalid()
def _form_valid(self):
self.form.save()
messages.success(self.request, "Question successfully created.")
return HttpResponseRedirect(reverse('list_questions_page'))
def _form_invalid(self):
messages.error(self.request, "Question NOT created. See errors below.")
context = {'form': self.form, 'btn_label': "CREATE", 'id': 'id-new-question-form'}
return self.render_to_response(context)
class DeleteQuestion(DeleteView):
model = Question
def post(self, *args, **kwargs):
question = self.model.objects.get(pk=kwargs['question_id'])
if question.can_be_deleted():
question.delete()
messages.success(self.request, "Question was deleted successfully")
return HttpResponseRedirect(reverse_lazy('list_questions_page'))
messages.error(self.request, "Question was not deleted because it has responses")
return HttpResponseRedirect(reverse_lazy('list_questions_page')) | bsd-3-clause | -4,488,295,597,814,953,000 | 40.296875 | 120 | 0.681681 | false |
ktnyt/chainer | chainer/training/extensions/variable_statistics_plot.py | 1 | 13261 | from __future__ import division
import os
import warnings
import numpy
import six
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer.training import extension
from chainer.training import trigger as trigger_module
_available = None
def _try_import_matplotlib():
global matplotlib, _available
global _plot_color, _plot_color_trans, _plot_common_kwargs
try:
import matplotlib
_available = True
except ImportError:
_available = False
if _available:
if hasattr(matplotlib.colors, 'to_rgba'):
_to_rgba = matplotlib.colors.to_rgba
else:
# For matplotlib 1.x
_to_rgba = matplotlib.colors.ColorConverter().to_rgba
_plot_color = _to_rgba('#1f77b4') # C0 color
_plot_color_trans = _plot_color[:3] + (0.2,) # apply alpha
_plot_common_kwargs = {
'alpha': 0.2, 'linewidth': 0, 'color': _plot_color_trans}
def _check_available():
if _available is None:
_try_import_matplotlib()
if not _available:
warnings.warn('matplotlib is not installed on your environment, '
'so nothing will be plotted at this time. '
'Please install matplotlib to plot figures.\n\n'
' $ pip install matplotlib\n')
def _unpack_variables(x, memo=None):
if memo is None:
memo = ()
if isinstance(x, chainer.Variable):
memo += (x,)
elif isinstance(x, chainer.Link):
memo += tuple(x.params(include_uninit=True))
elif isinstance(x, (list, tuple)):
for xi in x:
memo += _unpack_variables(xi)
return memo
class Reservoir(object):
"""Reservoir sample with a fixed sized buffer."""
def __init__(self, size, data_shape, dtype=numpy.float32):
self.size = size
self.data = numpy.zeros((size,) + data_shape, dtype=dtype)
self.idxs = numpy.zeros((size,), dtype=numpy.int32)
self.counter = 0
def add(self, x, idx=None):
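        # Classic reservoir sampling: once the buffer is full, the (counter+1)-th
        # item replaces a random slot with probability size/(counter+1), so every
        # item seen so far stays in the sample with equal probability.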
if self.counter < self.size:
self.data[self.counter] = x
self.idxs[self.counter] = idx or self.counter
elif self.counter >= self.size and \
numpy.random.random() < self.size / float(self.counter + 1):
i = numpy.random.randint(self.size)
self.data[i] = x
self.idxs[i] = idx or self.counter
self.counter += 1
def get_data(self):
idxs = self.idxs[:min(self.counter, self.size)]
sorted_args = numpy.argsort(idxs)
return idxs[sorted_args], self.data[sorted_args]
class Statistician(object):
"""Helper to compute basic NumPy-like statistics."""
def __init__(self, collect_mean, collect_std, percentile_sigmas):
self.collect_mean = collect_mean
self.collect_std = collect_std
self.percentile_sigmas = percentile_sigmas
def __call__(self, x, axis=0, dtype=None, xp=None):
if axis is None:
axis = tuple(range(x.ndim))
elif not isinstance(axis, (tuple, list)):
axis = axis,
return self.collect(x, axis)
def collect(self, x, axis):
out = dict()
if self.collect_mean:
out['mean'] = x.mean(axis=axis)
if self.collect_std:
out['std'] = x.std(axis=axis)
if self.percentile_sigmas:
xp = backend.get_array_module(x)
if xp is numpy:
p = numpy.percentile(x, self.percentile_sigmas, axis=axis)
else:
# TODO(hvy): Use percentile from CuPy once it is supported
p = cuda.to_gpu(
numpy.percentile(
cuda.to_cpu(x), self.percentile_sigmas, axis=axis))
out['percentile'] = p
return out
class VariableStatisticsPlot(extension.Extension):
"""Trainer extension to plot statistics for :class:`Variable`\\s.
This extension collects statistics for a single :class:`Variable`, a list
of :class:`Variable`\\s or similarly a single or a list of
:class:`Link`\\s containing one or more :class:`Variable`\\s. In case
multiple :class:`Variable`\\s are found, the means are computed. The
collected statistics are plotted and saved as an image in the directory
specified by the :class:`Trainer`.
Statistics include mean, standard deviation and percentiles.
This extension uses reservoir sampling to preserve memory, using a fixed
size running sample. This means that collected items in the sample are
discarded uniformly at random when the number of items becomes larger
than the maximum sample size, but each item is expected to occur in the
sample with equal probability.
Args:
targets (:class:`Variable`, :class:`Link` or list of either):
Parameters for which statistics are collected.
max_sample_size (int):
Maximum number of running samples.
report_data (bool):
If ``True``, data (e.g. weights) statistics are plotted. If
``False``, they are neither computed nor plotted.
report_grad (bool):
If ``True``, gradient statistics are plotted. If ``False``, they
are neither computed nor plotted.
plot_mean (bool):
If ``True``, means are plotted. If ``False``, they are
neither computed nor plotted.
plot_std (bool):
If ``True``, standard deviations are plotted. If ``False``, they
are neither computed nor plotted.
percentile_sigmas (float or tuple of floats):
Percentiles to plot in the range :math:`[0, 100]`.
trigger:
Trigger that decides when to save the plots as an image. This is
distinct from the trigger of this extension itself. If it is a
tuple in the form ``<int>, 'epoch'`` or ``<int>, 'iteration'``, it
is passed to :class:`IntervalTrigger`.
file_name (str):
Name of the output image file under the output directory.
figsize (tuple of int):
Matlotlib ``figsize`` argument that specifies the size of the
output image.
marker (str):
            Matplotlib ``marker`` argument that specifies the marker style of
the plots.
grid (bool):
Matplotlib ``grid`` argument that specifies whether grids are
            rendered in the plots or not.
"""
def __init__(self, targets, max_sample_size=1000,
report_data=True, report_grad=True,
plot_mean=True, plot_std=True,
percentile_sigmas=(
0, 0.13, 2.28, 15.87, 50, 84.13, 97.72, 99.87, 100),
trigger=(1, 'epoch'), file_name='statistics.png',
figsize=None, marker=None, grid=True):
if file_name is None:
            raise ValueError('Missing output file name of statistics plot')
self._vars = _unpack_variables(targets)
if len(self._vars) == 0:
raise ValueError(
                'Need at least one variable for which to collect statistics.'
'\nActual: 0 <= 0')
if not any((plot_mean, plot_std, bool(percentile_sigmas))):
raise ValueError('Nothing to plot')
self._keys = []
if report_data:
self._keys.append('data')
if report_grad:
self._keys.append('grad')
self._report_data = report_data
self._report_grad = report_grad
self._statistician = Statistician(
collect_mean=plot_mean, collect_std=plot_std,
percentile_sigmas=percentile_sigmas)
self._plot_mean = plot_mean
self._plot_std = plot_std
self._plot_percentile = bool(percentile_sigmas)
self._trigger = trigger_module.get_trigger(trigger)
self._file_name = file_name
self._figsize = figsize
self._marker = marker
self._grid = grid
if not self._plot_percentile:
n_percentile = 0
else:
if not isinstance(percentile_sigmas, (list, tuple)):
n_percentile = 1 # scalar, single percentile
else:
n_percentile = len(percentile_sigmas)
self._data_shape = (
len(self._keys), int(plot_mean) + int(plot_std) + n_percentile)
self._samples = Reservoir(max_sample_size, data_shape=self._data_shape)
@staticmethod
def available():
_check_available()
return _available
def __call__(self, trainer):
if self.available():
# Dynamically import pyplot to call matplotlib.use()
# after importing chainer.training.extensions
import matplotlib.pyplot as plt
else:
return
xp = backend.get_array_module(self._vars[0].data)
stats = xp.zeros(self._data_shape, dtype=xp.float32)
for i, k in enumerate(self._keys):
xs = []
for var in self._vars:
x = getattr(var, k, None)
if x is not None:
xs.append(x.ravel())
if len(xs) > 0:
stat_dict = self._statistician(
xp.concatenate(xs, axis=0), axis=0, xp=xp)
stat_list = []
if self._plot_mean:
stat_list.append(xp.atleast_1d(stat_dict['mean']))
if self._plot_std:
stat_list.append(xp.atleast_1d(stat_dict['std']))
if self._plot_percentile:
stat_list.append(xp.atleast_1d(stat_dict['percentile']))
stats[i] = xp.concatenate(stat_list, axis=0)
if xp != numpy:
stats = cuda.to_cpu(stats)
self._samples.add(stats, idx=trainer.updater.iteration)
if self._trigger(trainer):
file_path = os.path.join(trainer.out, self._file_name)
self.save_plot_using_module(file_path, plt)
def save_plot_using_module(self, file_path, plt):
nrows = int(self._plot_mean or self._plot_std) \
+ int(self._plot_percentile)
ncols = len(self._keys)
fig, axes = plt.subplots(
nrows, ncols, figsize=self._figsize, sharex=True)
if not isinstance(axes, numpy.ndarray): # single subplot
axes = numpy.asarray([axes])
if nrows == 1:
axes = axes[None, :]
elif ncols == 1:
axes = axes[:, None]
assert axes.ndim == 2
idxs, data = self._samples.get_data()
# Offset to access percentile data from `data`
offset = int(self._plot_mean) + int(self._plot_std)
n_percentile = data.shape[-1] - offset
n_percentile_mid_floor = n_percentile // 2
n_percentile_odd = n_percentile % 2 == 1
for col in six.moves.range(ncols):
row = 0
ax = axes[row, col]
ax.set_title(self._keys[col]) # `data` or `grad`
if self._plot_mean or self._plot_std:
if self._plot_mean and self._plot_std:
ax.errorbar(
idxs, data[:, col, 0], data[:, col, 1],
color=_plot_color, ecolor=_plot_color_trans,
label='mean, std', marker=self._marker)
else:
if self._plot_mean:
label = 'mean'
elif self._plot_std:
label = 'std'
ax.plot(
idxs, data[:, col, 0], color=_plot_color, label=label,
marker=self._marker)
row += 1
if self._plot_percentile:
ax = axes[row, col]
for i in six.moves.range(n_percentile_mid_floor + 1):
if n_percentile_odd and i == n_percentile_mid_floor:
# Enters at most once per sub-plot, in case there is
# only a single percentile to plot or when this
                        # percentile is the mid percentile and the number of
# percentiles are odd
ax.plot(
idxs, data[:, col, offset + i], color=_plot_color,
label='percentile', marker=self._marker)
else:
if i == n_percentile_mid_floor:
# Last percentiles and the number of all
# percentiles are even
label = 'percentile'
else:
label = '_nolegend_'
ax.fill_between(
idxs,
data[:, col, offset + i],
data[:, col, -i - 1],
label=label,
**_plot_common_kwargs)
ax.set_xlabel('iteration')
for ax in axes.ravel():
ax.legend()
if self._grid:
ax.grid()
ax.set_axisbelow(True)
fig.savefig(file_path)
plt.close()
| mit | -7,535,819,348,694,704,000 | 35.938719 | 79 | 0.544529 | false |
Habitissimo/vespapp-web | web/urls.py | 1 | 2169 | from django.conf.urls import url
from web import views
from web.views import HomePageView
from web.views import FAQView
from web.views import SightingExpertCommentsView
from web.views import SightingView
from web.views import SightingsView
from web.views import SightQuestionView
from web.views import LocationsPageView
from web.views import SightingCommentView
from web.views import SightingCommentsView
from web.views import SightExpertCommentView
from web.views import NewSightingView
urlpatterns = [
url(r'^$', HomePageView.as_view(), name='home'),
url(r'^faq/$', FAQView.as_view(), name='faq'),
url(r'^locations/$', LocationsPageView.as_view(), name='locations'),
url(r'^new_sighting/$', NewSightingView.as_view(), name='new_sighting'),
url(r'^sighting/(?P<sighting_id>[0-9]+)/expert_comments/$',
SightingExpertCommentsView.as_view(), name='sighting_expert_comments'),
url(r'^sighting/(?P<sighting_id>[0-9]+)/$', SightingView.as_view(), name="sighting_id"),
url(r'^sightings/$', SightingsView.as_view(), name='sightings'),
url(r'^sight_question/(?P<sighting_id>[0-9]+)/$', SightQuestionView.as_view(), name='sight_question'),
url(r'^sighting/(?P<sighting_id>[0-9]+)/user_comments/$', SightingCommentsView.as_view(), name='sighting_comments'),
url(r'^sighting/(?P<sighting_id>[0-9]+)/user_comment/(?P<comment_id>[0-9]+)/$', SightingCommentView.as_view(),
name='sighting_comment'),
url(r'^sighting/(?P<sighting_id>[0-9]+)/expert_comments/(?P<expert_comment_id>[0-9]+)$',
SightExpertCommentView.as_view(), name='sight_expert_comment'),
]
| gpl-3.0 | -6,261,989,701,574,931,000 | 50.642857 | 120 | 0.693868 | false |
marcocamma/trx | trx/cell.py | 1 | 3337 | # -*- coding: utf-8 -*-
from __future__ import print_function,division,absolute_import
import collections
import itertools
import numpy as np
from numpy import sin,cos
class Triclinic(object):
def __init__(self,a=1,b=1,c=1,alpha=90,beta=90,gamma=90):
self.a = a
self.b = b
self.c = c
alpha = alpha*np.pi/180
beta = beta*np.pi/180
gamma = gamma*np.pi/180
self.alpha = alpha
self.beta = beta
self.gamma = gamma
self._s11 = b**2 * c**2 * sin(alpha)**2
self._s22 = a**2 * c**2 * sin(beta)**2
self._s33 = a**2 * b**2 * sin(gamma)**2
self._s12 = a*b*c**2*(cos(alpha) * cos(beta) - cos(gamma))
self._s23 = a**2*b*c*(cos(beta) * cos(gamma) - cos(alpha))
self._s13 = a*b**2*c*(cos(gamma) * cos(alpha) - cos(beta))
self.V = (a*b*c)*np.sqrt(1-cos(alpha)**2 - cos(beta)**2 - cos(gamma)**2 + 2*cos(alpha)*cos(beta)*cos(gamma))
    def __call__(self,h,k,l): return self.Q(h,k,l)
def d(self,h,k,l):
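        # Standard triclinic metric:
        # 1/d^2 = (S11*h^2 + S22*k^2 + S33*l^2 + 2*S12*h*k + 2*S23*k*l + 2*S13*h*l) / V^2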
temp = self._s11*h**2 + \
self._s22*k**2 + \
self._s33*l**2 + \
2*self._s12*h*k+ \
2*self._s23*k*l+ \
2*self._s13*h*l
d = self.V/np.sqrt(temp)
return d
def Q(self,h,k,l):
return 2*np.pi/self.d(h,k,l)
def reflection_list(self,maxQ=3,lim=10):
ret=dict()
# prepare hkl
i = range(-lim,lim+1)
prod = itertools.product( i,i,i )
hkl = np.asarray( list( itertools.product( i,i,i ) ) )
h,k,l = hkl.T
q = self.Q(h,k,l)
idx = q<maxQ;
q = q[idx]
hkl = hkl[idx]
q = np.round(q,12)
qunique = np.unique(q)
ret = []
for qi in qunique:
reflec = hkl[ q == qi ]
ret.append( (qi,tuple(np.abs(reflec)[0]),len(reflec),reflec) )
return qunique,ret
# for h in range(-lim,lim+1):
# for j in range(-lim,lim+1):
class Orthorombic(Triclinic):
def __init__(self,a=1,b=1,c=1):
Triclinic.__init__(self,a=a,b=b,c=c,alpha=90,beta=90,gamma=90)
class Cubic(Orthorombic):
def __init__(self,a=1):
Orthorombic.__init__(self,a=a,b=a,c=a)
class Monoclinic(Triclinic):
def __init__(self,a=1,b=1,c=1,beta=90.):
Triclinic.__init__(self,a=a,b=b,c=c,alpha=90,beta=beta,gamma=90)
def plotReflections(cell_instance,maxQ=3,ax=None,line_kw=dict(),text_kw=dict()):
import matplotlib.pyplot as plt
from matplotlib import lines
import matplotlib.transforms as transforms
_,refl_info = cell_instance.reflection_list(maxQ=maxQ)
if ax is None: ax = plt.gca()
# the x coords of this transformation are data, and the
# y coord are axes
trans = transforms.blended_transform_factory(ax.transData, ax.transAxes)
txt_kw = dict( horizontalalignment='center', rotation=45)
txt_kw.update(**text_kw)
for reflection in refl_info[1:]:
q,hkl,n,_ = reflection
line = lines.Line2D( [q,q],[1,1.1],transform=trans,**line_kw)
line.set_clip_on(False)
ax.add_line(line)
ax.text(q,1.15,str(hkl),transform=trans,**txt_kw)
ti3o5_lambda = Triclinic(a = 9.83776, b = 3.78674, c = 9.97069, beta = 91.2567)
ti3o5_beta = Triclinic(a = 9.7382 , b = 3.8005 , c = 9.4333 , beta = 91.496)
#ti3o5_beta = Monoclinic(a = 9.7382 , b = 3.8005 , c = 9.4333 , beta = 91.496)
ti3o5_alpha = Triclinic(a = 9.8372, b = 3.7921, c = 9.9717)
ti3o5_alpha1 = Orthorombic(a = 9.8372, b = 3.7921, c = 9.9717)
si = Cubic(a=5.431020504)
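# e.g. si.d(1, 1, 1) ~= 5.431 / sqrt(3) ~= 3.136 angstrom for the cubic silicon cell above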
| mit | 8,474,517,120,379,409,000 | 30.481132 | 115 | 0.594246 | false |
eykd/fuzzy-octo-bear | tests/test_map_loader.py | 1 | 1227 | from unittest import TestCase
from ensure import ensure
from path import path
from fuzzy.map import load_game_map
from fuzzy.rooms import Room
from fuzzy.exits import Exit
PATH = path(__file__).abspath().dirname()
class MapLoaderTests(TestCase):
def setUp(self):
self.filename = PATH / 'rooms.yaml'
def test_it_should_construct_a_map_from_the_yaml_file(self):
start_room = load_game_map(self.filename)
ensure(start_room).is_a(Room)
ensure(start_room.exits).has_length(2)
ensure(start_room.exits).is_a(list).of(Exit)
ensure(start_room.exits[0].target).is_a(Room)
ensure(start_room.exits[0].target).is_not(start_room)
room_3 = start_room.exits[1].target
ensure(room_3.exits).has_length(4)
ensure(room_3.exits).is_a(list).of(Exit)
room_6 = room_3.exits[2].target
ensure(room_6).is_a(Room)
ensure(room_6.exits).has_length(2)
ensure(room_6.description).equals("A nondescript room")
room_7 = room_3.exits[3].target
ensure(room_7).is_a(Room)
ensure(room_7.exits).has_length(2)
ensure(room_7.description).equals("A nondescript room")
ensure(room_6).is_not(room_7)
| gpl-2.0 | -8,923,510,156,966,951,000 | 29.675 | 64 | 0.647107 | false |
aisthesis/opttrack | opttrack/lib/quoteextractor.py | 1 | 2198 | """
.. Copyright (c) 2016 Marshall Farrier
license http://opensource.org/licenses/MIT
Extract specific quotes from a comprehensive DataFrame
Example entry:
{
'Underlying': 'NFLX',
'Strike': 100.0,
'Expiry': datetime.datetime(2016, 3, 18, 23, 0, tzinfo=<bson.tz_util.FixedOffset object at 0x10c4860b8>),
'Opt_Type': 'put',
'Opt_Symbol': 'NFLX160318P00100000',
'Last': 10.25,
'Bid': 9.7,
'Ask': 10.05,
'Vol': 260,
'Open_Int': 23567,
'Quote_Time': datetime.datetime(2016, 2, 22, 16, 0, tzinfo=<DstTzInfo 'US/Eastern' EST-1 day, 19:00:00 STD>)
}
"""
from . import constants
class QuoteExtractor(object):
def __init__(self, logger, underlying, opts, tznyse):
self.logger = logger
self.tznyse = tznyse
self.underlying = underlying
self.opts = opts
def get(self, specs):
return self._extract_all(specs)
def _extract_all(self, specs):
entries = []
self.logger.info('getting {} quote(s) for {}'.format(len(specs), self.underlying))
for spec in specs:
try:
entry = self._extract_one(spec)
except KeyError:
continue
else:
entries.append(entry)
return entries
def _extract_one(self, spec):
entry = spec.copy()
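        # The options DataFrame is indexed by (Strike, Expiry, Opt_Type, ...);
        # normalize the expiry to midnight NYSE time so it matches the index below.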
selection = (spec['Strike'], spec['Expiry'].astimezone(self.tznyse).replace(tzinfo=None,
hour=0, minute=0, second=0), spec['Opt_Type'],)
try:
entry['Opt_Symbol'] = self.opts.data.loc[selection, :].index[0]
opt = self.opts.data.loc[selection, :].iloc[0]
except KeyError as e:
self.logger.exception('option not found for {} with {}'
.format(self.opts.data.iloc[0, :].loc['Underlying'], selection))
raise
entry['Quote_Time'] = self.tznyse.localize(opt['Quote_Time'].to_datetime())
entry['Underlying'] = opt['Underlying']
for key in constants.INT_COLS:
entry[key] = int(opt[key])
for key in constants.FLOAT_COLS:
entry[key] = float(opt[key])
self.logger.debug(entry)
return entry
| mit | -8,155,537,419,479,421,000 | 31.80597 | 112 | 0.575523 | false |
dunkhong/grr | grr/client/grr_response_client/fleetspeak_client_test.py | 1 | 3007 | #!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from absl import app
import mock
from grr_response_client import comms
from grr_response_client import fleetspeak_client
from grr_response_core.lib import communicator
from grr_response_core.lib.rdfvalues import flows as rdf_flows
from grr.test_lib import test_lib
from fleetspeak.src.common.proto.fleetspeak import common_pb2 as fs_common_pb2
from fleetspeak.client_connector import connector as fs_client
class FleetspeakClientTest(test_lib.GRRBaseTest):
@mock.patch.object(fs_client, "FleetspeakConnection")
@mock.patch.object(comms, "GRRClientWorker")
@mock.patch.object(fleetspeak_client, "_MAX_ANNOTATIONS_BYTES", 500)
def testSendMessagesWithAnnotations(self, mock_worker_class, mock_conn_class):
# We stub out the worker class since it starts threads in its
# __init__ method.
del mock_worker_class # Unused
mock_conn = mock.Mock()
mock_conn.Send.return_value = 123
mock_conn_class.return_value = mock_conn
client_id = "C.0123456789abcdef"
flow_id = "01234567"
client = fleetspeak_client.GRRFleetspeakClient()
grr_messages = []
expected_annotations = fs_common_pb2.Annotations()
# 500 bytes translates to ~19 annotations.
while expected_annotations.ByteSize() < 500:
grr_message = rdf_flows.GrrMessage(
session_id="%s/%s" % (client_id, flow_id),
name="TestClientAction",
request_id=2,
response_id=len(grr_messages) + 1)
annotation = expected_annotations.entries.add()
annotation.key = fleetspeak_client._DATA_IDS_ANNOTATION_KEY
annotation.value = "%s:2:%d" % (flow_id, len(grr_messages) + 1)
grr_messages.append(grr_message)
client._sender_queue.put(grr_message)
# Add an extra GrrMessage whose annotation will not be captured.
extra_message = rdf_flows.GrrMessage(
session_id="%s/%s" % (client_id, flow_id),
name="TestClientAction",
request_id=3,
response_id=1)
grr_messages.append(extra_message)
client._sender_queue.put(extra_message)
self.assertLess(
len(grr_messages), fleetspeak_client._MAX_MSG_LIST_MSG_COUNT)
self.assertLess(
sum(len(x.SerializeToBytes()) for x in grr_messages),
fleetspeak_client._MAX_MSG_LIST_BYTES)
client._SendOp()
mock_conn.Send.assert_called_once()
send_args, _ = mock_conn.Send.call_args
fs_message = send_args[0]
packed_message_list = rdf_flows.PackedMessageList.protobuf()
fs_message.data.Unpack(packed_message_list)
message_list = communicator.Communicator.DecompressMessageList(
rdf_flows.PackedMessageList.FromSerializedBytes(
packed_message_list.SerializeToString()))
self.assertListEqual(list(message_list.job), grr_messages)
self.assertEqual(fs_message.annotations, expected_annotations)
if __name__ == "__main__":
app.run(test_lib.main)
| apache-2.0 | 168,468,160,886,910,240 | 36.5875 | 80 | 0.705022 | false |
esvhd/pypbo | tests/test_metrics.py | 1 | 4864 | import pytest
import numpy as np
import pandas as pd
import pypbo as pbo
import pypbo.perf as perf
# TODO test scripts
# e.g.: pytest -s test_metrics.py
# -s switch to allow printing to stdio
def test_log_returns():
'''
Test log return logic. Asserts that cumulative value is the same as
generated data.
'''
# generate data
np.random.seed(7)
tests = pd.Series([1 + np.random.rand() for _ in range(10)])
log_rtns = perf.log_returns(tests, fillna=True)
print(log_rtns)
reconstruct = tests.values[0] * np.exp(log_rtns.cumsum())
print(tests)
print(reconstruct)
assert(np.allclose(tests - reconstruct, 0.))
def test_log_returns_na():
test_data = pd.DataFrame([[1, 2, 3],
[1.2, 2.2, 3.2],
[1.1, np.nan, 2.4],
[1, 2.42, 3.4]])
print(test_data)
log_rtns = perf.log_returns(test_data, n=1, fillna=False)
expects_true = log_rtns.isnull().iloc[2, 1]
print(f'test value = {expects_true}')
assert(expects_true)
print(log_rtns)
expected_val = np.log(2.42) - np.log(2.2)
print(f'expected value = {expected_val}')
assert(np.isclose(log_rtns.iloc[3, 1],
expected_val))
def test_pct_to_log_return():
np.random.seed(7)
tests = pd.Series([1 + np.random.rand() for _ in range(100)])
pct_rtns = tests.pct_change().fillna(0)
log_rtns = perf.pct_to_log_return(pct_rtns)
recon1 = (1 + pct_rtns).cumprod()
recon2 = np.exp(log_rtns.cumsum())
assert(np.allclose(recon1, recon2))
def test_sharpe_iid():
data = np.array([0.259,
.198,
.364,
-.081,
.057,
.055,
.188,
.317,
.24,
.184,
-.01,
.526])
# numpy array
sharpe = perf.sharpe_iid(data, bench=.05, factor=1, log=True)
assert(np.isclose(sharpe, .834364))
sharpe = perf.sharpe_iid(data, bench=.05, factor=1, log=False)
assert(np.isclose(sharpe, .834364))
# below is for computing sharpe ratio with pct returns
# assert(np.isclose(sharpe, 0.8189144744629443))
# turn data to pandas.Series
data = pd.Series(data)
sharpe = perf.sharpe_iid(data, bench=.05, factor=1, log=True)
assert(np.isclose(sharpe, .834364))
sharpe = perf.sharpe_iid(data, bench=.05, factor=252, log=True)
assert(np.isclose(sharpe, .834364 * np.sqrt(252)))
sharpe = perf.sharpe_iid(data, bench=.05, factor=1, log=False)
assert(np.isclose(sharpe, .834364))
# below is for computing sharpe ratio with pct returns
# assert(np.isclose(sharpe, 0.8189144744629443))
def test_sortino_iid():
'''
Test both `sortino_iid` and `sortino`.
'''
data = np.array([.17,
.15,
.23,
-.05,
.12,
.09,
.13,
-.04])
ratio = perf.sortino_iid(data, bench=0, factor=1, log=True)
print(ratio)
assert(np.isclose(ratio, 4.417261))
ratio = perf.sortino(data, target_rtn=0, factor=1, log=True)
assert(np.isclose(ratio, 4.417261))
data = pd.DataFrame(data)
ratio = perf.sortino_iid(data, bench=0, factor=1, log=True)
print(ratio)
assert(np.isclose(ratio, 4.417261))
ratio = perf.sortino_iid(data, bench=0, factor=252, log=True)
print(ratio)
assert(np.isclose(ratio, 4.417261 * np.sqrt(252)))
ratio = perf.sortino(data, target_rtn=0, factor=1, log=True)
assert(np.isclose(ratio, 4.417261))
def test_omega():
'''
Based on numerical example found here:
http://investexcel.net/calculate-the-omega-ratio-with-excel/
'''
data = np.array([.0089,
.0012,
-.002,
.01,
-.0002,
.02,
.03,
.01,
-.003,
.01,
.0102,
-.01])
mar = .01
omega = perf.omega(data, target_rtn=mar, log=True)
assert(np.isclose(omega, .463901689))
# DataFrame version.
df = pd.DataFrame(data)
omega = perf.omega(df, target_rtn=mar, log=True)
assert(np.isclose(omega, .463901689))
def test_annualized_log_return():
log_rtn = 0.51470826725926955
test_val = perf.annualized_log_return(log_rtn, days=827, ann_factor=365.)
assert(np.isclose(test_val, 0.22716870320390978))
def test_annualized_pct_return():
tr = 1.673150317863489
test_val = perf.annualized_pct_return(tr, days=827, ann_factor=365.)
assert(np.isclose(test_val, 0.25504157961707952))
| agpl-3.0 | -6,384,668,815,233,295,000 | 25.434783 | 77 | 0.546258 | false |
quodlibet/quodlibet | quodlibet/_init.py | 1 | 12725 | # Copyright 2012 Christoph Reiter
# 2020 Nick Boultbee
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import os
import sys
import warnings
import logging
from senf import environ, argv, fsn2text
from quodlibet.const import MinVersions
from quodlibet import config
from quodlibet.util import is_osx, is_windows, i18n
from quodlibet.util.dprint import print_e, PrintHandler, print_d
from quodlibet.util.urllib import install_urllib2_ca_file
from ._main import get_base_dir, is_release, get_image_dir, get_cache_dir
_cli_initialized = False
_initialized = False
def _init_gtk_debug(no_excepthook):
from quodlibet.errorreport import enable_errorhook
enable_errorhook(not no_excepthook)
def is_init():
"""Returns if init() was called"""
global _initialized
return _initialized
def init(no_translations=False, no_excepthook=False, config_file=None):
"""This needs to be called before any API can be used.
Might raise in case of an error.
Pass no_translations=True to disable translations (used by tests)
"""
global _initialized
if _initialized:
return
init_cli(no_translations=no_translations, config_file=config_file)
_init_gtk()
_init_gtk_debug(no_excepthook=no_excepthook)
_init_gst()
_init_dbus()
_initialized = True
def _init_gettext(no_translations=False):
"""Call before using gettext helpers"""
if no_translations:
language = u"C"
else:
language = config.gettext("settings", "language")
if language:
print_d(f"Using language in QL settings: {language!r}")
else:
language = None
i18n.init(language)
# Use the locale dir in ../build/share/locale if there is one
localedir = os.path.join(
os.path.dirname(get_base_dir()), "build", "share", "locale")
if os.path.isdir(localedir):
print_d(f"Using local locale dir {localedir}")
else:
localedir = None
i18n.register_translation("quodlibet", localedir)
debug_text = environ.get("QUODLIBET_TEST_TRANS")
if debug_text is not None:
i18n.set_debug_text(fsn2text(debug_text))
def _init_python():
MinVersions.PYTHON3.check(sys.version_info)
if is_osx():
# We build our own openssl on OSX and need to make sure that
# our own ca file is used in all cases as the non-system openssl
# doesn't use the system certs
install_urllib2_ca_file()
if is_windows():
# Not really needed on Windows as pygi-aio seems to work fine, but
# wine doesn't have certs which we use for testing.
install_urllib2_ca_file()
if is_windows() and os.sep != "\\":
# In the MSYS2 console MSYSTEM is set, which breaks os.sep/os.path.sep
# If you hit this do a "setup.py clean -all" to get rid of the
# bytecode cache then start things with "MSYSTEM= ..."
raise AssertionError("MSYSTEM is set (%r)" % environ.get("MSYSTEM"))
logging.getLogger().addHandler(PrintHandler())
def _init_formats():
from quodlibet.formats import init
init()
def init_cli(no_translations=False, config_file=None):
"""This needs to be called before any API can be used.
Might raise in case of an error.
Like init() but for code not using Gtk etc.
"""
global _cli_initialized
if _cli_initialized:
return
_init_python()
config.init_defaults()
if config_file is not None:
config.init(config_file)
_init_gettext(no_translations)
_init_formats()
_init_g()
_cli_initialized = True
def _init_dbus():
"""Setup dbus mainloop integration. Call before using dbus"""
# To make GDBus fail early and we don't have to wait for a timeout
if is_osx() or is_windows():
os.environ["DBUS_SYSTEM_BUS_ADDRESS"] = "something-invalid"
os.environ["DBUS_SESSION_BUS_ADDRESS"] = "something-invalid"
try:
from dbus.mainloop.glib import DBusGMainLoop, threads_init
except ImportError:
try:
import dbus.glib
dbus.glib
except ImportError:
return
else:
threads_init()
DBusGMainLoop(set_as_default=True)
def _fix_gst_leaks():
"""gst_element_add_pad and gst_bin_add are wrongly annotated and lead
to PyGObject refing the passed element.
Work around by adding a wrapper that unrefs afterwards.
Can be called multiple times.
https://bugzilla.gnome.org/show_bug.cgi?id=741390
https://bugzilla.gnome.org/show_bug.cgi?id=702960
"""
from gi.repository import Gst
assert Gst.is_initialized()
def do_wrap(func):
def wrap(self, obj):
result = func(self, obj)
obj.unref()
return result
return wrap
parent = Gst.Bin()
elm = Gst.Bin()
parent.add(elm)
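    # With the broken annotations the child ends up with one reference too many
    # (refcount 3 here); only in that case install the unref-ing wrappers.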
if elm.__grefcount__ == 3:
elm.unref()
Gst.Bin.add = do_wrap(Gst.Bin.add)
pad = Gst.Pad.new("foo", Gst.PadDirection.SRC)
parent.add_pad(pad)
if pad.__grefcount__ == 3:
pad.unref()
Gst.Element.add_pad = do_wrap(Gst.Element.add_pad)
def _init_g():
"""Call before using GdkPixbuf/GLib/Gio/GObject"""
import gi
gi.require_version("GLib", "2.0")
gi.require_version("Gio", "2.0")
gi.require_version("GObject", "2.0")
gi.require_version("GdkPixbuf", "2.0")
# Newer glib is noisy regarding deprecated signals/properties
# even with stable releases.
if is_release():
warnings.filterwarnings(
'ignore', '.* It will be removed in a future version.',
Warning)
# blacklist some modules, simply loading can cause segfaults
sys.modules["glib"] = None
sys.modules["gobject"] = None
def _init_gtk():
"""Call before using Gtk/Gdk"""
import gi
if config.getboolean("settings", "pangocairo_force_fontconfig") and \
"PANGOCAIRO_BACKEND" not in environ:
environ["PANGOCAIRO_BACKEND"] = "fontconfig"
# disable for consistency and trigger events seem a bit flaky here
if config.getboolean("settings", "scrollbar_always_visible"):
environ["GTK_OVERLAY_SCROLLING"] = "0"
try:
# not sure if this is available under Windows
gi.require_version("GdkX11", "3.0")
from gi.repository import GdkX11
GdkX11
except (ValueError, ImportError):
pass
gi.require_version("Gtk", "3.0")
gi.require_version("Gdk", "3.0")
gi.require_version("Pango", "1.0")
gi.require_version('Soup', '2.4')
gi.require_version('PangoCairo', "1.0")
from gi.repository import Gtk
from quodlibet.qltk import ThemeOverrider, gtk_version
# PyGObject doesn't fail anymore when init fails, so do it ourself
initialized, argv[:] = Gtk.init_check(argv)
if not initialized:
raise SystemExit("Gtk.init failed")
# include our own icon theme directory
theme = Gtk.IconTheme.get_default()
theme_search_path = get_image_dir()
assert os.path.exists(theme_search_path)
theme.append_search_path(theme_search_path)
# Force menu/button image related settings. We might show too many atm
# but this makes sure we don't miss cases where we forgot to force them
# per widget.
# https://bugzilla.gnome.org/show_bug.cgi?id=708676
warnings.filterwarnings('ignore', '.*g_value_get_int.*', Warning)
# some day... but not now
warnings.filterwarnings(
'ignore', '.*Stock items are deprecated.*', Warning)
warnings.filterwarnings(
'ignore', '.*:use-stock.*', Warning)
warnings.filterwarnings(
'ignore', r'.*The property GtkAlignment:[^\s]+ is deprecated.*',
Warning)
settings = Gtk.Settings.get_default()
with warnings.catch_warnings():
warnings.simplefilter("ignore")
settings.set_property("gtk-button-images", True)
settings.set_property("gtk-menu-images", True)
if hasattr(settings.props, "gtk_primary_button_warps_slider"):
# https://bugzilla.gnome.org/show_bug.cgi?id=737843
settings.set_property("gtk-primary-button-warps-slider", True)
# Make sure PyGObject includes support for foreign cairo structs
try:
gi.require_foreign("cairo")
except ImportError:
print_e("PyGObject is missing cairo support")
sys.exit(1)
css_override = ThemeOverrider()
if sys.platform == "darwin":
# fix duplicated shadows for popups with Gtk+3.14
style_provider = Gtk.CssProvider()
style_provider.load_from_data(b"""
GtkWindow {
box-shadow: none;
}
.tooltip {
border-radius: 0;
padding: 0;
}
.tooltip.background {
background-clip: border-box;
}
""")
css_override.register_provider("", style_provider)
if gtk_version[:2] >= (3, 20):
# https://bugzilla.gnome.org/show_bug.cgi?id=761435
style_provider = Gtk.CssProvider()
style_provider.load_from_data(b"""
spinbutton, button {
min-height: 22px;
}
.view button {
min-height: 24px;
}
entry {
min-height: 28px;
}
entry.cell {
min-height: 0;
}
""")
css_override.register_provider("Adwaita", style_provider)
css_override.register_provider("HighContrast", style_provider)
# https://github.com/quodlibet/quodlibet/issues/2541
style_provider = Gtk.CssProvider()
style_provider.load_from_data(b"""
treeview.view.separator {
min-height: 2px;
color: @borders;
}
""")
css_override.register_provider("Ambiance", style_provider)
css_override.register_provider("Radiance", style_provider)
# https://github.com/quodlibet/quodlibet/issues/2677
css_override.register_provider("Clearlooks-Phenix", style_provider)
# https://github.com/quodlibet/quodlibet/issues/2997
css_override.register_provider("Breeze", style_provider)
if gtk_version[:2] >= (3, 18):
# Hack to get some grab handle like thing for panes
style_provider = Gtk.CssProvider()
style_provider.load_from_data(b"""
GtkPaned.vertical, paned.vertical >separator {
-gtk-icon-source: -gtk-icontheme("view-more-symbolic");
-gtk-icon-transform: rotate(90deg) scaleX(0.1) scaleY(3);
}
GtkPaned.horizontal, paned.horizontal >separator {
-gtk-icon-source: -gtk-icontheme("view-more-symbolic");
-gtk-icon-transform: rotate(0deg) scaleX(0.1) scaleY(3);
}
""")
css_override.register_provider("", style_provider)
# https://bugzilla.gnome.org/show_bug.cgi?id=708676
warnings.filterwarnings('ignore', '.*g_value_get_int.*', Warning)
# blacklist some modules, simply loading can cause segfaults
sys.modules["gtk"] = None
sys.modules["gpod"] = None
sys.modules["gnome"] = None
from quodlibet.qltk import pygobject_version, gtk_version
MinVersions.GTK.check(gtk_version)
MinVersions.PYGOBJECT.check(pygobject_version)
def _init_gst():
"""Call once before importing GStreamer"""
arch_key = "64" if sys.maxsize > 2**32 else "32"
registry_name = "gst-registry-%s-%s.bin" % (sys.platform, arch_key)
environ["GST_REGISTRY"] = os.path.join(get_cache_dir(), registry_name)
assert "gi.repository.Gst" not in sys.modules
import gi
# We don't want python-gst, it changes API..
assert "gi.overrides.Gst" not in sys.modules
sys.modules["gi.overrides.Gst"] = None
# blacklist some modules, simply loading can cause segfaults
sys.modules["gst"] = None
# We don't depend on Gst overrides, so make sure it's initialized.
try:
gi.require_version("Gst", "1.0")
from gi.repository import Gst
except (ValueError, ImportError):
return
if Gst.is_initialized():
return
from gi.repository import GLib
try:
ok, argv[:] = Gst.init_check(argv)
except GLib.GError:
print_e("Failed to initialize GStreamer")
# Uninited Gst segfaults: make sure no one can use it
sys.modules["gi.repository.Gst"] = None
else:
# monkey patching ahead
_fix_gst_leaks()
| gpl-2.0 | -4,163,567,065,139,508,700 | 29.369928 | 78 | 0.628919 | false |
rojinva/Email-classifier | Text classification with probability.py | 1 | 2058 |
__author__="rojin.varghese"
__date__ ="$Nov 8, 2013 8:48:18 PM$"
import os
from xlrd import open_workbook
import re
import xlwt
j = os.path.join
def train(text):
c = {}
lastword = ""
line = re.sub('[\-#*>]', '', text)
line = re.sub('[\n]', '', line)
for word in line.split():
word = word.lower()
if c.has_key(lastword):
inner = c[lastword]
if inner.has_key(word):
inner[word] += 1
else:
inner[word] = 1
else:
c[lastword] = {word: 1}
lastword = word
return c
def probability_of(dict, lastword, word):
word = word.lower()
if dict.has_key(lastword):
inner = dict[lastword]
sumvalues = sum([v for v in inner.values()])
if inner.has_key(word):
return inner[word] / (sumvalues * 1.0)
return 0
def classify(text, dict):
lastword = ""
probabilities = 0
line = re.sub('[\-#*>]', '', text)
line = re.sub('[\n]', '', line)
for word in line.split():
probabilities += probability_of(dict, lastword, word)
lastword = word
return probabilities / (len(text.split()) * 1.0)
if __name__ == "__main__":
ranking = []
book = open_workbook('C:/Documents and Settings/rojin.varghese/Desktop/Test_mail.xls')
sheet1 = book.sheet_by_index(0)
book1 = xlwt.Workbook()
sh = book1.add_sheet("sheet")
for i in range(sheet1.nrows):
line = sheet1.cell_value(i,1)
line = re.sub('[\-*>]', '', line)
line = re.sub('[\n]', '', line)
for file in os.listdir("C:/Documents and Settings/rojin.varghese/Desktop/ICICI_emails"):
trained = train(open(j("C:/Documents and Settings/rojin.varghese/Desktop/ICICI_emails", file)).read())
value = classify(line, trained)
ranking.append((value, file))
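        # Note: `ranking` is never sorted and never reset per row, so the two cells
        # written below hold the last two files processed rather than the two
        # highest-probability ones; sorting by `value` first is likely what was intended.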
sh.write(i, 0, ranking[-1][1])
sh.write(i, 1, ranking[-2][1])
book1.save("C:/Documents and Settings/rojin.varghese/Desktop/Results/ProbabilityResult.xls")
| unlicense | -6,348,697,365,323,838,000 | 26.44 | 112 | 0.556365 | false |
angr/cle | cle/utils.py | 1 | 3681 | import os
import contextlib
from .errors import CLEError, CLEFileNotFoundError
# https://code.woboq.org/userspace/glibc/include/libc-pointer-arith.h.html#43
def ALIGN_DOWN(base, size):
return base & -size
# https://code.woboq.org/userspace/glibc/include/libc-pointer-arith.h.html#50
def ALIGN_UP(base, size):
return ALIGN_DOWN(base + size - 1, size)
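# e.g. ALIGN_DOWN(0x1234, 0x1000) == 0x1000 and ALIGN_UP(0x1234, 0x1000) == 0x2000
# (the bit trick assumes `size` is a power of two).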
# To verify the mmap behavior you can compile and run the following program. Fact is that mmap file mappings
# always map in the entire page into memory from the file if available. If not, it gets zero padded
# pylint: disable=pointless-string-statement
"""#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/mman.h>
void make_test_file()
{
void* data = (void*)0xdead0000;
int fd = open("./test.data", O_CREAT | O_RDWR, S_IRUSR | S_IWUSR);
for (int i = 0; i < 0x1800; i += sizeof(void*)) // Only write 1 1/2 pages worth
{
write(fd, &data, sizeof(void*));
data += sizeof(void*);
}
close(fd);
}
int main(int argc, char* argv[])
{
make_test_file();
int fd = open("./test.data", O_RDONLY);
unsigned char* mapping = mmap(NULL, 0x123, PROT_READ, MAP_PRIVATE, fd, 4096);
for (int i=0; i < 0x1000; i++)
{
printf("%02x ", mapping[i]);
if (i % sizeof(void*) == (sizeof(void*) - 1))
printf("| ");
if (i % 16 == 15)
printf("\n");
}
}"""
def get_mmaped_data(stream, offset, length, page_size):
if offset % page_size != 0:
raise CLEError("libc helper for mmap: Invalid page offset, should be multiple of page size! Stream {}, offset {}, length: {}".format(stream, offset, length))
read_length = ALIGN_UP(length, page_size)
stream.seek(offset)
data = stream.read(read_length)
return data.ljust(read_length, b'\0')
@contextlib.contextmanager
def stream_or_path(obj, perms='rb'):
if hasattr(obj, 'read') and hasattr(obj, 'seek'):
obj.seek(0)
yield obj
else:
if not os.path.exists(obj):
raise CLEFileNotFoundError("%r is not a valid path" % obj)
with open(obj, perms) as f:
yield f
def key_bisect_floor_key(lst, key, lo=0, hi=None, keyfunc=lambda x: x):
if lo < 0:
raise ValueError('lo must be non-negative')
if hi is None:
hi = len(lst)
while lo < hi:
mid = (lo + hi) // 2
if keyfunc(lst[mid]) <= key:
lo = mid + 1
else:
hi = mid
if lo <= len(lst) and lo > 0:
return lst[lo - 1]
return None
def key_bisect_find(lst, item, lo=0, hi=None, keyfunc=lambda x: x):
if lo < 0:
raise ValueError('lo must be non-negative')
if hi is None:
hi = len(lst)
while lo < hi:
mid = (lo + hi) // 2
if keyfunc(lst[mid]) <= keyfunc(item):
lo = mid + 1
else:
hi = mid
return lo
def key_bisect_insort_left(lst, item, lo=0, hi=None, keyfunc=lambda x: x):
if lo < 0:
raise ValueError('lo must be non-negative')
if hi is None:
hi = len(lst)
while lo < hi:
mid = (lo + hi) // 2
if keyfunc(lst[mid]) < keyfunc(item):
lo = mid + 1
else:
hi = mid
lst.insert(lo, item)
def key_bisect_insort_right(lst, item, lo=0, hi=None, keyfunc=lambda x: x):
if lo < 0:
raise ValueError('lo must be non-negative')
if hi is None:
hi = len(lst)
while lo < hi:
mid = (lo + hi) // 2
if keyfunc(lst[mid]) <= keyfunc(item):
lo = mid + 1
else:
hi = mid
lst.insert(lo, item)
| bsd-2-clause | 2,866,465,400,092,293,600 | 27.984252 | 165 | 0.573485 | false |
googleapis/googleapis-gen | google/cloud/channel/v1/channel-v1-py/google/cloud/channel_v1/types/service.py | 1 | 61206 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.channel_v1.types import channel_partner_links as gcc_channel_partner_links
from google.cloud.channel_v1.types import common
from google.cloud.channel_v1.types import customers as gcc_customers
from google.cloud.channel_v1.types import entitlements as gcc_entitlements
from google.cloud.channel_v1.types import offers as gcc_offers
from google.cloud.channel_v1.types import products as gcc_products
from google.protobuf import field_mask_pb2 # type: ignore
__protobuf__ = proto.module(
package='google.cloud.channel.v1',
manifest={
'CheckCloudIdentityAccountsExistRequest',
'CloudIdentityCustomerAccount',
'CheckCloudIdentityAccountsExistResponse',
'ListCustomersRequest',
'ListCustomersResponse',
'GetCustomerRequest',
'CreateCustomerRequest',
'UpdateCustomerRequest',
'DeleteCustomerRequest',
'ProvisionCloudIdentityRequest',
'ListEntitlementsRequest',
'ListEntitlementsResponse',
'ListTransferableSkusRequest',
'ListTransferableSkusResponse',
'ListTransferableOffersRequest',
'ListTransferableOffersResponse',
'TransferableOffer',
'GetEntitlementRequest',
'ListChannelPartnerLinksRequest',
'ListChannelPartnerLinksResponse',
'GetChannelPartnerLinkRequest',
'CreateChannelPartnerLinkRequest',
'UpdateChannelPartnerLinkRequest',
'CreateEntitlementRequest',
'TransferEntitlementsRequest',
'TransferEntitlementsResponse',
'TransferEntitlementsToGoogleRequest',
'ChangeParametersRequest',
'ChangeRenewalSettingsRequest',
'ChangeOfferRequest',
'StartPaidServiceRequest',
'CancelEntitlementRequest',
'SuspendEntitlementRequest',
'ActivateEntitlementRequest',
'LookupOfferRequest',
'ListProductsRequest',
'ListProductsResponse',
'ListSkusRequest',
'ListSkusResponse',
'ListOffersRequest',
'ListOffersResponse',
'ListPurchasableSkusRequest',
'ListPurchasableSkusResponse',
'PurchasableSku',
'ListPurchasableOffersRequest',
'ListPurchasableOffersResponse',
'PurchasableOffer',
'RegisterSubscriberRequest',
'RegisterSubscriberResponse',
'UnregisterSubscriberRequest',
'UnregisterSubscriberResponse',
'ListSubscribersRequest',
'ListSubscribersResponse',
},
)
class CheckCloudIdentityAccountsExistRequest(proto.Message):
r"""Request message for
[CloudChannelService.CheckCloudIdentityAccountsExist][google.cloud.channel.v1.CloudChannelService.CheckCloudIdentityAccountsExist].
Attributes:
parent (str):
Required. The reseller account's resource name. Parent uses
the format: accounts/{account_id}
domain (str):
Required. Domain to fetch for Cloud Identity
account customer.
"""
parent = proto.Field(
proto.STRING,
number=1,
)
domain = proto.Field(
proto.STRING,
number=2,
)
class CloudIdentityCustomerAccount(proto.Message):
r"""Entity representing a Cloud Identity account that may be
associated with a Channel Services API partner.
Attributes:
existing (bool):
Returns true if a Cloud Identity account
exists for a specific domain.
owned (bool):
Returns true if the Cloud Identity account is
associated with a customer of the Channel
Services partner.
customer_name (str):
If owned = true, the name of the customer that owns the
Cloud Identity account. Customer_name uses the format:
accounts/{account_id}/customers/{customer_id}
customer_cloud_identity_id (str):
If existing = true, the Cloud Identity ID of
the customer.
"""
existing = proto.Field(
proto.BOOL,
number=1,
)
owned = proto.Field(
proto.BOOL,
number=2,
)
customer_name = proto.Field(
proto.STRING,
number=3,
)
customer_cloud_identity_id = proto.Field(
proto.STRING,
number=4,
)
class CheckCloudIdentityAccountsExistResponse(proto.Message):
r"""Response message for
[CloudChannelService.CheckCloudIdentityAccountsExist][google.cloud.channel.v1.CloudChannelService.CheckCloudIdentityAccountsExist].
Attributes:
cloud_identity_accounts (Sequence[google.cloud.channel_v1.types.CloudIdentityCustomerAccount]):
The Cloud Identity accounts associated with
the domain.
"""
cloud_identity_accounts = proto.RepeatedField(
proto.MESSAGE,
number=1,
message='CloudIdentityCustomerAccount',
)
class ListCustomersRequest(proto.Message):
r"""Request message for
[CloudChannelService.ListCustomers][google.cloud.channel.v1.CloudChannelService.ListCustomers]
Attributes:
parent (str):
Required. The resource name of the reseller account to list
customers from. Parent uses the format:
accounts/{account_id}.
page_size (int):
Optional. The maximum number of customers to
return. The service may return fewer than this
value. If unspecified, returns at most 10
customers. The maximum value is 50.
page_token (str):
Optional. A token identifying a page of results other than
the first page. Obtained through
[ListCustomersResponse.next_page_token][google.cloud.channel.v1.ListCustomersResponse.next_page_token]
of the previous
[CloudChannelService.ListCustomers][google.cloud.channel.v1.CloudChannelService.ListCustomers]
call.
"""
parent = proto.Field(
proto.STRING,
number=1,
)
page_size = proto.Field(
proto.INT32,
number=2,
)
page_token = proto.Field(
proto.STRING,
number=3,
)
class ListCustomersResponse(proto.Message):
r"""Response message for
[CloudChannelService.ListCustomers][google.cloud.channel.v1.CloudChannelService.ListCustomers].
Attributes:
customers (Sequence[google.cloud.channel_v1.types.Customer]):
The customers belonging to a reseller or
distributor.
next_page_token (str):
A token to retrieve the next page of results. Pass to
[ListCustomersRequest.page_token][google.cloud.channel.v1.ListCustomersRequest.page_token]
to obtain that page.
"""
@property
def raw_page(self):
return self
customers = proto.RepeatedField(
proto.MESSAGE,
number=1,
message=gcc_customers.Customer,
)
next_page_token = proto.Field(
proto.STRING,
number=2,
)
class GetCustomerRequest(proto.Message):
r"""Request message for
[CloudChannelService.GetCustomer][google.cloud.channel.v1.CloudChannelService.GetCustomer].
Attributes:
name (str):
Required. The resource name of the customer to retrieve.
Name uses the format:
accounts/{account_id}/customers/{customer_id}
"""
name = proto.Field(
proto.STRING,
number=1,
)
class CreateCustomerRequest(proto.Message):
r"""Request message for
[CloudChannelService.CreateCustomer][google.cloud.channel.v1.CloudChannelService.CreateCustomer]
Attributes:
parent (str):
Required. The resource name of reseller account in which to
create the customer. Parent uses the format:
accounts/{account_id}
customer (google.cloud.channel_v1.types.Customer):
Required. The customer to create.
"""
parent = proto.Field(
proto.STRING,
number=1,
)
customer = proto.Field(
proto.MESSAGE,
number=2,
message=gcc_customers.Customer,
)
class UpdateCustomerRequest(proto.Message):
r"""Request message for
[CloudChannelService.UpdateCustomer][google.cloud.channel.v1.CloudChannelService.UpdateCustomer].
Attributes:
customer (google.cloud.channel_v1.types.Customer):
Required. New contents of the customer.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
The update mask that applies to the resource.
Optional.
"""
customer = proto.Field(
proto.MESSAGE,
number=2,
message=gcc_customers.Customer,
)
update_mask = proto.Field(
proto.MESSAGE,
number=3,
message=field_mask_pb2.FieldMask,
)
class DeleteCustomerRequest(proto.Message):
r"""Request message for
[CloudChannelService.DeleteCustomer][google.cloud.channel.v1.CloudChannelService.DeleteCustomer].
Attributes:
name (str):
Required. The resource name of the customer
to delete.
"""
name = proto.Field(
proto.STRING,
number=1,
)
class ProvisionCloudIdentityRequest(proto.Message):
r"""Request message for
[CloudChannelService.ProvisionCloudIdentity][google.cloud.channel.v1.CloudChannelService.ProvisionCloudIdentity]
Attributes:
customer (str):
Required. Resource name of the customer. Format:
accounts/{account_id}/customers/{customer_id}
cloud_identity_info (google.cloud.channel_v1.types.CloudIdentityInfo):
CloudIdentity-specific customer information.
user (google.cloud.channel_v1.types.AdminUser):
Admin user information.
validate_only (bool):
Validate the request and preview the review,
but do not post it.
"""
customer = proto.Field(
proto.STRING,
number=1,
)
cloud_identity_info = proto.Field(
proto.MESSAGE,
number=2,
message=common.CloudIdentityInfo,
)
user = proto.Field(
proto.MESSAGE,
number=3,
message=common.AdminUser,
)
validate_only = proto.Field(
proto.BOOL,
number=4,
)
class ListEntitlementsRequest(proto.Message):
r"""Request message for
[CloudChannelService.ListEntitlements][google.cloud.channel.v1.CloudChannelService.ListEntitlements]
Attributes:
parent (str):
Required. The resource name of the reseller's customer
account to list entitlements for. Parent uses the format:
accounts/{account_id}/customers/{customer_id}
page_size (int):
Optional. Requested page size. Server might
return fewer results than requested. If
unspecified, return at most 50 entitlements. The
maximum value is 100; the server will coerce
values above 100.
page_token (str):
Optional. A token for a page of results other than the first
page. Obtained using
[ListEntitlementsResponse.next_page_token][google.cloud.channel.v1.ListEntitlementsResponse.next_page_token]
of the previous
[CloudChannelService.ListEntitlements][google.cloud.channel.v1.CloudChannelService.ListEntitlements]
call.
"""
parent = proto.Field(
proto.STRING,
number=1,
)
page_size = proto.Field(
proto.INT32,
number=2,
)
page_token = proto.Field(
proto.STRING,
number=3,
)
class ListEntitlementsResponse(proto.Message):
r"""Response message for
[CloudChannelService.ListEntitlements][google.cloud.channel.v1.CloudChannelService.ListEntitlements].
Attributes:
entitlements (Sequence[google.cloud.channel_v1.types.Entitlement]):
The reseller customer's entitlements.
next_page_token (str):
A token to list the next page of results. Pass to
[ListEntitlementsRequest.page_token][google.cloud.channel.v1.ListEntitlementsRequest.page_token]
to obtain that page.
"""
@property
def raw_page(self):
return self
entitlements = proto.RepeatedField(
proto.MESSAGE,
number=1,
message=gcc_entitlements.Entitlement,
)
next_page_token = proto.Field(
proto.STRING,
number=2,
)
class ListTransferableSkusRequest(proto.Message):
r"""Request message for
[CloudChannelService.ListTransferableSkus][google.cloud.channel.v1.CloudChannelService.ListTransferableSkus]
Attributes:
cloud_identity_id (str):
Customer's Cloud Identity ID
customer_name (str):
A reseller is required to create a customer and use the
resource name of the created customer here. Customer_name
uses the format:
accounts/{account_id}/customers/{customer_id}
parent (str):
Required. The reseller account's resource name. Parent uses
the format: accounts/{account_id}
page_size (int):
The requested page size. Server might return
fewer results than requested. If unspecified,
returns at most 100 SKUs. The maximum value is
1000; the server will coerce values above 1000.
Optional.
page_token (str):
A token for a page of results other than the first page.
Obtained using
[ListTransferableSkusResponse.next_page_token][google.cloud.channel.v1.ListTransferableSkusResponse.next_page_token]
of the previous
[CloudChannelService.ListTransferableSkus][google.cloud.channel.v1.CloudChannelService.ListTransferableSkus]
call. Optional.
auth_token (str):
The super admin of the resold customer
generates this token to authorize a reseller to
access their Cloud Identity and purchase
entitlements on their behalf. You can omit this
token after authorization. See
https://support.google.com/a/answer/7643790 for
more details.
language_code (str):
The BCP-47 language code. For example, "en-
S". The response will localize in the
corresponding language code, if specified. The
default value is "en-US".
Optional.
"""
cloud_identity_id = proto.Field(
proto.STRING,
number=4,
oneof='transferred_customer_identity',
)
customer_name = proto.Field(
proto.STRING,
number=7,
oneof='transferred_customer_identity',
)
parent = proto.Field(
proto.STRING,
number=1,
)
page_size = proto.Field(
proto.INT32,
number=2,
)
page_token = proto.Field(
proto.STRING,
number=3,
)
auth_token = proto.Field(
proto.STRING,
number=5,
)
language_code = proto.Field(
proto.STRING,
number=6,
)
class ListTransferableSkusResponse(proto.Message):
r"""Response message for
[CloudChannelService.ListTransferableSkus][google.cloud.channel.v1.CloudChannelService.ListTransferableSkus].
Attributes:
transferable_skus (Sequence[google.cloud.channel_v1.types.TransferableSku]):
Information about existing SKUs for a
customer that needs a transfer.
next_page_token (str):
A token to retrieve the next page of results. Pass to
[ListTransferableSkusRequest.page_token][google.cloud.channel.v1.ListTransferableSkusRequest.page_token]
to obtain that page.
"""
@property
def raw_page(self):
return self
transferable_skus = proto.RepeatedField(
proto.MESSAGE,
number=1,
message=gcc_entitlements.TransferableSku,
)
next_page_token = proto.Field(
proto.STRING,
number=2,
)
class ListTransferableOffersRequest(proto.Message):
r"""Request message for
[CloudChannelService.ListTransferableOffers][google.cloud.channel.v1.CloudChannelService.ListTransferableOffers]
Attributes:
cloud_identity_id (str):
Customer's Cloud Identity ID
customer_name (str):
A reseller should create a customer and use
the resource name of that customer here.
parent (str):
Required. The resource name of the reseller's
account.
page_size (int):
Requested page size. Server might return
fewer results than requested. If unspecified,
returns at most 100 offers. The maximum value is
1000; the server will coerce values above 1000.
page_token (str):
A token for a page of results other than the first page.
Obtained using
[ListTransferableOffersResponse.next_page_token][google.cloud.channel.v1.ListTransferableOffersResponse.next_page_token]
of the previous
[CloudChannelService.ListTransferableOffers][google.cloud.channel.v1.CloudChannelService.ListTransferableOffers]
call.
sku (str):
Required. The SKU to look up Offers for.
language_code (str):
The BCP-47 language code. For example, "en-
S". The response will localize in the
corresponding language code, if specified. The
default value is "en-US".
"""
cloud_identity_id = proto.Field(
proto.STRING,
number=4,
oneof='transferred_customer_identity',
)
customer_name = proto.Field(
proto.STRING,
number=5,
oneof='transferred_customer_identity',
)
parent = proto.Field(
proto.STRING,
number=1,
)
page_size = proto.Field(
proto.INT32,
number=2,
)
page_token = proto.Field(
proto.STRING,
number=3,
)
sku = proto.Field(
proto.STRING,
number=6,
)
language_code = proto.Field(
proto.STRING,
number=7,
)
class ListTransferableOffersResponse(proto.Message):
r"""Response message for
[CloudChannelService.ListTransferableOffers][google.cloud.channel.v1.CloudChannelService.ListTransferableOffers].
Attributes:
transferable_offers (Sequence[google.cloud.channel_v1.types.TransferableOffer]):
Information about Offers for a customer that
can be used for transfer.
next_page_token (str):
A token to retrieve the next page of results. Pass to
[ListTransferableOffersRequest.page_token][google.cloud.channel.v1.ListTransferableOffersRequest.page_token]
to obtain that page.
"""
@property
def raw_page(self):
return self
transferable_offers = proto.RepeatedField(
proto.MESSAGE,
number=1,
message='TransferableOffer',
)
next_page_token = proto.Field(
proto.STRING,
number=2,
)
class TransferableOffer(proto.Message):
r"""TransferableOffer represents an Offer that can be used in
Transfer. Read-only.
Attributes:
offer (google.cloud.channel_v1.types.Offer):
Offer with parameter constraints updated to
allow the Transfer.
"""
offer = proto.Field(
proto.MESSAGE,
number=1,
message=gcc_offers.Offer,
)
class GetEntitlementRequest(proto.Message):
r"""Request message for
[CloudChannelService.GetEntitlement][google.cloud.channel.v1.CloudChannelService.GetEntitlement].
Attributes:
name (str):
Required. The resource name of the entitlement to retrieve.
Name uses the format:
accounts/{account_id}/customers/{customer_id}/entitlements/{entitlement_id}
"""
name = proto.Field(
proto.STRING,
number=1,
)
class ListChannelPartnerLinksRequest(proto.Message):
r"""Request message for
[CloudChannelService.ListChannelPartnerLinks][google.cloud.channel.v1.CloudChannelService.ListChannelPartnerLinks]
Attributes:
parent (str):
Required. The resource name of the reseller account for
listing channel partner links. Parent uses the format:
accounts/{account_id}
page_size (int):
Optional. Requested page size. Server might
return fewer results than requested. If
unspecified, server will pick a default size
(25). The maximum value is 200; the server will
coerce values above 200.
page_token (str):
Optional. A token for a page of results other than the first
page. Obtained using
[ListChannelPartnerLinksResponse.next_page_token][google.cloud.channel.v1.ListChannelPartnerLinksResponse.next_page_token]
of the previous
[CloudChannelService.ListChannelPartnerLinks][google.cloud.channel.v1.CloudChannelService.ListChannelPartnerLinks]
call.
view (google.cloud.channel_v1.types.ChannelPartnerLinkView):
Optional. The level of granularity the
ChannelPartnerLink will display.
"""
parent = proto.Field(
proto.STRING,
number=1,
)
page_size = proto.Field(
proto.INT32,
number=2,
)
page_token = proto.Field(
proto.STRING,
number=3,
)
view = proto.Field(
proto.ENUM,
number=4,
enum=gcc_channel_partner_links.ChannelPartnerLinkView,
)
class ListChannelPartnerLinksResponse(proto.Message):
r"""Response message for
[CloudChannelService.ListChannelPartnerLinks][google.cloud.channel.v1.CloudChannelService.ListChannelPartnerLinks].
Attributes:
channel_partner_links (Sequence[google.cloud.channel_v1.types.ChannelPartnerLink]):
The Channel partner links for a reseller.
next_page_token (str):
A token to retrieve the next page of results. Pass to
[ListChannelPartnerLinksRequest.page_token][google.cloud.channel.v1.ListChannelPartnerLinksRequest.page_token]
to obtain that page.
"""
@property
def raw_page(self):
return self
channel_partner_links = proto.RepeatedField(
proto.MESSAGE,
number=1,
message=gcc_channel_partner_links.ChannelPartnerLink,
)
next_page_token = proto.Field(
proto.STRING,
number=2,
)
class GetChannelPartnerLinkRequest(proto.Message):
r"""Request message for
[CloudChannelService.GetChannelPartnerLink][google.cloud.channel.v1.CloudChannelService.GetChannelPartnerLink].
Attributes:
name (str):
Required. The resource name of the channel partner link to
retrieve. Name uses the format:
accounts/{account_id}/channelPartnerLinks/{id} where {id} is
the Cloud Identity ID of the partner.
view (google.cloud.channel_v1.types.ChannelPartnerLinkView):
Optional. The level of granularity the
ChannelPartnerLink will display.
"""
name = proto.Field(
proto.STRING,
number=1,
)
view = proto.Field(
proto.ENUM,
number=2,
enum=gcc_channel_partner_links.ChannelPartnerLinkView,
)
class CreateChannelPartnerLinkRequest(proto.Message):
r"""Request message for
[CloudChannelService.CreateChannelPartnerLink][google.cloud.channel.v1.CloudChannelService.CreateChannelPartnerLink]
Attributes:
parent (str):
Required. Create a channel partner link for the provided
reseller account's resource name. Parent uses the format:
accounts/{account_id}
channel_partner_link (google.cloud.channel_v1.types.ChannelPartnerLink):
Required. The channel partner link to create. Either
channel_partner_link.reseller_cloud_identity_id or domain
can be used to create a link.
"""
parent = proto.Field(
proto.STRING,
number=1,
)
channel_partner_link = proto.Field(
proto.MESSAGE,
number=2,
message=gcc_channel_partner_links.ChannelPartnerLink,
)
class UpdateChannelPartnerLinkRequest(proto.Message):
r"""Request message for
[CloudChannelService.UpdateChannelPartnerLink][google.cloud.channel.v1.CloudChannelService.UpdateChannelPartnerLink]
Attributes:
name (str):
Required. The resource name of the channel partner link to
cancel. Name uses the format:
accounts/{account_id}/channelPartnerLinks/{id} where {id} is
the Cloud Identity ID of the partner.
channel_partner_link (google.cloud.channel_v1.types.ChannelPartnerLink):
Required. The channel partner link to update. Only
channel_partner_link.link_state is allowed for updates.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Required. The update mask that applies to the resource. The
only allowable value for an update mask is
channel_partner_link.link_state.
"""
name = proto.Field(
proto.STRING,
number=1,
)
channel_partner_link = proto.Field(
proto.MESSAGE,
number=2,
message=gcc_channel_partner_links.ChannelPartnerLink,
)
update_mask = proto.Field(
proto.MESSAGE,
number=3,
message=field_mask_pb2.FieldMask,
)
class CreateEntitlementRequest(proto.Message):
r"""Request message for
[CloudChannelService.CreateEntitlement][google.cloud.channel.v1.CloudChannelService.CreateEntitlement]
Attributes:
parent (str):
Required. The resource name of the reseller's customer
account in which to create the entitlement. Parent uses the
format: accounts/{account_id}/customers/{customer_id}
entitlement (google.cloud.channel_v1.types.Entitlement):
Required. The entitlement to create.
request_id (str):
Optional. You can specify an optional unique request ID, and
if you need to retry your request, the server will know to
ignore the request if it's complete.
For example, you make an initial request and the request
times out. If you make the request again with the same
request ID, the server can check if it received the original
operation with the same request ID. If it did, it will
ignore the second request.
The request ID must be a valid
`UUID <https://tools.ietf.org/html/rfc4122>`__ with the
exception that zero UUID is not supported
(``00000000-0000-0000-0000-000000000000``).
"""
parent = proto.Field(
proto.STRING,
number=1,
)
entitlement = proto.Field(
proto.MESSAGE,
number=2,
message=gcc_entitlements.Entitlement,
)
request_id = proto.Field(
proto.STRING,
number=5,
)
class TransferEntitlementsRequest(proto.Message):
r"""Request message for
[CloudChannelService.TransferEntitlements][google.cloud.channel.v1.CloudChannelService.TransferEntitlements].
Attributes:
parent (str):
Required. The resource name of the reseller's customer
account that will receive transferred entitlements. Parent
uses the format:
accounts/{account_id}/customers/{customer_id}
entitlements (Sequence[google.cloud.channel_v1.types.Entitlement]):
Required. The new entitlements to create or
transfer.
auth_token (str):
The super admin of the resold customer
generates this token to authorize a reseller to
access their Cloud Identity and purchase
entitlements on their behalf. You can omit this
token after authorization. See
https://support.google.com/a/answer/7643790 for
more details.
request_id (str):
Optional. You can specify an optional unique request ID, and
if you need to retry your request, the server will know to
ignore the request if it's complete.
For example, you make an initial request and the request
times out. If you make the request again with the same
request ID, the server can check if it received the original
operation with the same request ID. If it did, it will
ignore the second request.
The request ID must be a valid
`UUID <https://tools.ietf.org/html/rfc4122>`__ with the
exception that zero UUID is not supported
(``00000000-0000-0000-0000-000000000000``).
"""
parent = proto.Field(
proto.STRING,
number=1,
)
entitlements = proto.RepeatedField(
proto.MESSAGE,
number=2,
message=gcc_entitlements.Entitlement,
)
auth_token = proto.Field(
proto.STRING,
number=4,
)
request_id = proto.Field(
proto.STRING,
number=6,
)
class TransferEntitlementsResponse(proto.Message):
r"""Response message for
[CloudChannelService.TransferEntitlements][google.cloud.channel.v1.CloudChannelService.TransferEntitlements].
This is put in the response field of google.longrunning.Operation.
Attributes:
entitlements (Sequence[google.cloud.channel_v1.types.Entitlement]):
The transferred entitlements.
"""
entitlements = proto.RepeatedField(
proto.MESSAGE,
number=1,
message=gcc_entitlements.Entitlement,
)
class TransferEntitlementsToGoogleRequest(proto.Message):
r"""Request message for
[CloudChannelService.TransferEntitlementsToGoogle][google.cloud.channel.v1.CloudChannelService.TransferEntitlementsToGoogle].
Attributes:
parent (str):
Required. The resource name of the reseller's customer
account where the entitlements transfer from. Parent uses
the format: accounts/{account_id}/customers/{customer_id}
entitlements (Sequence[google.cloud.channel_v1.types.Entitlement]):
Required. The entitlements to transfer to
Google.
request_id (str):
Optional. You can specify an optional unique request ID, and
if you need to retry your request, the server will know to
ignore the request if it's complete.
For example, you make an initial request and the request
times out. If you make the request again with the same
request ID, the server can check if it received the original
operation with the same request ID. If it did, it will
ignore the second request.
The request ID must be a valid
`UUID <https://tools.ietf.org/html/rfc4122>`__ with the
exception that zero UUID is not supported
(``00000000-0000-0000-0000-000000000000``).
"""
parent = proto.Field(
proto.STRING,
number=1,
)
entitlements = proto.RepeatedField(
proto.MESSAGE,
number=2,
message=gcc_entitlements.Entitlement,
)
request_id = proto.Field(
proto.STRING,
number=3,
)
class ChangeParametersRequest(proto.Message):
r"""Request message for [CloudChannelService.ChangeParametersRequest][].
Attributes:
name (str):
Required. The name of the entitlement to update. Name uses
the format:
accounts/{account_id}/customers/{customer_id}/entitlements/{entitlement_id}
parameters (Sequence[google.cloud.channel_v1.types.Parameter]):
Required. Entitlement parameters to update.
You can only change editable parameters.
request_id (str):
Optional. You can specify an optional unique request ID, and
if you need to retry your request, the server will know to
ignore the request if it's complete.
For example, you make an initial request and the request
times out. If you make the request again with the same
request ID, the server can check if it received the original
operation with the same request ID. If it did, it will
ignore the second request.
The request ID must be a valid
`UUID <https://tools.ietf.org/html/rfc4122>`__ with the
exception that zero UUID is not supported
(``00000000-0000-0000-0000-000000000000``).
purchase_order_id (str):
Optional. Purchase order ID provided by the
reseller.
"""
name = proto.Field(
proto.STRING,
number=1,
)
parameters = proto.RepeatedField(
proto.MESSAGE,
number=2,
message=gcc_entitlements.Parameter,
)
request_id = proto.Field(
proto.STRING,
number=4,
)
purchase_order_id = proto.Field(
proto.STRING,
number=5,
)
class ChangeRenewalSettingsRequest(proto.Message):
r"""Request message for
[CloudChannelService.ChangeRenewalSettings][google.cloud.channel.v1.CloudChannelService.ChangeRenewalSettings].
Attributes:
name (str):
Required. The name of the entitlement to update. Name uses
the format:
accounts/{account_id}/customers/{customer_id}/entitlements/{entitlement_id}
renewal_settings (google.cloud.channel_v1.types.RenewalSettings):
Required. New renewal settings.
request_id (str):
Optional. You can specify an optional unique request ID, and
if you need to retry your request, the server will know to
ignore the request if it's complete.
For example, you make an initial request and the request
times out. If you make the request again with the same
request ID, the server can check if it received the original
operation with the same request ID. If it did, it will
ignore the second request.
The request ID must be a valid
`UUID <https://tools.ietf.org/html/rfc4122>`__ with the
exception that zero UUID is not supported
(``00000000-0000-0000-0000-000000000000``).
"""
name = proto.Field(
proto.STRING,
number=1,
)
renewal_settings = proto.Field(
proto.MESSAGE,
number=4,
message=gcc_entitlements.RenewalSettings,
)
request_id = proto.Field(
proto.STRING,
number=5,
)
class ChangeOfferRequest(proto.Message):
r"""Request message for
[CloudChannelService.ChangeOffer][google.cloud.channel.v1.CloudChannelService.ChangeOffer].
Attributes:
name (str):
Required. The resource name of the entitlement to update.
Name uses the format:
accounts/{account_id}/customers/{customer_id}/entitlements/{entitlement_id}
offer (str):
Required. New Offer. Format:
accounts/{account_id}/offers/{offer_id}.
parameters (Sequence[google.cloud.channel_v1.types.Parameter]):
Optional. Parameters needed to purchase the
Offer.
purchase_order_id (str):
Optional. Purchase order id provided by the
reseller.
request_id (str):
Optional. You can specify an optional unique request ID, and
if you need to retry your request, the server will know to
ignore the request if it's complete.
For example, you make an initial request and the request
times out. If you make the request again with the same
request ID, the server can check if it received the original
operation with the same request ID. If it did, it will
ignore the second request.
The request ID must be a valid
`UUID <https://tools.ietf.org/html/rfc4122>`__ with the
exception that zero UUID is not supported
(``00000000-0000-0000-0000-000000000000``).
"""
name = proto.Field(
proto.STRING,
number=1,
)
offer = proto.Field(
proto.STRING,
number=2,
)
parameters = proto.RepeatedField(
proto.MESSAGE,
number=3,
message=gcc_entitlements.Parameter,
)
purchase_order_id = proto.Field(
proto.STRING,
number=5,
)
request_id = proto.Field(
proto.STRING,
number=6,
)
class StartPaidServiceRequest(proto.Message):
r"""Request message for
[CloudChannelService.StartPaidService][google.cloud.channel.v1.CloudChannelService.StartPaidService].
Attributes:
name (str):
Required. The name of the entitlement to start a paid
service for. Name uses the format:
accounts/{account_id}/customers/{customer_id}/entitlements/{entitlement_id}
request_id (str):
Optional. You can specify an optional unique request ID, and
if you need to retry your request, the server will know to
ignore the request if it's complete.
For example, you make an initial request and the request
times out. If you make the request again with the same
request ID, the server can check if it received the original
operation with the same request ID. If it did, it will
ignore the second request.
The request ID must be a valid
`UUID <https://tools.ietf.org/html/rfc4122>`__ with the
exception that zero UUID is not supported
(``00000000-0000-0000-0000-000000000000``).
"""
name = proto.Field(
proto.STRING,
number=1,
)
request_id = proto.Field(
proto.STRING,
number=3,
)
class CancelEntitlementRequest(proto.Message):
r"""Request message for
[CloudChannelService.CancelEntitlement][google.cloud.channel.v1.CloudChannelService.CancelEntitlement].
Attributes:
name (str):
Required. The resource name of the entitlement to cancel.
Name uses the format:
accounts/{account_id}/customers/{customer_id}/entitlements/{entitlement_id}
request_id (str):
Optional. You can specify an optional unique request ID, and
if you need to retry your request, the server will know to
ignore the request if it's complete.
For example, you make an initial request and the request
times out. If you make the request again with the same
request ID, the server can check if it received the original
operation with the same request ID. If it did, it will
ignore the second request.
The request ID must be a valid
`UUID <https://tools.ietf.org/html/rfc4122>`__ with the
exception that zero UUID is not supported
(``00000000-0000-0000-0000-000000000000``).
"""
name = proto.Field(
proto.STRING,
number=1,
)
request_id = proto.Field(
proto.STRING,
number=3,
)
class SuspendEntitlementRequest(proto.Message):
r"""Request message for
[CloudChannelService.SuspendEntitlement][google.cloud.channel.v1.CloudChannelService.SuspendEntitlement].
Attributes:
name (str):
Required. The resource name of the entitlement to suspend.
Name uses the format:
accounts/{account_id}/customers/{customer_id}/entitlements/{entitlement_id}
request_id (str):
Optional. You can specify an optional unique request ID, and
if you need to retry your request, the server will know to
ignore the request if it's complete.
For example, you make an initial request and the request
times out. If you make the request again with the same
request ID, the server can check if it received the original
operation with the same request ID. If it did, it will
ignore the second request.
The request ID must be a valid
`UUID <https://tools.ietf.org/html/rfc4122>`__ with the
exception that zero UUID is not supported
(``00000000-0000-0000-0000-000000000000``).
"""
name = proto.Field(
proto.STRING,
number=1,
)
request_id = proto.Field(
proto.STRING,
number=3,
)
class ActivateEntitlementRequest(proto.Message):
r"""Request message for
[CloudChannelService.ActivateEntitlement][google.cloud.channel.v1.CloudChannelService.ActivateEntitlement].
Attributes:
name (str):
Required. The resource name of the entitlement to activate.
Name uses the format:
accounts/{account_id}/customers/{customer_id}/entitlements/{entitlement_id}
request_id (str):
Optional. You can specify an optional unique request ID, and
if you need to retry your request, the server will know to
ignore the request if it's complete.
For example, you make an initial request and the request
times out. If you make the request again with the same
request ID, the server can check if it received the original
operation with the same request ID. If it did, it will
ignore the second request.
The request ID must be a valid
`UUID <https://tools.ietf.org/html/rfc4122>`__ with the
exception that zero UUID is not supported
(``00000000-0000-0000-0000-000000000000``).
"""
name = proto.Field(
proto.STRING,
number=1,
)
request_id = proto.Field(
proto.STRING,
number=3,
)
class LookupOfferRequest(proto.Message):
r"""Request message for LookupOffer.
Attributes:
entitlement (str):
Required. The resource name of the entitlement to retrieve
the Offer. Entitlement uses the format:
accounts/{account_id}/customers/{customer_id}/entitlements/{entitlement_id}
"""
entitlement = proto.Field(
proto.STRING,
number=1,
)
class ListProductsRequest(proto.Message):
r"""Request message for ListProducts.
Attributes:
account (str):
Required. The resource name of the reseller account. Format:
accounts/{account_id}.
page_size (int):
Optional. Requested page size. Server might
return fewer results than requested. If
unspecified, returns at most 100 Products. The
maximum value is 1000; the server will coerce
values above 1000.
page_token (str):
Optional. A token for a page of results other
than the first page.
language_code (str):
Optional. The BCP-47 language code. For
example, "en-US". The response will localize in
the corresponding language code, if specified.
The default value is "en-US".
"""
account = proto.Field(
proto.STRING,
number=1,
)
page_size = proto.Field(
proto.INT32,
number=2,
)
page_token = proto.Field(
proto.STRING,
number=3,
)
language_code = proto.Field(
proto.STRING,
number=4,
)
class ListProductsResponse(proto.Message):
r"""Response message for ListProducts.
Attributes:
products (Sequence[google.cloud.channel_v1.types.Product]):
List of Products requested.
next_page_token (str):
A token to retrieve the next page of results.
"""
@property
def raw_page(self):
return self
products = proto.RepeatedField(
proto.MESSAGE,
number=1,
message=gcc_products.Product,
)
next_page_token = proto.Field(
proto.STRING,
number=2,
)
class ListSkusRequest(proto.Message):
r"""Request message for ListSkus.
Attributes:
parent (str):
Required. The resource name of the Product to list SKUs for.
Parent uses the format: products/{product_id}. Supports
products/- to retrieve SKUs for all products.
account (str):
Required. Resource name of the reseller. Format:
accounts/{account_id}.
page_size (int):
Optional. Requested page size. Server might
return fewer results than requested. If
unspecified, returns at most 100 SKUs. The
maximum value is 1000; the server will coerce
values above 1000.
page_token (str):
Optional. A token for a page of results other
than the first page. Optional.
language_code (str):
Optional. The BCP-47 language code. For
example, "en-US". The response will localize in
the corresponding language code, if specified.
The default value is "en-US".
"""
parent = proto.Field(
proto.STRING,
number=1,
)
account = proto.Field(
proto.STRING,
number=2,
)
page_size = proto.Field(
proto.INT32,
number=3,
)
page_token = proto.Field(
proto.STRING,
number=4,
)
language_code = proto.Field(
proto.STRING,
number=5,
)
class ListSkusResponse(proto.Message):
r"""Response message for ListSkus.
Attributes:
skus (Sequence[google.cloud.channel_v1.types.Sku]):
The list of SKUs requested.
next_page_token (str):
A token to retrieve the next page of results.
"""
@property
def raw_page(self):
return self
skus = proto.RepeatedField(
proto.MESSAGE,
number=1,
message=gcc_products.Sku,
)
next_page_token = proto.Field(
proto.STRING,
number=2,
)
class ListOffersRequest(proto.Message):
r"""Request message for ListOffers.
Attributes:
parent (str):
Required. The resource name of the reseller account from
which to list Offers. Parent uses the format:
accounts/{account_id}.
page_size (int):
Optional. Requested page size. Server might
return fewer results than requested. If
unspecified, returns at most 500 Offers. The
maximum value is 1000; the server will coerce
values above 1000.
page_token (str):
Optional. A token for a page of results other
than the first page.
filter (str):
Optional. The expression to filter results by
name (name of the Offer), sku.name (name of the
SKU), or sku.product.name (name of the Product).
Example 1: sku.product.name=products/p1 AND
sku.name!=products/p1/skus/s1 Example 2:
name=accounts/a1/offers/o1
language_code (str):
Optional. The BCP-47 language code. For
example, "en-US". The response will localize in
the corresponding language code, if specified.
The default value is "en-US".
"""
parent = proto.Field(
proto.STRING,
number=1,
)
page_size = proto.Field(
proto.INT32,
number=2,
)
page_token = proto.Field(
proto.STRING,
number=3,
)
filter = proto.Field(
proto.STRING,
number=4,
)
language_code = proto.Field(
proto.STRING,
number=5,
)
class ListOffersResponse(proto.Message):
r"""Response message for ListOffers.
Attributes:
offers (Sequence[google.cloud.channel_v1.types.Offer]):
The list of Offers requested.
next_page_token (str):
A token to retrieve the next page of results.
"""
@property
def raw_page(self):
return self
offers = proto.RepeatedField(
proto.MESSAGE,
number=1,
message=gcc_offers.Offer,
)
next_page_token = proto.Field(
proto.STRING,
number=2,
)
class ListPurchasableSkusRequest(proto.Message):
r"""Request message for ListPurchasableSkus.
Attributes:
create_entitlement_purchase (google.cloud.channel_v1.types.ListPurchasableSkusRequest.CreateEntitlementPurchase):
List SKUs for CreateEntitlement purchase.
change_offer_purchase (google.cloud.channel_v1.types.ListPurchasableSkusRequest.ChangeOfferPurchase):
List SKUs for ChangeOffer purchase with a new
SKU.
customer (str):
Required. The resource name of the customer to list SKUs
for. Format: accounts/{account_id}/customers/{customer_id}.
page_size (int):
Optional. Requested page size. Server might
return fewer results than requested. If
unspecified, returns at most 100 SKUs. The
maximum value is 1000; the server will coerce
values above 1000.
page_token (str):
Optional. A token for a page of results other
than the first page.
language_code (str):
Optional. The BCP-47 language code. For
example, "en-US". The response will localize in
the corresponding language code, if specified.
The default value is "en-US".
"""
class CreateEntitlementPurchase(proto.Message):
r"""List SKUs for a new entitlement. Make the purchase using
[CloudChannelService.CreateEntitlement][google.cloud.channel.v1.CloudChannelService.CreateEntitlement].
Attributes:
product (str):
Required. List SKUs belonging to this Product. Format:
products/{product_id}. Supports products/- to retrieve SKUs
for all products.
"""
product = proto.Field(
proto.STRING,
number=1,
)
class ChangeOfferPurchase(proto.Message):
r"""List SKUs for upgrading or downgrading an entitlement. Make the
purchase using
[CloudChannelService.ChangeOffer][google.cloud.channel.v1.CloudChannelService.ChangeOffer].
Attributes:
entitlement (str):
Required. Resource name of the entitlement. Format:
accounts/{account_id}/customers/{customer_id}/entitlements/{entitlement_id}
change_type (google.cloud.channel_v1.types.ListPurchasableSkusRequest.ChangeOfferPurchase.ChangeType):
Required. Change Type for the entitlement.
"""
class ChangeType(proto.Enum):
r"""Change Type enum."""
CHANGE_TYPE_UNSPECIFIED = 0
UPGRADE = 1
DOWNGRADE = 2
entitlement = proto.Field(
proto.STRING,
number=1,
)
change_type = proto.Field(
proto.ENUM,
number=2,
enum='ListPurchasableSkusRequest.ChangeOfferPurchase.ChangeType',
)
create_entitlement_purchase = proto.Field(
proto.MESSAGE,
number=2,
oneof='purchase_option',
message=CreateEntitlementPurchase,
)
change_offer_purchase = proto.Field(
proto.MESSAGE,
number=3,
oneof='purchase_option',
message=ChangeOfferPurchase,
)
customer = proto.Field(
proto.STRING,
number=1,
)
page_size = proto.Field(
proto.INT32,
number=4,
)
page_token = proto.Field(
proto.STRING,
number=5,
)
language_code = proto.Field(
proto.STRING,
number=6,
)
class ListPurchasableSkusResponse(proto.Message):
r"""Response message for ListPurchasableSkus.
Attributes:
purchasable_skus (Sequence[google.cloud.channel_v1.types.PurchasableSku]):
The list of SKUs requested.
next_page_token (str):
A token to retrieve the next page of results.
"""
@property
def raw_page(self):
return self
purchasable_skus = proto.RepeatedField(
proto.MESSAGE,
number=1,
message='PurchasableSku',
)
next_page_token = proto.Field(
proto.STRING,
number=2,
)
class PurchasableSku(proto.Message):
r"""SKU that you can purchase. This is used in ListPurchasableSku
API response.
Attributes:
sku (google.cloud.channel_v1.types.Sku):
SKU
"""
sku = proto.Field(
proto.MESSAGE,
number=1,
message=gcc_products.Sku,
)
class ListPurchasableOffersRequest(proto.Message):
r"""Request message for ListPurchasableOffers.
Attributes:
create_entitlement_purchase (google.cloud.channel_v1.types.ListPurchasableOffersRequest.CreateEntitlementPurchase):
List Offers for CreateEntitlement purchase.
change_offer_purchase (google.cloud.channel_v1.types.ListPurchasableOffersRequest.ChangeOfferPurchase):
List Offers for ChangeOffer purchase.
customer (str):
Required. The resource name of the customer to list Offers
for. Format: accounts/{account_id}/customers/{customer_id}.
page_size (int):
Optional. Requested page size. Server might
return fewer results than requested. If
unspecified, returns at most 100 Offers. The
maximum value is 1000; the server will coerce
values above 1000.
page_token (str):
Optional. A token for a page of results other
than the first page.
language_code (str):
Optional. The BCP-47 language code. For
example, "en-US". The response will localize in
the corresponding language code, if specified.
The default value is "en-US".
"""
class CreateEntitlementPurchase(proto.Message):
r"""List Offers for CreateEntitlement purchase.
Attributes:
sku (str):
Required. SKU that the result should be restricted to.
Format: products/{product_id}/skus/{sku_id}.
"""
sku = proto.Field(
proto.STRING,
number=1,
)
class ChangeOfferPurchase(proto.Message):
r"""List Offers for ChangeOffer purchase.
Attributes:
entitlement (str):
Required. Resource name of the entitlement. Format:
accounts/{account_id}/customers/{customer_id}/entitlements/{entitlement_id}
new_sku (str):
Optional. Resource name of the new target SKU. Provide this
SKU when upgrading or downgrading an entitlement. Format:
products/{product_id}/skus/{sku_id}
"""
entitlement = proto.Field(
proto.STRING,
number=1,
)
new_sku = proto.Field(
proto.STRING,
number=2,
)
create_entitlement_purchase = proto.Field(
proto.MESSAGE,
number=2,
oneof='purchase_option',
message=CreateEntitlementPurchase,
)
change_offer_purchase = proto.Field(
proto.MESSAGE,
number=3,
oneof='purchase_option',
message=ChangeOfferPurchase,
)
customer = proto.Field(
proto.STRING,
number=1,
)
page_size = proto.Field(
proto.INT32,
number=4,
)
page_token = proto.Field(
proto.STRING,
number=5,
)
language_code = proto.Field(
proto.STRING,
number=6,
)
class ListPurchasableOffersResponse(proto.Message):
r"""Response message for ListPurchasableOffers.
Attributes:
purchasable_offers (Sequence[google.cloud.channel_v1.types.PurchasableOffer]):
The list of Offers requested.
next_page_token (str):
A token to retrieve the next page of results.
"""
@property
def raw_page(self):
return self
purchasable_offers = proto.RepeatedField(
proto.MESSAGE,
number=1,
message='PurchasableOffer',
)
next_page_token = proto.Field(
proto.STRING,
number=2,
)
class PurchasableOffer(proto.Message):
r"""Offer that you can purchase for a customer. This is used in
the ListPurchasableOffer API response.
Attributes:
offer (google.cloud.channel_v1.types.Offer):
Offer.
"""
offer = proto.Field(
proto.MESSAGE,
number=1,
message=gcc_offers.Offer,
)
class RegisterSubscriberRequest(proto.Message):
r"""Request Message for RegisterSubscriber.
Attributes:
account (str):
Required. Resource name of the account.
service_account (str):
Required. Service account that provides
subscriber access to the registered topic.
"""
account = proto.Field(
proto.STRING,
number=1,
)
service_account = proto.Field(
proto.STRING,
number=2,
)
class RegisterSubscriberResponse(proto.Message):
r"""Response Message for RegisterSubscriber.
Attributes:
topic (str):
Name of the topic the subscriber will listen
to.
"""
topic = proto.Field(
proto.STRING,
number=1,
)
class UnregisterSubscriberRequest(proto.Message):
r"""Request Message for UnregisterSubscriber.
Attributes:
account (str):
Required. Resource name of the account.
service_account (str):
Required. Service account to unregister from
subscriber access to the topic.
"""
account = proto.Field(
proto.STRING,
number=1,
)
service_account = proto.Field(
proto.STRING,
number=2,
)
class UnregisterSubscriberResponse(proto.Message):
r"""Response Message for UnregisterSubscriber.
Attributes:
topic (str):
Name of the topic the service account
subscriber access was removed from.
"""
topic = proto.Field(
proto.STRING,
number=1,
)
class ListSubscribersRequest(proto.Message):
r"""Request Message for ListSubscribers.
Attributes:
account (str):
Required. Resource name of the account.
page_size (int):
Optional. The maximum number of service
accounts to return. The service may return fewer
than this value. If unspecified, returns at most
100 service accounts. The maximum value is 1000;
the server will coerce values above 1000.
page_token (str):
Optional. A page token, received from a previous
``ListSubscribers`` call. Provide this to retrieve the
subsequent page.
When paginating, all other parameters provided to
``ListSubscribers`` must match the call that provided the
page token.
"""
account = proto.Field(
proto.STRING,
number=1,
)
page_size = proto.Field(
proto.INT32,
number=2,
)
page_token = proto.Field(
proto.STRING,
number=3,
)
class ListSubscribersResponse(proto.Message):
r"""Response Message for ListSubscribers.
Attributes:
topic (str):
Name of the topic registered with the
reseller.
service_accounts (Sequence[str]):
List of service accounts which have
subscriber access to the topic.
next_page_token (str):
A token that can be sent as ``page_token`` to retrieve the
next page. If this field is omitted, there are no subsequent
pages.
"""
@property
def raw_page(self):
return self
topic = proto.Field(
proto.STRING,
number=1,
)
service_accounts = proto.RepeatedField(
proto.STRING,
number=2,
)
next_page_token = proto.Field(
proto.STRING,
number=3,
)
__all__ = tuple(sorted(__protobuf__.manifest))
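# Hedged usage sketch (added for illustration; not part of the generated module).
# It builds one of the request messages defined above and round-trips it through
# proto-plus serialization. The account name is a made-up placeholder.
if __name__ == "__main__":
    request = ListCustomersRequest(
        parent="accounts/C0123456",  # placeholder reseller account name
        page_size=10,
    )
    payload = ListCustomersRequest.serialize(request)
    restored = ListCustomersRequest.deserialize(payload)
    assert restored.parent == request.parent
    assert restored.page_size == 10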
| apache-2.0 | 1,834,051,834,453,629,200 | 31.213684 | 135 | 0.625315 | false |
treycucco/pxp | pxp/stdlib/math.py | 1 | 4465 | import math
from decimal import Decimal
from pxp.exception import FunctionError
from pxp.function import FunctionArg, FunctionList, InjectedFunction
from pxp.stdlib.types import number_t, boolean_t
def math_abs(resolver, value):
"""Returns the absolute value of value."""
val = resolver.resolve(value)
return val if val >= 0 else -val
def math_ceil(resolver, value):
"""Returns value if value is a whole number, otherwise the next largest whole number."""
val = resolver.resolve(value)
return Decimal(math.ceil(val))
def math_cos(resolver, value):
"""Returns the cosine of value. Value must be in radians."""
val = resolver.resolve(value)
return Decimal(math.cos(val))
def math_degrees(resolver, value):
"""Converts a radians value to degrees."""
val = resolver.resolve(value)
return Decimal(math.degrees(val))
def math_floor(resolver, value):
"""Returns value if value is a whole number, otherwise the next smallest whole number."""
val = resolver.resolve(value)
return Decimal(math.floor(val))
def math_log(resolver, value, base):
"""Returns the log of value. If not specified, the log is a natural log with base e."""
bval = resolver.resolve(base)
if bval <= Decimal(0):
raise FunctionError("Invalid log base")
val = resolver.resolve(value)
return Decimal(math.log(val, bval))
def math_log10(resolver, value):
"""Returns the log base 10 of value."""
return math_log(resolver, value, Decimal(10))
def math_log2(resolver, value):
"""Returns the log base 2 of value."""
return math_log(resolver, value, Decimal(2))
def math_pow(resolver, value, exp):
"""Returns value raised to exp."""
val = resolver.resolve(value)
xval = resolver.resolve(exp)
return Decimal(math.pow(val, xval))
def math_radians(resolver, value):
"""Converts a degrees value to radians."""
val = resolver.resolve(value)
return Decimal(math.radians(val))
def math_root(resolver, value, root):
"""Returns the nth root of value."""
val = resolver.resolve(value)
rval = resolver.resolve(root)
return Decimal(math.pow(val, Decimal(1) / rval))
def math_round(resolver, value, ndigits):
"""Rounds value to the nearest nth digit.
If ndigits is not specified then value is rounded to the nearest whole number.
"""
val = resolver.resolve(value)
dval = resolver.resolve(ndigits)
return Decimal(round(val, int(dval)))
def math_sin(resolver, value):
"""Returns the sine of value. Value must be in radians."""
val = resolver.resolve(value)
return Decimal(math.sin(val))
def math_sqrt(resolver, value):
"""Returns the square root of value."""
return math_root(resolver, value, Decimal(2))
def math_tan(resolver, value):
"""Returns the tanget of value. Value must be in radians."""
val = resolver.resolve(value)
return Decimal(math.tan(val))
math_functions = FunctionList((
InjectedFunction("math.abs", (FunctionArg(number_t, "value"), ), number_t, math_abs),
InjectedFunction("math.ceil", (FunctionArg(number_t, "value"), ), number_t, math_ceil),
InjectedFunction("math.cos", (FunctionArg(number_t, "value"), ), number_t, math_cos),
InjectedFunction("math.degrees", (FunctionArg(number_t, "value"), ), number_t, math_degrees),
InjectedFunction("math.floor", (FunctionArg(number_t, "value"), ), number_t, math_floor),
InjectedFunction("math.log", (FunctionArg(number_t, "value"), FunctionArg(number_t, "base", Decimal(math.e))), number_t, math_log),
InjectedFunction("math.log10", (FunctionArg(number_t, "value"), ), number_t, math_log10),
InjectedFunction("math.log2", (FunctionArg(number_t, "value"), ), number_t, math_log2),
InjectedFunction("math.pow", (FunctionArg(number_t, "value"), FunctionArg(number_t, "exp")), number_t, math_pow),
InjectedFunction("math.radians", (FunctionArg(number_t, "value"), ), number_t, math_radians),
InjectedFunction("math.root", (FunctionArg(number_t, "value"), FunctionArg(number_t, "root")), number_t, math_root),
InjectedFunction("math.round", (FunctionArg(number_t, "value"), FunctionArg(number_t, "ndigits", Decimal(0))), number_t, math_round),
InjectedFunction("math.sin", (FunctionArg(number_t, "value"), ), number_t, math_sin),
InjectedFunction("math.sqrt", (FunctionArg(number_t, "value"), ), number_t, math_sqrt),
InjectedFunction("math.tan", (FunctionArg(number_t, "value"), ), number_t, math_tan)
))
math_constants = {"math.pi": Decimal(math.pi),
"math.e": Decimal(math.e)}
| bsd-3-clause | -4,806,091,900,588,177,000 | 33.882813 | 135 | 0.701904 | false |
CivilNet/Gemfield | dockerfiles/py-faster-rcnn/files/gemfield/py-faster-rcnn/caffe-fast-rcnn/scripts/copy_notebook.py | 1 | 1085 | #!/usr/bin/env python
"""
Takes as arguments:
1. the path to a JSON file (such as an IPython notebook).
2. the path to the output file
If 'metadata' dict in the JSON file contains 'include_in_docs': true,
then copies the file to output file, appending the 'metadata' property
as YAML front-matter, adding the field 'category' with value 'notebook'.
"""
import os
import sys
import json
filename = sys.argv[1]
output_filename = sys.argv[2]
content = json.load(open(filename))
if 'include_in_docs' in content['metadata'] and content['metadata']['include_in_docs']:
yaml_frontmatter = ['---']
for key, val in content['metadata'].items():
if key == 'example_name':
key = 'title'
if val == '':
val = os.path.basename(filename)
yaml_frontmatter.append('{}: {}'.format(key, val))
yaml_frontmatter += ['category: notebook']
yaml_frontmatter += ['original_path: ' + filename]
with open(output_filename, 'w') as fo:
fo.write('\n'.join(yaml_frontmatter + ['---']) + '\n')
fo.write(open(filename).read())
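# Hedged usage note (added for illustration; not part of the original script):
# the docs build is expected to invoke this as
#   copy_notebook.py <notebook.ipynb> <output-path>
# and notebooks whose metadata lacks 'include_in_docs': true are skipped, i.e.
# no output file is written for them. The invocation shown is illustrative.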
| gpl-3.0 | -7,744,717,091,411,934,000 | 32.90625 | 87 | 0.635945 | false |
fake-name/ReadableWebProxy | WebMirror/management/rss_parser_funcs/feed_parse_extractIsekaiFiction.py | 1 | 1737 | def extractIsekaiFiction(item):
"""
'Isekai Fiction'
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol or frag) or 'preview' in item['title'].lower():
return None
if 'Upstart Pastry Chef' in item['tags']:
return buildReleaseMessageWithType(item, 'Upstart Pastry Chef ~Territory Management of a Genius Pâtissier~', vol, chp, frag=frag, postfix=postfix)
if 'pastry' in item['tags']:
return buildReleaseMessageWithType(item, 'Upstart Pastry Chef ~Territory Management of a Genius Pâtissier~', vol, chp, frag=frag, postfix=postfix)
if 'herscherik' in item['tags']:
return buildReleaseMessageWithType(item, 'Herscherik: Tensei Ouji to Urei no Daikoku', vol, chp, frag=frag, postfix=postfix)
if 'okonomiyaki' in item['tags']:
return buildReleaseMessageWithType(item, 'Different World’s Okonomiyaki Chain Store ~Auntie from Osaka, Reborn as Beautiful Swordswoman, on A Mission to Spread Okonomiyaki!~', vol, chp, frag=frag, postfix=postfix)
if 'The Wolf Lord\'s Lady' in item['tags']:
return buildReleaseMessageWithType(item, 'The Wolf Lord\'s Lady', vol, chp, frag=frag, postfix=postfix)
tagmap = [
('Starship Officer Becomes Adventurer', 'The Starship Officer Becomes An Adventurer', 'translated'),
('Sono Mono Nochi ni', 'That Person. Later on…', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
	return False
| bsd-3-clause | 5,251,532,720,968,745,000 | 54.870968 | 215 | 0.674177 | false |
lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_09_01/models/application_gateway_backend_health_server.py | 1 | 1647 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ApplicationGatewayBackendHealthServer(Model):
"""Application gateway backendhealth http settings.
:param address: IP address or FQDN of backend server.
:type address: str
:param ip_configuration: Reference of IP configuration of backend server.
:type ip_configuration:
~azure.mgmt.network.v2017_09_01.models.NetworkInterfaceIPConfiguration
:param health: Health of backend server. Possible values include:
'Unknown', 'Up', 'Down', 'Partial', 'Draining'
:type health: str or
~azure.mgmt.network.v2017_09_01.models.ApplicationGatewayBackendHealthServerHealth
"""
_attribute_map = {
'address': {'key': 'address', 'type': 'str'},
'ip_configuration': {'key': 'ipConfiguration', 'type': 'NetworkInterfaceIPConfiguration'},
'health': {'key': 'health', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ApplicationGatewayBackendHealthServer, self).__init__(**kwargs)
self.address = kwargs.get('address', None)
self.ip_configuration = kwargs.get('ip_configuration', None)
self.health = kwargs.get('health', None)
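# Hedged usage sketch (added for illustration; not part of the generated SDK
# module): constructs the model directly; the address and health values are
# illustrative placeholders.
if __name__ == "__main__":
    server = ApplicationGatewayBackendHealthServer(address="10.0.0.4", health="Up")
    print(server.address, server.health)  # 10.0.0.4 Up
    print(server.ip_configuration)        # None (not supplied)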
| mit | 7,044,124,205,547,597,000 | 41.230769 | 98 | 0.63388 | false |
TomAugspurger/pandas | pandas/core/computation/ops.py | 1 | 15978 | """
Operator classes for eval.
"""
from datetime import datetime
from distutils.version import LooseVersion
from functools import partial
import operator
from typing import Callable, Iterable, Optional, Union
import numpy as np
from pandas._libs.tslibs import Timestamp
from pandas.core.dtypes.common import is_list_like, is_scalar
import pandas.core.common as com
from pandas.core.computation.common import _ensure_decoded, result_type_many
from pandas.core.computation.scope import _DEFAULT_GLOBALS
from pandas.io.formats.printing import pprint_thing, pprint_thing_encoded
_reductions = ("sum", "prod")
_unary_math_ops = (
"sin",
"cos",
"exp",
"log",
"expm1",
"log1p",
"sqrt",
"sinh",
"cosh",
"tanh",
"arcsin",
"arccos",
"arctan",
"arccosh",
"arcsinh",
"arctanh",
"abs",
"log10",
"floor",
"ceil",
)
_binary_math_ops = ("arctan2",)
_mathops = _unary_math_ops + _binary_math_ops
_LOCAL_TAG = "__pd_eval_local_"
class UndefinedVariableError(NameError):
"""
NameError subclass for local variables.
"""
def __init__(self, name: str, is_local: Optional[bool] = None):
base_msg = f"{repr(name)} is not defined"
if is_local:
msg = f"local variable {base_msg}"
else:
msg = f"name {base_msg}"
super().__init__(msg)
class Term:
def __new__(cls, name, env, side=None, encoding=None):
klass = Constant if not isinstance(name, str) else cls
supr_new = super(Term, klass).__new__
return supr_new(klass)
is_local: bool
def __init__(self, name, env, side=None, encoding=None):
# name is a str for Term, but may be something else for subclasses
self._name = name
self.env = env
self.side = side
tname = str(name)
self.is_local = tname.startswith(_LOCAL_TAG) or tname in _DEFAULT_GLOBALS
self._value = self._resolve_name()
self.encoding = encoding
@property
def local_name(self) -> str:
return self.name.replace(_LOCAL_TAG, "")
def __repr__(self) -> str:
return pprint_thing(self.name)
def __call__(self, *args, **kwargs):
return self.value
def evaluate(self, *args, **kwargs):
return self
def _resolve_name(self):
res = self.env.resolve(self.local_name, is_local=self.is_local)
self.update(res)
if hasattr(res, "ndim") and res.ndim > 2:
raise NotImplementedError(
"N-dimensional objects, where N > 2, are not supported with eval"
)
return res
def update(self, value):
"""
search order for local (i.e., @variable) variables:
scope, key_variable
[('locals', 'local_name'),
('globals', 'local_name'),
('locals', 'key'),
('globals', 'key')]
"""
key = self.name
# if it's a variable name (otherwise a constant)
if isinstance(key, str):
self.env.swapkey(self.local_name, key, new_value=value)
self.value = value
@property
def is_scalar(self) -> bool:
return is_scalar(self._value)
@property
def type(self):
try:
# potentially very slow for large, mixed dtype frames
return self._value.values.dtype
except AttributeError:
try:
# ndarray
return self._value.dtype
except AttributeError:
# scalar
return type(self._value)
return_type = type
@property
def raw(self) -> str:
return f"{type(self).__name__}(name={repr(self.name)}, type={self.type})"
@property
def is_datetime(self) -> bool:
try:
t = self.type.type
except AttributeError:
t = self.type
return issubclass(t, (datetime, np.datetime64))
@property
def value(self):
return self._value
@value.setter
def value(self, new_value):
self._value = new_value
@property
def name(self):
return self._name
@property
def ndim(self) -> int:
return self._value.ndim
class Constant(Term):
def __init__(self, value, env, side=None, encoding=None):
super().__init__(value, env, side=side, encoding=encoding)
def _resolve_name(self):
return self._name
@property
def name(self):
return self.value
def __repr__(self) -> str:
# in python 2 str() of float
# can truncate shorter than repr()
return repr(self.name)
_bool_op_map = {"not": "~", "and": "&", "or": "|"}
class Op:
"""
Hold an operator of arbitrary arity.
"""
op: str
def __init__(self, op: str, operands: Iterable[Union[Term, "Op"]], encoding=None):
self.op = _bool_op_map.get(op, op)
self.operands = operands
self.encoding = encoding
def __iter__(self):
return iter(self.operands)
def __repr__(self) -> str:
"""
Print a generic n-ary operator and its operands using infix notation.
"""
# recurse over the operands
parened = (f"({pprint_thing(opr)})" for opr in self.operands)
return pprint_thing(f" {self.op} ".join(parened))
@property
def return_type(self):
# clobber types to bool if the op is a boolean operator
if self.op in (_cmp_ops_syms + _bool_ops_syms):
return np.bool_
return result_type_many(*(term.type for term in com.flatten(self)))
@property
def has_invalid_return_type(self) -> bool:
types = self.operand_types
obj_dtype_set = frozenset([np.dtype("object")])
return self.return_type == object and types - obj_dtype_set
@property
def operand_types(self):
return frozenset(term.type for term in com.flatten(self))
@property
def is_scalar(self) -> bool:
return all(operand.is_scalar for operand in self.operands)
@property
def is_datetime(self) -> bool:
try:
t = self.return_type.type
except AttributeError:
t = self.return_type
return issubclass(t, (datetime, np.datetime64))
def _in(x, y):
"""
Compute the vectorized membership of ``x in y`` if possible, otherwise
use Python.
"""
try:
return x.isin(y)
except AttributeError:
if is_list_like(x):
try:
return y.isin(x)
except AttributeError:
pass
return x in y
def _not_in(x, y):
"""
Compute the vectorized membership of ``x not in y`` if possible,
otherwise use Python.
"""
try:
return ~x.isin(y)
except AttributeError:
if is_list_like(x):
try:
return ~y.isin(x)
except AttributeError:
pass
return x not in y
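# Behaviour sketch for the two membership helpers above: given a pandas Series
# ``s = pd.Series([1, 2, 3])``, ``_in(s, [2, 3])`` returns the boolean Series
# ``s.isin([2, 3])`` and ``_not_in(s, [2, 3])`` its negation, while objects
# without an ``isin`` method fall back to the scalar ``in`` / ``not in``
# operators.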
_cmp_ops_syms = (">", "<", ">=", "<=", "==", "!=", "in", "not in")
_cmp_ops_funcs = (
operator.gt,
operator.lt,
operator.ge,
operator.le,
operator.eq,
operator.ne,
_in,
_not_in,
)
_cmp_ops_dict = dict(zip(_cmp_ops_syms, _cmp_ops_funcs))
_bool_ops_syms = ("&", "|", "and", "or")
_bool_ops_funcs = (operator.and_, operator.or_, operator.and_, operator.or_)
_bool_ops_dict = dict(zip(_bool_ops_syms, _bool_ops_funcs))
_arith_ops_syms = ("+", "-", "*", "/", "**", "//", "%")
_arith_ops_funcs = (
operator.add,
operator.sub,
operator.mul,
operator.truediv,
operator.pow,
operator.floordiv,
operator.mod,
)
_arith_ops_dict = dict(zip(_arith_ops_syms, _arith_ops_funcs))
_special_case_arith_ops_syms = ("**", "//", "%")
_special_case_arith_ops_funcs = (operator.pow, operator.floordiv, operator.mod)
_special_case_arith_ops_dict = dict(
zip(_special_case_arith_ops_syms, _special_case_arith_ops_funcs)
)
_binary_ops_dict = {}
for d in (_cmp_ops_dict, _bool_ops_dict, _arith_ops_dict):
_binary_ops_dict.update(d)
def _cast_inplace(terms, acceptable_dtypes, dtype):
"""
Cast an expression inplace.
Parameters
----------
terms : Op
The expression that should cast.
acceptable_dtypes : list of acceptable numpy.dtype
Will not cast if term's dtype in this list.
dtype : str or numpy.dtype
The dtype to cast to.
"""
dt = np.dtype(dtype)
for term in terms:
if term.type in acceptable_dtypes:
continue
try:
new_value = term.value.astype(dt)
except AttributeError:
new_value = dt.type(term.value)
term.update(new_value)
def is_term(obj) -> bool:
return isinstance(obj, Term)
class BinOp(Op):
"""
Hold a binary operator and its operands.
Parameters
----------
op : str
lhs : Term or Op
rhs : Term or Op
"""
def __init__(self, op: str, lhs, rhs):
super().__init__(op, (lhs, rhs))
self.lhs = lhs
self.rhs = rhs
self._disallow_scalar_only_bool_ops()
self.convert_values()
try:
self.func = _binary_ops_dict[op]
except KeyError as err:
# has to be made a list for python3
keys = list(_binary_ops_dict.keys())
raise ValueError(
f"Invalid binary operator {repr(op)}, valid operators are {keys}"
) from err
def __call__(self, env):
"""
Recursively evaluate an expression in Python space.
Parameters
----------
env : Scope
Returns
-------
object
The result of an evaluated expression.
"""
# recurse over the left/right nodes
left = self.lhs(env)
right = self.rhs(env)
return self.func(left, right)
def evaluate(self, env, engine: str, parser, term_type, eval_in_python):
"""
Evaluate a binary operation *before* being passed to the engine.
Parameters
----------
env : Scope
engine : str
parser : str
term_type : type
eval_in_python : list
Returns
-------
term_type
The "pre-evaluated" expression as an instance of ``term_type``
"""
if engine == "python":
res = self(env)
else:
# recurse over the left/right nodes
left = self.lhs.evaluate(
env,
engine=engine,
parser=parser,
term_type=term_type,
eval_in_python=eval_in_python,
)
right = self.rhs.evaluate(
env,
engine=engine,
parser=parser,
term_type=term_type,
eval_in_python=eval_in_python,
)
# base cases
if self.op in eval_in_python:
res = self.func(left.value, right.value)
else:
from pandas.core.computation.eval import eval
res = eval(self, local_dict=env, engine=engine, parser=parser)
name = env.add_tmp(res)
return term_type(name, env=env)
def convert_values(self):
"""
Convert datetimes to a comparable value in an expression.
"""
def stringify(value):
encoder: Callable
if self.encoding is not None:
encoder = partial(pprint_thing_encoded, encoding=self.encoding)
else:
encoder = pprint_thing
return encoder(value)
lhs, rhs = self.lhs, self.rhs
if is_term(lhs) and lhs.is_datetime and is_term(rhs) and rhs.is_scalar:
v = rhs.value
if isinstance(v, (int, float)):
v = stringify(v)
v = Timestamp(_ensure_decoded(v))
if v.tz is not None:
v = v.tz_convert("UTC")
self.rhs.update(v)
if is_term(rhs) and rhs.is_datetime and is_term(lhs) and lhs.is_scalar:
v = lhs.value
if isinstance(v, (int, float)):
v = stringify(v)
v = Timestamp(_ensure_decoded(v))
if v.tz is not None:
v = v.tz_convert("UTC")
self.lhs.update(v)
def _disallow_scalar_only_bool_ops(self):
if (
(self.lhs.is_scalar or self.rhs.is_scalar)
and self.op in _bool_ops_dict
and (
not (
issubclass(self.rhs.return_type, (bool, np.bool_))
and issubclass(self.lhs.return_type, (bool, np.bool_))
)
)
):
raise NotImplementedError("cannot evaluate scalar only bool ops")
def isnumeric(dtype) -> bool:
return issubclass(np.dtype(dtype).type, np.number)
class Div(BinOp):
"""
Div operator to special case casting.
Parameters
----------
lhs, rhs : Term or Op
The Terms or Ops in the ``/`` expression.
"""
def __init__(self, lhs, rhs):
super().__init__("/", lhs, rhs)
if not isnumeric(lhs.return_type) or not isnumeric(rhs.return_type):
raise TypeError(
f"unsupported operand type(s) for {self.op}: "
f"'{lhs.return_type}' and '{rhs.return_type}'"
)
# do not upcast float32s to float64 un-necessarily
acceptable_dtypes = [np.float32, np.float_]
_cast_inplace(com.flatten(self), acceptable_dtypes, np.float_)
_unary_ops_syms = ("+", "-", "~", "not")
_unary_ops_funcs = (operator.pos, operator.neg, operator.invert, operator.invert)
_unary_ops_dict = dict(zip(_unary_ops_syms, _unary_ops_funcs))
class UnaryOp(Op):
"""
Hold a unary operator and its operands.
Parameters
----------
op : str
The token used to represent the operator.
operand : Term or Op
The Term or Op operand to the operator.
Raises
------
ValueError
* If no function associated with the passed operator token is found.
"""
def __init__(self, op: str, operand):
super().__init__(op, (operand,))
self.operand = operand
try:
self.func = _unary_ops_dict[op]
except KeyError as err:
raise ValueError(
f"Invalid unary operator {repr(op)}, "
f"valid operators are {_unary_ops_syms}"
) from err
def __call__(self, env):
operand = self.operand(env)
return self.func(operand)
def __repr__(self) -> str:
return pprint_thing(f"{self.op}({self.operand})")
@property
def return_type(self) -> np.dtype:
operand = self.operand
if operand.return_type == np.dtype("bool"):
return np.dtype("bool")
if isinstance(operand, Op) and (
operand.op in _cmp_ops_dict or operand.op in _bool_ops_dict
):
return np.dtype("bool")
return np.dtype("int")
class MathCall(Op):
def __init__(self, func, args):
super().__init__(func.name, args)
self.func = func
def __call__(self, env):
operands = [op(env) for op in self.operands]
with np.errstate(all="ignore"):
return self.func.func(*operands)
def __repr__(self) -> str:
operands = map(str, self.operands)
return pprint_thing(f"{self.op}({','.join(operands)})")
class FuncNode:
def __init__(self, name: str):
from pandas.core.computation.check import _NUMEXPR_INSTALLED, _NUMEXPR_VERSION
if name not in _mathops or (
_NUMEXPR_INSTALLED
and _NUMEXPR_VERSION < LooseVersion("2.6.9")
and name in ("floor", "ceil")
):
raise ValueError(f'"{name}" is not a supported function')
self.name = name
self.func = getattr(np, name)
def __call__(self, *args):
return MathCall(self, args)
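# Minimal usage sketch (assumes a working pandas installation): the classes in
# this module are normally exercised indirectly through ``pandas.eval`` and
# ``DataFrame.eval``, which parse expressions into the Term/Op trees above.
if __name__ == "__main__":
    import pandas as pd
    df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
    # Both expressions below are parsed into Term/BinOp objects before evaluation.
    print(pd.eval("df.a + df.b", engine="python"))
    print(df.eval("(a > 1) & (b < 6)", engine="python"))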
| bsd-3-clause | -7,085,088,927,918,930,000 | 25.279605 | 86 | 0.550194 | false |
ajrbyers/mondroid | src/monitor/management/commands/install_droids.py | 1 | 1680 | from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from django.core.mail import send_mail
from django.conf import settings
from monitor import models
from crontab import CronTab
import os
import sys
try:
action = sys.argv[1:][1]
except:
action = ''
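# 'action' is the token after the management command name, e.g.
# "python manage.py install_droids test" gives action == 'test' (print the
# generated crontab instead of writing it) and 'quiet' suppresses output;
# anything else installs the jobs.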
def find_job(tab, comment):
for job in tab:
if job.comment == comment:
return job
return None
class Command(BaseCommand):
help = 'Installs cron tasks for the monitor.'
def handle(self, *args, **options):
monitor_list = models.Monitor.objects.all()
virtualenv = os.environ.get('VIRTUAL_ENV', None)
tab = CronTab()
for monitor in monitor_list:
current_job = find_job(tab, "fetcher_droid_%s" % monitor.slug)
if current_job == None:
django_command = "&& python %s/manage.py fetcher_droid %s >> /var/log/mondroid/%s.fetcher.log" % (settings.BASE_DIR, monitor.slug, monitor.slug)
if virtualenv:
command = 'export PATH=%s/bin:/usr/local/bin:/usr/bin:/bin %s' % (virtualenv, django_command)
else:
command = '%s' % (django_command)
cron_job = tab.new(command, comment="fetcher_droid_%s" % monitor.slug)
cron_job.minute.every(5)
# Install the parser droid command if it doesn't exist already
current_job = find_job(tab, "parser_droid")
if current_job == None:
if virtualenv:
command = 'export PATH=%s/bin:/usr/local/bin:/usr/bin:/bin && python %s/manage.py parser_droid' % (virtualenv, settings.BASE_DIR)
cron_job = tab.new(command, comment="parser_droid")
cron_job.minute.every(5)
if action == 'test':
print tab.render()
elif action == 'quiet':
pass
else:
tab.write() | gpl-2.0 | 6,182,176,925,109,408,000 | 26.557377 | 148 | 0.685119 | false |
luci/luci-py | appengine/components/components/datastore_utils/sharding.py | 2 | 2242 | # Copyright 2014 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""Sharding Entity group utility function to improve performance.
This enforces an artificial root entity grouping, which can actually be useful
in some specific circumstances.
"""
import hashlib
import string
from google.appengine.ext import ndb
__all__ = [
'shard_key',
'hashed_shard_key',
]
### Private stuff.
_HEX = frozenset(string.hexdigits.lower())
### Public API.
def shard_key(key, number_of_letters, root_entity_type):
"""Returns an ndb.Key to a virtual entity of type |root_entity_type|.
This key is to be used as an entity group for database sharding. Transactions
can be done over this group. Note that this sharding root entity doesn't have
to ever exist in the database.
Arguments:
key: full key to take a subset of. It must be '[0-9a-f]+'. It is assumed
that this key is well distributed, if not, use hashed_shard_key()
instead. This means the available number of buckets is done in
increments of 4 bits, e.g. 16, 256, 4096, 65536.
number_of_letters: number of letters to use from |key|. key length must be
        encoded through out-of-band means and be constant.
root_entity_type: root entity type. It can be either a reference to a
ndb.Model class or just a string.
"""
assert _HEX.issuperset(key), key
assert isinstance(key, str) and len(key) >= number_of_letters, repr(key)
# number_of_letters==10 means 1099511627776 shards, which is unreasonable.
assert 1 <= number_of_letters < 10, number_of_letters
assert isinstance(root_entity_type, (ndb.Model, str)) and root_entity_type, (
root_entity_type)
return ndb.Key(root_entity_type, key[:number_of_letters])
def hashed_shard_key(key, number_of_letters, root_entity_type):
"""Returns a ndb.Key to a virtual entity of type |root_entity_type|.
The main difference with shard_key() is that it doesn't assume the key is well
distributed so it first hashes the value via MD5 to make it more distributed.
"""
return shard_key(
hashlib.md5(key).hexdigest(), number_of_letters, root_entity_type)
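# Usage sketch (illustrative, not part of the original API surface): with
# number_of_letters=1 there are 16 possible shard roots, 256 with 2, and so on.
#   shard_key('a0f3b2', 1, 'ShardRoot')         -> ndb.Key('ShardRoot', 'a')
#   hashed_shard_key('user-42', 2, 'ShardRoot') -> ndb.Key('ShardRoot',
#       hashlib.md5('user-42').hexdigest()[:2])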
| apache-2.0 | -6,702,349,479,990,282,000 | 34.03125 | 80 | 0.716325 | false |
CGATOxford/CGATPipelines | obsolete/pipeline_variants.py | 1 | 103742 | """===========================
Variant annotation pipeline
===========================
:Author: Andreas Heger
:Release: $Id$
:Date: |today|
:Tags: Python
The Variants pipeline attempts to annotate variants in
a :term:`vcf` formatted file. It computes
1. the effects of SNPs on transcripts and genes
This pipeline works on a single genome.
Overview
========
Usage
=====
See :ref:`PipelineSettingUp` and :ref:`PipelineRunning` for general
information on how to use CGAT pipelines.
Configuration
-------------
The pipeline requires a configured :file:`pipeline.ini` file.
The sphinxreport report requires a :file:`conf.py` and
:file:`sphinxreport.ini` file (see :ref:`PipelineReporting`). To start
with, use the files supplied with the Example_ data.
Input
-----
Variants
++++++++
Variants are read from a :term:`vcf` formatted file called
:file:`variants.vcf.gz`. The file is assumed to have been compressed
with :file:`bgzip` and indexed with tabix.
The tracks are taken from the headers in the :term:`vcf` file. Please
avoid any special characters like ``_][*.+-`` within strain names.
The pipeline expects the following information within the genotype
field in the :term:`vcf` file:
GT
The genotype
DP
The read depth
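For example, with a ``FORMAT`` of ``GT:DP`` a sample column of ``0/1:23``
describes a heterozygous call supported by 23 reads (illustrative values only).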
Optional inputs
+++++++++++++++
Requirements
------------
The pipeline requires the results from
:doc:`pipeline_annotations`. Set the configuration variable
:py:data:`annotations_database` and :py:data:`annotations_dir`.
On top of the default CGAT setup, the pipeline requires the following
software to be in the path:
+--------------------+-------------------+------------------------------------------------+
|*Program* |*Version* |*Purpose* |
+--------------------+-------------------+------------------------------------------------+
|polyphen_ |>=2.0.23 |prediction of deleterious substitutions |
+--------------------+-------------------+------------------------------------------------+
Pipeline output
===============
The major output is in the database file :file:`csvdb`.
Example
=======
Example data is available at
http://www.cgat.org/~andreas/sample_data/pipeline_variants.tgz. To
run the example, simply unpack and untar::
wget http://www.cgat.org/~andreas/sample_data/pipeline_variants.tgz
tar -xvzf pipeline_variants.tgz
cd pipeline_variants
python <srcdir>/pipeline_variants.py make full
.. note::
For the pipeline to run, install the :doc:`pipeline_annotations` as well.
Glossary
========
.. glossary::
polyphen
polyphen_ - a program to predict the deleteriousness of substitutions
.. _polyphen: http://genetics.bwh.harvard.edu/pph2/dokuwiki/start
Code
====
"""
from ruffus import *
import sys
import glob
import gzip
import os
import itertools
import CGAT.CSV as CSV
import re
import math
import collections
import numpy
import sqlite3
import CGAT.GTF as GTF
import CGAT.Experiment as E
import CGAT.IOTools as IOTools
import CGAT.Database as Database
import CGAT.Biomart as Biomart
import CGAT.FastaIterator as FastaIterator
import CGATPipelines.PipelineEnrichment as PEnrichment
import CGATPipelines.PipelineUCSC as PipelineUCSC
import scipy.stats
import CGAT.Stats as Stats
import pysam
# only update R if called as pipeline
# otherwise - failure with sphinx
from rpy2.robjects import r as R
import CGATPipelines.Pipeline as P
import CGATPipelines.PipelineTracks as PipelineTracks
###################################################
###################################################
###################################################
# Pipeline configuration
###################################################
# load options from the config file
P.getParameters(
["%s/pipeline.ini" % os.path.splitext(__file__)[0],
"../pipeline.ini",
"pipeline.ini"])
PARAMS = P.PARAMS
PARAMS_ANNOTATIONS = P.peekParameters(
PARAMS["annotations_dir"],
"pipeline_annotations.py")
PipelineUCSC.PARAMS = PARAMS
###################################################################
###################################################################
# Helper functions mapping tracks to conditions, etc
###################################################################
# need to be refactored
SEPARATOR = "|"
###################################################################
###################################################################
# Helper functions mapping tracks to conditions, etc
###################################################################
class TracksVCF (PipelineTracks.Tracks):
def load(self, filename, exclude=None):
'''load tracks from a vcf file.'''
tracks = []
v = pysam.VCF()
v.setversion(40)
if not os.path.exists(filename):
self.tracks = tracks
return self
v.connect(filename)
if exclude:
to_exclude = [re.compile(x) for x in exclude]
        for sample in v.getsamples():
            skip = False
if exclude:
for x in to_exclude:
if x.search(sample):
skip = True
break
if skip:
continue
tracks.append(self.factory(sample))
self.tracks = tracks
return self
TRACKS = TracksVCF(PipelineTracks.Sample).load("variants.vcf.gz")
###################################################################
###################################################################
###################################################################
def connect():
'''connect to database.
This method also attaches to helper databases.
'''
dbh = sqlite3.connect(PARAMS["database_name"])
statement = '''ATTACH DATABASE '%s' as annotations''' % (
PARAMS["annotations_database"])
cc = dbh.cursor()
cc.execute(statement)
cc.close()
return dbh
# @transform( buildPileups, suffix(".pileup.gz"), ".pileup.stats")
# def countPileups( infile, outfile ):
# '''get some basic counts from the pileup files.'''
# to_cluster = True
# statement = '''gunzip < %(infile)s
# | cgat snp2counts
# --genome-file=genome
# --module=contig-counts
# > %(outfile)s
# '''
# P.run()
###################################################################
###################################################################
###################################################################
# Geneset
###################################################################
if "refseq_filename_gtf" in PARAMS:
@split((PARAMS["refseq_filename_gtf"],
PARAMS["refseq_filename_pep"],
PARAMS["refseq_filename_cdna"],
PARAMS["refseq_filename_map"],
PARAMS["refseq_filename_ensembl"],
),
(PARAMS["ensembl_filename_gtf"],
PARAMS["ensembl_filename_pep"],
PARAMS["ensembl_filename_cdna"]))
def importRefseq(infiles, outfiles):
'''convert a refseq gtf formatted file into an ensembl like
gtf file.
        The refseq files should have been downloaded via the UCSC
        table browser.
Only unique refseq entries are used - all duplicates are
removed.
This method imports the following files:
gtf.gz, pep.fa.gz, cdna.fa.gz from the UCSC
It also requires:
* link.tsv.gz from the UCSC (table refLink)
to add peptide identifiers and gene numbers.
* ccdsinfo.tsv.gz from the UCSC (table ccdsInfo)
to add a map from transcripts to ENSEMBL genes
* refgene.tsv.gz from the UCSC (table refgene )
to add gene_name. The refgene table contains
most of the fields required for the gtf file,
but unfortunately, the UCSC parser does
not add it.
'''
infile_gtf, infile_pep, infile_cdna, infile_map, infile_ensembl = infiles
outfile_gtf, outfile_pep, outfile_cdna = outfiles
# build map between mrna and prot
tmpfilename1 = P.getTempFilename()
statement = '''gunzip < %(infile_map)s
| cgat csv_cut mrnaAcc protAcc
| perl -p -e "s/\.\d+//g"
> %(tmpfilename1)s
'''
P.run()
# build map between mrna and gene - use ccds gene
tmpfilename2 = P.getTempFilename()
statement = '''gunzip < %(infile_map)s
| cgat csv_cut mrnaAcc geneName
| perl -p -e "s/\.\d+//g"
> %(tmpfilename2)s
'''
P.run()
statement = '''gunzip < %(infile_gtf)s
| awk -v FS="\\t" -v OFS="\\t" '
{ $2 = "protein_coding"; print } '
| cgat gtf2gtf
--method=remove-duplicates --duplicate-feature=ucsc
--log=%(outfile_gtf)s.log
--verbose=2
| cgat gtf2gtf
--method=add-protein-id --map-tsv-file=%(tmpfilename1)s
--log=%(outfile_gtf)s.log
--verbose=2
| cgat gtf2gtf
--method=rename-genes=gene
--map-tsv-file=%(tmpfilename2)s
--log=%(outfile_gtf)s.log
--verbose=2
| cgat gtf2gtf
--method=sort --sort-order=gene
| gzip
> %(outfile_gtf)s'''
if not os.path.exists(outfile_gtf):
P.run()
for infile, outfile in ((infile_pep, outfile_pep),
(infile_cdna, outfile_cdna)):
# remove numerical suffixes from identifiers
statement = '''gunzip < %(infile)s
| perl -p -e "s/\.\d+//g"
| gzip
> %(outfile)s'''
if not os.path.exists(outfile):
P.run()
table = "ensembl2refseq"
# use ENSEMBL mapping
if 0:
outf = open(tmpfilename1, "w")
reader = CSV.DictReader(
IOTools.openFile(infile_ensembl), dialect="excel-tab")
c = E.Counter()
outf.write(
"gene_id\ttranscript_id\trefseq_transcript_id\trefseq_protein_id\tccds_id\n")
for row in reader:
c.input += 1
gene_id, transcript_id, refseq_transcript_id, refseq_protein_id, ccds_id = \
[x.strip() for x in
(row["Ensembl Gene ID"],
row["Ensembl Transcript ID"],
row["RefSeq DNA ID"],
row["RefSeq Protein ID"],
row["CCDS ID"],
)]
if not (transcript_id and gene_id and refseq_transcript_id and refseq_protein_id):
c.skipped += 1
continue
c.output += 1
outf.write("%s\t%s\t%s\t%s\t%s\n" %
(gene_id, transcript_id, refseq_transcript_id, refseq_protein_id, ccds_id))
outf.close()
statement = '''cat < %(tmpfilename1)s
|cgat csv2db %(csv2db_options)s
--add-index=gene_id
--add-index=transcript_id
--add-index=refseq_transcript_id
--add-index=refseq_protein_id
--add-index=ccds_id
--table=%(table)s
> refseq.load'''
P.run()
E.info("%s" % str(c))
# use UCSC mapping
statement = '''gunzip < %(infile_map)s
| perl -p -i -e "s/\.\d+//g"
| awk 'BEGIN {printf("ccds_id\\tsrc_db\\tttranscript_id\\tprotein_id\\n")}
/^ccds/ {next} {print}'
|cgat csv2db %(csv2db_options)s
--add-index=ccds_id
--add-index=transcript_id
--add-index=protein_id
--table=%(table)s
> refseq.load'''
P.run()
os.unlink(tmpfilename1)
os.unlink(tmpfilename2)
if "refseq_filename_gtf" in PARAMS:
@split((PARAMS["refseq_filename_gtf"],
PARAMS["refseq_filename_pep"],
PARAMS["refseq_filename_cdna"],
PARAMS["refseq_filename_map"],
),
(PARAMS["ensembl_filename_gtf"],
PARAMS["ensembl_filename_pep"],
PARAMS["ensembl_filename_cdna"],
"refseq.load"))
def importRefseqFromUCSC(infiles, outfiles):
'''convert a refseq gtf formatted file into an ensembl like
gtf file.
        The refseq files should have been downloaded via the UCSC
        table browser.
Only unique refseq entries are used - all duplicates are
removed.
This method imports the following files:
gtf.gz, pep.fa.gz, cdna.fa.gz from the UCSC
It also requires:
* link.tsv.gz from the UCSC (table refLink)
to add peptide identifiers and gene numbers.
* ccdsinfo.tsv.gz from the UCSC (table ccdsInfo)
to add a map from transcripts to ENSEMBL genes
* refgene.tsv.gz from the UCSC (table refgene )
to add gene_name. The refgene table contains
most of the fields required for the gtf file,
but unfortunately, the UCSC parser does
not add it.
'''
infile_gtf, infile_pep, infile_cdna, infile_map = infiles
outfile_gtf, outfile_pep, outfile_cdna, outfile_load = outfiles
if not os.path.exists(outfile_gtf):
PipelineUCSC.importRefSeqFromUCSC(
infile_gtf, outfile_gtf, remove_duplicates=True)
for infile, outfile in ((infile_pep, outfile_pep),
(infile_cdna, outfile_cdna)):
# remove numerical suffixes from identifiers
statement = '''gunzip < %(infile)s
| perl -p -e "s/\.\d+//g"
| gzip
> %(outfile)s'''
if not os.path.exists(outfile):
P.run()
# table = "ensembl2refseq"
# use UCSC mapping
# statement = '''gunzip < %(infile_map)s
# | perl -p -i -e "s/\.\d+//g"
# | awk 'BEGIN {printf("ccds_id\\tsrc_db\\tttranscript_id\\tprotein_id\\n")}
# /^ccds/ {next} {print}'
# |cgat csv2db %(csv2db_options)s
# --add-index=ccds_id
# --add-index=transcript_id
# --add-index=protein_id
# --table=%(table)s
# > %(outfile_load)s'''
# P.run()
@files("%s.fasta" % PARAMS["genome"], "%s.fa" % PARAMS["genome"])
def indexGenome(infile, outfile):
'''index the genome for samtools.
Samtools does not like long lines, so create a new file
with split lines (what a waste).
'''
# statement = '''fold %(infile)s | perl -p -e "s/chr//" > %(outfile)s'''
statement = '''fold %(infile)s > %(outfile)s'''
P.run()
pysam.faidx(outfile)
######################################################################
######################################################################
######################################################################
@files(((None, "pseudogenes.load"),))
def importPseudogenes(infile, outfile):
'''import pseudogene data from pseudogenes.org'''
tmpfile = "pseudogenes.tsv"
# download
if not os.path.exists(tmpfile + ".gz"):
statement = '''
wget -O %(tmpfile)s http://tables.pseudogene.org/dump.cgi?table=Mouse56;
gzip %(tmpfile)s
''' % locals()
P.run()
tablename = P.toTable(outfile)
statement = '''
zcat %(tmpfile)s.gz
| perl -p -i -e "s/Parent Protein/protein_id/; s/Chromosome/contig/; s/Start Coordinate/start/; s/Stop Coordiante/end/"
|cgat csv2db %(csv2db_options)s
--table=%(tablename)s
--add-index=protein_id
> %(outfile)s
'''
P.run()
######################################################################
######################################################################
######################################################################
@files(((None, "mgi.import"),))
def importMGI(infile, outfile):
'''create via BIOMART'''
filename = "mgi_biomart.tsv"
if False:
R.library("biomaRt")
columns = {
"marker_symbol_107": "marker_symbol",
"marker_name_107": "marker_name",
"mgi_allele_id_att": "allele_id",
"allele_symbol_101": "allele_symbol",
"allele_name_101": "allele_name",
"allele_type_101": "allele_type",
"phenotype_id_106_att": "phenotype_id",
"ensembl_gene_id_103": "gene_id"}
keys = list(columns.keys())
mgi = R.useMart(biomart="biomart", dataset="markers")
result = R.getBM(attributes=keys, mart=mgi)
outf = open(filename, "w")
outf.write("\t".join([columns[x] for x in keys]) + "\n")
for data in zip(*[result[x] for x in keys]):
outf.write("\t".join(map(str, data)) + "\n")
outf.close()
if not os.path.exists(filename):
# associations need to be downloaded individually
R.library("biomaRt")
columns = {
"mgi_marker_id_att": "marker_id",
"marker_name_107": "marker_name",
"mgi_allele_id_att": "allele_id",
"allele_symbol_101": "allele_symbol",
"allele_name_101": "allele_name",
"allele_type_101": "allele_type",
"phenotype_id_106_att": "phenotype_id",
"ensembl_gene_id_103": "gene_id"}
def downloadData(filename, columns):
'''download data via biomart into filename.
translate column headers.'''
if os.path.exists(filename):
return
E.info("downloading data for %s" % filename)
keys = list(columns.keys())
mgi = R.useMart(biomart="biomart", dataset="markers")
result = R.getBM(attributes=keys,
mart=mgi)
if len(result.rx(keys[0])) == 0:
raise ValueError(
"no data for %s: using keys=%s" % (filename, keys))
outf = open(filename, "w")
outf.write("\t".join([columns[x] for x in keys]) + "\n")
for data in zip(*[result.rx(x) for x in keys]):
outf.write("\t".join(map(str, data)) + "\n")
outf.close()
downloadData("mgi_marker2allele.tsv",
{"mgi_marker_id_att": "marker_id",
"mgi_allele_id_att": "allele_id"})
downloadData("mgi_allele2phenotype.tsv",
{"mgi_allele_id_att": "allele_id",
"phenotype_id_106_att": "phenotype_id"})
downloadData("mgi_marker2gene.tsv",
{"mgi_marker_id_att": "marker_id",
"ensembl_gene_id_103": "gene_id"})
downloadData("mgi_markers.tsv",
{"mgi_marker_id_att": "marker_id",
"marker_symbol_107": "symbol",
"marker_name_107": "name",
"marker_type_107": "type",
})
downloadData("mgi_alleles.tsv",
{"mgi_allele_id_att": "allele_id",
"allele_symbol_101": "symbol",
"allele_name_101": "name",
"allele_type_101": "type"
})
downloadData("mgi_phenotypes.tsv",
{"phenotype_id_106_att": "phenotype_id",
"term_106": "term"})
for filename in glob.glob("mgi_*.tsv"):
tablename = filename[:-len(".tsv")]
E.info("loading %s" % tablename)
# remove duplicate rows
# remove rows where only the first field is set
statement = '''
perl -p -e "s/\\s+\\n/\\n/" < %(filename)s
| %(pipeline_scriptsdir)s/hsort 1
| uniq
| awk '{ for (x=2; x<=NF; x++) { if ($x != "") { print; break;} } }'
|cgat csv2db %(csv2db_options)s
--table=%(tablename)s
--add-index=marker_id
--add-index=allele_id
--add-index=gene_id
--map=allele_name:str
--map=symbol:str
--add-index=phenotype_id
>> %(outfile)s
'''
P.run()
@merge(None, "gene2omim.load")
def loadGene2Omim(infile, outfile):
'''download gene id - OMIM associations via BIOMART.
    Missing numerical entries (exported as -2147483648) are converted to
    empty strings.
'''
tablename = P.toTable(outfile)
columns = {
"ensembl_gene_id": "gene_id",
"mim_gene_accession": "mim_gene_id",
"mim_morbid_accession": "mim_morbid_id",
"mim_morbid_description": "mim_morbid_description",
}
data = Biomart.biomart_iterator(
list(columns.keys()), biomart="ensembl", dataset="hsapiens_gene_ensembl")
def transform_data(data):
for result in data:
for x in ("mim_gene_accession", "mim_morbid_accession"):
result[x] = ("", result[x])[result[x] >= 0]
result["mim_morbid_description"] = result[
"mim_morbid_description"].strip()
yield result
P.importFromIterator(
outfile, tablename, transform_data(data),
columns=columns, indices=("gene_id", ))
@merge(None, "orthologs.load")
def loadHumanOrthologs(infile, outfile):
'''download human2mouse orthologs
'''
tablename = P.toTable(outfile)
if "orthology_species" in PARAMS:
        # an orthology species is configured - download its 1:1
        # orthologs from human via biomart
species = PARAMS["orthology_species"]
columns = {
"ensembl_gene_id": "hs_gene_id",
"%s_ensembl_gene" % species: "gene_id",
"%s_orthology_type" % species: "orthology_type",
"%s_homolog_ds" % species: "ds",
}
data = Biomart.biomart_iterator(
list(columns.keys()), biomart="ensembl", dataset="hsapiens_gene_ensembl")
P.importFromIterator(
outfile, tablename, data,
columns=columns, indices=("hs_gene_id", "gene_id", ))
else:
# we are within human, create a set of default 1:1 orthologs
columns = {
"ensembl_gene_id": "hs_gene_id",
"gene_id": "gene_id",
"orthology_type": "orthology_type",
"ds": "ds",
}
dbh = connect()
statement = '''CREATE TABLE %(tablename)s AS
SELECT gene_id AS hs_gene_id,
gene_id AS gene_id,
"ortholog_one2one" AS orthology_type,
0 AS ds
FROM annotations.gene_info'''
Database.executewait(
dbh, "DROP TABLE IF EXISTS %(tablename)s" % locals())
Database.executewait(dbh, statement % locals())
Database.executewait(
dbh, "CREATE INDEX %(tablename)s_index1 ON %(tablename)s (hs_gene_id)" % locals())
Database.executewait(
dbh, "CREATE INDEX %(tablename)s_index2 ON %(tablename)s (hs_gene_id)" % locals())
P.touch(outfile)
###################################################################
###################################################################
###################################################################
# MAIN PIPELINE
###################################################################
###################################################################
###################################################################
############################################################
############################################################
############################################################
@merge(None, "seleno.list")
def buildSelenoList(infile, outfile):
'''export a list of seleno cysteine transcripts.'''
dbhandle = connect()
cc = dbhandle.cursor()
statement = '''
SELECT DISTINCT transcript_id
FROM annotations.transcript_info as t,
annotations.protein_stats as p
WHERE p.protein_id = t.protein_id AND
p.nU > 0
'''
outf = open(outfile, "w")
outf.write("transcript_id\n")
outf.write("\n".join([x[0] for x in cc.execute(statement)]) + "\n")
outf.close()
###################################################################
###################################################################
###################################################################
# Targets for prepare
###################################################################
###################################################################
###################################################################
###################################################################
@files(((PARAMS["ensembl_ensembl2uniprot"], "ensembl2uniprot.load"), ))
def loadEnsembl2Uniprot(infile, outfile):
'''load mapping from ENSEMBL transcripts ids to
uniprot ids.
This method expects an BioMart output file with the following
five columns:
Ensembl gene id,
Ensembl transcript id,
Uniprot Swissprot Id,
Uniprot Accession
Uniport/Trembl Accession
'''
table = P.toTable(outfile)
statement = '''gunzip
< %(infile)s
| perl -p -e
"s/Ensembl Gene ID/gene_id/;
s/Ensembl Transcript ID/transcript_id/;
s/UniProt\/SwissProt ID/swissprot_id/;
s/UniProt\/SwissProt Accession/swissprot_acc/;
s/UniProt\/TrEMBL Accession/trembl_acc/"
|cgat csv2db %(csv2db_options)s \
--add-index=gene_id \
--add-index=transcript_id \
--add-index=trembl_acc \
--table=%(table)s
> %(outfile)s
'''
P.run()
###################################################################
###################################################################
###################################################################
@files(os.path.join(PARAMS["annotations_dir"],
PARAMS_ANNOTATIONS["interface_geneset_all_gtf"]),
"annotations_bases.fasta")
def buildBaseAnnotations(infile, outfile):
"""build base annotations"""
to_cluster = True
dbname = outfile[:-len(".fasta")]
statement = """
gunzip < %(infile)s
| cgat gtf2fasta
--force-output
--genome=%(genome_dir)s/%(genome)s
--output-filename-pattern=annotations_bases.%%s
--log=%(outfile)s.log
| python %(toolsdir)s/index_fasta.py
--log=%(outfile)s.log
%(dbname)s - > %(outfile)s.log
"""
P.run()
###################################################################
###################################################################
###################################################################
@files(os.path.join(PARAMS["annotations_dir"],
PARAMS_ANNOTATIONS["interface_geneset_all_gtf"]),
"annotations_exons.gtf")
def buildExonAnnotations(infile, outfile):
"""build exon annotations"""
statement = """
gunzip < %(infile)s
| awk '$3 == "CDS"'
| cgat gff2gff
--method=sanitize
--sanitize-method=genome
--skip-missing
--genome-file=%(genome_dir)s/%(genome)s --log=%(outfile)s.log
| cgat gtf2gff
--method=exons
--restrict-source=protein_coding
--log=%(outfile)s.log
> %(outfile)s
"""
P.run()
###################################################################
###################################################################
###################################################################
@files(os.path.join(PARAMS["annotations_dir"],
PARAMS_ANNOTATIONS["interface_geneset_all_gtf"]),
"annotations_genes.gtf")
def buildGeneAnnotations(infile, outfile):
"""build gene annotations.
Merge exons per gene within the reference set. The
output includes the UTR and non-coding genes.
"""
statement = """
gunzip < %(infile)s
| cgat gtf2gtf
--method=merge-exons
--mark-utr
--log=%(outfile)s.log
| cgat gtf2gtf --method=set-transcript-to-gene
--log=%(outfile)s.log
| cgat gff2gff --skip-missing --method=sanitize
--sanitize-method=genome
--genome-file=%(genome_dir)s/%(genome)s
--log=%(outfile)s.log
| %(pipeline_scriptsdir)s/gff_sort gene-pos
> %(outfile)s
"""
P.run()
###################################################################
###################################################################
###################################################################
@files(buildGeneAnnotations, "annotations_genes.counts")
def makeGeneCounts(infile, outfile):
"""coun gene exon statistics.
"""
statement = """
cat < %(infile)s |\
cgat gtf2table \
--genome-file=genome \
--counter=length \
--log=%(outfile)s.log \
> %(outfile)s
"""
P.run()
###################################################################
###################################################################
###################################################################
@follows(buildBaseAnnotations, buildExonAnnotations)
@files([("variants.vcf.gz", "%s.annotations.gz" % x, x) for x in TRACKS])
def buildAnnotations(infile, outfile, sample):
"""annotate snps with gene set."""
to_cluster = True
bases = "annotations_bases"
statement = """
cgat snp2table
--input-format=vcf
--vcf-sample=%(sample)s
--vcf-file=%(infile)s
--genome-file=%(genome_dir)s/%(genome)s
--annotations-tsv-file=%(bases)s
--log=%(outfile)s.log
| gzip > %(outfile)s
"""
P.run()
###################################################################
###################################################################
###################################################################
@transform(buildAnnotations,
suffix('.annotations.gz'),
'_annotations.load')
def loadAnnotations(infile, outfile):
'''load annotations'''
tablename = P.toTable(outfile)
statement = '''gunzip
< %(infile)s
|cgat csv2db %(csv2db_options)s
--quick
--map=gene_id:str
--add-index=gene_id
--table=%(tablename)s
--map=base_qualities:text
> %(outfile)s
'''
P.run()
###################################################################
###################################################################
###################################################################
@transform(buildAnnotations,
suffix('.annotations.gz'),
'_annotations.summary')
def summarizeAnnotations(infile, outfile):
'''compute summary stats for annotation files.'''
to_cluster = True
# count substitutions for each category
statement = '''gunzip
< %(infile)s
| cgat csv_cut code reference_base genotype variant_type
| awk '$4 == "variant_type" { printf("%%s-%%s-%%s\\tcounts\\n", $1,$2,$3); }
$4 == "E" || $4 == "O" {printf("%%s-%%s-%%s\\t1\\n", $1,$2,$3)}'
| sort
| uniq -c
| awk 'BEGIN{ printf("code-reference_base-genotype\\tcounts\\n" ); } \
$2 !~ /^code/ {printf("%%s\\t%%i\\n",$2,$1);}'
| perl -p -i -e "s/-/\\t/g unless (/^#/)"
> %(outfile)s
'''
P.run()
###################################################################
###################################################################
###################################################################
@transform(summarizeAnnotations,
suffix('_annotations.summary'),
'_annotations_summary.load')
def loadAnnotationsSummary(infile, outfile):
'''load annotations'''
tablename = P.toTable(outfile)
statement = '''cat
< %(infile)s
|cgat csv2db %(csv2db_options)s
--add-index=code
--table=%(tablename)s
> %(outfile)s
'''
P.run()
###################################################################
###################################################################
###################################################################
@follows(buildSelenoList)
@files([("variants.vcf.gz", "%s.effects.gz" % x, x) for x in TRACKS])
def buildEffects(infile, outfile, sample):
"""annotate snps with gene set."""
to_cluster = True
seleno = "seleno.list"
transcripts = os.path.join(PARAMS["annotations_dir"],
PARAMS_ANNOTATIONS["interface_geneset_cds_gtf"])
statement = """
cgat snp2counts
--genome-file=%(genome_dir)s/%(genome)s
--input-format=vcf
--vcf-sample=%(sample)s
--module=transcript-effects
--seleno-tsv-file=%(seleno)s
--vcf-file=%(infile)s
--exons-file=%(transcripts)s
--output-filename-pattern=%(outfile)s.%%s.gz
--log=%(outfile)s.log
| gzip
> %(outfile)s
"""
P.run()
###################################################################
###################################################################
###################################################################
@transform(buildEffects,
suffix(".effects.gz"),
"_effects.load")
def loadEffects(infile, outfile):
'''load transcript effects into tables.'''
root = infile[:-len(".effects.gz")]
statement = '''
cgat csv2db %(csv2db_options)s \
--from-zipped \
--add-index=transcript_id \
--table=%(root)s_effects \
< %(infile)s > %(outfile)s
'''
P.run()
for suffix in ("cds", "intron", "splicing", "translation"):
statement = '''
gunzip
< %(infile)s.%(suffix)s.gz
|cgat csv2db %(csv2db_options)s
--allow-empty-file
--add-index=transcript_id
--table=%(root)s_effects_%(suffix)s
--ignore-column=seq_na
--ignore-column=seq_aa
>> %(outfile)s
'''
P.run()
###################################################################
###################################################################
###################################################################
@merge(buildEffects, "effects.load")
def mergeEffects(infiles, outfile):
'''load transcript effects into single table.'''
tablename = P.toTable(outfile)
outf = IOTools.openFile('effects.tsv.gz', 'w')
first = True
for f in infiles:
track = P.snip(os.path.basename(f), ".effects.gz")
if not os.path.exists(f):
E.warn("File %s missing" % f)
continue
lines = [x for x in gzip.open(f, "r").readlines()]
if first:
outf.write("%s\t%s" % ("track", lines[0]))
first = False
for i in range(1, len(lines)):
outf.write("%s\t%s" % (track, lines[i]))
outf.close()
tmpfilename = outf.name
statement = '''zcat %(tmpfilename)s
| cgat csv2db %(csv2db_options)s
--add-index=transcript_id
--table=%(tablename)s
> %(outfile)s'''
P.run()
for suffix in ("cds", "intron", "splicing", "translation"):
outf = IOTools.openFile('effects_' + suffix + '.tsv.gz', 'w')
first = True
for f in infiles:
track = P.snip(os.path.basename(f), ".effects.gz")
statfile = P.snip(f, ".gz") + suffix + ".gz"
if not os.path.exists(statfile):
E.warn("File %s missing" % statfile)
continue
lines = [x for x in gzip.open(statfile, "r").readlines()]
if first:
outf.write("%s\t%s" % ("track", lines[0]))
first = False
for i in range(1, len(lines)):
outf.write("%s\t%s" % (track, lines[i]))
outf.close()
tmpfilename = outf.name
statement = '''zcat %(tmpfilename)s
| cgat csv2db %(csv2db_options)s
--allow-empty-file
--add-index=transcript_id
--table=%(tablename)s_%(suffix)s
--ignore-column=seq_na
--ignore-column=seq_aa
>> %(outfile)s'''
P.run()
###################################################################
###################################################################
###################################################################
@follows(buildSelenoList)
@files([("variants.vcf.gz", "%s.alleles" % x, x) for x in TRACKS])
def buildAlleles(infile, outfile, sample):
"""annotate snps with gene set."""
to_cluster = True
seleno = "seleno.list"
transcripts = os.path.join(PARAMS["annotations_dir"],
PARAMS_ANNOTATIONS["interface_geneset_cds_gtf"])
statement = """zcat %(transcripts)s
| cgat gtf2alleles
--genome-file=%(genome_dir)s/%(genome)s
--seleno-tsv-file=%(seleno)s
--output-filename-pattern=%(outfile)s.%%s.gz
--vcf-file=%(infile)s
--vcf-sample=%(sample)s
> %(outfile)s
"""
P.run()
###################################################################
###################################################################
###################################################################
@transform(buildAlleles,
suffix(".alleles"),
"_alleles.load")
def loadAlleles(infile, outfile):
'''load allele.'''
tablename = outfile[:-len(".load")]
statement = '''gunzip
< %(infile)s.table.gz
| perl -p -e "s/False/0/g; s/True/1/g;"
|cgat csv2db %(csv2db_options)s
--add-index=gene_id
--add-index=transcript_id
--ignore-column=cds
--ignore-column=peptide
--table=%(tablename)s
> %(outfile)s
'''
P.run()
###################################################################
###################################################################
###################################################################
@transform(loadAlleles,
suffix("_alleles.load"),
"_alleles_transcripts.load")
def summarizeAllelesPerTranscript(infile, outfile):
    '''summarize allele effects on a per-transcript level.
The following fields are exclusive:
is_wildtype
both alleles are wildtype
is_knockout
both alleles knocked out
is_truncated
both alleles truncated or truncated and knocked out
is_affected
one allele is truncated or knocked out
The other fields are not necessarily exclusive, for example there
could be transcripts with one knocked out allele and one wildtype
allele, such that ``is_nmd_affected``, ``is_affected`` and ``has_wildtype``
are all true.
'''
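    # Illustrative allele combinations (sketch of the classification below):
    #   NMD knockout + NMD knockout   -> is_knockout = 1
    #   stop-truncated + NMD knockout -> is_truncated = 1
    #   NMD knockout + wildtype       -> is_affected = 1, has_wildtype = 1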
tablename = outfile[:-len(".load")]
track = infile[:-len("_alleles.load")]
dbhandle = connect()
statement = '''
CREATE TABLE %(tablename)s AS
SELECT DISTINCT
transcript_id,
COUNT(DISTINCT allele_id) AS nalleles,
CASE WHEN SUM( is_nmd_knockout) = 2 THEN 1 ELSE 0 END AS is_nmd_knockout,
CASE WHEN SUM( is_nmd_knockout) >= 1 THEN 1 ELSE 0 END AS is_nmd_affected,
CASE WHEN SUM( is_splice_truncated) = 2 THEN 1 ELSE 0 END AS is_splice_truncated,
CASE WHEN SUM( is_splice_truncated) >= 1 THEN 1 ELSE 0 END AS is_splice_affected,
CASE WHEN SUM( is_stop_truncated) = 2 THEN 1 ELSE 0 END AS is_stop_truncated,
CASE WHEN SUM( is_stop_truncated) >= 1 THEN 1 ELSE 0 END AS is_stop_affected,
CASE WHEN SUM( is_wildtype ) = 2 THEN 1 ELSE 0 END AS is_wildtype,
CASE WHEN SUM( is_wildtype ) >= 1 THEN 1 ELSE 0 END AS has_wildtype,
contig AS contig,
strand AS strand,
GROUP_CONCAT( reference_first_stop_start ) AS stop_codons_start,
GROUP_CONCAT( reference_first_stop_end ) AS stop_codons_end,
0 AS is_knockout,
0 AS is_truncated,
0 AS is_affected
FROM %(track)s_alleles AS a
GROUP BY transcript_id
''' % locals()
Database.executewait(
dbhandle, "DROP TABLE IF EXISTS %(tablename)s" % locals())
Database.executewait(dbhandle, statement)
Database.executewait(
dbhandle, "CREATE INDEX %(tablename)s_transcript_id ON %(tablename)s (transcript_id)" % locals())
Database.executewait(
dbhandle, "UPDATE %(tablename)s SET is_knockout = is_nmd_knockout" % locals())
Database.executewait( dbhandle, '''UPDATE %(tablename)s SET is_truncated =
is_splice_truncated OR is_stop_truncated OR
(is_splice_affected AND is_stop_affected) OR
(is_splice_affected AND is_nmd_affected) OR
(is_stop_affected AND is_nmd_affected)
''' % locals())
Database.executewait(dbhandle, 'UPDATE %(tablename)s SET is_affected ='
'(is_nmd_affected OR is_splice_affected OR is_stop_affected) AND NOT'
'(is_knockout or is_truncated)' % locals())
dbhandle.commit()
P.touch(outfile)
###################################################################
###################################################################
###################################################################
@transform(summarizeAllelesPerTranscript,
suffix("_alleles_transcripts.load"),
"_alleles_genes.load")
def summarizeAllelesPerGene(infile, outfile):
'''summarize effects on a per-gene level.'''
tablename = outfile[:-len(".load")]
track = infile[:-len(".load")]
dbhandle = connect()
statement = '''
CREATE TABLE %(tablename)s AS
SELECT DISTINCT
i.gene_id AS gene_id,
COUNT( DISTINCT a.transcript_id ) AS ntranscripts,
CASE WHEN SUM( is_nmd_knockout ) = COUNT(DISTINCT a.transcript_id) THEN 1 ELSE 0 END AS is_nmd_knockout,
SUM( is_nmd_knockout ) AS is_nmd_affected,
CASE WHEN SUM( is_splice_truncated) = COUNT(DISTINCT a.transcript_id) THEN 1 ELSE 0 END AS is_splice_truncated,
SUM( is_splice_truncated ) AS is_splice_affected,
CASE WHEN SUM( is_stop_truncated ) = COUNT(DISTINCT a.transcript_id) THEN 1 ELSE 0 END AS is_stop_truncated,
SUM( is_stop_truncated ) AS is_stop_affected,
CASE WHEN SUM( is_wildtype ) = COUNT(DISTINCT a.transcript_id) THEN 1 ELSE 0 END AS is_wildtype,
SUM( is_wildtype ) AS has_wildtype,
contig AS contig,
strand AS strand,
GROUP_CONCAT( stop_codons_start ) AS stop_codons_start,
GROUP_CONCAT( stop_codons_end ) AS stop_codons_end,
0 AS is_knockout,
0 AS is_truncated,
0 AS is_affected
FROM %(track)s AS a, annotations.transcript_info AS i
WHERE i.transcript_id = a.transcript_id
GROUP BY i.gene_id
''' % locals()
Database.executewait(
dbhandle, "DROP TABLE IF EXISTS %(tablename)s" % locals())
Database.executewait(dbhandle, statement)
Database.executewait(
dbhandle, "CREATE INDEX %(tablename)s_gene_id ON %(tablename)s (gene_id)" % locals())
Database.executewait(
dbhandle, "UPDATE %(tablename)s SET is_knockout = is_nmd_knockout" % locals())
Database.executewait( dbhandle, '''UPDATE %(tablename)s SET is_truncated =
is_splice_truncated OR is_stop_truncated OR
(is_splice_affected + is_stop_affected >= ntranscripts)
''' % locals())
Database.executewait(dbhandle, 'UPDATE %(tablename)s SET is_affected ='
'(is_nmd_affected OR is_splice_affected OR is_stop_affected) AND NOT'
'(is_knockout or is_truncated)' % locals())
dbhandle.commit()
P.touch(outfile)
@merge(summarizeAllelesPerGene,
"summary_alleles_genes.load")
def combineSummaryAllelesPerGene(infiles, outfile):
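    '''combine the per-strain gene summaries into gene x strain indicator
    tables (one table per field, currently is_knockout and is_truncated),
    with a 0/1 column per strain and a per-gene row total.'''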
dbhandle = connect()
tracks = [x[:-len("_alleles_genes.load")] for x in infiles]
tablename_prefix = P.toTable(outfile)
fields = ", ".join(["%s INT" % x for x in tracks])
statement_create = '''
CREATE TABLE %(tablename)s
( gene_id TEXT,
total INT,
%(fields)s )'''
statement_insert = '''
INSERT INTO %(tablename)s
VALUES( '%(gene_id)s', %(total)i, %(matrix)s )
'''
statement_allgenes = "SELECT DISTINCT gene_id FROM gene_info"
for field in ("is_knockout", "is_truncated"):
tablename = "%s_%s" % (tablename_prefix, field)
E.info("creating %s" % tablename)
all_genes = dict([(x[0], set())
for x in Database.executewait(dbhandle, statement_allgenes % locals())])
Database.executewait(
dbhandle, "DROP TABLE IF EXISTS %(tablename)s" % locals())
Database.executewait(dbhandle, statement_create % locals())
for track in tracks:
statement = """SELECT gene_id
FROM %(track)s_alleles_genes
WHERE %(field)s"""
genes = [x[0]
for x in Database.executewait(dbhandle, statement % locals())]
for gene in genes:
all_genes[gene].add(track)
for gene_id, data in all_genes.items():
matrix = [0] * len(tracks)
for x, track in enumerate(tracks):
if track in data:
matrix[x] = 1
total = sum(matrix)
matrix = ",".join([str(x) for x in matrix])
Database.executewait(dbhandle, statement_insert % locals())
Database.executewait(dbhandle,
"CREATE INDEX %(tablename)s_index1 on %(tablename)s (gene_id)" % locals())
P.touch(outfile)
###################################################################
###################################################################
###################################################################
@transform(loadEffects,
suffix("_effects.load"),
"_effects_genes.load")
def summarizeEffectsPerGene(infile, outfile):
'''summarize effects on a per-gene level.'''
tablename = outfile[:-len(".load")]
track = infile[:-len("_effects.load")]
dbhandle = connect()
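    # Sketch of the rule used in the CASE expressions below: a transcript is
    # counted as NMD knockout/affected when a premature stop codon
    # (stop_min/stop_max, in codons) truncates the CDS upstream of the last
    # exon, the usual trigger for nonsense-mediated decay.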
statement = '''
CREATE TABLE %(tablename)s AS
SELECT DISTINCT
gene_id,
COUNT(*) AS ntranscripts,
MIN(e.nalleles) AS min_nalleles,
MAX(e.nalleles) AS max_nalleles,
MIN(e.stop_min) AS min_stop_min,
MAX(e.stop_min) AS max_stop_min,
MIN(e.stop_max) AS min_stop_max,
MAX(e.stop_max) AS max_stop_max,
SUM( CASE WHEN stop_min > 0 AND cds_len - stop_min * 3 < last_exon_start THEN 1
ELSE 0 END) AS nmd_knockout,
SUM( CASE WHEN stop_max > 0 AND cds_len - stop_max * 3 < last_exon_start THEN 1
ELSE 0 END) AS nmd_affected
FROM annotations.transcript_info as i,
%(track)s_effects AS e
WHERE i.transcript_id = e.transcript_id
GROUP BY i.gene_id
''' % locals()
Database.executewait(
dbhandle, "DROP TABLE IF EXISTS %(tablename)s" % locals())
Database.executewait(dbhandle, statement)
Database.executewait(
dbhandle, "CREATE INDEX %(tablename)s_gene_id ON %(tablename)s (gene_id)" % locals())
dbhandle.commit()
P.touch(outfile)
###################################################################
@follows(buildGeneAnnotations)
@files_re(glob.glob('*.pileup.gz'),
'(.*).pileup.gz',
[r'\1.pileup.gz', "annotations_genes.gtf"],
r'\1.genecounts.gz')
def makeSNPCountsPerGene(infiles, outfile):
"""count snps within genes"""
infile_snps, infile_genes = infiles
statement = """
gunzip < %(infile_snps)s |\
grep -v "^NT" |\
cgat snp2counts \
--genome-file=genome \
--exons-file=%(ensembl_filename_gtf)s \
--log=%(outfile)s.log |\
gzip > %(outfile)s
"""
P.run()
############################################################################
@follows(mkdir(os.path.join(PARAMS["scratchdir"], "malis.dir")))
@merge(buildAlleles, "malis.map")
def setupMultipleAlignment(infiles, outfile):
'''prepare input files for multiple alignment computations.
This script does some id-mapping to resolve coordinates.
Basically, each genome is separated into two alleles.
Gene_id's will be suffixed with the allele_id. This ensures
    that exons of a gene with multiple transcripts will be resolved
correctly with consistent coordinates.
From an alignment point of view, the two alleles of the genes will be treated
independently, but transcripts within a gene will be merged correctly at exon
boundaries, again on a per-allele basis.
Later, when collecting the results, the allele id is moved from the gene to
the transcript.
'''
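    # Illustrative naming sketch (identifiers made up): with SEPARATOR = "|",
    # gene ENSMUSG0001 / allele 1 becomes gene id "ENSMUSG0001-1" and a
    # transcript from strain "strainA" is written as
    # "strainA|ENSMUST0001|ENSMUSG0001-1|WT" ("...|VA" for variant alleles).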
targetdir = os.path.join(PARAMS["scratchdir"], "malis.dir")
filepool_gtf = IOTools.FilePoolMemory(
"%(targetdir)s/cluster_%%s.dir/cluster_%%s.gtf" % locals())
filepool_pep = IOTools.FilePoolMemory(
"%(targetdir)s/cluster_%%s.dir/cluster_%%s_pep.fasta" % locals())
filepool_cds = IOTools.FilePoolMemory(
"%(targetdir)s/cluster_%%s.dir/cluster_%%s_cds.fasta" % locals())
outf = open(outfile, "w")
outf.write("id\tgroup_id\n")
map_gene2group = {}
map_seqid2code = {}
x = 0
counts = E.Counter()
for infile in infiles:
track = infile[:-len(".alleles")]
E.info("adding track %s" % track)
reader = CSV.DictReader(
open(infile + ".table", "rU"), dialect="excel-tab")
for row in reader:
counts.input += 1
gene_id, allele_id, transcript_id = row[
"gene_id"], row["allele_id"], row["transcript_id"]
if gene_id not in map_gene2group:
map_gene2group[gene_id] = len(map_gene2group)
group_id = map_gene2group[gene_id]
new_gene_id = "-".join((gene_id, allele_id))
if row["is_wildtype"] == "1":
code = "WT"
if row["is_nmd_knockout"] == "1":
counts.nmd_knockouts += 1
continue
else:
code = "VA"
seq_id = SEPARATOR.join((track, transcript_id, new_gene_id))
map_seqid2code[seq_id] = code
seq_id = SEPARATOR.join((seq_id, code))
outf.write("%s\t%i\n" % (seq_id, group_id))
filepool_pep.write(str(group_id), ">%s\n%s\n" %
(seq_id, row["peptide"]))
filepool_cds.write(str(group_id), ">%s\n%s\n" %
(seq_id, row["cds"]))
counts.written += 1
with open(infile + ".gtf") as inf:
for gtf in GTF.iterator(inf):
group_id = map_gene2group[gtf.gene_id]
new_gene_id = "-".join((gtf.gene_id, gtf["allele_id"]))
seq_id = SEPARATOR.join(
(track, gtf.transcript_id, new_gene_id))
seq_id = SEPARATOR.join((seq_id, map_seqid2code[seq_id]))
gtf.transcript_id = seq_id
filepool_gtf.write(group_id, str(gtf) + "\n")
x += 1
# if x > 2: break
E.info("writing data")
filepool_gtf.close()
filepool_pep.close()
filepool_cds.close()
outf.close()
counts.ngroups = len(map_gene2group)
counts.nsequences = len(map_seqid2code)
E.info("%s\n" % (str(counts)))
@transform(os.path.join(PARAMS["scratchdir"], "malis.dir", "*", "*.gtf"),
suffix(".gtf"),
".mali")
def buildMultipleAlignments(infile, outfile):
'''build multiple alignments.'''
track = infile[:-len(".gtf")]
filename_cds = track + "_cds.fasta"
filename_pep = track + "_pep.fasta"
to_cluster = True
statement = '''
cgat align_transcripts \
--gtf-file=%(infile)s \
--cds-gtf-file=%(filename_cds)s \
--force-map \
--verbose=2 \
--output-filename-pattern=%(track)s_%%s.fasta \
--output-section=final_aa \
--output-section=final_na \
--output-section=aligned_aa \
--output-section=aligned_na \
--output-format="plain-fasta" \
< %(filename_pep)s > %(outfile)s
'''
P.run()
@merge(buildMultipleAlignments, "variants")
def buildMultipleAlignmentVariantColumns(infile, outfile):
    '''build variant column data from the multiple alignments.'''
track = infile[:-len(".gtf")]
filename_cds = track + "_cds.fasta"
filename_pep = track + "_pep.fasta"
to_cluster = True
statement = '''
cgat malis2mali \
--gtf-file=%(infile)s \
--cds-gtf-file=%(filename_cds)s \
--force-map \
--verbose=2 \
--output-filename-pattern=%(track)s_%%s.fasta \
--output-section=final_aa \
--output-section=final_na \
--output-section=aligned_aa \
--output-section=aligned_na \
--output-format="plain-fasta" \
< %(filename_pep)s > %(outfile)s
'''
P.run()
@merge(buildMultipleAlignments, "malis.result")
def mergeMultipleAlignments(infiles, outfile):
'''collect multiple alignment results into files that
are compatible with OPTIC.
'''
for section in ("final_aa", "final_na", "aligned_aa", "aligned_na"):
outfilename = outfile + "." + section + ".gz"
counter = E.Counter()
E.info("processing %s into %s" % (section, outfilename))
outf = gzip.open(outfilename, "w")
outf.write(
"cluster_id\tspecies\ttranscript_id\tgene_id\tcode\tsequence\n")
for infile in infiles:
counter.input += 1
dirname, filename = os.path.split(infile)
            cluster_id = re.match(r"cluster_(\d+)\.mali", filename).groups()[0]
infilename = os.path.join(
dirname, "cluster_%s_%s.fasta" % (cluster_id, section))
# E.debug( "adding %s - %s from %s" % (filename, cluster_id, infilename) )
if not os.path.exists(infilename):
counter.missing += 1
E.warn("multiple alignment %s missing" % infilename)
continue
for entry in FastaIterator.FastaIterator(open(infilename, "r")):
parts = entry.title.split(SEPARATOR)
if len(parts) == 4:
species, transcript_id, gene_id, code = entry.title.split(
SEPARATOR)
elif len(parts) == 2:
species, gene_id = entry.title.split(SEPARATOR)
                    transcript_id = gene_id
code = "CG"
# transfer the allele_id from the gene to the transcript
gene_id, allele_id = gene_id.split("-")
transcript_id += "-" + allele_id
outf.write("\t".join(map(str,
(cluster_id,
species,
transcript_id,
gene_id,
code,
entry.sequence))) + "\n")
counter.output += 1
outf.close()
E.info("%s: %s" % (outfilename, str(counter)))
P.touch(outfile)
@merge('*_pileup.load',
"genome.maf.gz")
def buildMAF(infiles, outfile):
tracks = " ".join(["--track=%s" % x[:-len(".load")] for x in infiles])
statement = '''
gunzip
< transcripts.gtf.gz
| cgat gtf2gtf
--method=merge-transcripts
| %(cmd-farm)s --split-at-lines=100 --log=%(outfile)s.log --is-binary -v 10
"cgat snp2maf
--genome=genome
%(tracks)s
--reference=mm9
--is-gtf
--pattern-identifier='\(\\\\\\S+\)_pileup'
--log=%(outfile)s.log" | gzip
> %(outfile)s
'''
P.run()
###################################################################
###################################################################
###################################################################
@merge(summarizeAllelesPerGene,
("export/nmd_knockouts.tsv.gz",
"export/nmd_knockouts_summary.tsv.gz",
))
def exportKnockoutLists(infiles, outfiles):
dbhandle = connect()
outf = gzip.open(outfiles[0], "w")
headers = ("strain",
"gene_id",
"gene_name",
"ntranscripts",
"contig",
"strand",
"stops-start",
"stops-end")
outf.write("%s\n" % "\t".join(headers))
for infile in infiles:
track = infile[:-len(".load")]
strain = track[:-len("_alleles_genes")]
statement = '''
SELECT DISTINCT '%(strain)s',
g.gene_id,
i.gene_name,
g.ntranscripts,
g.contig, g.strand,
g.stop_codons_start,
g.stop_codons_end
FROM %(track)s as g,
annotations.transcript_info AS i
WHERE g.gene_id = i.gene_id AND g.is_nmd_knockout
''' % locals()
outf.write("\n".join(["\t".join(map(str, x))
for x in Database.executewait(dbhandle, statement).fetchall()]) + "\n")
outf.close()
headers = ("gene_id",
"gene_name",
"nmd_knockout_total",
"strains")
outf = gzip.open(outfiles[1], "w")
outf.write("%s\n" % "\t".join(headers))
columns = ["%s_nmd_knockout" % t for t in TRACKS]
fields = ",".join(columns)
statement = '''
SELECT DISTINCT gene_id, gene_name, nmd_knockout_total, %(fields)s
FROM view_genes WHERE nmd_knockout_total > 0
''' % locals()
data = list(dbhandle.execute(statement))
d = dict(
list(zip(["gene_id", "gene_name", "nmd_knockout_total"] + columns, list(zip(*data)))))
c = []
for x in range(len(d["gene_id"])):
s = []
for t in TRACKS:
if d["%s_nmd_knockout" % t][x] != 0:
s.append(t)
c.append(",".join(s))
for t in TRACKS:
del d["%s_nmd_knockout" % t]
for d, strains in zip(data, c):
outf.write("\t".join(map(str, d[:3])) + "\t%s\n" % strains)
outf.close()
###################################################################
###################################################################
###################################################################
@merge(loadEffects, "polyphen.input")
def buildPolyphenInput(infiles, outfile):
'''build polyphen input file.
    SNPs across all species are aggregated into a single
file to avoid multiple submissions for the same variant.
Mapping to Uniprot ids was not successful - 40% of the
SNPs would have been lost. Hence I map to ensembl protein
identifiers. Note that the sequence file is then to be
submitted to POLYPHEN as well.
Note that this method outputs 1-based coordinates for polyphen,
while the coordinates in the .map file are still 0-based.
SNPs are assigned a snp_id and a locus_id. The snp_id refers
to the SNP within a peptide sequence while the locus_id refers
to the genomic location. If there are alternative
transcripts overlapping a SNP, the same SNP will get two
snp_ids, but the same locus_id. As the peptide background might
be different for the same SNP depending on the transcript,
its effect needs to be predicted twice.
'''
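    # Keying sketch (names and positions are hypothetical): a SNP at contig
    # "chr1", position 100 with variant codon "GAT" gets the locus key
    # "chr1-100-GAT" and hence one locus_id; if two transcripts cover it, the
    # peptide-level keys differ (e.g. "PROT1-34-GAT" and "PROT2-21-GAT"), so
    # two snp_ids share that single locus_id.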
statement = '''SELECT
transcript_id,
cds_start,
cds_end,
orig_codons,
variant_codons,
orig_na,
variant_na,
contig,
snp_position
FROM %(table)s_cds
WHERE variant_code = '=' AND code = 'N'
'''
dbhandle = connect()
cc = dbhandle.cursor()
infiles.sort()
# uniprot mapping:
# map_transcript2id = dict(
# cc.execute( "SELECT transcript_id, trembl_acc FROM ensembl2uniprot WHERE
# trembl_acc IS NOT NULL").fetchall() )
# ensembl mapping
map_transcript2id = dict(
cc.execute("SELECT transcript_id, protein_id FROM annotations.transcript_info WHERE protein_id IS NOT NULL").fetchall())
total_counts = E.Counter()
notfound, found = set(), set()
outf_map = open(outfile + ".map", "w")
outf_map.write(
"snp_id\ttrack\ttranscript_id\tprotein_id\tprotein_pos\tlocus_id\tcontig\tpos\tphase\n")
outf = open(outfile, "w")
snps = {}
locus_ids = {}
for infile in infiles:
table = P.toTable(infile)
track = table[:-len("_effects")]
cc.execute(statement % locals())
counts = E.Counter()
snp_id = 0
for transcript_id, cds_start, cds_end, orig_codons, variant_codons, orig_na, variant_na, contig, pos in cc:
counts.input += 1
if transcript_id not in map_transcript2id:
notfound.add(transcript_id)
counts.not_found += 1
continue
if "," in variant_codons:
counts.heterozygous += 1
continue
for phase in range(0, 3):
if orig_na[phase].lower() != variant_na[phase].lower():
break
pid = map_transcript2id[transcript_id]
# one-based coordinates
peptide_pos = int(math.floor(cds_start / 3.0)) + 1
key = "%s-%i-%s" % (pid, peptide_pos, variant_codons)
if key in snps:
snp_id = snps[key]
else:
snp_id = len(snps)
snps[key] = snp_id
outf.write("snp%010i\t%s\t%i\t%s\t%s\n" %
(snp_id,
pid,
peptide_pos,
orig_codons,
variant_codons,
))
counts.output += 1
locus_key = "%s-%i-%s" % (contig, pos, variant_codons)
if locus_key not in locus_ids:
locus_ids[locus_key] = len(locus_ids)
# use 0-based coordinates throughout, including peptide pos
outf_map.write("snp%010i\t%s\t%s\t%s\t%i\tloc%010i\t%s\t%i\t%i\n" %
(snp_id,
track,
transcript_id,
pid,
peptide_pos - 1,
locus_ids[locus_key],
contig,
pos,
phase))
found.add(transcript_id)
total_counts += counts
E.info("%s: %s" % (table, str(counts)))
outf.close()
outf_map.close()
E.info("%s: transcripts: %s found, %i not found" % (table,
len(found),
len(notfound)))
E.info("total=%s, snp_ids=%i, locus_ids=%i" %
(str(total_counts), len(snps), len(locus_ids)))
if notfound:
        E.warn("%i transcripts had SNPs that were ignored because there was no protein identifier" %
               len(notfound))
E.warn("notfound: %s" % ",".join(notfound))
statement = '''sort -k2,2 -k3,3n %(outfile)s > %(outfile)s.tmp; mv %(outfile)s.tmp %(outfile)s'''
P.run()
###################################################################
###################################################################
###################################################################
@transform(buildPolyphenInput, suffix(".input"), ".features")
def buildPolyphenFeatures(infile, outfile):
'''run polyphen on the cluster.
To do this, first send uniref to all nodes:
python ~/cgat/cluster_distribute.py
--collection=andreas
/net/cpp-group/tools/polyphen-2.0.18/nrdb/uniref100*.{pin,psd,psi,phr,psq,pal}
'''
nsnps = len([x for x in open(infile)])
to_cluster = True
stepsize = max(int(nsnps / 200000.0), 1000)
job_array = (0, nsnps, stepsize)
E.info("running array jobs on %i snps" % nsnps)
scratchdir = os.path.join(os.path.abspath("."), "scratch")
try:
os.mkdir(scratchdir)
except OSError:
pass
resultsdir = outfile + ".dir"
try:
os.mkdir(resultsdir)
except OSError:
pass
filename_peptides = os.path.join(PARAMS["annotations_dir"],
PARAMS_ANNOTATIONS["interface_peptides_fasta"])
statement = '''
%(polyphen_home)s/bin/run_pph.pl
-s %(filename_peptides)s
-b %(polyphen_blastdb)s
-d %(scratchdir)s
%(infile)s > %(resultsdir)s/%(outfile)s.$SGE_TASK_ID 2> %(resultsdir)s/%(outfile)s.err.$SGE_TASK_ID
'''
P.run()
to_cluster = False
job_array = None
statement = '''find %(resultsdir)s -name "*.err.*" -exec cat {} \; > %(outfile)s.log'''
P.run()
statement = '''find %(resultsdir)s -not -name "*.err.*" -exec cat {} \; > %(outfile)s'''
P.run()
###################################################################
###################################################################
###################################################################
# do not run in parallel. run_weka.pl creates a $testfile
# that is not unique. run_weka.pl and pph2arff.pl could either
# be patched or the following jobs run in sequence.
@jobs_limit(1, "polyphen")
@files([(buildPolyphenFeatures, "polyphen_%s.output.gz" % x, x)
for x in P.asList(PARAMS["polyphen_models"])])
def runPolyphen(infile, outfile, model):
'''run POLYPHEN on feature tables to classify SNPs.
'''
to_cluster = True
# options
# -f: feature set, default is F11
# -c: classifier, default is NBd (Naive Bayes with discretization)
# -l: model name, default is HumDiv
statement = '''
%(polyphen_home)s/bin/run_weka.pl
-l %(polyphen_home)s/models/%(model)s.UniRef100.NBd.f11.model
%(infile)s
| gzip
> %(outfile)s
2> %(outfile)s.log
'''
P.run()
###################################################################
###################################################################
###################################################################
@transform(buildPolyphenInput, suffix(".input"), "_map.load")
def loadPolyphenMap(infile, outfile):
'''load polyphen input data.'''
table = P.toTable(outfile)
statement = '''
cgat csv2db %(csv2db_options)s
--add-index=snp_id
--add-index=track,transcript_id
--add-index=contig,pos
--add-index=protein_id
--add-index=transcript_id
--table=%(table)s
< %(infile)s.map
> %(outfile)s
'''
P.run()
###################################################################
###################################################################
###################################################################
@transform(runPolyphen, suffix(".output.gz"), ".load")
def loadPolyphen(infile, outfile):
'''load polyphen results.'''
table = P.toTable(outfile)
statement = '''
gunzip
< %(infile)s
| perl -p -e "s/o_acc/protein_id/; s/ +//g; s/^#//;"
|cgat csv2db %(csv2db_options)s
--add-index=snp_id
--add-index=protein_id
--table=%(table)s
--map=effect:str
> %(outfile)s
'''
P.run()
###################################################################
###################################################################
###################################################################
@transform(loadPolyphen, suffix(".load"), ".genestats")
def analysePolyphen(infile, outfile):
'''compute enrichment of SNPs within genes
    and of deleterious SNPs among the SNPs of each gene.
del: enrichment of deleterious snps within snps per gene
len: enrichment of snps within genes
com: enrichment of deleterious snps within gene
'''
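    # Background rates computed below (aggregated over all genes):
    #   del_p = total deleterious SNPs / total SNPs
    #   len_p = total SNPs / total protein length
    #   com_p = total deleterious SNPs / total protein length
    # Each gene is then tested against a binomial null; e.g. the "del"
    # P-value is P(X >= ndel) with X ~ Binomial(nsnps, del_p), computed as
    # scipy.stats.binom.sf(ndel - 1, nsnps, del_p).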
table = P.toTable(infile)
tablename_map = "polyphen_map"
dbhandle = connect()
cc = dbhandle.cursor()
statement = '''
SELECT i.gene_id,
COUNT(DISTINCT map.locus_id) as nsnps,
COUNT(DISTINCT case t.prediction when 'possiblydamaging' then map.locus_id when 'probablydamaging' then map.locus_id else NULL end) AS ndeleterious,
MAX(s.length)
FROM %(table)s as t,
%(tablename_map)s as map,
annotations.protein_stats as s,
annotations.transcript_info as i
WHERE map.snp_id = t.snp_id AND
i.transcript_id = map.transcript_id AND
s.protein_id = map.protein_id
GROUP BY i.gene_id
''' % locals()
data = cc.execute(statement).fetchall()
statement = '''SELECT DISTINCT i.gene_id, MAX(s.length)
FROM annotations.transcript_info AS i, annotations.protein_stats AS s
WHERE s.protein_id = i.protein_id
GROUP BY i.gene_id'''
gene_ids = cc.execute(statement).fetchall()
total_nsnps = sum([x[1] for x in data])
total_ndel = sum([x[2] for x in data])
total_length = sum([x[1] for x in gene_ids])
del_p = float(total_ndel) / total_nsnps
len_p = float(total_nsnps) / total_length
com_p = float(total_ndel) / total_length
E.info("del: background probability: %i/%i = %f" %
(total_ndel, total_nsnps, del_p))
E.info("len: background probability: %i/%i = %f" %
(total_nsnps, total_length, len_p))
E.info("com: background probability: %i/%i = %f" %
(total_ndel, total_length, com_p))
outf = open(outfile, "w")
outf.write("\t".join(("gene_id", "code",
"length", "nsnps", "ndel",
"del_p", "del_pvalue", "del_qvalue",
"len_p", "len_pvalue", "len_qvalue",
"com_p", "com_pvalue", "com_qvalue", )) + "\n")
del_pvalues, len_pvalues, com_pvalues = [], [], []
for gene_id, nsnps, ndel, length in data:
# use -1, because I need P( x >= X)
# sf = 1 - cdf and cdf = P( x <= X ), thus sf = 1 - P( x <= X ) = P (x
# > X ).
del_pvalues.append(scipy.stats.binom.sf(ndel - 1, nsnps, del_p))
len_pvalues.append(
scipy.stats.binom.sf(nsnps - 1, int(round(length)), len_p))
com_pvalues.append(
scipy.stats.binom.sf(ndel - 1, int(round(length)), com_p))
if len(del_pvalues) > 10:
del_qvalues = Stats.doFDR(del_pvalues).mQValues
else:
E.warn("no FDR computed for del")
del_qvalues = del_pvalues
if len(len_pvalues) > 10:
len_qvalues = Stats.doFDR(len_pvalues).mQValues
else:
        E.warn("no FDR computed for len")
len_qvalues = len_pvalues
if len(com_pvalues) > 10:
        com_qvalues = Stats.doFDR(com_pvalues).mQValues
else:
E.warn("no FDR computed for com")
com_qvalues = com_pvalues
fdr = PARAMS["polyphen_fdr"]
found = set()
for a, del_pvalue, del_qvalue, len_pvalue, len_qvalue, com_pvalue, com_qvalue in \
zip(data,
del_pvalues, del_qvalues,
len_pvalues, len_qvalues,
com_pvalues, com_qvalues,
):
gene_id, nsnps, ndel, length = a
found.add(gene_id)
del_p = float(ndel) / nsnps
len_p = float(nsnps) / length
code = "".join([str(int(x < fdr))
for x in (del_qvalue, len_qvalue, com_qvalue)])
outf.write("\t".join((gene_id,
code,
"%i" % int(round(length)),
"%i" % int(nsnps),
"%i" % int(ndel),
"%6.4f" % del_p,
"%6.4g" % del_pvalue,
"%6.4g" % del_qvalue,
"%6.4f" % len_p,
"%6.4g" % len_pvalue,
"%6.4g" % len_qvalue,
"%6.4f" % com_p,
"%6.4g" % com_pvalue,
"%6.4g" % com_qvalue,
)) + "\n")
# add missing genes:
code = "---"
for gene_id, length in gene_ids:
if gene_id in found:
continue
outf.write("\t".join((gene_id,
code,
"%i" % int(round(length)),
"%i" % 0,
"%i" % 0,
"%6.4f" % 0,
"%6.4g" % 1,
"%6.4g" % 1,
"%6.4f" % 0,
"%6.4g" % 1,
"%6.4g" % 1,
"%6.4f" % 0,
"%6.4g" % 1,
"%6.4g" % 1,
)) + "\n")
outf.close()
###################################################################
###################################################################
###################################################################
@transform(analysePolyphen, suffix(".genestats"), "_genestats.load")
def loadPolyphenAnalysis(infile, outfile):
'''load polyphen analysis results.'''
table = P.toTable(outfile)
statement = '''
cat < %(infile)s
|cgat csv2db %(csv2db_options)s
--add-index=gene_id
--map=code:str
--table=%(table)s
> %(outfile)s
'''
P.run()
###################################################################
###################################################################
###################################################################
# @files( ( ( buildPeptideFasta, "panther.scores"), ))
# def preparePanther( infile, outfile ):
# '''lookup peptide sequences with panther.
# The actual snps will get scored in the next step.
# This step takes a while, but could be sped up
# easily by parallelization.
# '''
# to_cluster = True
# if type(infile) in (types.ListType, types.TupleType):
# infile = infile[0]
# tmpdir = P.getTempDir( "." )
# statement = '''
# (PERL5LIB=%(panther_home)s/lib:$PERL5LIB;
# perl %(panther_home)s/pantherScore.pl
# -l %(panther_library)s
# -D B -V -n
# -i %(infile)s
# -o %(outfile)s
# -T %(tmpdir)s )
# '''
# P.run()
# shutil.rmtree( tmpdir )
# ###################################################################
# ###################################################################
# ###################################################################
# @files( ( ( (buildPolyphenInput, preparePanther), "panther.output" ), ) )
# def runPanther( infiles, outfile):
# '''run PANTHER analysis.
# '''
# to_cluster = True
# filename_snps, filename_scores = infiles
# tmpdir = P.getTempDir( "." )
# peptides = PARAMS["peptides"]
# tmpfilename_snps = P.getTempFilename(".")
# statement = '''
# awk '{printf("%%s|%%s|%%s|%%s;%%s\\n",
# $1,$2,$3,$4,$5);}'
# < %(filename_snps)s > %(tmpfilename_snps)s
# '''
# P.run()
# statement = '''
# (PERL5LIB=%(panther_home)s/lib:$PERL5LIB;
# PATH=%(panther_home)s:$PATH;
# awk '{printf("%%s|%%s|%%s|%%s;%%s\\n",
# $1,$2,$3,$4,$5);}'
# < %(filename_snps)s
# | %(cmd-farm)s --split-at-lines=2000 --log=%(outfile)s.log -v 10 --output-header --env=PERL5LIB --env=PATH
# "perl %(panther_home)s/snp_analysis.pl
# -l %(panther_library)s
# -c %(filename_scores)s
# -s %%STDIN%%
# -f %(peptides)s
# -b %(panther_home)s/BLOSUM62
# -V
# -p %(panther_home)s/uprior.9comp
# -o %%STDOUT%%
# -T %(tmpdir)s"
# > %(outfile)s 2> %(outfile)s.log )
# '''
# P.run()
# shutil.rmtree( tmpdir )
# os.unlink( tmpfilename_snps )
# ###################################################################
# ###################################################################
# ###################################################################
# @transform( runPanther, suffix(".output"), ".load")
# def loadPanther( infile, outfile ):
# '''load panther results.'''
# table = P.toTable( outfile )
# statement = '''
# perl -p -e "s/snpId/snp_id/; s/seqId/protein_id/; s/HMM /hmm/g;"
# < %(infile)s
# |cgat csv2db %(csv2db_options)s
# --add-index=snp_id
# --add-index=protein_id
# --table=%(table)s
# > %(outfile)s
# '''
# P.run()
###################################################################
###################################################################
###################################################################
@split(loadPolyphenMap, ("counts_shared.matrix", "counts_segregation.matrix", "counts_pid.matrix", "counts_distance.matrix", "counts.tree"
))
def buildSharedSNPMatrix(infiles, outfiles):
'''build matrix of shared coding nonsynonymous SNPs.
Counts are per locus id.
Percent identities are only within coding segregating loci
and thus do not reflect the real divergence.
'''
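    # Counting sketch (tracks "A" and "B" are hypothetical): if locus L1
    # segregates in A and B while L2 segregates only in A, the loop below
    # yields matrix[(A, A)] = 2, matrix[(B, B)] = 1 and
    # matrix[(A, B)] = matrix[(B, A)] = 1; diagonal entries count loci per
    # track, off-diagonal entries count loci shared by a pair of tracks.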
dbhandle = connect()
cc = dbhandle.cursor()
segregating_sites = cc.execute(
'SELECT COUNT( DISTINCT locus_id) FROM polyphen_map').fetchone()[0]
statement = '''SELECT DISTINCT locus_id, track FROM polyphen_map ORDER BY locus_id'''
cc.execute(statement)
matrix = collections.defaultdict(int)
for k, vals in itertools.groupby(cc, key=lambda x: x[0]):
tracks = [x[1] for x in list(vals)]
for t1 in tracks:
matrix[(t1, t1)] += 1
if len(tracks) > 1:
for t1, t2 in itertools.combinations(tracks, 2):
matrix[(t1, t2)] += 1
matrix[(t2, t1)] += 1
all_tracks = set([x[0] for x in list(matrix.keys())] + [x[1]
for x in list(matrix.keys())])
# output matrix with shared SNPs.
outf = open(outfiles[0], "w")
outf.write("track\t%s\n" % "\t".join(all_tracks))
for track1 in all_tracks:
outf.write("%s" % track1)
for track2 in all_tracks:
outf.write("\t%i" % matrix[(track1, track2)])
outf.write("\n")
outf.close()
# output matrix with shared segregating sites as
# distance matrix
outf = open(outfiles[1], "w")
outf.write("track\t%s\n" % "\t".join(all_tracks))
for track1 in all_tracks:
outf.write("%s" % track1)
for track2 in all_tracks:
if track1 == track2:
outf.write("\t%i" % 0)
else:
outf.write("\t%i" %
(segregating_sites - matrix[(track1, track2)]))
outf.write("\n")
outf.close()
# output matrix as percent identity matrix
# percent identity is given as
# segregating sites - sites where strains differ = segregating_sites - (matrix[i,i] + matrix[j,j] - 2 * matrix[i,j])
# simplifies to:
    # segsites - matrix[i,i] - matrix[j,j] + 2 * matrix[i,j],
# divided by the total number of segregating sites
outf = open(outfiles[2], "w")
outf.write("track\t%s\n" % "\t".join(all_tracks))
pids = {}
for track1 in all_tracks:
outf.write("%s" % track1)
for track2 in all_tracks:
a = segregating_sites - \
(matrix[(track1, track1)] + matrix[(track2, track2)] -
2 * matrix[(track1, track2)])
pid = 100.0 * a / segregating_sites
outf.write("\t%6.4f" % pid)
pids[(track1, track2)] = pid
outf.write("\n")
outf.close()
# distance matrix
outf = open(outfiles[3], "w")
outf.write("track\t%s\n" % "\t".join(all_tracks))
for track1 in all_tracks:
outf.write("%s" % track1)
for track2 in all_tracks:
val = 100.0 - pids[(track1, track2)]
outf.write("\t%6.4f" % val)
outf.write("\n")
outf.close()
outfile_distance, outfile_tree = outfiles[3], outfiles[4]
# build tree
statement = '''cgat matrix2matrix
--output-format=phylip
< %(outfile_distance)s
| cgat matrix2tree
--method=nj
> %(outfile_tree)s
'''
P.run()
###################################################################
###################################################################
###################################################################
# Enrichment analysis
###################################################################
@files(((None, "workspace_genomic.bed", "genomic"),
(None, "workspace_cds.bed", "cds"),
))
def buildQTLWorkspaces(infile, outfile, workspace):
PEnrichment.buildWorkSpace(outfile, workspace)
@files((("%s.fasta" % PARAMS["genome"], "workspace_isochores.bed.gz"), ))
def buildEnrichmentIsochores(infile, outfile):
PEnrichment.buildIsochoresGC(infile, outfile)
@follows(mkdir("enrichment.dir"), loadPolyphen, loadPolyphenMap)
@transform("*_effects.load",
regex("(.*)_effects.load"),
r"enrichment.dir/\1.deleterious.bed.gz")
def buildDeleteriousSNPs(infile, outfile):
track = infile[:-len("_effects.load")]
outf = gzip.open(outfile, "w")
outf.write("track name=%s.deleterious\n" % track)
dbhandle = connect()
cc = dbhandle.cursor()
statement = '''SELECT DISTINCT map.contig, map.pos
FROM polyphen_map AS map,
polyphen_HumDiv as result
WHERE map.track = '%(track)s'
AND map.snp_id = result.snp_id
AND (result.prediction = 'possiblydamaging'
OR result.prediction = 'probablydamaging')
''' % locals()
cc.execute(statement)
for contig, pos in cc:
outf.write("%s\t%i\t%i\n" % (contig, pos, pos + 1))
outf.close()
@follows(mkdir("enrichment.dir"), loadPolyphen, loadPolyphenMap)
@transform("*_effects.load",
regex("(.*)_effects.load"),
r"enrichment.dir/\1.benign.bed.gz")
def buildBenignSNPs(infile, outfile):
track = infile[:-len("_effects.load")]
outf = gzip.open(outfile, "w")
outf.write("track name=%s.benign\n" % track)
dbhandle = connect()
cc = dbhandle.cursor()
statement = '''SELECT DISTINCT map.contig, map.pos
FROM polyphen_map AS map,
polyphen_HumDiv as result
WHERE map.track = '%(track)s'
AND map.snp_id = result.snp_id
AND NOT (result.prediction = 'possiblydamaging'
OR result.prediction = 'probablydamaging')
''' % locals()
cc.execute(statement)
for contig, pos in cc:
outf.write("%s\t%i\t%i\n" % (contig, pos, pos + 1))
outf.close()
@merge((buildBenignSNPs, buildDeleteriousSNPs),
("enrichment.dir/all.benign.bed.gz",
"enrichment.dir/all.deleterious.bed.gz",
"enrichment.dir/all.ambiguous.bed.gz", ),
)
def mergeSNPs(infiles, outfiles):
tmp1 = P.getTempFilename()
tmp2 = P.getTempFilename()
statement = '''zcat enrichment.dir/mouse*.benign.bed.gz
| grep -v "track"
| sort -k 1,1 -k2,2n
| uniq
> %(tmp1)s
'''
P.run()
statement = '''zcat enrichment.dir/mouse*.deleterious.bed.gz
| grep -v "track"
| sort -k 1,1 -k2,2n
| uniq
> %(tmp2)s
'''
P.run()
statement = '''intersectBed -a %(tmp1)s -b %(tmp2)s
| awk 'BEGIN {printf("track name=all.ambiguous\\n");} {print}'
> enrichment.dir/all.ambiguous.bed'''
P.run()
statement = '''intersectBed -v -a %(tmp1)s -b enrichment.dir/all.ambiguous.bed
| awk 'BEGIN {printf("track name=all.benign\\n");} {print}'
| gzip
> enrichment.dir/all.benign.bed.gz'''
P.run()
statement = '''intersectBed -v -a %(tmp2)s -b enrichment.dir/all.ambiguous.bed
| awk 'BEGIN {printf("track name=all.deleterious\\n");} {print}'
| gzip
> enrichment.dir/all.deleterious.bed.gz'''
P.run()
statement = '''gzip enrichment.dir/all.ambiguous.bed'''
P.run()
os.unlink(tmp1)
os.unlink(tmp2)
@merge((buildBenignSNPs, buildDeleteriousSNPs),
"enrichment.dir/isochores.bed")
def buildSNPDensityIsochores(infile, outfile):
'''build isochores with SNP density.'''
statement = '''
cgat windows2gff
--genome=%(genome)s
--fixed-width-windows=1000000
--output-format=bed
> tmp.bed'''
P.run()
statement = '''
zcat enrichment.dir/mouse*.benign.bed.gz enrichment.dir/mouse*.deleterious.bed.gz
| grep -v "track"
| sort -k 1,1 -k2,2n
| uniq > tmp2.bed
'''
P.run()
@merge(["/net/cpp-compute/backup/andreas/projects/mousestrains/data/qtl/jonathans/qtl_merged.bed",
"/net/cpp-compute/backup/andreas/projects/mousestrains/data/qtl/jonathans/qtl_full.bed",
"/net/cpp-compute/backup/andreas/projects/mousestrains/data/qtl/jonathans/qtl_rest.bed",
], "qtl.summary.tsv")
def QTLSummary(infiles, outfile):
for infile in infiles:
basename = os.path.basename(infile)
statement = '''
cgat bed2gff
< %(infile)s
| cgat gff2histogram
--method=all
--output-filename-pattern=%(outfile)s.%(basename)s
--log=%(outfile)s.log
> %(outfile)s
'''
P.run()
@follows(buildQTLWorkspaces)
@merge((buildDeleteriousSNPs, buildBenignSNPs, mergeSNPs), "qtl.table")
def runGATOnQTLs(infiles, outfile):
    '''run enrichment analysis using the QTL definitions from
Jonathan Flint's group.
'''
segments = IOTools.flatten(infiles)
workspaces = ["workspace_cds.bed", ]
annotations = ["/net/cpp-compute/backup/andreas/projects/mousestrains/data/qtl/jonathans/qtl_merged.bed",
"/net/cpp-compute/backup/andreas/projects/mousestrains/data/qtl/jonathans/qtl_full.bed",
"/net/cpp-compute/backup/andreas/projects/mousestrains/data/qtl/jonathans/qtl_rest.bed",
]
workspaces = " ".join(["--workspace-file=%s" % x for x in workspaces])
annotations = " ".join(["--annotation-file=%s" % x for x in annotations])
segments = " ".join(["--segment-file=%s" % x for x in segments])
to_cluster = True
job_options = "-l mem_free=8000M"
statement = '''gatrun.py
%(workspaces)s
%(segments)s
%(annotations)s
--output-stats=annotations
--output-stats=workspaces
--output-filename-pattern=enrichment.dir/%%s.tsv
--force-output
--num-samples=10000
> %(outfile)s
'''
P.run()
@follows(buildQTLWorkspaces)
@merge(mergeSNPs, "qtl_small.table")
def runGATOnQTLsSmall(infiles, outfile):
    '''run enrichment analysis using the QTL definitions from
Jonathan Flint's group.
'''
segments = IOTools.flatten(infiles)
workspaces = ["workspace_cds.bed", ]
annotations = ["/net/cpp-compute/backup/andreas/projects/mousestrains/data/qtl/jonathans/qtl_merged.bed",
"/net/cpp-compute/backup/andreas/projects/mousestrains/data/qtl/jonathans/qtl_full.bed",
"/net/cpp-compute/backup/andreas/projects/mousestrains/data/qtl/jonathans/qtl_rest.bed",
]
workspaces = " ".join(["--workspace-file=%s" % x for x in workspaces])
annotations = " ".join(["--annotation-file=%s" % x for x in annotations])
segments = " ".join(["--segment-file=%s" % x for x in segments])
to_cluster = True
job_options = "-l mem_free=8000M"
statement = '''gatrun.py
%(workspaces)s
%(segments)s
%(annotations)s
--output-stats=annotations
--output-stats=workspaces
--output-filename-pattern=enrichment.dir/%%s.tsv
--force-output
--num-samples=10000
> %(outfile)s
'''
P.run()
@transform((runGATOnQTLs, ), suffix(".table"), ".load")
def loadGATOnQTLs(infile, outfile):
table = P.toTable(outfile)
statement = '''
cat < %(infile)s
|cgat csv2db %(csv2db_options)s
--add-index=track
--add-index=annotation
--table=%(table)s
> %(outfile)s
'''
P.run()
stat_files = glob.glob("enrichment.dir/stats_*.tsv")
for stat_file in stat_files:
basename = os.path.basename(stat_file)
table = os.path.splitext(basename)[0]
statement = '''
cat < %(stat_file)s
|cgat csv2db %(csv2db_options)s
--add-index=track
--add-index=contig
--table=%(table)s
>> %(outfile)s
'''
P.run()
###################################################################
###################################################################
###################################################################
# gene list analyses
###################################################################
############################################################
@files(((importMGI, "assignments.mgi"),))
def createMGI(infile, outfile):
'''get GO assignments from MGI'''
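    # The query below walks the MGI relation chain gene -> marker -> allele ->
    # phenotype and emits one row per (gene_id, phenotype term) pair in the
    # simple assignments format that the gene list analysis tasks glob as
    # "assignments.*".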
dbhandle = connect()
statement = '''
SELECT DISTINCT 'MPheno.ontology', m2g.gene_id, a2p.phenotype_id, p.term, 'NA'
FROM mgi_marker2gene as m2g,
mgi_marker2allele as m2a,
mgi_allele2phenotype as a2p,
mgi_phenotypes as p
WHERE m2g.marker_id = m2a.marker_id AND
a2p.allele_id = m2a.allele_id AND
p.phenotype_id = a2p.phenotype_id
'''
cc = dbhandle.cursor()
data = cc.execute(statement).fetchall()
outf = open(outfile, "w")
outf.write("\n".join(["\t".join(x) for x in data]) + "\n")
outf.close()
####################################################################
def buildGeneMatrix(tracks, analysis, statement, outfile):
'''build a gene matrix.
A gene matrix is an n x m matrix for n genes and m gene lists.
Each column contains a 1 if a gene is present in a gene list,
otherwise it is 0.
'''
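    # Layout sketch (track name is hypothetical): with tracks ("strain1",)
    # and analysis labels ("benign", "unknown") the header becomes
    #   gene_id  strain1_benign  strain1_unknown
    # and each row holds 0/1 flags recording membership of one gene in each
    # gene list.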
dbhandle = connect()
cc = dbhandle.cursor()
all_genes = [x[0] for x in cc.execute(
'''SELECT DISTINCT gene_id FROM annotations.gene_info''' % locals() )]
gene2row = dict([(x[1], x[0]) for x in enumerate(all_genes)])
matrix = numpy.zeros(
(len(all_genes), len(analysis) * len(tracks)), numpy.int)
col = 0
for track in tracks:
for label, field_where in analysis:
genes = [x[0] for x in cc.execute(statement % locals())]
for gene_id in genes:
matrix[gene2row[gene_id]][col] = 1
col += 1
outf = open(outfile, "w")
outf.write("gene_id\t%s\n" % "\t".join("%s_%s" %
(x, y[0]) for x, y in itertools.product(tracks, analysis)))
for gene_id in all_genes:
outf.write("%s\t%s\n" %
(gene_id, "\t".join(map(str, matrix[gene2row[gene_id]]))))
outf.close()
####################################################################
@merge(summarizeAllelesPerGene, "effects.genematrix")
def buildGeneMatrixEffects(infiles, outfile):
    '''build gene matrix from polyphen prediction data.
Note that this analysis is confounded by gene length.
'''
analysis = (("benign", "benign"),
("probablydamaging", "probablydamaging"),
("possiblydamaging", "possiblydamaging"),
("unknown", "unknown"))
tracks = [x[:-len("_alleles_genes.load")] for x in infiles]
statement = '''
SELECT DISTINCT i.gene_id
FROM
transcript_info AS i,
polyphen_map AS map,
polyphen_HumVar as polyphen
WHERE
polyphen.snp_id = map.snp_id AND
map.track = '%(track)s' AND
map.transcript_id = i.transcript_id AND
prediction = '%(field_where)s' '''
buildGeneMatrix(tracks, analysis, statement, outfile)
####################################################################
@merge(summarizeAllelesPerGene, "alleles.genematrix")
def buildGeneMatrixAlleles(infiles, outfile):
'''build gene matrix from alleles results
``options`` is a tuple of (``track``, ``analysis``, ``ontology``)
``analysis`` can be:
stoptruncated
genes that are truncated due to stops
nmdknockout
genes that are knocked out due to NMD
splicetruncated
genes that are truncated due to deleted splice sites
knockout
any of the above
Note that the analysis here needs to be background adjusted.
NMD transcripts and splice truncated transcripts are only
multiple exon transcripts, while stoptruncated ones are
only single exon ones.
'''
analysis = (("stoptruncated", "e.is_truncated"),
("nmdknockout", "e.is_nmd_knockout"),
("splicetruncated", "e.is_splice_truncated"),
("knockout", "(e.is_nmd_knockout or e.is_truncated or e.is_splice_truncated)"))
tracks = [x[:-len("_alleles_genes.load")] for x in infiles]
statement = '''
SELECT DISTINCT e.gene_id
FROM
%(track)s_alleles_genes AS e
WHERE
%(field_where)s
'''
buildGeneMatrix(tracks, analysis, statement, outfile)
####################################################################
@merge(summarizeEffectsPerGene, "consequences.genematrix")
def buildGeneMatrixConsequences(infiles, outfile):
    '''build gene matrix from effects results
nmdknockouttranscript
genes for which one transcript has been knocked out
due to NMD
nmdaffectedtranscript
genes in which one transcript is affected by NMD
nmdknockoutgenes
genes in which all transcripts have been knocked out
due to NMD
Note that the analysis here needs to be background adjusted.
For example, NMD transcripts are only multiple exon transcripts.
'''
analysis = (("nmdknockouttranscript", "e.nmd_knockout > 0"),
("nmdaffectedtranscript", "e.nmd_affected > 0"),
("nmdknockoutgenes", "e.nmd_knockout = e.ntranscripts"))
tracks = [x[:-len("_effects_genes.load")] for x in infiles]
statement = '''
SELECT DISTINCT e.gene_id
FROM
%(track)s_effects_genes AS e
WHERE
%(field_where)s
'''
buildGeneMatrix(tracks, analysis, statement, outfile)
####################################################################
@follows(buildGeneMatrixConsequences,
buildGeneMatrixAlleles,
buildGeneMatrixEffects)
@files([((x, y), "%s_vs_%s.gla" % (re.sub(".genematrix", "", x),
re.sub("assignments.", "", y)))
for x, y in
itertools.product(
glob.glob("*.genematrix"),
glob.glob("assignments.*"))
if not y.endswith(".log")])
def runGeneListAnalysis(infiles, outfile):
'''run a gene list analysis.'''
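    # The @files decorator above pairs every *.genematrix file with every
    # assignments.* file (skipping *.log), e.g. "alleles.genematrix" combined
    # with "assignments.mgi" produces the output "alleles_vs_mgi.gla".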
genematrix, assignments = infiles
to_cluster = True
try:
options = "--fdr-lambda=%(genelist_analysis_qvalue_lambda)f" % PARAMS
except TypeError:
options = ""
statement = '''
cgat genelist_analysis
--format=matrix
--assignments-tsv-file=%(assignments)s
--fdr
--fdr-method=%(genelist_analysis_qvalue_method)s
--log=%(outfile)s.log
%(options)s
< %(genematrix)s
> %(outfile)s
'''
P.run()
###########################################################################
@transform(runGeneListAnalysis, suffix(".gla"), "_gla.load")
def loadGeneListAnalysis(infile, outfile):
'''load gene list analysis results.'''
table = P.toTable(outfile)
statement = '''
cat < %(infile)s
|cgat csv2db %(csv2db_options)s
--table=%(table)s
--add-index=gene_list
--add-index=pvalue
--add-index=fdr
> %(outfile)s
'''
P.run()
# ############################################################################
# @merge( runGOAnalysesOnAlleles, "alleles_go.load")
# def loadGOs( infile, outfile ):
# '''load go results.'''
# tablename = P.toTable( outfile )
# PGO.loadGOs( infile, outfile, tablename )
# ############################################################################
# @transform( runGOAnalysesOnAlleles, suffix(".go"), "_go.load")
# def loadGO( infile, outfile ):
# '''load go results.'''
# tablename = P.toTable( outfile )
# PGO.loadGO( infile, outfile, tablename )
# ############################################################################
# @transform( runGOAnalysesOnAlleles, suffix(".goslim"), "_goslim.load")
# def loadGOSlim( infile, outfile ):
# '''load goslim results.'''
# tablename = P.toTable( outfile )
# PGO.loadGO( infile, outfile, tablename )
# ############################################################################
# @files( ((loadGOs, "goresults.table"),))
# def mergeGO( infile, outfile ):
# '''merge all GO anlyses.
# * collect all P-Values for all categories and experiments.
# * compute stats on it
# '''
# dbhandle = sqlite3.connect( PARAMS["database_name"] )
# statement = '''SELECT track, geneset, annotationset, category, min(pover,punder)
# FROM alleles_go'''
# cc = dbhandle.cursor()
# data = cc.execute(statement).fetchall()
# pvalues = [ x[4] for x in data ]
# E.info( "analysing %i pvalues" % len(pvalues ))
# fdr = Stats.doFDR( pvalues )
# E.info( "got %i qvalues" % len(fdr.mQValues ))
# for d, qvalue in zip( data, fdr.mQValues ):
# if qvalue > 0.05: continue
# print data, qvalue
# Database.executewait( dbhandle, '''ALTER TABLE %(table)s ADD COLUMN is_coding FLOAT''' % locals())
###################################################################
###################################################################
###################################################################
###################################################################
###################################################################
###################################################################
@follows(loadHumanOrthologs,
loadGene2Omim)
def prepare():
pass
@follows(buildEffects, loadEffects, mergeEffects, summarizeEffectsPerGene)
def consequences():
pass
@follows(buildAlleles, loadAlleles,
summarizeAllelesPerTranscript,
summarizeAllelesPerGene,
combineSummaryAllelesPerGene)
def alleles():
pass
@follows(loadPolyphen, loadPolyphenMap, loadPolyphenAnalysis)
def effects():
pass
@follows(loadAnnotations, loadAnnotationsSummary)
def annotations():
pass
@follows(consequences, effects, alleles, annotations)
def full():
pass
@follows(buildQTLWorkspaces,
runGATOnQTLs,
runGATOnQTLsSmall)
def qtl():
pass
@follows(buildGeneMatrixConsequences,
buildGeneMatrixAlleles,
buildGeneMatrixEffects,
runGeneListAnalysis,
loadGeneListAnalysis,
)
def go():
pass
@files([(None, "clone.log"), ])
def clone(infile, outfile):
'''clone a pipeline using symbolic links.'''
src_dir, level = sys.argv[-2:]
if not os.path.exists(src_dir):
raise IOError("directory '%s' does not exist" % src_dir)
if not os.path.exists(os.path.join(src_dir, "pipeline.ini")):
raise IOError("directory '%s' is not a pipeline" % src_dir)
if level in ("data", ):
P.execute("ln -fs %(src_dir)s/*.pileup.* . ")
P.execute("ln -fs %(src_dir)s/genome.* . ")
###################################################################
@merge((alleles, prepare), "genes.views")
def createViewGenes(infile, outfile):
'''create view in database for genes.
This view aggregates all information on a per-gene
basis. There is only a single entry per gene.
'''
    # cannot create views across multiple databases, so use a table instead
view_type = "TABLE"
dbhandle = connect()
Database.executewait(
dbhandle, "DROP %(view_type)s IF EXISTS view_genes" % locals())
knockouts = ",".join(
["nmd.%s AS %s_nmd_knockout" % (track, track) for track in TRACKS])
statement = '''
CREATE %(view_type)s view_genes AS
SELECT i.gene_id AS gene_id,
i.gene_name AS gene_name,
nmd.total AS nmd_knockout_total,
%(knockouts)s,
human_ortho.hs_gene_id AS hs_gene_id,
human_ortho.ds AS hs_ds,
omim.mim_gene_id as omim_gene_id,
omim.mim_morbid_description as omim_description,
omim.mim_morbid_id as omim_morbid_id
FROM annotations.gene_info AS i,
summary_alleles_genes_is_knockout AS nmd ON i.gene_id = nmd.gene_id
LEFT JOIN orthologs AS human_ortho ON
human_ortho.gene_id = i.gene_id AND
human_ortho.orthology_type = "ortholog_one2one"
LEFT JOIN gene2omim as omim ON omim.gene_id = human_ortho.hs_gene_id
'''
Database.executewait(dbhandle, statement % locals())
@follows(createViewGenes)
def views():
pass
@follows(mkdir("report"))
def build_report():
'''build report from scratch.'''
E.info("starting documentation build process from scratch")
P.run_report(clean=True)
@follows(mkdir("report"))
def update_report():
'''update report.'''
E.info("updating documentation")
P.run_report(clean=False)
if __name__ == "__main__":
# P.checkFiles( ("genome.fasta", "genome.idx" ) )
sys.exit(P.main(sys.argv))
| mit | -7,170,703,611,820,490,000 | 31.705549 | 163 | 0.494159 | false |
schaabs/sandbox | net/sandbox.keyvault/python/repl/key_vault_crypto.py | 1 | 1939 | import base64
import datetime
import sys
import argparse
from azure.keyvault.generated.models import KeyVaultErrorException
from python.key_vault_agent import KeyVaultAgent
from azure.keyvault.generated import KeyVaultClient
CLIENT_ID = '8fd4d3c4-efea-49aa-b1de-2c33c22da56e'
class KeyVaultCryptoAgent(KeyVaultAgent):
def __init__(self, client_id):
self._initialize(client_id)
def encrypt(self, f_in, f_out, vault_name, key_name, key_version=None):
vault = self.get_vault(vault_name)
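        # The plaintext is base64-encoded (newlines stripped) before the Key
        # Vault encrypt call and the result is base64-decoded before being
        # written out; note that this relies on the encrypt call returning
        # base64-decodable bytes and on the payload fitting within the RSA1_5
        # size limit of the key.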
buff = f_in.read()
buff = base64.encodebytes(buff)
buff = buff.replace(b'\n', b'')
try:
buff = self.data_client.encrypt(vault.properties.vault_uri, key_name, key_version or '', 'RSA1_5', buff)
except KeyVaultErrorException as e:
print(str(e))
buff = base64.decodebytes(buff)
f_out.write(buff)
def _parse_args(argv):
parser = argparse.ArgumentParser()
parser.add_argument('action', choices=['encrypt', 'decrypt'], help='specifies whether to encrypt or decrypt the specified "in" file')
parser.add_argument('infile', type=argparse.FileType('rb'), help='specifies the file on which to preform the crypto action')
parser.add_argument('outfile', type=argparse.FileType('wb'), help='specifies the file in which to store the crypto action result')
    parser.add_argument('vault', help='the vault containing the key to use for the crypto action')
parser.add_argument('key', help='the key to use for the crypto action')
return parser.parse_args(argv)
def main(argv):
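    # Note: the assignment below overrides the passed-in argv with a
    # hard-coded sample command (machine-specific paths), so command-line
    # arguments are effectively ignored.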
argv = ['', 'encrypt', 'd:\\temp\\crypto_encrypt_in.txt', 'd:\\temp\\crypto_encrypt_out.txt', 'sdschaab-replkv', 'repl-key1']
args = _parse_args(argv[1:])
crypto_agent = KeyVaultCryptoAgent(CLIENT_ID)
if args.action == 'encrypt':
crypto_agent.encrypt(args.infile, args.outfile, args.vault, args.key)
if __name__ == '__main__':
main(sys.argv)
| mit | 1,837,548,955,547,874,800 | 33.017544 | 137 | 0.680248 | false |
arnarb/greenhousedb | apps/greenhousedb/migrations/0001_initial.py | 1 | 9867 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('taxonomy', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Clones',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('clone', models.CharField(max_length=20, blank=True)),
('clone_type', models.CharField(max_length=255, blank=True)),
('harvested_date', models.DateField(default=None, null=True, blank=True)),
('notes', models.TextField(blank=True)),
],
options={
'verbose_name_plural': 'Clones',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Contact',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=255, verbose_name=b'Lab PI')),
('address', models.TextField(blank=True)),
('email', models.EmailField(default=b'', max_length=75, blank=True)),
('phone', models.CharField(max_length=255, blank=True)),
('researcher', models.CharField(max_length=255, verbose_name=b'Primary Researcher Name')),
('notes', models.TextField(blank=True)),
],
options={
'ordering': ('name',),
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Distribution',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('distributed', models.CharField(blank=True, max_length=20, choices=[(b'Yes', b'Yes'), (b'No', b'No')])),
('distributed_who', models.CharField(max_length=255, blank=True)),
('distributed_date', models.DateField(default=None, null=True, verbose_name=b'Date distributed ', blank=True)),
('notes', models.TextField(blank=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Genotype',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('gene', models.CharField(max_length=200)),
('gene_type', models.CharField(blank=True, max_length=20, choices=[(b'WT', b'WT'), (b'hemi', b'hemi'), (b'homo', b'homo')])),
('phenotype', models.CharField(max_length=200, null=True, blank=True)),
('date', models.DateField(default=None, null=True, verbose_name=b'Date ', blank=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Plant',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('plant_date', models.DateField(verbose_name=b'Date planted ')),
('generation', models.CharField(max_length=20, blank=True)),
('wild_collected', models.CharField(default=b'No', max_length=20, verbose_name=b'Wild Collected', choices=[(b'No', b'No'), (b'Yes', b'Yes')])),
('teaching_collection', models.BooleanField(default=False, verbose_name=b'Teaching Collection')),
('t', models.CharField(blank=True, max_length=20, verbose_name=b'T', choices=[(b'Yes', b'Yes'), (b'No', b'No')])),
('conditions', models.TextField(max_length=255, blank=True)),
('location', models.CharField(blank=True, max_length=255, verbose_name=b'Weld Hill Location ', choices=[(b'GH-1', b'GH-1'), (b'GH-2', b'GH-2'), (b'GH-3', b'GH-3'), (b'GH-4', b'GH-4'), (b'GH-5', b'GH-5'), (b'GH-6', b'GH-6'), (b'GH-7', b'GH-7'), (b'GH-8', b'GH-8'), (b'GH-9', b'GH-9'), (b'GH-10', b'GH-10'), (b'GH-11', b'GH-11'), (b'GH-12', b'GH-12'), (b'In vitro', b'In vitro'), (b'001 Karen', b'001 Karen'), (b'002 Alfa', b'002 Alfa'), (b'003 Henry', b'003 Henry'), (b'004 Jake', b'004 Jake'), (b'005 Josephine', b'005 Josephine'), (b'006 Charlie', b'006 Charlie'), (b'007 Bravo', b'007 Bravo'), (b'008 Beverly', b'008 Beverly'), (b'009 MTPC-144 Left', b'009 MTPC-144 Left'), (b'010 Echo', b'010 Echo'), (b'011 MTPC-144 Right', b'011 MTPC-144 Right'), (b'012 Frances', b'012 Frances'), (b'013 Gary', b'013 Gary')])),
('plant_range', models.CharField(max_length=255, null=True, verbose_name=b'Range', blank=True)),
('harvest_date', models.DateField(default=None, null=True, verbose_name=b'Date harvested ', blank=True)),
('storage', models.CharField(max_length=255, blank=True)),
('terminate_type', models.CharField(default=None, max_length=20, blank=True, choices=[(b'Died', b'Died'), (b'Removed', b'Removed')])),
('unknown_maternal_line', models.BooleanField(default=False, help_text=b'auto-filled on save')),
('unknown_paternal_line', models.BooleanField(default=False, help_text=b'auto-filled on save')),
('date_created', models.DateTimeField(auto_now_add=True, verbose_name=b'Date created ')),
('date_modified', models.DateTimeField(auto_now=True, verbose_name=b'Date modified ')),
('clones', models.ManyToManyField(to='greenhousedb.Clones', null=True, blank=True)),
('contact', models.ForeignKey(to='greenhousedb.Contact')),
('distribution', models.ForeignKey(blank=True, to='greenhousedb.Distribution', null=True)),
('family', models.ForeignKey(blank=True, to='taxonomy.Family', null=True)),
('genotypes', models.ManyToManyField(related_name='many_genotypes', null=True, to='greenhousedb.Genotype', blank=True)),
('genus', models.ForeignKey(blank=True, to='taxonomy.Genus', null=True)),
('maternal_line', models.ForeignKey(related_name='fk_maternal_line', verbose_name=b'Maternal ID', blank=True, to='greenhousedb.Plant', null=True)),
('paternal_line', models.ForeignKey(related_name='fk_paternal_line', verbose_name=b'Paternal ID', blank=True, to='greenhousedb.Plant', null=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='PlantComment',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('notes', models.TextField(blank=True)),
('update_time', models.DateTimeField(auto_now=True)),
('create_time', models.DateTimeField(auto_now_add=True)),
('plant', models.ForeignKey(to='greenhousedb.Plant')),
],
options={
'ordering': ('-update_time',),
},
bases=(models.Model,),
),
migrations.CreateModel(
name='PlantSource',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('source', models.CharField(max_length=255, verbose_name=b'Plant source')),
('germplasm_source', models.CharField(default=None, max_length=255, null=True, verbose_name=b'Germplasm source', blank=True)),
('address', models.CharField(max_length=255, blank=True)),
('fieldcoll', models.CharField(blank=True, max_length=20, verbose_name=b'Field Collected', choices=[(b'Yes', b'Yes'), (b'No', b'No')])),
('notes', models.TextField(blank=True)),
],
options={
'ordering': ('source',),
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Ploidy',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('ploidy', models.CharField(max_length=20)),
('date', models.DateField(default=None, null=True, verbose_name=b'Date ', blank=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Substrate',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=200)),
('comments', models.TextField(blank=True)),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='plant',
name='ploidy',
field=models.ForeignKey(blank=True, to='greenhousedb.Ploidy', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='plant',
name='source',
field=models.ForeignKey(blank=True, to='greenhousedb.PlantSource', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='plant',
name='species',
field=models.ForeignKey(to='taxonomy.Species'),
preserve_default=True,
),
migrations.AddField(
model_name='plant',
name='substrate',
field=models.ForeignKey(blank=True, to='greenhousedb.Substrate', null=True),
preserve_default=True,
),
]
| gpl-3.0 | 7,366,565,536,940,805,000 | 54.745763 | 832 | 0.551231 | false |
jakobkolb/MayaSim | mayasim/model/ModelCore.py | 1 | 66303 | from __future__ import print_function
import datetime
import operator
import os
import sys
import traceback
import warnings
from itertools import compress
import networkx as nx
import numpy as np
import pandas
import pkg_resources
import scipy.ndimage as ndimage
import scipy.sparse as sparse
try:
import cPickle as pkl
except ImportError:
import pickle as pkl
if __name__ == "__main__":
from ModelParameters import ModelParameters as Parameters
from f90routines import f90routines
else:
from .f90routines import f90routines
from .ModelParameters import ModelParameters as Parameters
class ModelCore(Parameters):
def __init__(self,
n=30,
output_data_location=None,
debug=False,
output_trajectory=True,
**kwargs):
"""
Instance of the MayaSim model.
Parameters
----------
n: int
number of settlements to initialize,
output_data_location: path_like
string stating the folder path to which the output
files will be writen,
debug: bool
switch for debugging output from model,
output_trajectory: bool
switch for output of trajectory data,
output_settlement_data: bool
switch for output of settlement data,
output_geographic_data: bool
switch for output of geographic data.
"""
# Input/Output settings:
# Set path to static input files
input_data_location = pkg_resources. \
resource_filename('mayasim', 'input_data/')
# Debugging settings
self.debug = debug
# In debug mode, allways print stack for warnings and errors.
def warn_with_traceback(message,
category,
filename,
lineno,
file=None,
line=None):
log = file if hasattr(file, 'write') else sys.stderr
traceback.print_stack(file=log)
log.write(
warnings.formatwarning(message, category, filename, lineno,
line))
if self.debug:
warnings.showwarning = warn_with_traceback
# *******************************************************************
# MODEL PARAMETERS (to be varied)
# *******************************************************************
self.output_trajectory = output_trajectory
        # Settlement and geographic data will be written to files in each
        # time step; trajectory data will be kept in one data structure and
        # read out once the model run has finished.
        if output_data_location:
# remove file ending
self.output_data_location = output_data_location.rsplit('.', 1)[0]
# create callable output paths
self.settlement_output_path = \
lambda i: self.output_data_location + \
f'settlement_data_{i:03d}.pkl'
self.geographic_output_path = \
lambda i: self.output_data_location + \
f'geographic_data_{i:03d}.pkl'
# set switches for output generation
self.output_geographic_data = True
self.output_settlement_data = True
else:
self.output_geographic_data = False
self.output_settlement_data = False
self.trajectory = []
self.traders_trajectory = []
# *******************************************************************
# MODEL DATA SOURCES
# *******************************************************************
# documentation for TEMPERATURE and PRECIPITATION data can be found
# here: http://www.worldclim.org/formats
# apparently temperature data is given in x*10 format to allow for
# smaller file sizes.
# original version of mayasim divides temperature by 12 though
self.temp = np.load(input_data_location +
'0_RES_432x400_temp.npy') / 12.
# precipitation in mm or liters per square meter
# (comparing the numbers to numbers from Wikipedia suggests
# that it is given per year)
self.precip = np.load(input_data_location + '0_RES_432x400_precip.npy')
# in meters above sea level
self.elev = np.load(input_data_location + '0_RES_432x400_elev.npy')
self.slope = np.load(input_data_location + '0_RES_432x400_slope.npy')
# documentation for SOIL PRODUCTIVITY is given at:
# http://www.fao.org/geonetwork/srv/en/
# main.home?uuid=f7a2b3c0-bdbf-11db-a0f6-000d939bc5d8
# The soil production index considers the suitability
# of the best adapted crop to each soils
# condition in an area and makes a weighted average for
# all soils present in a pixel based
# on the formula: 0.9 * VS + 0.6 * S + 0.3 * MS + 0 * NS.
# Values range from 0 (bad) to 6 (good)
self.soilprod = np.load(input_data_location + '0_RES_432x400_soil.npy')
        # it also sets soil productivity to 1.5 where the elevation is <= 1
        # self.soilprod[self.elev <= 1] = 1.5
        # the vectorized assignment above complains because of NaNs in elev,
        # hence the explicit loop below
for ind, x in np.ndenumerate(self.elev):
if not np.isnan(x):
if x <= 1.:
self.soilprod[ind] = 1.5
# smoothen soil productivity dataset
self.soilprod = ndimage.gaussian_filter(self.soilprod,
sigma=(2, 2),
order=0)
# and set to zero for non land cells
self.soilprod[np.isnan(self.elev)] = 0
# *******************************************************************
# MODEL MAP INITIALIZATION
# *******************************************************************
# dimensions of the map
self.rows, self.columns = self.precip.shape
self.height, self.width = 914., 840. # height and width in km
self.pixel_dim = self.width / self.columns
self.cell_width = self.width / self.columns
self.cell_height = self.height / self.rows
self.land_patches = np.asarray(np.where(np.isfinite(self.elev)))
self.number_of_land_patches = self.land_patches.shape[1]
        # length unit - total map is about 500 km wide
self.area = 516484. / len(self.land_patches[0])
self.elev[:, 0] = np.inf
self.elev[:, -1] = np.inf
self.elev[0, :] = np.inf
self.elev[-1, :] = np.inf
# create a list of the index values i = (x, y) of the land
# patches with finite elevation h
self.list_of_land_patches = [
i for i, h in np.ndenumerate(self.elev)
if np.isfinite(self.elev[i])
]
# initialize soil degradation and population
# gradient (influencing the forest)
# *******************************************************************
# INITIALIZE ECOSYSTEM
# *******************************************************************
# Soil (influencing primary production and agricultural productivity)
self.soil_deg = np.zeros((self.rows, self.columns))
# Forest
self.forest_state = np.ones((self.rows, self.columns), dtype=int)
self.forest_state[np.isnan(self.elev)] = 0
self.forest_memory = np.zeros((self.rows, self.columns), dtype=int)
self.cleared_land_neighbours = np.zeros((self.rows, self.columns),
dtype=int)
# The forest has three states: 3=climax forest,
# 2=secondary regrowth, 1=cleared land.
for i in self.list_of_land_patches:
self.forest_state[i] = 3
# Variables describing total amount of water and water flow
self.water = np.zeros((self.rows, self.columns))
self.flow = np.zeros((self.rows, self.columns))
self.spaciotemporal_precipitation = np.zeros((self.rows, self.columns))
# initialize the trajectories of the water drops
self.x = np.zeros((self.rows, self.columns), dtype="int")
self.y = np.zeros((self.rows, self.columns), dtype="int")
# define relative coordinates of the neighbourhood of a cell
self.neighbourhood = [(i, j) for i in [-1, 0, 1] for j in [-1, 0, 1]]
self.f90neighbourhood = np.asarray(self.neighbourhood).T
# *******************************************************************
# INITIALIZE SOCIETY
# *******************************************************************
# Population gradient (influencing the forest)
self.pop_gradient = np.zeros((self.rows, self.columns))
self.number_settlements = n
# distribute specified number of settlements on the map
        self.settlement_positions = self.land_patches[
            :, np.random.choice(len(self.land_patches[1]), n).astype('int')]
self.age = [0] * n
# demographic variables
self.birth_rate = [self.birth_rate_parameter] * n
self.death_rate = [0.1 + 0.05 * r for r in list(np.random.random(n))]
self.population = list(
np.random.randint(self.min_init_inhabitants,
self.max_init_inhabitants, n).astype(float))
self.mig_rate = [0.] * n
self.out_mig = [0] * n
self.migrants = [0] * n
self.pioneer_set = []
self.failed = 0
# index list for populated and abandoned cities
# used until removal of dead cities is implemented.
self.populated_cities = range(n)
self.dead_cities = []
# agricultural influence
self.number_cells_in_influence = [0] * n
self.area_of_influence = [0.] * n
self.coordinates = np.indices((self.rows, self.columns))
self.cells_in_influence = [None] * n # will be a list of arrays
self.cropped_cells = [None] * n
# for now, cropped cells are only the city positions.
# first cropped cells are added at the first call of
# get_cropped_cells()
for city in self.populated_cities:
self.cropped_cells[city] = [[self.settlement_positions[0, city]],
[self.settlement_positions[1, city]]]
# print(self.cropped_cells[1])
self.occupied_cells = np.zeros((self.rows, self.columns))
self.number_cropped_cells = [0] * n
self.crop_yield = [0.] * n
self.eco_benefit = [0.] * n
self.available = 0
# details of income from ecosystems services
self.s_es_ag = [0.] * n
self.s_es_wf = [0.] * n
self.s_es_fs = [0.] * n
self.s_es_sp = [0.] * n
self.s_es_pg = [0.] * n
self.es_ag = np.zeros((self.rows, self.columns), dtype=float)
self.es_wf = np.zeros((self.rows, self.columns), dtype=float)
self.es_fs = np.zeros((self.rows, self.columns), dtype=float)
self.es_sp = np.zeros((self.rows, self.columns), dtype=float)
self.es_pg = np.zeros((self.rows, self.columns), dtype=float)
# Trade Variables
self.adjacency = np.zeros((n, n))
self.rank = [0] * n
self.degree = [0] * n
self.comp_size = [0] * n
self.centrality = [0] * n
self.trade_income = [0] * n
self.max_cluster_size = 0
# total real income per capita
self.real_income_pc = [0] * n
def _get_run_variables(self):
"""
Saves all variables and values of the class instance 'self'
in a dictionary file at the location given by 'path'
Parameters:
-----------
self: class instance
class instance whose variables are saved
"""
dictionary = {
attr: getattr(self, attr)
for attr in dir(self)
if not attr.startswith('__') and not callable(getattr(self, attr))
}
return dictionary
def update_precipitation(self, t):
"""
Modulates the initial precip dataset with a 24 timestep period.
Returns a field of rainfall values for each cell.
If veg_rainfall > 0, cleared_land_neighbours decreases rain.
TO DO: The original Model increases specialization every time
rainfall decreases, assuming that trade gets more important to
compensate for agriculture decline
"""
if self.precipitation_modulation:
self.spaciotemporal_precipitation = \
self.precip * (
1 + self.precipitation_amplitude *
self.precipitation_variation[
(np.ceil(t / self.climate_var) % 8).astype(int)]) \
- self.veg_rainfall * self.cleared_land_neighbours
else:
self.spaciotemporal_precipitation = \
self.precip * (1 -
self.veg_rainfall * self.cleared_land_neighbours)
# check if system time is in drought period
drought = False
for drought_time in self.drought_times:
if drought_time[0] < t <= drought_time[1]:
drought = True
# if so, decrease precipitation by factor percentage given by
# drought severity
if drought:
self.spaciotemporal_precipitation *= \
(1. - self.drought_severity / 100.)
def get_waterflow(self):
"""
        waterflow: takes rain as an argument, uses elev, and returns the
        water flow distribution.
        The precip percent parameter reduces the number of raindrops that
        have to be moved, thereby increasing performance.
f90waterflow takes as arguments:
list of coordinates of land cells (2xN_land)
elevation map in (height x width)
rain_volume per cell map in (height x width)
rain_volume and elevation must have same units: height per cell
neighbourhood offsets
height and width of map as integers,
Number of land cells, N_land
"""
# convert precipitation from mm to meters
# NOTE: I think, this should be 1e-3
# to convert from mm to meters though...
# but 1e-5 is what they do in the original version.
rain_volume = np.nan_to_num(self.spaciotemporal_precipitation * 1e-5)
max_x, max_y = self.rows, self.columns
err, self.flow, self.water = \
f90routines.f90waterflow(self.land_patches,
self.elev,
rain_volume,
self.f90neighbourhood,
max_x,
max_y,
self.number_of_land_patches)
return self.water, self.flow
def forest_evolve(self, npp):
npp_mean = np.nanmean(npp)
# Iterate over all cells repeatedly and regenerate or degenerate
for repeat in range(4):
for i in self.list_of_land_patches:
if not np.isnan(self.elev[i]):
# Forest regenerates faster [slower] (linearly),
# if net primary productivity on the patch
# is above [below] average.
threshold = npp_mean / npp[i]
# Degradation:
# Decrement with probability 0.003
# if there is a settlement around,
# degrade with higher probability
probdec = self.natprobdec * (2 * self.pop_gradient[i] + 1)
if np.random.random() <= probdec:
if self.forest_state[i] == 3:
self.forest_state[i] = 2
self.forest_memory[i] = self.state_change_s2
elif self.forest_state[i] == 2:
self.forest_state[i] = 1
self.forest_memory[i] = 0
# Regeneration:"
# recover if tree = 1 and memory > threshold 1
if (self.forest_state[i] == 1 and self.forest_memory[i] >
self.state_change_s2 * threshold):
self.forest_state[i] = 2
self.forest_memory[i] = self.state_change_s2
# recover if tree = 2 and memory > threshold 2
# and certain number of neighbours are
# climax forest as well
if (self.forest_state[i] == 2 and self.forest_memory[i] >
self.state_change_s3 * threshold):
state_3_neighbours = \
np.sum(self.forest_state[i[0] - 1:i[0] + 2,
i[1] - 1:i[1] + 2] == 3)
if state_3_neighbours > \
self.min_number_of_s3_neighbours:
self.forest_state[i] = 3
# finally, increase memory by one
self.forest_memory[i] += 1
# calculate cleared land neighbours for output:
if self.veg_rainfall > 0:
for i in self.list_of_land_patches:
self.cleared_land_neighbours[i] = \
np.sum(self.forest_state[i[0] - 1:i[0] + 2,
i[1] - 1:i[1] + 2] == 1)
assert not np.any(self.forest_state[~np.isnan(self.elev)] < 1), \
'forest state is smaller than 1 somewhere'
return
def net_primary_prod(self):
"""
        net_primary_prod is the minimum of two quantities
        derived from local precipitation and temperature.
        Why is it rain and not 'surface water'
        according to the waterflow model?
"""
# EQUATION ############################################################
npp = 3000 \
* np.minimum(1 - np.exp(-6.64e-4
* self.spaciotemporal_precipitation),
1. / (1 + np.exp(1.315 - (0.119 * self.temp))))
# EQUATION ############################################################
return npp
def get_ag(self, npp, wf):
"""
        agricultural productivity is calculated via a
linear additive model from
net primary productivity, soil productivity,
slope, waterflow and soil degradation
of each patch.
"""
# EQUATION ############################################################
return self.a_npp * npp + self.a_sp * self.soilprod \
- self.a_s * self.slope - self.a_wf * wf - self.soil_deg
# EQUATION ############################################################
def get_ecoserv(self, ag, wf):
"""
Ecosystem Services are calculated via a linear
additive model from agricultural productivity (ag),
waterflow through the cell (wf) and forest
state on the cell (forest) \in [1,3],
        The recent version of mayasim limits the value of
        ecosystem services to 1 < ecoserv < 250; it also proposes
        to include population density (pop_gradient) and precipitation (rain).
"""
# EQUATION ###########################################################
if not self.better_ess:
self.es_ag = self.e_ag * ag
self.es_wf = self.e_wf * wf
self.es_fs = self.e_f * (self.forest_state - 1.)
self.es_sp = self.e_r * self.spaciotemporal_precipitation
self.es_pg = self.e_deg * self.pop_gradient
else:
# change to use forest as proxy for income from agricultural
# productivity. Multiply by 2 to get same per cell levels as
# before
self.es_ag = np.zeros(np.shape(ag))
self.es_wf = self.e_wf * wf
self.es_fs = 2. * self.e_ag * (self.forest_state - 1.) * ag
self.es_sp = self.e_r * self.spaciotemporal_precipitation
self.es_pg = self.e_deg * self.pop_gradient
return (self.es_ag + self.es_wf + self.es_fs + self.es_sp - self.es_pg)
# EQUATION ###########################################################
######################################################################
# The Society
######################################################################
def benefit_cost(self, ag_in):
# Benefit cost assessment
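        # Saturating yield curve: bca approaches max_yield for large ag_in
        # and equals max_yield * (1 - origin_shift) at ag_in = 0.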
return (self.max_yield *
(1 - self.origin_shift * np.exp(-self.slope_yield * ag_in)))
def get_cells_in_influence(self):
"""
        Creates a list of cells for each city that are under its influence.
        These are the cells that are closer than population^0.8 / 60 (which is
        not explained any further... change denominator to 80 and max value to
        30 from eyeballing the results).
"""
# EQUATION ####################################################################
self.area_of_influence = [(x**0.8) / 60. for x in self.population]
self.area_of_influence = [
value if value < 40. else 40. for value in self.area_of_influence
]
# EQUATION ####################################################################
for city in self.populated_cities:
distance = np.sqrt((self.cell_width *
(self.settlement_positions[0][city] -
self.coordinates[0]))**2 +
(self.cell_height *
(self.settlement_positions[1][city] -
self.coordinates[1]))**2)
stencil = distance <= self.area_of_influence[city]
self.cells_in_influence[city] = self.coordinates[:, stencil]
self.number_cells_in_influence = [
len(x[0]) for x in self.cells_in_influence
]
return
def get_cropped_cells(self, bca):
"""
Updates the cropped cells for each city with positive population.
Calculates the utility for each cell (depending on distance from
        the respective city). If population per cropped cell is lower than
min_people_per_cropped_cell, cells are abandoned.
Cells with negative utility are also abandoned.
If population per cropped cell is higher than
max_people_per_cropped_cell, new cells are cropped.
Newly cropped cells are chosen such that they have highest utility
"""
abandoned = 0
sown = 0
# for each settlement: how many cells are currently cropped ?
self.number_cropped_cells = np.array(
[len(x[0]) for x in self.cropped_cells])
# agricultural population density (people per cropped land)
# determines the number of cells that can be cropped.
ag_pop_density = [
p / (self.number_cropped_cells[c] * self.area)
if self.number_cropped_cells[c] > 0 else 0.
for c, p in enumerate(self.population)
]
# occupied_cells is a mask of all occupied cells calculated as the
# unification of the cropped cells of all settlements.
if len(self.cropped_cells) > 0:
occup = np.concatenate(self.cropped_cells, axis=1).astype('int')
if False:
print('population of cities without agriculture:')
print(
np.array(self.population)[self.number_cropped_cells == 0])
print('pt. migration from cities without agriculture:')
print(np.array(self.out_mig)[self.number_cropped_cells == 0])
print('out migration from cities without agriculture:')
print(np.array(self.migrants)[self.number_cropped_cells == 0])
for index in range(len(occup[0])):
self.occupied_cells[occup[0, index], occup[1, index]] = 1
# the age of settlements is increased here.
self.age = [x + 1 for x in self.age]
# for each settlement: which cells to crop ?
# calculate utility first! This can be accelerated, if calculations
# are only done in 40 km radius.
for city in self.populated_cities:
cells = list(
zip(self.cells_in_influence[city][0],
self.cells_in_influence[city][1]))
# EQUATION ########################################################
utility = [
bca[x, y] - self.estab_cost - (self.ag_travel_cost * np.sqrt(
(self.cell_width * (self.settlement_positions[0][city] -
self.coordinates[0][x, y]))**2 +
(self.cell_height * (self.settlement_positions[1][city] -
self.coordinates[1][x, y]))**2)) /
np.sqrt(self.population[city]) for (x, y) in cells
]
# EQUATION ########################################################
available = [
True if self.occupied_cells[x, y] == 0 else False
for (x, y) in cells
]
# jointly sort utilities, availability and cells such that cells
# with highest utility are first.
sorted_utility, sorted_available, sorted_cells = \
list(zip(*sorted(list(zip(utility, available, cells)),
reverse=True)))
# of these sorted lists, sort filter only available cells
available_util = list(
compress(list(sorted_utility), list(sorted_available)))
available_cells = list(
compress(list(sorted_cells), list(sorted_available)))
# save local copy of all cropped cells
cropped_cells = list(zip(*self.cropped_cells[city]))
# select utilities for these cropped cells
cropped_utils = [
utility[cells.index(cell)] if cell in cells else -1
for cell in cropped_cells
]
            # sort utilities and cropped cells so that lowest utilities come first
city_has_crops = True if len(cropped_cells) > 0 else False
if city_has_crops:
occupied_util, occupied_cells = \
zip(*sorted(list(zip(cropped_utils, cropped_cells))))
# 1.) include new cells if population exceeds a threshold
# calculate number of new cells to crop
number_of_new_cells = np.floor(ag_pop_density[city]
/ self.max_people_per_cropped_cell) \
.astype('int')
# and crop them by selecting cells with positive utility from the
# beginning of the list
for n in range(min([number_of_new_cells, len(available_util)])):
if available_util[n] > 0:
self.occupied_cells[available_cells[n]] = 1
for dim in range(2):
self.cropped_cells[city][dim] \
.append(available_cells[n][dim])
if city_has_crops:
# 2.) abandon cells if population too low
# after cities age > 5 years
if (ag_pop_density[city] < self.min_people_per_cropped_cell
and self.age[city] > 5):
                    # There are some inconsistencies here. Cells are abandoned
                    # if the 'people per cropped land' is lower than a
                    # threshold for 'people per cropped cells'. Then the
                    # number of cells to abandon is calculated as 30/people
                    # per cropped land. Why?! (check the original version!)
number_of_lost_cells = np.ceil(
30 / ag_pop_density[city]).astype('int')
# TO DO: recycle utility and cell list to do this faster.
# therefore, filter cropped cells from utility list
# and delete last n cells.
for n in range(
min([number_of_lost_cells,
len(occupied_cells)])):
dropped_cell = occupied_cells[n]
self.occupied_cells[dropped_cell] = 0
for dim in range(2):
self.cropped_cells[city][dim] \
.remove(dropped_cell[dim])
abandoned += 1
# 3.) abandon cells with utility <= 0
# find cells that have negative utility and belong
# to city under consideration,
useless_cropped_cells = [
occupied_cells[i] for i in range(len(occupied_cells))
if occupied_util[i] < 0
and occupied_cells[i] in zip(*self.cropped_cells[city])
]
# and release them.
for useless_cropped_cell in useless_cropped_cells:
self.occupied_cells[useless_cropped_cell] = 0
for dim in range(2):
try:
self.cropped_cells[city][dim] \
.remove(useless_cropped_cell[dim])
except ValueError:
print('ERROR: Useless cell gone already')
abandoned += 1
# Finally, update list of lists containing cropped cells for each city
# with positive population.
self.number_cropped_cells = [
len(self.cropped_cells[city][0])
for city in range(len(self.population))
]
return abandoned, sown
def get_pop_mig(self):
# gives population and out-migration
# print("number of settlements", len(self.population))
# death rate correlates inversely with real income per capita
death_rate_diff = self.max_death_rate - self.min_death_rate
self.death_rate = [
-death_rate_diff * self.real_income_pc[i] + self.max_death_rate
for i in range(len(self.real_income_pc))
]
self.death_rate = list(
np.clip(self.death_rate, self.min_death_rate, self.max_death_rate))
# if population control,
# birth rate negatively correlates with population size
if self.population_control:
birth_rate_diff = self.max_birth_rate - self.min_birth_rate
self.birth_rate = [
-birth_rate_diff / 10000. * value +
self.shift if value > 5000 else self.birth_rate_parameter
for value in self.population
]
# population grows according to effective growth rate
self.population = [
int((1. + self.birth_rate[i] - self.death_rate[i]) * value)
for i, value in enumerate(self.population)
]
self.population = [
value if value > 0 else 0 for value in self.population
]
mig_rate_diffe = self.max_mig_rate - self.min_mig_rate
# outmigration rate also correlates
# inversely with real income per capita
self.mig_rate = [
-mig_rate_diffe * self.real_income_pc[i] + self.max_mig_rate
for i in range(len(self.real_income_pc))
]
self.mig_rate = list(
np.clip(self.mig_rate, self.min_mig_rate, self.max_mig_rate))
self.out_mig = [
int(self.mig_rate[i] * self.population[i])
for i in range(len(self.population))
]
self.out_mig = [value if value > 0 else 0 for value in self.out_mig]
return
# impact of sociosphere on ecosphere
def update_pop_gradient(self):
# pop gradient quantifies the disturbance of the forest by population
self.pop_gradient = np.zeros((self.rows, self.columns))
for city in self.populated_cities:
distance = np.sqrt(self.area * (
(self.settlement_positions[0][city] - self.coordinates[0])**2 +
(self.settlement_positions[1][city] - self.coordinates[1])**2))
# EQUATION ###################################################################
self.pop_gradient[self.cells_in_influence[city][0],
self.cells_in_influence[city][1]] += \
self.population[city] \
/ (300 * (1 + distance[self.cells_in_influence[city][0],
self.cells_in_influence[city][1]]))
# EQUATION ###################################################################
self.pop_gradient[self.pop_gradient > 15] = 15
def evolve_soil_deg(self):
# soil degrades for cropped cells
cropped = np.concatenate(self.cropped_cells, axis=1).astype('int')
self.soil_deg[cropped[0], cropped[1]] += self.deg_rate
self.soil_deg[self.forest_state == 3] -= self.reg_rate
self.soil_deg[self.soil_deg < 0] = 0
def get_rank(self):
# depending on population ranks are assigned
# attention: ranks are reverted with respect to Netlogo MayaSim !
# 1 => 3 ; 2 => 2 ; 3 => 1
self.rank = [
3
if value > self.thresh_rank_3 else 2 if value > self.thresh_rank_2
else 1 if value > self.thresh_rank_1 else 0
for index, value in enumerate(self.population)
]
return
@property
def build_routes(self):
adj = self.adjacency.copy()
adj[adj == -1] = 0
built_links = 0
lost_links = 0
g = nx.from_numpy_matrix(adj, create_using=nx.DiGraph())
self.degree = g.out_degree()
# cities with rank>0 are traders and establish links to neighbours
for city in self.populated_cities:
if self.degree[city] < self.rank[city]:
distances = \
(np.sqrt(self.area * (+ (self.settlement_positions[0][city]
- self.settlement_positions[0]) ** 2
+ (self.settlement_positions[1][city]
- self.settlement_positions[1]) ** 2
)))
if self.rank[city] == 3:
treshold = 31. * (
self.thresh_rank_3 / self.thresh_rank_3 * 0.5 + 1.)
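                    # note: thresh_rank_3 / thresh_rank_3 == 1, so this is
                    # simply 31 * 1.5 = 46.5 (kept as written for symmetry
                    # with the rank 1 and rank 2 cases below)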
elif self.rank[city] == 2:
treshold = 31. * (
self.thresh_rank_2 / self.thresh_rank_3 * 0.5 + 1.)
elif self.rank[city] == 1:
treshold = 31. * (
self.thresh_rank_1 / self.thresh_rank_3 * 0.5 + 1.)
else:
treshold = 0
                # don't choose yourself as nearest neighbor
distances[city] = 2 * treshold
# collect close enough neighbors and omit those that are
# already connected.
a = distances <= treshold
b = self.adjacency[city] == 0
nearby = np.array(list(map(operator.and_, a, b)))
# if there are traders nearby,
# connect to the one with highest population
if sum(nearby) != 0:
try:
new_partner = np.nanargmax(self.population * nearby)
self.adjacency[city, new_partner] = 1
self.adjacency[new_partner, city] = -1
built_links += 1
except ValueError:
print('ERROR in new partner')
print(np.shape(self.population),
np.shape(self.settlement_positions[0]))
sys.exit(-1)
            # cities that can't maintain their trade links lose them:
elif self.degree[city] > self.rank[city]:
# get neighbors of node
neighbors = g.successors(city)
# find smallest of neighbors
smallest_neighbor = self.population.index(
min([self.population[nb] for nb in neighbors]))
# cut link with him
self.adjacency[city, smallest_neighbor] = 0
self.adjacency[smallest_neighbor, city] = 0
lost_links += 1
return (built_links, lost_links)
def get_comps(self):
# convert adjacency matrix to compressed sparse row format
adjacency_csr = sparse.csr_matrix(np.absolute(self.adjacency))
# extract data vector, row index vector and index pointer vector
a = adjacency_csr.data
# add one to make indexing compatible to fortran
# (where indices start counting with 1)
j_a = adjacency_csr.indices + 1
i_c = adjacency_csr.indptr + 1
# determine length of data vectors
l_a = np.shape(a)[0]
l_ic = np.shape(i_c)[0]
# if data vector is not empty, pass data to fortran routine.
# else, just fill the centrality vector with ones.
if l_a > 0:
tmp_comp_size, tmp_degree = \
f90routines.f90sparsecomponents(i_c, a, j_a,
self.number_settlements,
l_ic, l_a)
self.comp_size, self.degree = list(tmp_comp_size), list(tmp_degree)
elif l_a == 0:
self.comp_size, self.degree = [0] * (l_ic - 1), [0] * (l_ic - 1)
return
def get_centrality(self):
# convert adjacency matrix to compressed sparse row format
adjacency_csr = sparse.csr_matrix(np.absolute(self.adjacency))
# extract data vector, row index vector and index pointer vector
a = adjacency_csr.data
# add one to make indexing compatible to fortran
# (where indices start counting with 1)
j_a = adjacency_csr.indices + 1
i_c = adjacency_csr.indptr + 1
# determine length of data vectors
l_a = np.shape(a)[0]
l_ic = np.shape(i_c)[0]
# print('number of trade links:', sum(a) / 2)
# if data vector is not empty, pass data to fortran routine.
# else, just fill the centrality vector with ones.
if l_a > 0:
tmp_centrality = f90routines \
.f90sparsecentrality(i_c, a, j_a,
self.number_settlements,
l_ic, l_a)
self.centrality = list(tmp_centrality)
elif l_a == 0:
self.centrality = [1] * (l_ic - 1)
return
def get_crop_income(self, bca):
# agricultural benefit of cropping
for city in self.populated_cities:
crops = bca[self.cropped_cells[city][0], self.
cropped_cells[city][1]]
# EQUATION #
if self.crop_income_mode == "mean":
self.crop_yield[city] = self.r_bca_mean \
* np.nanmean(crops[crops > 0])
elif self.crop_income_mode == "sum":
self.crop_yield[city] = self.r_bca_sum \
* np.nansum(crops[crops > 0])
self.crop_yield = [
0 if np.isnan(self.crop_yield[index]) else self.crop_yield[index]
for index in range(len(self.crop_yield))
]
return
def get_eco_income(self, es):
# benefit from ecosystem services of cells in influence
# ##EQUATION###################################################################
for city in self.populated_cities:
if self.eco_income_mode == "mean":
self.eco_benefit[city] = self.r_es_mean \
* np.nanmean(es[self.cells_in_influence[city]])
elif self.eco_income_mode == "sum":
self.eco_benefit[city] = self.r_es_sum \
* np.nansum(es[self.cells_in_influence[city]])
self.s_es_ag[city] = self.r_es_sum \
* np.nansum(self.es_ag[self.cells_in_influence[city]])
self.s_es_wf[city] = self.r_es_sum \
* np.nansum(self.es_wf[self.cells_in_influence[city]])
self.s_es_fs[city] = self.r_es_sum \
* np.nansum(self.es_fs[self.cells_in_influence[city]])
self.s_es_sp[city] = self.r_es_sum \
* np.nansum(self.es_sp[self.cells_in_influence[city]])
self.s_es_pg[city] = self.r_es_sum \
* np.nansum(self.es_pg[self.cells_in_influence[city]])
try:
self.eco_benefit[self.population == 0] = 0
except IndexError:
self.print_variable_lengths()
# ##EQUATION###################################################################
return
def get_trade_income(self):
# ##EQUATION###################################################################
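        # Trade income is r_trade scaled by a saturating function of the
        # relative trade-cluster size (comp_size / centrality); it is capped
        # at r_trade and set to zero for settlements without trade links.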
self.trade_income = [
1. / 30. * (1 + self.comp_size[i] / self.centrality[i])**0.9
for i in range(len(self.centrality))
]
self.trade_income = [
self.r_trade if value > 1 else 0 if
(value < 0 or self.degree[index] == 0) else self.r_trade * value
for index, value in enumerate(self.trade_income)
]
# ##EQUATION###################################################################
return
def get_real_income_pc(self):
# combine agricultural, ecosystem service and trade benefit
# EQUATION #
self.real_income_pc = [
(self.crop_yield[index] + self.eco_benefit[index] +
self.trade_income[index]) /
self.population[index] if value > 0 else 0
for index, value in enumerate(self.population)
]
return
def migration(self, es):
# if outmigration rate exceeds threshold, found new settlement
self.migrants = [0] * self.number_settlements
new_settlements = 0
vacant_lands = np.isfinite(es)
influenced_cells = np.concatenate(self.cells_in_influence, axis=1)
vacant_lands[influenced_cells[0], influenced_cells[1]] = 0
vacant_lands = np.asarray(np.where(vacant_lands == 1))
for city in self.populated_cities:
rd = np.random.rand()
if (self.out_mig[city] > 400 and len(vacant_lands[0]) > 0
and np.random.rand() <= 0.5):
mig_pop = self.out_mig[city]
self.migrants[city] = mig_pop
self.population[city] -= mig_pop
self.pioneer_set = \
vacant_lands[:, np.random.choice(len(vacant_lands[0]), 75)]
travel_cost = np.sqrt(
self.area *
((self.settlement_positions[0][city] - self.coordinates[0])
**2 + (self.settlement_positions[1][city] -
self.coordinates[1])**2))
utility = self.mig_ES_pref * es \
+ self.mig_TC_pref * travel_cost
utofpio = utility[self.pioneer_set[0], self.pioneer_set[1]]
new_loc = self.pioneer_set[:, np.nanargmax(utofpio)]
neighbours = \
(np.sqrt(self.area * ((new_loc[0]
- self.settlement_positions[0]) ** 2 +
(new_loc[1]
- self.settlement_positions[1]) ** 2
))) <= 7.5
summe = np.sum(neighbours)
if summe == 0:
self.spawn_city(new_loc[0], new_loc[1], mig_pop)
index = (vacant_lands[0, :] == new_loc[0]) \
& (vacant_lands[1, :] == new_loc[1])
np.delete(vacant_lands, int(np.where(index)[0]), 1)
new_settlements += 1
return new_settlements
def kill_cities(self):
        # BUG: cities can be added twice
        # if they have neither population nor cropped cells.
        # This might lead to unexpected consequences. See what happens
        # when, after adding all cities, only unique ones are kept.
killed_cities = 0
# kill cities if they have either no crops or no inhabitants:
dead_city_indices = [
i for i in range(len(self.population))
if self.population[i] <= self.min_city_size
]
if self.kill_cities_without_crops:
dead_city_indices += [
i for i in range(len(self.population))
if (len(self.cropped_cells[i][0]) <= 0)
]
# the following expression only keeps the unique entries.
# might solve the problem.
dead_city_indices = list(set(dead_city_indices))
# remove entries from variables
# simple lists that can be deleted elementwise
for index in sorted(dead_city_indices, reverse=True):
self.number_settlements -= 1
self.failed += 1
del self.age[index]
del self.birth_rate[index]
del self.death_rate[index]
del self.population[index]
del self.mig_rate[index]
del self.out_mig[index]
del self.number_cells_in_influence[index]
del self.area_of_influence[index]
del self.number_cropped_cells[index]
del self.crop_yield[index]
del self.eco_benefit[index]
del self.rank[index]
del self.degree[index]
del self.comp_size[index]
del self.centrality[index]
del self.trade_income[index]
del self.real_income_pc[index]
del self.cells_in_influence[index]
del self.cropped_cells[index]
del self.s_es_ag[index]
del self.s_es_wf[index]
del self.s_es_fs[index]
del self.s_es_sp[index]
del self.s_es_pg[index]
del self.migrants[index]
killed_cities += 1
# special cases:
self.settlement_positions = \
np.delete(self.settlement_positions,
dead_city_indices, axis=1)
self.adjacency = \
np.delete(np.delete(self.adjacency,
dead_city_indices, axis=0),
dead_city_indices, axis=1)
# update list of indices for populated and dead cities
# a) update list of populated cities
self.populated_cities = [
index for index, value in enumerate(self.population) if value > 0
]
# b) update list of dead cities
self.dead_cities = [
index for index, value in enumerate(self.population) if value == 0
]
return killed_cities
def spawn_city(self, x, y, mig_pop):
"""
Spawn a new city at given location with
given population and append it to all necessary lists.
Parameters
----------
x: int
x location of new city on map
y: int
y location of new city on map
mig_pop: int
initial population of new city
"""
# extend all variables to include new city
self.number_settlements += 1
self.settlement_positions = np.append(self.settlement_positions,
[[x], [y]], 1)
self.cells_in_influence.append([[x], [y]])
self.cropped_cells.append([[x], [y]])
n = len(self.adjacency)
self.adjacency = np.append(self.adjacency, [[0] * n], 0)
self.adjacency = np.append(self.adjacency, [[0]] * (n + 1), 1)
self.age.append(0)
self.birth_rate.append(self.birth_rate_parameter)
self.death_rate.append(0.1 + 0.05 * np.random.rand())
self.population.append(mig_pop)
self.mig_rate.append(0)
self.out_mig.append(0)
self.number_cells_in_influence.append(0)
self.area_of_influence.append(0)
self.number_cropped_cells.append(1)
self.crop_yield.append(0)
self.eco_benefit.append(0)
self.rank.append(0)
self.degree.append(0)
self.trade_income.append(0)
self.real_income_pc.append(0)
self.s_es_ag.append(0)
self.s_es_wf.append(0)
self.s_es_fs.append(0)
self.s_es_sp.append(0)
self.s_es_pg.append(0)
self.migrants.append(0)
def run(self, t_max=1):
"""
Run the model for a given number of steps.
If no number of steps is given, the model is integrated for one step
Parameters
----------
t_max: int
number of steps to integrate the model
"""
# initialize time step
t = 0
# print update about output state
if self.debug:
print('output of settlement and geodata is {} and {}'.format(
self.output_settlement_data, self.output_geographic_data))
# initialize variables
# net primary productivity
npp = np.zeros((self.rows, self.columns))
# water flow
        wf = np.zeros((self.rows, self.columns))
# agricultural productivity
ag = np.zeros((self.rows, self.columns))
# ecosystem services
es = np.zeros((self.rows, self.columns))
# benefit cost map for agriculture
bca = np.zeros((self.rows, self.columns))
self.init_output()
while t <= t_max:
t += 1
if self.debug:
print(f"time = {t}, population = {sum(self.population)}")
# evolve subselfs
# ecosystem
self.update_precipitation(t)
npp = self.net_primary_prod()
self.forest_evolve(npp)
# this is curious: only waterflow is used,
# water level is abandoned.
wf = self.get_waterflow()[1]
ag = self.get_ag(npp, wf)
es = self.get_ecoserv(ag, wf)
bca = self.benefit_cost(ag)
# society
if len(self.population) > 0:
self.get_cells_in_influence()
abandoned, sown = self.get_cropped_cells(bca)
self.get_crop_income(bca)
self.get_eco_income(es)
self.evolve_soil_deg()
self.update_pop_gradient()
self.get_rank()
(built, lost) = self.build_routes
self.get_comps()
self.get_centrality()
self.get_trade_income()
self.get_real_income_pc()
self.get_pop_mig()
new_settlements = self.migration(es)
killed_settlements = self.kill_cities()
else:
                abandoned = sown = built = lost = 0
                new_settlements = killed_settlements = 0
self.step_output(t, npp, wf, ag, es, bca, abandoned, sown, built,
lost, new_settlements, killed_settlements)
def init_output(self):
"""initializes data output for trajectory, settlements and geography depending on settings"""
if self.output_trajectory:
self.init_trajectory_output()
self.init_traders_trajectory_output()
if self.output_geographic_data or self.output_settlement_data:
# If output data location is needed and does not exist, create it.
if not os.path.exists(self.output_data_location):
os.makedirs(self.output_data_location)
if not self.output_data_location.endswith('/'):
self.output_data_location += '/'
if self.output_settlement_data:
settlement_init_data = {'shape': (self.rows, self.columns)}
with open(self.settlement_output_path(0), 'wb') as f:
pkl.dump(settlement_init_data, f)
if self.output_geographic_data:
pass
def step_output(self, t, npp, wf, ag, es, bca, abandoned, sown, built,
lost, new_settlements, killed_settlements):
"""
call different data saving routines depending on settings.
Parameters
----------
t: int
Timestep number to append to save file path
npp: numpy array
Net Primary Productivity on cell basis
wf: numpy array
Water flow through cell
ag: numpy array
Agricultural productivity of cell
es: numpy array
Ecosystem services of cell (that are summed and weighted to
calculate ecosystems service income)
bca: numpy array
Benefit cost analysis of agriculture on cell.
abandoned: int
Number of cells that was abandoned in the previous time step
sown: int
Number of cells that was newly cropped in the previous time step
built : int
number of trade links built in this timestep
lost : int
number of trade links lost in this timestep
        new_settlements : int
            number of new settlements that were spawned during the preceding
            timestep
        killed_settlements : int
            number of settlements that were killed during the preceding
            timestep
"""
# append stuff to trajectory
if self.output_trajectory:
self.update_trajectory_output(t, [npp, wf, ag, es, bca], built,
lost, new_settlements,
killed_settlements)
self.update_traders_trajectory_output(t)
# save maps of spatial data
if self.output_geographic_data:
self.save_geographic_output(t, npp, wf, ag, es, bca, abandoned,
sown)
# save data on settlement basis
if self.output_settlement_data:
self.save_settlement_output(t)
def save_settlement_output(self, t):
"""
Organize settlement based data in Pandas Dataframe
and save to file.
Parameters
----------
t: int
Timestep number to append to save file path
"""
colums = [
'population', 'real income', 'ag income', 'es income',
'trade income', 'x position', 'y position', 'out migration',
'degree'
]
data = [
self.population, self.real_income_pc, self.crop_yield,
self.eco_benefit, self.trade_income,
list(self.settlement_positions[0]),
list(self.settlement_positions[1]), self.migrants,
[self.degree[city] for city in self.populated_cities]
]
data = list(map(list, zip(*data)))
data_frame = pandas.DataFrame(columns=colums, data=data)
with open(self.settlement_output_path(t), 'wb') as f:
pkl.dump(data_frame, f)
def save_geographic_output(self, t, npp, wf, ag, es, bca, abandoned, sown):
"""
Organize Geographic data in dictionary (for separate layers
of data) and save to file.
Parameters
----------
t: int
Timestep number to append to save file path
npp: numpy array
Net Primary Productivity on cell basis
wf: numpy array
Water flow through cell
ag: numpy array
Agricultural productivity of cell
es: numpy array
Ecosystem services of cell (that are summed and weighted to
calculate ecosystems service income)
bca: numpy array
Benefit cost analysis of agriculture on cell.
abandoned: int
Number of cells that was abandoned in the previous time step
sown: int
Number of cells that was newly cropped in the previous time step
"""
tmpforest = self.forest_state.copy()
tmpforest[np.isnan(self.elev)] = 0
data = {
'forest': tmpforest,
'waterflow': wf,
'cells in influence': self.cells_in_influence,
'number of cells in influence': self.number_cells_in_influence,
'cropped cells': self.cropped_cells,
'number of cropped cells': self.number_cropped_cells,
'abandoned sown': np.array([abandoned, sown]),
'soil degradation': self.soil_deg,
'population gradient': self.pop_gradient,
'adjacency': self.adjacency,
'x positions': list(self.settlement_positions[0]),
'y positions': list(self.settlement_positions[1]),
'population': self.population,
'elev': self.elev,
'rank': self.rank
}
with open(self.geographic_output_path(t), 'wb') as f:
pkl.dump(data, f)
def init_trajectory_output(self):
self.trajectory.append([
'time', 'total_population', 'max_settlement_population',
'total_migrants', 'total_settlements', 'total_agriculture_cells',
'total_cells_in_influence', 'total_trade_links',
'mean_cluster_size', 'max_cluster_size', 'new_settlements',
'killed_settlements', 'built_trade_links', 'lost_trade_links',
'total_income_agriculture', 'total_income_ecosystem',
'total_income_trade', 'mean_soil_degradation',
'forest_state_3_cells', 'forest_state_2_cells',
'forest_state_1_cells', 'es_income_forest', 'es_income_waterflow',
'es_income_agricultural_productivity', 'es_income_precipitation',
'es_income_pop_density', 'MAP', 'max_npp', 'mean_waterflow',
'max_AG', 'max_ES', 'max_bca', 'max_soil_deg', 'max_pop_grad'
])
def init_traders_trajectory_output(self):
self.traders_trajectory.append([
'time', 'total_population', 'total_migrants', 'total_traders',
'total_settlements', 'total_agriculture_cells',
'total_cells_in_influence', 'total_trade_links',
'total_income_agriculture', 'total_income_ecosystem',
'total_income_trade', 'es_income_forest', 'es_income_waterflow',
'es_income_agricultural_productivity', 'es_income_precipitation',
'es_income_pop_density'
])
def update_trajectory_output(self, time, args, built, lost,
new_settlements, killed_settlements):
# args = [npp, wf, ag, es, bca]
total_population = sum(self.population)
try:
max_population = np.nanmax(self.population)
except:
max_population = float('nan')
        total_migrants = sum(self.migrants)
total_settlements = len(self.population)
total_trade_links = sum(self.degree) / 2
income_agriculture = sum(self.crop_yield)
income_ecosystem = sum(self.eco_benefit)
income_trade = sum(self.trade_income)
number_of_components = float(
sum([1 if value > 0 else 0 for value in self.comp_size]))
mean_cluster_size = float(sum(self.comp_size)) / number_of_components \
if number_of_components > 0 else 0
try:
max_cluster_size = max(self.comp_size)
except:
max_cluster_size = 0
self.max_cluster_size = max_cluster_size
total_agriculture_cells = sum(self.number_cropped_cells)
total_cells_in_influence = sum(self.number_cells_in_influence)
self.trajectory.append([
            time, total_population, max_population, total_migrants,
total_settlements, total_agriculture_cells,
total_cells_in_influence, total_trade_links, mean_cluster_size,
max_cluster_size, new_settlements, killed_settlements, built, lost,
income_agriculture, income_ecosystem, income_trade,
np.nanmean(self.soil_deg),
np.sum(self.forest_state == 3),
np.sum(self.forest_state == 2),
np.sum(self.forest_state == 1),
np.sum(self.s_es_fs),
np.sum(self.s_es_wf),
np.sum(self.s_es_ag),
np.sum(self.s_es_sp),
np.sum(self.s_es_pg),
np.nanmean(self.spaciotemporal_precipitation),
np.nanmax(args[0]),
np.nanmean(args[1]),
np.nanmax(args[2]),
np.nanmax(args[3]),
np.nanmax(args[4]),
np.nanmax(self.soil_deg),
np.nanmax(self.pop_gradient)
])
def update_traders_trajectory_output(self, time):
traders = np.where(np.array(self.degree) > 0)[0]
total_population = sum([self.population[c] for c in traders])
total_migrants = sum([self.migrants[c] for c in traders])
total_settlements = len(self.population)
total_traders = len(traders)
total_trade_links = sum(self.degree) / 2
income_agriculture = sum([self.crop_yield[c] for c in traders])
income_ecosystem = sum([self.eco_benefit[c] for c in traders])
income_trade = sum([self.trade_income[c] for c in traders])
income_es_fs = sum([self.s_es_fs[c] for c in traders])
income_es_wf = sum([self.s_es_wf[c] for c in traders])
income_es_ag = sum([self.s_es_ag[c] for c in traders])
income_es_sp = sum([self.s_es_sp[c] for c in traders])
income_es_pg = sum([self.s_es_pg[c] for c in traders])
number_of_components = float(
sum([1 if value > 0 else 0 for value in self.comp_size]))
mean_cluster_size = (float(sum(self.comp_size)) / number_of_components
if number_of_components > 0 else 0)
try:
max_cluster_size = max(self.comp_size)
except:
max_cluster_size = 0
total_agriculture_cells = \
sum([self.number_cropped_cells[c] for c in traders])
total_cells_in_influence = \
sum([self.number_cells_in_influence[c] for c in traders])
self.traders_trajectory.append([
time, total_population, total_migrants, total_traders,
total_settlements, total_agriculture_cells,
total_cells_in_influence, total_trade_links, income_agriculture,
income_ecosystem, income_trade, income_es_fs, income_es_wf,
income_es_ag, income_es_sp, income_es_pg
])
def get_trajectory(self):
try:
trj = np.array(self.trajectory)
columns = trj[0, :]
df = pandas.DataFrame(trj[1:, :], columns=columns)
except IOError:
print('trajectory mode must be turned on')
return df
def get_traders_trajectory(self):
try:
trj = self.traders_trajectory
columns = trj.pop(0)
df = pandas.DataFrame(trj, columns=columns)
except IOError:
print('trajectory mode must be turned on')
return df
def run_test(self, timesteps=5):
import shutil
N = 50
# define saving location
comment = "testing_version"
now = datetime.datetime.now()
location = "output_data/" \
+ "Output_" + comment + '/'
if os.path.exists(location):
shutil.rmtree(location)
os.makedirs(location)
# initialize Model
model = ModelCore(n=N,
debug=True,
output_trajectory=True,
output_settlement_data=True,
output_geographic_data=True,
output_data_location=location)
# run Model
model.crop_income_mode = 'sum'
model.r_es_sum = 0.0001
model.r_bca_sum = 0.1
        model.population_control = False
model.run(timesteps)
trj = model.get_trajectory()
plot = trj.plot()
return 1
def print_variable_lengths(self):
for var in dir(self):
if not var.startswith('__') and not callable(getattr(self, var)):
try:
if len(getattr(self, var)) != 432:
print(var, len(getattr(self, var)))
except:
pass
if __name__ == "__main__":
import matplotlib.pyplot as plt
import shutil
N = 10
# define saving location
comment = "testing_version"
now = datetime.datetime.now()
location = "output_data/" \
+ "Output_" + comment + '/'
if os.path.exists(location):
shutil.rmtree(location)
# os.makedirs(location)
# initialize Model
model = ModelCore(n=N,
debug=True,
output_trajectory=True,
output_settlement_data=True,
output_geographic_data=True,
output_data_location=location)
# run Model
timesteps = 300
model.crop_income_mode = 'sum'
model.r_es_sum = 0.0001
model.r_bca_sum = 0.25
    model.population_control = False
model.run(timesteps)
trj = model.get_trajectory()
plot = trj[[
        'total_population', 'total_settlements', 'total_migrants'
]].plot()
plt.show()
    plt.savefig(location + 'plot')
| gpl-3.0 | 4,912,365,931,477,879,000 | 38.209344 | 101 | 0.523777 | false |
martinp/jarvis2 | jarvis/jobs/imap.py | 1 | 1559 | # -*- coding: utf-8 -*-
import imaplib
import re
try:
# urlparse was moved to urllib.parse in Python 3
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
from jobs import AbstractJob
class IMAP(AbstractJob):
def __init__(self, conf):
self.interval = conf['interval']
self.email = conf['email']
self.url = urlparse(conf['url'])
self.tls = conf.get('tls', True)
self.starttls = conf.get('starttls', False)
self.folder = conf['folder']
def _parse_count(self, message):
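        # e.g. a STATUS response item like b'"INBOX" (MESSAGES 42)' yields 42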
count = re.search(r'\w+ (\d+)', message.decode('utf-8'))
return int(count.group(1)) if count is not None else 0
def _get_count(self):
_, message = self.mail.status(self.folder, '(MESSAGES)')
return self._parse_count(message[0])
def _get_unread_count(self):
_, message = self.mail.status(self.folder, '(UNSEEN)')
return self._parse_count(message[0])
def get(self):
if self.tls:
self.mail = imaplib.IMAP4_SSL(self.url.hostname, self.url.port)
else:
self.mail = imaplib.IMAP4(self.url.hostname, self.url.port)
if self.starttls:
self.mail.starttls()
self.mail.login(self.url.username, self.url.password)
count = self._get_count()
unread = self._get_unread_count()
self.mail.logout()
return {
'email': self.email,
'folder': self.folder,
'count': count,
'unread': unread
}
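

# Minimal usage sketch (not part of the original Jarvis job). The host,
# credentials and folder below are placeholders, and running this still
# requires the Jarvis 'jobs' package on the import path.
if __name__ == '__main__':
    example_conf = {
        'interval': 300,
        'email': 'user@example.com',
        'url': 'imaps://user:secret@imap.example.com:993',
        'folder': 'INBOX',
    }
    # Connects over SSL, logs in and prints total/unread counts for the folder.
    print(IMAP(example_conf).get())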
| mit | -1,302,837,366,633,347,300 | 28.980769 | 75 | 0.584349 | false |
tmct/adventOfCode2016 | problems/21/Solver.py | 1 | 4493 | import re
swap_positions_regex = r'swap position (\d+) with position (\d+)'
swap_letters_regex = r'swap letter (.) with letter (.)'
rotate_regex = r'rotate (left|right) (\d+)'
rotate_on_letter_position_regex = r'rotate based on position of letter (.)'
reverse_slice_regex = r'reverse positions (\d+) through (\d+)'
move_regex = r'move position (\d+) to position (\d+)'
class Solver:
def __init__(self, start_string, decrypt = False):
self.buffer = list(start_string)
self.instructions = []
self.decrypt = decrypt
self.reverse_shift = {int(i): int(j) for i, j in zip('13572460', '76542107')}
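        # reverse_shift inverts the "rotate based on position of letter"
        # operation for 8-character passwords: it maps the letter's index in
        # the scrambled string to the right-rotation that undoes the step.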
def solve(self, input_file_name):
intermediates = [''.join(self.buffer)]
with open(input_file_name, 'r') as input_file:
for line in input_file:
self.add_instruction(line.strip())
if self.decrypt:
self.instructions = self.instructions[::-1]
for instruction in self.instructions:
instruction()
# intermediates.append(''.join(self.buffer))
# if not self.decrypt:
# intermediates = intermediates[::-1]
# for i in intermediates:
# print(i)
return ''.join(self.buffer)
def add_instruction(self, instruction_string):
match = re.search(swap_positions_regex, instruction_string)
if match:
return self.add_swap_positions_instruction(match)
match = re.search(swap_letters_regex, instruction_string)
if match:
return self.add_swap_letters_instruction(match)
match = re.search(rotate_regex, instruction_string)
if match:
return self.add_rotate_instruction(match)
match = re.search(rotate_on_letter_position_regex, instruction_string)
if match:
return self.add_rotate_on_letter_position_instruction(match)
match = re.search(reverse_slice_regex, instruction_string)
if match:
return self.reverse_slice_instruction(match)
match = re.search(move_regex, instruction_string)
if match:
return self.move_instruction(match)
raise Exception('Could not parse line! "{}"'.format(instruction_string))
def add_swap_positions_instruction(self, match):
first, second = (int(group) for group in match.groups())
def swap_positions():
self.buffer[first], self.buffer[second] = self.buffer[second], self.buffer[first]
self.instructions.append(swap_positions)
def add_swap_letters_instruction(self, match):
def swap_letters():
first, second = (self.buffer.index(group) for group in match.groups())
self.buffer[first], self.buffer[second] = self.buffer[second], self.buffer[first]
self.instructions.append(swap_letters)
def add_rotate_instruction(self, match):
steps = int(match.group(2)) % len(self.buffer)
if match.group(1) == 'left':
steps = (len(self.buffer) - steps) % len(self.buffer)
if self.decrypt:
steps = (len(self.buffer) - steps) % len(self.buffer)
def rotate():
self.buffer = self.buffer[-steps:] + self.buffer[:-steps]
self.instructions.append(rotate)
def add_rotate_on_letter_position_instruction(self, match):
def rotate_on_letter_position():
if self.decrypt:
final_index = self.buffer.index(match.group(1)) % 8
steps = self.reverse_shift[final_index]
else:
steps = 1 + self.buffer.index(match.group(1))
if steps >= 5:
steps += 1
steps %= len(self.buffer)
self.buffer = self.buffer[-steps:] + self.buffer[:-steps]
self.instructions.append(rotate_on_letter_position)
def reverse_slice_instruction(self, match):
first, second = (int(group) for group in match.groups())
def reverse_slice():
self.buffer = self.buffer[:first] + self.buffer[first:second + 1][::-1] + self.buffer[second + 1:]
self.instructions.append(reverse_slice)
def move_instruction(self, match):
first, second = (int(group) for group in match.groups())
if self.decrypt:
first, second = second, first
def move():
value = self.buffer[first]
del self.buffer[first]
self.buffer.insert(second, value)
self.instructions.append(move)
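

# Minimal usage sketch (not part of the original repository). It assumes the
# puzzle instructions live in a file called 'input.txt' and uses the usual
# Advent of Code 2016 day 21 start strings.
if __name__ == '__main__':
    print(Solver('abcdefgh').solve('input.txt'))  # part 1: scramble
    print(Solver('fbgdceah', decrypt=True).solve('input.txt'))  # part 2: unscramble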
| mit | 7,164,240,172,985,285,000 | 38.761062 | 110 | 0.605831 | false |
batermj/algorithm-challenger | code-analysis/programming_anguage/python/source_codes/Python3.8.0/Python-3.8.0/Lib/email/_header_value_parser.py | 1 | 106086 | """Header value parser implementing various email-related RFC parsing rules.
The parsing methods defined in this module implement various email related
parsing rules. Principal among them is RFC 5322, which is the follow-on
to RFC 2822 and primarily a clarification of the former. It also implements
RFC 2047 encoded word decoding.
RFC 5322 goes to considerable trouble to maintain backward compatibility with
RFC 822 in the parse phase, while cleaning up the structure on the generation
phase. This parser supports correct RFC 5322 generation by tagging white space
as folding white space only when folding is allowed in the non-obsolete rule
sets. Actually, the parser is even more generous when accepting input than RFC
5322 mandates, following the spirit of Postel's Law, which RFC 5322 encourages.
Where possible deviations from the standard are annotated on the 'defects'
attribute of tokens that deviate.
The general structure of the parser follows RFC 5322, and uses its terminology
where there is a direct correspondence. Where the implementation requires a
somewhat different structure than that used by the formal grammar, new terms
that mimic the closest existing terms are used. Thus, it really helps to have
a copy of RFC 5322 handy when studying this code.
Input to the parser is a string that has already been unfolded according to
RFC 5322 rules. According to the RFC this unfolding is the very first step, and
this parser leaves the unfolding step to a higher level message parser, which
will have already detected the line breaks that need unfolding while
determining the beginning and end of each header.
The output of the parser is a TokenList object, which is a list subclass. A
TokenList is a recursive data structure. The terminal nodes of the structure
are Terminal objects, which are subclasses of str. These do not correspond
directly to terminal objects in the formal grammar, but are instead more
practical higher level combinations of true terminals.
All TokenList and Terminal objects have a 'value' attribute, which produces the
semantically meaningful value of that part of the parse subtree. The value of
all whitespace tokens (no matter how many sub-tokens they may contain) is a
single space, as per the RFC rules. This includes 'CFWS', which is herein
included in the general class of whitespace tokens. There is one exception to
the rule that whitespace tokens are collapsed into single spaces in values: in
the value of a 'bare-quoted-string' (a quoted-string with no leading or
trailing whitespace), any whitespace that appeared between the quotation marks
is preserved in the returned value. Note that in all Terminal strings quoted
pairs are turned into their unquoted values.
All TokenList and Terminal objects also have a string value, which attempts to
be a "canonical" representation of the RFC-compliant form of the substring that
produced the parsed subtree, including minimal use of quoted pair quoting.
Whitespace runs are not collapsed.
Comment tokens also have a 'content' attribute providing the string found
between the parens (including any nested comments) with whitespace preserved.
All TokenList and Terminal objects have a 'defects' attribute which is a
possibly empty list all of the defects found while creating the token. Defects
may appear on any token in the tree, and a composite list of all defects in the
subtree is available through the 'all_defects' attribute of any node. (For
Terminal nodes x.defects == x.all_defects.)
Each object in a parse tree is called a 'token', and each has a 'token_type'
attribute that gives the name from the RFC 5322 grammar that it represents.
Not all RFC 5322 nodes are produced, and there is one non-RFC 5322 node that
may be produced: 'ptext'. A 'ptext' is a string of printable ascii characters.
It is returned in place of lists of (ctext/quoted-pair) and
(qtext/quoted-pair).
XXX: provide complete list of token types.
"""
import re
import sys
import urllib # For urllib.parse.unquote
from string import hexdigits
from operator import itemgetter
from email import _encoded_words as _ew
from email import errors
from email import utils
#
# Useful constants and functions
#
WSP = set(' \t')
CFWS_LEADER = WSP | set('(')
SPECIALS = set(r'()<>@,:;.\"[]')
ATOM_ENDS = SPECIALS | WSP
DOT_ATOM_ENDS = ATOM_ENDS - set('.')
# '.', '"', and '(' do not end phrases in order to support obs-phrase
PHRASE_ENDS = SPECIALS - set('."(')
TSPECIALS = (SPECIALS | set('/?=')) - set('.')
TOKEN_ENDS = TSPECIALS | WSP
ASPECIALS = TSPECIALS | set("*'%")
ATTRIBUTE_ENDS = ASPECIALS | WSP
EXTENDED_ATTRIBUTE_ENDS = ATTRIBUTE_ENDS - set('%')
def quote_string(value):
return '"'+str(value).replace('\\', '\\\\').replace('"', r'\"')+'"'
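# For illustration: quote_string('a "b" c') yields "a \"b\" c" (backslashes and
# double quotes are escaped, then the whole value is wrapped in double quotes).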
# Match a RFC 2047 word, looks like =?utf-8?q?someword?=
rfc2047_matcher = re.compile(r'''
=\? # literal =?
[^?]* # charset
\? # literal ?
[qQbB] # literal 'q' or 'b', case insensitive
\? # literal ?
.*? # encoded word
\?= # literal ?=
''', re.VERBOSE | re.MULTILINE)
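# For example, rfc2047_matcher.search('=?utf-8?q?hello_world?=') finds a match,
# while plain ASCII text without the =?...?= wrapper does not.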
#
# TokenList and its subclasses
#
class TokenList(list):
token_type = None
syntactic_break = True
ew_combine_allowed = True
def __init__(self, *args, **kw):
super().__init__(*args, **kw)
self.defects = []
def __str__(self):
return ''.join(str(x) for x in self)
def __repr__(self):
return '{}({})'.format(self.__class__.__name__,
super().__repr__())
@property
def value(self):
return ''.join(x.value for x in self if x.value)
@property
def all_defects(self):
return sum((x.all_defects for x in self), self.defects)
def startswith_fws(self):
return self[0].startswith_fws()
@property
def as_ew_allowed(self):
"""True if all top level tokens of this part may be RFC2047 encoded."""
return all(part.as_ew_allowed for part in self)
@property
def comments(self):
comments = []
for token in self:
comments.extend(token.comments)
return comments
def fold(self, *, policy):
return _refold_parse_tree(self, policy=policy)
def pprint(self, indent=''):
print(self.ppstr(indent=indent))
def ppstr(self, indent=''):
return '\n'.join(self._pp(indent=indent))
def _pp(self, indent=''):
yield '{}{}/{}('.format(
indent,
self.__class__.__name__,
self.token_type)
for token in self:
if not hasattr(token, '_pp'):
yield (indent + ' !! invalid element in token '
'list: {!r}'.format(token))
else:
yield from token._pp(indent+' ')
if self.defects:
extra = ' Defects: {}'.format(self.defects)
else:
extra = ''
yield '{}){}'.format(indent, extra)
class WhiteSpaceTokenList(TokenList):
@property
def value(self):
return ' '
@property
def comments(self):
return [x.content for x in self if x.token_type=='comment']
class UnstructuredTokenList(TokenList):
token_type = 'unstructured'
class Phrase(TokenList):
token_type = 'phrase'
class Word(TokenList):
token_type = 'word'
class CFWSList(WhiteSpaceTokenList):
token_type = 'cfws'
class Atom(TokenList):
token_type = 'atom'
class Token(TokenList):
token_type = 'token'
encode_as_ew = False
class EncodedWord(TokenList):
token_type = 'encoded-word'
cte = None
charset = None
lang = None
class QuotedString(TokenList):
token_type = 'quoted-string'
@property
def content(self):
for x in self:
if x.token_type == 'bare-quoted-string':
return x.value
@property
def quoted_value(self):
res = []
for x in self:
if x.token_type == 'bare-quoted-string':
res.append(str(x))
else:
res.append(x.value)
return ''.join(res)
@property
def stripped_value(self):
for token in self:
if token.token_type == 'bare-quoted-string':
return token.value
class BareQuotedString(QuotedString):
token_type = 'bare-quoted-string'
def __str__(self):
return quote_string(''.join(str(x) for x in self))
@property
def value(self):
return ''.join(str(x) for x in self)
class Comment(WhiteSpaceTokenList):
token_type = 'comment'
def __str__(self):
return ''.join(sum([
["("],
[self.quote(x) for x in self],
[")"],
], []))
def quote(self, value):
if value.token_type == 'comment':
return str(value)
return str(value).replace('\\', '\\\\').replace(
'(', r'\(').replace(
')', r'\)')
@property
def content(self):
return ''.join(str(x) for x in self)
@property
def comments(self):
return [self.content]
class AddressList(TokenList):
token_type = 'address-list'
@property
def addresses(self):
return [x for x in self if x.token_type=='address']
@property
def mailboxes(self):
return sum((x.mailboxes
for x in self if x.token_type=='address'), [])
@property
def all_mailboxes(self):
return sum((x.all_mailboxes
for x in self if x.token_type=='address'), [])
class Address(TokenList):
token_type = 'address'
@property
def display_name(self):
if self[0].token_type == 'group':
return self[0].display_name
@property
def mailboxes(self):
if self[0].token_type == 'mailbox':
return [self[0]]
elif self[0].token_type == 'invalid-mailbox':
return []
return self[0].mailboxes
@property
def all_mailboxes(self):
if self[0].token_type == 'mailbox':
return [self[0]]
elif self[0].token_type == 'invalid-mailbox':
return [self[0]]
return self[0].all_mailboxes
class MailboxList(TokenList):
token_type = 'mailbox-list'
@property
def mailboxes(self):
return [x for x in self if x.token_type=='mailbox']
@property
def all_mailboxes(self):
return [x for x in self
if x.token_type in ('mailbox', 'invalid-mailbox')]
class GroupList(TokenList):
token_type = 'group-list'
@property
def mailboxes(self):
if not self or self[0].token_type != 'mailbox-list':
return []
return self[0].mailboxes
@property
def all_mailboxes(self):
if not self or self[0].token_type != 'mailbox-list':
return []
return self[0].all_mailboxes
class Group(TokenList):
token_type = "group"
@property
def mailboxes(self):
if self[2].token_type != 'group-list':
return []
return self[2].mailboxes
@property
def all_mailboxes(self):
if self[2].token_type != 'group-list':
return []
return self[2].all_mailboxes
@property
def display_name(self):
return self[0].display_name
class NameAddr(TokenList):
token_type = 'name-addr'
@property
def display_name(self):
if len(self) == 1:
return None
return self[0].display_name
@property
def local_part(self):
return self[-1].local_part
@property
def domain(self):
return self[-1].domain
@property
def route(self):
return self[-1].route
@property
def addr_spec(self):
return self[-1].addr_spec
class AngleAddr(TokenList):
token_type = 'angle-addr'
@property
def local_part(self):
for x in self:
if x.token_type == 'addr-spec':
return x.local_part
@property
def domain(self):
for x in self:
if x.token_type == 'addr-spec':
return x.domain
@property
def route(self):
for x in self:
if x.token_type == 'obs-route':
return x.domains
@property
def addr_spec(self):
for x in self:
if x.token_type == 'addr-spec':
if x.local_part:
return x.addr_spec
else:
return quote_string(x.local_part) + x.addr_spec
else:
return '<>'
class ObsRoute(TokenList):
token_type = 'obs-route'
@property
def domains(self):
return [x.domain for x in self if x.token_type == 'domain']
class Mailbox(TokenList):
token_type = 'mailbox'
@property
def display_name(self):
if self[0].token_type == 'name-addr':
return self[0].display_name
@property
def local_part(self):
return self[0].local_part
@property
def domain(self):
return self[0].domain
@property
def route(self):
if self[0].token_type == 'name-addr':
return self[0].route
@property
def addr_spec(self):
return self[0].addr_spec
class InvalidMailbox(TokenList):
token_type = 'invalid-mailbox'
@property
def display_name(self):
return None
local_part = domain = route = addr_spec = display_name
class Domain(TokenList):
token_type = 'domain'
as_ew_allowed = False
@property
def domain(self):
return ''.join(super().value.split())
class DotAtom(TokenList):
token_type = 'dot-atom'
class DotAtomText(TokenList):
token_type = 'dot-atom-text'
as_ew_allowed = True
class NoFoldLiteral(TokenList):
token_type = 'no-fold-literal'
as_ew_allowed = False
class AddrSpec(TokenList):
token_type = 'addr-spec'
as_ew_allowed = False
@property
def local_part(self):
return self[0].local_part
@property
def domain(self):
if len(self) < 3:
return None
return self[-1].domain
@property
def value(self):
if len(self) < 3:
return self[0].value
return self[0].value.rstrip()+self[1].value+self[2].value.lstrip()
@property
def addr_spec(self):
nameset = set(self.local_part)
if len(nameset) > len(nameset-DOT_ATOM_ENDS):
lp = quote_string(self.local_part)
else:
lp = self.local_part
if self.domain is not None:
return lp + '@' + self.domain
return lp
class ObsLocalPart(TokenList):
token_type = 'obs-local-part'
as_ew_allowed = False
class DisplayName(Phrase):
token_type = 'display-name'
ew_combine_allowed = False
@property
def display_name(self):
res = TokenList(self)
if len(res) == 0:
return res.value
if res[0].token_type == 'cfws':
res.pop(0)
else:
if res[0][0].token_type == 'cfws':
res[0] = TokenList(res[0][1:])
if res[-1].token_type == 'cfws':
res.pop()
else:
if res[-1][-1].token_type == 'cfws':
res[-1] = TokenList(res[-1][:-1])
return res.value
@property
def value(self):
quote = False
if self.defects:
quote = True
else:
for x in self:
if x.token_type == 'quoted-string':
quote = True
if len(self) != 0 and quote:
pre = post = ''
if self[0].token_type=='cfws' or self[0][0].token_type=='cfws':
pre = ' '
if self[-1].token_type=='cfws' or self[-1][-1].token_type=='cfws':
post = ' '
return pre+quote_string(self.display_name)+post
else:
return super().value
class LocalPart(TokenList):
token_type = 'local-part'
as_ew_allowed = False
@property
def value(self):
if self[0].token_type == "quoted-string":
return self[0].quoted_value
else:
return self[0].value
@property
def local_part(self):
# Strip whitespace from front, back, and around dots.
res = [DOT]
last = DOT
last_is_tl = False
for tok in self[0] + [DOT]:
if tok.token_type == 'cfws':
continue
if (last_is_tl and tok.token_type == 'dot' and
last[-1].token_type == 'cfws'):
res[-1] = TokenList(last[:-1])
is_tl = isinstance(tok, TokenList)
if (is_tl and last.token_type == 'dot' and
tok[0].token_type == 'cfws'):
res.append(TokenList(tok[1:]))
else:
res.append(tok)
last = res[-1]
last_is_tl = is_tl
res = TokenList(res[1:-1])
return res.value
class DomainLiteral(TokenList):
token_type = 'domain-literal'
as_ew_allowed = False
@property
def domain(self):
return ''.join(super().value.split())
@property
def ip(self):
for x in self:
if x.token_type == 'ptext':
return x.value
class MIMEVersion(TokenList):
token_type = 'mime-version'
major = None
minor = None
class Parameter(TokenList):
token_type = 'parameter'
sectioned = False
extended = False
charset = 'us-ascii'
@property
def section_number(self):
        # Because the first token, the attribute (name), eats CFWS, the second
# token is always the section if there is one.
return self[1].number if self.sectioned else 0
@property
def param_value(self):
# This is part of the "handle quoted extended parameters" hack.
for token in self:
if token.token_type == 'value':
return token.stripped_value
if token.token_type == 'quoted-string':
for token in token:
if token.token_type == 'bare-quoted-string':
for token in token:
if token.token_type == 'value':
return token.stripped_value
return ''
class InvalidParameter(Parameter):
token_type = 'invalid-parameter'
class Attribute(TokenList):
token_type = 'attribute'
@property
def stripped_value(self):
for token in self:
if token.token_type.endswith('attrtext'):
return token.value
class Section(TokenList):
token_type = 'section'
number = None
class Value(TokenList):
token_type = 'value'
@property
def stripped_value(self):
token = self[0]
if token.token_type == 'cfws':
token = self[1]
if token.token_type.endswith(
('quoted-string', 'attribute', 'extended-attribute')):
return token.stripped_value
return self.value
class MimeParameters(TokenList):
token_type = 'mime-parameters'
syntactic_break = False
@property
def params(self):
# The RFC specifically states that the ordering of parameters is not
# guaranteed and may be reordered by the transport layer. So we have
# to assume the RFC 2231 pieces can come in any order. However, we
# output them in the order that we first see a given name, which gives
# us a stable __str__.
params = {} # Using order preserving dict from Python 3.7+
for token in self:
if not token.token_type.endswith('parameter'):
continue
if token[0].token_type != 'attribute':
continue
name = token[0].value.strip()
if name not in params:
params[name] = []
params[name].append((token.section_number, token))
for name, parts in params.items():
parts = sorted(parts, key=itemgetter(0))
first_param = parts[0][1]
charset = first_param.charset
# Our arbitrary error recovery is to ignore duplicate parameters,
# to use appearance order if there are duplicate rfc 2231 parts,
# and to ignore gaps. This mimics the error recovery of get_param.
if not first_param.extended and len(parts) > 1:
if parts[1][0] == 0:
parts[1][1].defects.append(errors.InvalidHeaderDefect(
'duplicate parameter name; duplicate(s) ignored'))
parts = parts[:1]
# Else assume the *0* was missing...note that this is different
# from get_param, but we registered a defect for this earlier.
value_parts = []
i = 0
for section_number, param in parts:
if section_number != i:
# We could get fancier here and look for a complete
# duplicate extended parameter and ignore the second one
# seen. But we're not doing that. The old code didn't.
if not param.extended:
param.defects.append(errors.InvalidHeaderDefect(
'duplicate parameter name; duplicate ignored'))
continue
else:
param.defects.append(errors.InvalidHeaderDefect(
"inconsistent RFC2231 parameter numbering"))
i += 1
value = param.param_value
if param.extended:
try:
value = urllib.parse.unquote_to_bytes(value)
except UnicodeEncodeError:
# source had surrogate escaped bytes. What we do now
# is a bit of an open question. I'm not sure this is
# the best choice, but it is what the old algorithm did
value = urllib.parse.unquote(value, encoding='latin-1')
else:
try:
value = value.decode(charset, 'surrogateescape')
except LookupError:
# XXX: there should really be a custom defect for
# unknown character set to make it easy to find,
# because otherwise unknown charset is a silent
# failure.
value = value.decode('us-ascii', 'surrogateescape')
if utils._has_surrogates(value):
param.defects.append(errors.UndecodableBytesDefect())
value_parts.append(value)
value = ''.join(value_parts)
yield name, value
def __str__(self):
params = []
for name, value in self.params:
if value:
params.append('{}={}'.format(name, quote_string(value)))
else:
params.append(name)
params = '; '.join(params)
return ' ' + params if params else ''
class ParameterizedHeaderValue(TokenList):
# Set this false so that the value doesn't wind up on a new line even
# if it and the parameters would fit there but not on the first line.
syntactic_break = False
@property
def params(self):
for token in reversed(self):
if token.token_type == 'mime-parameters':
return token.params
return {}
class ContentType(ParameterizedHeaderValue):
token_type = 'content-type'
as_ew_allowed = False
maintype = 'text'
subtype = 'plain'
class ContentDisposition(ParameterizedHeaderValue):
token_type = 'content-disposition'
as_ew_allowed = False
content_disposition = None
class ContentTransferEncoding(TokenList):
token_type = 'content-transfer-encoding'
as_ew_allowed = False
cte = '7bit'
class HeaderLabel(TokenList):
token_type = 'header-label'
as_ew_allowed = False
class MsgID(TokenList):
token_type = 'msg-id'
as_ew_allowed = False
def fold(self, policy):
# message-id tokens may not be folded.
return str(self) + policy.linesep
class MessageID(MsgID):
token_type = 'message-id'
class Header(TokenList):
token_type = 'header'
#
# Terminal classes and instances
#
class Terminal(str):
as_ew_allowed = True
ew_combine_allowed = True
syntactic_break = True
def __new__(cls, value, token_type):
self = super().__new__(cls, value)
self.token_type = token_type
self.defects = []
return self
def __repr__(self):
return "{}({})".format(self.__class__.__name__, super().__repr__())
def pprint(self):
print(self.__class__.__name__ + '/' + self.token_type)
@property
def all_defects(self):
return list(self.defects)
def _pp(self, indent=''):
return ["{}{}/{}({}){}".format(
indent,
self.__class__.__name__,
self.token_type,
super().__repr__(),
'' if not self.defects else ' {}'.format(self.defects),
)]
def pop_trailing_ws(self):
# This terminates the recursion.
return None
@property
def comments(self):
return []
def __getnewargs__(self):
return(str(self), self.token_type)
class WhiteSpaceTerminal(Terminal):
@property
def value(self):
return ' '
def startswith_fws(self):
return True
class ValueTerminal(Terminal):
@property
def value(self):
return self
def startswith_fws(self):
return False
class EWWhiteSpaceTerminal(WhiteSpaceTerminal):
@property
def value(self):
return ''
def __str__(self):
return ''
class _InvalidEwError(errors.HeaderParseError):
"""Invalid encoded word found while parsing headers."""
# XXX these need to become classes and used as instances so
# that a program can't change them in a parse tree and screw
# up other parse trees. Maybe should have tests for that, too.
DOT = ValueTerminal('.', 'dot')
ListSeparator = ValueTerminal(',', 'list-separator')
RouteComponentMarker = ValueTerminal('@', 'route-component-marker')
#
# Parser
#
# Parse strings according to RFC822/2047/2822/5322 rules.
#
# This is a stateless parser. Each get_XXX function accepts a string and
# returns either a Terminal or a TokenList representing the RFC object named
# by the method and a string containing the remaining unparsed characters
# from the input. Thus a parser method consumes the next syntactic construct
# of a given type and returns a token representing the construct plus the
# unparsed remainder of the input string.
#
# For example, if the first element of a structured header is a 'phrase',
# then:
#
# phrase, value = get_phrase(value)
#
# returns the complete phrase from the start of the string value, plus any
# characters left in the string after the phrase is removed.
_wsp_splitter = re.compile(r'([{}]+)'.format(''.join(WSP))).split
_non_atom_end_matcher = re.compile(r"[^{}]+".format(
re.escape(''.join(ATOM_ENDS)))).match
_non_printable_finder = re.compile(r"[\x00-\x20\x7F]").findall
_non_token_end_matcher = re.compile(r"[^{}]+".format(
re.escape(''.join(TOKEN_ENDS)))).match
_non_attribute_end_matcher = re.compile(r"[^{}]+".format(
re.escape(''.join(ATTRIBUTE_ENDS)))).match
_non_extended_attribute_end_matcher = re.compile(r"[^{}]+".format(
re.escape(''.join(EXTENDED_ATTRIBUTE_ENDS)))).match
def _validate_xtext(xtext):
"""If input token contains ASCII non-printables, register a defect."""
non_printables = _non_printable_finder(xtext)
if non_printables:
xtext.defects.append(errors.NonPrintableDefect(non_printables))
if utils._has_surrogates(xtext):
xtext.defects.append(errors.UndecodableBytesDefect(
"Non-ASCII characters found in header token"))
def _get_ptext_to_endchars(value, endchars):
"""Scan printables/quoted-pairs until endchars and return unquoted ptext.
This function turns a run of qcontent, ccontent-without-comments, or
dtext-with-quoted-printables into a single string by unquoting any
quoted printables. It returns the string, the remaining value, and
a flag that is True iff there were any quoted printables decoded.
"""
fragment, *remainder = _wsp_splitter(value, 1)
vchars = []
escape = False
had_qp = False
for pos in range(len(fragment)):
if fragment[pos] == '\\':
if escape:
escape = False
had_qp = True
else:
escape = True
continue
if escape:
escape = False
elif fragment[pos] in endchars:
break
vchars.append(fragment[pos])
else:
pos = pos + 1
return ''.join(vchars), ''.join([fragment[pos:]] + remainder), had_qp
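# For example (illustrative only), scanning comment content up to a ')' while
# decoding the quoted-pair '\(' gives:
#   _get_ptext_to_endchars(r'a\(b) tail', '()')  ->  ('a(b', ') tail', False)
# The remainder starts at the end character that stopped the scan.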
def get_fws(value):
"""FWS = 1*WSP
This isn't the RFC definition. We're using fws to represent tokens where
folding can be done, but when we are parsing the *un*folding has already
been done so we don't need to watch out for CRLF.
"""
newvalue = value.lstrip()
fws = WhiteSpaceTerminal(value[:len(value)-len(newvalue)], 'fws')
return fws, newvalue
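# Example (illustrative): leading whitespace becomes a single fws token while
# the original characters are preserved in the token's string form:
#   fws, rest = get_fws(' \t hello')
#   str(fws)   ->  ' \t '
#   fws.value  ->  ' '
#   rest       ->  'hello'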
def get_encoded_word(value):
""" encoded-word = "=?" charset "?" encoding "?" encoded-text "?="
"""
ew = EncodedWord()
if not value.startswith('=?'):
raise errors.HeaderParseError(
"expected encoded word but found {}".format(value))
tok, *remainder = value[2:].split('?=', 1)
if tok == value[2:]:
raise errors.HeaderParseError(
"expected encoded word but found {}".format(value))
remstr = ''.join(remainder)
if (len(remstr) > 1 and
remstr[0] in hexdigits and
remstr[1] in hexdigits and
tok.count('?') < 2):
# The ? after the CTE was followed by an encoded word escape (=XX).
rest, *remainder = remstr.split('?=', 1)
tok = tok + '?=' + rest
if len(tok.split()) > 1:
ew.defects.append(errors.InvalidHeaderDefect(
"whitespace inside encoded word"))
ew.cte = value
value = ''.join(remainder)
try:
text, charset, lang, defects = _ew.decode('=?' + tok + '?=')
except (ValueError, KeyError):
raise _InvalidEwError(
"encoded word format invalid: '{}'".format(ew.cte))
ew.charset = charset
ew.lang = lang
ew.defects.extend(defects)
while text:
if text[0] in WSP:
token, text = get_fws(text)
ew.append(token)
continue
chars, *remainder = _wsp_splitter(text, 1)
vtext = ValueTerminal(chars, 'vtext')
_validate_xtext(vtext)
ew.append(vtext)
text = ''.join(remainder)
# Encoded words should be followed by a WS
if value and value[0] not in WSP:
ew.defects.append(errors.InvalidHeaderDefect(
"missing trailing whitespace after encoded-word"))
return ew, value
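# Example (illustrative): a well-formed RFC 2047 word is decoded into an
# EncodedWord token whose str() is the decoded text:
#   ew, rest = get_encoded_word('=?utf-8?q?caf=C3=A9?= and more')
#   str(ew)     ->  'café'
#   ew.charset  ->  'utf-8'
#   rest        ->  ' and more'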
def get_unstructured(value):
"""unstructured = (*([FWS] vchar) *WSP) / obs-unstruct
       obs-unstruct = *((*LF *CR *(obs-utext *LF *CR)) / FWS)
obs-utext = %d0 / obs-NO-WS-CTL / LF / CR
obs-NO-WS-CTL is control characters except WSP/CR/LF.
So, basically, we have printable runs, plus control characters or nulls in
the obsolete syntax, separated by whitespace. Since RFC 2047 uses the
obsolete syntax in its specification, but requires whitespace on either
side of the encoded words, I can see no reason to need to separate the
non-printable-non-whitespace from the printable runs if they occur, so we
parse this into xtext tokens separated by WSP tokens.
Because an 'unstructured' value must by definition constitute the entire
value, this 'get' routine does not return a remaining value, only the
parsed TokenList.
"""
# XXX: but what about bare CR and LF? They might signal the start or
# end of an encoded word. YAGNI for now, since our current parsers
# will never send us strings with bare CR or LF.
unstructured = UnstructuredTokenList()
while value:
if value[0] in WSP:
token, value = get_fws(value)
unstructured.append(token)
continue
valid_ew = True
if value.startswith('=?'):
try:
token, value = get_encoded_word(value)
except _InvalidEwError:
valid_ew = False
except errors.HeaderParseError:
# XXX: Need to figure out how to register defects when
# appropriate here.
pass
else:
have_ws = True
if len(unstructured) > 0:
if unstructured[-1].token_type != 'fws':
unstructured.defects.append(errors.InvalidHeaderDefect(
"missing whitespace before encoded word"))
have_ws = False
if have_ws and len(unstructured) > 1:
if unstructured[-2].token_type == 'encoded-word':
unstructured[-1] = EWWhiteSpaceTerminal(
unstructured[-1], 'fws')
unstructured.append(token)
continue
tok, *remainder = _wsp_splitter(value, 1)
# Split in the middle of an atom if there is a rfc2047 encoded word
# which does not have WSP on both sides. The defect will be registered
# the next time through the loop.
# This needs to only be performed when the encoded word is valid;
# otherwise, performing it on an invalid encoded word can cause
# the parser to go in an infinite loop.
if valid_ew and rfc2047_matcher.search(tok):
tok, *remainder = value.partition('=?')
vtext = ValueTerminal(tok, 'vtext')
_validate_xtext(vtext)
unstructured.append(vtext)
value = ''.join(remainder)
return unstructured
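# Example (illustrative): get_unstructured consumes the whole value and returns
# only the token list (no remainder), decoding any encoded words it finds:
#   ts = get_unstructured('Hello =?utf-8?q?W=C3=B6rld?=')
#   str(ts)  ->  'Hello Wörld'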
def get_qp_ctext(value):
r"""ctext = <printable ascii except \ ( )>
This is not the RFC ctext, since we are handling nested comments in comment
and unquoting quoted-pairs here. We allow anything except the '()'
characters, but if we find any ASCII other than the RFC defined printable
ASCII, a NonPrintableDefect is added to the token's defects list. Since
quoted pairs are converted to their unquoted values, what is returned is
    a 'ptext' token. In this case it is a WhiteSpaceTerminal, so its value
    is ' '.
"""
ptext, value, _ = _get_ptext_to_endchars(value, '()')
ptext = WhiteSpaceTerminal(ptext, 'ptext')
_validate_xtext(ptext)
return ptext, value
def get_qcontent(value):
"""qcontent = qtext / quoted-pair
We allow anything except the DQUOTE character, but if we find any ASCII
other than the RFC defined printable ASCII, a NonPrintableDefect is
added to the token's defects list. Any quoted pairs are converted to their
unquoted values, so what is returned is a 'ptext' token. In this case it
is a ValueTerminal.
"""
ptext, value, _ = _get_ptext_to_endchars(value, '"')
ptext = ValueTerminal(ptext, 'ptext')
_validate_xtext(ptext)
return ptext, value
def get_atext(value):
"""atext = <matches _atext_matcher>
We allow any non-ATOM_ENDS in atext, but add an InvalidATextDefect to
the token's defects list if we find non-atext characters.
"""
m = _non_atom_end_matcher(value)
if not m:
raise errors.HeaderParseError(
"expected atext but found '{}'".format(value))
atext = m.group()
value = value[len(atext):]
atext = ValueTerminal(atext, 'atext')
_validate_xtext(atext)
return atext, value
def get_bare_quoted_string(value):
"""bare-quoted-string = DQUOTE *([FWS] qcontent) [FWS] DQUOTE
A quoted-string without the leading or trailing white space. Its
value is the text between the quote marks, with whitespace
preserved and quoted pairs decoded.
"""
if value[0] != '"':
raise errors.HeaderParseError(
"expected '\"' but found '{}'".format(value))
bare_quoted_string = BareQuotedString()
value = value[1:]
if value and value[0] == '"':
token, value = get_qcontent(value)
bare_quoted_string.append(token)
while value and value[0] != '"':
if value[0] in WSP:
token, value = get_fws(value)
elif value[:2] == '=?':
try:
token, value = get_encoded_word(value)
bare_quoted_string.defects.append(errors.InvalidHeaderDefect(
"encoded word inside quoted string"))
except errors.HeaderParseError:
token, value = get_qcontent(value)
else:
token, value = get_qcontent(value)
bare_quoted_string.append(token)
if not value:
bare_quoted_string.defects.append(errors.InvalidHeaderDefect(
"end of header inside quoted string"))
return bare_quoted_string, value
return bare_quoted_string, value[1:]
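# Example (illustrative): the token's value is the text between the quotes,
# while str() re-quotes it; the remainder follows the closing DQUOTE:
#   token, rest = get_bare_quoted_string('"foo bar" tail')
#   token.value  ->  'foo bar'
#   str(token)   ->  '"foo bar"'
#   rest         ->  ' tail'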
def get_comment(value):
"""comment = "(" *([FWS] ccontent) [FWS] ")"
ccontent = ctext / quoted-pair / comment
We handle nested comments here, and quoted-pair in our qp-ctext routine.
"""
if value and value[0] != '(':
raise errors.HeaderParseError(
"expected '(' but found '{}'".format(value))
comment = Comment()
value = value[1:]
while value and value[0] != ")":
if value[0] in WSP:
token, value = get_fws(value)
elif value[0] == '(':
token, value = get_comment(value)
else:
token, value = get_qp_ctext(value)
comment.append(token)
if not value:
comment.defects.append(errors.InvalidHeaderDefect(
"end of header inside comment"))
return comment, value
return comment, value[1:]
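# Example (illustrative): nested comments are parsed recursively, and the
# content property gives the text inside the outermost parentheses:
#   token, rest = get_comment('(a (nested) remark) tail')
#   token.content  ->  'a (nested) remark'
#   str(token)     ->  '(a (nested) remark)'
#   rest           ->  ' tail'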
def get_cfws(value):
"""CFWS = (1*([FWS] comment) [FWS]) / FWS
"""
cfws = CFWSList()
while value and value[0] in CFWS_LEADER:
if value[0] in WSP:
token, value = get_fws(value)
else:
token, value = get_comment(value)
cfws.append(token)
return cfws, value
def get_quoted_string(value):
"""quoted-string = [CFWS] <bare-quoted-string> [CFWS]
'bare-quoted-string' is an intermediate class defined by this
parser and not by the RFC grammar. It is the quoted string
without any attached CFWS.
"""
quoted_string = QuotedString()
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
quoted_string.append(token)
token, value = get_bare_quoted_string(value)
quoted_string.append(token)
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
quoted_string.append(token)
return quoted_string, value
def get_atom(value):
"""atom = [CFWS] 1*atext [CFWS]
An atom could be an rfc2047 encoded word.
"""
atom = Atom()
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
atom.append(token)
if value and value[0] in ATOM_ENDS:
raise errors.HeaderParseError(
"expected atom but found '{}'".format(value))
if value.startswith('=?'):
try:
token, value = get_encoded_word(value)
except errors.HeaderParseError:
# XXX: need to figure out how to register defects when
# appropriate here.
token, value = get_atext(value)
else:
token, value = get_atext(value)
atom.append(token)
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
atom.append(token)
return atom, value
def get_dot_atom_text(value):
""" dot-text = 1*atext *("." 1*atext)
"""
dot_atom_text = DotAtomText()
if not value or value[0] in ATOM_ENDS:
raise errors.HeaderParseError("expected atom at a start of "
"dot-atom-text but found '{}'".format(value))
while value and value[0] not in ATOM_ENDS:
token, value = get_atext(value)
dot_atom_text.append(token)
if value and value[0] == '.':
dot_atom_text.append(DOT)
value = value[1:]
if dot_atom_text[-1] is DOT:
raise errors.HeaderParseError("expected atom at end of dot-atom-text "
"but found '{}'".format('.'+value))
return dot_atom_text, value
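# Example (illustrative): parsing stops at the first atom-ending special:
#   token, rest = get_dot_atom_text('mail.example.com> tail')
#   str(token)  ->  'mail.example.com'
#   rest        ->  '> tail'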
def get_dot_atom(value):
""" dot-atom = [CFWS] dot-atom-text [CFWS]
Any place we can have a dot atom, we could instead have an rfc2047 encoded
word.
"""
dot_atom = DotAtom()
if value[0] in CFWS_LEADER:
token, value = get_cfws(value)
dot_atom.append(token)
if value.startswith('=?'):
try:
token, value = get_encoded_word(value)
except errors.HeaderParseError:
# XXX: need to figure out how to register defects when
# appropriate here.
token, value = get_dot_atom_text(value)
else:
token, value = get_dot_atom_text(value)
dot_atom.append(token)
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
dot_atom.append(token)
return dot_atom, value
def get_word(value):
"""word = atom / quoted-string
Either atom or quoted-string may start with CFWS. We have to peel off this
CFWS first to determine which type of word to parse. Afterward we splice
the leading CFWS, if any, into the parsed sub-token.
If neither an atom or a quoted-string is found before the next special, a
HeaderParseError is raised.
The token returned is either an Atom or a QuotedString, as appropriate.
This means the 'word' level of the formal grammar is not represented in the
parse tree; this is because having that extra layer when manipulating the
parse tree is more confusing than it is helpful.
"""
if value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
else:
leader = None
if not value:
raise errors.HeaderParseError(
"Expected 'atom' or 'quoted-string' but found nothing.")
if value[0]=='"':
token, value = get_quoted_string(value)
elif value[0] in SPECIALS:
raise errors.HeaderParseError("Expected 'atom' or 'quoted-string' "
"but found '{}'".format(value))
else:
token, value = get_atom(value)
if leader is not None:
token[:0] = [leader]
return token, value
def get_phrase(value):
""" phrase = 1*word / obs-phrase
obs-phrase = word *(word / "." / CFWS)
This means a phrase can be a sequence of words, periods, and CFWS in any
order as long as it starts with at least one word. If anything other than
words is detected, an ObsoleteHeaderDefect is added to the token's defect
list. We also accept a phrase that starts with CFWS followed by a dot;
this is registered as an InvalidHeaderDefect, since it is not supported by
even the obsolete grammar.
"""
phrase = Phrase()
try:
token, value = get_word(value)
phrase.append(token)
except errors.HeaderParseError:
phrase.defects.append(errors.InvalidHeaderDefect(
"phrase does not start with word"))
while value and value[0] not in PHRASE_ENDS:
if value[0]=='.':
phrase.append(DOT)
phrase.defects.append(errors.ObsoleteHeaderDefect(
"period in 'phrase'"))
value = value[1:]
else:
try:
token, value = get_word(value)
except errors.HeaderParseError:
if value[0] in CFWS_LEADER:
token, value = get_cfws(value)
phrase.defects.append(errors.ObsoleteHeaderDefect(
"comment found without atom"))
else:
raise
phrase.append(token)
return phrase, value
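# Example (illustrative): a display-name style phrase stops at '<'; trailing
# folding whitespace is kept as part of the phrase token:
#   token, rest = get_phrase('Foo Bar <foo@example.com>')
#   str(token)  ->  'Foo Bar '
#   rest        ->  '<foo@example.com>'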
def get_local_part(value):
""" local-part = dot-atom / quoted-string / obs-local-part
"""
local_part = LocalPart()
leader = None
if value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
if not value:
raise errors.HeaderParseError(
"expected local-part but found '{}'".format(value))
try:
token, value = get_dot_atom(value)
except errors.HeaderParseError:
try:
token, value = get_word(value)
except errors.HeaderParseError:
if value[0] != '\\' and value[0] in PHRASE_ENDS:
raise
token = TokenList()
if leader is not None:
token[:0] = [leader]
local_part.append(token)
if value and (value[0]=='\\' or value[0] not in PHRASE_ENDS):
obs_local_part, value = get_obs_local_part(str(local_part) + value)
if obs_local_part.token_type == 'invalid-obs-local-part':
local_part.defects.append(errors.InvalidHeaderDefect(
"local-part is not dot-atom, quoted-string, or obs-local-part"))
else:
local_part.defects.append(errors.ObsoleteHeaderDefect(
"local-part is not a dot-atom (contains CFWS)"))
local_part[0] = obs_local_part
try:
local_part.value.encode('ascii')
except UnicodeEncodeError:
local_part.defects.append(errors.NonASCIILocalPartDefect(
"local-part contains non-ASCII characters)"))
return local_part, value
def get_obs_local_part(value):
""" obs-local-part = word *("." word)
"""
obs_local_part = ObsLocalPart()
last_non_ws_was_dot = False
while value and (value[0]=='\\' or value[0] not in PHRASE_ENDS):
if value[0] == '.':
if last_non_ws_was_dot:
obs_local_part.defects.append(errors.InvalidHeaderDefect(
"invalid repeated '.'"))
obs_local_part.append(DOT)
last_non_ws_was_dot = True
value = value[1:]
continue
elif value[0]=='\\':
obs_local_part.append(ValueTerminal(value[0],
'misplaced-special'))
value = value[1:]
obs_local_part.defects.append(errors.InvalidHeaderDefect(
"'\\' character outside of quoted-string/ccontent"))
last_non_ws_was_dot = False
continue
if obs_local_part and obs_local_part[-1].token_type != 'dot':
obs_local_part.defects.append(errors.InvalidHeaderDefect(
"missing '.' between words"))
try:
token, value = get_word(value)
last_non_ws_was_dot = False
except errors.HeaderParseError:
if value[0] not in CFWS_LEADER:
raise
token, value = get_cfws(value)
obs_local_part.append(token)
if (obs_local_part[0].token_type == 'dot' or
obs_local_part[0].token_type=='cfws' and
obs_local_part[1].token_type=='dot'):
obs_local_part.defects.append(errors.InvalidHeaderDefect(
"Invalid leading '.' in local part"))
if (obs_local_part[-1].token_type == 'dot' or
obs_local_part[-1].token_type=='cfws' and
obs_local_part[-2].token_type=='dot'):
obs_local_part.defects.append(errors.InvalidHeaderDefect(
"Invalid trailing '.' in local part"))
if obs_local_part.defects:
obs_local_part.token_type = 'invalid-obs-local-part'
return obs_local_part, value
def get_dtext(value):
r""" dtext = <printable ascii except \ [ ]> / obs-dtext
obs-dtext = obs-NO-WS-CTL / quoted-pair
We allow anything except the excluded characters, but if we find any
ASCII other than the RFC defined printable ASCII, a NonPrintableDefect is
added to the token's defects list. Quoted pairs are converted to their
unquoted values, so what is returned is a ptext token, in this case a
ValueTerminal. If there were quoted-printables, an ObsoleteHeaderDefect is
added to the returned token's defect list.
"""
ptext, value, had_qp = _get_ptext_to_endchars(value, '[]')
ptext = ValueTerminal(ptext, 'ptext')
if had_qp:
ptext.defects.append(errors.ObsoleteHeaderDefect(
"quoted printable found in domain-literal"))
_validate_xtext(ptext)
return ptext, value
def _check_for_early_dl_end(value, domain_literal):
if value:
return False
    domain_literal.defects.append(errors.InvalidHeaderDefect(
"end of input inside domain-literal"))
domain_literal.append(ValueTerminal(']', 'domain-literal-end'))
return True
def get_domain_literal(value):
""" domain-literal = [CFWS] "[" *([FWS] dtext) [FWS] "]" [CFWS]
"""
domain_literal = DomainLiteral()
if value[0] in CFWS_LEADER:
token, value = get_cfws(value)
domain_literal.append(token)
if not value:
raise errors.HeaderParseError("expected domain-literal")
if value[0] != '[':
raise errors.HeaderParseError("expected '[' at start of domain-literal "
"but found '{}'".format(value))
value = value[1:]
if _check_for_early_dl_end(value, domain_literal):
return domain_literal, value
domain_literal.append(ValueTerminal('[', 'domain-literal-start'))
if value[0] in WSP:
token, value = get_fws(value)
domain_literal.append(token)
token, value = get_dtext(value)
domain_literal.append(token)
if _check_for_early_dl_end(value, domain_literal):
return domain_literal, value
if value[0] in WSP:
token, value = get_fws(value)
domain_literal.append(token)
if _check_for_early_dl_end(value, domain_literal):
return domain_literal, value
if value[0] != ']':
raise errors.HeaderParseError("expected ']' at end of domain-literal "
"but found '{}'".format(value))
domain_literal.append(ValueTerminal(']', 'domain-literal-end'))
value = value[1:]
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
domain_literal.append(token)
return domain_literal, value
def get_domain(value):
""" domain = dot-atom / domain-literal / obs-domain
obs-domain = atom *("." atom))
"""
domain = Domain()
leader = None
if value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
if not value:
raise errors.HeaderParseError(
"expected domain but found '{}'".format(value))
if value[0] == '[':
token, value = get_domain_literal(value)
if leader is not None:
token[:0] = [leader]
domain.append(token)
return domain, value
try:
token, value = get_dot_atom(value)
except errors.HeaderParseError:
token, value = get_atom(value)
if value and value[0] == '@':
raise errors.HeaderParseError('Invalid Domain')
if leader is not None:
token[:0] = [leader]
domain.append(token)
if value and value[0] == '.':
domain.defects.append(errors.ObsoleteHeaderDefect(
"domain is not a dot-atom (contains CFWS)"))
if domain[0].token_type == 'dot-atom':
domain[:] = domain[0]
while value and value[0] == '.':
domain.append(DOT)
token, value = get_atom(value[1:])
domain.append(token)
return domain, value
def get_addr_spec(value):
""" addr-spec = local-part "@" domain
"""
addr_spec = AddrSpec()
token, value = get_local_part(value)
addr_spec.append(token)
if not value or value[0] != '@':
addr_spec.defects.append(errors.InvalidHeaderDefect(
"addr-spec local part with no domain"))
return addr_spec, value
addr_spec.append(ValueTerminal('@', 'address-at-symbol'))
token, value = get_domain(value[1:])
addr_spec.append(token)
return addr_spec, value
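# Example (illustrative): the convenience properties on the returned AddrSpec
# expose the pieces of the address:
#   token, rest = get_addr_spec('dinsdale@example.com> tail')
#   token.local_part  ->  'dinsdale'
#   token.domain      ->  'example.com'
#   token.addr_spec   ->  'dinsdale@example.com'
#   rest              ->  '> tail'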
def get_obs_route(value):
""" obs-route = obs-domain-list ":"
obs-domain-list = *(CFWS / ",") "@" domain *("," [CFWS] ["@" domain])
Returns an obs-route token with the appropriate sub-tokens (that is,
there is no obs-domain-list in the parse tree).
"""
obs_route = ObsRoute()
while value and (value[0]==',' or value[0] in CFWS_LEADER):
if value[0] in CFWS_LEADER:
token, value = get_cfws(value)
obs_route.append(token)
elif value[0] == ',':
obs_route.append(ListSeparator)
value = value[1:]
if not value or value[0] != '@':
raise errors.HeaderParseError(
"expected obs-route domain but found '{}'".format(value))
obs_route.append(RouteComponentMarker)
token, value = get_domain(value[1:])
obs_route.append(token)
while value and value[0]==',':
obs_route.append(ListSeparator)
value = value[1:]
if not value:
break
if value[0] in CFWS_LEADER:
token, value = get_cfws(value)
obs_route.append(token)
if value[0] == '@':
obs_route.append(RouteComponentMarker)
token, value = get_domain(value[1:])
obs_route.append(token)
if not value:
raise errors.HeaderParseError("end of header while parsing obs-route")
if value[0] != ':':
raise errors.HeaderParseError( "expected ':' marking end of "
"obs-route but found '{}'".format(value))
obs_route.append(ValueTerminal(':', 'end-of-obs-route-marker'))
return obs_route, value[1:]
def get_angle_addr(value):
""" angle-addr = [CFWS] "<" addr-spec ">" [CFWS] / obs-angle-addr
obs-angle-addr = [CFWS] "<" obs-route addr-spec ">" [CFWS]
"""
angle_addr = AngleAddr()
if value[0] in CFWS_LEADER:
token, value = get_cfws(value)
angle_addr.append(token)
if not value or value[0] != '<':
raise errors.HeaderParseError(
"expected angle-addr but found '{}'".format(value))
angle_addr.append(ValueTerminal('<', 'angle-addr-start'))
value = value[1:]
# Although it is not legal per RFC5322, SMTP uses '<>' in certain
# circumstances.
if value[0] == '>':
angle_addr.append(ValueTerminal('>', 'angle-addr-end'))
angle_addr.defects.append(errors.InvalidHeaderDefect(
"null addr-spec in angle-addr"))
value = value[1:]
return angle_addr, value
try:
token, value = get_addr_spec(value)
except errors.HeaderParseError:
try:
token, value = get_obs_route(value)
angle_addr.defects.append(errors.ObsoleteHeaderDefect(
"obsolete route specification in angle-addr"))
except errors.HeaderParseError:
raise errors.HeaderParseError(
"expected addr-spec or obs-route but found '{}'".format(value))
angle_addr.append(token)
token, value = get_addr_spec(value)
angle_addr.append(token)
if value and value[0] == '>':
value = value[1:]
else:
angle_addr.defects.append(errors.InvalidHeaderDefect(
"missing trailing '>' on angle-addr"))
angle_addr.append(ValueTerminal('>', 'angle-addr-end'))
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
angle_addr.append(token)
return angle_addr, value
def get_display_name(value):
""" display-name = phrase
Because this is simply a name-rule, we don't return a display-name
token containing a phrase, but rather a display-name token with
the content of the phrase.
"""
display_name = DisplayName()
token, value = get_phrase(value)
display_name.extend(token[:])
display_name.defects = token.defects[:]
return display_name, value
def get_name_addr(value):
""" name-addr = [display-name] angle-addr
"""
name_addr = NameAddr()
# Both the optional display name and the angle-addr can start with cfws.
leader = None
if value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
if not value:
raise errors.HeaderParseError(
"expected name-addr but found '{}'".format(leader))
if value[0] != '<':
if value[0] in PHRASE_ENDS:
raise errors.HeaderParseError(
"expected name-addr but found '{}'".format(value))
token, value = get_display_name(value)
if not value:
raise errors.HeaderParseError(
"expected name-addr but found '{}'".format(token))
if leader is not None:
token[0][:0] = [leader]
leader = None
name_addr.append(token)
token, value = get_angle_addr(value)
if leader is not None:
token[:0] = [leader]
name_addr.append(token)
return name_addr, value
def get_mailbox(value):
""" mailbox = name-addr / addr-spec
"""
# The only way to figure out if we are dealing with a name-addr or an
# addr-spec is to try parsing each one.
mailbox = Mailbox()
try:
token, value = get_name_addr(value)
except errors.HeaderParseError:
try:
token, value = get_addr_spec(value)
except errors.HeaderParseError:
raise errors.HeaderParseError(
"expected mailbox but found '{}'".format(value))
if any(isinstance(x, errors.InvalidHeaderDefect)
for x in token.all_defects):
mailbox.token_type = 'invalid-mailbox'
mailbox.append(token)
return mailbox, value
def get_invalid_mailbox(value, endchars):
""" Read everything up to one of the chars in endchars.
This is outside the formal grammar. The InvalidMailbox TokenList that is
returned acts like a Mailbox, but the data attributes are None.
"""
invalid_mailbox = InvalidMailbox()
while value and value[0] not in endchars:
if value[0] in PHRASE_ENDS:
invalid_mailbox.append(ValueTerminal(value[0],
'misplaced-special'))
value = value[1:]
else:
token, value = get_phrase(value)
invalid_mailbox.append(token)
return invalid_mailbox, value
def get_mailbox_list(value):
""" mailbox-list = (mailbox *("," mailbox)) / obs-mbox-list
obs-mbox-list = *([CFWS] ",") mailbox *("," [mailbox / CFWS])
For this routine we go outside the formal grammar in order to improve error
handling. We recognize the end of the mailbox list only at the end of the
value or at a ';' (the group terminator). This is so that we can turn
invalid mailboxes into InvalidMailbox tokens and continue parsing any
remaining valid mailboxes. We also allow all mailbox entries to be null,
and this condition is handled appropriately at a higher level.
"""
mailbox_list = MailboxList()
while value and value[0] != ';':
try:
token, value = get_mailbox(value)
mailbox_list.append(token)
except errors.HeaderParseError:
leader = None
if value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
if not value or value[0] in ',;':
mailbox_list.append(leader)
mailbox_list.defects.append(errors.ObsoleteHeaderDefect(
"empty element in mailbox-list"))
else:
token, value = get_invalid_mailbox(value, ',;')
if leader is not None:
token[:0] = [leader]
mailbox_list.append(token)
mailbox_list.defects.append(errors.InvalidHeaderDefect(
"invalid mailbox in mailbox-list"))
elif value[0] == ',':
mailbox_list.defects.append(errors.ObsoleteHeaderDefect(
"empty element in mailbox-list"))
else:
token, value = get_invalid_mailbox(value, ',;')
if leader is not None:
token[:0] = [leader]
mailbox_list.append(token)
mailbox_list.defects.append(errors.InvalidHeaderDefect(
"invalid mailbox in mailbox-list"))
if value and value[0] not in ',;':
# Crap after mailbox; treat it as an invalid mailbox.
# The mailbox info will still be available.
mailbox = mailbox_list[-1]
mailbox.token_type = 'invalid-mailbox'
token, value = get_invalid_mailbox(value, ',;')
mailbox.extend(token)
mailbox_list.defects.append(errors.InvalidHeaderDefect(
"invalid mailbox in mailbox-list"))
if value and value[0] == ',':
mailbox_list.append(ListSeparator)
value = value[1:]
return mailbox_list, value
def get_group_list(value):
""" group-list = mailbox-list / CFWS / obs-group-list
obs-group-list = 1*([CFWS] ",") [CFWS]
"""
group_list = GroupList()
if not value:
group_list.defects.append(errors.InvalidHeaderDefect(
"end of header before group-list"))
return group_list, value
leader = None
if value and value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
if not value:
# This should never happen in email parsing, since CFWS-only is a
# legal alternative to group-list in a group, which is the only
# place group-list appears.
group_list.defects.append(errors.InvalidHeaderDefect(
"end of header in group-list"))
group_list.append(leader)
return group_list, value
if value[0] == ';':
group_list.append(leader)
return group_list, value
token, value = get_mailbox_list(value)
if len(token.all_mailboxes)==0:
if leader is not None:
group_list.append(leader)
group_list.extend(token)
group_list.defects.append(errors.ObsoleteHeaderDefect(
"group-list with empty entries"))
return group_list, value
if leader is not None:
token[:0] = [leader]
group_list.append(token)
return group_list, value
def get_group(value):
""" group = display-name ":" [group-list] ";" [CFWS]
"""
group = Group()
token, value = get_display_name(value)
if not value or value[0] != ':':
raise errors.HeaderParseError("expected ':' at end of group "
"display name but found '{}'".format(value))
group.append(token)
group.append(ValueTerminal(':', 'group-display-name-terminator'))
value = value[1:]
if value and value[0] == ';':
group.append(ValueTerminal(';', 'group-terminator'))
return group, value[1:]
token, value = get_group_list(value)
group.append(token)
if not value:
group.defects.append(errors.InvalidHeaderDefect(
"end of header in group"))
elif value[0] != ';':
raise errors.HeaderParseError(
"expected ';' at end of group but found {}".format(value))
group.append(ValueTerminal(';', 'group-terminator'))
value = value[1:]
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
group.append(token)
return group, value
def get_address(value):
""" address = mailbox / group
Note that counter-intuitively, an address can be either a single address or
a list of addresses (a group). This is why the returned Address object has
a 'mailboxes' attribute which treats a single address as a list of length
    one. When you need to differentiate between the two cases, extract the single
element, which is either a mailbox or a group token.
"""
# The formal grammar isn't very helpful when parsing an address. mailbox
# and group, especially when allowing for obsolete forms, start off very
# similarly. It is only when you reach one of @, <, or : that you know
# what you've got. So, we try each one in turn, starting with the more
# likely of the two. We could perhaps make this more efficient by looking
# for a phrase and then branching based on the next character, but that
# would be a premature optimization.
address = Address()
try:
token, value = get_group(value)
except errors.HeaderParseError:
try:
token, value = get_mailbox(value)
except errors.HeaderParseError:
raise errors.HeaderParseError(
"expected address but found '{}'".format(value))
address.append(token)
return address, value
def get_address_list(value):
""" address_list = (address *("," address)) / obs-addr-list
obs-addr-list = *([CFWS] ",") address *("," [address / CFWS])
We depart from the formal grammar here by continuing to parse until the end
of the input, assuming the input to be entirely composed of an
address-list. This is always true in email parsing, and allows us
to skip invalid addresses to parse additional valid ones.
"""
address_list = AddressList()
while value:
try:
token, value = get_address(value)
address_list.append(token)
except errors.HeaderParseError as err:
leader = None
if value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
if not value or value[0] == ',':
address_list.append(leader)
address_list.defects.append(errors.ObsoleteHeaderDefect(
"address-list entry with no content"))
else:
token, value = get_invalid_mailbox(value, ',')
if leader is not None:
token[:0] = [leader]
address_list.append(Address([token]))
address_list.defects.append(errors.InvalidHeaderDefect(
"invalid address in address-list"))
elif value[0] == ',':
address_list.defects.append(errors.ObsoleteHeaderDefect(
"empty element in address-list"))
else:
token, value = get_invalid_mailbox(value, ',')
if leader is not None:
token[:0] = [leader]
address_list.append(Address([token]))
address_list.defects.append(errors.InvalidHeaderDefect(
"invalid address in address-list"))
if value and value[0] != ',':
# Crap after address; treat it as an invalid mailbox.
# The mailbox info will still be available.
mailbox = address_list[-1][0]
mailbox.token_type = 'invalid-mailbox'
token, value = get_invalid_mailbox(value, ',')
mailbox.extend(token)
address_list.defects.append(errors.InvalidHeaderDefect(
"invalid address in address-list"))
if value: # Must be a , at this point.
address_list.append(ValueTerminal(',', 'list-separator'))
value = value[1:]
return address_list, value
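# Example (illustrative): both name-addr and bare addr-spec mailboxes end up in
# the mailboxes attribute of the returned AddressList:
#   alist, rest = get_address_list('Fred <f@example.com>, bar@example.com')
#   [mb.addr_spec for mb in alist.mailboxes]
#       ->  ['f@example.com', 'bar@example.com']
#   rest  ->  ''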
def get_no_fold_literal(value):
""" no-fold-literal = "[" *dtext "]"
"""
no_fold_literal = NoFoldLiteral()
if not value:
raise errors.HeaderParseError(
"expected no-fold-literal but found '{}'".format(value))
if value[0] != '[':
raise errors.HeaderParseError(
"expected '[' at the start of no-fold-literal "
"but found '{}'".format(value))
no_fold_literal.append(ValueTerminal('[', 'no-fold-literal-start'))
value = value[1:]
token, value = get_dtext(value)
no_fold_literal.append(token)
if not value or value[0] != ']':
raise errors.HeaderParseError(
"expected ']' at the end of no-fold-literal "
"but found '{}'".format(value))
no_fold_literal.append(ValueTerminal(']', 'no-fold-literal-end'))
return no_fold_literal, value[1:]
def get_msg_id(value):
"""msg-id = [CFWS] "<" id-left '@' id-right ">" [CFWS]
id-left = dot-atom-text / obs-id-left
id-right = dot-atom-text / no-fold-literal / obs-id-right
no-fold-literal = "[" *dtext "]"
"""
msg_id = MsgID()
if value[0] in CFWS_LEADER:
token, value = get_cfws(value)
msg_id.append(token)
if not value or value[0] != '<':
raise errors.HeaderParseError(
"expected msg-id but found '{}'".format(value))
msg_id.append(ValueTerminal('<', 'msg-id-start'))
value = value[1:]
# Parse id-left.
try:
token, value = get_dot_atom_text(value)
except errors.HeaderParseError:
try:
            # obs-id-left is the same as the local-part of an addr-spec.
token, value = get_obs_local_part(value)
msg_id.defects.append(errors.ObsoleteHeaderDefect(
"obsolete id-left in msg-id"))
except errors.HeaderParseError:
raise errors.HeaderParseError(
"expected dot-atom-text or obs-id-left"
" but found '{}'".format(value))
msg_id.append(token)
if not value or value[0] != '@':
msg_id.defects.append(errors.InvalidHeaderDefect(
"msg-id with no id-right"))
# Even though there is no id-right, if the local part
        # ends with `>`, let's just parse it too and return it
        # along with the defect.
if value and value[0] == '>':
msg_id.append(ValueTerminal('>', 'msg-id-end'))
value = value[1:]
return msg_id, value
msg_id.append(ValueTerminal('@', 'address-at-symbol'))
value = value[1:]
# Parse id-right.
try:
token, value = get_dot_atom_text(value)
except errors.HeaderParseError:
try:
token, value = get_no_fold_literal(value)
except errors.HeaderParseError as e:
try:
token, value = get_domain(value)
msg_id.defects.append(errors.ObsoleteHeaderDefect(
"obsolete id-right in msg-id"))
except errors.HeaderParseError:
raise errors.HeaderParseError(
"expected dot-atom-text, no-fold-literal or obs-id-right"
" but found '{}'".format(value))
msg_id.append(token)
if value and value[0] == '>':
value = value[1:]
else:
msg_id.defects.append(errors.InvalidHeaderDefect(
"missing trailing '>' on msg-id"))
msg_id.append(ValueTerminal('>', 'msg-id-end'))
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
msg_id.append(token)
return msg_id, value
def parse_message_id(value):
"""message-id = "Message-ID:" msg-id CRLF
"""
message_id = MessageID()
    try:
        token, value = get_msg_id(value)
        message_id.append(token)
    except errors.HeaderParseError:
        # get_msg_id failed, so 'token' was never bound; just record the defect.
        message_id.defects.append(errors.InvalidHeaderDefect(
            "Expected msg-id but found {!r}".format(value)))
return message_id
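# Example (illustrative): a syntactically valid Message-ID round-trips through
# the parse tree unchanged and carries no defects:
#   mid = parse_message_id('<20040831183004.25078.61941@example.com>')
#   str(mid)         ->  '<20040831183004.25078.61941@example.com>'
#   mid.all_defects  ->  []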
#
# XXX: As I begin to add additional header parsers, I'm realizing we probably
# have two level of parser routines: the get_XXX methods that get a token in
# the grammar, and parse_XXX methods that parse an entire field value. So
# get_address_list above should really be a parse_ method, as probably should
# be get_unstructured.
#
def parse_mime_version(value):
""" mime-version = [CFWS] 1*digit [CFWS] "." [CFWS] 1*digit [CFWS]
"""
# The [CFWS] is implicit in the RFC 2045 BNF.
# XXX: This routine is a bit verbose, should factor out a get_int method.
mime_version = MIMEVersion()
if not value:
mime_version.defects.append(errors.HeaderMissingRequiredValue(
"Missing MIME version number (eg: 1.0)"))
return mime_version
if value[0] in CFWS_LEADER:
token, value = get_cfws(value)
mime_version.append(token)
if not value:
mime_version.defects.append(errors.HeaderMissingRequiredValue(
"Expected MIME version number but found only CFWS"))
digits = ''
while value and value[0] != '.' and value[0] not in CFWS_LEADER:
digits += value[0]
value = value[1:]
if not digits.isdigit():
mime_version.defects.append(errors.InvalidHeaderDefect(
"Expected MIME major version number but found {!r}".format(digits)))
mime_version.append(ValueTerminal(digits, 'xtext'))
else:
mime_version.major = int(digits)
mime_version.append(ValueTerminal(digits, 'digits'))
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
mime_version.append(token)
if not value or value[0] != '.':
if mime_version.major is not None:
mime_version.defects.append(errors.InvalidHeaderDefect(
"Incomplete MIME version; found only major number"))
if value:
mime_version.append(ValueTerminal(value, 'xtext'))
return mime_version
mime_version.append(ValueTerminal('.', 'version-separator'))
value = value[1:]
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
mime_version.append(token)
if not value:
if mime_version.major is not None:
mime_version.defects.append(errors.InvalidHeaderDefect(
"Incomplete MIME version; found only major number"))
return mime_version
digits = ''
while value and value[0] not in CFWS_LEADER:
digits += value[0]
value = value[1:]
if not digits.isdigit():
mime_version.defects.append(errors.InvalidHeaderDefect(
"Expected MIME minor version number but found {!r}".format(digits)))
mime_version.append(ValueTerminal(digits, 'xtext'))
else:
mime_version.minor = int(digits)
mime_version.append(ValueTerminal(digits, 'digits'))
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
mime_version.append(token)
if value:
mime_version.defects.append(errors.InvalidHeaderDefect(
"Excess non-CFWS text after MIME version"))
mime_version.append(ValueTerminal(value, 'xtext'))
return mime_version
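# Example (illustrative): the major/minor numbers are extracted and any trailing
# comment is kept as CFWS in the parse tree:
#   mv = parse_mime_version('1.0 (generated locally)')
#   (mv.major, mv.minor)  ->  (1, 0)
#   str(mv)               ->  '1.0 (generated locally)'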
def get_invalid_parameter(value):
""" Read everything up to the next ';'.
This is outside the formal grammar. The InvalidParameter TokenList that is
returned acts like a Parameter, but the data attributes are None.
"""
invalid_parameter = InvalidParameter()
while value and value[0] != ';':
if value[0] in PHRASE_ENDS:
invalid_parameter.append(ValueTerminal(value[0],
'misplaced-special'))
value = value[1:]
else:
token, value = get_phrase(value)
invalid_parameter.append(token)
return invalid_parameter, value
def get_ttext(value):
"""ttext = <matches _ttext_matcher>
We allow any non-TOKEN_ENDS in ttext, but add defects to the token's
defects list if we find non-ttext characters. We also register defects for
*any* non-printables even though the RFC doesn't exclude all of them,
because we follow the spirit of RFC 5322.
"""
m = _non_token_end_matcher(value)
if not m:
raise errors.HeaderParseError(
"expected ttext but found '{}'".format(value))
ttext = m.group()
value = value[len(ttext):]
ttext = ValueTerminal(ttext, 'ttext')
_validate_xtext(ttext)
return ttext, value
def get_token(value):
"""token = [CFWS] 1*ttext [CFWS]
The RFC equivalent of ttext is any US-ASCII chars except space, ctls, or
tspecials. We also exclude tabs even though the RFC doesn't.
The RFC implies the CFWS but is not explicit about it in the BNF.
"""
mtoken = Token()
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
mtoken.append(token)
if value and value[0] in TOKEN_ENDS:
raise errors.HeaderParseError(
"expected token but found '{}'".format(value))
token, value = get_ttext(value)
mtoken.append(token)
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
mtoken.append(token)
return mtoken, value
def get_attrtext(value):
"""attrtext = 1*(any non-ATTRIBUTE_ENDS character)
We allow any non-ATTRIBUTE_ENDS in attrtext, but add defects to the
token's defects list if we find non-attrtext characters. We also register
defects for *any* non-printables even though the RFC doesn't exclude all of
them, because we follow the spirit of RFC 5322.
"""
m = _non_attribute_end_matcher(value)
if not m:
raise errors.HeaderParseError(
"expected attrtext but found {!r}".format(value))
attrtext = m.group()
value = value[len(attrtext):]
attrtext = ValueTerminal(attrtext, 'attrtext')
_validate_xtext(attrtext)
return attrtext, value
def get_attribute(value):
""" [CFWS] 1*attrtext [CFWS]
This version of the BNF makes the CFWS explicit, and as usual we use a
value terminal for the actual run of characters. The RFC equivalent of
attrtext is the token characters, with the subtraction of '*', "'", and '%'.
We include tab in the excluded set just as we do for token.
"""
attribute = Attribute()
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
attribute.append(token)
if value and value[0] in ATTRIBUTE_ENDS:
raise errors.HeaderParseError(
"expected token but found '{}'".format(value))
token, value = get_attrtext(value)
attribute.append(token)
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
attribute.append(token)
return attribute, value
def get_extended_attrtext(value):
"""attrtext = 1*(any non-ATTRIBUTE_ENDS character plus '%')
This is a special parsing routine so that we get a value that
includes % escapes as a single string (which we decode as a single
string later).
"""
m = _non_extended_attribute_end_matcher(value)
if not m:
raise errors.HeaderParseError(
"expected extended attrtext but found {!r}".format(value))
attrtext = m.group()
value = value[len(attrtext):]
attrtext = ValueTerminal(attrtext, 'extended-attrtext')
_validate_xtext(attrtext)
return attrtext, value
def get_extended_attribute(value):
""" [CFWS] 1*extended_attrtext [CFWS]
This is like the non-extended version except we allow % characters, so that
we can pick up an encoded value as a single string.
"""
# XXX: should we have an ExtendedAttribute TokenList?
attribute = Attribute()
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
attribute.append(token)
if value and value[0] in EXTENDED_ATTRIBUTE_ENDS:
raise errors.HeaderParseError(
"expected token but found '{}'".format(value))
token, value = get_extended_attrtext(value)
attribute.append(token)
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
attribute.append(token)
return attribute, value
def get_section(value):
""" '*' digits
The formal BNF is more complicated because leading 0s are not allowed. We
check for that and add a defect. We also assume no CFWS is allowed between
the '*' and the digits, though the RFC is not crystal clear on that.
The caller should already have dealt with leading CFWS.
"""
section = Section()
if not value or value[0] != '*':
raise errors.HeaderParseError("Expected section but found {}".format(
value))
section.append(ValueTerminal('*', 'section-marker'))
value = value[1:]
if not value or not value[0].isdigit():
raise errors.HeaderParseError("Expected section number but "
"found {}".format(value))
digits = ''
while value and value[0].isdigit():
digits += value[0]
value = value[1:]
if digits[0] == '0' and digits != '0':
section.defects.append(errors.InvalidHeaderError(
"section number has an invalid leading 0"))
section.number = int(digits)
section.append(ValueTerminal(digits, 'digits'))
return section, value
def get_value(value):
""" quoted-string / attribute
"""
v = Value()
if not value:
raise errors.HeaderParseError("Expected value but found end of string")
leader = None
if value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
if not value:
raise errors.HeaderParseError("Expected value but found "
"only {}".format(leader))
if value[0] == '"':
token, value = get_quoted_string(value)
else:
token, value = get_extended_attribute(value)
if leader is not None:
token[:0] = [leader]
v.append(token)
return v, value
def get_parameter(value):
""" attribute [section] ["*"] [CFWS] "=" value
The CFWS is implied by the RFC but not made explicit in the BNF. This
simplified form of the BNF from the RFC is made to conform with the RFC BNF
through some extra checks. We do it this way because it makes both error
recovery and working with the resulting parse tree easier.
"""
# It is possible CFWS would also be implicitly allowed between the section
    # and the 'extended-attribute' marker (the '*'), but we've never seen that
# in the wild and we will therefore ignore the possibility.
param = Parameter()
token, value = get_attribute(value)
param.append(token)
if not value or value[0] == ';':
param.defects.append(errors.InvalidHeaderDefect("Parameter contains "
"name ({}) but no value".format(token)))
return param, value
if value[0] == '*':
try:
token, value = get_section(value)
param.sectioned = True
param.append(token)
except errors.HeaderParseError:
pass
if not value:
raise errors.HeaderParseError("Incomplete parameter")
if value[0] == '*':
param.append(ValueTerminal('*', 'extended-parameter-marker'))
value = value[1:]
param.extended = True
if value[0] != '=':
raise errors.HeaderParseError("Parameter not followed by '='")
param.append(ValueTerminal('=', 'parameter-separator'))
value = value[1:]
leader = None
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
param.append(token)
remainder = None
appendto = param
if param.extended and value and value[0] == '"':
# Now for some serious hackery to handle the common invalid case of
# double quotes around an extended value. We also accept (with defect)
# a value marked as encoded that isn't really.
qstring, remainder = get_quoted_string(value)
inner_value = qstring.stripped_value
semi_valid = False
if param.section_number == 0:
if inner_value and inner_value[0] == "'":
semi_valid = True
else:
token, rest = get_attrtext(inner_value)
if rest and rest[0] == "'":
semi_valid = True
else:
try:
token, rest = get_extended_attrtext(inner_value)
                except errors.HeaderParseError:
pass
else:
if not rest:
semi_valid = True
if semi_valid:
param.defects.append(errors.InvalidHeaderDefect(
"Quoted string value for extended parameter is invalid"))
param.append(qstring)
for t in qstring:
if t.token_type == 'bare-quoted-string':
t[:] = []
appendto = t
break
value = inner_value
else:
remainder = None
param.defects.append(errors.InvalidHeaderDefect(
"Parameter marked as extended but appears to have a "
"quoted string value that is non-encoded"))
if value and value[0] == "'":
token = None
else:
token, value = get_value(value)
if not param.extended or param.section_number > 0:
if not value or value[0] != "'":
appendto.append(token)
if remainder is not None:
assert not value, value
value = remainder
return param, value
param.defects.append(errors.InvalidHeaderDefect(
"Apparent initial-extended-value but attribute "
"was not marked as extended or was not initial section"))
if not value:
# Assume the charset/lang is missing and the token is the value.
param.defects.append(errors.InvalidHeaderDefect(
"Missing required charset/lang delimiters"))
appendto.append(token)
if remainder is None:
return param, value
else:
if token is not None:
for t in token:
if t.token_type == 'extended-attrtext':
break
            # Re-label the charset token as plain attrtext.
            t.token_type = 'attrtext'
appendto.append(t)
param.charset = t.value
if value[0] != "'":
raise errors.HeaderParseError("Expected RFC2231 char/lang encoding "
"delimiter, but found {!r}".format(value))
appendto.append(ValueTerminal("'", 'RFC2231-delimiter'))
value = value[1:]
if value and value[0] != "'":
token, value = get_attrtext(value)
appendto.append(token)
param.lang = token.value
if not value or value[0] != "'":
raise errors.HeaderParseError("Expected RFC2231 char/lang encoding "
"delimiter, but found {}".format(value))
appendto.append(ValueTerminal("'", 'RFC2231-delimiter'))
value = value[1:]
if remainder is not None:
# Treat the rest of value as bare quoted string content.
v = Value()
while value:
if value[0] in WSP:
token, value = get_fws(value)
elif value[0] == '"':
token = ValueTerminal('"', 'DQUOTE')
value = value[1:]
else:
token, value = get_qcontent(value)
v.append(token)
token = v
else:
token, value = get_value(value)
appendto.append(token)
if remainder is not None:
assert not value, value
value = remainder
return param, value
def parse_mime_parameters(value):
""" parameter *( ";" parameter )
That BNF is meant to indicate this routine should only be called after
finding and handling the leading ';'. There is no corresponding rule in
the formal RFC grammar, but it is more convenient for us for the set of
parameters to be treated as its own TokenList.
This is 'parse' routine because it consumes the remaining value, but it
would never be called to parse a full header. Instead it is called to
parse everything after the non-parameter value of a specific MIME header.
"""
mime_parameters = MimeParameters()
while value:
try:
token, value = get_parameter(value)
mime_parameters.append(token)
except errors.HeaderParseError as err:
leader = None
if value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
if not value:
mime_parameters.append(leader)
return mime_parameters
if value[0] == ';':
if leader is not None:
mime_parameters.append(leader)
mime_parameters.defects.append(errors.InvalidHeaderDefect(
"parameter entry with no content"))
else:
token, value = get_invalid_parameter(value)
if leader:
token[:0] = [leader]
mime_parameters.append(token)
mime_parameters.defects.append(errors.InvalidHeaderDefect(
"invalid parameter {!r}".format(token)))
if value and value[0] != ';':
# Junk after the otherwise valid parameter. Mark it as
# invalid, but it will have a value.
param = mime_parameters[-1]
param.token_type = 'invalid-parameter'
token, value = get_invalid_parameter(value)
param.extend(token)
mime_parameters.defects.append(errors.InvalidHeaderDefect(
"parameter with invalid trailing text {!r}".format(token)))
if value:
# Must be a ';' at this point.
mime_parameters.append(ValueTerminal(';', 'parameter-separator'))
value = value[1:]
return mime_parameters
def _find_mime_parameters(tokenlist, value):
"""Do our best to find the parameters in an invalid MIME header
"""
while value and value[0] != ';':
if value[0] in PHRASE_ENDS:
tokenlist.append(ValueTerminal(value[0], 'misplaced-special'))
value = value[1:]
else:
token, value = get_phrase(value)
tokenlist.append(token)
if not value:
return
tokenlist.append(ValueTerminal(';', 'parameter-separator'))
tokenlist.append(parse_mime_parameters(value[1:]))
def parse_content_type_header(value):
""" maintype "/" subtype *( ";" parameter )
The maintype and substype are tokens. Theoretically they could
be checked against the official IANA list + x-token, but we
don't do that.
"""
ctype = ContentType()
recover = False
if not value:
ctype.defects.append(errors.HeaderMissingRequiredValue(
"Missing content type specification"))
return ctype
try:
token, value = get_token(value)
except errors.HeaderParseError:
ctype.defects.append(errors.InvalidHeaderDefect(
"Expected content maintype but found {!r}".format(value)))
_find_mime_parameters(ctype, value)
return ctype
ctype.append(token)
# XXX: If we really want to follow the formal grammar we should make
# mantype and subtype specialized TokenLists here. Probably not worth it.
if not value or value[0] != '/':
ctype.defects.append(errors.InvalidHeaderDefect(
"Invalid content type"))
if value:
_find_mime_parameters(ctype, value)
return ctype
ctype.maintype = token.value.strip().lower()
ctype.append(ValueTerminal('/', 'content-type-separator'))
value = value[1:]
try:
token, value = get_token(value)
except errors.HeaderParseError:
ctype.defects.append(errors.InvalidHeaderDefect(
"Expected content subtype but found {!r}".format(value)))
_find_mime_parameters(ctype, value)
return ctype
ctype.append(token)
ctype.subtype = token.value.strip().lower()
if not value:
return ctype
if value[0] != ';':
ctype.defects.append(errors.InvalidHeaderDefect(
"Only parameters are valid after content type, but "
"found {!r}".format(value)))
# The RFC requires that a syntactically invalid content-type be treated
# as text/plain. Perhaps we should postel this, but we should probably
# only do that if we were checking the subtype value against IANA.
del ctype.maintype, ctype.subtype
_find_mime_parameters(ctype, value)
return ctype
ctype.append(ValueTerminal(';', 'parameter-separator'))
ctype.append(parse_mime_parameters(value[1:]))
return ctype
def parse_content_disposition_header(value):
""" disposition-type *( ";" parameter )
"""
disp_header = ContentDisposition()
if not value:
disp_header.defects.append(errors.HeaderMissingRequiredValue(
"Missing content disposition"))
return disp_header
try:
token, value = get_token(value)
except errors.HeaderParseError:
disp_header.defects.append(errors.InvalidHeaderDefect(
"Expected content disposition but found {!r}".format(value)))
_find_mime_parameters(disp_header, value)
return disp_header
disp_header.append(token)
disp_header.content_disposition = token.value.strip().lower()
if not value:
return disp_header
if value[0] != ';':
disp_header.defects.append(errors.InvalidHeaderDefect(
"Only parameters are valid after content disposition, but "
"found {!r}".format(value)))
_find_mime_parameters(disp_header, value)
return disp_header
disp_header.append(ValueTerminal(';', 'parameter-separator'))
disp_header.append(parse_mime_parameters(value[1:]))
return disp_header
def parse_content_transfer_encoding_header(value):
""" mechanism
"""
# We should probably validate the values, since the list is fixed.
cte_header = ContentTransferEncoding()
if not value:
cte_header.defects.append(errors.HeaderMissingRequiredValue(
"Missing content transfer encoding"))
return cte_header
try:
token, value = get_token(value)
except errors.HeaderParseError:
cte_header.defects.append(errors.InvalidHeaderDefect(
"Expected content transfer encoding but found {!r}".format(value)))
else:
cte_header.append(token)
cte_header.cte = token.value.strip().lower()
if not value:
return cte_header
while value:
cte_header.defects.append(errors.InvalidHeaderDefect(
"Extra text after content transfer encoding"))
if value[0] in PHRASE_ENDS:
cte_header.append(ValueTerminal(value[0], 'misplaced-special'))
value = value[1:]
else:
token, value = get_phrase(value)
cte_header.append(token)
return cte_header
#
# Header folding
#
# Header folding is complex, with lots of rules and corner cases. The
# following code does its best to obey the rules and handle the corner
# cases, but you can be sure there are few bugs:)
#
# This folder generally canonicalizes as it goes, preferring the stringified
# version of each token. The tokens contain information that supports the
# folder, including which tokens can be encoded in which ways.
#
# Folded text is accumulated in a simple list of strings ('lines'), each
# one of which should be less than policy.max_line_length ('maxlen').
#
def _steal_trailing_WSP_if_exists(lines):
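    # If the last folded line ends in whitespace, remove that character and
    # return it so it can become the leading whitespace of the next line.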
wsp = ''
if lines and lines[-1] and lines[-1][-1] in WSP:
wsp = lines[-1][-1]
lines[-1] = lines[-1][:-1]
return wsp
def _refold_parse_tree(parse_tree, *, policy):
"""Return string of contents of parse_tree folded according to RFC rules.
"""
# max_line_length 0/None means no limit, ie: infinitely long.
maxlen = policy.max_line_length or sys.maxsize
encoding = 'utf-8' if policy.utf8 else 'us-ascii'
lines = ['']
last_ew = None
wrap_as_ew_blocked = 0
want_encoding = False
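    # Sentinel appended after the children of a token whose as_ew_allowed is
    # False; when it is popped below, wrap_as_ew_blocked is decremented again.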
end_ew_not_allowed = Terminal('', 'wrap_as_ew_blocked')
parts = list(parse_tree)
while parts:
part = parts.pop(0)
if part is end_ew_not_allowed:
wrap_as_ew_blocked -= 1
continue
tstr = str(part)
if part.token_type == 'ptext' and set(tstr) & SPECIALS:
# Encode if tstr contains special characters.
want_encoding = True
try:
tstr.encode(encoding)
charset = encoding
except UnicodeEncodeError:
if any(isinstance(x, errors.UndecodableBytesDefect)
for x in part.all_defects):
charset = 'unknown-8bit'
else:
# If policy.utf8 is false this should really be taken from a
# 'charset' property on the policy.
charset = 'utf-8'
want_encoding = True
if part.token_type == 'mime-parameters':
# Mime parameter folding (using RFC2231) is extra special.
_fold_mime_parameters(part, lines, maxlen, encoding)
continue
if want_encoding and not wrap_as_ew_blocked:
if not part.as_ew_allowed:
want_encoding = False
last_ew = None
if part.syntactic_break:
encoded_part = part.fold(policy=policy)[:-len(policy.linesep)]
if policy.linesep not in encoded_part:
# It fits on a single line
if len(encoded_part) > maxlen - len(lines[-1]):
# But not on this one, so start a new one.
newline = _steal_trailing_WSP_if_exists(lines)
# XXX what if encoded_part has no leading FWS?
lines.append(newline)
lines[-1] += encoded_part
continue
# Either this is not a major syntactic break, so we don't
# want it on a line by itself even if it fits, or it
# doesn't fit on a line by itself. Either way, fall through
# to unpacking the subparts and wrapping them.
if not hasattr(part, 'encode'):
# It's not a Terminal, do each piece individually.
parts = list(part) + parts
else:
# It's a terminal, wrap it as an encoded word, possibly
# combining it with previously encoded words if allowed.
last_ew = _fold_as_ew(tstr, lines, maxlen, last_ew,
part.ew_combine_allowed, charset)
want_encoding = False
continue
if len(tstr) <= maxlen - len(lines[-1]):
lines[-1] += tstr
continue
# This part is too long to fit. The RFC wants us to break at
# "major syntactic breaks", so unless we don't consider this
# to be one, check if it will fit on the next line by itself.
if (part.syntactic_break and
len(tstr) + 1 <= maxlen):
newline = _steal_trailing_WSP_if_exists(lines)
if newline or part.startswith_fws():
lines.append(newline + tstr)
last_ew = None
continue
if not hasattr(part, 'encode'):
# It's not a terminal, try folding the subparts.
newparts = list(part)
if not part.as_ew_allowed:
wrap_as_ew_blocked += 1
newparts.append(end_ew_not_allowed)
parts = newparts + parts
continue
if part.as_ew_allowed and not wrap_as_ew_blocked:
# It doesn't need CTE encoding, but encode it anyway so we can
# wrap it.
parts.insert(0, part)
want_encoding = True
continue
        # We can't figure out how to wrap it, so give up.
newline = _steal_trailing_WSP_if_exists(lines)
if newline or part.startswith_fws():
lines.append(newline + tstr)
else:
# We can't fold it onto the next line either...
lines[-1] += tstr
return policy.linesep.join(lines) + policy.linesep
def _fold_as_ew(to_encode, lines, maxlen, last_ew, ew_combine_allowed, charset):
"""Fold string to_encode into lines as encoded word, combining if allowed.
Return the new value for last_ew, or None if ew_combine_allowed is False.
If there is already an encoded word in the last line of lines (indicated by
a non-None value for last_ew) and ew_combine_allowed is true, decode the
existing ew, combine it with to_encode, and re-encode. Otherwise, encode
to_encode. In either case, split to_encode as necessary so that the
encoded segments fit within maxlen.
"""
if last_ew is not None and ew_combine_allowed:
to_encode = str(
get_unstructured(lines[-1][last_ew:] + to_encode))
lines[-1] = lines[-1][:last_ew]
if to_encode[0] in WSP:
# We're joining this to non-encoded text, so don't encode
# the leading blank.
leading_wsp = to_encode[0]
to_encode = to_encode[1:]
if (len(lines[-1]) == maxlen):
lines.append(_steal_trailing_WSP_if_exists(lines))
lines[-1] += leading_wsp
trailing_wsp = ''
if to_encode[-1] in WSP:
# Likewise for the trailing space.
trailing_wsp = to_encode[-1]
to_encode = to_encode[:-1]
new_last_ew = len(lines[-1]) if last_ew is None else last_ew
encode_as = 'utf-8' if charset == 'us-ascii' else charset
# The RFC2047 chrome takes up 7 characters plus the length
# of the charset name.
chrome_len = len(encode_as) + 7
if (chrome_len + 1) >= maxlen:
raise errors.HeaderParseError(
"max_line_length is too small to fit an encoded word")
while to_encode:
remaining_space = maxlen - len(lines[-1])
text_space = remaining_space - chrome_len
if text_space <= 0:
lines.append(' ')
continue
to_encode_word = to_encode[:text_space]
encoded_word = _ew.encode(to_encode_word, charset=encode_as)
excess = len(encoded_word) - remaining_space
while excess > 0:
# Since the chunk to encode is guaranteed to fit into less than 100 characters,
# shrinking it by one at a time shouldn't take long.
to_encode_word = to_encode_word[:-1]
encoded_word = _ew.encode(to_encode_word, charset=encode_as)
excess = len(encoded_word) - remaining_space
lines[-1] += encoded_word
to_encode = to_encode[len(to_encode_word):]
if to_encode:
lines.append(' ')
new_last_ew = len(lines[-1])
lines[-1] += trailing_wsp
return new_last_ew if ew_combine_allowed else None
def _fold_mime_parameters(part, lines, maxlen, encoding):
"""Fold TokenList 'part' into the 'lines' list as mime parameters.
Using the decoded list of parameters and values, format them according to
the RFC rules, including using RFC2231 encoding if the value cannot be
expressed in 'encoding' and/or the parameter+value is too long to fit
within 'maxlen'.
"""
# Special case for RFC2231 encoding: start from decoded values and use
# RFC2231 encoding iff needed.
#
# Note that the 1 and 2s being added to the length calculations are
# accounting for the possibly-needed spaces and semicolons we'll be adding.
#
for name, value in part.params:
# XXX What if this ';' puts us over maxlen the first time through the
# loop? We should split the header value onto a newline in that case,
# but to do that we need to recognize the need earlier or reparse the
# header, so I'm going to ignore that bug for now. It'll only put us
# one character over.
if not lines[-1].rstrip().endswith(';'):
lines[-1] += ';'
charset = encoding
error_handler = 'strict'
try:
value.encode(encoding)
encoding_required = False
except UnicodeEncodeError:
encoding_required = True
if utils._has_surrogates(value):
charset = 'unknown-8bit'
error_handler = 'surrogateescape'
else:
charset = 'utf-8'
if encoding_required:
encoded_value = urllib.parse.quote(
value, safe='', errors=error_handler)
tstr = "{}*={}''{}".format(name, charset, encoded_value)
else:
tstr = '{}={}'.format(name, quote_string(value))
if len(lines[-1]) + len(tstr) + 1 < maxlen:
lines[-1] = lines[-1] + ' ' + tstr
continue
elif len(tstr) + 2 <= maxlen:
lines.append(' ' + tstr)
continue
# We need multiple sections. We are allowed to mix encoded and
# non-encoded sections, but we aren't going to. We'll encode them all.
section = 0
extra_chrome = charset + "''"
while value:
chrome_len = len(name) + len(str(section)) + 3 + len(extra_chrome)
if maxlen <= chrome_len + 3:
# We need room for the leading blank, the trailing semicolon,
# and at least one character of the value. If we don't
# have that, we'd be stuck, so in that case fall back to
# the RFC standard width.
maxlen = 78
splitpoint = maxchars = maxlen - chrome_len - 2
while True:
partial = value[:splitpoint]
encoded_value = urllib.parse.quote(
partial, safe='', errors=error_handler)
if len(encoded_value) <= maxchars:
break
splitpoint -= 1
lines.append(" {}*{}*={}{}".format(
name, section, extra_chrome, encoded_value))
extra_chrome = ''
section += 1
value = value[splitpoint:]
if value:
lines[-1] += ';'
| apache-2.0 | 1,475,934,885,495,649,000 | 34.587387 | 91 | 0.595536 | false |
commtrack/commtrack-old-to-del | apps/reports/custom/grameen.py | 1 | 7030 | #!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
import inspect
from django.template.loader import render_to_string
from django.db import connection
import settings
from xformmanager.models import Metadata, FormDefModel, ElementDefModel
from reports.models import Case, SqlReport
from reports.util import get_whereclause
from shared import monitoring_report, Mother
'''Report file for custom Grameen reports'''
# see mvp.py for an explanation of how these are used.
# temporarily "privatizing" the name because grameen doesn't
# want this report to show up in the UI
def _monitoring(request):
'''Safe Pregnancy Monitoring Report'''
safe_preg_case_name = "Grameen Safe Pregnancies"
try:
case = Case.objects.get(name=safe_preg_case_name)
except Case.DoesNotExist:
return '''Sorry, it doesn't look like the forms that this report
depends on have been uploaded.'''
return monitoring_report(request, case)
def _mother_summary(request):
'''Individual Mother Summary'''
# this is intentionally private, as it's only accessed from within other
# reports that explicitly know about it. We don't want to list it because
# we don't know what id to use.
safe_preg_case_name = "Grameen Safe Pregnancies"
try:
case = Case.objects.get(name=safe_preg_case_name)
except Case.DoesNotExist:
return '''Sorry, it doesn't look like the forms that this report
depends on have been uploaded.'''
if not "case_id" in request.GET:
return '''Sorry, you have to specify a mother using the case id
in the URL.'''
case_id = request.GET["case_id"]
data = case.get_data_map_for_case(case_id)
mom = Mother(case, case_id, data)
mother_name = request.GET["mother_name"]
if mom.mother_name != mother_name:
return '''<p class="error">Sorry it appears that this id has been used by the CHW for
more than one mother. Unfortunately, this means we can't
yet show you her data here. Please remind your CHW's to
use unique case Ids!</p>
'''
attrs = [name for name in dir(mom) if not name.startswith("_")]
attrs.remove("data_map")
display_attrs = [attr.replace("_", " ") for attr in attrs]
all_attrs = zip(attrs, display_attrs)
mom.hi_risk_reasons = _get_hi_risk_reason(mom)
return render_to_string("custom/grameen/mother_details.html",
{"mother": mom, "attrs": all_attrs,
"MEDIA_URL": settings.MEDIA_URL, # we pretty sneakly have to explicitly pass this
})
def _get_hi_risk_reason(mom):
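    # Collect every high-risk criterion this mother meets and return them as a
    # single comma-separated string (empty string when none apply).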
reasons = []
if (mom.mother_age >= 35): reasons.append("35 or older")
if (mom.mother_age <= 18): reasons.append("18 or younger")
if (mom.mother_height == 'under_150'): reasons.append("mother height under 150cm")
if (mom.previous_csection == 'yes'): reasons.append("previous c-section")
if (mom.previous_newborn_death == 'yes'): reasons.append("previous newborn death")
if (mom.previous_bleeding == 'yes'): reasons.append("previous bleeding")
if (mom.previous_terminations >= 3): reasons.append("%s previous terminations" % mom.previous_terminations)
if (mom.previous_pregnancies >= 5): reasons.append("%s previous pregnancies" % mom.previous_pregnancies)
if (mom.heart_problems == 'yes'): reasons.append("heart problems")
if (mom.diabetes == 'yes'): reasons.append("diabetes")
if (mom.hip_problems == 'yes'): reasons.append("hip problems")
if (mom.card_results_syphilis_result == 'positive'): reasons.append("positive for syphilis")
if (mom.card_results_hepb_result == 'positive'): reasons.append("positive for hepb")
if (mom.over_5_years == 'yes'): reasons.append("over 5 years since last pregnancy")
if (mom.card_results_hb_test == 'below_normal'): reasons.append("low hb test")
if (mom.card_results_blood_group == 'onegative'): reasons.append("o-negative blood group")
if (mom.card_results_blood_group == 'anegative'): reasons.append("a-negative blood group")
if (mom.card_results_blood_group == 'abnegative'): reasons.append("ab-negative blood group")
if (mom.card_results_blood_group == 'bnegative'): reasons.append("b-negative blood group")
return ", ".join(reasons)
def hi_risk_pregnancies(request):
'''Hi-Risk Pregnancy Summary'''
# just pass on to the helper view, but ensure that hi-risk is set to yes
params = request.GET.copy()
params["sampledata_hi_risk"]="yes"
return _chw_submission_summary(request, params)
def chw_submission_details(request):
'''Health Worker Submission Details'''
return _chw_submission_summary(request, request.GET)
def _chw_submission_summary(request, params):
# this was made a private method so that we can call it from multiple reports
# with an extra parameter.
# had to move this form a sql report to get in the custom annotations
# this is a pretty ugly/hacky hybrid approach, and should certainly
# be cleaned up
extuser = request.extuser
# hard coded to our fixture. bad bad!
grameen_submission_details_id = 2
# hard coded to our schema. bad bad!
form_def = ElementDefModel.objects.get(table_name="schema_intel_grameen_safe_motherhood_registration_v0_3").form
report = SqlReport.objects.get(id=grameen_submission_details_id)
cols = ('meta_username', 'sampledata_hi_risk')
where_cols = dict([(key, val) for key, val in params.items() if key in cols])
whereclause = get_whereclause(where_cols)
follow_filter = None
if "follow" in params:
if params["follow"] == "yes":
follow_filter = True
elif params["follow"] == "no":
follow_filter = False
cols, data = report.get_data({"whereclause": whereclause})
new_data = []
for row in data:
new_row_data = dict(zip(cols, row))
row_id = new_row_data["Instance ID"]
meta = Metadata.objects.get(formdefmodel=form_def, raw_data=row_id)
follow = meta.attachment.annotations.count() > 0
if follow_filter is not None:
if follow_filter and not follow:
# filtering on true, but none found, don't include this
continue
elif not follow_filter and follow:
# filtering on false, but found follows, don't include this
continue
new_row_data["Follow up?"] = "yes" if follow else "no"
new_row_data["meta"] = meta
new_row_data["attachment"] = meta.attachment
new_data.append(new_row_data)
cols = cols[:6]
return render_to_string("custom/grameen/chw_submission_details.html",
{"MEDIA_URL": settings.MEDIA_URL, # we pretty sneakly have to explicitly pass this
"columns": cols,
"data": new_data})
| bsd-3-clause | -1,309,282,711,300,028,700 | 46.5 | 116 | 0.644381 | false |
kalamaico/FootballManager | resources/random_generator.py | 1 | 1176 | #!python
import random, time
class RandomGenerator:
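    # Thin wrapper around the random module, seeded from the clock; the
    # RandomGeneratorDeterministic subclass below replaces these methods with
    # deterministic outputs.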
def __init__(self):
random.seed(time.clock())
def generate_uniform(self, min_v, max_v):
return random.uniform(min_v, max_v)
def generate_int(self, min_v, max_v):
return random.randint(min_v, max_v)
def generate_int_sequence_no_repetitions(self, length, min_v, max_v):
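        # Keep drawing until we have `length` distinct integers in
        # [min_v, max_v]; the result is returned as a sorted list.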
ret = set()
if length > max_v - min_v + 1:
raise ValueError("Requested a length of " + str(length) + " for an interval of " + str(max_v - min_v + 1))
while len(ret) < length:
            ret.add(self.generate_int(min_v, max_v))
return sorted(ret)
def generate_int_sequence(self, length, min_v, max_v):
ret = list()
while len(ret) < length:
ret.append(self.generate_int(min_v, max_v))
return ret
class RandomGeneratorDeterministic(RandomGenerator):
def __init__(self):
random.seed(0)
def generate_uniform(self, min_v, max_v):
return (max_v - min_v) /2
def generate_int(self, min_v, max_v):
return 42
| lgpl-3.0 | 2,506,940,250,891,998,700 | 26.348837 | 118 | 0.548469 | false |
shw700/latrace-plus | frontend/la2html.py | 1 | 15468 | #!/usr/bin/python
import sys
import re
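# Background colours cycled by call-nesting depth when rendering the trace.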
color_map = [ "silver", "aquamarine", "tan", "skyblue", "plum", "yellowgreen", "moccasin" ]
def get_color_level(n):
return color_map[n % len(color_map)]
def enstyle_parameters(s):
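    # Render one latrace argument list ("name = value, ...") as HTML: argument
    # names in bold, string values in italics, and numeric, symbolic and
    # function-pointer values wrapped in coloured spans.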
result = ""
if s == "" or s == "void":
return "<b>void</b>"
nexti = s.find("=")
while (nexti != -1):
argname = s[:nexti-1].strip()
result += "<b>{}</b> = ".format(argname)
remainder = s[nexti+1:].strip()
if remainder.startswith('"'):
endstr = remainder[1:].find('"')
if endstr == -1:
result += "[decoding error]"
return result
value = '"<i>{}</i>"'.format(remainder[1:endstr+1])
s = remainder[endstr+2:]
result += value
nexti = s.find("=")
continue
else:
endval = remainder.find(",")
if endval == -1:
value = remainder
s = ""
else:
value = remainder[0:endval]
s = remainder[endval:]
isDigit = False
try:
int(value)
isDigit = True
except:
try:
int(value, 16)
isDigit = True
except:
pass
if value == "true" or value == "false" or value == "NULL":
result += "<b>{}</b>".format(value)
elif value.startswith("fn@"):
result += '<b>fn@ </b><p style="color:green" class="narrow_p">{}</p>'.format(value[3:])
elif not isDigit:
pind = value.find("+")
if pind != -1:
result += '<p style="color:brown" class="narrow_p">{}</p>'.format(value[0:pind])
result += "<b>+</b>{}".format(value[pind+1:])
else:
result += '<p style="color:brown" class="narrow_p">{}</p>'.format(value)
elif not value.startswith('"'):
result += '<p style="color:blue" class="narrow_p">{}</p>'.format(value)
else:
result += value
if s.startswith(","):
result += "<b>,</b> "
s = s[1:]
nexti = s.find("=")
return result
def get_indent(line):
count = 0
for i in line:
if i != ' ':
break
count += 1
return count
def emit_html_header():
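    # Build the page prologue: <html>/<head> with the jQuery handlers for the
    # toggle buttons, the CSS used by the generated labels, and the opening
    # <body> tag.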
h = "<html>"
h += "<head>"
h += "<title>latrace output</title>"
h += '<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.2.1/jquery.min.js"></script>'
h += "<script>"
h += "$(document).ready(function(){"
h += '$("#toggle_tid").click(function(){'
h += ' console.log("Toggling TID display");'
h += ' $(".label_tid").not("#toggle_tid").not(".toggle_button").toggle();'
h += "});"
h += '$("#toggle_src_lib").click(function(){'
h += ' console.log("Toggling source library display");'
h += ' $(".label_src_lib").not("#toggle_src_lib").toggle();'
h += "});"
h += '$("#toggle_dst_lib").click(function(){'
h += ' console.log("Toggling dest library display");'
h += ' $(".label_dst_lib").not("#toggle_dst_lib").toggle();'
h += "});"
h += '$(".label_expander").click(function(){'
h += ' console.log("Toggling expansion");'
h += ' $(this).parent().parent().children(".func_call").not($(this)).slideToggle();'
h += "});"
h += '$(".label_src_lib").not("#toggle_src_lib").click(function(){'
h += ' console.log("Hiding references to source library");'
h += ' var selector = ".label_src_lib[xlib=\'\" + $(this).attr("xlib") + \"\']";'
h += ' console.log("SELECTOR: " + selector);'
h += ' $(selector).not("#toggle_src_lib").parent().parent().toggle();'
h += ' $(selector).removeClass("enabled").addClass("disabled");'
h += "});"
h += '$(".label_funcname").dblclick(function(){'
h += ' console.log("Hiding references to function name: " + $(this).attr("xfunc"));'
h += ' var selector = ".label_funcname[xfunc=\'\" + $(this).attr("xfunc") + \"\']";'
h += ' console.log("SELECTOR: " + selector);'
h += ' $(selector).not(".toggle_func").parent().parent().slideToggle();'
h += ' if ($(selector).hasClass("enabled"))'
h += ' $(selector).removeClass("enabled").addClass("disabled");'
h += ' else'
h += ' $(selector).removeClass("disabled").addClass("enabled");'
h += "});"
h += '$(".toggle_tid").dblclick(function(){'
h += ' console.log("Hiding TID contents for: " + $(this).attr("xtid"));'
h += ' var selector = ".label_tid[xtid=\'\" + $(this).attr("xtid") + \"\']";'
h += ' console.log("SELECTOR: " + selector);'
h += ' $(selector).not(".toggle_button").parent().parent().find(".func_call").toggle();'
h += ' $(this).removeClass("enabled").addClass("disabled");'
h += "});"
h += '$("#toggle_all_funcs").dblclick(function(){'
h += ' console.log("Toggling all visible functions");'
h += ' $(".func_call").toggle();'
h += ' if ($(this).hasClass("enabled")) {'
h += ' $(".toggle_func").removeClass("enabled").addClass("disabled");'
h += ' $(this).removeClass("enabled").addClass("disabled");'
h += ' } else {'
h += ' $(".toggle_func").removeClass("disabled").addClass("enabled");'
h += ' $(this).removeClass("disabled").addClass("enabled");'
h += ' }'
h += "});"
h += "});"
h += "</script>"
h += "<style>"
h += "html *"
h += "{"
h += " font-size: 1em !important;"
h += " font-family: Arial !important;"
h += "}"
h += ".func_call { padding-left: 0px; padding-top: 5px; padding-bottom: 5px; margin-bottom: 5px; border: 1px dotted black; border-left: 1px dotted black; border-right: none; margin-bottom: 0px; }"
h += ".label_src_lib { display: inline-block; cursor: hand; background-color: orange; border: 1px solid black; padding: 3px; font-size: 75%; float: right; }"
h += ".label_dst_lib { display: inline-block; cursor: hand; background-color: brown; border: 1px solid black; padding: 3px; font-size: 75% }"
h += ".label_tid { display: inline-block; cursor: hand; background-color: yellow; border: 1px solid black; padding: 3px; font-size: 75%; font-weight: bold; }"
h += ".label_funcname { display: inline-block; cursor: hand; font-weight: bold; border: 1px dotted gray; padding: 3px; padding-left: 5px; padding-right: 5px; }"
h += ".label_fparams { display: inline-block; background-color: silver; padding: 1px; }"
h += ".label_remainder { display: inline-block; color: gray; }"
h += ".label_result { display: inline-block; background-color: red; border: 1px solid black; padding-left: 10px; padding-right: 10px; margin-left: 5px; font-weight: bold; font-size: 125%; float: right; margin-right: 50px; }"
h += ".label_expander { display: inline-block; cursor: hand; background-color: gray; border: 1px solid black; padding: 3px; margin-left: 5px; margin-right: 2px; font-weight: bold; font-size: 75%; }"
h += ".label_console { display: inline-block; background-color: black; color: white; padding: 5px; width: 100%; padding-top: 5px; padding-bottom: 5px; }"
h += ".side_bar { display: inline-block; margin-right: 10px; width: 200px; }"
h += ".func_bar { display: inline-block; margin-right: 10px; width: 50%; }"
h += ".func_indent { display: inline-block; background-color: gray; margin-right: 2px; }"
h += ".toggle_button { display: inline-block; cursor: hand; margin-left: 3px; margin-right: 3px; margin-bottom: 2px; padding: 3px; }"
h += ".toggle_func { margin-left: 2px; margin-right: 2px; margin-bottom: 2px; }"
h += ".enabled { background-color: lime; }"
h += ".disabled { background-color: red; }"
h += ".narrow_p { -webkit-margin-before: 0em; -webkit-margin-after: 0em; display: inline-block; }"
h += ".div_ind_0 { padding-left: 4px; }"
h += "</style>"
h += "</head>"
h += "<body>"
return h
def emit_html_footer():
h = "</body>"
h += "</html>"
return h
if len(sys.argv) != 2:
sys.stderr.write("Error: requires an input file\n");
sys.exit(-1)
lines = open(sys.argv[1], "r").read().splitlines()
user_struct_transformers = []
user_func_transformers = []
user_intercepts = []
all_functions = []
all_functions_map = {}
all_libraries = []
all_tids = []
# borrowed from stack exchange
ansi_escape = re.compile(r'(\x9B|\x1B\[)[0-?]*[ -\/]*[@-~]')
line_no = 0
first_indent = next_indent = indent_inc = indent_level = 0
last_indent = 0
main_tid = 0
first = 1
header = emit_html_header()
body = ""
for line in lines:
set_xfrm_func = set_xfrm_struct = set_u_int = False
# body += "LINE: ", line
# line = line.join([i if ord(i) >= ' ' else '' for i in line])
line = ansi_escape.sub('', line).strip()
if line.startswith("Adding user struct transformer function"):
set_xfrm_struct = True
elif line.startswith("Adding user transformer function for function"):
set_xfrm_func = True
elif line.startswith("Adding user intercept function"):
set_u_int = True
if set_xfrm_func or set_xfrm_struct or set_u_int:
pind = str.find(line, ":")
if pind == -1:
continue
param = line[pind+1:].strip()
if param.endswith("()"):
param = param[:-2]
if set_xfrm_func:
user_func_transformers.append(param)
elif set_xfrm_struct:
user_struct_transformers.append(param)
else:
user_intercepts.append(param)
continue
words = line.split(" ")
if len(words) < 2:
continue
try:
tid = int(words[0])
except:
body += '<div class="label_console">{}</div><br>'.format(line)
continue
all_tids.append(tid)
all_tids = sorted(set(all_tids))
if (main_tid == 0):
main_tid = tid
line = " ".join(words[1:])
if line_no == 0:
first_indent = get_indent(line)
# header += "FIRST INDENT: ", first_indent
line_no += 1
indent = get_indent(line)
if (first_indent > 0) and (next_indent == 0):
if (indent > first_indent):
next_indent = indent
indent_inc = next_indent - first_indent
# body += "NEXT INDENT: {} / {}".format(next_indent, indent_inc)
if (indent_inc > 0):
indent_level = (indent - first_indent) /indent_inc
else:
indent_level = 0
line = line.strip()
ltoks = str.split(line, '(')
func_name = ltoks[0]
func_params = "(".join(ltoks[1:])
aftoks = str.split(func_params, ')')
func_params = aftoks[0]
remainder = ")".join(aftoks[1:])
result = ""
lib_name = ""
ftoks = str.split(func_name, ":")
if len(ftoks) == 2:
lib_name = ftoks[0]
func_name = ftoks[1]
elif (len(ftoks) >= 2) and (func_name.find("[") != -1) and (func_name.find("]") != -1):
lidx = func_name.find("[")
ridx = func_name.find("]")
remainder = func_name[lidx+1:ridx]
lib_name = ftoks[0]
func_name = func_name[func_name.find(":")+1:lidx-1]
elif (len(ftoks) > 2) and (not ftoks[0].startswith("}")):
lib_name = ftoks[0]
func_name = ":".join(ftoks[1:])
one_liner = False
eqidx = remainder.find(" = ")
if eqidx != -1:
result = remainder[eqidx+3:]
remainder = remainder[0:eqidx-1]
one_liner = True
if first:
first = False
# elif indent_level == last_indent:
# body += "</div>"
prefix, func_indent = "", ""
i = 0
while i < indent_level:
prefix += "+"
func_indent += " "
i += 1
func_indent = '<div class="func_indent">{}</div>'.format(func_indent)
# prefix += '<div class="label_expander"><b>+</b></div>'
prefix = '<div class="label_expander"><b>{}+</b></div>'.format(indent_level)
if result != "":
result_str = "<b> </b><div class=\"label_result\">= {}</div>".format(result)
else:
result_str = "<br>"
if remainder != "":
remainder = remainder.strip()
if remainder.startswith("["):
remainder = remainder[1:]
if remainder.find("]") != -1:
remainder = remainder[:remainder.find("]")]
remainder_str = '<div class="label_dst_lib" xlib="{}">{}</div>'.format(remainder, remainder)
else:
remainder_str = ""
func_params_str = '<div class="label_fparams">{}</div>'.format(enstyle_parameters(func_params))
if lib_name == "":
lib_name_str = ""
else:
lib_name_str = '<div class="label_src_lib" xlib="{}">{}</div>'.format(lib_name, lib_name)
if lib_name == "" and func_name.startswith("}"):
body += '<div class="side_bar"><div class="label_tid" xtid="{}">{}</div> {} </div> <div class="label_result">[continued] <b>{}</b></div><br>'.format(tid, tid, prefix, func_name[2:])
body += "</div></div>"
else:
if func_name.startswith("["):
func_name = func_name[1:]
div_class = "div_ind_{}".format(indent_level)
body += '<div style="background-color: {};" class="{} func_call">'.format(get_color_level(indent_level), div_class)
body += '<div class="side_bar"><div class="label_tid" xtid="{}">{}</div> {}{} </div><div class="func_bar">{}<div class="label_funcname" xfunc="{}">{}</div> ({})</div> {} {}'.format(tid, tid, prefix, lib_name_str, func_indent, func_name, func_name, func_params_str, remainder_str, result_str)
all_functions.append(func_name)
all_functions.sort()
if (remainder != "") and (remainder.find(" ") == -1):
all_functions_map[func_name] = remainder
all_libraries.append(remainder)
all_libraries.sort()
if one_liner:
body += "</div>"
if indent_level < last_indent:
body += "</div>"
last_indent = indent_level
user_func_transformers.sort()
user_struct_transformers.sort()
user_intercepts.sort()
header += "<br><b>Loaded function transformers: {}</b><br>".format(len(user_func_transformers))
for f in user_func_transformers:
header += '<div class="toggle_func label_funcname enabled" xfunc="{}">{}()</div>'.format(f, f)
header += "<br><br>"
header += "<b>Loaded struct transformers: {}</b><br>".format(len(user_struct_transformers))
header += "\t{}<br><br>".format(", ".join(user_struct_transformers))
header += "<b>Loaded function intercepts: {}</b><br>".format(len(user_intercepts))
for f in user_intercepts:
header += '<div class="toggle_func label_funcname enabled" xfunc="{}">{}()</div>'.format(f, f)
header += "<br><br>"
all_functions = sorted(set(all_functions))
header += "<b>All called functions: {} unique</b><br>".format(len(all_functions))
header += '<div style="border: 1px solid black;" id="toggle_all_funcs" class="toggle_button toggle_func enabled">{}</div>'.format("Toggle all functions")
header += "<br><br>"
all_libraries = sorted(set(all_libraries))
header += "<b>In a total of {} libraries</b><br>".format(len(all_libraries))
for l in all_libraries:
header += '<div style="float: none; font-size: 100%;" class="label_src_lib toggle_button" xlib="{}">{}</div>'.format(l, l)
header += "<br><br>"
functions_left = all_functions
#while len(functions_left) > 0:
for l in all_libraries:
header += '<div style="display: inline-block; font-size: 100%; margin-right: 15px; " class="label_dst_lib toggle_button"><u>{}</u></div>'.format(l)
for f in functions_left:
if (f in all_functions_map) and (all_functions_map[f] == l):
header += '<div class="toggle_func label_funcname enabled" xfunc="{}">{}()</div>'.format(f, f)
# functions_left.remove(f)
header += "<br><br>"
#for f in all_functions:
# header += '<div class="toggle_func label_funcname enabled" xfunc="{}">{}()</div>'.format(f, f)
header += "<br><br>"
header += "<b>Available thread IDs: {}</b><br>".format(len(all_tids))
for t in all_tids:
header += '<div class="label_tid toggle_tid toggle_button enabled" xtid="{}">{}</div>'.format(t, t)
header += "<br><br>"
header += "<br><br>"
header += '<div id="toggle_tid" class="label_tid toggle_button">{}</div>'.format("Display TIDS")
header += '<div style="float: none;" id="toggle_src_lib" class="label_src_lib toggle_button">{}</div>'.format("Display source lib names")
header += '<div id="toggle_dst_lib" class="label_dst_lib toggle_button">{}</div>'.format("Display dst lib names")
header += '<div style="padding: 1px; border: 1px solid black; cursor: hand;" id="toggle_fparams" class="label_fparams toggle_button">{}</div>'.format("Display function parameters")
header += "<br><br><br>"
# header += '</div><div class="toggle_func label_funcname enabled" xfunc="{}">{}()</div>'.format(f, f)
# header += "<br><br>"
print header
print body
print emit_html_footer()
| gpl-3.0 | 7,565,566,668,682,800,000 | 34.315068 | 314 | 0.612749 | false |
kurennon/misc-tools | find_validator/find_validator.py | 1 | 1259 | #!/usr/bin/env python3
DIG_CHARS = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def find_validator(dig_string, old_base):
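    # The check character is the last digit (in the same base) of the digit
    # sum, e.g. find_validator("99", 10) == "8" since 9 + 9 = 18.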
dig_sum = sum_digits(dig_string, old_base)
return dig_sum[-1:].upper()
def sum_digits(dig_string, old_base):
int_sum = 0
while dig_string:
int_sum += int(dig_string[:1], base=old_base)
dig_string = dig_string[1:]
dig_sum = unint(int_sum, old_base)
return dig_sum
def unint(int_val, new_base):
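    # Convert a non-negative integer back into a digit string in new_base,
    # e.g. unint(255, 16) == "FF".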
if int_val < new_base:
return DIG_CHARS[int_val]
else:
return unint(int_val//new_base, new_base) + DIG_CHARS[int_val%new_base]
if __name__ == "__main__":
print("Welcome to find_validator.py!\nPlease enter an invalid base to quit" +
"\nor q at the validator to choose a new base.")
work_base = 1
while 0 < work_base < 35:
dig_string = ""
work_base = int(input("\nEnter the base of the number(s) you would like to validate: "))
if work_base <= 0 or work_base > 35:
break
while dig_string.lower() != "q":
dig_string = input("Enter a number to validate: ")
if dig_string.lower() == "q":
break
print("The validator is:", find_validator(dig_string, work_base))
| gpl-3.0 | 3,645,768,671,179,305,500 | 36.029412 | 96 | 0.590151 | false |
greenape/gem-module | gaussianemulation/uncertainty.py | 1 | 2386 | from sympy import *
from mpmath import *
from util import *
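# Quantities for uncertainty analysis of a Gaussian-process emulator; the E, V,
# I_1 and I_2 terms appear to follow the Oakley & O'Hagan formulation.
# Note that E_var() reads several names (D, C, B, m, v, h, w, g, A_inv,
# s_hat_sq, b_hat, e_, V_, E_) from the enclosing/global scope rather than
# taking them as arguments, so they must be defined before it is called.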
def E(r_h, b_hat, r_t, e):
return r_h.T*b_hat + r_t.T*e
def V(sigma, u, r_t, A_inv, r_h, g, w):
return sigma*(u - r_t.T*A_inv*r_t + (r_h - g.T*r_t).T*w*(r_h-g.T*r_t))
def do_E_var(i_1, i_2, V_, E_):
    return (i_1 - V_) + (i_2 - power(E_, 2.))
def E_var():
r_tt = R_tt(D, C, B, m, v)
r_hh = R_hh(m, B)
r_ht = R_ht(D, B, C, v, m, h)
i_1 = I_1(s_hat_sq, A_inv, r_tt, w, r_hh, r_ht, g)
i_2 = I_2(b_hat, r_hh, r_ht, e_, r_tt)
return do_E_var(i_1, i_2[0,0], V_[0,0], E_[0,0])
def I_1(sigma, A_inv, r_tt, w, r_hh, r_ht, g):
return sigma*(mpf(1)-Trace(A_inv*r_tt) + Trace(w*(r_hh - 2*r_ht*g + g.T*r_tt*g)))
def I_2(beta, r_hh, r_ht, e_, r_tt):
return beta.T*r_hh*beta + 2*beta.T*r_ht*e_ + e_.T*r_tt*e_
def Q_kl(x, xk, xl, C, B, m):
return 2*(x - xk).T*C*(x - xk) + 2*(x - xl).T*C*(x - xl) + (x - m).T*B*(x - m)
def Q_k(x, xk, m, B, C):
return (2*(x - xk).T*C*(x - xk) + (x-m).T*B*(x-m))[0,0]
def m_kl(xk, xl, C, B, m):
return ((4*C + B)**-1)*(2*C*xk + 2*C*xl + B*m)
def m_k(x, C, B, m):
return ((2*C + B)**-1)*(2*C*x + B*m)
def R_h(m):
return Matrix([1]).col_join(m)
def R_hh(m, B):
#np.vstack((np.hstack(([[1]], m.T)), np.hstack((m, m.dot(m.T) + B.getI()))))
return Matrix([1]).row_join(m.T).col_join(m.row_join(m*m.T + B**-1))
def R_ht(D, B, C, v, m, h):
return reduce(lambda x, y: x.row_join(y),map(lambda k: R_ht_elem(D, k, B, C, v, m, h), range(D.cols))) #matrix
def R_ht_elem(X, k, B, C, v, m, h):
x = X[:,k]
m_prime_k = m_k(x, C, B, m)
return R_t(X, k, B, C, v, m)*Matrix([1]).col_join(m_prime_k)
def R_tt(D, C, B, m, v):
return Matrix(D.cols, D.cols, lambda i, j: R_tt_element(D, i, j, C, B, m, v))
def R_tt_element(x, k, l, C, B, m, v):
xk = x[:,k]
xl = x[:,l]
qkl = Q_kl(m_kl(xk, xl, C, B, m), xk, xl, C, B, m)[0,0]
return power(1-v, 2.)*power(det(B), 0.5)*power(det(4*C + B), -0.5)*exp(- qkl/2.)
def R_t(D, B, C, v, m):
return Matrix(map(lambda k: R_t_elem(D, k, B, C, v, m), range(D.cols)))
def R_t_elem(X, k, B, C, v, m):
X = X[:,k]
m_prime_k = m_k(X, C, B, m)
q_k = Q_k(m_prime_k, X, m, B, C)
return (1-v)*power(det(B), 0.5)*power(det(2*C + B), -0.5)*exp(-q_k/2.) | mpl-2.0 | 5,845,735,544,084,785,000 | 30.407895 | 114 | 0.475272 | false |
ESOedX/edx-platform | lms/djangoapps/discussion/django_comment_client/base/tests.py | 1 | 90626 | # pylint: skip-file
# -*- coding: utf-8 -*-
"""Tests for django comment client views."""
from __future__ import absolute_import
import json
import logging
from contextlib import contextmanager
import ddt
import mock
import six
from django.contrib.auth.models import User
from django.core.management import call_command
from django.test.client import RequestFactory
from django.urls import reverse
from eventtracking.processors.exceptions import EventEmissionExit
from mock import ANY, Mock, patch
from opaque_keys.edx.keys import CourseKey
from six import text_type
from six.moves import range
from common.test.utils import MockSignalHandlerMixin, disable_signal
from course_modes.models import CourseMode
from course_modes.tests.factories import CourseModeFactory
from lms.djangoapps.discussion.django_comment_client.base import views
from lms.djangoapps.discussion.django_comment_client.tests.group_id import (
CohortedTopicGroupIdTestMixin,
GroupIdAssertionMixin,
NonCohortedTopicGroupIdTestMixin
)
from lms.djangoapps.discussion.django_comment_client.tests.unicode import UnicodeTestMixin
from lms.djangoapps.discussion.django_comment_client.tests.utils import CohortedTestCase, ForumsEnableMixin
from lms.djangoapps.teams.tests.factories import CourseTeamFactory, CourseTeamMembershipFactory
from openedx.core.djangoapps.course_groups.cohorts import set_course_cohorted
from openedx.core.djangoapps.course_groups.tests.helpers import CohortFactory
from openedx.core.djangoapps.django_comment_common.comment_client import Thread
from openedx.core.djangoapps.django_comment_common.models import (
FORUM_ROLE_STUDENT,
CourseDiscussionSettings,
Role,
assign_role
)
from openedx.core.djangoapps.django_comment_common.utils import (
ThreadContext,
seed_permissions_roles,
set_course_discussion_settings
)
from openedx.core.djangoapps.waffle_utils.testutils import WAFFLE_TABLES
from student.roles import CourseStaffRole, UserBasedRole
from student.tests.factories import CourseAccessRoleFactory, CourseEnrollmentFactory, UserFactory
from track.middleware import TrackMiddleware
from track.views import segmentio
from track.views.tests.base import SEGMENTIO_TEST_USER_ID, SegmentIOTrackingTestCaseBase
from util.testing import UrlResetMixin
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase, SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory, check_mongo_calls
from .event_transformers import ForumThreadViewedEventTransformer
log = logging.getLogger(__name__)
CS_PREFIX = "http://localhost:4567/api/v1"
QUERY_COUNT_TABLE_BLACKLIST = WAFFLE_TABLES
# pylint: disable=missing-docstring
class MockRequestSetupMixin(object):
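    """Helpers for stubbing out responses from the comment service in these tests."""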
def _create_response_mock(self, data):
return Mock(
text=json.dumps(data),
json=Mock(return_value=data),
status_code=200
)
def _set_mock_request_data(self, mock_request, data):
mock_request.return_value = self._create_response_mock(data)
@patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request', autospec=True)
class CreateThreadGroupIdTestCase(
MockRequestSetupMixin,
CohortedTestCase,
CohortedTopicGroupIdTestMixin,
NonCohortedTopicGroupIdTestMixin
):
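    """Group-id handling when creating threads in cohorted and non-cohorted topics."""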
cs_endpoint = "/threads"
def call_view(self, mock_request, commentable_id, user, group_id, pass_group_id=True):
self._set_mock_request_data(mock_request, {})
request_data = {"body": "body", "title": "title", "thread_type": "discussion"}
if pass_group_id:
request_data["group_id"] = group_id
request = RequestFactory().post("dummy_url", request_data)
request.user = user
request.view_name = "create_thread"
return views.create_thread(
request,
course_id=six.text_type(self.course.id),
commentable_id=commentable_id
)
def test_group_info_in_response(self, mock_request):
response = self.call_view(
mock_request,
"cohorted_topic",
self.student,
None
)
self._assert_json_response_contains_group_info(response)
@patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request', autospec=True)
@disable_signal(views, 'thread_edited')
@disable_signal(views, 'thread_voted')
@disable_signal(views, 'thread_deleted')
class ThreadActionGroupIdTestCase(
MockRequestSetupMixin,
CohortedTestCase,
GroupIdAssertionMixin
):
def call_view(
self,
view_name,
mock_request,
user=None,
post_params=None,
view_args=None
):
self._set_mock_request_data(
mock_request,
{
"user_id": str(self.student.id),
"group_id": self.student_cohort.id,
"closed": False,
"type": "thread",
"commentable_id": "non_team_dummy_id"
}
)
request = RequestFactory().post("dummy_url", post_params or {})
request.user = user or self.student
request.view_name = view_name
return getattr(views, view_name)(
request,
course_id=six.text_type(self.course.id),
thread_id="dummy",
**(view_args or {})
)
def test_update(self, mock_request):
response = self.call_view(
"update_thread",
mock_request,
post_params={"body": "body", "title": "title"}
)
self._assert_json_response_contains_group_info(response)
def test_delete(self, mock_request):
response = self.call_view("delete_thread", mock_request)
self._assert_json_response_contains_group_info(response)
def test_vote(self, mock_request):
response = self.call_view(
"vote_for_thread",
mock_request,
view_args={"value": "up"}
)
self._assert_json_response_contains_group_info(response)
response = self.call_view("undo_vote_for_thread", mock_request)
self._assert_json_response_contains_group_info(response)
def test_flag(self, mock_request):
response = self.call_view("flag_abuse_for_thread", mock_request)
self._assert_json_response_contains_group_info(response)
response = self.call_view("un_flag_abuse_for_thread", mock_request)
self._assert_json_response_contains_group_info(response)
def test_pin(self, mock_request):
response = self.call_view(
"pin_thread",
mock_request,
user=self.moderator
)
self._assert_json_response_contains_group_info(response)
response = self.call_view(
"un_pin_thread",
mock_request,
user=self.moderator
)
self._assert_json_response_contains_group_info(response)
def test_openclose(self, mock_request):
response = self.call_view(
"openclose_thread",
mock_request,
user=self.moderator
)
self._assert_json_response_contains_group_info(
response,
lambda d: d['content']
)
class ViewsTestCaseMixin(object):
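    """Shared course/user setup and request helpers used by the view test cases below."""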
def set_up_course(self, module_count=0):
"""
Creates a course, optionally with module_count discussion modules, and
a user with appropriate permissions.
"""
# create a course
self.course = CourseFactory.create(
org='MITx', course='999',
discussion_topics={"Some Topic": {"id": "some_topic"}},
display_name='Robot Super Course',
)
self.course_id = self.course.id
# add some discussion modules
for i in range(module_count):
ItemFactory.create(
parent_location=self.course.location,
category='discussion',
discussion_id='id_module_{}'.format(i),
discussion_category=u'Category {}'.format(i),
discussion_target=u'Discussion {}'.format(i)
)
# seed the forums permissions and roles
call_command('seed_permissions_roles', six.text_type(self.course_id))
# Patch the comment client user save method so it does not try
# to create a new cc user when creating a django user
with patch('student.models.cc.User.save'):
uname = 'student'
email = 'student@edx.org'
self.password = 'test'
# Create the user and make them active so we can log them in.
self.student = User.objects.create_user(uname, email, self.password)
self.student.is_active = True
self.student.save()
# Add a discussion moderator
self.moderator = UserFactory.create(password=self.password)
# Enroll the student in the course
CourseEnrollmentFactory(user=self.student,
course_id=self.course_id)
# Enroll the moderator and give them the appropriate roles
CourseEnrollmentFactory(user=self.moderator, course_id=self.course.id)
self.moderator.roles.add(Role.objects.get(name="Moderator", course_id=self.course.id))
assert self.client.login(username='student', password=self.password)
def _setup_mock_request(self, mock_request, include_depth=False):
"""
Ensure that mock_request returns the data necessary to make views
function correctly
"""
data = {
"user_id": str(self.student.id),
"closed": False,
"commentable_id": "non_team_dummy_id"
}
if include_depth:
data["depth"] = 0
self._set_mock_request_data(mock_request, data)
def create_thread_helper(self, mock_request, extra_request_data=None, extra_response_data=None):
"""
Issues a request to create a thread and verifies the result.
"""
self._set_mock_request_data(mock_request, {
"thread_type": "discussion",
"title": "Hello",
"body": "this is a post",
"course_id": "MITx/999/Robot_Super_Course",
"anonymous": False,
"anonymous_to_peers": False,
"commentable_id": "i4x-MITx-999-course-Robot_Super_Course",
"created_at": "2013-05-10T18:53:43Z",
"updated_at": "2013-05-10T18:53:43Z",
"at_position_list": [],
"closed": False,
"id": "518d4237b023791dca00000d",
"user_id": "1",
"username": "robot",
"votes": {
"count": 0,
"up_count": 0,
"down_count": 0,
"point": 0
},
"abuse_flaggers": [],
"type": "thread",
"group_id": None,
"pinned": False,
"endorsed": False,
"unread_comments_count": 0,
"read": False,
"comments_count": 0,
})
thread = {
"thread_type": "discussion",
"body": ["this is a post"],
"anonymous_to_peers": ["false"],
"auto_subscribe": ["false"],
"anonymous": ["false"],
"title": ["Hello"],
}
if extra_request_data:
thread.update(extra_request_data)
url = reverse('create_thread', kwargs={'commentable_id': 'i4x-MITx-999-course-Robot_Super_Course',
'course_id': six.text_type(self.course_id)})
response = self.client.post(url, data=thread)
assert mock_request.called
expected_data = {
'thread_type': 'discussion',
'body': u'this is a post',
'context': ThreadContext.COURSE,
            'anonymous_to_peers': False,
            'user_id': 1,
'title': u'Hello',
'commentable_id': u'i4x-MITx-999-course-Robot_Super_Course',
'anonymous': False,
'course_id': six.text_type(self.course_id),
}
if extra_response_data:
expected_data.update(extra_response_data)
mock_request.assert_called_with(
'post',
'{prefix}/i4x-MITx-999-course-Robot_Super_Course/threads'.format(prefix=CS_PREFIX),
data=expected_data,
params={'request_id': ANY},
headers=ANY,
timeout=5
)
assert response.status_code == 200
def update_thread_helper(self, mock_request):
"""
Issues a request to update a thread and verifies the result.
"""
self._setup_mock_request(mock_request)
# Mock out saving in order to test that content is correctly
# updated. Otherwise, the call to thread.save() receives the
# same mocked request data that the original call to retrieve
# the thread did, overwriting any changes.
with patch.object(Thread, 'save'):
response = self.client.post(
reverse("update_thread", kwargs={
"thread_id": "dummy",
"course_id": six.text_type(self.course_id)
}),
data={"body": "foo", "title": "foo", "commentable_id": "some_topic"}
)
self.assertEqual(response.status_code, 200)
data = json.loads(response.content.decode('utf-8'))
self.assertEqual(data['body'], 'foo')
self.assertEqual(data['title'], 'foo')
self.assertEqual(data['commentable_id'], 'some_topic')
@ddt.ddt
@patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request', autospec=True)
@disable_signal(views, 'thread_created')
@disable_signal(views, 'thread_edited')
class ViewsQueryCountTestCase(
ForumsEnableMixin,
UrlResetMixin,
ModuleStoreTestCase,
MockRequestSetupMixin,
ViewsTestCaseMixin
):
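    """
    Verifies the number of mongo calls and SQL queries issued when
    creating and updating threads, for each modulestore type.
    """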
CREATE_USER = False
ENABLED_CACHES = ['default', 'mongo_metadata_inheritance', 'loc_cache']
ENABLED_SIGNALS = ['course_published']
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(ViewsQueryCountTestCase, self).setUp()
def count_queries(func): # pylint: disable=no-self-argument
"""
Decorates test methods to count mongo and SQL calls for a
particular modulestore.
"""
def inner(self, default_store, module_count, mongo_calls, sql_queries, *args, **kwargs):
with modulestore().default_store(default_store):
self.set_up_course(module_count=module_count)
self.clear_caches()
with self.assertNumQueries(sql_queries, table_blacklist=QUERY_COUNT_TABLE_BLACKLIST):
with check_mongo_calls(mongo_calls):
func(self, *args, **kwargs)
return inner
@ddt.data(
(ModuleStoreEnum.Type.mongo, 3, 4, 41),
(ModuleStoreEnum.Type.split, 3, 13, 41),
)
@ddt.unpack
@count_queries
def test_create_thread(self, mock_request):
self.create_thread_helper(mock_request)
@ddt.data(
(ModuleStoreEnum.Type.mongo, 3, 3, 37),
(ModuleStoreEnum.Type.split, 3, 10, 37),
)
@ddt.unpack
@count_queries
def test_update_thread(self, mock_request):
self.update_thread_helper(mock_request)
@ddt.ddt
@patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request', autospec=True)
class ViewsTestCase(
ForumsEnableMixin,
UrlResetMixin,
SharedModuleStoreTestCase,
MockRequestSetupMixin,
ViewsTestCaseMixin,
MockSignalHandlerMixin
):
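    """
    Tests for the discussion view endpoints (thread and comment creation,
    updates, deletion, voting, and flagging) against a mocked comments
    service.
    """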
@classmethod
def setUpClass(cls):
# pylint: disable=super-method-not-called
with super(ViewsTestCase, cls).setUpClassAndTestData():
cls.course = CourseFactory.create(
org='MITx', course='999',
discussion_topics={"Some Topic": {"id": "some_topic"}},
display_name='Robot Super Course',
)
@classmethod
def setUpTestData(cls):
super(ViewsTestCase, cls).setUpTestData()
cls.course_id = cls.course.id
# seed the forums permissions and roles
call_command('seed_permissions_roles', six.text_type(cls.course_id))
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
# Patching the ENABLE_DISCUSSION_SERVICE value affects the contents of urls.py,
# so we need to call super.setUp() which reloads urls.py (because
# of the UrlResetMixin)
super(ViewsTestCase, self).setUp()
# Patch the comment client user save method so it does not try
# to create a new cc user when creating a django user
with patch('student.models.cc.User.save'):
uname = 'student'
email = 'student@edx.org'
self.password = 'test'
# Create the user and make them active so we can log them in.
self.student = User.objects.create_user(uname, email, self.password)
self.student.is_active = True
self.student.save()
# Add a discussion moderator
self.moderator = UserFactory.create(password=self.password)
# Enroll the student in the course
CourseEnrollmentFactory(user=self.student,
course_id=self.course_id)
# Enroll the moderator and give them the appropriate roles
CourseEnrollmentFactory(user=self.moderator, course_id=self.course.id)
self.moderator.roles.add(Role.objects.get(name="Moderator", course_id=self.course.id))
assert self.client.login(username='student', password=self.password)
@contextmanager
def assert_discussion_signals(self, signal, user=None):
if user is None:
user = self.student
with self.assert_signal_sent(views, signal, sender=None, user=user, exclude_args=('post',)):
yield
def test_create_thread(self, mock_request):
with self.assert_discussion_signals('thread_created'):
self.create_thread_helper(mock_request)
def test_create_thread_standalone(self, mock_request):
team = CourseTeamFactory.create(
name="A Team",
course_id=self.course_id,
topic_id='topic_id',
discussion_topic_id="i4x-MITx-999-course-Robot_Super_Course"
)
# Add the student to the team so they can post to the commentable.
team.add_user(self.student)
# create_thread_helper verifies that extra data are passed through to the comments service
self.create_thread_helper(mock_request, extra_response_data={'context': ThreadContext.STANDALONE})
@ddt.data(
('follow_thread', 'thread_followed'),
('unfollow_thread', 'thread_unfollowed'),
)
@ddt.unpack
def test_follow_unfollow_thread_signals(self, view_name, signal, mock_request):
self.create_thread_helper(mock_request)
with self.assert_discussion_signals(signal):
response = self.client.post(
reverse(
view_name,
kwargs={"course_id": six.text_type(self.course_id), "thread_id": 'i4x-MITx-999-course-Robot_Super_Course'}
)
)
self.assertEqual(response.status_code, 200)
def test_delete_thread(self, mock_request):
self._set_mock_request_data(mock_request, {
"user_id": str(self.student.id),
"closed": False,
})
test_thread_id = "test_thread_id"
request = RequestFactory().post("dummy_url", {"id": test_thread_id})
request.user = self.student
request.view_name = "delete_thread"
with self.assert_discussion_signals('thread_deleted'):
response = views.delete_thread(
request,
course_id=six.text_type(self.course.id),
thread_id=test_thread_id
)
self.assertEqual(response.status_code, 200)
self.assertTrue(mock_request.called)
def test_delete_comment(self, mock_request):
self._set_mock_request_data(mock_request, {
"user_id": str(self.student.id),
"closed": False,
})
test_comment_id = "test_comment_id"
request = RequestFactory().post("dummy_url", {"id": test_comment_id})
request.user = self.student
request.view_name = "delete_comment"
with self.assert_discussion_signals('comment_deleted'):
response = views.delete_comment(
request,
course_id=six.text_type(self.course.id),
comment_id=test_comment_id
)
self.assertEqual(response.status_code, 200)
self.assertTrue(mock_request.called)
args = mock_request.call_args[0]
self.assertEqual(args[0], "delete")
self.assertTrue(args[1].endswith("/{}".format(test_comment_id)))
def _test_request_error(self, view_name, view_kwargs, data, mock_request):
"""
Submit a request against the given view with the given data and ensure
that the result is a 400 error and that no data was posted using
mock_request
"""
self._setup_mock_request(mock_request, include_depth=(view_name == "create_sub_comment"))
response = self.client.post(reverse(view_name, kwargs=view_kwargs), data=data)
self.assertEqual(response.status_code, 400)
for call in mock_request.call_args_list:
self.assertEqual(call[0][0].lower(), "get")
def test_create_thread_no_title(self, mock_request):
self._test_request_error(
"create_thread",
{"commentable_id": "dummy", "course_id": six.text_type(self.course_id)},
{"body": "foo"},
mock_request
)
def test_create_thread_empty_title(self, mock_request):
self._test_request_error(
"create_thread",
{"commentable_id": "dummy", "course_id": six.text_type(self.course_id)},
{"body": "foo", "title": " "},
mock_request
)
def test_create_thread_no_body(self, mock_request):
self._test_request_error(
"create_thread",
{"commentable_id": "dummy", "course_id": six.text_type(self.course_id)},
{"title": "foo"},
mock_request
)
def test_create_thread_empty_body(self, mock_request):
self._test_request_error(
"create_thread",
{"commentable_id": "dummy", "course_id": six.text_type(self.course_id)},
{"body": " ", "title": "foo"},
mock_request
)
def test_update_thread_no_title(self, mock_request):
self._test_request_error(
"update_thread",
{"thread_id": "dummy", "course_id": six.text_type(self.course_id)},
{"body": "foo"},
mock_request
)
def test_update_thread_empty_title(self, mock_request):
self._test_request_error(
"update_thread",
{"thread_id": "dummy", "course_id": six.text_type(self.course_id)},
{"body": "foo", "title": " "},
mock_request
)
def test_update_thread_no_body(self, mock_request):
self._test_request_error(
"update_thread",
{"thread_id": "dummy", "course_id": six.text_type(self.course_id)},
{"title": "foo"},
mock_request
)
def test_update_thread_empty_body(self, mock_request):
self._test_request_error(
"update_thread",
{"thread_id": "dummy", "course_id": six.text_type(self.course_id)},
{"body": " ", "title": "foo"},
mock_request
)
def test_update_thread_course_topic(self, mock_request):
with self.assert_discussion_signals('thread_edited'):
self.update_thread_helper(mock_request)
@patch(
'lms.djangoapps.discussion.django_comment_client.utils.get_discussion_categories_ids',
return_value=["test_commentable"],
)
def test_update_thread_wrong_commentable_id(self, mock_get_discussion_id_map, mock_request):
self._test_request_error(
"update_thread",
{"thread_id": "dummy", "course_id": six.text_type(self.course_id)},
{"body": "foo", "title": "foo", "commentable_id": "wrong_commentable"},
mock_request
)
def test_create_comment(self, mock_request):
self._setup_mock_request(mock_request)
with self.assert_discussion_signals('comment_created'):
response = self.client.post(
reverse(
"create_comment",
kwargs={"course_id": six.text_type(self.course_id), "thread_id": "dummy"}
),
data={"body": "body"}
)
self.assertEqual(response.status_code, 200)
def test_create_comment_no_body(self, mock_request):
self._test_request_error(
"create_comment",
{"thread_id": "dummy", "course_id": six.text_type(self.course_id)},
{},
mock_request
)
def test_create_comment_empty_body(self, mock_request):
self._test_request_error(
"create_comment",
{"thread_id": "dummy", "course_id": six.text_type(self.course_id)},
{"body": " "},
mock_request
)
def test_create_sub_comment_no_body(self, mock_request):
self._test_request_error(
"create_sub_comment",
{"comment_id": "dummy", "course_id": six.text_type(self.course_id)},
{},
mock_request
)
def test_create_sub_comment_empty_body(self, mock_request):
self._test_request_error(
"create_sub_comment",
{"comment_id": "dummy", "course_id": six.text_type(self.course_id)},
{"body": " "},
mock_request
)
def test_update_comment_no_body(self, mock_request):
self._test_request_error(
"update_comment",
{"comment_id": "dummy", "course_id": six.text_type(self.course_id)},
{},
mock_request
)
def test_update_comment_empty_body(self, mock_request):
self._test_request_error(
"update_comment",
{"comment_id": "dummy", "course_id": six.text_type(self.course_id)},
{"body": " "},
mock_request
)
def test_update_comment_basic(self, mock_request):
self._setup_mock_request(mock_request)
comment_id = "test_comment_id"
updated_body = "updated body"
with self.assert_discussion_signals('comment_edited'):
response = self.client.post(
reverse(
"update_comment",
kwargs={"course_id": six.text_type(self.course_id), "comment_id": comment_id}
),
data={"body": updated_body}
)
self.assertEqual(response.status_code, 200)
mock_request.assert_called_with(
"put",
"{prefix}/comments/{comment_id}".format(prefix=CS_PREFIX, comment_id=comment_id),
headers=ANY,
params=ANY,
timeout=ANY,
data={"body": updated_body}
)
def test_flag_thread_open(self, mock_request):
self.flag_thread(mock_request, False)
def test_flag_thread_close(self, mock_request):
self.flag_thread(mock_request, True)
def flag_thread(self, mock_request, is_closed):
self._set_mock_request_data(mock_request, {
"title": "Hello",
"body": "this is a post",
"course_id": "MITx/999/Robot_Super_Course",
"anonymous": False,
"anonymous_to_peers": False,
"commentable_id": "i4x-MITx-999-course-Robot_Super_Course",
"created_at": "2013-05-10T18:53:43Z",
"updated_at": "2013-05-10T18:53:43Z",
"at_position_list": [],
"closed": is_closed,
"id": "518d4237b023791dca00000d",
"user_id": "1", "username": "robot",
"votes": {
"count": 0,
"up_count": 0,
"down_count": 0,
"point": 0
},
"abuse_flaggers": [1],
"type": "thread",
"group_id": None,
"pinned": False,
"endorsed": False,
"unread_comments_count": 0,
"read": False,
"comments_count": 0,
})
url = reverse('flag_abuse_for_thread', kwargs={
'thread_id': '518d4237b023791dca00000d',
'course_id': six.text_type(self.course_id)
})
response = self.client.post(url)
assert mock_request.called
call_list = [
(
('get', '{prefix}/threads/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
{
'data': None,
'params': {'mark_as_read': True, 'request_id': ANY, 'with_responses': False},
'headers': ANY,
'timeout': 5
}
),
(
('put', '{prefix}/threads/518d4237b023791dca00000d/abuse_flag'.format(prefix=CS_PREFIX)),
{
'data': {'user_id': '1'},
'params': {'request_id': ANY},
'headers': ANY,
'timeout': 5
}
),
(
('get', '{prefix}/threads/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
{
'data': None,
'params': {'mark_as_read': True, 'request_id': ANY, 'with_responses': False},
'headers': ANY,
'timeout': 5
}
)
]
assert mock_request.call_args_list == call_list
assert response.status_code == 200
def test_un_flag_thread_open(self, mock_request):
self.un_flag_thread(mock_request, False)
def test_un_flag_thread_close(self, mock_request):
self.un_flag_thread(mock_request, True)
def un_flag_thread(self, mock_request, is_closed):
self._set_mock_request_data(mock_request, {
"title": "Hello",
"body": "this is a post",
"course_id": "MITx/999/Robot_Super_Course",
"anonymous": False,
"anonymous_to_peers": False,
"commentable_id": "i4x-MITx-999-course-Robot_Super_Course",
"created_at": "2013-05-10T18:53:43Z",
"updated_at": "2013-05-10T18:53:43Z",
"at_position_list": [],
"closed": is_closed,
"id": "518d4237b023791dca00000d",
"user_id": "1",
"username": "robot",
"votes": {
"count": 0,
"up_count": 0,
"down_count": 0,
"point": 0
},
"abuse_flaggers": [],
"type": "thread",
"group_id": None,
"pinned": False,
"endorsed": False,
"unread_comments_count": 0,
"read": False,
"comments_count": 0
})
url = reverse('un_flag_abuse_for_thread', kwargs={
'thread_id': '518d4237b023791dca00000d',
'course_id': six.text_type(self.course_id)
})
response = self.client.post(url)
assert mock_request.called
call_list = [
(
('get', '{prefix}/threads/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
{
'data': None,
'params': {'mark_as_read': True, 'request_id': ANY, 'with_responses': False},
'headers': ANY,
'timeout': 5
}
),
(
('put', '{prefix}/threads/518d4237b023791dca00000d/abuse_unflag'.format(prefix=CS_PREFIX)),
{
'data': {'user_id': '1'},
'params': {'request_id': ANY},
'headers': ANY,
'timeout': 5
}
),
(
('get', '{prefix}/threads/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
{
'data': None,
'params': {'mark_as_read': True, 'request_id': ANY, 'with_responses': False},
'headers': ANY,
'timeout': 5
}
)
]
assert mock_request.call_args_list == call_list
assert response.status_code == 200
def test_flag_comment_open(self, mock_request):
self.flag_comment(mock_request, False)
def test_flag_comment_close(self, mock_request):
self.flag_comment(mock_request, True)
def flag_comment(self, mock_request, is_closed):
self._set_mock_request_data(mock_request, {
"body": "this is a comment",
"course_id": "MITx/999/Robot_Super_Course",
"anonymous": False,
"anonymous_to_peers": False,
"commentable_id": "i4x-MITx-999-course-Robot_Super_Course",
"created_at": "2013-05-10T18:53:43Z",
"updated_at": "2013-05-10T18:53:43Z",
"at_position_list": [],
"closed": is_closed,
"id": "518d4237b023791dca00000d",
"user_id": "1",
"username": "robot",
"votes": {
"count": 0,
"up_count": 0,
"down_count": 0,
"point": 0
},
"abuse_flaggers": [1],
"type": "comment",
"endorsed": False
})
url = reverse('flag_abuse_for_comment', kwargs={
'comment_id': '518d4237b023791dca00000d',
'course_id': six.text_type(self.course_id)
})
response = self.client.post(url)
assert mock_request.called
call_list = [
(
('get', '{prefix}/comments/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
{
'data': None,
'params': {'request_id': ANY},
'headers': ANY,
'timeout': 5
}
),
(
('put', '{prefix}/comments/518d4237b023791dca00000d/abuse_flag'.format(prefix=CS_PREFIX)),
{
'data': {'user_id': '1'},
'params': {'request_id': ANY},
'headers': ANY,
'timeout': 5
}
),
(
('get', '{prefix}/comments/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
{
'data': None,
'params': {'request_id': ANY},
'headers': ANY,
'timeout': 5
}
)
]
assert mock_request.call_args_list == call_list
assert response.status_code == 200
def test_un_flag_comment_open(self, mock_request):
self.un_flag_comment(mock_request, False)
def test_un_flag_comment_close(self, mock_request):
self.un_flag_comment(mock_request, True)
def un_flag_comment(self, mock_request, is_closed):
self._set_mock_request_data(mock_request, {
"body": "this is a comment",
"course_id": "MITx/999/Robot_Super_Course",
"anonymous": False,
"anonymous_to_peers": False,
"commentable_id": "i4x-MITx-999-course-Robot_Super_Course",
"created_at": "2013-05-10T18:53:43Z",
"updated_at": "2013-05-10T18:53:43Z",
"at_position_list": [],
"closed": is_closed,
"id": "518d4237b023791dca00000d",
"user_id": "1",
"username": "robot",
"votes": {
"count": 0,
"up_count": 0,
"down_count": 0,
"point": 0
},
"abuse_flaggers": [],
"type": "comment",
"endorsed": False
})
url = reverse('un_flag_abuse_for_comment', kwargs={
'comment_id': '518d4237b023791dca00000d',
'course_id': six.text_type(self.course_id)
})
response = self.client.post(url)
assert mock_request.called
call_list = [
(
('get', '{prefix}/comments/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
{
'data': None,
'params': {'request_id': ANY},
'headers': ANY,
'timeout': 5
}
),
(
('put', '{prefix}/comments/518d4237b023791dca00000d/abuse_unflag'.format(prefix=CS_PREFIX)),
{
'data': {'user_id': '1'},
'params': {'request_id': ANY},
'headers': ANY,
'timeout': 5
}
),
(
('get', '{prefix}/comments/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
{
'data': None,
'params': {'request_id': ANY},
'headers': ANY,
'timeout': 5
}
)
]
assert mock_request.call_args_list == call_list
assert response.status_code == 200
@ddt.data(
('upvote_thread', 'thread_id', 'thread_voted'),
('upvote_comment', 'comment_id', 'comment_voted'),
('downvote_thread', 'thread_id', 'thread_voted'),
('downvote_comment', 'comment_id', 'comment_voted')
)
@ddt.unpack
def test_voting(self, view_name, item_id, signal, mock_request):
self._setup_mock_request(mock_request)
with self.assert_discussion_signals(signal):
response = self.client.post(
reverse(
view_name,
kwargs={item_id: 'dummy', 'course_id': six.text_type(self.course_id)}
)
)
self.assertEqual(response.status_code, 200)
def test_endorse_comment(self, mock_request):
self._setup_mock_request(mock_request)
self.client.login(username=self.moderator.username, password=self.password)
with self.assert_discussion_signals('comment_endorsed', user=self.moderator):
response = self.client.post(
reverse(
'endorse_comment',
kwargs={'comment_id': 'dummy', 'course_id': six.text_type(self.course_id)}
)
)
self.assertEqual(response.status_code, 200)
@patch("openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request", autospec=True)
@disable_signal(views, 'comment_endorsed')
class ViewPermissionsTestCase(ForumsEnableMixin, UrlResetMixin, SharedModuleStoreTestCase, MockRequestSetupMixin):
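    """
    Checks role-based permissions for pinning threads and endorsing
    comments (students, moderators, and question authors).
    """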
@classmethod
def setUpClass(cls):
# pylint: disable=super-method-not-called
with super(ViewPermissionsTestCase, cls).setUpClassAndTestData():
cls.course = CourseFactory.create()
@classmethod
def setUpTestData(cls):
super(ViewPermissionsTestCase, cls).setUpTestData()
seed_permissions_roles(cls.course.id)
cls.password = "test password"
cls.student = UserFactory.create(password=cls.password)
cls.moderator = UserFactory.create(password=cls.password)
CourseEnrollmentFactory(user=cls.student, course_id=cls.course.id)
CourseEnrollmentFactory(user=cls.moderator, course_id=cls.course.id)
cls.moderator.roles.add(Role.objects.get(name="Moderator", course_id=cls.course.id))
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(ViewPermissionsTestCase, self).setUp()
def test_pin_thread_as_student(self, mock_request):
self._set_mock_request_data(mock_request, {})
self.client.login(username=self.student.username, password=self.password)
response = self.client.post(
reverse("pin_thread", kwargs={"course_id": six.text_type(self.course.id), "thread_id": "dummy"})
)
self.assertEqual(response.status_code, 401)
def test_pin_thread_as_moderator(self, mock_request):
self._set_mock_request_data(mock_request, {})
self.client.login(username=self.moderator.username, password=self.password)
response = self.client.post(
reverse("pin_thread", kwargs={"course_id": six.text_type(self.course.id), "thread_id": "dummy"})
)
self.assertEqual(response.status_code, 200)
def test_un_pin_thread_as_student(self, mock_request):
self._set_mock_request_data(mock_request, {})
self.client.login(username=self.student.username, password=self.password)
response = self.client.post(
reverse("un_pin_thread", kwargs={"course_id": six.text_type(self.course.id), "thread_id": "dummy"})
)
self.assertEqual(response.status_code, 401)
def test_un_pin_thread_as_moderator(self, mock_request):
self._set_mock_request_data(mock_request, {})
self.client.login(username=self.moderator.username, password=self.password)
response = self.client.post(
reverse("un_pin_thread", kwargs={"course_id": six.text_type(self.course.id), "thread_id": "dummy"})
)
self.assertEqual(response.status_code, 200)
def _set_mock_request_thread_and_comment(self, mock_request, thread_data, comment_data):
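        """
        Configure mock_request to return thread_data for thread URLs and
        comment_data for comment URLs.
        """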
def handle_request(*args, **kwargs):
url = args[1]
if "/threads/" in url:
return self._create_response_mock(thread_data)
elif "/comments/" in url:
return self._create_response_mock(comment_data)
else:
raise ArgumentError("Bad url to mock request")
mock_request.side_effect = handle_request
def test_endorse_response_as_staff(self, mock_request):
self._set_mock_request_thread_and_comment(
mock_request,
{"type": "thread", "thread_type": "question", "user_id": str(self.student.id)},
{"type": "comment", "thread_id": "dummy"}
)
self.client.login(username=self.moderator.username, password=self.password)
response = self.client.post(
reverse("endorse_comment", kwargs={"course_id": six.text_type(self.course.id), "comment_id": "dummy"})
)
self.assertEqual(response.status_code, 200)
def test_endorse_response_as_student(self, mock_request):
self._set_mock_request_thread_and_comment(
mock_request,
{"type": "thread", "thread_type": "question", "user_id": str(self.moderator.id)},
{"type": "comment", "thread_id": "dummy"}
)
self.client.login(username=self.student.username, password=self.password)
response = self.client.post(
reverse("endorse_comment", kwargs={"course_id": six.text_type(self.course.id), "comment_id": "dummy"})
)
self.assertEqual(response.status_code, 401)
def test_endorse_response_as_student_question_author(self, mock_request):
self._set_mock_request_thread_and_comment(
mock_request,
{"type": "thread", "thread_type": "question", "user_id": str(self.student.id)},
{"type": "comment", "thread_id": "dummy"}
)
self.client.login(username=self.student.username, password=self.password)
response = self.client.post(
reverse("endorse_comment", kwargs={"course_id": six.text_type(self.course.id), "comment_id": "dummy"})
)
self.assertEqual(response.status_code, 200)
class CreateThreadUnicodeTestCase(
ForumsEnableMixin,
SharedModuleStoreTestCase,
UnicodeTestMixin,
MockRequestSetupMixin):
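    """
    Make sure thread creation can handle unicode bodies and titles.
    """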
@classmethod
def setUpClass(cls):
# pylint: disable=super-method-not-called
with super(CreateThreadUnicodeTestCase, cls).setUpClassAndTestData():
cls.course = CourseFactory.create()
@classmethod
def setUpTestData(cls):
super(CreateThreadUnicodeTestCase, cls).setUpTestData()
seed_permissions_roles(cls.course.id)
cls.student = UserFactory.create()
CourseEnrollmentFactory(user=cls.student, course_id=cls.course.id)
@patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request', autospec=True)
    def _test_unicode_data(self, text, mock_request):
"""
Test to make sure unicode data in a thread doesn't break it.
"""
self._set_mock_request_data(mock_request, {})
request = RequestFactory().post("dummy_url", {"thread_type": "discussion", "body": text, "title": text})
request.user = self.student
request.view_name = "create_thread"
response = views.create_thread(
# The commentable ID contains a username, the Unicode char below ensures it works fine
request, course_id=six.text_type(self.course.id), commentable_id=u"non_tåem_dummy_id"
)
self.assertEqual(response.status_code, 200)
self.assertTrue(mock_request.called)
self.assertEqual(mock_request.call_args[1]["data"]["body"], text)
self.assertEqual(mock_request.call_args[1]["data"]["title"], text)
@disable_signal(views, 'thread_edited')
class UpdateThreadUnicodeTestCase(
ForumsEnableMixin,
SharedModuleStoreTestCase,
UnicodeTestMixin,
MockRequestSetupMixin
):
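    """
    Make sure thread updates can handle unicode bodies and titles.
    """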
@classmethod
def setUpClass(cls):
# pylint: disable=super-method-not-called
with super(UpdateThreadUnicodeTestCase, cls).setUpClassAndTestData():
cls.course = CourseFactory.create()
@classmethod
def setUpTestData(cls):
super(UpdateThreadUnicodeTestCase, cls).setUpTestData()
seed_permissions_roles(cls.course.id)
cls.student = UserFactory.create()
CourseEnrollmentFactory(user=cls.student, course_id=cls.course.id)
@patch(
'lms.djangoapps.discussion.django_comment_client.utils.get_discussion_categories_ids',
return_value=["test_commentable"],
)
@patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request', autospec=True)
def _test_unicode_data(self, text, mock_request, mock_get_discussion_id_map):
self._set_mock_request_data(mock_request, {
"user_id": str(self.student.id),
"closed": False,
})
request = RequestFactory().post("dummy_url", {"body": text, "title": text, "thread_type": "question", "commentable_id": "test_commentable"})
request.user = self.student
request.view_name = "update_thread"
response = views.update_thread(request, course_id=six.text_type(self.course.id), thread_id="dummy_thread_id")
self.assertEqual(response.status_code, 200)
self.assertTrue(mock_request.called)
self.assertEqual(mock_request.call_args[1]["data"]["body"], text)
self.assertEqual(mock_request.call_args[1]["data"]["title"], text)
self.assertEqual(mock_request.call_args[1]["data"]["thread_type"], "question")
self.assertEqual(mock_request.call_args[1]["data"]["commentable_id"], "test_commentable")
@disable_signal(views, 'comment_created')
class CreateCommentUnicodeTestCase(
ForumsEnableMixin,
SharedModuleStoreTestCase,
UnicodeTestMixin,
MockRequestSetupMixin
):
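    """
    Make sure responses to a thread can handle unicode.
    """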
@classmethod
def setUpClass(cls):
# pylint: disable=super-method-not-called
with super(CreateCommentUnicodeTestCase, cls).setUpClassAndTestData():
cls.course = CourseFactory.create()
@classmethod
def setUpTestData(cls):
super(CreateCommentUnicodeTestCase, cls).setUpTestData()
seed_permissions_roles(cls.course.id)
cls.student = UserFactory.create()
CourseEnrollmentFactory(user=cls.student, course_id=cls.course.id)
@patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request', autospec=True)
def _test_unicode_data(self, text, mock_request):
commentable_id = "non_team_dummy_id"
self._set_mock_request_data(mock_request, {
"closed": False,
"commentable_id": commentable_id
})
# We have to get clever here due to Thread's setters and getters.
# Patch won't work with it.
try:
Thread.commentable_id = commentable_id
request = RequestFactory().post("dummy_url", {"body": text})
request.user = self.student
request.view_name = "create_comment"
response = views.create_comment(
request, course_id=six.text_type(self.course.id), thread_id="dummy_thread_id"
)
self.assertEqual(response.status_code, 200)
self.assertTrue(mock_request.called)
self.assertEqual(mock_request.call_args[1]["data"]["body"], text)
finally:
del Thread.commentable_id
@disable_signal(views, 'comment_edited')
class UpdateCommentUnicodeTestCase(
ForumsEnableMixin,
SharedModuleStoreTestCase,
UnicodeTestMixin,
MockRequestSetupMixin
):
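    """
    Make sure comment updates can handle unicode.
    """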
@classmethod
def setUpClass(cls):
# pylint: disable=super-method-not-called
with super(UpdateCommentUnicodeTestCase, cls).setUpClassAndTestData():
cls.course = CourseFactory.create()
@classmethod
def setUpTestData(cls):
super(UpdateCommentUnicodeTestCase, cls).setUpTestData()
seed_permissions_roles(cls.course.id)
cls.student = UserFactory.create()
CourseEnrollmentFactory(user=cls.student, course_id=cls.course.id)
@patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request', autospec=True)
def _test_unicode_data(self, text, mock_request):
self._set_mock_request_data(mock_request, {
"user_id": str(self.student.id),
"closed": False,
})
request = RequestFactory().post("dummy_url", {"body": text})
request.user = self.student
request.view_name = "update_comment"
response = views.update_comment(request, course_id=six.text_type(self.course.id), comment_id="dummy_comment_id")
self.assertEqual(response.status_code, 200)
self.assertTrue(mock_request.called)
self.assertEqual(mock_request.call_args[1]["data"]["body"], text)
@disable_signal(views, 'comment_created')
class CreateSubCommentUnicodeTestCase(
ForumsEnableMixin,
SharedModuleStoreTestCase,
UnicodeTestMixin,
MockRequestSetupMixin
):
"""
Make sure comments under a response can handle unicode.
"""
@classmethod
def setUpClass(cls):
# pylint: disable=super-method-not-called
with super(CreateSubCommentUnicodeTestCase, cls).setUpClassAndTestData():
cls.course = CourseFactory.create()
@classmethod
def setUpTestData(cls):
super(CreateSubCommentUnicodeTestCase, cls).setUpTestData()
seed_permissions_roles(cls.course.id)
cls.student = UserFactory.create()
CourseEnrollmentFactory(user=cls.student, course_id=cls.course.id)
@patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request', autospec=True)
def _test_unicode_data(self, text, mock_request):
"""
Create a comment with unicode in it.
"""
self._set_mock_request_data(mock_request, {
"closed": False,
"depth": 1,
"thread_id": "test_thread",
"commentable_id": "non_team_dummy_id"
})
request = RequestFactory().post("dummy_url", {"body": text})
request.user = self.student
request.view_name = "create_sub_comment"
Thread.commentable_id = "test_commentable"
try:
response = views.create_sub_comment(
request, course_id=six.text_type(self.course.id), comment_id="dummy_comment_id"
)
self.assertEqual(response.status_code, 200)
self.assertTrue(mock_request.called)
self.assertEqual(mock_request.call_args[1]["data"]["body"], text)
finally:
del Thread.commentable_id
@ddt.ddt
@patch("openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request", autospec=True)
@disable_signal(views, 'thread_voted')
@disable_signal(views, 'thread_edited')
@disable_signal(views, 'comment_created')
@disable_signal(views, 'comment_voted')
@disable_signal(views, 'comment_deleted')
class TeamsPermissionsTestCase(ForumsEnableMixin, UrlResetMixin, SharedModuleStoreTestCase, MockRequestSetupMixin):
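    """
    Checks that thread and comment operations on team commentables are
    restricted to team members, moderators, and (when the course is
    divided) group moderators.
    """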
# Most of the test points use the same ddt data.
# args: user, commentable_id, status_code
ddt_permissions_args = [
# Student in team can do operations on threads/comments within the team commentable.
('student_in_team', 'team_commentable_id', 200),
# Non-team commentables can be edited by any student.
('student_in_team', 'course_commentable_id', 200),
# Student not in team cannot do operations within the team commentable.
('student_not_in_team', 'team_commentable_id', 401),
# Non-team commentables can be edited by any student.
('student_not_in_team', 'course_commentable_id', 200),
        # Moderators can always operate on threads within a team, regardless of team membership.
('moderator', 'team_commentable_id', 200),
# Group moderators have regular student privileges for creating a thread and commenting
('group_moderator', 'course_commentable_id', 200)
]
def change_divided_discussion_settings(self, scheme):
"""
Change divided discussion settings for the current course.
If dividing by cohorts, create and assign users to a cohort.
"""
        enable_cohorts = scheme is CourseDiscussionSettings.COHORT
set_course_discussion_settings(
self.course.id,
enable_cohorts=enable_cohorts,
divided_discussions=[],
always_divide_inline_discussions=True,
division_scheme=scheme,
)
set_course_cohorted(self.course.id, enable_cohorts)
@classmethod
def setUpClass(cls):
# pylint: disable=super-method-not-called
with super(TeamsPermissionsTestCase, cls).setUpClassAndTestData():
teams_configuration = {
'topics': [{'id': "topic_id", 'name': 'Solar Power', 'description': 'Solar power is hot'}]
}
cls.course = CourseFactory.create(teams_configuration=teams_configuration)
@classmethod
def setUpTestData(cls):
super(TeamsPermissionsTestCase, cls).setUpTestData()
cls.course = CourseFactory.create()
cls.password = "test password"
seed_permissions_roles(cls.course.id)
# Create enrollment tracks
CourseModeFactory.create(
course_id=cls.course.id,
mode_slug=CourseMode.VERIFIED
)
CourseModeFactory.create(
course_id=cls.course.id,
mode_slug=CourseMode.AUDIT
)
# Create 6 users--
# student in team (in the team, audit)
# student not in team (not in the team, audit)
# cohorted (in the cohort, audit)
# verified (not in the cohort, verified)
# moderator (in the cohort, audit, moderator permissions)
# group moderator (in the cohort, verified, group moderator permissions)
def create_users_and_enroll(coursemode):
student = UserFactory.create(password=cls.password)
CourseEnrollmentFactory(
course_id=cls.course.id,
user=student,
mode=coursemode
)
return student
cls.student_in_team, cls.student_not_in_team, cls.moderator, cls.cohorted = (
[create_users_and_enroll(CourseMode.AUDIT) for _ in range(4)])
cls.verified, cls.group_moderator = [create_users_and_enroll(CourseMode.VERIFIED) for _ in range(2)]
# Give moderator and group moderator permissions
cls.moderator.roles.add(Role.objects.get(name="Moderator", course_id=cls.course.id))
assign_role(cls.course.id, cls.group_moderator, 'Group Moderator')
# Create a team
cls.team_commentable_id = "team_discussion_id"
cls.team = CourseTeamFactory.create(
name=u'The Only Team',
course_id=cls.course.id,
topic_id='topic_id',
discussion_topic_id=cls.team_commentable_id
)
CourseTeamMembershipFactory.create(team=cls.team, user=cls.student_in_team)
# Dummy commentable ID not linked to a team
cls.course_commentable_id = "course_level_commentable"
# Create cohort and add students to it
CohortFactory(
course_id=cls.course.id,
name='Test Cohort',
users=[cls.group_moderator, cls.cohorted]
)
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(TeamsPermissionsTestCase, self).setUp()
def _setup_mock(self, user, mock_request, data):
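        """
        Set up the mocked comments service response and log in as the
        named user attribute (e.g. 'student_in_team').
        """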
user = getattr(self, user)
self._set_mock_request_data(mock_request, data)
self.client.login(username=user.username, password=self.password)
@ddt.data(
# student_in_team will be able to update his own post, regardless of team membership
('student_in_team', 'student_in_team', 'team_commentable_id', 200, CourseDiscussionSettings.NONE),
('student_in_team', 'student_in_team', 'course_commentable_id', 200, CourseDiscussionSettings.NONE),
        # Students can only update their own posts.
('student_in_team', 'moderator', 'team_commentable_id', 401, CourseDiscussionSettings.NONE),
# Even though student_not_in_team is not in the team, he can still modify posts he created while in the team.
('student_not_in_team', 'student_not_in_team', 'team_commentable_id', 200, CourseDiscussionSettings.NONE),
# Moderators can change their own posts and other people's posts.
('moderator', 'moderator', 'team_commentable_id', 200, CourseDiscussionSettings.NONE),
('moderator', 'student_in_team', 'team_commentable_id', 200, CourseDiscussionSettings.NONE),
# Group moderator can do operations on commentables within their group if the course is divided
('group_moderator', 'verified', 'course_commentable_id', 200, CourseDiscussionSettings.ENROLLMENT_TRACK),
('group_moderator', 'cohorted', 'course_commentable_id', 200, CourseDiscussionSettings.COHORT),
# Group moderators cannot do operations on commentables outside of their group
('group_moderator', 'verified', 'course_commentable_id', 401, CourseDiscussionSettings.COHORT),
('group_moderator', 'cohorted', 'course_commentable_id', 401, CourseDiscussionSettings.ENROLLMENT_TRACK),
# Group moderators cannot do operations when the course is not divided
('group_moderator', 'verified', 'course_commentable_id', 401, CourseDiscussionSettings.NONE),
('group_moderator', 'cohorted', 'course_commentable_id', 401, CourseDiscussionSettings.NONE)
)
@ddt.unpack
def test_update_thread(self, user, thread_author, commentable_id, status_code, division_scheme, mock_request):
"""
Verify that update_thread is limited to thread authors and privileged users (team membership does not matter).
"""
self.change_divided_discussion_settings(division_scheme)
commentable_id = getattr(self, commentable_id)
# thread_author is who is marked as the author of the thread being updated.
thread_author = getattr(self, thread_author)
self._setup_mock(
user, mock_request, # user is the person making the request.
{
"user_id": str(thread_author.id),
"closed": False, "commentable_id": commentable_id,
"context": "standalone",
"username": thread_author.username,
"course_id": six.text_type(self.course.id)
}
)
response = self.client.post(
reverse(
"update_thread",
kwargs={
"course_id": six.text_type(self.course.id),
"thread_id": "dummy"
}
),
data={"body": "foo", "title": "foo", "commentable_id": commentable_id}
)
self.assertEqual(response.status_code, status_code)
@ddt.data(
# Students can delete their own posts
('student_in_team', 'student_in_team', 'team_commentable_id', 200, CourseDiscussionSettings.NONE),
# Moderators can delete any post
('moderator', 'student_in_team', 'team_commentable_id', 200, CourseDiscussionSettings.NONE),
# Others cannot delete posts
('student_in_team', 'moderator', 'team_commentable_id', 401, CourseDiscussionSettings.NONE),
('student_not_in_team', 'student_in_team', 'team_commentable_id', 401, CourseDiscussionSettings.NONE),
# Group moderator can do operations on commentables within their group if the course is divided
('group_moderator', 'verified', 'team_commentable_id', 200, CourseDiscussionSettings.ENROLLMENT_TRACK),
('group_moderator', 'cohorted', 'team_commentable_id', 200, CourseDiscussionSettings.COHORT),
# Group moderators cannot do operations on commentables outside of their group
('group_moderator', 'verified', 'team_commentable_id', 401, CourseDiscussionSettings.COHORT),
('group_moderator', 'cohorted', 'team_commentable_id', 401, CourseDiscussionSettings.ENROLLMENT_TRACK),
# Group moderators cannot do operations when the course is not divided
('group_moderator', 'verified', 'team_commentable_id', 401, CourseDiscussionSettings.NONE),
('group_moderator', 'cohorted', 'team_commentable_id', 401, CourseDiscussionSettings.NONE)
)
@ddt.unpack
def test_delete_comment(self, user, comment_author, commentable_id, status_code, division_scheme, mock_request):
commentable_id = getattr(self, commentable_id)
comment_author = getattr(self, comment_author)
self.change_divided_discussion_settings(division_scheme)
self._setup_mock(user, mock_request, {
"closed": False,
"commentable_id": commentable_id,
"user_id": str(comment_author.id),
"username": comment_author.username,
"course_id": six.text_type(self.course.id)
})
response = self.client.post(
reverse(
"delete_comment",
kwargs={
"course_id": six.text_type(self.course.id),
"comment_id": "dummy"
}
),
data={"body": "foo", "title": "foo"}
)
self.assertEqual(response.status_code, status_code)
@ddt.data(*ddt_permissions_args)
@ddt.unpack
def test_create_comment(self, user, commentable_id, status_code, mock_request):
"""
Verify that create_comment is limited to members of the team or users with 'edit_content' permission.
"""
commentable_id = getattr(self, commentable_id)
self._setup_mock(user, mock_request, {"closed": False, "commentable_id": commentable_id})
response = self.client.post(
reverse(
"create_comment",
kwargs={
"course_id": six.text_type(self.course.id),
"thread_id": "dummy"
}
),
data={"body": "foo", "title": "foo"}
)
self.assertEqual(response.status_code, status_code)
@ddt.data(*ddt_permissions_args)
@ddt.unpack
def test_create_sub_comment(self, user, commentable_id, status_code, mock_request):
"""
Verify that create_subcomment is limited to members of the team or users with 'edit_content' permission.
"""
commentable_id = getattr(self, commentable_id)
self._setup_mock(
user, mock_request,
{"closed": False, "commentable_id": commentable_id, "thread_id": "dummy_thread"},
)
response = self.client.post(
reverse(
"create_sub_comment",
kwargs={
"course_id": six.text_type(self.course.id),
"comment_id": "dummy_comment"
}
),
data={"body": "foo", "title": "foo"}
)
self.assertEqual(response.status_code, status_code)
@ddt.data(*ddt_permissions_args)
@ddt.unpack
def test_comment_actions(self, user, commentable_id, status_code, mock_request):
"""
Verify that voting and flagging of comments is limited to members of the team or users with
'edit_content' permission.
"""
commentable_id = getattr(self, commentable_id)
self._setup_mock(
user, mock_request,
{"closed": False, "commentable_id": commentable_id, "thread_id": "dummy_thread"},
)
for action in ["upvote_comment", "downvote_comment", "un_flag_abuse_for_comment", "flag_abuse_for_comment"]:
response = self.client.post(
reverse(
action,
kwargs={"course_id": six.text_type(self.course.id), "comment_id": "dummy_comment"}
)
)
self.assertEqual(response.status_code, status_code)
@ddt.data(*ddt_permissions_args)
@ddt.unpack
def test_threads_actions(self, user, commentable_id, status_code, mock_request):
"""
Verify that voting, flagging, and following of threads is limited to members of the team or users with
'edit_content' permission.
"""
commentable_id = getattr(self, commentable_id)
self._setup_mock(
user, mock_request,
{"closed": False, "commentable_id": commentable_id},
)
for action in ["upvote_thread", "downvote_thread", "un_flag_abuse_for_thread", "flag_abuse_for_thread",
"follow_thread", "unfollow_thread"]:
response = self.client.post(
reverse(
action,
kwargs={"course_id": six.text_type(self.course.id), "thread_id": "dummy_thread"}
)
)
self.assertEqual(response.status_code, status_code)
TEAM_COMMENTABLE_ID = 'test-team-discussion'
@disable_signal(views, 'comment_created')
@ddt.ddt
class ForumEventTestCase(ForumsEnableMixin, SharedModuleStoreTestCase, MockRequestSetupMixin):
"""
Forum actions are expected to launch analytics events. Test these here.
"""
@classmethod
def setUpClass(cls):
# pylint: disable=super-method-not-called
with super(ForumEventTestCase, cls).setUpClassAndTestData():
cls.course = CourseFactory.create()
@classmethod
def setUpTestData(cls):
super(ForumEventTestCase, cls).setUpTestData()
seed_permissions_roles(cls.course.id)
cls.student = UserFactory.create()
CourseEnrollmentFactory(user=cls.student, course_id=cls.course.id)
cls.student.roles.add(Role.objects.get(name="Student", course_id=cls.course.id))
CourseAccessRoleFactory(course_id=cls.course.id, user=cls.student, role='Wizard')
@patch('eventtracking.tracker.emit')
@patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request', autospec=True)
def test_response_event(self, mock_request, mock_emit):
"""
Check to make sure an event is fired when a user responds to a thread.
"""
self._set_mock_request_data(mock_request, {
"closed": False,
"commentable_id": 'test_commentable_id',
'thread_id': 'test_thread_id',
})
request = RequestFactory().post("dummy_url", {"body": "Test comment", 'auto_subscribe': True})
request.user = self.student
request.view_name = "create_comment"
views.create_comment(request, course_id=six.text_type(self.course.id), thread_id='test_thread_id')
event_name, event = mock_emit.call_args[0]
self.assertEqual(event_name, 'edx.forum.response.created')
self.assertEqual(event['body'], "Test comment")
self.assertEqual(event['commentable_id'], 'test_commentable_id')
self.assertEqual(event['user_forums_roles'], ['Student'])
self.assertEqual(event['user_course_roles'], ['Wizard'])
self.assertEqual(event['discussion']['id'], 'test_thread_id')
self.assertEqual(event['options']['followed'], True)
@patch('eventtracking.tracker.emit')
@patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request', autospec=True)
def test_comment_event(self, mock_request, mock_emit):
"""
Ensure an event is fired when someone comments on a response.
"""
self._set_mock_request_data(mock_request, {
"closed": False,
"depth": 1,
"thread_id": "test_thread_id",
"commentable_id": "test_commentable_id",
"parent_id": "test_response_id"
})
request = RequestFactory().post("dummy_url", {"body": "Another comment"})
request.user = self.student
request.view_name = "create_sub_comment"
views.create_sub_comment(request, course_id=six.text_type(self.course.id), comment_id="dummy_comment_id")
event_name, event = mock_emit.call_args[0]
self.assertEqual(event_name, "edx.forum.comment.created")
self.assertEqual(event['body'], 'Another comment')
self.assertEqual(event['discussion']['id'], 'test_thread_id')
self.assertEqual(event['response']['id'], 'test_response_id')
self.assertEqual(event['user_forums_roles'], ['Student'])
self.assertEqual(event['user_course_roles'], ['Wizard'])
self.assertEqual(event['options']['followed'], False)
@patch('eventtracking.tracker.emit')
@patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request', autospec=True)
@ddt.data((
'create_thread',
'edx.forum.thread.created', {
'thread_type': 'discussion',
'body': 'Test text',
'title': 'Test',
'auto_subscribe': True
},
{'commentable_id': TEAM_COMMENTABLE_ID}
), (
'create_comment',
'edx.forum.response.created',
{'body': 'Test comment', 'auto_subscribe': True},
{'thread_id': 'test_thread_id'}
), (
'create_sub_comment',
'edx.forum.comment.created',
{'body': 'Another comment'},
{'comment_id': 'dummy_comment_id'}
))
@ddt.unpack
def test_team_events(self, view_name, event_name, view_data, view_kwargs, mock_request, mock_emit):
user = self.student
team = CourseTeamFactory.create(discussion_topic_id=TEAM_COMMENTABLE_ID)
CourseTeamMembershipFactory.create(team=team, user=user)
self._set_mock_request_data(mock_request, {
'closed': False,
'commentable_id': TEAM_COMMENTABLE_ID,
'thread_id': 'test_thread_id',
})
request = RequestFactory().post('dummy_url', view_data)
request.user = user
request.view_name = view_name
getattr(views, view_name)(request, course_id=six.text_type(self.course.id), **view_kwargs)
name, event = mock_emit.call_args[0]
self.assertEqual(name, event_name)
self.assertEqual(event['team_id'], team.team_id)
@ddt.data(
('vote_for_thread', 'thread_id', 'thread'),
('undo_vote_for_thread', 'thread_id', 'thread'),
('vote_for_comment', 'comment_id', 'response'),
('undo_vote_for_comment', 'comment_id', 'response'),
)
@ddt.unpack
@patch('eventtracking.tracker.emit')
@patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request', autospec=True)
def test_thread_voted_event(self, view_name, obj_id_name, obj_type, mock_request, mock_emit):
undo = view_name.startswith('undo')
self._set_mock_request_data(mock_request, {
'closed': False,
'commentable_id': 'test_commentable_id',
'username': 'gumprecht',
})
request = RequestFactory().post('dummy_url', {})
request.user = self.student
request.view_name = view_name
view_function = getattr(views, view_name)
kwargs = dict(course_id=six.text_type(self.course.id))
kwargs[obj_id_name] = obj_id_name
if not undo:
kwargs.update(value='up')
view_function(request, **kwargs)
self.assertTrue(mock_emit.called)
event_name, event = mock_emit.call_args[0]
self.assertEqual(event_name, 'edx.forum.{}.voted'.format(obj_type))
self.assertEqual(event['target_username'], 'gumprecht')
self.assertEqual(event['undo_vote'], undo)
self.assertEqual(event['vote_value'], 'up')
class UsersEndpointTestCase(ForumsEnableMixin, SharedModuleStoreTestCase, MockRequestSetupMixin):
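    """
    Tests for the users endpoint that looks up forum users by username.
    """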
@classmethod
def setUpClass(cls):
# pylint: disable=super-method-not-called
with super(UsersEndpointTestCase, cls).setUpClassAndTestData():
cls.course = CourseFactory.create()
@classmethod
def setUpTestData(cls):
super(UsersEndpointTestCase, cls).setUpTestData()
seed_permissions_roles(cls.course.id)
cls.student = UserFactory.create()
cls.enrollment = CourseEnrollmentFactory(user=cls.student, course_id=cls.course.id)
cls.other_user = UserFactory.create(username="other")
CourseEnrollmentFactory(user=cls.other_user, course_id=cls.course.id)
def set_post_counts(self, mock_request, threads_count=1, comments_count=1):
"""
        Set up a mock response from the comments service for getting post counts for our other_user.
"""
self._set_mock_request_data(mock_request, {
"threads_count": threads_count,
"comments_count": comments_count,
})
def make_request(self, method='get', course_id=None, **kwargs):
course_id = course_id or self.course.id
request = getattr(RequestFactory(), method)("dummy_url", kwargs)
request.user = self.student
request.view_name = "users"
        return views.users(request, course_id=six.text_type(course_id))
@patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request', autospec=True)
def test_finds_exact_match(self, mock_request):
self.set_post_counts(mock_request)
response = self.make_request(username="other")
self.assertEqual(response.status_code, 200)
self.assertEqual(
json.loads(response.content.decode('utf-8'))["users"],
[{"id": self.other_user.id, "username": self.other_user.username}]
)
@patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request', autospec=True)
def test_finds_no_match(self, mock_request):
self.set_post_counts(mock_request)
response = self.make_request(username="othor")
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content.decode('utf-8'))["users"], [])
def test_requires_GET(self):
response = self.make_request(method='post', username="other")
self.assertEqual(response.status_code, 405)
def test_requires_username_param(self):
response = self.make_request()
self.assertEqual(response.status_code, 400)
content = json.loads(response.content.decode('utf-8'))
self.assertIn("errors", content)
self.assertNotIn("users", content)
def test_course_does_not_exist(self):
course_id = CourseKey.from_string("does/not/exist")
response = self.make_request(course_id=course_id, username="other")
self.assertEqual(response.status_code, 404)
content = json.loads(response.content.decode('utf-8'))
self.assertIn("errors", content)
self.assertNotIn("users", content)
def test_requires_requestor_enrolled_in_course(self):
# unenroll self.student from the course.
self.enrollment.delete()
response = self.make_request(username="other")
self.assertEqual(response.status_code, 404)
content = json.loads(response.content.decode('utf-8'))
self.assertIn("errors", content)
self.assertNotIn("users", content)
@patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request', autospec=True)
def test_requires_matched_user_has_forum_content(self, mock_request):
self.set_post_counts(mock_request, 0, 0)
response = self.make_request(username="other")
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content.decode('utf-8'))["users"], [])
@ddt.ddt
class SegmentIOForumThreadViewedEventTestCase(SegmentIOTrackingTestCaseBase):
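    """
    Tests that SegmentIO 'Forum: View Thread' navigation events are
    accepted and transformed into edx.forum.thread.viewed events.
    """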
def _raise_navigation_event(self, label, include_name):
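        """
        Build a SegmentIO navigation event with the given label and send
        it through the tracking middleware and the segmentio endpoint.
        """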
middleware = TrackMiddleware()
kwargs = {'label': label}
if include_name:
kwargs['name'] = 'edx.bi.app.navigation.screen'
else:
kwargs['exclude_name'] = True
request = self.create_request(
data=self.create_segmentio_event_json(**kwargs),
content_type='application/json',
)
User.objects.create(pk=SEGMENTIO_TEST_USER_ID, username=str(mock.sentinel.username))
middleware.process_request(request)
try:
response = segmentio.segmentio_event(request)
self.assertEqual(response.status_code, 200)
finally:
middleware.process_response(request, None)
@ddt.data(True, False)
def test_thread_viewed(self, include_name):
"""
Tests that a SegmentIO thread viewed event is accepted and transformed.
Only tests that the transformation happens at all; does not
comprehensively test that it happens correctly.
ForumThreadViewedEventTransformerTestCase tests for correctness.
"""
self._raise_navigation_event('Forum: View Thread', include_name)
event = self.get_event()
self.assertEqual(event['name'], 'edx.forum.thread.viewed')
self.assertEqual(event['event_type'], event['name'])
@ddt.data(True, False)
def test_non_thread_viewed(self, include_name):
"""
Tests that other BI events are thrown out.
"""
self._raise_navigation_event('Forum: Create Thread', include_name)
self.assert_no_events_emitted()
def _get_transformed_event(input_event):
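    """
    Run the given event dict through ForumThreadViewedEventTransformer
    and return the transformer instance.
    """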
transformer = ForumThreadViewedEventTransformer(**input_event)
transformer.transform()
return transformer
def _create_event(
label='Forum: View Thread',
include_context=True,
inner_context=None,
username=None,
course_id=None,
**event_data
):
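    """
    Build a SegmentIO-style navigation event dict for the transformer
    tests, optionally including a context, username, course_id, and
    event data.
    """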
result = {'name': 'edx.bi.app.navigation.screen'}
if include_context:
result['context'] = {'label': label}
if course_id:
result['context']['course_id'] = str(course_id)
if username:
result['username'] = username
if event_data:
result['event'] = event_data
if inner_context:
if not event_data:
result['event'] = {}
result['event']['context'] = inner_context
return result
def _create_and_transform_event(**kwargs):
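    """
    Create an event via _create_event and return it along with its
    transformed version.
    """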
event = _create_event(**kwargs)
return event, _get_transformed_event(event)
@ddt.ddt
class ForumThreadViewedEventTransformerTestCase(ForumsEnableMixin, UrlResetMixin, ModuleStoreTestCase):
"""
Test that the ForumThreadViewedEventTransformer transforms events correctly
and without raising exceptions.
Because the events passed through the transformer can come from external
sources (e.g., a mobile app), we carefully test a myriad of cases, including
those with incomplete and malformed events.
"""
CATEGORY_ID = 'i4x-edx-discussion-id'
CATEGORY_NAME = 'Discussion 1'
PARENT_CATEGORY_NAME = 'Chapter 1'
TEAM_CATEGORY_ID = 'i4x-edx-team-discussion-id'
TEAM_CATEGORY_NAME = 'Team Chat'
TEAM_PARENT_CATEGORY_NAME = PARENT_CATEGORY_NAME
DUMMY_CATEGORY_ID = 'i4x-edx-dummy-commentable-id'
DUMMY_THREAD_ID = 'dummy_thread_id'
@mock.patch.dict("student.models.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(ForumThreadViewedEventTransformerTestCase, self).setUp()
self.courses_by_store = {
ModuleStoreEnum.Type.mongo: CourseFactory.create(
org='TestX',
course='TR-101',
run='Event_Transform_Test',
default_store=ModuleStoreEnum.Type.mongo,
),
ModuleStoreEnum.Type.split: CourseFactory.create(
org='TestX',
course='TR-101S',
run='Event_Transform_Test_Split',
default_store=ModuleStoreEnum.Type.split,
),
}
        self.course = self.courses_by_store[ModuleStoreEnum.Type.mongo]
self.student = UserFactory.create()
self.staff = UserFactory.create(is_staff=True)
UserBasedRole(user=self.staff, role=CourseStaffRole.ROLE).add_course(self.course.id)
CourseEnrollmentFactory.create(user=self.student, course_id=self.course.id)
self.category = ItemFactory.create(
parent_location=self.course.location,
category='discussion',
discussion_id=self.CATEGORY_ID,
discussion_category=self.PARENT_CATEGORY_NAME,
discussion_target=self.CATEGORY_NAME,
)
self.team_category = ItemFactory.create(
parent_location=self.course.location,
category='discussion',
discussion_id=self.TEAM_CATEGORY_ID,
discussion_category=self.TEAM_PARENT_CATEGORY_NAME,
discussion_target=self.TEAM_CATEGORY_NAME,
)
self.team = CourseTeamFactory.create(
name='Team 1',
course_id=self.course.id,
topic_id='arbitrary-topic-id',
discussion_topic_id=self.team_category.discussion_id,
)
def test_missing_context(self):
event = _create_event(include_context=False)
with self.assertRaises(EventEmissionExit):
_get_transformed_event(event)
def test_no_data(self):
event, event_trans = _create_and_transform_event()
event['name'] = 'edx.forum.thread.viewed'
event['event_type'] = event['name']
event['event'] = {}
self.assertDictEqual(event_trans, event)
def test_inner_context(self):
_, event_trans = _create_and_transform_event(inner_context={})
self.assertNotIn('context', event_trans['event'])
def test_non_thread_view(self):
event = _create_event(
label='Forum: Create Thread',
course_id=self.course.id,
topic_id=self.DUMMY_CATEGORY_ID,
thread_id=self.DUMMY_THREAD_ID,
)
with self.assertRaises(EventEmissionExit):
_get_transformed_event(event)
def test_bad_field_types(self):
event, event_trans = _create_and_transform_event(
course_id={},
topic_id=3,
thread_id=object(),
action=3.14,
)
event['name'] = 'edx.forum.thread.viewed'
event['event_type'] = event['name']
self.assertDictEqual(event_trans, event)
def test_bad_course_id(self):
event, event_trans = _create_and_transform_event(course_id='non-existent-course-id')
event_data = event_trans['event']
self.assertNotIn('category_id', event_data)
self.assertNotIn('category_name', event_data)
self.assertNotIn('url', event_data)
self.assertNotIn('user_forums_roles', event_data)
self.assertNotIn('user_course_roles', event_data)
def test_bad_username(self):
event, event_trans = _create_and_transform_event(username='non-existent-username')
event_data = event_trans['event']
self.assertNotIn('category_id', event_data)
self.assertNotIn('category_name', event_data)
self.assertNotIn('user_forums_roles', event_data)
self.assertNotIn('user_course_roles', event_data)
def test_bad_url(self):
event, event_trans = _create_and_transform_event(
course_id=self.course.id,
topic_id='malformed/commentable/id',
thread_id='malformed/thread/id',
)
self.assertNotIn('url', event_trans['event'])
def test_renamed_fields(self):
AUTHOR = 'joe-the-plumber'
event, event_trans = _create_and_transform_event(
course_id=self.course.id,
topic_id=self.DUMMY_CATEGORY_ID,
thread_id=self.DUMMY_THREAD_ID,
author=AUTHOR,
)
self.assertEqual(event_trans['event']['commentable_id'], self.DUMMY_CATEGORY_ID)
self.assertEqual(event_trans['event']['id'], self.DUMMY_THREAD_ID)
self.assertEqual(event_trans['event']['target_username'], AUTHOR)
def test_titles(self):
# No title
_, event_1_trans = _create_and_transform_event()
self.assertNotIn('title', event_1_trans['event'])
self.assertNotIn('title_truncated', event_1_trans['event'])
# Short title
_, event_2_trans = _create_and_transform_event(
action='!',
)
self.assertIn('title', event_2_trans['event'])
self.assertIn('title_truncated', event_2_trans['event'])
self.assertFalse(event_2_trans['event']['title_truncated'])
# Long title
_, event_3_trans = _create_and_transform_event(
action=('covfefe' * 200),
)
self.assertIn('title', event_3_trans['event'])
self.assertIn('title_truncated', event_3_trans['event'])
self.assertTrue(event_3_trans['event']['title_truncated'])
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_urls(self, store):
course = self.courses_by_store[store]
commentable_id = self.DUMMY_CATEGORY_ID
thread_id = self.DUMMY_THREAD_ID
_, event_trans = _create_and_transform_event(
course_id=course.id,
topic_id=commentable_id,
thread_id=thread_id,
)
expected_path = '/courses/{0}/discussion/forum/{1}/threads/{2}'.format(
course.id, commentable_id, thread_id
)
self.assertTrue(event_trans['event'].get('url').endswith(expected_path))
def test_categories(self):
# Bad category
_, event_trans_1 = _create_and_transform_event(
username=self.student.username,
course_id=self.course.id,
topic_id='non-existent-category-id',
)
self.assertNotIn('category_id', event_trans_1['event'])
self.assertNotIn('category_name', event_trans_1['event'])
# Good category
_, event_trans_2 = _create_and_transform_event(
username=self.student.username,
course_id=self.course.id,
topic_id=self.category.discussion_id,
)
self.assertEqual(event_trans_2['event'].get('category_id'), self.category.discussion_id)
full_category_name = u'{0} / {1}'.format(self.category.discussion_category, self.category.discussion_target)
self.assertEqual(event_trans_2['event'].get('category_name'), full_category_name)
def test_roles(self):
# No user
_, event_trans_1 = _create_and_transform_event(
course_id=self.course.id,
)
self.assertNotIn('user_forums_roles', event_trans_1['event'])
self.assertNotIn('user_course_roles', event_trans_1['event'])
# Student user
_, event_trans_2 = _create_and_transform_event(
course_id=self.course.id,
username=self.student.username,
)
self.assertEqual(event_trans_2['event'].get('user_forums_roles'), [FORUM_ROLE_STUDENT])
self.assertEqual(event_trans_2['event'].get('user_course_roles'), [])
# Course staff user
_, event_trans_3 = _create_and_transform_event(
course_id=self.course.id,
username=self.staff.username,
)
self.assertEqual(event_trans_3['event'].get('user_forums_roles'), [])
self.assertEqual(event_trans_3['event'].get('user_course_roles'), [CourseStaffRole.ROLE])
def test_teams(self):
# No category
_, event_trans_1 = _create_and_transform_event(
course_id=self.course.id,
)
self.assertNotIn('team_id', event_trans_1)
# Non-team category
_, event_trans_2 = _create_and_transform_event(
course_id=self.course.id,
topic_id=self.CATEGORY_ID,
)
self.assertNotIn('team_id', event_trans_2)
# Team category
_, event_trans_3 = _create_and_transform_event(
course_id=self.course.id,
topic_id=self.TEAM_CATEGORY_ID,
)
self.assertEqual(event_trans_3['event'].get('team_id'), self.team.team_id)
| agpl-3.0 | 6,711,102,984,654,132,000 | 38.870216 | 148 | 0.599713 | false |
zengchunyun/s12 | day10/ext/temp/echoclient.py | 1 | 1150 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: zengchunyun
"""
from twisted.internet import reactor, protocol
# a client protocol
class EchoClient(protocol.Protocol):
"""Once connected, send a message, then print the result."""
def connectionMade(self):
self.transport.write(bytes("hello alex!", "utf8"))
def dataReceived(self, data):
"As soon as any data is received, write it back."
print("Server said:", data)
self.transport.loseConnection()
def connectionLost(self, reason):
print("connection lost")
class EchoFactory(protocol.ClientFactory):
protocol = EchoClient
def clientConnectionFailed(self, connector, reason):
print("Connection failed - goodbye!")
reactor.stop()
def clientConnectionLost(self, connector, reason):
print("Connection lost - goodbye!")
reactor.stop()
# this connects the protocol to a server running on port 8000
def main():
f = EchoFactory()
reactor.connectTCP("localhost", 1234, f)
reactor.run()
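# --- Illustrative sketch, not part of the original script ---
# A minimal server the client above could talk to on port 1234; the class and
# factory names here are made up for the example.
class SketchEchoServer(protocol.Protocol):
    def dataReceived(self, data):
        # send every received byte straight back to the client
        self.transport.write(data)
# To run it instead of the client, one could do:
#   factory = protocol.ServerFactory()
#   factory.protocol = SketchEchoServer
#   reactor.listenTCP(1234, factory)
#   reactor.run()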
# this only runs if the module was *not* imported
if __name__ == '__main__':
main()
| gpl-2.0 | -5,031,809,358,392,325,000 | 24 | 64 | 0.658261 | false |
DouFM/wang_fm | storage/music.py | 1 | 1709 | #!/usr/bin/env python
# encoding: utf-8
import mongoengine
from .base import BaseMongoStorage
from config import DB_HOST, DB_PORT, DB_NAME
mongoengine.connect(DB_NAME, host=DB_HOST, port=DB_PORT)
class MusicStorage(BaseMongoStorage, mongoengine.Document):
"""store music info
key str
title str
artist str
album str
company str
public_time str
kbps str
cover file
audio file
upload_date datetime
uuid str
"""
title = mongoengine.StringField(max_length=256, default='')
artist = mongoengine.StringField(max_length=256, default='')
album = mongoengine.StringField(max_length=256, default='')
company = mongoengine.StringField(max_length=256, default='')
public_time = mongoengine.StringField(max_length=10, default='')
kbps = mongoengine.StringField(max_length=5, default='')
cover = mongoengine.FileField()
audio = mongoengine.FileField()
upload_date = mongoengine.DateTimeField()
uuid = mongoengine.StringField(unique=True)
meta = {
'ordering': ['-upload_date']
}
def __str__(self):
return 'title=%s, artist=%s, album=%s' % (self.title, self.artist, self.album)
def delete(self):
self.cover.delete()
self.audio.delete()
super(MusicStorage, self).delete()
def update(self, **kwargs):
cover = kwargs.pop('cover', None)
audio = kwargs.pop('audio', None)
if cover:
self.cover.replace(cover)
if audio:
self.audio.replace(audio)
self.save()
super(MusicStorage, self).update(**kwargs)
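# Illustrative sketch (not part of the original module): how a track built on
# this schema might be stored and fetched; all field values are made up.
def _example_usage():
    import datetime
    import uuid as uuid_mod
    song = MusicStorage(
        title='demo title',
        artist='demo artist',
        upload_date=datetime.datetime.utcnow(),
        uuid=str(uuid_mod.uuid4()),
    )
    song.save()
    # standard mongoengine query on the unique uuid field
    return MusicStorage.objects(uuid=song.uuid).first()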
| mit | -6,727,684,040,444,985,000 | 28.982456 | 86 | 0.607958 | false |
MarcoVogt/basil | tests/test_RegisterHardwareLayer.py | 1 | 9389 | #
# ------------------------------------------------------------
# Copyright (c) All rights reserved
# SiLab, Institute of Physics, University of Bonn
# ------------------------------------------------------------
#
import unittest
from basil.dut import Dut
from basil.HL.RegisterHardwareLayer import RegisterHardwareLayer
import os
_test_init = {
'REG_TEST_INIT': 15,
'REG1': 120,
'REG_BYTE_ARRAY': [4, 3, 2, 1]
}
class test_RegisterHardwareLayer(RegisterHardwareLayer):
'''Register Hardware Layer.
Implementation of advanced register operations.
'''
_registers = {
'REG1': {'default': 12, 'descr': {'addr': 0, 'size': 15, 'offset': 0}},
'REG2': {'default': 1, 'descr': {'addr': 1, 'size': 1, 'offset': 7}},
'REG3': {'default': 2 ** 16 - 1, 'descr': {'addr': 2, 'size': 16, 'offset': 0}},
'REG4_RO': {'default': 0, 'descr': {'addr': 4, 'size': 8, 'properties': ['readonly']}},
'REG5_WO': {'default': 0, 'descr': {'addr': 5, 'size': 8, 'properties': ['writeonly']}},
'REG_TEST_INIT': {'descr': {'addr': 6, 'size': 8}},
'REG_BYTE_ARRAY': {'default': [1, 2, 3, 4], 'descr': {'addr': 16, 'size': 4, 'properties': ['bytearray']}}
}
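    # For reference, the descriptors above map onto the byte-addressed memory
    # used in the tests below: REG1 (addr 0, 15 bits) keeps its default 12 in
    # mem[0]; REG2 (addr 1, offset 7) stores its default 1 as 1 << 7 = 128 in
    # mem[1]; REG3 (addr 2, 16 bits) spreads 0xFFFF over mem[2] and mem[3],
    # which is why set_default() yields {0: 12, 1: 128, 2: 255, 3: 255, ...}.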
class TestRegisterHardwareLayer(unittest.TestCase):
def setUp(self):
self.dut = Dut(os.path.join(os.path.dirname(__file__), 'test_RegisterHardwareLayer.yaml'))
self.dut.init()
def test_init_non_existing(self):
with self.assertRaises(KeyError):
self.dut.init({"test_register": {"NON_EXISTING": 1}})
def test_lazy_programming(self):
self.dut['test_register'].set_default()
self.assertDictEqual({0: 12, 1: 128, 2: 255, 3: 255, 5: 0, 16: 1, 17: 2, 18: 3, 19: 4}, self.dut['dummy_tl'].mem)
self.dut['test_register'].REG5_WO = 255
self.assertDictEqual({0: 12, 1: 128, 2: 255, 3: 255, 5: 255, 16: 1, 17: 2, 18: 3, 19: 4}, self.dut['dummy_tl'].mem)
self.dut['test_register'].REG5_WO # get value from write-only register, but this will write zero instead
self.assertDictEqual({0: 12, 1: 128, 2: 255, 3: 255, 5: 0, 16: 1, 17: 2, 18: 3, 19: 4}, self.dut['dummy_tl'].mem)
def test_get_configuration(self):
self.dut.set_configuration(os.path.join(os.path.dirname(__file__), 'test_RegisterHardwareLayer_configuration.yaml'))
conf = self.dut['test_register'].get_configuration()
self.assertDictEqual({'REG1': 257, 'REG2': 1, 'REG3': 2, 'REG_TEST_INIT': 0, 'REG_BYTE_ARRAY': [1, 2, 3, 4]}, conf)
def test_set_configuration(self):
self.dut.set_configuration(os.path.join(os.path.dirname(__file__), 'test_RegisterHardwareLayer_configuration.yaml'))
self.assertDictEqual({0: 1, 1: 129, 2: 2, 3: 0, 5: 5, 16: 1, 17: 2, 18: 3, 19: 4}, self.dut['dummy_tl'].mem)
def test_set_configuration_non_existing(self):
with self.assertRaises(KeyError):
self.dut.set_configuration({"test_register": {"NON_EXISTING": 1}})
def test_read_only(self):
self.assertRaises(IOError, self.dut['test_register']._set, 'REG4_RO', value=0)
# def test_write_only(self):
# self.assertRaises(IOError, self.dut['test_register']._get, 'REG5_WO')
def test_write_only_lazy_programming(self):
self.dut['test_register'].set_default()
self.assertDictEqual({0: 12, 1: 128, 2: 255, 3: 255, 5: 0, 16: 1, 17: 2, 18: 3, 19: 4}, self.dut['dummy_tl'].mem)
self.dut['test_register'].REG5_WO = 20
self.assertDictEqual({0: 12, 1: 128, 2: 255, 3: 255, 5: 20, 16: 1, 17: 2, 18: 3, 19: 4}, self.dut['dummy_tl'].mem)
self.dut['test_register'].REG5_WO
self.assertDictEqual({0: 12, 1: 128, 2: 255, 3: 255, 5: 0, 16: 1, 17: 2, 18: 3, 19: 4}, self.dut['dummy_tl'].mem)
self.assertIs(None, self.dut['test_register']._get('REG5_WO'))
def test_set_default(self):
self.dut['test_register'].set_default()
self.assertDictEqual({0: 12, 1: 128, 2: 255, 3: 255, 5: 0, 16: 1, 17: 2, 18: 3, 19: 4}, self.dut['dummy_tl'].mem)
def test_set_attribute_add(self):
val = self.dut['test_register']._registers['REG1']['default']
self.dut['test_register'].REG1 = val # 12
mem = self.dut['dummy_tl'].mem.copy()
self.dut['test_register'].REG1 += 1 # 13
mem[0] = 13
self.assertDictEqual(mem, self.dut['dummy_tl'].mem)
def test_write_read_reg(self):
for reg in ['REG1', 'REG2', 'REG3']:
val = self.dut['test_register']._registers[reg]['default']
self.dut['test_register']._set(reg, val)
ret_val = self.dut['test_register']._get(reg)
self.assertEqual(ret_val, val)
self.assertDictEqual({0: 12, 1: 128, 2: 255, 3: 255, 5: 0, 16: 1, 17: 2, 18: 3, 19: 4}, self.dut['dummy_tl'].mem)
def test_set_attribute_by_value(self):
self.dut['test_register'].set_default()
self.assertDictEqual({0: 12, 1: 128, 2: 255, 3: 255, 5: 0, 16: 1, 17: 2, 18: 3, 19: 4}, self.dut['dummy_tl'].mem)
self.dut['test_register'].REG2 = 0
mem = self.dut['dummy_tl'].mem.copy()
mem[1] = 0
self.assertDictEqual(mem, self.dut['dummy_tl'].mem)
def test_set_attribute_by_string(self):
mem = self.dut['dummy_tl'].mem.copy()
        self.dut['test_register'].REG3 = '1010101010101010'  # 0xAAAA == 43690
mem[2] = 170
mem[3] = 170
self.assertDictEqual(mem, self.dut['dummy_tl'].mem)
def test_get_attribute_by_string(self):
self.dut['test_register'].REG3 = '1010101010101010' # 43690
self.assertEqual(43690, self.dut['test_register'].REG3)
def test_set_attribute_too_long_string(self):
val = '11010101010101010' # 17 bit
self.assertRaises(ValueError, self.dut['test_register']._set, 'REG3', value=val)
def test_set_attribute_dict_access(self):
self.dut['test_register']['REG1'] = 27306 # 27306
self.assertEqual(27306, self.dut['test_register']['REG1'])
def test_set_attribute_too_big_val(self):
val = 2 ** 16 # max 2 ** 16 - 1
self.assertRaises(ValueError, self.dut['test_register']._set, 'REG3', value=val)
def test_set_by_function(self):
self.dut['test_register'].set_REG1(27308)
self.assertEqual(27308, self.dut['test_register']['REG1'])
def test_get_by_function(self):
        self.dut['test_register']['REG1'] = 27305  # 27305
ret = self.dut['test_register'].get_REG1()
self.assertEqual(ret, self.dut['test_register']['REG1'])
def test_init_with_dict(self):
self.dut['test_register'].set_default()
self.dut.init({'test_register': _test_init})
conf = self.dut.get_configuration()
self.assertDictEqual({'test_register': {'REG1': 120, 'REG2': 1, 'REG3': 65535, 'REG_TEST_INIT': 15, 'REG_BYTE_ARRAY': [4, 3, 2, 1]}, 'dummy_tl': {}}, conf)
def test_get_dut_configuration(self):
self.dut['test_register'].set_default()
conf = self.dut.get_configuration()
self.assertDictEqual({'test_register': {'REG1': 12, 'REG2': 1, 'REG3': 65535, 'REG_TEST_INIT': 0, 'REG_BYTE_ARRAY': [1, 2, 3, 4]}, 'dummy_tl': {}}, conf)
def test_get_set_value(self):
for val in range(256):
self.dut['test_register'].set_value(val, 0, size=8, offset=0)
ret_val = self.dut['test_register'].get_value(0, size=8, offset=0)
self.assertEqual(ret_val, val)
def test_write_read_reg_with_bit_str(self):
val = '00110110' # 54
self.dut['test_register'].set_value(val, 0, size=8, offset=0)
ret_val = self.dut['test_register'].get_value(0, size=8, offset=0)
self.assertEqual(ret_val, int(val, base=2))
def test_write_read_reg_with_offset(self):
for offset in range(32):
val = 131
self.dut['test_register'].set_value(val, 0, size=8, offset=offset)
ret_val = self.dut['test_register'].get_value(0, size=8, offset=offset)
self.assertEqual(ret_val, val)
def test_write_read_reg_with_size(self):
for size in range(8, 33):
val = 131
self.dut['test_register'].set_value(val, 0, size=size, offset=7)
ret_val = self.dut['test_register'].get_value(0, size=size, offset=7)
self.assertEqual(ret_val, val)
def test_read_non_existing(self):
with self.assertRaises(KeyError):
self.dut['test_register'].NON_EXISTING
with self.assertRaises(KeyError):
self.dut['test_register']['NON_EXISTING']
with self.assertRaises(KeyError):
self.dut['test_register'].get_NON_EXISTING()
def test_write_non_existing(self):
with self.assertRaises(KeyError):
self.dut['test_register'].NON_EXISTING = 42
with self.assertRaises(KeyError):
self.dut['test_register']['NON_EXISTING'] = 42
with self.assertRaises(KeyError):
self.dut['test_register'].set_NON_EXISTING(42)
def test_wrong_size(self):
self.assertRaises(ValueError, self.dut['test_register'].set_value, 131, addr=0, size=7, offset=7)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | -6,639,450,954,130,615,000 | 45.180905 | 163 | 0.575248 | false |
m3z/HT | openstack_dashboard/dashboards/admin/instances/tests.py | 1 | 6158 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django import http
from django.core.urlresolvers import reverse
from django.utils.datastructures import SortedDict
from mox import IsA
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
class InstanceViewTest(test.BaseAdminViewTests):
@test.create_stubs({api.nova: ('flavor_list', 'server_list',),
api.keystone: ('tenant_list',)})
def test_index(self):
servers = self.servers.list()
flavors = self.flavors.list()
tenants = self.tenants.list()
api.keystone.tenant_list(IsA(http.HttpRequest), admin=True).\
AndReturn(tenants)
api.nova.server_list(IsA(http.HttpRequest),
all_tenants=True).AndReturn(servers)
api.nova.flavor_list(IsA(http.HttpRequest)).AndReturn(flavors)
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:admin:instances:index'))
self.assertTemplateUsed(res, 'admin/instances/index.html')
instances = res.context['table'].data
self.assertItemsEqual(instances, servers)
@test.create_stubs({api.nova: ('flavor_list', 'flavor_get',
'server_list',),
api.keystone: ('tenant_list',)})
def test_index_flavor_list_exception(self):
servers = self.servers.list()
tenants = self.tenants.list()
flavors = self.flavors.list()
full_flavors = SortedDict([(f.id, f) for f in flavors])
api.nova.server_list(IsA(http.HttpRequest),
all_tenants=True).AndReturn(servers)
api.nova.flavor_list(IsA(http.HttpRequest)). \
AndRaise(self.exceptions.nova)
api.keystone.tenant_list(IsA(http.HttpRequest), admin=True).\
AndReturn(tenants)
for server in servers:
api.nova.flavor_get(IsA(http.HttpRequest), server.flavor["id"]). \
AndReturn(full_flavors[server.flavor["id"]])
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:admin:instances:index'))
self.assertTemplateUsed(res, 'admin/instances/index.html')
instances = res.context['table'].data
self.assertItemsEqual(instances, servers)
@test.create_stubs({api.nova: ('flavor_list', 'flavor_get',
'server_list',),
api.keystone: ('tenant_list',)})
def test_index_flavor_get_exception(self):
servers = self.servers.list()
flavors = self.flavors.list()
tenants = self.tenants.list()
max_id = max([int(flavor.id) for flavor in flavors])
for server in servers:
max_id += 1
server.flavor["id"] = max_id
api.nova.server_list(IsA(http.HttpRequest),
all_tenants=True).AndReturn(servers)
api.nova.flavor_list(IsA(http.HttpRequest)). \
AndReturn(flavors)
api.keystone.tenant_list(IsA(http.HttpRequest), admin=True).\
AndReturn(tenants)
for server in servers:
api.nova.flavor_get(IsA(http.HttpRequest), server.flavor["id"]). \
AndRaise(self.exceptions.nova)
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:admin:instances:index'))
instances = res.context['table'].data
self.assertTemplateUsed(res, 'admin/instances/index.html')
self.assertMessageCount(res, error=len(servers))
self.assertItemsEqual(instances, servers)
@test.create_stubs({api.nova: ('server_list',)})
def test_index_server_list_exception(self):
api.nova.server_list(IsA(http.HttpRequest),
all_tenants=True).AndRaise(self.exceptions.nova)
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:admin:instances:index'))
self.assertTemplateUsed(res, 'admin/instances/index.html')
self.assertEqual(len(res.context['instances_table'].data), 0)
@test.create_stubs({api: ('server_get', 'flavor_get',),
api.keystone: ('tenant_get',)})
def test_ajax_loading_instances(self):
server = self.servers.first()
flavor = self.flavors.list()[0]
tenant = self.tenants.list()[0]
api.server_get(IsA(http.HttpRequest), server.id).AndReturn(server)
api.flavor_get(IsA(http.HttpRequest),
server.flavor['id']).AndReturn(flavor)
api.keystone.tenant_get(IsA(http.HttpRequest),
server.tenant_id,
admin=True).AndReturn(tenant)
self.mox.ReplayAll()
url = reverse('horizon:admin:instances:index') + \
"?action=row_update&table=instances&obj_id=" + server.id
res = self.client.get(url, {},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertTemplateUsed(res, "horizon/common/_data_table_row.html")
self.assertContains(res, "test_tenant", 1, 200)
self.assertContains(res, "instance-host", 1, 200)
self.assertContains(res, "server_1", 1, 200)
self.assertContains(res, "10.0.0.1", 1, 200)
self.assertContains(res, "512MB RAM | 1 VCPU | 0 Disk", 1, 200)
self.assertContains(res, "Active", 1, 200)
self.assertContains(res, "Running", 1, 200)
| apache-2.0 | -3,204,556,978,701,150,700 | 42.985714 | 78 | 0.603118 | false |
jbalm/ActuarialCashFlowModel | liability/liability_data/Liabilities_data.py | 1 | 3997 | ## Progam packages
from ..Model_Point import Model_Point
## Python packages
import datetime as dt
from xlrd import open_workbook
import xlrd
import numpy as np
import xlwings as xw
class Liabilities_data(object):
"""
Objective:
==========
This class is meant to build up the policyholders database
Attributes:
===========
1. model_points:
Type: array
Function: collection of the model points characterized by its id.
Methods:
========
1. update:
"""
def __init__(self):
self.model_points = []
def update(self,path):
"""
Method: update
        Function: updates data from an Excel workbook such as "data\Liability_Data.xls".
Parameter:
1. path:
Type: string
                Function: path to the Excel workbook containing the liability model points; the file must be reachable from the working directory of the main script.
"""
wb2 = open_workbook(path)
sheet = wb2.sheet_by_name("MP_test")
number_of_rows = sheet.nrows
mdp = Model_Point()
mdp.id = str(xw.sheets['MP_test'].range('B4').value)
mdp.average_age = int(xw.sheets['MP_test'].range('B5').value)
mdp.sexe = str(xw.sheets['MP_test'].range('B6').value)
# ========================================================================================
        # Subscription Date
# ========================================================================================
        assert sheet.cell(6,1).ctype == 3, 'Subscription Date must be datetime type'
ms_date_number = sheet.cell(6,1).value
year, month, day, hour, minute, second = xlrd.xldate_as_tuple(ms_date_number,wb2.datemode)
mdp.subscription_date = dt.datetime(year, month, day)
# ========================================================================================
# Valuation Date
# ========================================================================================
assert sheet.cell(7,1).ctype == 3, 'Valuation Date must be datetime type'
ms_date_number = sheet.cell(7,1).value
year, month, day, hour, minute, second = xlrd.xldate_as_tuple(ms_date_number,wb2.datemode)
mdp.valuation_date = dt.datetime(year, month, day)
mdp.get_seniority()
# =======================================================================================
mdp.premium = xw.sheets['MP_test'].range('B9').value
mdp.actual_math_provision = xw.sheets['MP_test'].range('B10').value
mdp.mathematical_provision.append(mdp.actual_math_provision)
# ===============================================================
# get TMG
mdp.TMG_type = xw.sheets['MP_test'].range('B11').value
mdp.TMG = mdp.TMG_type * np.ones(100)
# ===============================================================
mdp.rate_sensibility = xw.sheets['MP_test'].range('B12').value
mdp.margin_rate = xw.sheets['MP_test'].range('B13').value
mdp.number_contract = xw.sheets['MP_test'].range('B14').value
# ===============================================================
# get lapse rate
mdp.lapse_type = xw.sheets['MP_test'].range('B15').value
mdp.lapse_rate = mdp.lapse_type * np.ones(100)
# ===============================================================
mortality_rate = []
for row in range(3, number_of_rows):
mort_rate = (sheet.cell(row, 4).value)
mortality_rate.append(mort_rate)
mdp.mortality_rate = np.concatenate((mortality_rate, [mortality_rate[-1] for t in range(100)]), axis = 0)
self.model_points.append(mdp)
def affiche(self):
for mdl_point in self.model_points:
print(mdl_point)
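# Illustrative sketch (not part of the original module): typical use of the
# loader above; the workbook path is an assumption taken from the docstring.
def _example_load(path=r'data\Liability_Data.xls'):
    data = Liabilities_data()
    data.update(path)    # reads the "MP_test" sheet into data.model_points
    data.affiche()       # prints each model point
    return data.model_points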
| gpl-3.0 | -3,551,808,631,612,540,000 | 41.084211 | 193 | 0.470603 | false |
faircloth-lab/sh_t | sh_t/core.py | 1 | 3611 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
(c) 2014 Brant Faircloth || http://faircloth-lab.org/
All rights reserved.
This code is distributed under a 3-clause BSD license. Please see
LICENSE.txt for more information.
Created on 21 April 2014 20:54 PDT (-0700)
"""
import os
import sys
import glob
import shutil
import argparse
import subprocess
from Bio import AlignIO
import pdb
class FullPaths(argparse.Action):
"""Expand user- and relative-paths"""
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, os.path.abspath(os.path.expanduser(values)))
class CreateDir(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
# get the full path
d = os.path.abspath(os.path.expanduser(values))
# check to see if directory exists
if os.path.exists(d):
answer = raw_input("[WARNING] Output directory exists, REMOVE [Y/n]? ")
if answer == "Y":
shutil.rmtree(d)
else:
print "[QUIT]"
sys.exit()
# create the new directory
os.makedirs(d)
# return the full path
setattr(namespace, self.dest, d)
class GroupError(Exception):
def __init__(self, message, group, alignment):
# Call the base class constructor with the parameters it needs
Exception.__init__(self, message)
# Now for your custom code...
self.group = group
self.alignment = alignment
def is_dir(dirname):
if not os.path.isdir(dirname):
msg = "{0} is not a directory".format(dirname)
raise argparse.ArgumentTypeError(msg)
else:
return dirname
def is_file(filename):
    if not os.path.isfile(filename):
msg = "{0} is not a file".format(filename)
raise argparse.ArgumentTypeError(msg)
else:
return filename
def which(prog):
cmd = ["which", prog]
proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
stdout, stderr = proc.communicate()
if stderr:
raise EnvironmentError("Program {} does not appear to be installed")
else:
return stdout.strip()
def get_alignments(alignment_dir):
alignments = []
for ftype in ('.phylip', '.phy'):
alignments.extend(glob.glob(os.path.join(alignment_dir, "*{}".format(ftype))))
return alignments
def satisfy_one_taxon_group(taxa_in_align, taxon_group):
    if not isinstance(taxon_group, list):
        raise AssertionError("Taxon group is not a list.")
group_set = set(taxon_group)
# ensure there is at least one member in each group
if len(taxa_in_align.intersection(group_set)) >= 1:
return True
else:
return False
def get_taxa_in_alignment(alignment):
aln = AlignIO.read(alignment, "phylip-relaxed")
taxa_in_align = set([taxon.id for taxon in aln])
return taxa_in_align
def satisfy_all_taxon_groups(alignment, taxon_groups):
"""given an input alignment, see if any taxa in list are in file"""
taxa_in_align = get_taxa_in_alignment(alignment)
taxa_present = []
for group_name, taxon_group in taxon_groups.iteritems():
if satisfy_one_taxon_group(taxa_in_align, taxon_group):
taxa_present.append(True)
else:
taxa_present.append(False)
if all(taxa_present):
return True
else:
raise GroupError(
"Not all taxa present in Group",
group_name,
os.path.basename(alignment),
)
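# Illustrative sketch (not part of the original module): taxon_groups is
# expected to be a mapping of group name -> list of taxon ids; the names
# below are made up.
def _example_check(alignment='uce-1.phylip'):
    taxon_groups = {
        'ingroup': ['taxon_a', 'taxon_b'],
        'outgroup': ['taxon_c'],
    }
    # True only when every group has at least one taxon in the alignment;
    # otherwise a GroupError naming the offending group is raised.
    return satisfy_all_taxon_groups(alignment, taxon_groups)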
| bsd-3-clause | -438,796,830,149,700,200 | 26.356061 | 86 | 0.626696 | false |
common-workflow-language/cwltool | cwltool/main.py | 1 | 50216 | #!/usr/bin/env python3
# PYTHON_ARGCOMPLETE_OK
"""Entry point for cwltool."""
import argparse
import functools
import io
import logging
import os
import signal
import subprocess # nosec
import sys
import time
import urllib
import warnings
from codecs import StreamWriter, getwriter
from collections.abc import MutableMapping, MutableSequence
from typing import (
IO,
Any,
Callable,
Dict,
List,
Mapping,
MutableMapping,
MutableSequence,
Optional,
Sized,
TextIO,
Tuple,
Union,
cast,
)
import argcomplete
import coloredlogs
import pkg_resources # part of setuptools
import ruamel.yaml
from ruamel.yaml.comments import CommentedMap, CommentedSeq
from ruamel.yaml.main import YAML
from schema_salad.exceptions import ValidationException
from schema_salad.ref_resolver import Loader, file_uri, uri_file_path
from schema_salad.sourceline import strip_dup_lineno
from schema_salad.utils import ContextType, FetcherCallableType, json_dumps
from . import CWL_CONTENT_TYPES, workflow
from .argparser import arg_parser, generate_parser, get_default_args
from .builder import HasReqsHints
from .context import LoadingContext, RuntimeContext, getdefault
from .cwlrdf import printdot, printrdf
from .errors import ArgumentException, UnsupportedRequirement, WorkflowException
from .executors import JobExecutor, MultithreadedJobExecutor, SingleJobExecutor
from .load_tool import (
default_loader,
fetch_document,
jobloaderctx,
load_overrides,
make_tool,
resolve_and_validate_document,
resolve_overrides,
resolve_tool_uri,
)
from .loghandler import _logger, defaultStreamHandler
from .mpi import MpiConfig
from .mutation import MutationManager
from .pack import pack
from .process import (
CWL_IANA,
Process,
add_sizes,
scandeps,
shortname,
use_custom_schema,
use_standard_schema,
)
from .procgenerator import ProcessGenerator
from .provenance import ResearchObject, WritableBagFile
from .resolver import ga4gh_tool_registries, tool_resolver
from .secrets import SecretStore
from .software_requirements import (
DependenciesConfiguration,
get_container_from_software_requirements,
)
from .stdfsaccess import StdFsAccess
from .subgraph import get_process, get_step, get_subgraph
from .update import ALLUPDATES, UPDATES
from .utils import (
DEFAULT_TMP_PREFIX,
CWLObjectType,
CWLOutputAtomType,
CWLOutputType,
adjustDirObjs,
normalizeFilesDirs,
processes_to_kill,
trim_listing,
versionstring,
visit_class,
)
from .workflow import Workflow
def _terminate_processes() -> None:
"""Kill all spawned processes.
Processes to be killed must be appended to `utils.processes_to_kill`
as they are spawned.
An important caveat: since there's no supported way to kill another
thread in Python, this function cannot stop other threads from
continuing to execute while it kills the processes that they've
spawned. This may occasionally lead to unexpected behaviour.
"""
# It's possible that another thread will spawn a new task while
# we're executing, so it's not safe to use a for loop here.
while processes_to_kill:
process = processes_to_kill.popleft()
cidfile = [
str(arg).split("=")[1] for arg in process.args if "--cidfile" in str(arg)
]
if cidfile:
try:
with open(cidfile[0]) as inp_stream:
p = subprocess.Popen( # nosec
["docker", "kill", inp_stream.read()], shell=False # nosec
)
try:
p.wait(timeout=10)
except subprocess.TimeoutExpired:
p.kill()
except FileNotFoundError:
pass
def _signal_handler(signum: int, _: Any) -> None:
"""Kill all spawned processes and exit.
Note that it's possible for another thread to spawn a process after
all processes have been killed, but before Python exits.
Refer to the docstring for _terminate_processes() for other caveats.
"""
_terminate_processes()
sys.exit(signum)
def generate_example_input(
inptype: Optional[CWLOutputType],
default: Optional[CWLOutputType],
) -> Tuple[Any, str]:
"""Convert a single input schema into an example."""
example = None
comment = ""
defaults = {
"null": "null",
"Any": "null",
"boolean": False,
"int": 0,
"long": 0,
"float": 0.1,
"double": 0.1,
"string": "a_string",
"File": ruamel.yaml.comments.CommentedMap(
[("class", "File"), ("path", "a/file/path")]
),
"Directory": ruamel.yaml.comments.CommentedMap(
[("class", "Directory"), ("path", "a/directory/path")]
),
} # type: CWLObjectType
if isinstance(inptype, MutableSequence):
optional = False
if "null" in inptype:
inptype.remove("null")
optional = True
if len(inptype) == 1:
example, comment = generate_example_input(inptype[0], default)
if optional:
if comment:
comment = f"{comment} (optional)"
else:
comment = "optional"
else:
example = CommentedSeq()
for index, entry in enumerate(inptype):
value, e_comment = generate_example_input(entry, default)
example.append(value)
example.yaml_add_eol_comment(e_comment, index)
if optional:
comment = "optional"
elif isinstance(inptype, Mapping) and "type" in inptype:
if inptype["type"] == "array":
first_item = cast(MutableSequence[CWLObjectType], inptype["items"])[0]
items_len = len(cast(Sized, inptype["items"]))
if items_len == 1 and "type" in first_item and first_item["type"] == "enum":
# array of just an enum then list all the options
example = first_item["symbols"]
if "name" in first_item:
comment = 'array of type "{}".'.format(first_item["name"])
else:
value, comment = generate_example_input(inptype["items"], None)
comment = "array of " + comment
if items_len == 1:
example = [value]
else:
example = value
if default is not None:
example = default
elif inptype["type"] == "enum":
symbols = cast(List[str], inptype["symbols"])
if default is not None:
example = default
elif "default" in inptype:
example = inptype["default"]
elif len(cast(Sized, inptype["symbols"])) == 1:
example = symbols[0]
else:
example = "{}_enum_value".format(inptype.get("name", "valid"))
comment = 'enum; valid values: "{}"'.format('", "'.join(symbols))
elif inptype["type"] == "record":
example = ruamel.yaml.comments.CommentedMap()
if "name" in inptype:
comment = '"{}" record type.'.format(inptype["name"])
for field in cast(List[CWLObjectType], inptype["fields"]):
value, f_comment = generate_example_input(field["type"], None)
example.insert(0, shortname(cast(str, field["name"])), value, f_comment)
elif "default" in inptype:
example = inptype["default"]
comment = 'default value of type "{}".'.format(inptype["type"])
else:
example = defaults.get(cast(str, inptype["type"]), str(inptype))
comment = 'type "{}".'.format(inptype["type"])
else:
if not default:
example = defaults.get(str(inptype), str(inptype))
comment = f'type "{inptype}"'
else:
example = default
comment = f'default value of type "{inptype}".'
return example, comment
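# For reference, two simple cases of the helper above:
#   generate_example_input("string", None) -> ("a_string", 'type "string"')
#   generate_example_input(["null", "int"], None) -> (0, 'type "int" (optional)')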
def realize_input_schema(
input_types: MutableSequence[CWLObjectType],
schema_defs: MutableMapping[str, CWLObjectType],
) -> MutableSequence[CWLObjectType]:
"""Replace references to named typed with the actual types."""
for index, entry in enumerate(input_types):
if isinstance(entry, str):
if "#" in entry:
_, input_type_name = entry.split("#")
else:
input_type_name = entry
if input_type_name in schema_defs:
entry = input_types[index] = schema_defs[input_type_name]
if isinstance(entry, Mapping):
if isinstance(entry["type"], str) and "#" in entry["type"]:
_, input_type_name = entry["type"].split("#")
if input_type_name in schema_defs:
input_types[index]["type"] = cast(
CWLOutputAtomType,
realize_input_schema(
cast(
MutableSequence[CWLObjectType],
schema_defs[input_type_name],
),
schema_defs,
),
)
if isinstance(entry["type"], MutableSequence):
input_types[index]["type"] = cast(
CWLOutputAtomType,
realize_input_schema(
cast(MutableSequence[CWLObjectType], entry["type"]), schema_defs
),
)
if isinstance(entry["type"], Mapping):
input_types[index]["type"] = cast(
CWLOutputAtomType,
realize_input_schema(
[cast(CWLObjectType, input_types[index]["type"])], schema_defs
),
)
if entry["type"] == "array":
items = (
entry["items"]
if not isinstance(entry["items"], str)
else [entry["items"]]
)
input_types[index]["items"] = cast(
CWLOutputAtomType,
realize_input_schema(
cast(MutableSequence[CWLObjectType], items), schema_defs
),
)
if entry["type"] == "record":
input_types[index]["fields"] = cast(
CWLOutputAtomType,
realize_input_schema(
cast(MutableSequence[CWLObjectType], entry["fields"]),
schema_defs,
),
)
return input_types
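# For reference: a field whose type is given as a name such as "#my_record"
# is replaced in place by the matching entry from schema_defs, recursing into
# arrays and record fields, so downstream code only sees concrete types.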
def generate_input_template(tool: Process) -> CWLObjectType:
"""Generate an example input object for the given CWL process."""
template = ruamel.yaml.comments.CommentedMap()
for inp in realize_input_schema(tool.tool["inputs"], tool.schemaDefs):
name = shortname(cast(str, inp["id"]))
value, comment = generate_example_input(inp["type"], inp.get("default", None))
template.insert(0, name, value, comment)
return template
def load_job_order(
args: argparse.Namespace,
stdin: IO[Any],
fetcher_constructor: Optional[FetcherCallableType],
overrides_list: List[CWLObjectType],
tool_file_uri: str,
) -> Tuple[Optional[CWLObjectType], str, Loader]:
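    """Load the job order from a file or stdin ("-"), returning it with its base directory and loader."""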
job_order_object = None
job_order_file = None
_jobloaderctx = jobloaderctx.copy()
loader = Loader(_jobloaderctx, fetcher_constructor=fetcher_constructor)
if len(args.job_order) == 1 and args.job_order[0][0] != "-":
job_order_file = args.job_order[0]
elif len(args.job_order) == 1 and args.job_order[0] == "-":
yaml = YAML()
job_order_object = yaml.load(stdin)
job_order_object, _ = loader.resolve_all(
job_order_object, file_uri(os.getcwd()) + "/"
)
else:
job_order_file = None
if job_order_object is not None:
input_basedir = args.basedir if args.basedir else os.getcwd()
elif job_order_file is not None:
input_basedir = (
args.basedir
if args.basedir
else os.path.abspath(os.path.dirname(job_order_file))
)
job_order_object, _ = loader.resolve_ref(
job_order_file,
checklinks=False,
content_types=CWL_CONTENT_TYPES,
)
if (
job_order_object is not None
and "http://commonwl.org/cwltool#overrides" in job_order_object
):
ov_uri = file_uri(job_order_file or input_basedir)
overrides_list.extend(
resolve_overrides(job_order_object, ov_uri, tool_file_uri)
)
del job_order_object["http://commonwl.org/cwltool#overrides"]
if job_order_object is None:
input_basedir = args.basedir if args.basedir else os.getcwd()
if job_order_object is not None and not isinstance(
job_order_object, MutableMapping
):
_logger.error(
"CWL input object at %s is not formatted correctly, it should be a "
"JSON/YAML dictionay, not %s.\n"
"Raw input object:\n%s",
job_order_file or "stdin",
type(job_order_object),
job_order_object,
)
sys.exit(1)
return (job_order_object, input_basedir, loader)
def init_job_order(
job_order_object: Optional[CWLObjectType],
args: argparse.Namespace,
process: Process,
loader: Loader,
stdout: Union[TextIO, StreamWriter],
print_input_deps: bool = False,
relative_deps: str = "primary",
make_fs_access: Callable[[str], StdFsAccess] = StdFsAccess,
input_basedir: str = "",
secret_store: Optional[SecretStore] = None,
input_required: bool = True,
) -> CWLObjectType:
secrets_req, _ = process.get_requirement("http://commonwl.org/cwltool#Secrets")
if job_order_object is None:
namemap = {} # type: Dict[str, str]
records = [] # type: List[str]
toolparser = generate_parser(
argparse.ArgumentParser(prog=args.workflow),
process,
namemap,
records,
input_required,
)
if args.tool_help:
toolparser.print_help()
exit(0)
cmd_line = vars(toolparser.parse_args(args.job_order))
for record_name in records:
record = {}
record_items = {
k: v for k, v in cmd_line.items() if k.startswith(record_name)
}
for key, value in record_items.items():
record[key[len(record_name) + 1 :]] = value
del cmd_line[key]
cmd_line[str(record_name)] = record
if "job_order" in cmd_line and cmd_line["job_order"]:
try:
job_order_object = cast(
CWLObjectType,
loader.resolve_ref(cmd_line["job_order"])[0],
)
except Exception:
_logger.exception(
"Failed to resolv job_order: %s", cmd_line["job_order"]
)
exit(1)
else:
job_order_object = {"id": args.workflow}
del cmd_line["job_order"]
job_order_object.update({namemap[k]: v for k, v in cmd_line.items()})
if secret_store and secrets_req:
secret_store.store(
[shortname(sc) for sc in cast(List[str], secrets_req["secrets"])],
job_order_object,
)
if _logger.isEnabledFor(logging.DEBUG):
_logger.debug(
"Parsed job order from command line: %s",
json_dumps(job_order_object, indent=4),
)
for inp in process.tool["inputs"]:
if "default" in inp and (
not job_order_object or shortname(inp["id"]) not in job_order_object
):
if not job_order_object:
job_order_object = {}
job_order_object[shortname(inp["id"])] = inp["default"]
if job_order_object is None:
if process.tool["inputs"]:
if toolparser is not None:
print(f"\nOptions for {args.workflow} ")
toolparser.print_help()
_logger.error("")
_logger.error("Input object required, use --help for details")
exit(1)
else:
job_order_object = {}
if print_input_deps:
basedir = None # type: Optional[str]
uri = cast(str, job_order_object["id"])
if uri == args.workflow:
basedir = os.path.dirname(uri)
uri = ""
printdeps(
job_order_object,
loader,
stdout,
relative_deps,
uri,
basedir=basedir,
nestdirs=False,
)
exit(0)
def path_to_loc(p: CWLObjectType) -> None:
if "location" not in p and "path" in p:
p["location"] = p["path"]
del p["path"]
ns = {} # type: ContextType
ns.update(cast(ContextType, job_order_object.get("$namespaces", {})))
ns.update(cast(ContextType, process.metadata.get("$namespaces", {})))
ld = Loader(ns)
def expand_formats(p: CWLObjectType) -> None:
if "format" in p:
p["format"] = ld.expand_url(cast(str, p["format"]), "")
visit_class(job_order_object, ("File", "Directory"), path_to_loc)
visit_class(
job_order_object,
("File",),
functools.partial(add_sizes, make_fs_access(input_basedir)),
)
visit_class(job_order_object, ("File",), expand_formats)
adjustDirObjs(job_order_object, trim_listing)
normalizeFilesDirs(job_order_object)
if secret_store and secrets_req:
secret_store.store(
[shortname(sc) for sc in cast(List[str], secrets_req["secrets"])],
job_order_object,
)
if "cwl:tool" in job_order_object:
del job_order_object["cwl:tool"]
if "id" in job_order_object:
del job_order_object["id"]
return job_order_object
def make_relative(base: str, obj: CWLObjectType) -> None:
"""Relativize the location URI of a File or Directory object."""
uri = cast(str, obj.get("location", obj.get("path")))
if ":" in uri.split("/")[0] and not uri.startswith("file://"):
pass
else:
if uri.startswith("file://"):
uri = uri_file_path(uri)
obj["location"] = os.path.relpath(uri, base)
def printdeps(
obj: CWLObjectType,
document_loader: Loader,
stdout: Union[TextIO, StreamWriter],
relative_deps: str,
uri: str,
basedir: Optional[str] = None,
nestdirs: bool = True,
) -> None:
"""Print a JSON representation of the dependencies of the CWL document."""
deps = find_deps(obj, document_loader, uri, basedir=basedir, nestdirs=nestdirs)
if relative_deps == "primary":
base = basedir if basedir else os.path.dirname(uri_file_path(str(uri)))
elif relative_deps == "cwd":
base = os.getcwd()
visit_class(deps, ("File", "Directory"), functools.partial(make_relative, base))
stdout.write(json_dumps(deps, indent=4))
def prov_deps(
obj: CWLObjectType,
document_loader: Loader,
uri: str,
basedir: Optional[str] = None,
) -> CWLObjectType:
deps = find_deps(obj, document_loader, uri, basedir=basedir)
def remove_non_cwl(deps: CWLObjectType) -> None:
if "secondaryFiles" in deps:
sec_files = cast(List[CWLObjectType], deps["secondaryFiles"])
for index, entry in enumerate(sec_files):
if not ("format" in entry and entry["format"] == CWL_IANA):
del sec_files[index]
else:
remove_non_cwl(entry)
remove_non_cwl(deps)
return deps
def find_deps(
obj: CWLObjectType,
document_loader: Loader,
uri: str,
basedir: Optional[str] = None,
nestdirs: bool = True,
) -> CWLObjectType:
"""Find the dependencies of the CWL document."""
deps = {
"class": "File",
"location": uri,
"format": CWL_IANA,
} # type: CWLObjectType
def loadref(base: str, uri: str) -> Union[CommentedMap, CommentedSeq, str, None]:
return document_loader.fetch(document_loader.fetcher.urljoin(base, uri))
sfs = scandeps(
basedir if basedir else uri,
obj,
{"$import", "run"},
{"$include", "$schemas", "location"},
loadref,
nestdirs=nestdirs,
)
if sfs is not None:
deps["secondaryFiles"] = cast(MutableSequence[CWLOutputAtomType], sfs)
return deps
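# The structure returned above mirrors a CWL File object, e.g.
#   {"class": "File", "location": "wf.cwl", "format": CWL_IANA,
#    "secondaryFiles": [{"class": "File", "location": "tool.cwl", ...}]}
# where secondaryFiles holds everything the document references.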
def print_pack(
loadingContext: LoadingContext,
uri: str,
) -> str:
"""Return a CWL serialization of the CWL document in JSON."""
packed = pack(loadingContext, uri)
if len(cast(Sized, packed["$graph"])) > 1:
return json_dumps(packed, indent=4)
return json_dumps(
cast(MutableSequence[CWLObjectType], packed["$graph"])[0], indent=4
)
def supported_cwl_versions(enable_dev: bool) -> List[str]:
# ALLUPDATES and UPDATES are dicts
if enable_dev:
versions = list(ALLUPDATES)
else:
versions = list(UPDATES)
versions.sort()
return versions
def configure_logging(
args: argparse.Namespace,
stderr_handler: logging.Handler,
runtimeContext: RuntimeContext,
) -> None:
rdflib_logger = logging.getLogger("rdflib.term")
rdflib_logger.addHandler(stderr_handler)
rdflib_logger.setLevel(logging.ERROR)
if args.quiet:
# Silence STDERR, not an eventual provenance log file
stderr_handler.setLevel(logging.WARN)
if runtimeContext.debug:
# Increase to debug for both stderr and provenance log file
_logger.setLevel(logging.DEBUG)
stderr_handler.setLevel(logging.DEBUG)
rdflib_logger.setLevel(logging.DEBUG)
fmtclass = coloredlogs.ColoredFormatter if args.enable_color else logging.Formatter
formatter = fmtclass("%(levelname)s %(message)s")
if args.timestamps:
formatter = fmtclass(
"[%(asctime)s] %(levelname)s %(message)s", "%Y-%m-%d %H:%M:%S"
)
stderr_handler.setFormatter(formatter)
def setup_schema(
args: argparse.Namespace, custom_schema_callback: Optional[Callable[[], None]]
) -> None:
if custom_schema_callback is not None:
custom_schema_callback()
elif args.enable_ext:
with pkg_resources.resource_stream(__name__, "extensions.yml") as res:
ext10 = res.read().decode("utf-8")
with pkg_resources.resource_stream(__name__, "extensions-v1.1.yml") as res:
ext11 = res.read().decode("utf-8")
use_custom_schema("v1.0", "http://commonwl.org/cwltool", ext10)
use_custom_schema("v1.1", "http://commonwl.org/cwltool", ext11)
use_custom_schema("v1.2.0-dev1", "http://commonwl.org/cwltool", ext11)
use_custom_schema("v1.2.0-dev2", "http://commonwl.org/cwltool", ext11)
use_custom_schema("v1.2.0-dev3", "http://commonwl.org/cwltool", ext11)
else:
use_standard_schema("v1.0")
use_standard_schema("v1.1")
use_standard_schema("v1.2.0-dev1")
use_standard_schema("v1.2.0-dev2")
use_standard_schema("v1.2.0-dev3")
class ProvLogFormatter(logging.Formatter):
"""Enforce ISO8601 with both T and Z."""
def __init__(self) -> None:
"""Use the default formatter with our custom formatstring."""
super().__init__("[%(asctime)sZ] %(message)s")
def formatTime(
self, record: logging.LogRecord, datefmt: Optional[str] = None
) -> str:
formatted_time = time.strftime(
"%Y-%m-%dT%H:%M:%S", time.gmtime(float(record.created))
)
with_msecs = f"{formatted_time},{record.msecs:03f}"
return with_msecs
def setup_provenance(
args: argparse.Namespace,
argsl: List[str],
runtimeContext: RuntimeContext,
) -> Union[io.TextIOWrapper, WritableBagFile]:
if not args.compute_checksum:
_logger.error("--provenance incompatible with --no-compute-checksum")
raise ArgumentException()
ro = ResearchObject(
getdefault(runtimeContext.make_fs_access, StdFsAccess)(""),
temp_prefix_ro=args.tmpdir_prefix,
orcid=args.orcid,
full_name=args.cwl_full_name,
)
runtimeContext.research_obj = ro
log_file_io = ro.open_log_file_for_activity(ro.engine_uuid)
prov_log_handler = logging.StreamHandler(log_file_io)
prov_log_handler.setFormatter(ProvLogFormatter())
_logger.addHandler(prov_log_handler)
_logger.debug("[provenance] Logging to %s", log_file_io)
if argsl is not None:
# Log cwltool command line options to provenance file
_logger.info("[cwltool] %s %s", sys.argv[0], " ".join(argsl))
_logger.debug("[cwltool] Arguments: %s", args)
return log_file_io
def setup_loadingContext(
loadingContext: Optional[LoadingContext],
runtimeContext: RuntimeContext,
args: argparse.Namespace,
) -> LoadingContext:
if loadingContext is None:
loadingContext = LoadingContext(vars(args))
else:
loadingContext = loadingContext.copy()
loadingContext.loader = default_loader(
loadingContext.fetcher_constructor,
enable_dev=args.enable_dev,
doc_cache=args.doc_cache,
)
loadingContext.research_obj = runtimeContext.research_obj
loadingContext.disable_js_validation = args.disable_js_validation or (
not args.do_validate
)
loadingContext.construct_tool_object = getdefault(
loadingContext.construct_tool_object, workflow.default_make_tool
)
loadingContext.resolver = getdefault(loadingContext.resolver, tool_resolver)
if loadingContext.do_update is None:
loadingContext.do_update = not (args.pack or args.print_subgraph)
return loadingContext
def make_template(
tool: Process,
) -> None:
"""Make a template CWL input object for the give Process."""
def my_represent_none(
self: Any, data: Any
) -> Any: # pylint: disable=unused-argument
"""Force clean representation of 'null'."""
return self.represent_scalar("tag:yaml.org,2002:null", "null")
ruamel.yaml.representer.RoundTripRepresenter.add_representer(
type(None), my_represent_none
)
yaml = YAML()
yaml.default_flow_style = False
yaml.indent = 4
yaml.block_seq_indent = 2
yaml.dump(
generate_input_template(tool),
sys.stdout,
)
def choose_target(
args: argparse.Namespace,
tool: Process,
loadingContext: LoadingContext,
) -> Optional[Process]:
"""Walk the Workflow, extract the subset matches all the args.targets."""
if loadingContext.loader is None:
raise Exception("loadingContext.loader cannot be None")
if isinstance(tool, Workflow):
url = urllib.parse.urlparse(tool.tool["id"])
if url.fragment:
extracted = get_subgraph(
[tool.tool["id"] + "/" + r for r in args.target], tool
)
else:
extracted = get_subgraph(
[
loadingContext.loader.fetcher.urljoin(tool.tool["id"], "#" + r)
for r in args.target
],
tool,
)
else:
_logger.error("Can only use --target on Workflows")
return None
if isinstance(loadingContext.loader.idx, MutableMapping):
loadingContext.loader.idx[extracted["id"]] = extracted
tool = make_tool(extracted["id"], loadingContext)
else:
raise Exception("Missing loadingContext.loader.idx!")
return tool
def choose_step(
args: argparse.Namespace,
tool: Process,
loadingContext: LoadingContext,
) -> Optional[Process]:
"""Walk the given Workflow and extract just args.single_step."""
if loadingContext.loader is None:
raise Exception("loadingContext.loader cannot be None")
if isinstance(tool, Workflow):
url = urllib.parse.urlparse(tool.tool["id"])
if url.fragment:
extracted = get_step(tool, tool.tool["id"] + "/" + args.single_step)
else:
extracted = get_step(
tool,
loadingContext.loader.fetcher.urljoin(
tool.tool["id"], "#" + args.single_step
),
)
else:
_logger.error("Can only use --single-step on Workflows")
return None
if isinstance(loadingContext.loader.idx, MutableMapping):
loadingContext.loader.idx[extracted["id"]] = extracted
tool = make_tool(extracted["id"], loadingContext)
else:
raise Exception("Missing loadingContext.loader.idx!")
return tool
def choose_process(
args: argparse.Namespace,
tool: Process,
loadingContext: LoadingContext,
) -> Optional[Process]:
"""Walk the given Workflow and extract just args.single_step."""
if loadingContext.loader is None:
raise Exception("loadingContext.loader cannot be None")
if isinstance(tool, Workflow):
url = urllib.parse.urlparse(tool.tool["id"])
if url.fragment:
extracted = get_process(
tool,
tool.tool["id"] + "/" + args.single_process,
loadingContext.loader.idx,
)
else:
extracted = get_process(
tool,
loadingContext.loader.fetcher.urljoin(
tool.tool["id"], "#" + args.single_process
),
loadingContext.loader.idx,
)
else:
_logger.error("Can only use --single-process on Workflows")
return None
if isinstance(loadingContext.loader.idx, MutableMapping):
loadingContext.loader.idx[extracted["id"]] = extracted
tool = make_tool(extracted["id"], loadingContext)
else:
raise Exception("Missing loadingContext.loader.idx!")
return tool
def check_working_directories(
runtimeContext: RuntimeContext,
) -> Optional[int]:
"""Make any needed working directories."""
for dirprefix in ("tmpdir_prefix", "tmp_outdir_prefix", "cachedir"):
if (
getattr(runtimeContext, dirprefix)
and getattr(runtimeContext, dirprefix) != DEFAULT_TMP_PREFIX
):
sl = (
"/"
if getattr(runtimeContext, dirprefix).endswith("/")
or dirprefix == "cachedir"
else ""
)
setattr(
runtimeContext,
dirprefix,
os.path.abspath(getattr(runtimeContext, dirprefix)) + sl,
)
if not os.path.exists(os.path.dirname(getattr(runtimeContext, dirprefix))):
try:
os.makedirs(os.path.dirname(getattr(runtimeContext, dirprefix)))
except Exception:
_logger.exception("Failed to create directory.")
return 1
return None
def main(
argsl: Optional[List[str]] = None,
args: Optional[argparse.Namespace] = None,
job_order_object: Optional[CWLObjectType] = None,
stdin: IO[Any] = sys.stdin,
stdout: Optional[Union[TextIO, StreamWriter]] = None,
stderr: IO[Any] = sys.stderr,
versionfunc: Callable[[], str] = versionstring,
logger_handler: Optional[logging.Handler] = None,
custom_schema_callback: Optional[Callable[[], None]] = None,
executor: Optional[JobExecutor] = None,
loadingContext: Optional[LoadingContext] = None,
runtimeContext: Optional[RuntimeContext] = None,
input_required: bool = True,
) -> int:
if not stdout: # force UTF-8 even if the console is configured differently
if hasattr(sys.stdout, "encoding") and sys.stdout.encoding.upper() not in (
"UTF-8",
"UTF8",
):
if hasattr(sys.stdout, "detach"):
stdout = io.TextIOWrapper(sys.stdout.buffer, encoding="utf-8")
else:
stdout = getwriter("utf-8")(sys.stdout) # type: ignore
else:
stdout = sys.stdout
_logger.removeHandler(defaultStreamHandler)
stderr_handler = logger_handler
if stderr_handler is not None:
_logger.addHandler(stderr_handler)
else:
coloredlogs.install(logger=_logger, stream=stderr)
stderr_handler = _logger.handlers[-1]
workflowobj = None
prov_log_handler = None # type: Optional[logging.StreamHandler]
try:
if args is None:
if argsl is None:
argsl = sys.argv[1:]
addl = [] # type: List[str]
if "CWLTOOL_OPTIONS" in os.environ:
addl = os.environ["CWLTOOL_OPTIONS"].split(" ")
parser = arg_parser()
argcomplete.autocomplete(parser)
args = parser.parse_args(addl + argsl)
if args.record_container_id:
if not args.cidfile_dir:
args.cidfile_dir = os.getcwd()
del args.record_container_id
if runtimeContext is None:
runtimeContext = RuntimeContext(vars(args))
else:
runtimeContext = runtimeContext.copy()
# If caller parsed its own arguments, it may not include every
# cwltool option, so fill in defaults to avoid crashing when
# dereferencing them in args.
for key, val in get_default_args().items():
if not hasattr(args, key):
setattr(args, key, val)
configure_logging(args, stderr_handler, runtimeContext)
if args.version:
print(versionfunc())
return 0
_logger.info(versionfunc())
if args.print_supported_versions:
print("\n".join(supported_cwl_versions(args.enable_dev)))
return 0
if not args.workflow:
if os.path.isfile("CWLFile"):
args.workflow = "CWLFile"
else:
_logger.error("CWL document required, no input file was provided")
parser.print_help()
return 1
if args.ga4gh_tool_registries:
ga4gh_tool_registries[:] = args.ga4gh_tool_registries
if not args.enable_ga4gh_tool_registry:
del ga4gh_tool_registries[:]
if args.mpi_config_file is not None:
runtimeContext.mpi_config = MpiConfig.load(args.mpi_config_file)
setup_schema(args, custom_schema_callback)
prov_log_stream: Optional[Union[io.TextIOWrapper, WritableBagFile]] = None
if args.provenance:
if argsl is None:
raise Exception("argsl cannot be None")
try:
prov_log_stream = setup_provenance(args, argsl, runtimeContext)
except ArgumentException:
return 1
loadingContext = setup_loadingContext(loadingContext, runtimeContext, args)
uri, tool_file_uri = resolve_tool_uri(
args.workflow,
resolver=loadingContext.resolver,
fetcher_constructor=loadingContext.fetcher_constructor,
)
try_again_msg = (
"" if args.debug else ", try again with --debug for more information"
)
try:
job_order_object, input_basedir, jobloader = load_job_order(
args,
stdin,
loadingContext.fetcher_constructor,
loadingContext.overrides_list,
tool_file_uri,
)
if args.overrides:
loadingContext.overrides_list.extend(
load_overrides(
file_uri(os.path.abspath(args.overrides)), tool_file_uri
)
)
loadingContext, workflowobj, uri = fetch_document(uri, loadingContext)
if args.print_deps and loadingContext.loader:
printdeps(
workflowobj, loadingContext.loader, stdout, args.relative_deps, uri
)
return 0
loadingContext, uri = resolve_and_validate_document(
loadingContext,
workflowobj,
uri,
preprocess_only=(args.print_pre or args.pack),
skip_schemas=args.skip_schemas,
)
if loadingContext.loader is None:
raise Exception("Impossible code path.")
processobj, metadata = loadingContext.loader.resolve_ref(uri)
processobj = cast(Union[CommentedMap, CommentedSeq], processobj)
if args.pack:
stdout.write(print_pack(loadingContext, uri))
return 0
if args.provenance and runtimeContext.research_obj:
                # Can't really be combined with args.pack at the same time
runtimeContext.research_obj.packed_workflow(
print_pack(loadingContext, uri)
)
if args.print_pre:
stdout.write(
json_dumps(
processobj, indent=4, sort_keys=True, separators=(",", ": ")
)
)
return 0
tool = make_tool(uri, loadingContext)
if args.make_template:
make_template(tool)
return 0
if args.validate:
print(f"{args.workflow} is valid CWL.")
return 0
if args.print_rdf:
stdout.write(
printrdf(tool, loadingContext.loader.ctx, args.rdf_serializer)
)
return 0
if args.print_dot:
printdot(tool, loadingContext.loader.ctx, stdout)
return 0
if args.print_targets:
for f in ("outputs", "steps", "inputs"):
if tool.tool[f]:
_logger.info("%s%s targets:", f[0].upper(), f[1:-1])
stdout.write(
" "
+ "\n ".join([shortname(t["id"]) for t in tool.tool[f]])
+ "\n"
)
return 0
if args.target:
ctool = choose_target(args, tool, loadingContext)
if ctool is None:
return 1
else:
tool = ctool
elif args.single_step:
ctool = choose_step(args, tool, loadingContext)
if ctool is None:
return 1
else:
tool = ctool
elif args.single_process:
ctool = choose_process(args, tool, loadingContext)
if ctool is None:
return 1
else:
tool = ctool
if args.print_subgraph:
if "name" in tool.tool:
del tool.tool["name"]
stdout.write(
json_dumps(
tool.tool, indent=4, sort_keys=True, separators=(",", ": ")
)
)
return 0
except (ValidationException) as exc:
_logger.error(
"Tool definition failed validation:\n%s", str(exc), exc_info=args.debug
)
return 1
except (RuntimeError, WorkflowException) as exc:
_logger.error(
"Tool definition failed initialization:\n%s",
str(exc),
exc_info=args.debug,
)
return 1
except Exception as exc:
_logger.error(
"I'm sorry, I couldn't load this CWL file%s.\nThe error was: %s",
try_again_msg,
str(exc) if not args.debug else "",
exc_info=args.debug,
)
return 1
if isinstance(tool, int):
return tool
# If on MacOS platform, TMPDIR must be set to be under one of the
# shared volumes in Docker for Mac
# More info: https://dockstore.org/docs/faq
if sys.platform == "darwin":
default_mac_path = "/private/tmp/docker_tmp"
if runtimeContext.tmp_outdir_prefix == DEFAULT_TMP_PREFIX:
runtimeContext.tmp_outdir_prefix = default_mac_path
if runtimeContext.tmpdir_prefix == DEFAULT_TMP_PREFIX:
runtimeContext.tmpdir_prefix = default_mac_path
if check_working_directories(runtimeContext) is not None:
return 1
if args.cachedir:
if args.move_outputs == "move":
runtimeContext.move_outputs = "copy"
runtimeContext.tmp_outdir_prefix = args.cachedir
runtimeContext.secret_store = getdefault(
runtimeContext.secret_store, SecretStore()
)
runtimeContext.make_fs_access = getdefault(
runtimeContext.make_fs_access, StdFsAccess
)
if not executor:
if args.parallel:
temp_executor = MultithreadedJobExecutor()
runtimeContext.select_resources = temp_executor.select_resources
real_executor = temp_executor # type: JobExecutor
else:
real_executor = SingleJobExecutor()
else:
real_executor = executor
try:
runtimeContext.basedir = input_basedir
if isinstance(tool, ProcessGenerator):
tfjob_order = {} # type: CWLObjectType
if loadingContext.jobdefaults:
tfjob_order.update(loadingContext.jobdefaults)
if job_order_object:
tfjob_order.update(job_order_object)
tfout, tfstatus = real_executor(
tool.embedded_tool, tfjob_order, runtimeContext
)
if not tfout or tfstatus != "success":
raise WorkflowException(
"ProcessGenerator failed to generate workflow"
)
tool, job_order_object = tool.result(tfjob_order, tfout, runtimeContext)
if not job_order_object:
job_order_object = None
try:
initialized_job_order_object = init_job_order(
job_order_object,
args,
tool,
jobloader,
stdout,
print_input_deps=args.print_input_deps,
relative_deps=args.relative_deps,
make_fs_access=runtimeContext.make_fs_access,
input_basedir=input_basedir,
secret_store=runtimeContext.secret_store,
input_required=input_required,
)
except SystemExit as err:
return err.code
del args.workflow
del args.job_order
conf_file = getattr(
args, "beta_dependency_resolvers_configuration", None
) # str
use_conda_dependencies = getattr(
args, "beta_conda_dependencies", None
) # str
if conf_file or use_conda_dependencies:
runtimeContext.job_script_provider = DependenciesConfiguration(args)
else:
runtimeContext.find_default_container = functools.partial(
find_default_container,
default_container=runtimeContext.default_container,
use_biocontainers=args.beta_use_biocontainers,
)
(out, status) = real_executor(
tool, initialized_job_order_object, runtimeContext, logger=_logger
)
if out is not None:
if runtimeContext.research_obj is not None:
runtimeContext.research_obj.create_job(out, True)
def remove_at_id(doc: CWLObjectType) -> None:
for key in list(doc.keys()):
if key == "@id":
del doc[key]
else:
value = doc[key]
if isinstance(value, MutableMapping):
remove_at_id(value)
elif isinstance(value, MutableSequence):
for entry in value:
if isinstance(entry, MutableMapping):
remove_at_id(entry)
remove_at_id(out)
visit_class(
out,
("File",),
functools.partial(add_sizes, runtimeContext.make_fs_access("")),
)
def loc_to_path(obj: CWLObjectType) -> None:
for field in ("path", "nameext", "nameroot", "dirname"):
if field in obj:
del obj[field]
if cast(str, obj["location"]).startswith("file://"):
obj["path"] = uri_file_path(cast(str, obj["location"]))
visit_class(out, ("File", "Directory"), loc_to_path)
# Unsetting the Generation from final output object
visit_class(out, ("File",), MutationManager().unset_generation)
if isinstance(out, str):
stdout.write(out)
else:
stdout.write(json_dumps(out, indent=4, ensure_ascii=False))
stdout.write("\n")
if hasattr(stdout, "flush"):
stdout.flush()
if status != "success":
_logger.warning("Final process status is %s", status)
return 1
_logger.info("Final process status is %s", status)
return 0
except (ValidationException) as exc:
_logger.error(
"Input object failed validation:\n%s", str(exc), exc_info=args.debug
)
return 1
except UnsupportedRequirement as exc:
_logger.error(
"Workflow or tool uses unsupported feature:\n%s",
str(exc),
exc_info=args.debug,
)
return 33
except WorkflowException as exc:
_logger.error(
"Workflow error%s:\n%s",
try_again_msg,
strip_dup_lineno(str(exc)),
exc_info=args.debug,
)
return 1
except Exception as exc: # pylint: disable=broad-except
_logger.error(
"Unhandled error%s:\n %s",
try_again_msg,
str(exc),
exc_info=args.debug,
)
return 1
finally:
if (
args
and runtimeContext
and runtimeContext.research_obj
and workflowobj
and loadingContext
):
research_obj = runtimeContext.research_obj
if loadingContext.loader is not None:
research_obj.generate_snapshot(
prov_deps(workflowobj, loadingContext.loader, uri)
)
else:
_logger.warning(
"Unable to generate provenance snapshot "
" due to missing loadingContext.loader."
)
if prov_log_handler is not None:
                # Stop logging so we won't half-log adding ourselves to the RO
_logger.debug(
"[provenance] Closing provenance log file %s", prov_log_handler
)
_logger.removeHandler(prov_log_handler)
# Ensure last log lines are written out
prov_log_handler.flush()
# Underlying WritableBagFile will add the tagfile to the manifest
if prov_log_stream:
prov_log_stream.close()
                # Why not use prov_log_handler.stream? That is not part of the
# public API for logging.StreamHandler
prov_log_handler.close()
research_obj.close(args.provenance)
_logger.removeHandler(stderr_handler)
_logger.addHandler(defaultStreamHandler)
def find_default_container(
builder: HasReqsHints,
default_container: Optional[str] = None,
use_biocontainers: Optional[bool] = None,
) -> Optional[str]:
"""Find a container."""
if not default_container and use_biocontainers:
default_container = get_container_from_software_requirements(
use_biocontainers, builder
)
return default_container
def windows_check() -> None:
"""See if we are running on MS Windows and warn about the lack of support."""
if os.name == "nt":
warnings.warn(
"The CWL reference runner (cwltool) no longer supports running "
"CWL workflows natively on MS Windows as its previous MS Windows "
"support was incomplete and untested. Instead, please see "
"https://pypi.org/project/cwltool/#ms-windows-users "
"for instructions on running cwltool via "
"Windows Subsystem for Linux 2 (WSL2). If don't need to execute "
"CWL documents, then you can ignore this warning, but please "
"consider migrating to https://pypi.org/project/cwl-utils/ "
"for your CWL document processing needs."
)
def run(*args: Any, **kwargs: Any) -> None:
"""Run cwltool."""
windows_check()
signal.signal(signal.SIGTERM, _signal_handler)
try:
sys.exit(main(*args, **kwargs))
finally:
_terminate_processes()
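# Illustrative programmatic use (a sketch, not part of the original source):
# main() accepts an ``argsl`` list mirroring the command line, so a caller
# could validate a hypothetical workflow document like this:
#
#     from cwltool.main import main
#     exit_code = main(argsl=["--validate", "my-workflow.cwl"])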
if __name__ == "__main__":
run(sys.argv[1:])
| apache-2.0 | 2,259,084,882,433,761,300 | 34.639461 | 88 | 0.559921 | false |
twitter/pants | src/python/pants/task/task.py | 1 | 28963 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import sys
from abc import abstractmethod
from builtins import filter, map, object, set, str, zip
from contextlib import contextmanager
from hashlib import sha1
from itertools import repeat
from future.utils import PY3
from pants.base.exceptions import TaskError
from pants.base.worker_pool import Work
from pants.build_graph.target_filter_subsystem import TargetFilter
from pants.cache.artifact_cache import UnreadableArtifact, call_insert, call_use_cached_files
from pants.cache.cache_setup import CacheSetup
from pants.invalidation.build_invalidator import (BuildInvalidator, CacheKeyGenerator,
UncacheableCacheKeyGenerator)
from pants.invalidation.cache_manager import InvalidationCacheManager, InvalidationCheck
from pants.option.optionable import Optionable
from pants.option.options_fingerprinter import OptionsFingerprinter
from pants.option.scope import ScopeInfo
from pants.reporting.reporting_utils import items_to_report_element
from pants.source.source_root import SourceRootConfig
from pants.subsystem.subsystem_client_mixin import SubsystemClientMixin
from pants.util.dirutil import safe_mkdir, safe_rm_oldest_items_in_dir
from pants.util.memo import memoized_method, memoized_property
from pants.util.meta import AbstractClass, classproperty
class TaskBase(SubsystemClientMixin, Optionable, AbstractClass):
"""Defines a lifecycle that prepares a task for execution and provides the base machinery
needed to execute it.
Provides the base lifecycle methods that allow a task to interact with the command line, other
tasks and the user. The lifecycle is linear and run via the following sequence:
1. register_options - declare options configurable via cmd-line flag or config file.
2. product_types - declare the product types your task is capable of producing.
3. alternate_target_roots - propose a different set of target roots to use than those specified
via the CLI for the active pants run.
4. prepare - request any products needed from other tasks.
5. __init__ - distill configuration into the information needed to execute.
Provides access to the current run context for scoping work.
Also provides the basic facilities for doing work efficiently including providing a work directory
for scratch space on disk, an invalidator for checking which targets need work done on, and an
artifact cache for re-using previously cached work.
#TODO(John Sirois): Lifecycle is currently split between TaskBase and Task and lifecycle
(interface) and helpers (utility) are currently conflated. Tease these apart and narrow the scope
of the helpers. Ideally console tasks don't inherit a workdir, invalidator or build cache for
example.
"""
options_scope_category = ScopeInfo.TASK
# We set this explicitly on the synthetic subclass, so that it shares a stable name with
# its superclass, which is not necessary for regular use, but can be convenient in tests.
_stable_name = None
@classmethod
def implementation_version(cls):
"""
:API: public
"""
return [('TaskBase', 2)]
@classmethod
@memoized_method
def implementation_version_str(cls):
return '.'.join(['_'.join(map(str, x)) for x in cls.implementation_version()])
@classmethod
@memoized_method
def implementation_version_slug(cls):
return sha1(cls.implementation_version_str().encode('utf-8')).hexdigest()[:12]
@classmethod
def stable_name(cls):
"""The stable name of this task type.
We synthesize subclasses of the task types at runtime, and these synthesized subclasses
may have random names (e.g., in tests), so this gives us a stable name to use across runs,
e.g., in artifact cache references.
"""
return cls._stable_name or cls._compute_stable_name()
@classmethod
def _compute_stable_name(cls):
return '{}_{}'.format(cls.__module__, cls.__name__).replace('.', '_')
@classmethod
def subsystem_dependencies(cls):
return (super(TaskBase, cls).subsystem_dependencies() +
(CacheSetup.scoped(cls), BuildInvalidator.Factory, SourceRootConfig) +
((TargetFilter.scoped(cls),) if cls.target_filtering_enabled else tuple()))
@classmethod
def product_types(cls):
"""The list of products this Task produces. Set the product type(s) for this
task i.e. the product type(s) this task creates e.g ['classes'].
By default, each task is considered as creating a unique product type(s).
Subclasses that create products, should override this to specify their unique product type(s).
:API: public
"""
return []
@classmethod
def supports_passthru_args(cls):
"""Subclasses may override to indicate that they can use passthru args.
:API: public
"""
return False
@classmethod
def _scoped_options(cls, options):
return options[cls.options_scope]
@classmethod
def get_alternate_target_roots(cls, options, address_mapper, build_graph):
# Subclasses should not generally need to override this method.
return cls.alternate_target_roots(cls._scoped_options(options), address_mapper, build_graph)
@classmethod
def alternate_target_roots(cls, options, address_mapper, build_graph):
"""Allows a Task to propose alternate target roots from those specified on the CLI.
At most 1 unique proposal is allowed amongst all tasks involved in the run. If more than 1
unique list of target roots is proposed an error is raised during task scheduling.
:API: public
:returns list: The new target roots to use or None to accept the CLI specified target roots.
"""
@classmethod
def invoke_prepare(cls, options, round_manager):
# Subclasses should not generally need to override this method.
return cls.prepare(cls._scoped_options(options), round_manager)
@classmethod
def prepare(cls, options, round_manager):
"""Prepares a task for execution.
Called before execution and prior to any tasks that may be (indirectly) depended upon.
Typically a task that requires products from other goals would register interest in those
products here and then retrieve the requested product mappings when executed.
:API: public
"""
def __init__(self, context, workdir):
"""Subclass __init__ methods, if defined, *must* follow this idiom:
class MyTask(Task):
def __init__(self, *args, **kwargs):
super(MyTask, self).__init__(*args, **kwargs)
...
This allows us to change Task.__init__()'s arguments without
changing every subclass. If the subclass does not need its own
initialization, this method can (and should) be omitted entirely.
:API: public
"""
super(TaskBase, self).__init__()
self.context = context
self._workdir = workdir
self._task_name = type(self).__name__
self._cache_key_errors = set()
self._cache_factory = CacheSetup.create_cache_factory_for_task(self)
self._force_invalidated = False
@memoized_property
def _build_invalidator(self):
return BuildInvalidator.Factory.create(build_task=self.fingerprint)
def get_options(self):
"""Returns the option values for this task's scope.
:API: public
"""
return self.context.options.for_scope(self.options_scope)
def get_passthru_args(self):
"""Returns the passthru args for this task, if it supports them.
:API: public
"""
if not self.supports_passthru_args():
raise TaskError('{0} Does not support passthru args.'.format(self.stable_name()))
else:
return self.context.options.passthru_args_for_scope(self.options_scope)
@property
def skip_execution(self):
"""Whether this task should be skipped.
Tasks can override to specify skipping behavior (e.g., based on an option).
:API: public
"""
return False
@property
def act_transitively(self):
"""Whether this task should act on the transitive closure of the target roots.
Tasks can override to specify transitivity behavior (e.g., based on an option).
Note that this property is consulted by get_targets(), but tasks that bypass that
method must make their own decision on whether to act transitively or not.
:API: public
"""
return True
@classproperty
def target_filtering_enabled(cls):
"""Whether this task should apply configured filters against targets.
Tasks can override to enable target filtering (e.g. based on tags) and must
access targets via get_targets()
:API: public
"""
return False
def get_targets(self, predicate=None):
"""Returns the candidate targets this task should act on.
This method is a convenience for processing optional transitivity. Tasks may bypass it
and make their own decisions on which targets to act on.
NOTE: This method was introduced in 2018, so at the time of writing few tasks consult it.
Instead, they query self.context.targets directly.
    TODO: Fix up existing tasks to consult this method, for uniformity.
Note that returned targets have not been checked for invalidation. The caller should do
so as needed, typically by calling self.invalidated().
:API: public
"""
initial_targets = (self.context.targets(predicate) if self.act_transitively
else list(filter(predicate, self.context.target_roots)))
if not self.target_filtering_enabled:
return initial_targets
else:
return self._filter_targets(initial_targets)
def _filter_targets(self, targets):
included_targets = TargetFilter.scoped_instance(self).apply(targets)
excluded_targets = set(targets).difference(included_targets)
if excluded_targets:
self.context.log.info("{} target(s) excluded".format(len(excluded_targets)))
for target in excluded_targets:
self.context.log.debug("{} excluded".format(target.address.spec))
return included_targets
@memoized_property
def workdir(self):
"""A scratch-space for this task that will be deleted by `clean-all`.
It's guaranteed that no other task has been given this workdir path to use and that the workdir
exists.
:API: public
"""
safe_mkdir(self._workdir)
return self._workdir
@memoized_property
def versioned_workdir(self):
"""The Task.workdir suffixed with a fingerprint of the Task implementation version.
When choosing whether to store values directly in `self.workdir` or below it in
the directory returned by this property, you should generally prefer this value.
:API: public
"""
versioned_workdir = os.path.join(self.workdir, self.implementation_version_slug())
safe_mkdir(versioned_workdir)
return versioned_workdir
def _options_fingerprint(self, scope):
options_hasher = sha1()
options_hasher.update(scope.encode('utf-8'))
options_fp = OptionsFingerprinter.combined_options_fingerprint_for_scope(
scope,
self.context.options,
build_graph=self.context.build_graph,
include_passthru=self.supports_passthru_args(),
)
options_hasher.update(options_fp.encode('utf-8'))
return options_hasher.hexdigest() if PY3 else options_hasher.hexdigest().decode('utf-8')
@memoized_property
def fingerprint(self):
"""Returns a fingerprint for the identity of the task.
A task fingerprint is composed of the options the task is currently running under.
Useful for invalidating unchanging targets being executed beneath changing task
options that affect outputted artifacts.
A task's fingerprint is only valid after the task has been fully initialized.
"""
hasher = sha1()
hasher.update(self.stable_name().encode('utf-8'))
hasher.update(self._options_fingerprint(self.options_scope).encode('utf-8'))
hasher.update(self.implementation_version_str().encode('utf-8'))
for dep in self.subsystem_closure_iter():
hasher.update(self._options_fingerprint(dep.options_scope).encode('utf-8'))
return hasher.hexdigest() if PY3 else hasher.hexdigest().decode('utf-8')
def artifact_cache_reads_enabled(self):
return self._cache_factory.read_cache_available()
def artifact_cache_writes_enabled(self):
return self._cache_factory.write_cache_available()
def invalidate(self):
"""Invalidates all targets for this task."""
self._build_invalidator.force_invalidate_all()
@property
def create_target_dirs(self):
"""Whether to create a results_dir per VersionedTarget in the workdir of the Task.
This defaults to the value of `self.cache_target_dirs` (as caching them requires
creating them), but may be overridden independently to create the dirs without caching
them.
:API: public
"""
return self.cache_target_dirs
@property
def cache_target_dirs(self):
"""Whether to cache files in VersionedTarget's results_dir after exiting an invalidated block.
Subclasses may override this method to return True if they wish to use this style
of "automated" caching, where each VersionedTarget is given an associated results directory,
which will automatically be uploaded to the cache. Tasks should place the output files
for each VersionedTarget in said results directory. It is highly suggested to follow this
schema for caching, rather than manually making updates to the artifact cache.
:API: public
"""
return False
@property
def incremental(self):
"""Whether this Task implements incremental building of individual targets.
Incremental tasks with `cache_target_dirs` set will have the results_dir of the previous build
for a target cloned into the results_dir for the current build (where possible). This
copy-on-write behaviour allows for immutability of the results_dir once a target has been
marked valid.
:API: public
"""
return False
@property
def cache_incremental(self):
"""For incremental tasks, indicates whether the results of incremental builds should be cached.
Deterministic per-target incremental compilation is a relatively difficult thing to implement,
so this property provides an escape hatch to avoid caching things in that riskier case.
:API: public
"""
return False
@contextmanager
def invalidated(self,
targets,
invalidate_dependents=False,
silent=False,
fingerprint_strategy=None,
topological_order=False):
"""Checks targets for invalidation, first checking the artifact cache.
Subclasses call this to figure out what to work on.
:API: public
:param targets: The targets to check for changes.
:param invalidate_dependents: If True then any targets depending on changed targets are
invalidated.
:param silent: If true, suppress logging information about target invalidation.
:param fingerprint_strategy: A FingerprintStrategy instance, which can do per task,
finer grained fingerprinting of a given Target.
:param topological_order: Whether to invalidate in dependency order.
If no exceptions are thrown by work in the block, the build cache is updated for the targets.
Note: the artifact cache is not updated. That must be done manually.
:returns: Yields an InvalidationCheck object reflecting the targets.
:rtype: InvalidationCheck
"""
invalidation_check = self._do_invalidation_check(fingerprint_strategy,
invalidate_dependents,
targets,
topological_order)
self._maybe_create_results_dirs(invalidation_check.all_vts)
if invalidation_check.invalid_vts and self.artifact_cache_reads_enabled():
with self.context.new_workunit('cache'):
cached_vts, uncached_vts, uncached_causes = \
self.check_artifact_cache(self.check_artifact_cache_for(invalidation_check))
if cached_vts:
cached_targets = [vt.target for vt in cached_vts]
self.context.run_tracker.artifact_cache_stats.add_hits(self._task_name, cached_targets)
if not silent:
self._report_targets('Using cached artifacts for ', cached_targets, '.')
if uncached_vts:
uncached_targets = [vt.target for vt in uncached_vts]
self.context.run_tracker.artifact_cache_stats.add_misses(self._task_name,
uncached_targets,
uncached_causes)
if not silent:
self._report_targets('No cached artifacts for ', uncached_targets, '.')
# Now that we've checked the cache, re-partition whatever is still invalid.
invalidation_check = InvalidationCheck(invalidation_check.all_vts, uncached_vts)
if not silent:
targets = []
for vt in invalidation_check.invalid_vts:
targets.extend(vt.targets)
if len(targets):
target_address_references = [t.address.reference() for t in targets]
msg_elements = [
'Invalidated ',
items_to_report_element(target_address_references, 'target'),
'.',
]
self.context.log.info(*msg_elements)
self._update_invalidation_report(invalidation_check, 'pre-check')
# Cache has been checked to create the full list of invalid VTs.
# Only copy previous_results for this subset of VTs.
if self.incremental:
for vts in invalidation_check.invalid_vts:
vts.copy_previous_results()
# This may seem odd: why would we need to invalidate a VersionedTargetSet that is already
# invalid? But the name force_invalidate() is slightly misleading in this context - what it
# actually does is delete the key file created at the end of the last successful task run.
# This is necessary to avoid the following scenario:
#
    # 1) In state A: Task succeeds and writes some output. Key is recorded by the invalidator.
# 2) In state B: Task fails, but writes some output. Key is not recorded.
# 3) After reverting back to state A: The current key is the same as the one recorded at the
# end of step 1), so it looks like no work needs to be done, but actually the task
# must re-run, to overwrite the output written in step 2.
#
# Deleting the file ensures that if a task fails, there is no key for which we might think
# we're in a valid state.
for vts in invalidation_check.invalid_vts:
vts.force_invalidate()
# Yield the result, and then mark the targets as up to date.
yield invalidation_check
self._update_invalidation_report(invalidation_check, 'post-check')
for vt in invalidation_check.invalid_vts:
vt.update()
# Background work to clean up previous builds.
if self.context.options.for_global_scope().workdir_max_build_entries is not None:
self._launch_background_workdir_cleanup(invalidation_check.all_vts)
def _update_invalidation_report(self, invalidation_check, phase):
invalidation_report = self.context.invalidation_report
if invalidation_report:
for vts in invalidation_check.all_vts:
invalidation_report.add_vts(self._task_name, vts.targets, vts.cache_key, vts.valid,
phase=phase)
def _do_invalidation_check(self,
fingerprint_strategy,
invalidate_dependents,
targets,
topological_order):
if self._cache_factory.ignore:
cache_key_generator = UncacheableCacheKeyGenerator()
else:
cache_key_generator = CacheKeyGenerator(
self.context.options.for_global_scope().cache_key_gen_version,
self.fingerprint)
cache_manager = InvalidationCacheManager(self.workdir,
cache_key_generator,
self._build_invalidator,
invalidate_dependents,
fingerprint_strategy=fingerprint_strategy,
invalidation_report=self.context.invalidation_report,
task_name=self._task_name,
task_version_slug=self.implementation_version_slug(),
artifact_write_callback=self.maybe_write_artifact)
# If this Task's execution has been forced, invalidate all our target fingerprints.
if self._cache_factory.ignore and not self._force_invalidated:
self.invalidate()
self._force_invalidated = True
return cache_manager.check(targets, topological_order=topological_order)
def maybe_write_artifact(self, vt):
if self._should_cache_target_dir(vt):
self.update_artifact_cache([(vt, [vt.current_results_dir])])
def _launch_background_workdir_cleanup(self, vts):
workdir_build_cleanup_job = Work(self._cleanup_workdir_stale_builds,
[(vts,)],
'workdir_build_cleanup')
self.context.submit_background_work_chain([workdir_build_cleanup_job])
def _cleanup_workdir_stale_builds(self, vts):
# workdir_max_build_entries has been assured of not None before invoking this method.
workdir_max_build_entries = self.context.options.for_global_scope().workdir_max_build_entries
max_entries_per_target = max(2, workdir_max_build_entries)
for vt in vts:
live_dirs = list(vt.live_dirs())
if not live_dirs:
continue
root_dir = os.path.dirname(vt.results_dir)
safe_rm_oldest_items_in_dir(root_dir, max_entries_per_target, excludes=live_dirs)
def _should_cache_target_dir(self, vt):
"""Return true if the given vt should be written to a cache (if configured)."""
return (
self.cache_target_dirs and
vt.cacheable and
(not vt.is_incremental or self.cache_incremental) and
self.artifact_cache_writes_enabled()
)
def _maybe_create_results_dirs(self, vts):
"""If `cache_target_dirs`, create results_dirs for the given versioned targets."""
if self.create_target_dirs:
for vt in vts:
vt.create_results_dir()
def check_artifact_cache_for(self, invalidation_check):
"""Decides which VTS to check the artifact cache for.
By default we check for each invalid target. Can be overridden, e.g., to
instead check only for a single artifact for the entire target set.
"""
return invalidation_check.invalid_vts
def check_artifact_cache(self, vts):
"""Checks the artifact cache for the specified list of VersionedTargetSets.
Returns a tuple (cached, uncached, uncached_causes) of VersionedTargets that were
satisfied/unsatisfied from the cache. Uncached VTS are also attached with their
causes for the miss: `False` indicates a legit miss while `UnreadableArtifact`
is due to either local or remote cache failures.
"""
return self.do_check_artifact_cache(vts)
def do_check_artifact_cache(self, vts, post_process_cached_vts=None):
"""Checks the artifact cache for the specified list of VersionedTargetSets.
Returns a tuple (cached, uncached, uncached_causes) of VersionedTargets that were
satisfied/unsatisfied from the cache.
"""
if not vts:
return [], [], []
read_cache = self._cache_factory.get_read_cache()
items = [(read_cache, vt.cache_key, vt.current_results_dir if self.cache_target_dirs else None)
for vt in vts]
res = self.context.subproc_map(call_use_cached_files, items)
cached_vts = []
uncached_vts = []
uncached_causes = []
    # Note that while the input vts may represent multiple targets (for tasks that override
# check_artifact_cache_for), the ones we return must represent single targets.
# Once flattened, cached/uncached vts are in separate lists. Each uncached vts is paired
# with why it is missed for stat reporting purpose.
for vt, was_in_cache in zip(vts, res):
if was_in_cache:
cached_vts.extend(vt.versioned_targets)
else:
uncached_vts.extend(vt.versioned_targets)
uncached_causes.extend(repeat(was_in_cache, len(vt.versioned_targets)))
if isinstance(was_in_cache, UnreadableArtifact):
self._cache_key_errors.update(was_in_cache.key)
if post_process_cached_vts:
post_process_cached_vts(cached_vts)
for vt in cached_vts:
vt.update()
return cached_vts, uncached_vts, uncached_causes
def update_artifact_cache(self, vts_artifactfiles_pairs):
"""Write to the artifact cache, if we're configured to.
vts_artifactfiles_pairs - a list of pairs (vts, artifactfiles) where
- vts is single VersionedTargetSet.
- artifactfiles is a list of absolute paths to artifacts for the VersionedTargetSet.
"""
update_artifact_cache_work = self._get_update_artifact_cache_work(vts_artifactfiles_pairs)
if update_artifact_cache_work:
self.context.submit_background_work_chain([update_artifact_cache_work],
parent_workunit_name='cache')
def _get_update_artifact_cache_work(self, vts_artifactfiles_pairs):
"""Create a Work instance to update an artifact cache, if we're configured to.
vts_artifactfiles_pairs - a list of pairs (vts, artifactfiles) where
- vts is single VersionedTargetSet.
- artifactfiles is a list of paths to artifacts for the VersionedTargetSet.
"""
cache = self._cache_factory.get_write_cache()
if cache:
if len(vts_artifactfiles_pairs) == 0:
return None
# Do some reporting.
targets = set()
for vts, _ in vts_artifactfiles_pairs:
targets.update(vts.targets)
self._report_targets(
'Caching artifacts for ',
list(targets),
'.',
logger=self.context.log.debug,
)
always_overwrite = self._cache_factory.overwrite()
# Cache the artifacts.
args_tuples = []
for vts, artifactfiles in vts_artifactfiles_pairs:
overwrite = always_overwrite or vts.cache_key in self._cache_key_errors
args_tuples.append((cache, vts.cache_key, artifactfiles, overwrite))
return Work(lambda x: self.context.subproc_map(call_insert, x), [(args_tuples,)], 'insert')
else:
return None
def _report_targets(self, prefix, targets, suffix, logger=None):
target_address_references = [t.address.reference() for t in targets]
msg_elements = [
prefix,
items_to_report_element(target_address_references, 'target'),
suffix,
]
logger = logger or self.context.log.info
logger(*msg_elements)
def require_single_root_target(self):
"""If a single target was specified on the cmd line, returns that target.
Otherwise throws TaskError.
:API: public
"""
target_roots = self.context.target_roots
if len(target_roots) == 0:
raise TaskError('No target specified.')
elif len(target_roots) > 1:
raise TaskError('Multiple targets specified: {}'
.format(', '.join([repr(t) for t in target_roots])))
return target_roots[0]
def determine_target_roots(self, goal_name):
"""Helper for tasks that scan for default target roots.
:param string goal_name: The goal name to use for any warning emissions.
"""
if not self.context.target_roots:
print('WARNING: No targets were matched in goal `{}`.'.format(goal_name), file=sys.stderr)
# For the v2 path, e.g. `./pants list` is a functional no-op. This matches the v2 mode behavior
# of e.g. `./pants --changed-parent=HEAD list` (w/ no changes) returning an empty result.
return self.context.target_roots
class Task(TaskBase):
"""An executable task.
  Tasks form the atoms of work done by pants. When executed, they generally produce artifacts as a
  side effect, whether these be files on disk (for example compilation outputs) or characters output
  to the terminal (for example dependency graph metadata).
:API: public
"""
def __init__(self, context, workdir):
"""
Add pass-thru Task Constructor for public API visibility.
:API: public
"""
super(Task, self).__init__(context, workdir)
@abstractmethod
def execute(self):
"""Executes this task.
:API: public
"""
class QuietTaskMixin(object):
"""A mixin to signal that pants shouldn't print verbose progress information for this task."""
pass
| apache-2.0 | 8,560,133,421,249,656,000 | 38.621067 | 100 | 0.686497 | false |
itsnotmyfault1/kimcopter2 | crazyflie-pc-client/lib/cflib/crazyflie/__init__.py | 1 | 13576 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Copyright (C) 2011-2013 Bitcraze AB
#
# Crazyflie Nano Quadcopter Client
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
"""
The Crazyflie module is used to easily connect/send/receive data
from a Crazyflie.
Each function in the Crazyflie has a class in the module that can be used
to access that functionality. The same design is then used in the Crazyflie
firmware which makes the mapping 1:1 in most cases.
"""
__author__ = 'Bitcraze AB'
__all__ = ['Crazyflie']
import logging
logger = logging.getLogger(__name__)
import time
from threading import Thread
from threading import Timer
from .commander import Commander
from .console import Console
from .param import Param
from .log import Log
from .toccache import TocCache
import cflib.crtp
from cflib.utils.callbacks import Caller
class State:
"""Stat of the connection procedure"""
DISCONNECTED = 0
INITIALIZED = 1
CONNECTED = 2
SETUP_FINISHED = 3
class Crazyflie():
"""The Crazyflie class"""
# Callback callers
disconnected = Caller()
connectionLost = Caller()
connected = Caller()
connectionInitiated = Caller()
connectSetupFinished = Caller()
connectionFailed = Caller()
receivedPacket = Caller()
linkQuality = Caller()
state = State.DISCONNECTED
def __init__(self, link=None, ro_cache=None, rw_cache=None):
"""
Create the objects from this module and register callbacks.
ro_cache -- Path to read-only cache (string)
rw_cache -- Path to read-write cache (string)
"""
self.link = link
self._toc_cache = TocCache(ro_cache=ro_cache,
rw_cache=rw_cache)
self.incoming = _IncomingPacketHandler(self)
self.incoming.setDaemon(True)
self.incoming.start()
self.commander = Commander(self)
self.log = Log(self)
self.console = Console(self)
self.param = Param(self)
self._log_toc_updated = False
self._param_toc_updated = False
self.link_uri = ""
# Used for retry when no reply was sent back
self.receivedPacket.add_callback(self._check_for_initial_packet_cb)
self.receivedPacket.add_callback(self._check_for_answers)
self.answer_timers = {}
# Connect callbacks to logger
self.disconnected.add_callback(
lambda uri: logger.info("Callback->Disconnected from [%s]", uri))
self.connected.add_callback(
lambda uri: logger.info("Callback->Connected to [%s]", uri))
self.connectionLost.add_callback(
            lambda uri, errmsg: logger.info("Callback->Connection lost to"
" [%s]: %s", uri, errmsg))
self.connectionFailed.add_callback(
            lambda uri, errmsg: logger.info("Callback->Connection failed to"
" [%s]: %s", uri, errmsg))
self.connectionInitiated.add_callback(
lambda uri: logger.info("Callback->Connection initialized[%s]",
uri))
self.connectSetupFinished.add_callback(
lambda uri: logger.info("Callback->Connection setup finished [%s]",
uri))
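    # Illustrative wiring (a sketch, not part of the original source): register
    # callbacks before opening a link; `on_setup_done`/`on_failed` are
    # hypothetical handlers and the radio URI is just an example.
    #
    #   cf = Crazyflie(ro_cache="cache/ro", rw_cache="cache/rw")
    #   cf.connectSetupFinished.add_callback(on_setup_done)
    #   cf.connectionFailed.add_callback(on_failed)
    #   cf.open_link("radio://0/10/250K")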
def _start_connection_setup(self):
"""Start the connection setup by refreshing the TOCs"""
logger.info("We are connected[%s], request connection setup",
self.link_uri)
self.log.refresh_toc(self._log_toc_updated_cb, self._toc_cache)
def _param_toc_updated_cb(self):
"""Called when the param TOC has been fully updated"""
logger.info("Param TOC finished updating")
self._param_toc_updated = True
if (self._log_toc_updated is True and self._param_toc_updated is True):
self.connectSetupFinished.call(self.link_uri)
def _log_toc_updated_cb(self):
"""Called when the log TOC has been fully updated"""
logger.info("Log TOC finished updating")
self._log_toc_updated = True
self.param.refresh_toc(self._param_toc_updated_cb, self._toc_cache)
if (self._log_toc_updated and self._param_toc_updated):
logger.info("All TOCs finished updating")
self.connectSetupFinished.call(self.link_uri)
def _link_error_cb(self, errmsg):
"""Called from the link driver when there's an error"""
logger.warning("Got link error callback [%s] in state [%s]",
errmsg, self.state)
if (self.link is not None):
self.link.close()
self.link = None
if (self.state == State.INITIALIZED):
self.connectionFailed.call(self.link_uri, errmsg)
if (self.state == State.CONNECTED or
self.state == State.SETUP_FINISHED):
self.disconnected.call(self.link_uri)
self.connectionLost.call(self.link_uri, errmsg)
self.state = State.DISCONNECTED
def _link_quality_cb(self, percentage):
"""Called from link driver to report link quality"""
self.linkQuality.call(percentage)
def _check_for_initial_packet_cb(self, data):
"""
        Called when the first packet arrives from the Crazyflie.
This is used to determine if we are connected to something that is
answering.
"""
self.state = State.CONNECTED
self.connected.call(self.link_uri)
self.receivedPacket.remove_callback(self._check_for_initial_packet_cb)
def open_link(self, link_uri):
"""
Open the communication link to a copter at the given URI and setup the
connection (download log/parameter TOC).
"""
self.connectionInitiated.call(link_uri)
self.state = State.INITIALIZED
self.link_uri = link_uri
self._log_toc_updated = False
self._param_toc_updated = False
try:
self.link = cflib.crtp.get_link_driver(link_uri,
self._link_quality_cb,
self._link_error_cb)
            # Add a callback so we can check that any data is coming
# back from the copter
self.receivedPacket.add_callback(self._check_for_initial_packet_cb)
self._start_connection_setup()
except Exception as ex: # pylint: disable=W0703
# We want to catch every possible exception here and show
# it in the user interface
import traceback
logger.error("Couldn't load link driver: %s\n\n%s",
ex, traceback.format_exc())
exception_text = "Couldn't load link driver: %s\n\n%s" % (
ex, traceback.format_exc())
if self.link:
self.link.close()
self.link = None
self.connectionFailed.call(link_uri, exception_text)
def close_link(self):
"""Close the communication link."""
logger.info("Closing link")
if (self.link is not None):
self.commander.send_setpoint(0, 0, 0, 0, False)
if (self.link is not None):
self.link.close()
self.link = None
self.disconnected.call(self.link_uri)
def add_port_callback(self, port, cb):
"""Add a callback to cb on port"""
self.incoming.add_port_callback(port, cb)
def remove_port_callback(self, port, cb):
"""Remove the callback cb on port"""
self.incoming.remove_port_callback(port, cb)
def _no_answer_do_retry(self, pk):
"""Resend packets that we have not gotten answers to"""
logger.debug("ExpectAnswer: No answer on [%d], do retry", pk.port)
# Cancel timer before calling for retry to help bug hunting
old_timer = self.answer_timers[pk.port]
if (old_timer is not None):
old_timer.cancel()
self.send_packet(pk, True)
else:
logger.warning("ExpectAnswer: ERROR! Was doing retry but"
"timer was None")
def _check_for_answers(self, pk):
"""
Callback called for every packet received to check if we are
waiting for an answer on this port. If so, then cancel the retry
timer.
"""
try:
timer = self.answer_timers[pk.port]
if (timer is not None):
logger.debug("ExpectAnswer: Got answer back on port [%d]"
", cancelling timer", pk.port)
timer.cancel()
self.answer_timers[pk.port] = None
except KeyError:
# We are not waiting for any answer on this port, ignore..
pass
def send_packet(self, pk, expect_answer=False):
"""
Send a packet through the link interface.
pk -- Packet to send
expect_answer -- True if a packet from the Crazyflie is expected to
be sent back, otherwise false
"""
if (self.link is not None):
self.link.send_packet(pk)
if (expect_answer):
logger.debug("ExpectAnswer: Will expect answer on port [%d]",
pk.port)
new_timer = Timer(0.2, lambda: self._no_answer_do_retry(pk))
try:
old_timer = self.answer_timers[pk.port]
if (old_timer is not None):
old_timer.cancel()
# If we get here a second call has been made to send
# packet on this port before we have gotten the first
# one back. This is an error and might cause loss of
# packets!!
logger.warning("ExpectAnswer: ERROR! Older timer whas"
" running while scheduling new one on "
"[%d]", pk.port)
except KeyError:
pass
self.answer_timers[pk.port] = new_timer
new_timer.start()
class _IncomingPacketHandler(Thread):
"""Handles incoming packets and sends the data to the correct receivers"""
def __init__(self, cf):
Thread.__init__(self)
self.cf = cf
self.cb = []
def add_port_callback(self, port, cb):
"""Add a callback for data that comes on a specific port"""
logger.debug("Adding callback on port [%d] to [%s]", port, cb)
self.add_header_callback(cb, port, 0, 0xff, 0x0)
def remove_port_callback(self, port, cb):
"""Remove a callback for data that comes on a specific port"""
logger.debug("Removing callback on port [%d] to [%s]", port, cb)
for port_callback in self.cb:
if (port_callback[0] == port and port_callback[4] == cb):
self.cb.remove(port_callback)
def add_header_callback(self, cb, port, channel, port_mask=0xFF,
channel_mask=0xFF):
"""
Add a callback for a specific port/header callback with the
possibility to add a mask for channel and port for multiple
hits for same callback.
"""
self.cb.append([port, port_mask, channel, channel_mask, cb])
def run(self):
while(True):
if self.cf.link is None:
time.sleep(1)
continue
pk = self.cf.link.receive_packet(1)
if pk is None:
continue
#All-packet callbacks
self.cf.receivedPacket.call(pk)
found = False
for cb in self.cb:
if (cb[0] == pk.port & cb[1] and
cb[2] == pk.channel & cb[3]):
try:
cb[4](pk)
except Exception: # pylint: disable=W0703
# Disregard pylint warning since we want to catch all
# exceptions and we can't know what will happen in
# the callbacks.
import traceback
logger.warning("Exception while doing callback on port"
" [%d]\n\n%s", pk.port,
traceback.format_exc())
if (cb[0] != 0xFF):
found = True
if not found:
logger.warning("Got packet on header (%d,%d) but no callback "
"to handle it", pk.port, pk.channel)
| gpl-2.0 | 7,902,765,162,880,301,000 | 37.350282 | 79 | 0.560106 | false |
techtonik/numpy | numpy/ma/core.py | 1 | 233479 | """
numpy.ma : a package to handle missing or invalid values.
This package was initially written for numarray by Paul F. Dubois
at Lawrence Livermore National Laboratory.
In 2006, the package was completely rewritten by Pierre Gerard-Marchant
(University of Georgia) to make the MaskedArray class a subclass of ndarray,
and to improve support of structured arrays.
Copyright 1999, 2000, 2001 Regents of the University of California.
Released for unlimited redistribution.
* Adapted for numpy_core 2005 by Travis Oliphant and (mainly) Paul Dubois.
* Subclassing of the base `ndarray` 2006 by Pierre Gerard-Marchant
(pgmdevlist_AT_gmail_DOT_com)
* Improvements suggested by Reggie Dugard (reggie_AT_merfinllc_DOT_com)
.. moduleauthor:: Pierre Gerard-Marchant
"""
# pylint: disable-msg=E1002
from __future__ import division, absolute_import, print_function
import sys
import warnings
from functools import reduce
import numpy as np
import numpy.core.umath as umath
import numpy.core.numerictypes as ntypes
from numpy import ndarray, amax, amin, iscomplexobj, bool_
from numpy import array as narray
from numpy.lib.function_base import angle
from numpy.compat import getargspec, formatargspec, long, basestring
from numpy import expand_dims as n_expand_dims
if sys.version_info[0] >= 3:
import pickle
else:
import cPickle as pickle
__author__ = "Pierre GF Gerard-Marchant"
__docformat__ = "restructuredtext en"
__all__ = ['MAError', 'MaskError', 'MaskType', 'MaskedArray',
'bool_',
'abs', 'absolute', 'add', 'all', 'allclose', 'allequal', 'alltrue',
'amax', 'amin', 'angle', 'anom', 'anomalies', 'any', 'append', 'arange',
'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctan2',
'arctanh', 'argmax', 'argmin', 'argsort', 'around',
'array', 'asarray', 'asanyarray',
'bitwise_and', 'bitwise_or', 'bitwise_xor',
'ceil', 'choose', 'clip', 'common_fill_value', 'compress',
'compressed', 'concatenate', 'conjugate', 'copy', 'cos', 'cosh',
'count', 'cumprod', 'cumsum',
'default_fill_value', 'diag', 'diagonal', 'diff', 'divide', 'dump',
'dumps',
'empty', 'empty_like', 'equal', 'exp', 'expand_dims',
'fabs', 'flatten_mask', 'fmod', 'filled', 'floor', 'floor_divide',
'fix_invalid', 'flatten_structured_array', 'frombuffer', 'fromflex',
'fromfunction',
'getdata', 'getmask', 'getmaskarray', 'greater', 'greater_equal',
'harden_mask', 'hypot',
'identity', 'ids', 'indices', 'inner', 'innerproduct',
'isMA', 'isMaskedArray', 'is_mask', 'is_masked', 'isarray',
'left_shift', 'less', 'less_equal', 'load', 'loads', 'log', 'log2',
'log10', 'logical_and', 'logical_not', 'logical_or', 'logical_xor',
'make_mask', 'make_mask_descr', 'make_mask_none', 'mask_or',
'masked', 'masked_array', 'masked_equal', 'masked_greater',
'masked_greater_equal', 'masked_inside', 'masked_invalid',
'masked_less', 'masked_less_equal', 'masked_not_equal',
'masked_object', 'masked_outside', 'masked_print_option',
'masked_singleton', 'masked_values', 'masked_where', 'max', 'maximum',
'maximum_fill_value', 'mean', 'min', 'minimum', 'minimum_fill_value',
'mod', 'multiply', 'mvoid',
'negative', 'nomask', 'nonzero', 'not_equal',
'ones', 'outer', 'outerproduct',
'power', 'prod', 'product', 'ptp', 'put', 'putmask',
'rank', 'ravel', 'remainder', 'repeat', 'reshape', 'resize',
'right_shift', 'round_', 'round',
'set_fill_value', 'shape', 'sin', 'sinh', 'size', 'sometrue',
'sort', 'soften_mask', 'sqrt', 'squeeze', 'std', 'subtract', 'sum',
'swapaxes',
'take', 'tan', 'tanh', 'trace', 'transpose', 'true_divide',
'var', 'where',
'zeros']
MaskType = np.bool_
nomask = MaskType(0)
def doc_note(initialdoc, note):
"""
Adds a Notes section to an existing docstring.
"""
if initialdoc is None:
return
if note is None:
return initialdoc
newdoc = """
%s
Notes
-----
%s
"""
return newdoc % (initialdoc, note)
def get_object_signature(obj):
"""
Get the signature from obj
"""
try:
sig = formatargspec(*getargspec(obj))
except TypeError as errmsg:
sig = ''
# msg = "Unable to retrieve the signature of %s '%s'\n"\
# "(Initial error message: %s)"
# warnings.warn(msg % (type(obj),
# getattr(obj, '__name__', '???'),
# errmsg))
return sig
#####--------------------------------------------------------------------------
#---- --- Exceptions ---
#####--------------------------------------------------------------------------
class MAError(Exception):
"""Class for masked array related errors."""
pass
class MaskError(MAError):
"Class for mask related errors."
pass
#####--------------------------------------------------------------------------
#---- --- Filling options ---
#####--------------------------------------------------------------------------
# b: boolean - c: complex - f: floats - i: integer - O: object - S: string
default_filler = {'b': True,
'c' : 1.e20 + 0.0j,
'f' : 1.e20,
'i' : 999999,
'O' : '?',
'S' : 'N/A',
'u' : 999999,
'V' : '???',
'U' : 'N/A',
'M8[D]' : np.datetime64('NaT', 'D'),
'M8[us]' : np.datetime64('NaT', 'us')
}
max_filler = ntypes._minvals
max_filler.update([(k, -np.inf) for k in [np.float32, np.float64]])
min_filler = ntypes._maxvals
min_filler.update([(k, +np.inf) for k in [np.float32, np.float64]])
if 'float128' in ntypes.typeDict:
max_filler.update([(np.float128, -np.inf)])
min_filler.update([(np.float128, +np.inf)])
def default_fill_value(obj):
"""
Return the default fill value for the argument object.
The default filling value depends on the datatype of the input
array or the type of the input scalar:
======== ========
datatype default
======== ========
bool True
int 999999
float 1.e20
complex 1.e20+0j
object '?'
string 'N/A'
======== ========
Parameters
----------
obj : ndarray, dtype or scalar
The array data-type or scalar for which the default fill value
is returned.
Returns
-------
fill_value : scalar
The default fill value.
Examples
--------
>>> np.ma.default_fill_value(1)
999999
>>> np.ma.default_fill_value(np.array([1.1, 2., np.pi]))
1e+20
>>> np.ma.default_fill_value(np.dtype(complex))
(1e+20+0j)
"""
if hasattr(obj, 'dtype'):
defval = _check_fill_value(None, obj.dtype)
elif isinstance(obj, np.dtype):
if obj.subdtype:
defval = default_filler.get(obj.subdtype[0].kind, '?')
elif obj.kind == 'M':
defval = default_filler.get(obj.str[1:], '?')
else:
defval = default_filler.get(obj.kind, '?')
elif isinstance(obj, float):
defval = default_filler['f']
elif isinstance(obj, int) or isinstance(obj, long):
defval = default_filler['i']
elif isinstance(obj, str):
defval = default_filler['S']
elif isinstance(obj, unicode):
defval = default_filler['U']
elif isinstance(obj, complex):
defval = default_filler['c']
else:
defval = default_filler['O']
return defval
def _recursive_extremum_fill_value(ndtype, extremum):
names = ndtype.names
if names:
deflist = []
for name in names:
fval = _recursive_extremum_fill_value(ndtype[name], extremum)
deflist.append(fval)
return tuple(deflist)
return extremum[ndtype]
def minimum_fill_value(obj):
"""
Return the maximum value that can be represented by the dtype of an object.
This function is useful for calculating a fill value suitable for
taking the minimum of an array with a given dtype.
Parameters
----------
obj : ndarray or dtype
        An object that can be queried for its numeric type.
Returns
-------
val : scalar
The maximum representable value.
Raises
------
TypeError
If `obj` isn't a suitable numeric type.
See Also
--------
maximum_fill_value : The inverse function.
set_fill_value : Set the filling value of a masked array.
MaskedArray.fill_value : Return current fill value.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.int8()
>>> ma.minimum_fill_value(a)
127
>>> a = np.int32()
>>> ma.minimum_fill_value(a)
2147483647
An array of numeric data can also be passed.
>>> a = np.array([1, 2, 3], dtype=np.int8)
>>> ma.minimum_fill_value(a)
127
>>> a = np.array([1, 2, 3], dtype=np.float32)
>>> ma.minimum_fill_value(a)
inf
"""
errmsg = "Unsuitable type for calculating minimum."
if hasattr(obj, 'dtype'):
return _recursive_extremum_fill_value(obj.dtype, min_filler)
elif isinstance(obj, float):
return min_filler[ntypes.typeDict['float_']]
elif isinstance(obj, int):
return min_filler[ntypes.typeDict['int_']]
elif isinstance(obj, long):
return min_filler[ntypes.typeDict['uint']]
elif isinstance(obj, np.dtype):
return min_filler[obj]
else:
raise TypeError(errmsg)
def maximum_fill_value(obj):
"""
Return the minimum value that can be represented by the dtype of an object.
This function is useful for calculating a fill value suitable for
taking the maximum of an array with a given dtype.
Parameters
----------
obj : {ndarray, dtype}
        An object that can be queried for its numeric type.
Returns
-------
val : scalar
The minimum representable value.
Raises
------
TypeError
If `obj` isn't a suitable numeric type.
See Also
--------
minimum_fill_value : The inverse function.
set_fill_value : Set the filling value of a masked array.
MaskedArray.fill_value : Return current fill value.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.int8()
>>> ma.maximum_fill_value(a)
-128
>>> a = np.int32()
>>> ma.maximum_fill_value(a)
-2147483648
An array of numeric data can also be passed.
>>> a = np.array([1, 2, 3], dtype=np.int8)
>>> ma.maximum_fill_value(a)
-128
>>> a = np.array([1, 2, 3], dtype=np.float32)
>>> ma.maximum_fill_value(a)
-inf
"""
errmsg = "Unsuitable type for calculating maximum."
if hasattr(obj, 'dtype'):
return _recursive_extremum_fill_value(obj.dtype, max_filler)
elif isinstance(obj, float):
return max_filler[ntypes.typeDict['float_']]
elif isinstance(obj, int):
return max_filler[ntypes.typeDict['int_']]
elif isinstance(obj, long):
return max_filler[ntypes.typeDict['uint']]
elif isinstance(obj, np.dtype):
return max_filler[obj]
else:
raise TypeError(errmsg)
def _recursive_set_default_fill_value(dtypedescr):
deflist = []
for currentdescr in dtypedescr:
currenttype = currentdescr[1]
if isinstance(currenttype, list):
deflist.append(tuple(_recursive_set_default_fill_value(currenttype)))
else:
deflist.append(default_fill_value(np.dtype(currenttype)))
return tuple(deflist)
def _recursive_set_fill_value(fillvalue, dtypedescr):
fillvalue = np.resize(fillvalue, len(dtypedescr))
output_value = []
for (fval, descr) in zip(fillvalue, dtypedescr):
cdtype = descr[1]
if isinstance(cdtype, list):
output_value.append(tuple(_recursive_set_fill_value(fval, cdtype)))
else:
output_value.append(np.array(fval, dtype=cdtype).item())
return tuple(output_value)
def _check_fill_value(fill_value, ndtype):
"""
Private function validating the given `fill_value` for the given dtype.
If fill_value is None, it is set to the default corresponding to the dtype
    if the latter is standard (no fields). If the datatype is flexible (named
fields), fill_value is set to a tuple whose elements are the default fill
values corresponding to each field.
If fill_value is not None, its value is forced to the given dtype.
"""
ndtype = np.dtype(ndtype)
fields = ndtype.fields
if fill_value is None:
if fields:
descr = ndtype.descr
fill_value = np.array(_recursive_set_default_fill_value(descr),
dtype=ndtype,)
else:
fill_value = default_fill_value(ndtype)
elif fields:
fdtype = [(_[0], _[1]) for _ in ndtype.descr]
if isinstance(fill_value, (ndarray, np.void)):
try:
fill_value = np.array(fill_value, copy=False, dtype=fdtype)
except ValueError:
err_msg = "Unable to transform %s to dtype %s"
raise ValueError(err_msg % (fill_value, fdtype))
else:
descr = ndtype.descr
fill_value = np.asarray(fill_value, dtype=object)
fill_value = np.array(_recursive_set_fill_value(fill_value, descr),
dtype=ndtype)
else:
if isinstance(fill_value, basestring) and (ndtype.char not in 'OSVU'):
err_msg = "Cannot set fill value of string with array of dtype %s"
raise TypeError(err_msg % ndtype)
else:
# In case we want to convert 1e20 to int...
try:
fill_value = np.array(fill_value, copy=False, dtype=ndtype)
except OverflowError:
# Raise TypeError instead of OverflowError. OverflowError
# is seldom used, and the real problem here is that the
# passed fill_value is not compatible with the ndtype.
err_msg = "Fill value %s overflows dtype %s"
raise TypeError(err_msg % (fill_value, ndtype))
return np.array(fill_value)
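# Editor's sketch (not part of the original source): _check_fill_value either
# supplies the per-dtype default or coerces the candidate to the target dtype,
# turning an overflow into a TypeError.  Outputs indicative only; exact dtype
# strings are platform dependent.
# >>> _check_fill_value(None, int)
# array(999999)
# >>> _check_fill_value(None, [('a', int), ('b', float)])
# array((999999, 1e+20), dtype=[('a', '<i8'), ('b', '<f8')])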
def set_fill_value(a, fill_value):
"""
Set the filling value of a, if a is a masked array.
This function changes the fill value of the masked array `a` in place.
If `a` is not a masked array, the function returns silently, without
doing anything.
Parameters
----------
a : array_like
Input array.
fill_value : dtype
Filling value. A consistency test is performed to make sure
the value is compatible with the dtype of `a`.
Returns
-------
None
Nothing returned by this function.
See Also
--------
maximum_fill_value : Return the default fill value for a dtype.
MaskedArray.fill_value : Return current fill value.
MaskedArray.set_fill_value : Equivalent method.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(5)
>>> a
array([0, 1, 2, 3, 4])
>>> a = ma.masked_where(a < 3, a)
>>> a
masked_array(data = [-- -- -- 3 4],
mask = [ True True True False False],
fill_value=999999)
>>> ma.set_fill_value(a, -999)
>>> a
masked_array(data = [-- -- -- 3 4],
mask = [ True True True False False],
fill_value=-999)
Nothing happens if `a` is not a masked array.
>>> a = range(5)
>>> a
[0, 1, 2, 3, 4]
>>> ma.set_fill_value(a, 100)
>>> a
[0, 1, 2, 3, 4]
>>> a = np.arange(5)
>>> a
array([0, 1, 2, 3, 4])
>>> ma.set_fill_value(a, 100)
>>> a
array([0, 1, 2, 3, 4])
"""
if isinstance(a, MaskedArray):
a.set_fill_value(fill_value)
return
def get_fill_value(a):
"""
Return the filling value of a, if any. Otherwise, returns the
default filling value for that type.
"""
if isinstance(a, MaskedArray):
result = a.fill_value
else:
result = default_fill_value(a)
return result
def common_fill_value(a, b):
"""
Return the common filling value of two masked arrays, if any.
If ``a.fill_value == b.fill_value``, return the fill value,
otherwise return None.
Parameters
----------
a, b : MaskedArray
The masked arrays for which to compare fill values.
Returns
-------
fill_value : scalar or None
The common fill value, or None.
Examples
--------
>>> x = np.ma.array([0, 1.], fill_value=3)
>>> y = np.ma.array([0, 1.], fill_value=3)
>>> np.ma.common_fill_value(x, y)
3.0
"""
t1 = get_fill_value(a)
t2 = get_fill_value(b)
if t1 == t2:
return t1
return None
#####--------------------------------------------------------------------------
def filled(a, fill_value=None):
"""
Return input as an array with masked data replaced by a fill value.
If `a` is not a `MaskedArray`, `a` itself is returned.
If `a` is a `MaskedArray` and `fill_value` is None, `fill_value` is set to
``a.fill_value``.
Parameters
----------
a : MaskedArray or array_like
An input object.
fill_value : scalar, optional
Filling value. Default is None.
Returns
-------
a : ndarray
The filled array.
See Also
--------
compressed
Examples
--------
>>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0],
... [1, 0, 0],
... [0, 0, 0]])
>>> x.filled()
array([[999999, 1, 2],
[999999, 4, 5],
[ 6, 7, 8]])
"""
if hasattr(a, 'filled'):
return a.filled(fill_value)
elif isinstance(a, ndarray):
# Should we check for contiguity ? and a.flags['CONTIGUOUS']:
return a
elif isinstance(a, dict):
return np.array(a, 'O')
else:
return np.array(a)
#####--------------------------------------------------------------------------
def get_masked_subclass(*arrays):
"""
Return the youngest subclass of MaskedArray from a list of (masked) arrays.
In case of siblings, the first listed takes over.
"""
if len(arrays) == 1:
arr = arrays[0]
if isinstance(arr, MaskedArray):
rcls = type(arr)
else:
rcls = MaskedArray
else:
arrcls = [type(a) for a in arrays]
rcls = arrcls[0]
if not issubclass(rcls, MaskedArray):
rcls = MaskedArray
for cls in arrcls[1:]:
if issubclass(cls, rcls):
rcls = cls
# Don't return MaskedConstant as result: revert to MaskedArray
if rcls.__name__ == 'MaskedConstant':
return MaskedArray
return rcls
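# Editor's sketch (not part of the original source): the most derived masked
# subclass wins.  `_MySubArray` is a hypothetical class name used only for
# illustration.
# >>> class _MySubArray(MaskedArray): pass
# >>> get_masked_subclass(np.arange(3), _MySubArray(np.arange(3))) is _MySubArray
# True
# >>> get_masked_subclass(np.arange(3)) is MaskedArray
# True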
#####--------------------------------------------------------------------------
def getdata(a, subok=True):
"""
Return the data of a masked array as an ndarray.
Return the data of `a` (if any) as an ndarray if `a` is a ``MaskedArray``,
else return `a` as a ndarray or subclass (depending on `subok`) if not.
Parameters
----------
a : array_like
Input ``MaskedArray``, alternatively a ndarray or a subclass thereof.
subok : bool
Whether to force the output to be a `pure` ndarray (False) or to
return a subclass of ndarray if appropriate (True, default).
See Also
--------
getmask : Return the mask of a masked array, or nomask.
getmaskarray : Return the mask of a masked array, or full array of False.
Examples
--------
>>> import numpy.ma as ma
>>> a = ma.masked_equal([[1,2],[3,4]], 2)
>>> a
masked_array(data =
[[1 --]
[3 4]],
mask =
[[False True]
[False False]],
fill_value=999999)
>>> ma.getdata(a)
array([[1, 2],
[3, 4]])
Equivalently use the ``MaskedArray`` `data` attribute.
>>> a.data
array([[1, 2],
[3, 4]])
"""
try:
data = a._data
except AttributeError:
data = np.array(a, copy=False, subok=subok)
if not subok:
return data.view(ndarray)
return data
get_data = getdata
def fix_invalid(a, mask=nomask, copy=True, fill_value=None):
"""
Return input with invalid data masked and replaced by a fill value.
Invalid data means values of `nan`, `inf`, etc.
Parameters
----------
a : array_like
Input array, a (subclass of) ndarray.
copy : bool, optional
Whether to use a copy of `a` (True) or to fix `a` in place (False).
Default is True.
fill_value : scalar, optional
Value used for fixing invalid data. Default is None, in which case
the ``a.fill_value`` is used.
Returns
-------
b : MaskedArray
The input array with invalid entries fixed.
Notes
-----
A copy is performed by default.
Examples
--------
>>> x = np.ma.array([1., -1, np.nan, np.inf], mask=[1] + [0]*3)
>>> x
masked_array(data = [-- -1.0 nan inf],
mask = [ True False False False],
fill_value = 1e+20)
>>> np.ma.fix_invalid(x)
masked_array(data = [-- -1.0 -- --],
mask = [ True False True True],
fill_value = 1e+20)
>>> fixed = np.ma.fix_invalid(x)
>>> fixed.data
array([ 1.00000000e+00, -1.00000000e+00, 1.00000000e+20,
1.00000000e+20])
>>> x.data
array([ 1., -1., NaN, Inf])
"""
a = masked_array(a, copy=copy, mask=mask, subok=True)
#invalid = (numpy.isnan(a._data) | numpy.isinf(a._data))
invalid = np.logical_not(np.isfinite(a._data))
if not invalid.any():
return a
a._mask |= invalid
if fill_value is None:
fill_value = a.fill_value
a._data[invalid] = fill_value
return a
#####--------------------------------------------------------------------------
#---- --- Ufuncs ---
#####--------------------------------------------------------------------------
ufunc_domain = {}
ufunc_fills = {}
class _DomainCheckInterval:
"""
Define a valid interval, so that :
``domain_check_interval(a,b)(x) == True`` where
``x < a`` or ``x > b``.
"""
def __init__(self, a, b):
"domain_check_interval(a,b)(x) = true where x < a or x > b"
if (a > b):
(a, b) = (b, a)
self.a = a
self.b = b
def __call__ (self, x):
"Execute the call behavior."
return umath.logical_or(umath.greater (x, self.b),
umath.less(x, self.a))
class _DomainTan:
"""Define a valid interval for the `tan` function, so that:
``domain_tan(eps) = True`` where ``abs(cos(x)) < eps``
"""
def __init__(self, eps):
"domain_tan(eps) = true where abs(cos(x)) < eps"
self.eps = eps
def __call__ (self, x):
"Executes the call behavior."
return umath.less(umath.absolute(umath.cos(x)), self.eps)
class _DomainSafeDivide:
"""Define a domain for safe division."""
def __init__ (self, tolerance=None):
self.tolerance = tolerance
def __call__ (self, a, b):
# Delay the selection of the tolerance to here in order to reduce numpy
# import times. The calculation of these parameters is a substantial
# component of numpy's import time.
if self.tolerance is None:
self.tolerance = np.finfo(float).tiny
return umath.absolute(a) * self.tolerance >= umath.absolute(b)
class _DomainGreater:
"""DomainGreater(v)(x) is True where x <= v."""
def __init__(self, critical_value):
"DomainGreater(v)(x) = true where x <= v"
self.critical_value = critical_value
def __call__ (self, x):
"Executes the call behavior."
return umath.less_equal(x, self.critical_value)
class _DomainGreaterEqual:
"""DomainGreaterEqual(v)(x) is True where x < v."""
def __init__(self, critical_value):
"DomainGreaterEqual(v)(x) = true where x < v"
self.critical_value = critical_value
def __call__ (self, x):
"Executes the call behavior."
return umath.less(x, self.critical_value)
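# Editor's sketch (not part of the original source): each _Domain* helper
# returns True where its argument falls OUTSIDE the valid domain, i.e. where
# the corresponding ufunc result must be masked.  Output formatting follows
# the old bool-array repr and is indicative only.
# >>> _DomainCheckInterval(-1.0, 1.0)(np.array([-2.0, 0.5, 3.0]))
# array([ True, False,  True], dtype=bool)
# >>> _DomainSafeDivide()(np.array([1.0, 1.0]), np.array([0.0, 2.0]))
# array([ True, False], dtype=bool)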
#..............................................................................
class _MaskedUnaryOperation:
"""
Defines masked version of unary operations, where invalid values are
pre-masked.
Parameters
----------
mufunc : callable
The function for which to define a masked version. Made available
as ``_MaskedUnaryOperation.f``.
fill : scalar, optional
Filling value, default is 0.
domain : class instance
Domain for the function. Should be one of the ``_Domain*``
classes. Default is None.
"""
def __init__ (self, mufunc, fill=0, domain=None):
""" _MaskedUnaryOperation(aufunc, fill=0, domain=None)
aufunc(fill) must be defined
self(x) returns aufunc(x)
with masked values where domain(x) is true or getmask(x) is true.
"""
self.f = mufunc
self.fill = fill
self.domain = domain
self.__doc__ = getattr(mufunc, "__doc__", str(mufunc))
self.__name__ = getattr(mufunc, "__name__", str(mufunc))
ufunc_domain[mufunc] = domain
ufunc_fills[mufunc] = fill
#
def __call__ (self, a, *args, **kwargs):
"Execute the call behavior."
d = getdata(a)
# Case 1.1. : Domained function
if self.domain is not None:
with np.errstate():
np.seterr(divide='ignore', invalid='ignore')
result = self.f(d, *args, **kwargs)
# Make a mask
m = ~umath.isfinite(result)
m |= self.domain(d)
m |= getmask(a)
# Case 1.2. : Function without a domain
else:
# Get the result and the mask
result = self.f(d, *args, **kwargs)
m = getmask(a)
# Case 2.1. : The result is scalar
if not result.ndim:
if m:
return masked
return result
# Case 2.2. The result is an array
# We need to fill the invalid data back w/ the input
# Now, that's plain silly: in C, we would just skip the element and keep
# the original, but we do have to do it that way in Python
if m is not nomask:
# In case result has a lower dtype than the inputs (as in equal)
try:
np.copyto(result, d, where=m)
except TypeError:
pass
# Transform to a (subclass of) MaskedArray
if isinstance(a, MaskedArray):
subtype = type(a)
else:
subtype = MaskedArray
result = result.view(subtype)
result._mask = m
result._update_from(a)
return result
#
def __str__ (self):
return "Masked version of %s. [Invalid values are masked]" % str(self.f)
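# Editor's sketch (not part of the original source): a domained unary
# operation masks both pre-masked entries and entries outside the domain.
# Using the module-level `log` defined further below; output abbreviated.
# >>> x = masked_array([1.0, -1.0, 7.4], mask=[0, 0, 1])
# >>> log(x)
# masked_array(data = [0.0 -- --],
#              mask = [False  True  True],
#        fill_value = 1e+20)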
class _MaskedBinaryOperation:
"""
Define masked version of binary operations, where invalid
values are pre-masked.
Parameters
----------
mbfunc : function
The function for which to define a masked version. Made available
as ``_MaskedBinaryOperation.f``.
domain : class instance
Default domain for the function. Should be one of the ``_Domain*``
classes. Default is None.
fillx : scalar, optional
Filling value for the first argument, default is 0.
filly : scalar, optional
Filling value for the second argument, default is 0.
"""
def __init__ (self, mbfunc, fillx=0, filly=0):
"""abfunc(fillx, filly) must be defined.
abfunc(x, filly) = x for all x to enable reduce.
"""
self.f = mbfunc
self.fillx = fillx
self.filly = filly
self.__doc__ = getattr(mbfunc, "__doc__", str(mbfunc))
self.__name__ = getattr(mbfunc, "__name__", str(mbfunc))
ufunc_domain[mbfunc] = None
ufunc_fills[mbfunc] = (fillx, filly)
def __call__ (self, a, b, *args, **kwargs):
"Execute the call behavior."
# Get the data, as ndarray
(da, db) = (getdata(a, subok=False), getdata(b, subok=False))
# Get the mask
(ma, mb) = (getmask(a), getmask(b))
if ma is nomask:
if mb is nomask:
m = nomask
else:
m = umath.logical_or(getmaskarray(a), mb)
elif mb is nomask:
m = umath.logical_or(ma, getmaskarray(b))
else:
m = umath.logical_or(ma, mb)
# Get the result
with np.errstate():
np.seterr(divide='ignore', invalid='ignore')
result = self.f(da, db, *args, **kwargs)
# check it worked
if result is NotImplemented:
return NotImplemented
# Case 1. : scalar
if not result.ndim:
if m:
return masked
return result
# Case 2. : array
# Revert result to da where masked
if m.any():
np.copyto(result, 0, casting='unsafe', where=m)
# This only makes sense if the operation preserved the dtype
if result.dtype == da.dtype:
result += m * da
# Transforms to a (subclass of) MaskedArray
result = result.view(get_masked_subclass(a, b))
result._mask = m
# Update the optional info from the inputs
if isinstance(b, MaskedArray):
if isinstance(a, MaskedArray):
result._update_from(a)
else:
result._update_from(b)
elif isinstance(a, MaskedArray):
result._update_from(a)
return result
def reduce(self, target, axis=0, dtype=None):
"""Reduce `target` along the given `axis`."""
if isinstance(target, MaskedArray):
tclass = type(target)
else:
tclass = MaskedArray
m = getmask(target)
t = filled(target, self.filly)
if t.shape == ():
t = t.reshape(1)
if m is not nomask:
m = make_mask(m, copy=1)
m.shape = (1,)
if m is nomask:
return self.f.reduce(t, axis).view(tclass)
t = t.view(tclass)
t._mask = m
tr = self.f.reduce(getdata(t), axis, dtype=dtype or t.dtype)
mr = umath.logical_and.reduce(m, axis)
tr = tr.view(tclass)
if mr.ndim > 0:
tr._mask = mr
return tr
elif mr:
return masked
return tr
def outer (self, a, b):
"""Return the function applied to the outer product of a and b.
"""
ma = getmask(a)
mb = getmask(b)
if ma is nomask and mb is nomask:
m = nomask
else:
ma = getmaskarray(a)
mb = getmaskarray(b)
m = umath.logical_or.outer(ma, mb)
if (not m.ndim) and m:
return masked
(da, db) = (getdata(a), getdata(b))
d = self.f.outer(da, db)
# check it worked
if d is NotImplemented:
return NotImplemented
if m is not nomask:
np.copyto(d, da, where=m)
if d.shape:
d = d.view(get_masked_subclass(a, b))
d._mask = m
return d
def accumulate (self, target, axis=0):
"""Accumulate `target` along `axis` after filling with y fill
value.
"""
if isinstance(target, MaskedArray):
tclass = type(target)
else:
tclass = MaskedArray
t = filled(target, self.filly)
return self.f.accumulate(t, axis).view(tclass)
def __str__ (self):
return "Masked version of " + str(self.f)
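# Editor's sketch (not part of the original source): a masked binary
# operation ORs the input masks, so a result element is masked as soon as
# either operand is.  Using the module-level `add` defined further below;
# output abbreviated.
# >>> a = masked_array([1, 2, 3], mask=[0, 1, 0])
# >>> b = masked_array([10, 20, 30], mask=[0, 0, 1])
# >>> add(a, b)
# masked_array(data = [11 -- --],
#              mask = [False  True  True],
#        fill_value = 999999)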
class _DomainedBinaryOperation:
"""
Define binary operations that have a domain, like divide.
They have no reduce, outer or accumulate.
Parameters
----------
mbfunc : function
The function for which to define a masked version. Made available
as ``_DomainedBinaryOperation.f``.
domain : class instance
Default domain for the function. Should be one of the ``_Domain*``
classes.
fillx : scalar, optional
Filling value for the first argument, default is 0.
filly : scalar, optional
Filling value for the second argument, default is 0.
"""
def __init__ (self, dbfunc, domain, fillx=0, filly=0):
"""abfunc(fillx, filly) must be defined.
abfunc(x, filly) = x for all x to enable reduce.
"""
self.f = dbfunc
self.domain = domain
self.fillx = fillx
self.filly = filly
self.__doc__ = getattr(dbfunc, "__doc__", str(dbfunc))
self.__name__ = getattr(dbfunc, "__name__", str(dbfunc))
ufunc_domain[dbfunc] = domain
ufunc_fills[dbfunc] = (fillx, filly)
def __call__(self, a, b, *args, **kwargs):
"Execute the call behavior."
# Get the data and the mask
(da, db) = (getdata(a, subok=False), getdata(b, subok=False))
(ma, mb) = (getmask(a), getmask(b))
# Get the result
with np.errstate():
np.seterr(divide='ignore', invalid='ignore')
result = self.f(da, db, *args, **kwargs)
# check it worked
if result is NotImplemented:
return NotImplemented
# Get the mask as a combination of ma, mb and invalid
m = ~umath.isfinite(result)
m |= ma
m |= mb
# Apply the domain
domain = ufunc_domain.get(self.f, None)
if domain is not None:
m |= filled(domain(da, db), True)
# Take care of the scalar case first
if (not m.ndim):
if m:
return masked
else:
return result
# When the mask is True, put back da
np.copyto(result, 0, casting='unsafe', where=m)
result += m * da
result = result.view(get_masked_subclass(a, b))
result._mask = m
if isinstance(b, MaskedArray):
if isinstance(a, MaskedArray):
result._update_from(a)
else:
result._update_from(b)
elif isinstance(a, MaskedArray):
result._update_from(a)
return result
def __str__ (self):
return "Masked version of " + str(self.f)
#..............................................................................
# Unary ufuncs
exp = _MaskedUnaryOperation(umath.exp)
conjugate = _MaskedUnaryOperation(umath.conjugate)
sin = _MaskedUnaryOperation(umath.sin)
cos = _MaskedUnaryOperation(umath.cos)
tan = _MaskedUnaryOperation(umath.tan)
arctan = _MaskedUnaryOperation(umath.arctan)
arcsinh = _MaskedUnaryOperation(umath.arcsinh)
sinh = _MaskedUnaryOperation(umath.sinh)
cosh = _MaskedUnaryOperation(umath.cosh)
tanh = _MaskedUnaryOperation(umath.tanh)
abs = absolute = _MaskedUnaryOperation(umath.absolute)
angle = _MaskedUnaryOperation(angle) # from numpy.lib.function_base
fabs = _MaskedUnaryOperation(umath.fabs)
negative = _MaskedUnaryOperation(umath.negative)
floor = _MaskedUnaryOperation(umath.floor)
ceil = _MaskedUnaryOperation(umath.ceil)
around = _MaskedUnaryOperation(np.round_)
logical_not = _MaskedUnaryOperation(umath.logical_not)
# Domained unary ufuncs .......................................................
sqrt = _MaskedUnaryOperation(umath.sqrt, 0.0,
_DomainGreaterEqual(0.0))
log = _MaskedUnaryOperation(umath.log, 1.0,
_DomainGreater(0.0))
log2 = _MaskedUnaryOperation(umath.log2, 1.0,
_DomainGreater(0.0))
log10 = _MaskedUnaryOperation(umath.log10, 1.0,
_DomainGreater(0.0))
tan = _MaskedUnaryOperation(umath.tan, 0.0,
_DomainTan(1e-35))
arcsin = _MaskedUnaryOperation(umath.arcsin, 0.0,
_DomainCheckInterval(-1.0, 1.0))
arccos = _MaskedUnaryOperation(umath.arccos, 0.0,
_DomainCheckInterval(-1.0, 1.0))
arccosh = _MaskedUnaryOperation(umath.arccosh, 1.0,
_DomainGreaterEqual(1.0))
arctanh = _MaskedUnaryOperation(umath.arctanh, 0.0,
_DomainCheckInterval(-1.0 + 1e-15, 1.0 - 1e-15))
# Binary ufuncs ...............................................................
add = _MaskedBinaryOperation(umath.add)
subtract = _MaskedBinaryOperation(umath.subtract)
multiply = _MaskedBinaryOperation(umath.multiply, 1, 1)
arctan2 = _MaskedBinaryOperation(umath.arctan2, 0.0, 1.0)
equal = _MaskedBinaryOperation(umath.equal)
equal.reduce = None
not_equal = _MaskedBinaryOperation(umath.not_equal)
not_equal.reduce = None
less_equal = _MaskedBinaryOperation(umath.less_equal)
less_equal.reduce = None
greater_equal = _MaskedBinaryOperation(umath.greater_equal)
greater_equal.reduce = None
less = _MaskedBinaryOperation(umath.less)
less.reduce = None
greater = _MaskedBinaryOperation(umath.greater)
greater.reduce = None
logical_and = _MaskedBinaryOperation(umath.logical_and)
alltrue = _MaskedBinaryOperation(umath.logical_and, 1, 1).reduce
logical_or = _MaskedBinaryOperation(umath.logical_or)
sometrue = logical_or.reduce
logical_xor = _MaskedBinaryOperation(umath.logical_xor)
bitwise_and = _MaskedBinaryOperation(umath.bitwise_and)
bitwise_or = _MaskedBinaryOperation(umath.bitwise_or)
bitwise_xor = _MaskedBinaryOperation(umath.bitwise_xor)
hypot = _MaskedBinaryOperation(umath.hypot)
# Domained binary ufuncs ......................................................
divide = _DomainedBinaryOperation(umath.divide, _DomainSafeDivide(), 0, 1)
true_divide = _DomainedBinaryOperation(umath.true_divide,
_DomainSafeDivide(), 0, 1)
floor_divide = _DomainedBinaryOperation(umath.floor_divide,
_DomainSafeDivide(), 0, 1)
remainder = _DomainedBinaryOperation(umath.remainder,
_DomainSafeDivide(), 0, 1)
fmod = _DomainedBinaryOperation(umath.fmod, _DomainSafeDivide(), 0, 1)
mod = _DomainedBinaryOperation(umath.mod, _DomainSafeDivide(), 0, 1)
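# Editor's sketch (not part of the original source): the domained divisions
# mask positions where the denominator is (close to) zero instead of raising
# a warning or returning inf.  Output abbreviated.
# >>> divide(np.array([1.0, 2.0, 3.0]), np.array([1.0, 0.0, 3.0]))
# masked_array(data = [1.0 -- 1.0],
#              mask = [False  True False],
#        fill_value = 1e+20)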
#####--------------------------------------------------------------------------
#---- --- Mask creation functions ---
#####--------------------------------------------------------------------------
def _recursive_make_descr(datatype, newtype=bool_):
"Private function allowing recursion in make_descr."
# Do we have some name fields ?
if datatype.names:
descr = []
for name in datatype.names:
field = datatype.fields[name]
if len(field) == 3:
# Prepend the title to the name
name = (field[-1], name)
descr.append((name, _recursive_make_descr(field[0], newtype)))
return descr
# Is this some kind of composite a la (np.float,2)
elif datatype.subdtype:
mdescr = list(datatype.subdtype)
mdescr[0] = newtype
return tuple(mdescr)
else:
return newtype
def make_mask_descr(ndtype):
"""
Construct a dtype description list from a given dtype.
Returns a new dtype object, with the type of all fields in `ndtype` to a
boolean type. Field names are not altered.
Parameters
----------
ndtype : dtype
The dtype to convert.
Returns
-------
result : dtype
A dtype that looks like `ndtype`, the type of all fields is boolean.
Examples
--------
>>> import numpy.ma as ma
>>> dtype = np.dtype({'names':['foo', 'bar'],
'formats':[np.float32, np.int]})
>>> dtype
dtype([('foo', '<f4'), ('bar', '<i4')])
>>> ma.make_mask_descr(dtype)
dtype([('foo', '|b1'), ('bar', '|b1')])
>>> ma.make_mask_descr(np.float32)
<type 'numpy.bool_'>
"""
# Make sure we do have a dtype
if not isinstance(ndtype, np.dtype):
ndtype = np.dtype(ndtype)
return np.dtype(_recursive_make_descr(ndtype, np.bool))
def getmask(a):
"""
Return the mask of a masked array, or nomask.
Return the mask of `a` as an ndarray if `a` is a `MaskedArray` and the
mask is not `nomask`, else return `nomask`. To guarantee a full array
of booleans of the same shape as `a`, use `getmaskarray`.
Parameters
----------
a : array_like
Input `MaskedArray` for which the mask is required.
See Also
--------
getdata : Return the data of a masked array as an ndarray.
getmaskarray : Return the mask of a masked array, or full array of False.
Examples
--------
>>> import numpy.ma as ma
>>> a = ma.masked_equal([[1,2],[3,4]], 2)
>>> a
masked_array(data =
[[1 --]
[3 4]],
mask =
[[False True]
[False False]],
fill_value=999999)
>>> ma.getmask(a)
array([[False, True],
[False, False]], dtype=bool)
Equivalently use the `MaskedArray` `mask` attribute.
>>> a.mask
array([[False, True],
[False, False]], dtype=bool)
Result when mask == `nomask`
>>> b = ma.masked_array([[1,2],[3,4]])
>>> b
masked_array(data =
[[1 2]
[3 4]],
mask =
False,
fill_value=999999)
>>> ma.nomask
False
>>> ma.getmask(b) == ma.nomask
True
>>> b.mask == ma.nomask
True
"""
return getattr(a, '_mask', nomask)
get_mask = getmask
def getmaskarray(arr):
"""
Return the mask of a masked array, or full boolean array of False.
Return the mask of `arr` as an ndarray if `arr` is a `MaskedArray` and
the mask is not `nomask`, else return a full boolean array of False of
the same shape as `arr`.
Parameters
----------
arr : array_like
Input `MaskedArray` for which the mask is required.
See Also
--------
getmask : Return the mask of a masked array, or nomask.
getdata : Return the data of a masked array as an ndarray.
Examples
--------
>>> import numpy.ma as ma
>>> a = ma.masked_equal([[1,2],[3,4]], 2)
>>> a
masked_array(data =
[[1 --]
[3 4]],
mask =
[[False True]
[False False]],
fill_value=999999)
>>> ma.getmaskarray(a)
array([[False, True],
[False, False]], dtype=bool)
Result when mask == ``nomask``
>>> b = ma.masked_array([[1,2],[3,4]])
>>> b
masked_array(data =
[[1 2]
[3 4]],
mask =
False,
fill_value=999999)
>>> ma.getmaskarray(b)
array([[False, False],
[False, False]], dtype=bool)
"""
mask = getmask(arr)
if mask is nomask:
mask = make_mask_none(np.shape(arr), getdata(arr).dtype)
return mask
def is_mask(m):
"""
Return True if m is a valid, standard mask.
This function does not check the contents of the input, only that the
type is MaskType. In particular, this function returns False if the
mask has a flexible dtype.
Parameters
----------
m : array_like
Array to test.
Returns
-------
result : bool
True if `m.dtype.type` is MaskType, False otherwise.
See Also
--------
isMaskedArray : Test whether input is an instance of MaskedArray.
Examples
--------
>>> import numpy.ma as ma
>>> m = ma.masked_equal([0, 1, 0, 2, 3], 0)
>>> m
masked_array(data = [-- 1 -- 2 3],
mask = [ True False True False False],
fill_value=999999)
>>> ma.is_mask(m)
False
>>> ma.is_mask(m.mask)
True
Input must be an ndarray (or have similar attributes)
for it to be considered a valid mask.
>>> m = [False, True, False]
>>> ma.is_mask(m)
False
>>> m = np.array([False, True, False])
>>> m
array([False, True, False], dtype=bool)
>>> ma.is_mask(m)
True
Arrays with complex dtypes don't return True.
>>> dtype = np.dtype({'names':['monty', 'pithon'],
'formats':[np.bool, np.bool]})
>>> dtype
dtype([('monty', '|b1'), ('pithon', '|b1')])
>>> m = np.array([(True, False), (False, True), (True, False)],
dtype=dtype)
>>> m
array([(True, False), (False, True), (True, False)],
dtype=[('monty', '|b1'), ('pithon', '|b1')])
>>> ma.is_mask(m)
False
"""
try:
return m.dtype.type is MaskType
except AttributeError:
return False
def make_mask(m, copy=False, shrink=True, dtype=MaskType):
"""
Create a boolean mask from an array.
Return `m` as a boolean mask, creating a copy if necessary or requested.
The function can accept any sequence that is convertible to integers,
or ``nomask``. The contents are not required to be 0s and 1s: values
of 0 are interpreted as False, everything else as True.
Parameters
----------
m : array_like
Potential mask.
copy : bool, optional
Whether to return a copy of `m` (True) or `m` itself (False).
shrink : bool, optional
Whether to shrink `m` to ``nomask`` if all its values are False.
dtype : dtype, optional
Data-type of the output mask. By default, the output mask has
a dtype of MaskType (bool). If the dtype is flexible, each field
has a boolean dtype.
Returns
-------
result : ndarray
A boolean mask derived from `m`.
Examples
--------
>>> import numpy.ma as ma
>>> m = [True, False, True, True]
>>> ma.make_mask(m)
array([ True, False, True, True], dtype=bool)
>>> m = [1, 0, 1, 1]
>>> ma.make_mask(m)
array([ True, False, True, True], dtype=bool)
>>> m = [1, 0, 2, -3]
>>> ma.make_mask(m)
array([ True, False, True, True], dtype=bool)
Effect of the `shrink` parameter.
>>> m = np.zeros(4)
>>> m
array([ 0., 0., 0., 0.])
>>> ma.make_mask(m)
False
>>> ma.make_mask(m, shrink=False)
array([False, False, False, False], dtype=bool)
Using a flexible `dtype`.
>>> m = [1, 0, 1, 1]
>>> n = [0, 1, 0, 0]
>>> arr = []
>>> for man, mouse in zip(m, n):
... arr.append((man, mouse))
>>> arr
[(1, 0), (0, 1), (1, 0), (1, 0)]
>>> dtype = np.dtype({'names':['man', 'mouse'],
'formats':[np.int, np.int]})
>>> arr = np.array(arr, dtype=dtype)
>>> arr
array([(1, 0), (0, 1), (1, 0), (1, 0)],
dtype=[('man', '<i4'), ('mouse', '<i4')])
>>> ma.make_mask(arr, dtype=dtype)
array([(True, False), (False, True), (True, False), (True, False)],
dtype=[('man', '|b1'), ('mouse', '|b1')])
"""
if m is nomask:
return nomask
elif isinstance(m, ndarray):
# We won't return after this point to make sure we can shrink the mask
# Fill the mask in case there are missing data
m = filled(m, True)
# Make sure the input dtype is valid
dtype = make_mask_descr(dtype)
if m.dtype == dtype:
if copy:
result = m.copy()
else:
result = m
else:
result = np.array(m, dtype=dtype, copy=copy)
else:
result = np.array(filled(m, True), dtype=MaskType)
# Down with the masks! (shrink to nomask if nothing is actually masked)
if shrink and (not result.dtype.names) and (not result.any()):
return nomask
else:
return result
def make_mask_none(newshape, dtype=None):
"""
Return a boolean mask of the given shape, filled with False.
This function returns a boolean ndarray with all entries False, that can
be used in common mask manipulations. If a structured dtype is specified, the
type of each field is converted to a boolean type.
Parameters
----------
newshape : tuple
A tuple indicating the shape of the mask.
dtype : {None, dtype}, optional
If None, use a MaskType instance. Otherwise, use a new datatype with
the same fields as `dtype`, converted to boolean types.
Returns
-------
result : ndarray
An ndarray of appropriate shape and dtype, filled with False.
See Also
--------
make_mask : Create a boolean mask from an array.
make_mask_descr : Construct a dtype description list from a given dtype.
Examples
--------
>>> import numpy.ma as ma
>>> ma.make_mask_none((3,))
array([False, False, False], dtype=bool)
Defining a more complex dtype.
>>> dtype = np.dtype({'names':['foo', 'bar'],
'formats':[np.float32, np.int]})
>>> dtype
dtype([('foo', '<f4'), ('bar', '<i4')])
>>> ma.make_mask_none((3,), dtype=dtype)
array([(False, False), (False, False), (False, False)],
dtype=[('foo', '|b1'), ('bar', '|b1')])
"""
if dtype is None:
result = np.zeros(newshape, dtype=MaskType)
else:
result = np.zeros(newshape, dtype=make_mask_descr(dtype))
return result
def mask_or (m1, m2, copy=False, shrink=True):
"""
Combine two masks with the ``logical_or`` operator.
The result may be a view on `m1` or `m2` if the other is `nomask`
(i.e. False).
Parameters
----------
m1, m2 : array_like
Input masks.
copy : bool, optional
If copy is False and one of the inputs is `nomask`, return a view
of the other input mask. Defaults to False.
shrink : bool, optional
Whether to shrink the output to `nomask` if all its values are
False. Defaults to True.
Returns
-------
mask : output mask
The result masks values that are masked in either `m1` or `m2`.
Raises
------
ValueError
If `m1` and `m2` have different flexible dtypes.
Examples
--------
>>> m1 = np.ma.make_mask([0, 1, 1, 0])
>>> m2 = np.ma.make_mask([1, 0, 0, 0])
>>> np.ma.mask_or(m1, m2)
array([ True, True, True, False], dtype=bool)
"""
def _recursive_mask_or(m1, m2, newmask):
names = m1.dtype.names
for name in names:
current1 = m1[name]
if current1.dtype.names:
_recursive_mask_or(current1, m2[name], newmask[name])
else:
umath.logical_or(current1, m2[name], newmask[name])
return
#
if (m1 is nomask) or (m1 is False):
dtype = getattr(m2, 'dtype', MaskType)
return make_mask(m2, copy=copy, shrink=shrink, dtype=dtype)
if (m2 is nomask) or (m2 is False):
dtype = getattr(m1, 'dtype', MaskType)
return make_mask(m1, copy=copy, shrink=shrink, dtype=dtype)
if m1 is m2 and is_mask(m1):
return m1
(dtype1, dtype2) = (getattr(m1, 'dtype', None), getattr(m2, 'dtype', None))
if (dtype1 != dtype2):
raise ValueError("Incompatible dtypes '%s'<>'%s'" % (dtype1, dtype2))
if dtype1.names:
newmask = np.empty_like(m1)
_recursive_mask_or(m1, m2, newmask)
return newmask
return make_mask(umath.logical_or(m1, m2), copy=copy, shrink=shrink)
def flatten_mask(mask):
"""
Returns a completely flattened version of the mask, where nested fields
are collapsed.
Parameters
----------
mask : array_like
Input array, which will be interpreted as booleans.
Returns
-------
flattened_mask : ndarray of bools
The flattened input.
Examples
--------
>>> mask = np.array([0, 0, 1], dtype=np.bool)
>>> flatten_mask(mask)
array([False, False, True], dtype=bool)
>>> mask = np.array([(0, 0), (0, 1)], dtype=[('a', bool), ('b', bool)])
>>> flatten_mask(mask)
array([False, False, False, True], dtype=bool)
>>> mdtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])]
>>> mask = np.array([(0, (0, 0)), (0, (0, 1))], dtype=mdtype)
>>> flatten_mask(mask)
array([False, False, False, False, False, True], dtype=bool)
"""
#
def _flatmask(mask):
"Flatten the mask and return a (maybe nested) sequence of booleans."
mnames = mask.dtype.names
if mnames:
return [flatten_mask(mask[name]) for name in mnames]
else:
return mask
#
def _flatsequence(sequence):
"Generates a flattened version of the sequence."
try:
for element in sequence:
if hasattr(element, '__iter__'):
for f in _flatsequence(element):
yield f
else:
yield element
except TypeError:
yield sequence
#
mask = np.asarray(mask)
flattened = _flatsequence(_flatmask(mask))
return np.array([_ for _ in flattened], dtype=bool)
def _check_mask_axis(mask, axis):
"Check whether there are masked values along the given axis"
if mask is not nomask:
return mask.all(axis=axis)
return nomask
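# Editor's sketch (not part of the original source): _check_mask_axis reports,
# for each position along `axis`, whether *every* entry is masked; the
# reductions defined later use this to decide where their output is masked.
# Output formatting follows the old bool-array repr.
# >>> m = np.array([[True, False], [True, True]])
# >>> _check_mask_axis(m, 0)
# array([ True, False], dtype=bool)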
#####--------------------------------------------------------------------------
#--- --- Masking functions ---
#####--------------------------------------------------------------------------
def masked_where(condition, a, copy=True):
"""
Mask an array where a condition is met.
Return `a` as an array masked where `condition` is True.
Any masked values of `a` or `condition` are also masked in the output.
Parameters
----------
condition : array_like
Masking condition. When `condition` tests floating point values for
equality, consider using ``masked_values`` instead.
a : array_like
Array to mask.
copy : bool
If True (default) make a copy of `a` in the result. If False modify
`a` in place and return a view.
Returns
-------
result : MaskedArray
The result of masking `a` where `condition` is True.
See Also
--------
masked_values : Mask using floating point equality.
masked_equal : Mask where equal to a given value.
masked_not_equal : Mask where `not` equal to a given value.
masked_less_equal : Mask where less than or equal to a given value.
masked_greater_equal : Mask where greater than or equal to a given value.
masked_less : Mask where less than a given value.
masked_greater : Mask where greater than a given value.
masked_inside : Mask inside a given interval.
masked_outside : Mask outside a given interval.
masked_invalid : Mask invalid values (NaNs or infs).
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(4)
>>> a
array([0, 1, 2, 3])
>>> ma.masked_where(a <= 2, a)
masked_array(data = [-- -- -- 3],
mask = [ True True True False],
fill_value=999999)
Mask array `b` conditional on `a`.
>>> b = ['a', 'b', 'c', 'd']
>>> ma.masked_where(a == 2, b)
masked_array(data = [a b -- d],
mask = [False False True False],
fill_value=N/A)
Effect of the `copy` argument.
>>> c = ma.masked_where(a <= 2, a)
>>> c
masked_array(data = [-- -- -- 3],
mask = [ True True True False],
fill_value=999999)
>>> c[0] = 99
>>> c
masked_array(data = [99 -- -- 3],
mask = [False True True False],
fill_value=999999)
>>> a
array([0, 1, 2, 3])
>>> c = ma.masked_where(a <= 2, a, copy=False)
>>> c[0] = 99
>>> c
masked_array(data = [99 -- -- 3],
mask = [False True True False],
fill_value=999999)
>>> a
array([99, 1, 2, 3])
When `condition` or `a` contain masked values.
>>> a = np.arange(4)
>>> a = ma.masked_where(a == 2, a)
>>> a
masked_array(data = [0 1 -- 3],
mask = [False False True False],
fill_value=999999)
>>> b = np.arange(4)
>>> b = ma.masked_where(b == 0, b)
>>> b
masked_array(data = [-- 1 2 3],
mask = [ True False False False],
fill_value=999999)
>>> ma.masked_where(a == 3, b)
masked_array(data = [-- 1 -- --],
mask = [ True False True True],
fill_value=999999)
"""
# Make sure that condition is a valid standard-type mask.
cond = make_mask(condition)
a = np.array(a, copy=copy, subok=True)
(cshape, ashape) = (cond.shape, a.shape)
if cshape and cshape != ashape:
raise IndexError("Inconsistent shape between the condition and the input"
" (got %s and %s)" % (cshape, ashape))
if hasattr(a, '_mask'):
cond = mask_or(cond, a._mask)
cls = type(a)
else:
cls = MaskedArray
result = a.view(cls)
result._mask = cond
return result
def masked_greater(x, value, copy=True):
"""
Mask an array where greater than a given value.
This function is a shortcut to ``masked_where``, with
`condition` = (x > value).
See Also
--------
masked_where : Mask where a condition is met.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(4)
>>> a
array([0, 1, 2, 3])
>>> ma.masked_greater(a, 2)
masked_array(data = [0 1 2 --],
mask = [False False False True],
fill_value=999999)
"""
return masked_where(greater(x, value), x, copy=copy)
def masked_greater_equal(x, value, copy=True):
"""
Mask an array where greater than or equal to a given value.
This function is a shortcut to ``masked_where``, with
`condition` = (x >= value).
See Also
--------
masked_where : Mask where a condition is met.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(4)
>>> a
array([0, 1, 2, 3])
>>> ma.masked_greater_equal(a, 2)
masked_array(data = [0 1 -- --],
mask = [False False True True],
fill_value=999999)
"""
return masked_where(greater_equal(x, value), x, copy=copy)
def masked_less(x, value, copy=True):
"""
Mask an array where less than a given value.
This function is a shortcut to ``masked_where``, with
`condition` = (x < value).
See Also
--------
masked_where : Mask where a condition is met.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(4)
>>> a
array([0, 1, 2, 3])
>>> ma.masked_less(a, 2)
masked_array(data = [-- -- 2 3],
mask = [ True True False False],
fill_value=999999)
"""
return masked_where(less(x, value), x, copy=copy)
def masked_less_equal(x, value, copy=True):
"""
Mask an array where less than or equal to a given value.
This function is a shortcut to ``masked_where``, with
`condition` = (x <= value).
See Also
--------
masked_where : Mask where a condition is met.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(4)
>>> a
array([0, 1, 2, 3])
>>> ma.masked_less_equal(a, 2)
masked_array(data = [-- -- -- 3],
mask = [ True True True False],
fill_value=999999)
"""
return masked_where(less_equal(x, value), x, copy=copy)
def masked_not_equal(x, value, copy=True):
"""
Mask an array where `not` equal to a given value.
This function is a shortcut to ``masked_where``, with
`condition` = (x != value).
See Also
--------
masked_where : Mask where a condition is met.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(4)
>>> a
array([0, 1, 2, 3])
>>> ma.masked_not_equal(a, 2)
masked_array(data = [-- -- 2 --],
mask = [ True True False True],
fill_value=999999)
"""
return masked_where(not_equal(x, value), x, copy=copy)
def masked_equal(x, value, copy=True):
"""
Mask an array where equal to a given value.
This function is a shortcut to ``masked_where``, with
`condition` = (x == value). For floating point arrays,
consider using ``masked_values(x, value)``.
See Also
--------
masked_where : Mask where a condition is met.
masked_values : Mask using floating point equality.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(4)
>>> a
array([0, 1, 2, 3])
>>> ma.masked_equal(a, 2)
masked_array(data = [0 1 -- 3],
mask = [False False True False],
fill_value=999999)
"""
# An alternative implementation relies on filling first: probably not needed.
# d = filled(x, 0)
# c = umath.equal(d, value)
# m = mask_or(c, getmask(x))
# return array(d, mask=m, copy=copy)
output = masked_where(equal(x, value), x, copy=copy)
output.fill_value = value
return output
def masked_inside(x, v1, v2, copy=True):
"""
Mask an array inside a given interval.
Shortcut to ``masked_where``, where `condition` is True for `x` inside
the interval [v1,v2] (v1 <= x <= v2). The boundaries `v1` and `v2`
can be given in either order.
See Also
--------
masked_where : Mask where a condition is met.
Notes
-----
The array `x` is prefilled with its filling value.
Examples
--------
>>> import numpy.ma as ma
>>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1]
>>> ma.masked_inside(x, -0.3, 0.3)
masked_array(data = [0.31 1.2 -- -- -0.4 -1.1],
mask = [False False True True False False],
fill_value=1e+20)
The order of `v1` and `v2` doesn't matter.
>>> ma.masked_inside(x, 0.3, -0.3)
masked_array(data = [0.31 1.2 -- -- -0.4 -1.1],
mask = [False False True True False False],
fill_value=1e+20)
"""
if v2 < v1:
(v1, v2) = (v2, v1)
xf = filled(x)
condition = (xf >= v1) & (xf <= v2)
return masked_where(condition, x, copy=copy)
def masked_outside(x, v1, v2, copy=True):
"""
Mask an array outside a given interval.
Shortcut to ``masked_where``, where `condition` is True for `x` outside
the interval [v1,v2] (x < v1)|(x > v2).
The boundaries `v1` and `v2` can be given in either order.
See Also
--------
masked_where : Mask where a condition is met.
Notes
-----
The array `x` is prefilled with its filling value.
Examples
--------
>>> import numpy.ma as ma
>>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1]
>>> ma.masked_outside(x, -0.3, 0.3)
masked_array(data = [-- -- 0.01 0.2 -- --],
mask = [ True True False False True True],
fill_value=1e+20)
The order of `v1` and `v2` doesn't matter.
>>> ma.masked_outside(x, 0.3, -0.3)
masked_array(data = [-- -- 0.01 0.2 -- --],
mask = [ True True False False True True],
fill_value=1e+20)
"""
if v2 < v1:
(v1, v2) = (v2, v1)
xf = filled(x)
condition = (xf < v1) | (xf > v2)
return masked_where(condition, x, copy=copy)
def masked_object(x, value, copy=True, shrink=True):
"""
Mask the array `x` where the data are exactly equal to value.
This function is similar to `masked_values`, but only suitable
for object arrays: for floating point, use `masked_values` instead.
Parameters
----------
x : array_like
Array to mask
value : object
Comparison value
copy : {True, False}, optional
Whether to return a copy of `x`.
shrink : {True, False}, optional
Whether to collapse a mask full of False to nomask
Returns
-------
result : MaskedArray
The result of masking `x` where equal to `value`.
See Also
--------
masked_where : Mask where a condition is met.
masked_equal : Mask where equal to a given value (integers).
masked_values : Mask using floating point equality.
Examples
--------
>>> import numpy.ma as ma
>>> food = np.array(['green_eggs', 'ham'], dtype=object)
>>> # don't eat spoiled food
>>> eat = ma.masked_object(food, 'green_eggs')
>>> print eat
[-- ham]
>>> # plain ol` ham is boring
>>> fresh_food = np.array(['cheese', 'ham', 'pineapple'], dtype=object)
>>> eat = ma.masked_object(fresh_food, 'green_eggs')
>>> print eat
[cheese ham pineapple]
Note that `mask` is set to ``nomask`` if possible.
>>> eat
masked_array(data = [cheese ham pineapple],
mask = False,
fill_value=?)
"""
if isMaskedArray(x):
condition = umath.equal(x._data, value)
mask = x._mask
else:
condition = umath.equal(np.asarray(x), value)
mask = nomask
mask = mask_or(mask, make_mask(condition, shrink=shrink))
return masked_array(x, mask=mask, copy=copy, fill_value=value)
def masked_values(x, value, rtol=1e-5, atol=1e-8, copy=True, shrink=True):
"""
Mask using floating point equality.
Return a MaskedArray, masked where the data in array `x` are approximately
equal to `value`, i.e. where the following condition is True
(abs(x - value) <= atol+rtol*abs(value))
The fill_value is set to `value` and the mask is set to ``nomask`` if
possible. For integers, consider using ``masked_equal``.
Parameters
----------
x : array_like
Array to mask.
value : float
Masking value.
rtol : float, optional
Tolerance parameter.
atol : float, optional
Tolerance parameter (1e-8).
copy : bool, optional
Whether to return a copy of `x`.
shrink : bool, optional
Whether to collapse a mask full of False to ``nomask``.
Returns
-------
result : MaskedArray
The result of masking `x` where approximately equal to `value`.
See Also
--------
masked_where : Mask where a condition is met.
masked_equal : Mask where equal to a given value (integers).
Examples
--------
>>> import numpy.ma as ma
>>> x = np.array([1, 1.1, 2, 1.1, 3])
>>> ma.masked_values(x, 1.1)
masked_array(data = [1.0 -- 2.0 -- 3.0],
mask = [False True False True False],
fill_value=1.1)
Note that `mask` is set to ``nomask`` if possible.
>>> ma.masked_values(x, 1.5)
masked_array(data = [ 1. 1.1 2. 1.1 3. ],
mask = False,
fill_value=1.5)
For integers, the fill value will be different in general to the
result of ``masked_equal``.
>>> x = np.arange(5)
>>> x
array([0, 1, 2, 3, 4])
>>> ma.masked_values(x, 2)
masked_array(data = [0 1 -- 3 4],
mask = [False False True False False],
fill_value=2)
>>> ma.masked_equal(x, 2)
masked_array(data = [0 1 -- 3 4],
mask = [False False True False False],
fill_value=999999)
"""
mabs = umath.absolute
xnew = filled(x, value)
if issubclass(xnew.dtype.type, np.floating):
condition = umath.less_equal(mabs(xnew - value), atol + rtol * mabs(value))
mask = getattr(x, '_mask', nomask)
else:
condition = umath.equal(xnew, value)
mask = nomask
mask = mask_or(mask, make_mask(condition, shrink=shrink))
return masked_array(xnew, mask=mask, copy=copy, fill_value=value)
def masked_invalid(a, copy=True):
"""
Mask an array where invalid values occur (NaNs or infs).
This function is a shortcut to ``masked_where``, with
`condition` = ~(np.isfinite(a)). Any pre-existing mask is conserved.
Only applies to arrays with a dtype where NaNs or infs make sense
(i.e. floating point types), but accepts any array_like object.
See Also
--------
masked_where : Mask where a condition is met.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(5, dtype=np.float)
>>> a[2] = np.NaN
>>> a[3] = np.PINF
>>> a
array([ 0., 1., NaN, Inf, 4.])
>>> ma.masked_invalid(a)
masked_array(data = [0.0 1.0 -- -- 4.0],
mask = [False False True True False],
fill_value=1e+20)
"""
a = np.array(a, copy=copy, subok=True)
mask = getattr(a, '_mask', None)
if mask is not None:
condition = ~(np.isfinite(getdata(a)))
if mask is not nomask:
condition |= mask
cls = type(a)
else:
condition = ~(np.isfinite(a))
cls = MaskedArray
result = a.view(cls)
result._mask = condition
return result
#####--------------------------------------------------------------------------
#---- --- Printing options ---
#####--------------------------------------------------------------------------
class _MaskedPrintOption:
"""
Handle the string used to represent missing data in a masked array.
"""
def __init__ (self, display):
"Create the masked_print_option object."
self._display = display
self._enabled = True
def display(self):
"Display the string to print for masked values."
return self._display
def set_display (self, s):
"Set the string to print for masked values."
self._display = s
def enabled(self):
"Is the use of the display value enabled?"
return self._enabled
def enable(self, shrink=1):
"Set the enabling shrink to `shrink`."
self._enabled = shrink
def __str__ (self):
return str(self._display)
__repr__ = __str__
#if you single index into a masked location you get this object.
masked_print_option = _MaskedPrintOption('--')
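# Editor's sketch (not part of the original source): the module-level
# masked_print_option controls how masked entries are rendered by str/repr.
# Output indicative only.
# >>> masked_print_option.set_display('N/A')
# >>> print(masked_array([1, 2], mask=[0, 1]))
# [1 N/A]
# >>> masked_print_option.set_display('--')    # restore the default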
def _recursive_printoption(result, mask, printopt):
"""
Puts printoptions in result where mask is True.
Private function allowing for recursion
"""
names = result.dtype.names
for name in names:
(curdata, curmask) = (result[name], mask[name])
if curdata.dtype.names:
_recursive_printoption(curdata, curmask, printopt)
else:
np.copyto(curdata, printopt, where=curmask)
return
_print_templates = dict(long_std="""\
masked_%(name)s(data =
%(data)s,
%(nlen)s mask =
%(mask)s,
%(nlen)s fill_value = %(fill)s)
""",
short_std="""\
masked_%(name)s(data = %(data)s,
%(nlen)s mask = %(mask)s,
%(nlen)s fill_value = %(fill)s)
""",
long_flx="""\
masked_%(name)s(data =
%(data)s,
%(nlen)s mask =
%(mask)s,
%(nlen)s fill_value = %(fill)s,
%(nlen)s dtype = %(dtype)s)
""",
short_flx="""\
masked_%(name)s(data = %(data)s,
%(nlen)s mask = %(mask)s,
%(nlen)s fill_value = %(fill)s,
%(nlen)s dtype = %(dtype)s)
""")
#####--------------------------------------------------------------------------
#---- --- MaskedArray class ---
#####--------------------------------------------------------------------------
def _recursive_filled(a, mask, fill_value):
"""
Recursively fill `a` with `fill_value`.
Private function
"""
names = a.dtype.names
for name in names:
current = a[name]
if current.dtype.names:
_recursive_filled(current, mask[name], fill_value[name])
else:
np.copyto(current, fill_value[name], where=mask[name])
def flatten_structured_array(a):
"""
Flatten a structured array.
The data type of the output is chosen such that it can represent all of the
(nested) fields.
Parameters
----------
a : structured array
Returns
-------
output : masked array or ndarray
A flattened masked array if the input is a masked array, otherwise a
standard ndarray.
Examples
--------
>>> ndtype = [('a', int), ('b', float)]
>>> a = np.array([(1, 1), (2, 2)], dtype=ndtype)
>>> flatten_structured_array(a)
array([[1., 1.],
[2., 2.]])
"""
#
def flatten_sequence(iterable):
"""Flattens a compound of nested iterables."""
for elm in iter(iterable):
if hasattr(elm, '__iter__'):
for f in flatten_sequence(elm):
yield f
else:
yield elm
#
a = np.asanyarray(a)
inishape = a.shape
a = a.ravel()
if isinstance(a, MaskedArray):
out = np.array([tuple(flatten_sequence(d.item())) for d in a._data])
out = out.view(MaskedArray)
out._mask = np.array([tuple(flatten_sequence(d.item()))
for d in getmaskarray(a)])
else:
out = np.array([tuple(flatten_sequence(d.item())) for d in a])
if len(inishape) > 1:
newshape = list(out.shape)
newshape[0] = inishape
out.shape = tuple(flatten_sequence(newshape))
return out
class _arraymethod(object):
"""
Define a wrapper for basic array methods.
Upon call, returns a masked array, where the new ``_data`` array is
the output of the corresponding method called on the original
``_data``.
If `onmask` is True, the new mask is the output of the method called
on the initial mask. Otherwise, the new mask is just a reference
to the initial mask.
Attributes
----------
_onmask : bool
Holds the `onmask` parameter.
obj : object
The object calling `_arraymethod`.
Parameters
----------
funcname : str
Name of the function to apply on data.
onmask : bool
Whether the mask must be processed also (True) or left
alone (False). Default is True. Make available as `_onmask`
attribute.
"""
def __init__(self, funcname, onmask=True):
self.__name__ = funcname
self._onmask = onmask
self.obj = None
self.__doc__ = self.getdoc()
#
def getdoc(self):
"Return the doc of the function (from the doc of the method)."
methdoc = getattr(ndarray, self.__name__, None) or \
getattr(np, self.__name__, None)
if methdoc is not None:
return methdoc.__doc__
#
def __get__(self, obj, objtype=None):
self.obj = obj
return self
#
def __call__(self, *args, **params):
methodname = self.__name__
instance = self.obj
# Fallback : if the instance has not been initialized, use the first arg
if instance is None:
args = list(args)
instance = args.pop(0)
data = instance._data
mask = instance._mask
cls = type(instance)
result = getattr(data, methodname)(*args, **params).view(cls)
result._update_from(instance)
if result.ndim:
if not self._onmask:
result.__setmask__(mask)
elif mask is not nomask:
result.__setmask__(getattr(mask, methodname)(*args, **params))
else:
if mask.ndim and (not mask.dtype.names and mask.all()):
return masked
return result
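# Editor's sketch (not part of the original source): _arraymethod lets
# MaskedArray re-expose plain ndarray methods while keeping the mask in step
# with the data.  Hypothetical illustration of how such wrappers are declared
# inside a class body (the real assignments appear later in this file and may
# differ):
#     copy = _arraymethod('copy')                    # applied to data and mask
#     conjugate = _arraymethod('conjugate', onmask=False)  # data only; mask reused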
class MaskedIterator(object):
"""
Flat iterator object to iterate over masked arrays.
A `MaskedIterator` iterator is returned by ``x.flat`` for any masked array
`x`. It allows iterating over the array as if it were a 1-D array,
either in a for-loop or by calling its `next` method.
Iteration is done in C-contiguous style, with the last index varying the
fastest. The iterator can also be indexed using basic slicing or
advanced indexing.
See Also
--------
MaskedArray.flat : Return a flat iterator over an array.
MaskedArray.flatten : Returns a flattened copy of an array.
Notes
-----
`MaskedIterator` is not exported by the `ma` module. Instead of
instantiating a `MaskedIterator` directly, use `MaskedArray.flat`.
Examples
--------
>>> x = np.ma.array(np.arange(6).reshape(2, 3))
>>> fl = x.flat
>>> type(fl)
<class 'numpy.ma.core.MaskedIterator'>
>>> for item in fl:
... print item
...
0
1
2
3
4
5
Extracting more than a single element by indexing the `MaskedIterator`
returns a masked array:
>>> fl[2:4]
masked_array(data = [2 3],
mask = False,
fill_value = 999999)
"""
def __init__(self, ma):
self.ma = ma
self.dataiter = ma._data.flat
#
if ma._mask is nomask:
self.maskiter = None
else:
self.maskiter = ma._mask.flat
def __iter__(self):
return self
def __getitem__(self, indx):
result = self.dataiter.__getitem__(indx).view(type(self.ma))
if self.maskiter is not None:
_mask = self.maskiter.__getitem__(indx)
if isinstance(_mask, ndarray):
# set shape to match that of data; this is needed for matrices
_mask.shape = result.shape
result._mask = _mask
elif isinstance(_mask, np.void):
return mvoid(result, mask=_mask, hardmask=self.ma._hardmask)
elif _mask: # Just a scalar, masked
return masked
return result
### This won't work if ravel makes a copy
def __setitem__(self, index, value):
self.dataiter[index] = getdata(value)
if self.maskiter is not None:
self.maskiter[index] = getmaskarray(value)
def __next__(self):
"""
Return the next value, or raise StopIteration.
Examples
--------
>>> x = np.ma.array([3, 2], mask=[0, 1])
>>> fl = x.flat
>>> fl.next()
3
>>> fl.next()
masked_array(data = --,
mask = True,
fill_value = 1e+20)
>>> fl.next()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/ralf/python/numpy/numpy/ma/core.py", line 2243, in next
d = self.dataiter.next()
StopIteration
"""
d = next(self.dataiter)
if self.maskiter is not None:
m = next(self.maskiter)
if isinstance(m, np.void):
return mvoid(d, mask=m, hardmask=self.ma._hardmask)
elif m: # Just a scalar, masked
return masked
return d
next = __next__
class MaskedArray(ndarray):
"""
An array class with possibly masked values.
Masked values of True exclude the corresponding element from any
computation.
Construction::
x = MaskedArray(data, mask=nomask, dtype=None,
copy=False, subok=True, ndmin=0, fill_value=None,
keep_mask=True, hard_mask=None, shrink=True)
Parameters
----------
data : array_like
Input data.
mask : sequence, optional
Mask. Must be convertible to an array of booleans with the same
shape as `data`. True indicates a masked (i.e. invalid) data.
dtype : dtype, optional
Data type of the output.
If `dtype` is None, the type of the data argument (``data.dtype``)
is used. If `dtype` is not None and different from ``data.dtype``,
a copy is performed.
copy : bool, optional
Whether to copy the input data (True), or to use a reference instead.
Default is False.
subok : bool, optional
Whether to return a subclass of `MaskedArray` if possible (True) or a
plain `MaskedArray`. Default is True.
ndmin : int, optional
Minimum number of dimensions. Default is 0.
fill_value : scalar, optional
Value used to fill in the masked values when necessary.
If None, a default based on the data-type is used.
keep_mask : bool, optional
Whether to combine `mask` with the mask of the input data, if any
(True), or to use only `mask` for the output (False). Default is True.
hard_mask : bool, optional
Whether to use a hard mask or not. With a hard mask, masked values
cannot be unmasked. Default is False.
shrink : bool, optional
Whether to force compression of an empty mask. Default is True.
"""
__array_priority__ = 15
_defaultmask = nomask
_defaulthardmask = False
_baseclass = ndarray
def __new__(cls, data=None, mask=nomask, dtype=None, copy=False,
subok=True, ndmin=0, fill_value=None,
keep_mask=True, hard_mask=None, shrink=True,
**options):
"""
Create a new masked array from scratch.
Notes
-----
A masked array can also be created by taking a .view(MaskedArray).
"""
# Process data............
_data = np.array(data, dtype=dtype, copy=copy, subok=True, ndmin=ndmin)
_baseclass = getattr(data, '_baseclass', type(_data))
# Check that we're not erasing the mask..........
if isinstance(data, MaskedArray) and (data.shape != _data.shape):
copy = True
# Careful, cls might not always be MaskedArray...
if not isinstance(data, cls) or not subok:
_data = ndarray.view(_data, cls)
else:
_data = ndarray.view(_data, type(data))
# Backwards compatibility w/ numpy.core.ma .......
if hasattr(data, '_mask') and not isinstance(data, ndarray):
_data._mask = data._mask
_sharedmask = True
# Process mask ...............................
# Number of named fields (or zero if none)
names_ = _data.dtype.names or ()
# Type of the mask
if names_:
mdtype = make_mask_descr(_data.dtype)
else:
mdtype = MaskType
# Case 1. : no mask in input ............
if mask is nomask:
# Erase the current mask ?
if not keep_mask:
# With a reduced version
if shrink:
_data._mask = nomask
# With full version
else:
_data._mask = np.zeros(_data.shape, dtype=mdtype)
# Check whether we missed something
elif isinstance(data, (tuple, list)):
try:
# If data is a sequence of masked array
mask = np.array([getmaskarray(m) for m in data],
dtype=mdtype)
except ValueError:
# If data is nested
mask = nomask
# Force shrinking of the mask if needed (and possible)
if (mdtype == MaskType) and mask.any():
_data._mask = mask
_data._sharedmask = False
else:
if copy:
_data._mask = _data._mask.copy()
_data._sharedmask = False
# Reset the shape of the original mask
if getmask(data) is not nomask:
data._mask.shape = data.shape
else:
_data._sharedmask = True
# Case 2. : With a mask in input ........
else:
# Read the mask with the current mdtype
try:
mask = np.array(mask, copy=copy, dtype=mdtype)
# Or assume it's a sequence of bool/int
except TypeError:
mask = np.array([tuple([m] * len(mdtype)) for m in mask],
dtype=mdtype)
# Make sure the mask and the data have the same shape
if mask.shape != _data.shape:
(nd, nm) = (_data.size, mask.size)
if nm == 1:
mask = np.resize(mask, _data.shape)
elif nm == nd:
mask = np.reshape(mask, _data.shape)
else:
msg = "Mask and data not compatible: data size is %i, " + \
"mask size is %i."
raise MaskError(msg % (nd, nm))
copy = True
# Set the mask to the new value
if _data._mask is nomask:
_data._mask = mask
_data._sharedmask = not copy
else:
if not keep_mask:
_data._mask = mask
_data._sharedmask = not copy
else:
if names_:
def _recursive_or(a, b):
"do a|=b on each field of a, recursively"
for name in a.dtype.names:
(af, bf) = (a[name], b[name])
if af.dtype.names:
_recursive_or(af, bf)
else:
af |= bf
return
_recursive_or(_data._mask, mask)
else:
_data._mask = np.logical_or(mask, _data._mask)
_data._sharedmask = False
# Update fill_value.......
if fill_value is None:
fill_value = getattr(data, '_fill_value', None)
# But don't run the check unless we have something to check....
if fill_value is not None:
_data._fill_value = _check_fill_value(fill_value, _data.dtype)
# Process extra options ..
if hard_mask is None:
_data._hardmask = getattr(data, '_hardmask', False)
else:
_data._hardmask = hard_mask
_data._baseclass = _baseclass
return _data
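    # A minimal construction sketch for ``__new__`` (assuming ``numpy`` is
    # imported as ``np`` and this class is exposed as ``np.ma.MaskedArray``);
    # it illustrates how an explicit ``mask`` interacts with ``keep_mask``:
    #
    #     >>> base = np.ma.array([1, 2, 3], mask=[0, 1, 0])
    #     >>> # keep_mask=True (default): the new mask is OR-ed with the old one
    #     >>> np.ma.array(base, mask=[1, 0, 0]).mask.tolist()
    #     [True, True, False]
    #     >>> # keep_mask=False: only the explicit mask is kept
    #     >>> np.ma.array(base, mask=[1, 0, 0], keep_mask=False).mask.tolist()
    #     [True, False, False]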
#
def _update_from(self, obj):
"""Copies some attributes of obj to self.
"""
if obj is not None and isinstance(obj, ndarray):
_baseclass = type(obj)
else:
_baseclass = ndarray
# We need to copy the _basedict to avoid backward propagation
_optinfo = {}
_optinfo.update(getattr(obj, '_optinfo', {}))
_optinfo.update(getattr(obj, '_basedict', {}))
if not isinstance(obj, MaskedArray):
_optinfo.update(getattr(obj, '__dict__', {}))
_dict = dict(_fill_value=getattr(obj, '_fill_value', None),
_hardmask=getattr(obj, '_hardmask', False),
_sharedmask=getattr(obj, '_sharedmask', False),
_isfield=getattr(obj, '_isfield', False),
_baseclass=getattr(obj, '_baseclass', _baseclass),
_optinfo=_optinfo,
_basedict=_optinfo)
self.__dict__.update(_dict)
self.__dict__.update(_optinfo)
return
def __array_finalize__(self, obj):
"""Finalizes the masked array.
"""
# Get main attributes .........
self._update_from(obj)
if isinstance(obj, ndarray):
odtype = obj.dtype
if odtype.names:
_mask = getattr(obj, '_mask', make_mask_none(obj.shape, odtype))
else:
_mask = getattr(obj, '_mask', nomask)
else:
_mask = nomask
self._mask = _mask
# Finalize the mask ...........
if self._mask is not nomask:
try:
self._mask.shape = self.shape
except ValueError:
self._mask = nomask
except (TypeError, AttributeError):
# When _mask.shape is not writable (because it's a void)
pass
# Finalize the fill_value for structured arrays
if self.dtype.names:
if self._fill_value is None:
self._fill_value = _check_fill_value(None, self.dtype)
return
def __array_wrap__(self, obj, context=None):
"""
Special hook for ufuncs.
Wraps the numpy array and sets the mask according to context.
"""
result = obj.view(type(self))
result._update_from(self)
#..........
if context is not None:
result._mask = result._mask.copy()
(func, args, _) = context
m = reduce(mask_or, [getmaskarray(arg) for arg in args])
# Get the domain mask................
domain = ufunc_domain.get(func, None)
if domain is not None:
# Take the domain, and make sure it's a ndarray
if len(args) > 2:
d = filled(reduce(domain, args), True)
else:
d = filled(domain(*args), True)
# Fill the result where the domain is wrong
try:
# Binary domain: take the last value
fill_value = ufunc_fills[func][-1]
except TypeError:
# Unary domain: just use this one
fill_value = ufunc_fills[func]
except KeyError:
# Domain not recognized, use fill_value instead
fill_value = self.fill_value
result = result.copy()
np.copyto(result, fill_value, where=d)
# Update the mask
if m is nomask:
if d is not nomask:
m = d
else:
# Don't modify inplace, we risk back-propagation
m = (m | d)
# Make sure the mask has the proper size
if result.shape == () and m:
return masked
else:
result._mask = m
result._sharedmask = False
#....
return result
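    # A small sketch of what ``__array_wrap__`` does for a domained ufunc
    # (assuming ``numpy`` is imported as ``np``): entries that fall outside
    # the ufunc's domain come back masked rather than as NaN/Inf:
    #
    #     >>> x = np.ma.array([1.0, -1.0, 10.0])
    #     >>> np.log(x).mask.tolist()       # log(-1) is out of domain -> masked
    #     [False, True, False]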
def view(self, dtype=None, type=None, fill_value=None):
"""
Return a view of the MaskedArray data
Parameters
----------
dtype : data-type or ndarray sub-class, optional
Data-type descriptor of the returned view, e.g., float32 or int16.
The default, None, results in the view having the same data-type
as `a`. As with ``ndarray.view``, dtype can also be specified as
an ndarray sub-class, which then specifies the type of the
returned object (this is equivalent to setting the ``type``
parameter).
type : Python type, optional
Type of the returned view, e.g., ndarray or matrix. Again, the
default None results in type preservation.
Notes
-----
``a.view()`` is used two different ways:
``a.view(some_dtype)`` or ``a.view(dtype=some_dtype)`` constructs a view
of the array's memory with a different data-type. This can cause a
reinterpretation of the bytes of memory.
``a.view(ndarray_subclass)`` or ``a.view(type=ndarray_subclass)`` just
returns an instance of `ndarray_subclass` that looks at the same array
(same shape, dtype, etc.) This does not cause a reinterpretation of the
memory.
If `fill_value` is not specified, but `dtype` is specified (and is not
an ndarray sub-class), the `fill_value` of the MaskedArray will be
reset. If neither `fill_value` nor `dtype` are specified (or if
`dtype` is an ndarray sub-class), then the fill value is preserved.
Finally, if `fill_value` is specified, but `dtype` is not, the fill
value is set to the specified value.
For ``a.view(some_dtype)``, if ``some_dtype`` has a different number of
bytes per entry than the previous dtype (for example, converting a
regular array to a structured array), then the behavior of the view
cannot be predicted just from the superficial appearance of ``a`` (shown
by ``print(a)``). It also depends on exactly how ``a`` is stored in
memory. Therefore if ``a`` is C-ordered versus fortran-ordered, versus
defined as a slice or transpose, etc., the view may give different
results.
"""
if dtype is None:
if type is None:
output = ndarray.view(self)
else:
output = ndarray.view(self, type)
elif type is None:
try:
if issubclass(dtype, ndarray):
output = ndarray.view(self, dtype)
dtype = None
else:
output = ndarray.view(self, dtype)
except TypeError:
output = ndarray.view(self, dtype)
else:
output = ndarray.view(self, dtype, type)
# Should we update the mask ?
if (getattr(output, '_mask', nomask) is not nomask):
if dtype is None:
dtype = output.dtype
mdtype = make_mask_descr(dtype)
output._mask = self._mask.view(mdtype, ndarray)
# Try to reset the shape of the mask (if we don't have a void)
try:
output._mask.shape = output.shape
except (AttributeError, TypeError):
pass
# Make sure to reset the _fill_value if needed
if getattr(output, '_fill_value', None) is not None:
if fill_value is None:
if dtype is None:
pass # leave _fill_value as is
else:
output._fill_value = None
else:
output.fill_value = fill_value
return output
view.__doc__ = ndarray.view.__doc__
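    # A minimal sketch of ``view`` with a new dtype (assuming ``numpy`` is
    # imported as ``np``): the mask is carried over and, since no explicit
    # fill value was ever set here, the fill value resolves to the default of
    # the new dtype:
    #
    #     >>> x = np.ma.array([1, 2], mask=[0, 1], dtype=np.int64)
    #     >>> y = x.view(np.float64)        # same bytes, reinterpreted
    #     >>> y.mask.tolist()
    #     [False, True]
    #     >>> float(y.fill_value)
    #     1e+20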
def astype(self, newtype):
"""
Returns a copy of the MaskedArray cast to given newtype.
Returns
-------
output : MaskedArray
A copy of self cast to input newtype.
The returned record shape matches self.shape.
Examples
--------
>>> x = np.ma.array([[1,2,3.1],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
>>> print x
[[1.0 -- 3.1]
[-- 5.0 --]
[7.0 -- 9.0]]
>>> print x.astype(int32)
[[1 -- 3]
[-- 5 --]
[7 -- 9]]
"""
newtype = np.dtype(newtype)
output = self._data.astype(newtype).view(type(self))
output._update_from(self)
names = output.dtype.names
if names is None:
output._mask = self._mask.astype(bool)
else:
if self._mask is nomask:
output._mask = nomask
else:
output._mask = self._mask.astype([(n, bool) for n in names])
# Don't check _fill_value if it's None, that'll speed things up
if self._fill_value is not None:
output._fill_value = _check_fill_value(self._fill_value, newtype)
return output
def __getitem__(self, indx):
"""x.__getitem__(y) <==> x[y]
Return the item described by i, as a masked array.
"""
# This test is useful, but we should keep things light...
# if getmask(indx) is not nomask:
# msg = "Masked arrays must be filled before they can be used as indices!"
# raise IndexError(msg)
_data = ndarray.view(self, ndarray)
dout = ndarray.__getitem__(_data, indx)
# We could directly use ndarray.__getitem__ on self...
# But then we would have to modify __array_finalize__ to prevent the
# mask of being reshaped if it hasn't been set up properly yet...
# So it's easier to stick to the current version
_mask = self._mask
if not getattr(dout, 'ndim', False):
# A record ................
if isinstance(dout, np.void):
mask = _mask[indx]
# We should always re-cast to mvoid, otherwise users can
# change masks on rows that already have masked values, but not
# on rows that have no masked values, which is inconsistent.
dout = mvoid(dout, mask=mask, hardmask=self._hardmask)
# Just a scalar............
elif _mask is not nomask and _mask[indx]:
return masked
else:
# Force dout to MA ........
dout = dout.view(type(self))
# Inherit attributes from self
dout._update_from(self)
# Check the fill_value ....
if isinstance(indx, basestring):
if self._fill_value is not None:
dout._fill_value = self._fill_value[indx]
dout._isfield = True
# Update the mask if needed
if _mask is not nomask:
dout._mask = _mask[indx]
dout._sharedmask = True
# Note: Don't try to check for m.any(), that'll take too long...
return dout
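    # A minimal indexing sketch for ``__getitem__`` (assuming ``numpy`` is
    # imported as ``np``): masked scalars come back as ``np.ma.masked`` and
    # slices inherit (and initially share) the mask:
    #
    #     >>> x = np.ma.array([10, 20, 30], mask=[0, 1, 0])
    #     >>> int(x[0])
    #     10
    #     >>> x[1] is np.ma.masked
    #     True
    #     >>> x[1:].mask.tolist()
    #     [True, False]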
def __setitem__(self, indx, value):
"""x.__setitem__(i, y) <==> x[i]=y
Set item described by index. If value is masked, masks those
locations.
"""
if self is masked:
raise MaskError('Cannot alter the masked element.')
# This test is useful, but we should keep things light...
# if getmask(indx) is not nomask:
# msg = "Masked arrays must be filled before they can be used as indices!"
# raise IndexError(msg)
_data = ndarray.view(self, ndarray.__getattribute__(self, '_baseclass'))
_mask = ndarray.__getattribute__(self, '_mask')
if isinstance(indx, basestring):
ndarray.__setitem__(_data, indx, value)
if _mask is nomask:
self._mask = _mask = make_mask_none(self.shape, self.dtype)
_mask[indx] = getmask(value)
return
#........................................
_dtype = ndarray.__getattribute__(_data, 'dtype')
nbfields = len(_dtype.names or ())
#........................................
if value is masked:
# The mask wasn't set: create a full version...
if _mask is nomask:
_mask = self._mask = make_mask_none(self.shape, _dtype)
# Now, set the mask to its value.
if nbfields:
_mask[indx] = tuple([True] * nbfields)
else:
_mask[indx] = True
if not self._isfield:
self._sharedmask = False
return
#........................................
# Get the _data part of the new value
dval = value
# Get the _mask part of the new value
mval = getattr(value, '_mask', nomask)
if nbfields and mval is nomask:
mval = tuple([False] * nbfields)
if _mask is nomask:
# Set the data, then the mask
ndarray.__setitem__(_data, indx, dval)
if mval is not nomask:
_mask = self._mask = make_mask_none(self.shape, _dtype)
ndarray.__setitem__(_mask, indx, mval)
elif not self._hardmask:
# Unshare the mask if necessary to avoid propagation
if not self._isfield:
self.unshare_mask()
_mask = ndarray.__getattribute__(self, '_mask')
# Set the data, then the mask
ndarray.__setitem__(_data, indx, dval)
ndarray.__setitem__(_mask, indx, mval)
elif hasattr(indx, 'dtype') and (indx.dtype == MaskType):
indx = indx * umath.logical_not(_mask)
ndarray.__setitem__(_data, indx, dval)
else:
if nbfields:
err_msg = "Flexible 'hard' masks are not yet supported..."
raise NotImplementedError(err_msg)
mindx = mask_or(_mask[indx], mval, copy=True)
dindx = self._data[indx]
if dindx.size > 1:
np.copyto(dindx, dval, where=~mindx)
elif mindx is nomask:
dindx = dval
ndarray.__setitem__(_data, indx, dindx)
_mask[indx] = mindx
return
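    # A minimal assignment sketch for ``__setitem__`` (assuming ``numpy`` is
    # imported as ``np``): assigning ``masked`` masks a location, while
    # assigning a value to a (soft-)masked location unmasks it:
    #
    #     >>> x = np.ma.array([1, 2, 3], mask=[0, 0, 1])
    #     >>> x[0] = np.ma.masked
    #     >>> x[2] = 30
    #     >>> x.mask.tolist()
    #     [True, False, False]
    #     >>> x.filled(0).tolist()
    #     [0, 2, 30]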
def __getslice__(self, i, j):
"""x.__getslice__(i, j) <==> x[i:j]
Return the slice described by (i, j). The use of negative
indices is not supported.
"""
return self.__getitem__(slice(i, j))
def __setslice__(self, i, j, value):
"""x.__setslice__(i, j, value) <==> x[i:j]=value
Set the slice (i,j) of a to value. If value is masked, mask
those locations.
"""
self.__setitem__(slice(i, j), value)
def __setmask__(self, mask, copy=False):
"""Set the mask.
"""
idtype = ndarray.__getattribute__(self, 'dtype')
current_mask = ndarray.__getattribute__(self, '_mask')
if mask is masked:
mask = True
# Make sure the mask is set
if (current_mask is nomask):
            # Just don't do anything if there's nothing to do...
if mask is nomask:
return
current_mask = self._mask = make_mask_none(self.shape, idtype)
# No named fields.........
if idtype.names is None:
# Hardmask: don't unmask the data
if self._hardmask:
current_mask |= mask
# Softmask: set everything to False
# If it's obviously a compatible scalar, use a quick update
# method...
elif isinstance(mask, (int, float, np.bool_, np.number)):
current_mask[...] = mask
# ...otherwise fall back to the slower, general purpose way.
else:
current_mask.flat = mask
# Named fields w/ ............
else:
mdtype = current_mask.dtype
mask = np.array(mask, copy=False)
# Mask is a singleton
if not mask.ndim:
# It's a boolean : make a record
if mask.dtype.kind == 'b':
mask = np.array(tuple([mask.item()]*len(mdtype)),
dtype=mdtype)
# It's a record: make sure the dtype is correct
else:
mask = mask.astype(mdtype)
# Mask is a sequence
else:
# Make sure the new mask is a ndarray with the proper dtype
try:
mask = np.array(mask, copy=copy, dtype=mdtype)
# Or assume it's a sequence of bool/int
except TypeError:
mask = np.array([tuple([m] * len(mdtype)) for m in mask],
dtype=mdtype)
# Hardmask: don't unmask the data
if self._hardmask:
for n in idtype.names:
current_mask[n] |= mask[n]
# Softmask: set everything to False
# If it's obviously a compatible scalar, use a quick update
# method...
elif isinstance(mask, (int, float, np.bool_, np.number)):
current_mask[...] = mask
# ...otherwise fall back to the slower, general purpose way.
else:
current_mask.flat = mask
# Reshape if needed
if current_mask.shape:
current_mask.shape = self.shape
return
_set_mask = __setmask__
#....
def _get_mask(self):
"""Return the current mask.
"""
# We could try to force a reshape, but that wouldn't work in some cases.
# return self._mask.reshape(self.shape)
return self._mask
mask = property(fget=_get_mask, fset=__setmask__, doc="Mask")
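    # A minimal sketch of assigning to the ``mask`` property (which routes
    # through ``__setmask__``), assuming ``numpy`` is imported as ``np``:
    #
    #     >>> x = np.ma.array([1, 2, 3])
    #     >>> x.mask = [0, 1, 0]            # sequence: set element-wise
    #     >>> x.mask.tolist()
    #     [False, True, False]
    #     >>> x.mask = False                # scalar: broadcast to every entry
    #     >>> x.mask.tolist()
    #     [False, False, False]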
def _get_recordmask(self):
"""
Return the mask of the records.
A record is masked when all the fields are masked.
"""
_mask = ndarray.__getattribute__(self, '_mask').view(ndarray)
if _mask.dtype.names is None:
return _mask
return np.all(flatten_structured_array(_mask), axis= -1)
def _set_recordmask(self):
"""Return the mask of the records.
A record is masked when all the fields are masked.
"""
raise NotImplementedError("Coming soon: setting the mask per records!")
recordmask = property(fget=_get_recordmask)
#............................................
def harden_mask(self):
"""
Force the mask to hard.
Whether the mask of a masked array is hard or soft is determined by
its `hardmask` property. `harden_mask` sets `hardmask` to True.
See Also
--------
hardmask
"""
self._hardmask = True
return self
def soften_mask(self):
"""
Force the mask to soft.
Whether the mask of a masked array is hard or soft is determined by
its `hardmask` property. `soften_mask` sets `hardmask` to False.
See Also
--------
hardmask
"""
self._hardmask = False
return self
hardmask = property(fget=lambda self: self._hardmask,
doc="Hardness of the mask")
def unshare_mask(self):
"""
Copy the mask and set the sharedmask flag to False.
Whether the mask is shared between masked arrays can be seen from
the `sharedmask` property. `unshare_mask` ensures the mask is not shared.
A copy of the mask is only made if it was shared.
See Also
--------
sharedmask
"""
if self._sharedmask:
self._mask = self._mask.copy()
self._sharedmask = False
return self
sharedmask = property(fget=lambda self: self._sharedmask,
doc="Share status of the mask (read-only).")
def shrink_mask(self):
"""
Reduce a mask to nomask when possible.
Parameters
----------
None
Returns
-------
None
Examples
--------
>>> x = np.ma.array([[1,2 ], [3, 4]], mask=[0]*4)
>>> x.mask
array([[False, False],
[False, False]], dtype=bool)
>>> x.shrink_mask()
>>> x.mask
False
"""
m = self._mask
if m.ndim and not m.any():
self._mask = nomask
return self
#............................................
baseclass = property(fget=lambda self:self._baseclass,
doc="Class of the underlying data (read-only).")
def _get_data(self):
"""Return the current data, as a view of the original
underlying data.
"""
return ndarray.view(self, self._baseclass)
_data = property(fget=_get_data)
data = property(fget=_get_data)
def _get_flat(self):
"Return a flat iterator."
return MaskedIterator(self)
#
def _set_flat (self, value):
"Set a flattened version of self to value."
y = self.ravel()
y[:] = value
#
flat = property(fget=_get_flat, fset=_set_flat,
doc="Flat version of the array.")
def get_fill_value(self):
"""
Return the filling value of the masked array.
Returns
-------
fill_value : scalar
The filling value.
Examples
--------
>>> for dt in [np.int32, np.int64, np.float64, np.complex128]:
... np.ma.array([0, 1], dtype=dt).get_fill_value()
...
999999
999999
1e+20
(1e+20+0j)
>>> x = np.ma.array([0, 1.], fill_value=-np.inf)
>>> x.get_fill_value()
-inf
"""
if self._fill_value is None:
self._fill_value = _check_fill_value(None, self.dtype)
return self._fill_value[()]
def set_fill_value(self, value=None):
"""
Set the filling value of the masked array.
Parameters
----------
value : scalar, optional
The new filling value. Default is None, in which case a default
based on the data type is used.
See Also
--------
ma.set_fill_value : Equivalent function.
Examples
--------
>>> x = np.ma.array([0, 1.], fill_value=-np.inf)
>>> x.fill_value
-inf
>>> x.set_fill_value(np.pi)
>>> x.fill_value
3.1415926535897931
Reset to default:
>>> x.set_fill_value()
>>> x.fill_value
1e+20
"""
target = _check_fill_value(value, self.dtype)
_fill_value = self._fill_value
if _fill_value is None:
# Create the attribute if it was undefined
self._fill_value = target
else:
# Don't overwrite the attribute, just fill it (for propagation)
_fill_value[()] = target
fill_value = property(fget=get_fill_value, fset=set_fill_value,
doc="Filling value.")
def filled(self, fill_value=None):
"""
Return a copy of self, with masked values filled with a given value.
Parameters
----------
fill_value : scalar, optional
The value to use for invalid entries (None by default).
If None, the `fill_value` attribute of the array is used instead.
Returns
-------
filled_array : ndarray
A copy of ``self`` with invalid entries replaced by *fill_value*
            (be it the function argument or the attribute of ``self``).
Notes
-----
The result is **not** a MaskedArray!
Examples
--------
>>> x = np.ma.array([1,2,3,4,5], mask=[0,0,1,0,1], fill_value=-999)
>>> x.filled()
array([1, 2, -999, 4, -999])
>>> type(x.filled())
<type 'numpy.ndarray'>
Subclassing is preserved. This means that if the data part of the masked
array is a matrix, `filled` returns a matrix:
>>> x = np.ma.array(np.matrix([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]])
>>> x.filled()
matrix([[ 1, 999999],
[999999, 4]])
"""
m = self._mask
if m is nomask:
return self._data
#
if fill_value is None:
fill_value = self.fill_value
else:
fill_value = _check_fill_value(fill_value, self.dtype)
#
if self is masked_singleton:
return np.asanyarray(fill_value)
#
if m.dtype.names:
result = self._data.copy('K')
_recursive_filled(result, self._mask, fill_value)
elif not m.any():
return self._data
else:
result = self._data.copy('K')
try:
np.copyto(result, fill_value, where=m)
except (TypeError, AttributeError):
fill_value = narray(fill_value, dtype=object)
d = result.astype(object)
result = np.choose(m, (d, fill_value))
except IndexError:
#ok, if scalar
if self._data.shape:
raise
elif m:
result = np.array(fill_value, dtype=self.dtype)
else:
result = self._data
return result
def compressed(self):
"""
Return all the non-masked data as a 1-D array.
Returns
-------
data : ndarray
A new `ndarray` holding the non-masked data is returned.
Notes
-----
The result is **not** a MaskedArray!
Examples
--------
>>> x = np.ma.array(np.arange(5), mask=[0]*2 + [1]*3)
>>> x.compressed()
array([0, 1])
>>> type(x.compressed())
<type 'numpy.ndarray'>
"""
data = ndarray.ravel(self._data)
if self._mask is not nomask:
data = data.compress(np.logical_not(ndarray.ravel(self._mask)))
return data
def compress(self, condition, axis=None, out=None):
"""
Return `a` where condition is ``True``.
If condition is a `MaskedArray`, missing values are considered
as ``False``.
Parameters
----------
condition : var
Boolean 1-d array selecting which entries to return. If len(condition)
is less than the size of a along the axis, then output is truncated
            to the length of the condition array.
axis : {None, int}, optional
Axis along which the operation must be performed.
out : {None, ndarray}, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output but the type will be cast if
necessary.
Returns
-------
result : MaskedArray
A :class:`MaskedArray` object.
Notes
-----
Please note the difference with :meth:`compressed` !
The output of :meth:`compress` has a mask, the output of
:meth:`compressed` does not.
Examples
--------
>>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
>>> print x
[[1 -- 3]
[-- 5 --]
[7 -- 9]]
>>> x.compress([1, 0, 1])
masked_array(data = [1 3],
mask = [False False],
fill_value=999999)
>>> x.compress([1, 0, 1], axis=1)
masked_array(data =
[[1 3]
[-- --]
[7 9]],
mask =
[[False False]
[ True True]
[False False]],
fill_value=999999)
"""
# Get the basic components
(_data, _mask) = (self._data, self._mask)
# Force the condition to a regular ndarray (forget the missing values...)
condition = np.array(condition, copy=False, subok=False)
#
_new = _data.compress(condition, axis=axis, out=out).view(type(self))
_new._update_from(self)
if _mask is not nomask:
_new._mask = _mask.compress(condition, axis=axis)
return _new
#............................................
def __str__(self):
"""String representation.
"""
if masked_print_option.enabled():
f = masked_print_option
if self is masked:
return str(f)
m = self._mask
if m is nomask:
res = self._data
else:
if m.shape == ():
if m.dtype.names:
m = m.view((bool, len(m.dtype)))
if m.any():
return str(tuple((f if _m else _d) for _d, _m in
zip(self._data.tolist(), m)))
else:
return str(self._data)
elif m:
return str(f)
else:
return str(self._data)
# convert to object array to make filled work
names = self.dtype.names
if names is None:
res = self._data.astype("O")
res.view(ndarray)[m] = f
else:
rdtype = _recursive_make_descr(self.dtype, "O")
res = self._data.astype(rdtype)
_recursive_printoption(res, m, f)
else:
res = self.filled(self.fill_value)
return str(res)
def __repr__(self):
"""Literal string representation.
"""
n = len(self.shape)
if self._baseclass is np.ndarray:
name = 'array'
else:
name = self._baseclass.__name__
parameters = dict(name=name, nlen=" " * len(name),
data=str(self), mask=str(self._mask),
fill=str(self.fill_value), dtype=str(self.dtype))
if self.dtype.names:
if n <= 1:
return _print_templates['short_flx'] % parameters
return _print_templates['long_flx'] % parameters
elif n <= 1:
return _print_templates['short_std'] % parameters
return _print_templates['long_std'] % parameters
def __eq__(self, other):
"Check whether other equals self elementwise"
if self is masked:
return masked
omask = getattr(other, '_mask', nomask)
if omask is nomask:
check = ndarray.__eq__(self.filled(0), other)
try:
check = check.view(type(self))
check._mask = self._mask
except AttributeError:
# Dang, we have a bool instead of an array: return the bool
return check
else:
odata = filled(other, 0)
check = ndarray.__eq__(self.filled(0), odata).view(type(self))
if self._mask is nomask:
check._mask = omask
else:
mask = mask_or(self._mask, omask)
if mask.dtype.names:
if mask.size > 1:
axis = 1
else:
axis = None
try:
mask = mask.view((bool_, len(self.dtype))).all(axis)
except ValueError:
mask = np.all([[f[n].all() for n in mask.dtype.names]
for f in mask], axis=axis)
check._mask = mask
return check
#
def __ne__(self, other):
"Check whether other doesn't equal self elementwise"
if self is masked:
return masked
omask = getattr(other, '_mask', nomask)
if omask is nomask:
check = ndarray.__ne__(self.filled(0), other)
try:
check = check.view(type(self))
check._mask = self._mask
except AttributeError:
# In case check is a boolean (or a numpy.bool)
return check
else:
odata = filled(other, 0)
check = ndarray.__ne__(self.filled(0), odata).view(type(self))
if self._mask is nomask:
check._mask = omask
else:
mask = mask_or(self._mask, omask)
if mask.dtype.names:
if mask.size > 1:
axis = 1
else:
axis = None
try:
mask = mask.view((bool_, len(self.dtype))).all(axis)
except ValueError:
mask = np.all([[f[n].all() for n in mask.dtype.names]
for f in mask], axis=axis)
check._mask = mask
return check
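    # A minimal comparison sketch for ``__eq__``/``__ne__`` (assuming ``numpy``
    # is imported as ``np``): the result is itself masked wherever either
    # operand was masked:
    #
    #     >>> a = np.ma.array([1, 2, 3], mask=[0, 1, 0])
    #     >>> (a == [1, 9, 3]).mask.tolist()
    #     [False, True, False]
    #     >>> (a == [1, 9, 3]).tolist()     # masked entries become None in tolist()
    #     [True, None, True]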
#
def __add__(self, other):
"Add other to self, and return a new masked array."
return add(self, other)
#
def __radd__(self, other):
"Add other to self, and return a new masked array."
return add(self, other)
#
def __sub__(self, other):
"Subtract other to self, and return a new masked array."
return subtract(self, other)
#
def __rsub__(self, other):
"Subtract other to self, and return a new masked array."
return subtract(other, self)
#
def __mul__(self, other):
"Multiply other by self, and return a new masked array."
return multiply(self, other)
#
def __rmul__(self, other):
"Multiply other by self, and return a new masked array."
return multiply(self, other)
#
def __div__(self, other):
"Divide other into self, and return a new masked array."
return divide(self, other)
#
def __truediv__(self, other):
"Divide other into self, and return a new masked array."
return true_divide(self, other)
#
def __rtruediv__(self, other):
"Divide other into self, and return a new masked array."
return true_divide(other, self)
#
def __floordiv__(self, other):
"Divide other into self, and return a new masked array."
return floor_divide(self, other)
#
def __rfloordiv__(self, other):
"Divide other into self, and return a new masked array."
return floor_divide(other, self)
#
def __pow__(self, other):
"Raise self to the power other, masking the potential NaNs/Infs"
return power(self, other)
#
def __rpow__(self, other):
"Raise self to the power other, masking the potential NaNs/Infs"
return power(other, self)
#............................................
def __iadd__(self, other):
"Add other to self in-place."
m = getmask(other)
if self._mask is nomask:
if m is not nomask and m.any():
self._mask = make_mask_none(self.shape, self.dtype)
self._mask += m
else:
if m is not nomask:
self._mask += m
ndarray.__iadd__(self._data, np.where(self._mask, 0, getdata(other)))
return self
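    # A minimal in-place arithmetic sketch (assuming ``numpy`` is imported as
    # ``np``): at masked positions the other operand is replaced internally by
    # the operation's neutral element (0 for addition, 1 for multiplication)
    # and the two masks are combined:
    #
    #     >>> x = np.ma.array([1.0, 2.0, 3.0], mask=[0, 1, 0])
    #     >>> x += np.ma.array([10.0, 10.0, 10.0], mask=[0, 0, 1])
    #     >>> x.mask.tolist()
    #     [False, True, True]
    #     >>> x.filled(-1).tolist()
    #     [11.0, -1.0, -1.0]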
#....
def __isub__(self, other):
"Subtract other from self in-place."
m = getmask(other)
if self._mask is nomask:
if m is not nomask and m.any():
self._mask = make_mask_none(self.shape, self.dtype)
self._mask += m
elif m is not nomask:
self._mask += m
ndarray.__isub__(self._data, np.where(self._mask, 0, getdata(other)))
return self
#....
def __imul__(self, other):
"Multiply self by other in-place."
m = getmask(other)
if self._mask is nomask:
if m is not nomask and m.any():
self._mask = make_mask_none(self.shape, self.dtype)
self._mask += m
elif m is not nomask:
self._mask += m
ndarray.__imul__(self._data, np.where(self._mask, 1, getdata(other)))
return self
#....
def __idiv__(self, other):
"Divide self by other in-place."
other_data = getdata(other)
dom_mask = _DomainSafeDivide().__call__(self._data, other_data)
other_mask = getmask(other)
new_mask = mask_or(other_mask, dom_mask)
# The following 3 lines control the domain filling
if dom_mask.any():
(_, fval) = ufunc_fills[np.divide]
other_data = np.where(dom_mask, fval, other_data)
# self._mask = mask_or(self._mask, new_mask)
self._mask |= new_mask
ndarray.__idiv__(self._data, np.where(self._mask, 1, other_data))
return self
#....
def __ifloordiv__(self, other):
"Floor divide self by other in-place."
other_data = getdata(other)
dom_mask = _DomainSafeDivide().__call__(self._data, other_data)
other_mask = getmask(other)
new_mask = mask_or(other_mask, dom_mask)
# The following 3 lines control the domain filling
if dom_mask.any():
(_, fval) = ufunc_fills[np.floor_divide]
other_data = np.where(dom_mask, fval, other_data)
# self._mask = mask_or(self._mask, new_mask)
self._mask |= new_mask
ndarray.__ifloordiv__(self._data, np.where(self._mask, 1, other_data))
return self
#....
def __itruediv__(self, other):
"True divide self by other in-place."
other_data = getdata(other)
dom_mask = _DomainSafeDivide().__call__(self._data, other_data)
other_mask = getmask(other)
new_mask = mask_or(other_mask, dom_mask)
# The following 3 lines control the domain filling
if dom_mask.any():
(_, fval) = ufunc_fills[np.true_divide]
other_data = np.where(dom_mask, fval, other_data)
# self._mask = mask_or(self._mask, new_mask)
self._mask |= new_mask
ndarray.__itruediv__(self._data, np.where(self._mask, 1, other_data))
return self
#...
def __ipow__(self, other):
"Raise self to the power other, in place."
other_data = getdata(other)
other_mask = getmask(other)
with np.errstate():
np.seterr(divide='ignore', invalid='ignore')
ndarray.__ipow__(self._data, np.where(self._mask, 1, other_data))
invalid = np.logical_not(np.isfinite(self._data))
if invalid.any():
if self._mask is not nomask:
self._mask |= invalid
else:
self._mask = invalid
np.copyto(self._data, self.fill_value, where=invalid)
new_mask = mask_or(other_mask, invalid)
self._mask = mask_or(self._mask, new_mask)
return self
#............................................
def __float__(self):
"Convert to float."
if self.size > 1:
raise TypeError("Only length-1 arrays can be converted "
"to Python scalars")
elif self._mask:
warnings.warn("Warning: converting a masked element to nan.")
return np.nan
return float(self.item())
def __int__(self):
"Convert to int."
if self.size > 1:
raise TypeError("Only length-1 arrays can be converted "
"to Python scalars")
elif self._mask:
raise MaskError('Cannot convert masked element to a Python int.')
return int(self.item())
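    # A minimal conversion sketch for ``__float__``/``__int__`` (assuming
    # ``numpy`` is imported as ``np``): only size-1 arrays convert; a masked
    # element converts to nan (with a warning) for float() and raises for int():
    #
    #     >>> int(np.ma.array([7]))
    #     7
    #     >>> float(np.ma.array([2.5]))
    #     2.5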
def get_imag(self):
"""
Return the imaginary part of the masked array.
The returned array is a view on the imaginary part of the `MaskedArray`
whose `get_imag` method is called.
Parameters
----------
None
Returns
-------
result : MaskedArray
The imaginary part of the masked array.
See Also
--------
get_real, real, imag
Examples
--------
>>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False])
>>> x.get_imag()
masked_array(data = [1.0 -- 1.6],
mask = [False True False],
fill_value = 1e+20)
"""
result = self._data.imag.view(type(self))
result.__setmask__(self._mask)
return result
imag = property(fget=get_imag, doc="Imaginary part.")
def get_real(self):
"""
Return the real part of the masked array.
The returned array is a view on the real part of the `MaskedArray`
whose `get_real` method is called.
Parameters
----------
None
Returns
-------
result : MaskedArray
The real part of the masked array.
See Also
--------
get_imag, real, imag
Examples
--------
>>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False])
>>> x.get_real()
masked_array(data = [1.0 -- 3.45],
mask = [False True False],
fill_value = 1e+20)
"""
result = self._data.real.view(type(self))
result.__setmask__(self._mask)
return result
real = property(fget=get_real, doc="Real part")
#............................................
def count(self, axis=None):
"""
Count the non-masked elements of the array along the given axis.
Parameters
----------
axis : int, optional
Axis along which to count the non-masked elements. If `axis` is
`None`, all non-masked elements are counted.
Returns
-------
result : int or ndarray
If `axis` is `None`, an integer count is returned. When `axis` is
not `None`, an array with shape determined by the lengths of the
            remaining axes is returned.
See Also
--------
count_masked : Count masked elements in array or along a given axis.
Examples
--------
>>> import numpy.ma as ma
>>> a = ma.arange(6).reshape((2, 3))
>>> a[1, :] = ma.masked
>>> a
masked_array(data =
[[0 1 2]
[-- -- --]],
mask =
[[False False False]
[ True True True]],
fill_value = 999999)
>>> a.count()
3
When the `axis` keyword is specified an array of appropriate size is
returned.
>>> a.count(axis=0)
array([1, 1, 1])
>>> a.count(axis=1)
array([3, 0])
"""
m = self._mask
s = self.shape
if m is nomask:
if axis is None:
return self.size
else:
n = s[axis]
t = list(s)
del t[axis]
return np.full(t, n, dtype=np.intp)
n1 = np.size(m, axis)
n2 = np.sum(m, axis=axis, dtype=np.intp)
if axis is None:
return (n1 - n2)
else:
return narray(n1 - n2)
#............................................
flatten = _arraymethod('flatten')
#
def ravel(self):
"""
Returns a 1D version of self, as a view.
Returns
-------
MaskedArray
Output view is of shape ``(self.size,)`` (or
``(np.ma.product(self.shape),)``).
Examples
--------
>>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
>>> print x
[[1 -- 3]
[-- 5 --]
[7 -- 9]]
>>> print x.ravel()
[1 -- 3 -- 5 -- 7 -- 9]
"""
r = ndarray.ravel(self._data).view(type(self))
r._update_from(self)
if self._mask is not nomask:
r._mask = ndarray.ravel(self._mask).reshape(r.shape)
else:
r._mask = nomask
return r
#
repeat = _arraymethod('repeat')
#
def reshape (self, *s, **kwargs):
"""
Give a new shape to the array without changing its data.
Returns a masked array containing the same data, but with a new shape.
The result is a view on the original array; if this is not possible, a
ValueError is raised.
Parameters
----------
shape : int or tuple of ints
The new shape should be compatible with the original shape. If an
integer is supplied, then the result will be a 1-D array of that
length.
order : {'C', 'F'}, optional
Determines whether the array data should be viewed as in C
(row-major) or FORTRAN (column-major) order.
Returns
-------
reshaped_array : array
A new view on the array.
See Also
--------
reshape : Equivalent function in the masked array module.
numpy.ndarray.reshape : Equivalent method on ndarray object.
numpy.reshape : Equivalent function in the NumPy module.
Notes
-----
        The reshaping operation cannot guarantee that a copy will not be made;
        to modify the shape in place, use ``a.shape = s``.
Examples
--------
>>> x = np.ma.array([[1,2],[3,4]], mask=[1,0,0,1])
>>> print x
[[-- 2]
[3 --]]
>>> x = x.reshape((4,1))
>>> print x
[[--]
[2]
[3]
[--]]
"""
kwargs.update(order=kwargs.get('order', 'C'))
result = self._data.reshape(*s, **kwargs).view(type(self))
result._update_from(self)
mask = self._mask
if mask is not nomask:
result._mask = mask.reshape(*s, **kwargs)
return result
#
def resize(self, newshape, refcheck=True, order=False):
"""
.. warning::
This method does nothing, except raise a ValueError exception. A
masked array does not own its data and therefore cannot safely be
resized in place. Use the `numpy.ma.resize` function instead.
This method is difficult to implement safely and may be deprecated in
future releases of NumPy.
"""
# Note : the 'order' keyword looks broken, let's just drop it
# try:
# ndarray.resize(self, newshape, refcheck=refcheck)
# if self.mask is not nomask:
# self._mask.resize(newshape, refcheck=refcheck)
# except ValueError:
# raise ValueError("Cannot resize an array that has been referenced "
# "or is referencing another array in this way.\n"
# "Use the numpy.ma.resize function.")
# return None
errmsg = "A masked array does not own its data "\
"and therefore cannot be resized.\n" \
"Use the numpy.ma.resize function instead."
raise ValueError(errmsg)
#
def put(self, indices, values, mode='raise'):
"""
Set storage-indexed locations to corresponding values.
Sets self._data.flat[n] = values[n] for each n in indices.
If `values` is shorter than `indices` then it will repeat.
If `values` has some masked values, the initial mask is updated
        accordingly; otherwise the corresponding values are unmasked.
Parameters
----------
indices : 1-D array_like
Target indices, interpreted as integers.
values : array_like
Values to place in self._data copy at target indices.
mode : {'raise', 'wrap', 'clip'}, optional
Specifies how out-of-bounds indices will behave.
'raise' : raise an error.
'wrap' : wrap around.
'clip' : clip to the range.
Notes
-----
`values` can be a scalar or length 1 array.
Examples
--------
>>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
>>> print x
[[1 -- 3]
[-- 5 --]
[7 -- 9]]
>>> x.put([0,4,8],[10,20,30])
>>> print x
[[10 -- 3]
[-- 20 --]
[7 -- 30]]
>>> x.put(4,999)
>>> print x
[[10 -- 3]
[-- 999 --]
[7 -- 30]]
"""
m = self._mask
# Hard mask: Get rid of the values/indices that fall on masked data
if self._hardmask and self._mask is not nomask:
mask = self._mask[indices]
indices = narray(indices, copy=False)
values = narray(values, copy=False, subok=True)
values.resize(indices.shape)
indices = indices[~mask]
values = values[~mask]
#....
self._data.put(indices, values, mode=mode)
#....
if m is nomask:
m = getmask(values)
else:
m = m.copy()
if getmask(values) is nomask:
m.put(indices, False, mode=mode)
else:
m.put(indices, values._mask, mode=mode)
m = make_mask(m, copy=False, shrink=True)
self._mask = m
#............................................
def ids (self):
"""
Return the addresses of the data and mask areas.
Parameters
----------
None
Examples
--------
>>> x = np.ma.array([1, 2, 3], mask=[0, 1, 1])
>>> x.ids()
(166670640, 166659832)
If the array has no mask, the address of `nomask` is returned. This address
is typically not close to the data in memory:
>>> x = np.ma.array([1, 2, 3])
>>> x.ids()
(166691080, 3083169284L)
"""
if self._mask is nomask:
return (self.ctypes.data, id(nomask))
return (self.ctypes.data, self._mask.ctypes.data)
def iscontiguous(self):
"""
Return a boolean indicating whether the data is contiguous.
Parameters
----------
None
Examples
--------
>>> x = np.ma.array([1, 2, 3])
>>> x.iscontiguous()
True
`iscontiguous` returns one of the flags of the masked array:
>>> x.flags
C_CONTIGUOUS : True
F_CONTIGUOUS : True
OWNDATA : False
WRITEABLE : True
ALIGNED : True
UPDATEIFCOPY : False
"""
return self.flags['CONTIGUOUS']
#............................................
def all(self, axis=None, out=None):
"""
Check if all of the elements of `a` are true.
Performs a :func:`logical_and` over the given axis and returns the result.
Masked values are considered as True during computation.
For convenience, the output array is masked where ALL the values along the
        current axis are masked: if the output would have been a scalar and
        all the values are masked, then the output is `masked`.
Parameters
----------
axis : {None, integer}
Axis to perform the operation over.
If None, perform over flattened array.
out : {None, array}, optional
Array into which the result can be placed. Its type is preserved
and it must be of the right shape to hold the output.
See Also
--------
all : equivalent function
Examples
--------
>>> np.ma.array([1,2,3]).all()
True
>>> a = np.ma.array([1,2,3], mask=True)
>>> (a.all() is np.ma.masked)
True
"""
mask = _check_mask_axis(self._mask, axis)
if out is None:
d = self.filled(True).all(axis=axis).view(type(self))
if d.ndim:
d.__setmask__(mask)
elif mask:
return masked
return d
self.filled(True).all(axis=axis, out=out)
if isinstance(out, MaskedArray):
if out.ndim or mask:
out.__setmask__(mask)
return out
def any(self, axis=None, out=None):
"""
Check if any of the elements of `a` are true.
Performs a logical_or over the given axis and returns the result.
Masked values are considered as False during computation.
Parameters
----------
axis : {None, integer}
Axis to perform the operation over.
If None, perform over flattened array and return a scalar.
out : {None, array}, optional
Array into which the result can be placed. Its type is preserved
and it must be of the right shape to hold the output.
See Also
--------
any : equivalent function
"""
mask = _check_mask_axis(self._mask, axis)
if out is None:
d = self.filled(False).any(axis=axis).view(type(self))
if d.ndim:
d.__setmask__(mask)
elif mask:
d = masked
return d
self.filled(False).any(axis=axis, out=out)
if isinstance(out, MaskedArray):
if out.ndim or mask:
out.__setmask__(mask)
return out
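    # A minimal sketch for ``any`` (assuming ``numpy`` is imported as ``np``):
    # masked values count as False, so a masked "truthy" entry does not make
    # the result True:
    #
    #     >>> bool(np.ma.array([0, 0, 1], mask=[0, 0, 1]).any())
    #     False
    #     >>> bool(np.ma.array([0, 2, 0]).any())
    #     True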
def nonzero(self):
"""
Return the indices of unmasked elements that are not zero.
Returns a tuple of arrays, one for each dimension, containing the
indices of the non-zero elements in that dimension. The corresponding
non-zero values can be obtained with::
a[a.nonzero()]
To group the indices by element, rather than dimension, use
instead::
np.transpose(a.nonzero())
The result of this is always a 2d array, with a row for each non-zero
element.
Parameters
----------
None
Returns
-------
tuple_of_arrays : tuple
Indices of elements that are non-zero.
See Also
--------
numpy.nonzero :
Function operating on ndarrays.
flatnonzero :
Return indices that are non-zero in the flattened version of the input
array.
ndarray.nonzero :
Equivalent ndarray method.
count_nonzero :
Counts the number of non-zero elements in the input array.
Examples
--------
>>> import numpy.ma as ma
>>> x = ma.array(np.eye(3))
>>> x
masked_array(data =
[[ 1. 0. 0.]
[ 0. 1. 0.]
[ 0. 0. 1.]],
mask =
False,
fill_value=1e+20)
>>> x.nonzero()
(array([0, 1, 2]), array([0, 1, 2]))
Masked elements are ignored.
>>> x[1, 1] = ma.masked
>>> x
masked_array(data =
[[1.0 0.0 0.0]
[0.0 -- 0.0]
[0.0 0.0 1.0]],
mask =
[[False False False]
[False True False]
[False False False]],
fill_value=1e+20)
>>> x.nonzero()
(array([0, 2]), array([0, 2]))
Indices can also be grouped by element.
>>> np.transpose(x.nonzero())
array([[0, 0],
[2, 2]])
A common use for ``nonzero`` is to find the indices of an array, where
a condition is True. Given an array `a`, the condition `a` > 3 is a
boolean array and since False is interpreted as 0, ma.nonzero(a > 3)
yields the indices of the `a` where the condition is true.
>>> a = ma.array([[1,2,3],[4,5,6],[7,8,9]])
>>> a > 3
masked_array(data =
[[False False False]
[ True True True]
[ True True True]],
mask =
False,
fill_value=999999)
>>> ma.nonzero(a > 3)
(array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
The ``nonzero`` method of the condition array can also be called.
>>> (a > 3).nonzero()
(array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
"""
return narray(self.filled(0), copy=False).nonzero()
def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None):
"""
(this docstring should be overwritten)
"""
#!!!: implement out + test!
m = self._mask
if m is nomask:
result = super(MaskedArray, self).trace(offset=offset, axis1=axis1,
axis2=axis2, out=out)
return result.astype(dtype)
else:
D = self.diagonal(offset=offset, axis1=axis1, axis2=axis2)
return D.astype(dtype).filled(0).sum(axis=None, out=out)
trace.__doc__ = ndarray.trace.__doc__
def sum(self, axis=None, dtype=None, out=None):
"""
Return the sum of the array elements over the given axis.
Masked elements are set to 0 internally.
Parameters
----------
axis : {None, -1, int}, optional
Axis along which the sum is computed. The default
(`axis` = None) is to compute over the flattened array.
dtype : {None, dtype}, optional
Determines the type of the returned array and of the accumulator
where the elements are summed. If dtype has the value None and
the type of a is an integer type of precision less than the default
platform integer, then the default platform integer precision is
used. Otherwise, the dtype is the same as that of a.
out : {None, ndarray}, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type will be cast if necessary.
Returns
-------
sum_along_axis : MaskedArray or scalar
An array with the same shape as self, with the specified
axis removed. If self is a 0-d array, or if `axis` is None, a scalar
is returned. If an output array is specified, a reference to
`out` is returned.
Examples
--------
>>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
>>> print x
[[1 -- 3]
[-- 5 --]
[7 -- 9]]
>>> print x.sum()
25
>>> print x.sum(axis=1)
[4 5 16]
>>> print x.sum(axis=0)
[8 5 12]
>>> print type(x.sum(axis=0, dtype=np.int64)[0])
<type 'numpy.int64'>
"""
_mask = ndarray.__getattribute__(self, '_mask')
newmask = _check_mask_axis(_mask, axis)
# No explicit output
if out is None:
result = self.filled(0).sum(axis, dtype=dtype)
rndim = getattr(result, 'ndim', 0)
if rndim:
result = result.view(type(self))
result.__setmask__(newmask)
elif newmask:
result = masked
return result
# Explicit output
result = self.filled(0).sum(axis, dtype=dtype, out=out)
if isinstance(out, MaskedArray):
outmask = getattr(out, '_mask', nomask)
if (outmask is nomask):
outmask = out._mask = make_mask_none(out.shape)
outmask.flat = newmask
return out
def cumsum(self, axis=None, dtype=None, out=None):
"""
Return the cumulative sum of the elements along the given axis.
The cumulative sum is calculated over the flattened array by
default, otherwise over the specified axis.
Masked values are set to 0 internally during the computation.
However, their position is saved, and the result will be masked at
the same locations.
Parameters
----------
axis : {None, -1, int}, optional
Axis along which the sum is computed. The default (`axis` = None) is to
compute over the flattened array. `axis` may be negative, in which case
it counts from the last to the first axis.
dtype : {None, dtype}, optional
Type of the returned array and of the accumulator in which the
elements are summed. If `dtype` is not specified, it defaults
to the dtype of `a`, unless `a` has an integer dtype with a
precision less than that of the default platform integer. In
that case, the default platform integer is used.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type will be cast if necessary.
Returns
-------
cumsum : ndarray.
A new array holding the result is returned unless ``out`` is
specified, in which case a reference to ``out`` is returned.
Notes
-----
The mask is lost if `out` is not a valid :class:`MaskedArray` !
Arithmetic is modular when using integer types, and no error is
raised on overflow.
Examples
--------
>>> marr = np.ma.array(np.arange(10), mask=[0,0,0,1,1,1,0,0,0,0])
>>> print marr.cumsum()
[0 1 3 -- -- -- 9 16 24 33]
"""
result = self.filled(0).cumsum(axis=axis, dtype=dtype, out=out)
if out is not None:
if isinstance(out, MaskedArray):
out.__setmask__(self.mask)
return out
result = result.view(type(self))
result.__setmask__(self._mask)
return result
def prod(self, axis=None, dtype=None, out=None):
"""
Return the product of the array elements over the given axis.
Masked elements are set to 1 internally for computation.
Parameters
----------
axis : {None, int}, optional
Axis over which the product is taken. If None is used, then the
product is over all the array elements.
dtype : {None, dtype}, optional
Determines the type of the returned array and of the accumulator
where the elements are multiplied. If ``dtype`` has the value ``None``
and the type of a is an integer type of precision less than the default
platform integer, then the default platform integer precision is
used. Otherwise, the dtype is the same as that of a.
out : {None, array}, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output but the type will be cast if
necessary.
Returns
-------
product_along_axis : {array, scalar}, see dtype parameter above.
Returns an array whose shape is the same as a with the specified
axis removed. Returns a 0d array when a is 1d or axis=None.
Returns a reference to the specified output array if specified.
See Also
--------
prod : equivalent function
Notes
-----
Arithmetic is modular when using integer types, and no error is raised
on overflow.
Examples
--------
>>> np.prod([1.,2.])
2.0
>>> np.prod([1.,2.], dtype=np.int32)
2
>>> np.prod([[1.,2.],[3.,4.]])
24.0
>>> np.prod([[1.,2.],[3.,4.]], axis=1)
array([ 2., 12.])
"""
_mask = ndarray.__getattribute__(self, '_mask')
newmask = _check_mask_axis(_mask, axis)
# No explicit output
if out is None:
result = self.filled(1).prod(axis, dtype=dtype)
rndim = getattr(result, 'ndim', 0)
if rndim:
result = result.view(type(self))
result.__setmask__(newmask)
elif newmask:
result = masked
return result
# Explicit output
result = self.filled(1).prod(axis, dtype=dtype, out=out)
if isinstance(out, MaskedArray):
outmask = getattr(out, '_mask', nomask)
if (outmask is nomask):
outmask = out._mask = make_mask_none(out.shape)
outmask.flat = newmask
return out
product = prod
def cumprod(self, axis=None, dtype=None, out=None):
"""
Return the cumulative product of the elements along the given axis.
The cumulative product is taken over the flattened array by
default, otherwise over the specified axis.
Masked values are set to 1 internally during the computation.
However, their position is saved, and the result will be masked at
the same locations.
Parameters
----------
axis : {None, -1, int}, optional
Axis along which the product is computed. The default
(`axis` = None) is to compute over the flattened array.
dtype : {None, dtype}, optional
Determines the type of the returned array and of the accumulator
where the elements are multiplied. If ``dtype`` has the value ``None``
and the type of ``a`` is an integer type of precision less than the
default platform integer, then the default platform integer precision
is used. Otherwise, the dtype is the same as that of ``a``.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type will be cast if necessary.
Returns
-------
cumprod : ndarray
A new array holding the result is returned unless out is specified,
in which case a reference to out is returned.
Notes
-----
The mask is lost if `out` is not a valid MaskedArray !
Arithmetic is modular when using integer types, and no error is
raised on overflow.
"""
result = self.filled(1).cumprod(axis=axis, dtype=dtype, out=out)
if out is not None:
if isinstance(out, MaskedArray):
out.__setmask__(self._mask)
return out
result = result.view(type(self))
result.__setmask__(self._mask)
return result
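    # A minimal sketch for ``cumprod`` (assuming ``numpy`` is imported as
    # ``np``): masked entries are treated as 1 for the running product, but
    # stay masked in the result:
    #
    #     >>> x = np.ma.array([1, 2, 3, 4], mask=[0, 1, 0, 0])
    #     >>> x.cumprod().filled(0).tolist()
    #     [1, 0, 3, 12]
    #     >>> x.cumprod().mask.tolist()
    #     [False, True, False, False]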
def mean(self, axis=None, dtype=None, out=None):
"""
Returns the average of the array elements.
Masked entries are ignored.
The average is taken over the flattened array by default, otherwise over
the specified axis. Refer to `numpy.mean` for the full documentation.
Parameters
----------
a : array_like
Array containing numbers whose mean is desired. If `a` is not an
array, a conversion is attempted.
axis : int, optional
Axis along which the means are computed. The default is to compute
the mean of the flattened array.
dtype : dtype, optional
Type to use in computing the mean. For integer inputs, the default
            is float64; for floating point inputs, it is the same as the input
dtype.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output but the type will be cast if
necessary.
Returns
-------
mean : ndarray, see dtype parameter above
If `out=None`, returns a new array containing the mean values,
otherwise a reference to the output array is returned.
See Also
--------
numpy.ma.mean : Equivalent function.
numpy.mean : Equivalent function on non-masked arrays.
numpy.ma.average: Weighted average.
Examples
--------
>>> a = np.ma.array([1,2,3], mask=[False, False, True])
>>> a
masked_array(data = [1 2 --],
mask = [False False True],
fill_value = 999999)
>>> a.mean()
1.5
"""
if self._mask is nomask:
result = super(MaskedArray, self).mean(axis=axis, dtype=dtype)
else:
dsum = self.sum(axis=axis, dtype=dtype)
cnt = self.count(axis=axis)
if cnt.shape == () and (cnt == 0):
result = masked
else:
result = dsum * 1. / cnt
if out is not None:
out.flat = result
if isinstance(out, MaskedArray):
outmask = getattr(out, '_mask', nomask)
if (outmask is nomask):
outmask = out._mask = make_mask_none(out.shape)
outmask.flat = getattr(result, '_mask', nomask)
return out
return result
def anom(self, axis=None, dtype=None):
"""
Compute the anomalies (deviations from the arithmetic mean)
along the given axis.
Returns an array of anomalies, with the same shape as the input and
where the arithmetic mean is computed along the given axis.
Parameters
----------
axis : int, optional
Axis over which the anomalies are taken.
The default is to use the mean of the flattened array as reference.
dtype : dtype, optional
Type to use in computing the variance. For arrays of integer type
the default is float32; for arrays of float types it is the same as
the array type.
See Also
--------
mean : Compute the mean of the array.
Examples
--------
>>> a = np.ma.array([1,2,3])
>>> a.anom()
masked_array(data = [-1. 0. 1.],
mask = False,
fill_value = 1e+20)
"""
m = self.mean(axis, dtype)
if not axis:
return (self - m)
else:
return (self - expand_dims(m, axis))
def var(self, axis=None, dtype=None, out=None, ddof=0):
""
# Easy case: nomask, business as usual
if self._mask is nomask:
return self._data.var(axis=axis, dtype=dtype, out=out, ddof=ddof)
# Some data are masked, yay!
cnt = self.count(axis=axis) - ddof
danom = self.anom(axis=axis, dtype=dtype)
if iscomplexobj(self):
danom = umath.absolute(danom) ** 2
else:
danom *= danom
dvar = divide(danom.sum(axis), cnt).view(type(self))
# Apply the mask if it's not a scalar
if dvar.ndim:
dvar._mask = mask_or(self._mask.all(axis), (cnt <= 0))
dvar._update_from(self)
elif getattr(dvar, '_mask', False):
# Make sure that masked is returned when the scalar is masked.
dvar = masked
if out is not None:
if isinstance(out, MaskedArray):
out.flat = 0
out.__setmask__(True)
elif out.dtype.kind in 'biu':
errmsg = "Masked data information would be lost in one or "\
"more location."
raise MaskError(errmsg)
else:
out.flat = np.nan
return out
        # In case we have an explicit output
if out is not None:
# Set the data
out.flat = dvar
# Set the mask if needed
if isinstance(out, MaskedArray):
out.__setmask__(dvar.mask)
return out
return dvar
var.__doc__ = np.var.__doc__
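    # A minimal sketch for ``var`` (assuming ``numpy`` is imported as ``np``):
    # masked values are ignored, and ``ddof`` is applied to the count of
    # unmasked elements only:
    #
    #     >>> x = np.ma.array([1.0, 2.0, 3.0, 100.0], mask=[0, 0, 0, 1])
    #     >>> float(x.var())                # population variance of [1, 2, 3]
    #     0.6666666666666666
    #     >>> float(x.var(ddof=1))          # sample variance of [1, 2, 3]
    #     1.0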
def std(self, axis=None, dtype=None, out=None, ddof=0):
""
dvar = self.var(axis=axis, dtype=dtype, out=out, ddof=ddof)
if dvar is not masked:
if out is not None:
np.power(out, 0.5, out=out, casting='unsafe')
return out
dvar = sqrt(dvar)
return dvar
std.__doc__ = np.std.__doc__
#............................................
def round(self, decimals=0, out=None):
"""
        Return an array with each element rounded to the given number of decimals.
Refer to `numpy.around` for full documentation.
See Also
--------
numpy.around : equivalent function
"""
result = self._data.round(decimals=decimals, out=out).view(type(self))
result._mask = self._mask
result._update_from(self)
# No explicit output: we're done
if out is None:
return result
if isinstance(out, MaskedArray):
out.__setmask__(self._mask)
return out
round.__doc__ = ndarray.round.__doc__
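    # A minimal sketch for ``round`` (assuming ``numpy`` is imported as
    # ``np``): the data is rounded while the mask is preserved:
    #
    #     >>> x = np.ma.array([1.234, 5.678], mask=[0, 1])
    #     >>> x.round(2).filled(0).tolist()
    #     [1.23, 0.0]
    #     >>> x.round(2).mask.tolist()
    #     [False, True]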
#............................................
def argsort(self, axis=None, kind='quicksort', order=None, fill_value=None):
"""
Return an ndarray of indices that sort the array along the
specified axis. Masked values are filled beforehand to
`fill_value`.
Parameters
----------
axis : int, optional
Axis along which to sort. The default is -1 (last axis).
If None, the flattened array is used.
fill_value : var, optional
Value used to fill the array before sorting.
The default is the `fill_value` attribute of the input array.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
order : list, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. Not all fields need be
specified.
Returns
-------
index_array : ndarray, int
Array of indices that sort `a` along the specified axis.
In other words, ``a[index_array]`` yields a sorted `a`.
See Also
--------
sort : Describes sorting algorithms used.
lexsort : Indirect stable sort with multiple keys.
ndarray.sort : Inplace sort.
Notes
-----
See `sort` for notes on the different sorting algorithms.
Examples
--------
>>> a = np.ma.array([3,2,1], mask=[False, False, True])
>>> a
masked_array(data = [3 2 --],
mask = [False False True],
fill_value = 999999)
>>> a.argsort()
array([1, 0, 2])
"""
if fill_value is None:
fill_value = default_fill_value(self)
d = self.filled(fill_value).view(ndarray)
return d.argsort(axis=axis, kind=kind, order=order)
def argmin(self, axis=None, fill_value=None, out=None):
"""
Return array of indices to the minimum values along the given axis.
Parameters
----------
axis : {None, integer}
If None, the index is into the flattened array, otherwise along
the specified axis
fill_value : {var}, optional
Value used to fill in the masked values. If None, the output of
minimum_fill_value(self._data) is used instead.
out : {None, array}, optional
Array into which the result can be placed. Its type is preserved
and it must be of the right shape to hold the output.
Returns
-------
{ndarray, scalar}
            For multi-dimensional input, returns a new ndarray of indices to
            the minimum values along the given axis. Otherwise, returns a
            scalar index of the minimum value along the given axis.
Examples
--------
>>> x = np.ma.array(np.arange(4), mask=[1,1,0,0])
>>> x.shape = (2,2)
>>> print x
[[-- --]
[2 3]]
>>> print x.argmin(axis=0, fill_value=-1)
[0 0]
>>> print x.argmin(axis=0, fill_value=9)
[1 1]
"""
if fill_value is None:
fill_value = minimum_fill_value(self)
d = self.filled(fill_value).view(ndarray)
return d.argmin(axis, out=out)
def argmax(self, axis=None, fill_value=None, out=None):
"""
Returns array of indices of the maximum values along the given axis.
Masked values are treated as if they had the value fill_value.
Parameters
----------
axis : {None, integer}
If None, the index is into the flattened array, otherwise along
the specified axis
fill_value : {var}, optional
Value used to fill in the masked values. If None, the output of
maximum_fill_value(self._data) is used instead.
out : {None, array}, optional
Array into which the result can be placed. Its type is preserved
and it must be of the right shape to hold the output.
Returns
-------
index_array : {integer_array}
Examples
--------
>>> a = np.arange(6).reshape(2,3)
>>> a.argmax()
5
>>> a.argmax(0)
array([1, 1, 1])
>>> a.argmax(1)
array([2, 2])
"""
if fill_value is None:
fill_value = maximum_fill_value(self._data)
d = self.filled(fill_value).view(ndarray)
return d.argmax(axis, out=out)
def sort(self, axis= -1, kind='quicksort', order=None,
endwith=True, fill_value=None):
"""
Sort the array, in-place
Parameters
----------
a : array_like
Array to be sorted.
axis : int, optional
Axis along which to sort. If None, the array is flattened before
sorting. The default is -1, which sorts along the last axis.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm. Default is 'quicksort'.
order : list, optional
When `a` is a structured array, this argument specifies which fields
to compare first, second, and so on. This list does not need to
include all of the fields.
endwith : {True, False}, optional
Whether missing values (if any) should be forced in the upper indices
(at the end of the array) (True) or lower indices (at the beginning).
fill_value : {var}, optional
Value used internally for the masked values.
If ``fill_value`` is not None, it supersedes ``endwith``.
Returns
-------
sorted_array : ndarray
Array of the same type and shape as `a`.
See Also
--------
ndarray.sort : Method to sort an array in-place.
argsort : Indirect sort.
lexsort : Indirect stable sort on multiple keys.
searchsorted : Find elements in a sorted array.
Notes
-----
See ``sort`` for notes on the different sorting algorithms.
Examples
--------
>>> a = ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0])
>>> # Default
>>> a.sort()
>>> print a
[1 3 5 -- --]
>>> a = ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0])
>>> # Put missing values in the front
>>> a.sort(endwith=False)
>>> print a
[-- -- 1 3 5]
>>> a = ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0])
>>> # fill_value takes over endwith
>>> a.sort(endwith=False, fill_value=3)
>>> print a
[1 -- -- 3 5]
"""
if self._mask is nomask:
ndarray.sort(self, axis=axis, kind=kind, order=order)
else:
if self is masked:
return self
if fill_value is None:
if endwith:
filler = minimum_fill_value(self)
else:
filler = maximum_fill_value(self)
else:
filler = fill_value
idx = np.meshgrid(*[np.arange(x) for x in self.shape], sparse=True,
indexing='ij')
idx[axis] = self.filled(filler).argsort(axis=axis, kind=kind,
order=order)
tmp_mask = self._mask[idx].flat
tmp_data = self._data[idx].flat
self._data.flat = tmp_data
self._mask.flat = tmp_mask
return
#............................................
def min(self, axis=None, out=None, fill_value=None):
"""
Return the minimum along a given axis.
Parameters
----------
axis : {None, int}, optional
Axis along which to operate. By default, ``axis`` is None and the
flattened input is used.
out : array_like, optional
Alternative output array in which to place the result. Must be of
the same shape and buffer length as the expected output.
fill_value : {var}, optional
Value used to fill in the masked values.
If None, use the output of `minimum_fill_value`.
Returns
-------
amin : array_like
New array holding the result.
If ``out`` was specified, ``out`` is returned.
See Also
--------
minimum_fill_value
Returns the minimum filling value for a given datatype.
"""
_mask = ndarray.__getattribute__(self, '_mask')
newmask = _check_mask_axis(_mask, axis)
if fill_value is None:
fill_value = minimum_fill_value(self)
# No explicit output
if out is None:
result = self.filled(fill_value).min(axis=axis, out=out).view(type(self))
if result.ndim:
# Set the mask
result.__setmask__(newmask)
# Get rid of Infs
if newmask.ndim:
np.copyto(result, result.fill_value, where=newmask)
elif newmask:
result = masked
return result
# Explicit output
result = self.filled(fill_value).min(axis=axis, out=out)
if isinstance(out, MaskedArray):
outmask = getattr(out, '_mask', nomask)
if (outmask is nomask):
outmask = out._mask = make_mask_none(out.shape)
outmask.flat = newmask
else:
if out.dtype.kind in 'biu':
errmsg = "Masked data information would be lost in one or more"\
" locations."
raise MaskError(errmsg)
np.copyto(out, np.nan, where=newmask)
return out
def mini(self, axis=None):
"""
Return the array minimum along the specified axis.
Parameters
----------
axis : int, optional
The axis along which to find the minima. Default is None, in which case
the minimum value in the whole array is returned.
Returns
-------
min : scalar or MaskedArray
If `axis` is None, the result is a scalar. Otherwise, if `axis` is
given and the array is at least 2-D, the result is a masked array with
dimension one smaller than the array on which `mini` is called.
Examples
--------
>>> x = np.ma.array(np.arange(6), mask=[0 ,1, 0, 0, 0 ,1]).reshape(3, 2)
>>> print x
[[0 --]
[2 3]
[4 --]]
>>> x.mini()
0
>>> x.mini(axis=0)
masked_array(data = [0 3],
mask = [False False],
fill_value = 999999)
>>> print x.mini(axis=1)
[0 2 4]
"""
if axis is None:
return minimum(self)
else:
return minimum.reduce(self, axis)
#........................
def max(self, axis=None, out=None, fill_value=None):
"""
Return the maximum along a given axis.
Parameters
----------
axis : {None, int}, optional
Axis along which to operate. By default, ``axis`` is None and the
flattened input is used.
out : array_like, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
fill_value : {var}, optional
Value used to fill in the masked values.
If None, use the output of maximum_fill_value().
Returns
-------
amax : array_like
New array holding the result.
If ``out`` was specified, ``out`` is returned.
See Also
--------
maximum_fill_value
Returns the maximum filling value for a given datatype.
"""
_mask = ndarray.__getattribute__(self, '_mask')
newmask = _check_mask_axis(_mask, axis)
if fill_value is None:
fill_value = maximum_fill_value(self)
# No explicit output
if out is None:
result = self.filled(fill_value).max(axis=axis, out=out).view(type(self))
if result.ndim:
# Set the mask
result.__setmask__(newmask)
# Get rid of Infs
if newmask.ndim:
np.copyto(result, result.fill_value, where=newmask)
elif newmask:
result = masked
return result
# Explicit output
result = self.filled(fill_value).max(axis=axis, out=out)
if isinstance(out, MaskedArray):
outmask = getattr(out, '_mask', nomask)
if (outmask is nomask):
outmask = out._mask = make_mask_none(out.shape)
outmask.flat = newmask
else:
if out.dtype.kind in 'biu':
errmsg = "Masked data information would be lost in one or more"\
" locations."
raise MaskError(errmsg)
np.copyto(out, np.nan, where=newmask)
return out
def ptp(self, axis=None, out=None, fill_value=None):
"""
Return (maximum - minimum) along the given dimension
(i.e. peak-to-peak value).
Parameters
----------
axis : {None, int}, optional
Axis along which to find the peaks. If None (default) the
flattened array is used.
out : {None, array_like}, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type will be cast if necessary.
fill_value : {var}, optional
Value used to fill in the masked values.
Returns
-------
ptp : ndarray.
A new array holding the result, unless ``out`` was
specified, in which case a reference to ``out`` is returned.
"""
if out is None:
result = self.max(axis=axis, fill_value=fill_value)
result -= self.min(axis=axis, fill_value=fill_value)
return result
out.flat = self.max(axis=axis, out=out, fill_value=fill_value)
min_value = self.min(axis=axis, fill_value=fill_value)
np.subtract(out, min_value, out=out, casting='unsafe')
return out
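# Illustrative usage (editor's addition, not part of the original numpy
# source; assumes ``import numpy.ma as ma``): min/max/ptp ignore masked
# entries.
#     >>> x = ma.array([5, -1, 7, 2], mask=[0, 1, 0, 0])
#     >>> x.min(), x.max(), x.ptp()   # the masked -1 is ignored
#     (2, 7, 5)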
def take(self, indices, axis=None, out=None, mode='raise'):
"""
Take elements from the masked array along the given axis.
Masked entries in `indices` are treated as index 0 and the
corresponding output entries are masked. See also `numpy.take`.
"""
(_data, _mask) = (self._data, self._mask)
cls = type(self)
# Make sure the indices are not masked
maskindices = getattr(indices, '_mask', nomask)
if maskindices is not nomask:
indices = indices.filled(0)
# Get the data
if out is None:
out = _data.take(indices, axis=axis, mode=mode).view(cls)
else:
np.take(_data, indices, axis=axis, mode=mode, out=out)
# Get the mask
if isinstance(out, MaskedArray):
if _mask is nomask:
outmask = maskindices
else:
outmask = _mask.take(indices, axis=axis, mode=mode)
outmask |= maskindices
out.__setmask__(outmask)
return out
# Array methods ---------------------------------------
copy = _arraymethod('copy')
diagonal = _arraymethod('diagonal')
transpose = _arraymethod('transpose')
T = property(fget=lambda self:self.transpose())
swapaxes = _arraymethod('swapaxes')
clip = _arraymethod('clip', onmask=False)
copy = _arraymethod('copy')
squeeze = _arraymethod('squeeze')
#--------------------------------------------
def tolist(self, fill_value=None):
"""
Return the data portion of the masked array as a hierarchical Python list.
Data items are converted to the nearest compatible Python type.
Masked values are converted to `fill_value`. If `fill_value` is None,
the corresponding entries in the output list will be ``None``.
Parameters
----------
fill_value : scalar, optional
The value to use for invalid entries. Default is None.
Returns
-------
result : list
The Python list representation of the masked array.
Examples
--------
>>> x = np.ma.array([[1,2,3], [4,5,6], [7,8,9]], mask=[0] + [1,0]*4)
>>> x.tolist()
[[1, None, 3], [None, 5, None], [7, None, 9]]
>>> x.tolist(-999)
[[1, -999, 3], [-999, 5, -999], [7, -999, 9]]
"""
_mask = self._mask
# No mask ? Just return .data.tolist ?
if _mask is nomask:
return self._data.tolist()
# Explicit fill_value: fill the array and get the list
if fill_value is not None:
return self.filled(fill_value).tolist()
# Structured array .............
names = self.dtype.names
if names:
result = self._data.astype([(_, object) for _ in names])
for n in names:
result[n][_mask[n]] = None
return result.tolist()
# Standard arrays ...............
if _mask is nomask:
return [None]
# Set temps to save time when dealing w/ marrays...
inishape = self.shape
result = np.array(self._data.ravel(), dtype=object)
result[_mask.ravel()] = None
result.shape = inishape
return result.tolist()
# if fill_value is not None:
# return self.filled(fill_value).tolist()
# result = self.filled().tolist()
# # Set temps to save time when dealing w/ mrecarrays...
# _mask = self._mask
# if _mask is nomask:
# return result
# nbdims = self.ndim
# dtypesize = len(self.dtype)
# if nbdims == 0:
# return tuple([None] * dtypesize)
# elif nbdims == 1:
# maskedidx = _mask.nonzero()[0].tolist()
# if dtypesize:
# nodata = tuple([None] * dtypesize)
# else:
# nodata = None
# [operator.setitem(result, i, nodata) for i in maskedidx]
# else:
# for idx in zip(*[i.tolist() for i in _mask.nonzero()]):
# tmp = result
# for i in idx[:-1]:
# tmp = tmp[i]
# tmp[idx[-1]] = None
# return result
#........................
def tostring(self, fill_value=None, order='C'):
"""
This function is a compatibility alias for `tobytes`. Despite its name,
it returns bytes, not strings.
"""
return self.tobytes(fill_value, order='C')
#........................
def tobytes(self, fill_value=None, order='C'):
"""
Return the array data as a string containing the raw bytes in the array.
The array is filled with a fill value before the string conversion.
.. versionadded:: 1.9.0
Parameters
----------
fill_value : scalar, optional
Value used to fill in the masked values. Default is None, in which
case `MaskedArray.fill_value` is used.
order : {'C','F','A'}, optional
Order of the data item in the copy. Default is 'C'.
- 'C' -- C order (row major).
- 'F' -- Fortran order (column major).
- 'A' -- Any, current order of array.
- None -- Same as 'A'.
See Also
--------
ndarray.tobytes
tolist, tofile
Notes
-----
As for `ndarray.tobytes`, information about the shape, dtype, etc.,
but also about `fill_value`, will be lost.
Examples
--------
>>> x = np.ma.array(np.array([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]])
>>> x.tobytes()
'\\x01\\x00\\x00\\x00?B\\x0f\\x00?B\\x0f\\x00\\x04\\x00\\x00\\x00'
"""
return self.filled(fill_value).tobytes(order=order)
#........................
def tofile(self, fid, sep="", format="%s"):
"""
Save a masked array to a file in binary format.
.. warning::
This function is not implemented yet.
Raises
------
NotImplementedError
When `tofile` is called.
"""
raise NotImplementedError("Not implemented yet, sorry...")
def toflex(self):
"""
Transforms a masked array into a flexible-type array.
The flexible type array that is returned will have two fields:
* the ``_data`` field stores the ``_data`` part of the array.
* the ``_mask`` field stores the ``_mask`` part of the array.
Parameters
----------
None
Returns
-------
record : ndarray
A new flexible-type `ndarray` with two fields: the first element
containing a value, the second element containing the corresponding
mask boolean. The returned record shape matches self.shape.
Notes
-----
A side-effect of transforming a masked array into a flexible `ndarray` is
that meta information (``fill_value``, ...) will be lost.
Examples
--------
>>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
>>> print x
[[1 -- 3]
[-- 5 --]
[7 -- 9]]
>>> print x.toflex()
[[(1, False) (2, True) (3, False)]
[(4, True) (5, False) (6, True)]
[(7, False) (8, True) (9, False)]]
"""
# Get the basic dtype ....
ddtype = self.dtype
# Make sure we have a mask
_mask = self._mask
if _mask is None:
_mask = make_mask_none(self.shape, ddtype)
# And get its dtype
mdtype = self._mask.dtype
#
record = np.ndarray(shape=self.shape,
dtype=[('_data', ddtype), ('_mask', mdtype)])
record['_data'] = self._data
record['_mask'] = self._mask
return record
torecords = toflex
#--------------------------------------------
# Pickling
def __getstate__(self):
"""Return the internal state of the masked array, for pickling
purposes.
"""
cf = 'CF'[self.flags.fnc]
state = (1,
self.shape,
self.dtype,
self.flags.fnc,
self._data.tobytes(cf),
#self._data.tolist(),
getmaskarray(self).tobytes(cf),
#getmaskarray(self).tolist(),
self._fill_value,
)
return state
#
def __setstate__(self, state):
"""Restore the internal state of the masked array, for
pickling purposes. ``state`` is typically the output of
``__getstate__`` and is a 7-tuple:
- a version number (currently 1)
- a tuple giving the shape of the data
- the dtype of the data
- a flag indicating whether the data is Fortran-contiguous
- a binary string for the data
- a binary string for the mask
- the fill value
"""
(_, shp, typ, isf, raw, msk, flv) = state
ndarray.__setstate__(self, (shp, typ, isf, raw))
self._mask.__setstate__((shp, make_mask_descr(typ), isf, msk))
self.fill_value = flv
#
def __reduce__(self):
"""Return a 3-tuple for pickling a MaskedArray.
"""
return (_mareconstruct,
(self.__class__, self._baseclass, (0,), 'b',),
self.__getstate__())
#
def __deepcopy__(self, memo=None):
from copy import deepcopy
copied = MaskedArray.__new__(type(self), self, copy=True)
if memo is None:
memo = {}
memo[id(self)] = copied
for (k, v) in self.__dict__.items():
copied.__dict__[k] = deepcopy(v, memo)
return copied
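# Illustrative usage (editor's addition, not part of the original numpy
# source; assumes ``import pickle``, ``from copy import deepcopy`` and
# ``import numpy.ma as ma``): both pickling and deep-copying preserve the
# mask and the fill value.
#     >>> x = ma.array([1, 2, 3], mask=[0, 1, 0])
#     >>> print(pickle.loads(pickle.dumps(x)))   # -> [1 -- 3]
#     >>> print(deepcopy(x))                     # -> [1 -- 3]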
def _mareconstruct(subtype, baseclass, baseshape, basetype,):
"""Internal function that builds a new MaskedArray from the
information stored in a pickle.
"""
_data = ndarray.__new__(baseclass, baseshape, basetype)
_mask = ndarray.__new__(ndarray, baseshape, make_mask_descr(basetype))
return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype,)
class mvoid(MaskedArray):
"""
Fake a 'void' object to use for masked array with structured dtypes.
"""
#
def __new__(self, data, mask=nomask, dtype=None, fill_value=None,
hardmask=False):
dtype = dtype or data.dtype
_data = np.array(data, dtype=dtype)
_data = _data.view(self)
_data._hardmask = hardmask
if mask is not nomask:
if isinstance(mask, np.void):
_data._mask = mask
else:
try:
# Mask is already a 0D array
_data._mask = np.void(mask)
except TypeError:
# Transform the mask to a void
mdtype = make_mask_descr(dtype)
_data._mask = np.array(mask, dtype=mdtype)[()]
if fill_value is not None:
_data.fill_value = fill_value
return _data
def _get_data(self):
# Make sure that the _data part is a np.void
return self.view(ndarray)[()]
_data = property(fget=_get_data)
def __getitem__(self, indx):
"Get the index..."
m = self._mask
if m is not nomask and m[indx]:
return masked
return self._data[indx]
def __setitem__(self, indx, value):
self._data[indx] = value
if self._hardmask:
self._mask[indx] |= getattr(value, "_mask", False)
else:
self._mask[indx] = getattr(value, "_mask", False)
def __str__(self):
m = self._mask
if (m is nomask):
return self._data.__str__()
m = tuple(m)
if (not any(m)):
return self._data.__str__()
r = self._data.tolist()
p = masked_print_option
if not p.enabled():
p = 'N/A'
else:
p = str(p)
r = [(str(_), p)[int(_m)] for (_, _m) in zip(r, m)]
return "(%s)" % ", ".join(r)
def __repr__(self):
m = self._mask
if (m is nomask):
return self._data.__repr__()
m = tuple(m)
if not any(m):
return self._data.__repr__()
p = masked_print_option
if not p.enabled():
return self.filled(self.fill_value).__repr__()
p = str(p)
r = [(str(_), p)[int(_m)] for (_, _m) in zip(self._data.tolist(), m)]
return "(%s)" % ", ".join(r)
def __iter__(self):
"Defines an iterator for mvoid"
(_data, _mask) = (self._data, self._mask)
if _mask is nomask:
for d in _data:
yield d
else:
for (d, m) in zip(_data, _mask):
if m:
yield masked
else:
yield d
def __len__(self):
return self._data.__len__()
def filled(self, fill_value=None):
"""
Return a copy with masked fields filled with a given value.
Parameters
----------
fill_value : scalar, optional
The value to use for invalid entries (None by default).
If None, the `fill_value` attribute is used instead.
Returns
-------
filled_void
A `np.void` object
See Also
--------
MaskedArray.filled
"""
return asarray(self).filled(fill_value)[()]
def tolist(self):
"""
Transforms the mvoid object into a tuple.
Masked fields are replaced by None.
Returns
-------
returned_tuple
Tuple of fields
"""
_mask = self._mask
if _mask is nomask:
return self._data.tolist()
result = []
for (d, m) in zip(self._data, self._mask):
if m:
result.append(None)
else:
# .item() makes sure we return a standard Python object
result.append(d.item())
return tuple(result)
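# Illustrative usage (editor's addition, not part of the original numpy
# source; assumes ``import numpy.ma as ma``): indexing a structured masked
# array yields an mvoid, whose tolist() replaces masked fields with None.
#     >>> rec = ma.array([(1, 2.0)], dtype=[('a', int), ('b', float)],
#     ...                mask=[(False, True)])
#     >>> rec[0].tolist()    # expected: (1, None)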
#####--------------------------------------------------------------------------
#---- --- Shortcuts ---
#####---------------------------------------------------------------------------
def isMaskedArray(x):
"""
Test whether input is an instance of MaskedArray.
This function returns True if `x` is an instance of MaskedArray
and returns False otherwise. Any object is accepted as input.
Parameters
----------
x : object
Object to test.
Returns
-------
result : bool
True if `x` is a MaskedArray.
See Also
--------
isMA : Alias to isMaskedArray.
isarray : Alias to isMaskedArray.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.eye(3, 3)
>>> a
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
>>> m = ma.masked_values(a, 0)
>>> m
masked_array(data =
[[1.0 -- --]
[-- 1.0 --]
[-- -- 1.0]],
mask =
[[False True True]
[ True False True]
[ True True False]],
fill_value=0.0)
>>> ma.isMaskedArray(a)
False
>>> ma.isMaskedArray(m)
True
>>> ma.isMaskedArray([0, 1, 2])
False
"""
return isinstance(x, MaskedArray)
isarray = isMaskedArray
isMA = isMaskedArray #backward compatibility
# We define the masked singleton as a float for higher precedence...
# Note that it can be tricky sometimes w/ type comparison
class MaskedConstant(MaskedArray):
#
_data = data = np.array(0.)
_mask = mask = np.array(True)
_baseclass = ndarray
#
def __new__(self):
return self._data.view(self)
#
def __array_finalize__(self, obj):
return
#
def __array_wrap__(self, obj):
return self
#
def __str__(self):
return str(masked_print_option._display)
#
def __repr__(self):
return 'masked'
#
def flatten(self):
return masked_array([self._data], dtype=float, mask=[True])
def __reduce__(self):
"""Override of MaskedArray's __reduce__.
"""
return (self.__class__, ())
masked = masked_singleton = MaskedConstant()
masked_array = MaskedArray
def array(data, dtype=None, copy=False, order=False,
mask=nomask, fill_value=None,
keep_mask=True, hard_mask=False, shrink=True, subok=True, ndmin=0,
):
"""array(data, dtype=None, copy=False, order=False, mask=nomask,
fill_value=None, keep_mask=True, hard_mask=False, shrink=True,
subok=True, ndmin=0)
Acts as shortcut to MaskedArray, with options in a different order
for convenience. And backwards compatibility...
"""
#!!!: we should try to put 'order' somewhere
return MaskedArray(data, mask=mask, dtype=dtype, copy=copy, subok=subok,
keep_mask=keep_mask, hard_mask=hard_mask,
fill_value=fill_value, ndmin=ndmin, shrink=shrink)
array.__doc__ = masked_array.__doc__
def is_masked(x):
"""
Determine whether input has masked values.
Accepts any object as input, but always returns False unless the
input is a MaskedArray containing masked values.
Parameters
----------
x : array_like
Array to check for masked values.
Returns
-------
result : bool
True if `x` is a MaskedArray with masked values, False otherwise.
Examples
--------
>>> import numpy.ma as ma
>>> x = ma.masked_equal([0, 1, 0, 2, 3], 0)
>>> x
masked_array(data = [-- 1 -- 2 3],
mask = [ True False True False False],
fill_value=999999)
>>> ma.is_masked(x)
True
>>> x = ma.masked_equal([0, 1, 0, 2, 3], 42)
>>> x
masked_array(data = [0 1 0 2 3],
mask = False,
fill_value=999999)
>>> ma.is_masked(x)
False
Always returns False if `x` isn't a MaskedArray.
>>> x = [False, True, False]
>>> ma.is_masked(x)
False
>>> x = 'a string'
>>> ma.is_masked(x)
False
"""
m = getmask(x)
if m is nomask:
return False
elif m.any():
return True
return False
#####---------------------------------------------------------------------------
#---- --- Extrema functions ---
#####---------------------------------------------------------------------------
class _extrema_operation(object):
"""
Generic class for maximum/minimum functions.
.. note::
This is the base class for `_maximum_operation` and
`_minimum_operation`.
"""
def __call__(self, a, b=None):
"Executes the call behavior."
if b is None:
return self.reduce(a)
return where(self.compare(a, b), a, b)
#.........
def reduce(self, target, axis=None):
"Reduce target along the given axis."
target = narray(target, copy=False, subok=True)
m = getmask(target)
if axis is not None:
kargs = { 'axis' : axis }
else:
kargs = {}
target = target.ravel()
if not (m is nomask):
m = m.ravel()
if m is nomask:
t = self.ufunc.reduce(target, **kargs)
else:
target = target.filled(self.fill_value_func(target)).view(type(target))
t = self.ufunc.reduce(target, **kargs)
m = umath.logical_and.reduce(m, **kargs)
if hasattr(t, '_mask'):
t._mask = m
elif m:
t = masked
return t
#.........
def outer (self, a, b):
"Return the function applied to the outer product of a and b."
ma = getmask(a)
mb = getmask(b)
if ma is nomask and mb is nomask:
m = nomask
else:
ma = getmaskarray(a)
mb = getmaskarray(b)
m = logical_or.outer(ma, mb)
result = self.ufunc.outer(filled(a), filled(b))
if not isinstance(result, MaskedArray):
result = result.view(MaskedArray)
result._mask = m
return result
#............................
class _minimum_operation(_extrema_operation):
"Object to calculate minima"
def __init__ (self):
"""minimum(a, b) or minimum(a)
In one argument case, returns the scalar minimum.
"""
self.ufunc = umath.minimum
self.afunc = amin
self.compare = less
self.fill_value_func = minimum_fill_value
#............................
class _maximum_operation(_extrema_operation):
"Object to calculate maxima"
def __init__ (self):
"""maximum(a, b) or maximum(a)
In the one-argument case, returns the scalar maximum.
"""
self.ufunc = umath.maximum
self.afunc = amax
self.compare = greater
self.fill_value_func = maximum_fill_value
#..........................................................
def min(obj, axis=None, out=None, fill_value=None):
try:
return obj.min(axis=axis, fill_value=fill_value, out=out)
except (AttributeError, TypeError):
# If obj doesn't have a min method,
# ...or if the method doesn't accept a fill_value argument
return asanyarray(obj).min(axis=axis, fill_value=fill_value, out=out)
min.__doc__ = MaskedArray.min.__doc__
def max(obj, axis=None, out=None, fill_value=None):
try:
return obj.max(axis=axis, fill_value=fill_value, out=out)
except (AttributeError, TypeError):
# If obj doesn't have a max method,
# ...or if the method doesn't accept a fill_value argument
return asanyarray(obj).max(axis=axis, fill_value=fill_value, out=out)
max.__doc__ = MaskedArray.max.__doc__
def ptp(obj, axis=None, out=None, fill_value=None):
"""a.ptp(axis=None) = a.max(axis)-a.min(axis)"""
try:
return obj.ptp(axis, out=out, fill_value=fill_value)
except (AttributeError, TypeError):
# If obj doesn't have a ptp method,
# ...or if the method doesn't accept a fill_value argument
return asanyarray(obj).ptp(axis=axis, fill_value=fill_value, out=out)
ptp.__doc__ = MaskedArray.ptp.__doc__
#####---------------------------------------------------------------------------
#---- --- Definition of functions from the corresponding methods ---
#####---------------------------------------------------------------------------
class _frommethod:
"""
Define functions from existing MaskedArray methods.
Parameters
----------
methodname : str
Name of the method to transform.
"""
def __init__(self, methodname, reversed=False):
self.__name__ = methodname
self.__doc__ = self.getdoc()
self.reversed = reversed
#
def getdoc(self):
"Return the doc of the function (from the doc of the method)."
meth = getattr(MaskedArray, self.__name__, None) or\
getattr(np, self.__name__, None)
signature = self.__name__ + get_object_signature(meth)
if meth is not None:
doc = """ %s\n%s""" % (signature, getattr(meth, '__doc__', None))
return doc
#
def __call__(self, a, *args, **params):
if self.reversed:
args = list(args)
arr = args[0]
args[0] = a
a = arr
# Get the method from the array (if possible)
method_name = self.__name__
method = getattr(a, method_name, None)
if method is not None:
return method(*args, **params)
# Still here ? Then a is not a MaskedArray
method = getattr(MaskedArray, method_name, None)
if method is not None:
return method(MaskedArray(a), *args, **params)
# Still here ? OK, let's call the corresponding np function
method = getattr(np, method_name)
return method(a, *args, **params)
all = _frommethod('all')
anomalies = anom = _frommethod('anom')
any = _frommethod('any')
compress = _frommethod('compress', reversed=True)
cumprod = _frommethod('cumprod')
cumsum = _frommethod('cumsum')
copy = _frommethod('copy')
diagonal = _frommethod('diagonal')
harden_mask = _frommethod('harden_mask')
ids = _frommethod('ids')
maximum = _maximum_operation()
mean = _frommethod('mean')
minimum = _minimum_operation()
nonzero = _frommethod('nonzero')
prod = _frommethod('prod')
product = _frommethod('prod')
ravel = _frommethod('ravel')
repeat = _frommethod('repeat')
shrink_mask = _frommethod('shrink_mask')
soften_mask = _frommethod('soften_mask')
std = _frommethod('std')
sum = _frommethod('sum')
swapaxes = _frommethod('swapaxes')
#take = _frommethod('take')
trace = _frommethod('trace')
var = _frommethod('var')
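# Illustrative usage (editor's addition, not part of the original numpy
# source; assumes ``import numpy.ma as ma``): the functions generated by
# _frommethod defer to the corresponding method when it exists.
#     >>> a = ma.array([1, 2, 3], mask=[0, 1, 0])
#     >>> ma.sum(a), a.sum()     # -> (4, 4); the masked 2 is ignored
#     >>> ma.mean([1.0, 3.0])    # plain sequences are wrapped first -> 2.0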
def take(a, indices, axis=None, out=None, mode='raise'):
"""
Function version of `MaskedArray.take`: `a` is first converted to a
masked array, then its `take` method is applied.
"""
a = masked_array(a)
return a.take(indices, axis=axis, out=out, mode=mode)
#..............................................................................
def power(a, b, third=None):
"""
Returns element-wise base array raised to power from second array.
This is the masked array version of `numpy.power`. For details see
`numpy.power`.
See Also
--------
numpy.power
Notes
-----
The *out* argument to `numpy.power` is not supported, `third` has to be
None.
"""
if third is not None:
raise MaskError("3-argument power not supported.")
# Get the masks
ma = getmask(a)
mb = getmask(b)
m = mask_or(ma, mb)
# Get the rawdata
fa = getdata(a)
fb = getdata(b)
# Get the type of the result (so that we preserve subclasses)
if isinstance(a, MaskedArray):
basetype = type(a)
else:
basetype = MaskedArray
# Get the result and view it as a (subclass of) MaskedArray
with np.errstate():
np.seterr(divide='ignore', invalid='ignore')
result = np.where(m, fa, umath.power(fa, fb)).view(basetype)
result._update_from(a)
# Find where we're in trouble w/ NaNs and Infs
invalid = np.logical_not(np.isfinite(result.view(ndarray)))
# Add the initial mask
if m is not nomask:
if not (result.ndim):
return masked
result._mask = np.logical_or(m, invalid)
# Fix the invalid parts
if invalid.any():
if not result.ndim:
return masked
elif result._mask is nomask:
result._mask = invalid
result._data[invalid] = result.fill_value
return result
# if fb.dtype.char in typecodes["Integer"]:
# return masked_array(umath.power(fa, fb), m)
# m = mask_or(m, (fa < 0) & (fb != fb.astype(int)))
# if m is nomask:
# return masked_array(umath.power(fa, fb))
# else:
# fa = fa.copy()
# if m.all():
# fa.flat = 1
# else:
# np.copyto(fa, 1, where=m)
# return masked_array(umath.power(fa, fb), m)
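# Illustrative usage (editor's addition, not part of the original numpy
# source; assumes ``import numpy.ma as ma``): invalid results are masked
# rather than returned as NaN.
#     >>> x = ma.array([-1.0, 4.0, 9.0], mask=[0, 0, 1])
#     >>> print(ma.power(x, 0.5))    # -> [-- 2.0 --]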
#..............................................................................
def argsort(a, axis=None, kind='quicksort', order=None, fill_value=None):
"Function version of the eponymous method."
if fill_value is None:
fill_value = default_fill_value(a)
d = filled(a, fill_value)
if axis is None:
return d.argsort(kind=kind, order=order)
return d.argsort(axis, kind=kind, order=order)
argsort.__doc__ = MaskedArray.argsort.__doc__
def argmin(a, axis=None, fill_value=None):
"Function version of the eponymous method."
if fill_value is None:
fill_value = default_fill_value(a)
d = filled(a, fill_value)
return d.argmin(axis=axis)
argmin.__doc__ = MaskedArray.argmin.__doc__
def argmax(a, axis=None, fill_value=None):
"Function version of the eponymous method."
if fill_value is None:
fill_value = default_fill_value(a)
try:
fill_value = -fill_value
except:
pass
d = filled(a, fill_value)
return d.argmax(axis=axis)
argmax.__doc__ = MaskedArray.argmax.__doc__
def sort(a, axis= -1, kind='quicksort', order=None, endwith=True, fill_value=None):
"Function version of the eponymous method."
a = narray(a, copy=True, subok=True)
if axis is None:
a = a.flatten()
axis = 0
if fill_value is None:
if endwith:
filler = minimum_fill_value(a)
else:
filler = maximum_fill_value(a)
else:
filler = fill_value
# return
indx = np.meshgrid(*[np.arange(x) for x in a.shape], sparse=True,
indexing='ij')
indx[axis] = filled(a, filler).argsort(axis=axis, kind=kind, order=order)
return a[indx]
sort.__doc__ = MaskedArray.sort.__doc__
def compressed(x):
"""
Return all the non-masked data as a 1-D array.
This function is equivalent to calling the "compressed" method of a
`MaskedArray`, see `MaskedArray.compressed` for details.
See Also
--------
MaskedArray.compressed
Equivalent method.
"""
if not isinstance(x, MaskedArray):
x = asanyarray(x)
return x.compressed()
def concatenate(arrays, axis=0):
"""
Concatenate a sequence of arrays along the given axis.
Parameters
----------
arrays : sequence of array_like
The arrays must have the same shape, except in the dimension
corresponding to `axis` (the first, by default).
axis : int, optional
The axis along which the arrays will be joined. Default is 0.
Returns
-------
result : MaskedArray
The concatenated array with any masked entries preserved.
See Also
--------
numpy.concatenate : Equivalent function in the top-level NumPy module.
Examples
--------
>>> import numpy.ma as ma
>>> a = ma.arange(3)
>>> a[1] = ma.masked
>>> b = ma.arange(2, 5)
>>> a
masked_array(data = [0 -- 2],
mask = [False True False],
fill_value = 999999)
>>> b
masked_array(data = [2 3 4],
mask = False,
fill_value = 999999)
>>> ma.concatenate([a, b])
masked_array(data = [0 -- 2 2 3 4],
mask = [False True False False False False],
fill_value = 999999)
"""
d = np.concatenate([getdata(a) for a in arrays], axis)
rcls = get_masked_subclass(*arrays)
data = d.view(rcls)
# Check whether one of the arrays has a non-empty mask...
for x in arrays:
if getmask(x) is not nomask:
break
else:
return data
# OK, so we have to concatenate the masks
dm = np.concatenate([getmaskarray(a) for a in arrays], axis)
# If we decide to keep a '_shrinkmask' option, we want to check that ...
# ... all of them are True, and then check for dm.any()
# shrink = numpy.logical_or.reduce([getattr(a,'_shrinkmask',True) for a in arrays])
# if shrink and not dm.any():
if not dm.dtype.fields and not dm.any():
data._mask = nomask
else:
data._mask = dm.reshape(d.shape)
return data
def count(a, axis=None):
if isinstance(a, MaskedArray):
return a.count(axis)
return masked_array(a, copy=False).count(axis)
count.__doc__ = MaskedArray.count.__doc__
def diag(v, k=0):
"""
Extract a diagonal or construct a diagonal array.
This function is the equivalent of `numpy.diag` that takes masked
values into account, see `numpy.diag` for details.
See Also
--------
numpy.diag : Equivalent function for ndarrays.
"""
output = np.diag(v, k).view(MaskedArray)
if getmask(v) is not nomask:
output._mask = np.diag(v._mask, k)
return output
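# Illustrative usage (editor's addition, not part of the original numpy
# source; assumes ``import numpy as np`` and ``import numpy.ma as ma``):
#     >>> x = ma.array(np.arange(9).reshape(3, 3),
#     ...              mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]])
#     >>> print(ma.diag(x))    # -> [0 -- 8]; the masked centre element stays masked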
def expand_dims(x, axis):
"""
Expand the shape of an array.
Expands the shape of the array by including a new axis before the one
specified by the `axis` parameter. This function behaves the same as
`numpy.expand_dims` but preserves masked elements.
See Also
--------
numpy.expand_dims : Equivalent function in top-level NumPy module.
Examples
--------
>>> import numpy.ma as ma
>>> x = ma.array([1, 2, 4])
>>> x[1] = ma.masked
>>> x
masked_array(data = [1 -- 4],
mask = [False True False],
fill_value = 999999)
>>> np.expand_dims(x, axis=0)
array([[1, 2, 4]])
>>> ma.expand_dims(x, axis=0)
masked_array(data =
[[1 -- 4]],
mask =
[[False True False]],
fill_value = 999999)
The same result can be achieved using slicing syntax with `np.newaxis`.
>>> x[np.newaxis, :]
masked_array(data =
[[1 -- 4]],
mask =
[[False True False]],
fill_value = 999999)
"""
result = n_expand_dims(x, axis)
if isinstance(x, MaskedArray):
new_shape = result.shape
result = x.view()
result.shape = new_shape
if result._mask is not nomask:
result._mask.shape = new_shape
return result
#......................................
def left_shift (a, n):
"""
Shift the bits of an integer to the left.
This is the masked array version of `numpy.left_shift`, for details
see that function.
See Also
--------
numpy.left_shift
"""
m = getmask(a)
if m is nomask:
d = umath.left_shift(filled(a), n)
return masked_array(d)
else:
d = umath.left_shift(filled(a, 0), n)
return masked_array(d, mask=m)
def right_shift (a, n):
"""
Shift the bits of an integer to the right.
This is the masked array version of `numpy.right_shift`, for details
see that function.
See Also
--------
numpy.right_shift
"""
m = getmask(a)
if m is nomask:
d = umath.right_shift(filled(a), n)
return masked_array(d)
else:
d = umath.right_shift(filled(a, 0), n)
return masked_array(d, mask=m)
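# Illustrative usage (editor's addition, not part of the original numpy
# source; assumes ``import numpy.ma as ma``): shifts operate on the filled
# data but keep the original mask.
#     >>> x = ma.array([1, 2, 4], mask=[0, 1, 0])
#     >>> print(ma.left_shift(x, 2))     # -> [4 -- 16]
#     >>> print(ma.right_shift(x, 1))    # -> [0 -- 2]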
#......................................
def put(a, indices, values, mode='raise'):
"""
Set storage-indexed locations to corresponding values.
This function is equivalent to `MaskedArray.put`, see that method
for details.
See Also
--------
MaskedArray.put
"""
# We can't use 'frommethod', the order of arguments is different
try:
return a.put(indices, values, mode=mode)
except AttributeError:
return narray(a, copy=False).put(indices, values, mode=mode)
def putmask(a, mask, values): #, mode='raise'):
"""
Changes elements of an array based on conditional and input values.
This is the masked array version of `numpy.putmask`, for details see
`numpy.putmask`.
See Also
--------
numpy.putmask
Notes
-----
Using a masked array as `values` will **not** transform a `ndarray` into
a `MaskedArray`.
"""
# We can't use 'frommethod', the order of arguments is different
if not isinstance(a, MaskedArray):
a = a.view(MaskedArray)
(valdata, valmask) = (getdata(values), getmask(values))
if getmask(a) is nomask:
if valmask is not nomask:
a._sharedmask = True
a._mask = make_mask_none(a.shape, a.dtype)
np.copyto(a._mask, valmask, where=mask)
elif a._hardmask:
if valmask is not nomask:
m = a._mask.copy()
np.copyto(m, valmask, where=mask)
a.mask |= m
else:
if valmask is nomask:
valmask = getmaskarray(values)
np.copyto(a._mask, valmask, where=mask)
np.copyto(a._data, valdata, where=mask)
return
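# Illustrative usage (editor's addition, not part of the original numpy
# source; assumes ``import numpy.ma as ma``): values written through the
# condition also unmask the corresponding entries.
#     >>> a = ma.array([1, 2, 3, 4], mask=[0, 0, 1, 1])
#     >>> ma.putmask(a, [True, False, True, False], [10, 20, 30, 40])
#     >>> print(a)    # -> [10 2 30 --]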
def transpose(a, axes=None):
"""
Permute the dimensions of an array.
This function is exactly equivalent to `numpy.transpose`.
See Also
--------
numpy.transpose : Equivalent function in top-level NumPy module.
Examples
--------
>>> import numpy.ma as ma
>>> x = ma.arange(4).reshape((2,2))
>>> x[1, 1] = ma.masked
>>> x
masked_array(data =
[[0 1]
[2 --]],
mask =
[[False False]
[False True]],
fill_value = 999999)
>>> ma.transpose(x)
masked_array(data =
[[0 2]
[1 --]],
mask =
[[False False]
[False True]],
fill_value = 999999)
"""
#We can't use 'frommethod', as 'transpose' doesn't take keywords
try:
return a.transpose(axes)
except AttributeError:
return narray(a, copy=False).transpose(axes).view(MaskedArray)
def reshape(a, new_shape, order='C'):
"""
Returns an array containing the same data with a new shape.
Refer to `MaskedArray.reshape` for full documentation.
See Also
--------
MaskedArray.reshape : equivalent function
"""
# We can't use 'frommethod': it whines about some parameters.
try:
return a.reshape(new_shape, order=order)
except AttributeError:
_tmp = narray(a, copy=False).reshape(new_shape, order=order)
return _tmp.view(MaskedArray)
def resize(x, new_shape):
"""
Return a new masked array with the specified size and shape.
This is the masked equivalent of the `numpy.resize` function. The new
array is filled with repeated copies of `x` (in the order that the
data are stored in memory). If `x` is masked, the new array will be
masked, and the new mask will be a repetition of the old one.
See Also
--------
numpy.resize : Equivalent function in the top level NumPy module.
Examples
--------
>>> import numpy.ma as ma
>>> a = ma.array([[1, 2] ,[3, 4]])
>>> a[0, 1] = ma.masked
>>> a
masked_array(data =
[[1 --]
[3 4]],
mask =
[[False True]
[False False]],
fill_value = 999999)
>>> np.resize(a, (3, 3))
array([[1, 2, 3],
[4, 1, 2],
[3, 4, 1]])
>>> ma.resize(a, (3, 3))
masked_array(data =
[[1 -- 3]
[4 1 --]
[3 4 1]],
mask =
[[False True False]
[False False True]
[False False False]],
fill_value = 999999)
A MaskedArray is always returned, regardless of the input type.
>>> a = np.array([[1, 2] ,[3, 4]])
>>> ma.resize(a, (3, 3))
masked_array(data =
[[1 2 3]
[4 1 2]
[3 4 1]],
mask =
False,
fill_value = 999999)
"""
# We can't use _frommethods here, as N.resize is notoriously whiny.
m = getmask(x)
if m is not nomask:
m = np.resize(m, new_shape)
result = np.resize(x, new_shape).view(get_masked_subclass(x))
if result.ndim:
result._mask = m
return result
#................................................
def rank(obj):
"maskedarray version of the numpy function."
return np.rank(getdata(obj))
rank.__doc__ = np.rank.__doc__
#
def shape(obj):
"maskedarray version of the numpy function."
return np.shape(getdata(obj))
shape.__doc__ = np.shape.__doc__
#
def size(obj, axis=None):
"maskedarray version of the numpy function."
return np.size(getdata(obj), axis)
size.__doc__ = np.size.__doc__
#................................................
#####--------------------------------------------------------------------------
#---- --- Extra functions ---
#####--------------------------------------------------------------------------
def where (condition, x=None, y=None):
"""
Return a masked array with elements from x or y, depending on condition.
Returns a masked array, shaped like condition, where the elements
are from `x` when `condition` is True, and from `y` otherwise.
If neither `x` nor `y` are given, the function returns a tuple of
indices where `condition` is True (the result of
``condition.nonzero()``).
Parameters
----------
condition : array_like, bool
The condition to meet. For each True element, yield the corresponding
element from `x`, otherwise from `y`.
x, y : array_like, optional
Values from which to choose. `x` and `y` need to have the same shape
as condition, or be broadcast-able to that shape.
Returns
-------
out : MaskedArray or tuple of ndarrays
The resulting masked array if `x` and `y` were given, otherwise
the result of ``condition.nonzero()``.
See Also
--------
numpy.where : Equivalent function in the top-level NumPy module.
Examples
--------
>>> x = np.ma.array(np.arange(9.).reshape(3, 3), mask=[[0, 1, 0],
... [1, 0, 1],
... [0, 1, 0]])
>>> print x
[[0.0 -- 2.0]
[-- 4.0 --]
[6.0 -- 8.0]]
>>> np.ma.where(x > 5) # return the indices where x > 5
(array([2, 2]), array([0, 2]))
>>> print np.ma.where(x > 5, x, -3.1416)
[[-3.1416 -- -3.1416]
[-- -3.1416 --]
[6.0 -- 8.0]]
"""
if x is None and y is None:
return filled(condition, 0).nonzero()
elif x is None or y is None:
raise ValueError("Either both or neither of x and y should be given.")
# Get the condition ...............
fc = filled(condition, 0).astype(MaskType)
notfc = np.logical_not(fc)
# Get the data ......................................
xv = getdata(x)
yv = getdata(y)
if x is masked:
ndtype = yv.dtype
elif y is masked:
ndtype = xv.dtype
else:
ndtype = np.find_common_type([xv.dtype, yv.dtype], [])
# Construct an empty array and fill it
d = np.empty(fc.shape, dtype=ndtype).view(MaskedArray)
_data = d._data
np.copyto(_data, xv.astype(ndtype), where=fc)
np.copyto(_data, yv.astype(ndtype), where=notfc)
# Create an empty mask and fill it
_mask = d._mask = np.zeros(fc.shape, dtype=MaskType)
np.copyto(_mask, getmask(x), where=fc)
np.copyto(_mask, getmask(y), where=notfc)
_mask |= getmaskarray(condition)
if not _mask.any():
d._mask = nomask
return d
def choose (indices, choices, out=None, mode='raise'):
"""
Use an index array to construct a new array from a set of choices.
Given an array of integers and a set of n choice arrays, this method
will create a new array that merges each of the choice arrays. Where a
value in `a` is i, the new array will have the value that choices[i]
contains in the same place.
Parameters
----------
a : ndarray of ints
This array must contain integers in ``[0, n-1]``, where n is the
number of choices.
choices : sequence of arrays
Choice arrays. The index array and all of the choices should be
broadcastable to the same shape.
out : array, optional
If provided, the result will be inserted into this array. It should
be of the appropriate shape and `dtype`.
mode : {'raise', 'wrap', 'clip'}, optional
Specifies how out-of-bounds indices will behave.
* 'raise' : raise an error
* 'wrap' : wrap around
* 'clip' : clip to the range
Returns
-------
merged_array : array
See Also
--------
choose : equivalent function
Examples
--------
>>> choice = np.array([[1,1,1], [2,2,2], [3,3,3]])
>>> a = np.array([2, 1, 0])
>>> np.ma.choose(a, choice)
masked_array(data = [3 2 1],
mask = False,
fill_value=999999)
"""
def fmask (x):
"Returns the filled array, or True if masked."
if x is masked:
return True
return filled(x)
def nmask (x):
"Returns the mask, True if ``masked``, False if ``nomask``."
if x is masked:
return True
return getmask(x)
# Get the indices......
c = filled(indices, 0)
# Get the masks........
masks = [nmask(x) for x in choices]
data = [fmask(x) for x in choices]
# Construct the mask
outputmask = np.choose(c, masks, mode=mode)
outputmask = make_mask(mask_or(outputmask, getmask(indices)),
copy=0, shrink=True)
# Get the choices......
d = np.choose(c, data, mode=mode, out=out).view(MaskedArray)
if out is not None:
if isinstance(out, MaskedArray):
out.__setmask__(outputmask)
return out
d.__setmask__(outputmask)
return d
def round_(a, decimals=0, out=None):
"""
Return a copy of a, rounded to 'decimals' places.
When 'decimals' is negative, it specifies the number of positions
to the left of the decimal point. The real and imaginary parts of
complex numbers are rounded separately. Nothing is done if the
array is not of float type and 'decimals' is greater than or equal
to 0.
Parameters
----------
decimals : int
Number of decimals to round to. May be negative.
out : array_like
Existing array to use for output.
If not given, returns a default copy of a.
Notes
-----
If out is given and does not have a mask attribute, the mask of a
is lost!
"""
if out is None:
return np.round_(a, decimals, out)
else:
np.round_(getdata(a), decimals, out)
if hasattr(out, '_mask'):
out._mask = getmask(a)
return out
round = round_
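# Illustrative usage (editor's addition, not part of the original numpy
# source; assumes ``import numpy.ma as ma``):
#     >>> x = ma.array([1.234, 2.567, 3.891], mask=[0, 1, 0])
#     >>> print(ma.round_(x, 1))    # -> [1.2 -- 3.9]; the mask is preserved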
def inner(a, b):
"""
Returns the inner product of a and b for arrays of floating point types.
Like the generic NumPy equivalent, the product sum is over the last
dimension of `a` and `b`.
Notes
-----
The first argument is not conjugated.
"""
fa = filled(a, 0)
fb = filled(b, 0)
if len(fa.shape) == 0:
fa.shape = (1,)
if len(fb.shape) == 0:
fb.shape = (1,)
return np.inner(fa, fb).view(MaskedArray)
inner.__doc__ = doc_note(np.inner.__doc__,
"Masked values are replaced by 0.")
innerproduct = inner
def outer(a, b):
"maskedarray version of the numpy function."
fa = filled(a, 0).ravel()
fb = filled(b, 0).ravel()
d = np.outer(fa, fb)
ma = getmask(a)
mb = getmask(b)
if ma is nomask and mb is nomask:
return masked_array(d)
ma = getmaskarray(a)
mb = getmaskarray(b)
m = make_mask(1 - np.outer(1 - ma, 1 - mb), copy=0)
return masked_array(d, mask=m)
outer.__doc__ = doc_note(np.outer.__doc__,
"Masked values are replaced by 0.")
outerproduct = outer
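# Illustrative usage (editor's addition, not part of the original numpy
# source; assumes ``import numpy.ma as ma``): masked entries contribute 0
# to inner products, and propagate as masked rows/columns in outer products.
#     >>> a = ma.array([1, 2, 3], mask=[0, 1, 0])
#     >>> b = ma.array([4, 5, 6])
#     >>> ma.inner(a, b)    # -> 22, i.e. 1*4 + 0*5 + 3*6
#     >>> ma.outer(a, b)    # row 1 (from the masked element) is fully masked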
def allequal (a, b, fill_value=True):
"""
Return True if all entries of a and b are equal, using
fill_value as a truth value where either or both are masked.
Parameters
----------
a, b : array_like
Input arrays to compare.
fill_value : bool, optional
Whether masked values in a or b are considered equal (True) or not
(False).
Returns
-------
y : bool
Returns True if the two arrays are equal within the given
tolerance, False otherwise. If either array contains NaN,
then False is returned.
See Also
--------
all, any
numpy.ma.allclose
Examples
--------
>>> a = ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1])
>>> a
masked_array(data = [10000000000.0 1e-07 --],
mask = [False False True],
fill_value=1e+20)
>>> b = np.array([1e10, 1e-7, -42.0])
>>> b
array([ 1.00000000e+10, 1.00000000e-07, -4.20000000e+01])
>>> ma.allequal(a, b, fill_value=False)
False
>>> ma.allequal(a, b)
True
"""
m = mask_or(getmask(a), getmask(b))
if m is nomask:
x = getdata(a)
y = getdata(b)
d = umath.equal(x, y)
return d.all()
elif fill_value:
x = getdata(a)
y = getdata(b)
d = umath.equal(x, y)
dm = array(d, mask=m, copy=False)
return dm.filled(True).all(None)
else:
return False
def allclose (a, b, masked_equal=True, rtol=1e-5, atol=1e-8):
"""
Returns True if two arrays are element-wise equal within a tolerance.
This function is equivalent to `allclose` except that masked values
are treated as equal (default) or unequal, depending on the `masked_equal`
argument.
Parameters
----------
a, b : array_like
Input arrays to compare.
masked_equal : bool, optional
Whether masked values in `a` and `b` are considered equal (True) or not
(False). They are considered equal by default.
rtol : float, optional
Relative tolerance. The relative difference is equal to ``rtol * b``.
Default is 1e-5.
atol : float, optional
Absolute tolerance. The absolute difference is equal to `atol`.
Default is 1e-8.
Returns
-------
y : bool
Returns True if the two arrays are equal within the given
tolerance, False otherwise. If either array contains NaN, then
False is returned.
See Also
--------
all, any
numpy.allclose : the non-masked `allclose`.
Notes
-----
If the following equation is element-wise True, then `allclose` returns
True::
absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))
Return True if all elements of `a` and `b` are equal subject to
given tolerances.
Examples
--------
>>> a = ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1])
>>> a
masked_array(data = [10000000000.0 1e-07 --],
mask = [False False True],
fill_value = 1e+20)
>>> b = ma.array([1e10, 1e-8, -42.0], mask=[0, 0, 1])
>>> ma.allclose(a, b)
False
>>> a = ma.array([1e10, 1e-8, 42.0], mask=[0, 0, 1])
>>> b = ma.array([1.00001e10, 1e-9, -42.0], mask=[0, 0, 1])
>>> ma.allclose(a, b)
True
>>> ma.allclose(a, b, masked_equal=False)
False
Masked values are not compared directly.
>>> a = ma.array([1e10, 1e-8, 42.0], mask=[0, 0, 1])
>>> b = ma.array([1.00001e10, 1e-9, 42.0], mask=[0, 0, 1])
>>> ma.allclose(a, b)
True
>>> ma.allclose(a, b, masked_equal=False)
False
"""
x = masked_array(a, copy=False)
y = masked_array(b, copy=False)
# make sure y is an inexact type to avoid abs(MIN_INT); will cause
# casting of x later.
dtype = np.result_type(y, 1.)
if y.dtype != dtype:
y = masked_array(y, dtype=dtype, copy=False)
m = mask_or(getmask(x), getmask(y))
xinf = np.isinf(masked_array(x, copy=False, mask=m)).filled(False)
# If we have some infs, they should fall at the same place.
if not np.all(xinf == filled(np.isinf(y), False)):
return False
# No infs at all
if not np.any(xinf):
d = filled(umath.less_equal(umath.absolute(x - y),
atol + rtol * umath.absolute(y)),
masked_equal)
return np.all(d)
if not np.all(filled(x[xinf] == y[xinf], masked_equal)):
return False
x = x[~xinf]
y = y[~xinf]
d = filled(umath.less_equal(umath.absolute(x - y),
atol + rtol * umath.absolute(y)),
masked_equal)
return np.all(d)
#..............................................................................
def asarray(a, dtype=None, order=None):
"""
Convert the input to a masked array of the given data-type.
No copy is performed if the input is already an `ndarray`. If `a` is
a subclass of `MaskedArray`, a base class `MaskedArray` is returned.
Parameters
----------
a : array_like
Input data, in any form that can be converted to a masked array. This
includes lists, lists of tuples, tuples, tuples of tuples, tuples
of lists, ndarrays and masked arrays.
dtype : dtype, optional
By default, the data-type is inferred from the input data.
order : {'C', 'F'}, optional
Whether to use row-major ('C') or column-major ('FORTRAN') memory
representation. Default is 'C'.
Returns
-------
out : MaskedArray
Masked array interpretation of `a`.
See Also
--------
asanyarray : Similar to `asarray`, but conserves subclasses.
Examples
--------
>>> x = np.arange(10.).reshape(2, 5)
>>> x
array([[ 0., 1., 2., 3., 4.],
[ 5., 6., 7., 8., 9.]])
>>> np.ma.asarray(x)
masked_array(data =
[[ 0. 1. 2. 3. 4.]
[ 5. 6. 7. 8. 9.]],
mask =
False,
fill_value = 1e+20)
>>> type(np.ma.asarray(x))
<class 'numpy.ma.core.MaskedArray'>
"""
return masked_array(a, dtype=dtype, copy=False, keep_mask=True, subok=False)
def asanyarray(a, dtype=None):
"""
Convert the input to a masked array, conserving subclasses.
If `a` is a subclass of `MaskedArray`, its class is conserved.
No copy is performed if the input is already an `ndarray`.
Parameters
----------
a : array_like
Input data, in any form that can be converted to an array.
dtype : dtype, optional
By default, the data-type is inferred from the input data.
order : {'C', 'F'}, optional
Whether to use row-major ('C') or column-major ('FORTRAN') memory
representation. Default is 'C'.
Returns
-------
out : MaskedArray
MaskedArray interpretation of `a`.
See Also
--------
asarray : Similar to `asanyarray`, but does not conserve subclass.
Examples
--------
>>> x = np.arange(10.).reshape(2, 5)
>>> x
array([[ 0., 1., 2., 3., 4.],
[ 5., 6., 7., 8., 9.]])
>>> np.ma.asanyarray(x)
masked_array(data =
[[ 0. 1. 2. 3. 4.]
[ 5. 6. 7. 8. 9.]],
mask =
False,
fill_value = 1e+20)
>>> type(np.ma.asanyarray(x))
<class 'numpy.ma.core.MaskedArray'>
"""
return masked_array(a, dtype=dtype, copy=False, keep_mask=True, subok=True)
#####--------------------------------------------------------------------------
#---- --- Pickling ---
#####--------------------------------------------------------------------------
def dump(a, F):
"""
Pickle a masked array to a file.
This is a wrapper around ``cPickle.dump``.
Parameters
----------
a : MaskedArray
The array to be pickled.
F : str or file-like object
The file to pickle `a` to. If a string, the full path to the file.
"""
if not hasattr(F, 'readline'):
F = open(F, 'w')
return pickle.dump(a, F)
def dumps(a):
"""
Return a string corresponding to the pickling of a masked array.
This is a wrapper around ``cPickle.dumps``.
Parameters
----------
a : MaskedArray
The array for which the string representation of the pickle is
returned.
"""
return pickle.dumps(a)
def load(F):
"""
Wrapper around ``cPickle.load`` which accepts either a file-like object
or a filename.
Parameters
----------
F : str or file
The file or file name to load.
See Also
--------
dump : Pickle an array
Notes
-----
This is different from `numpy.load`, which does not use cPickle but loads
the NumPy binary .npy format.
"""
if not hasattr(F, 'readline'):
F = open(F, 'r')
return pickle.load(F)
def loads(strg):
"""
Load a pickle from the current string.
The result of ``cPickle.loads(strg)`` is returned.
Parameters
----------
strg : str
The string to load.
See Also
--------
dumps : Return a string corresponding to the pickling of a masked array.
"""
return pickle.loads(strg)
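# Illustrative usage (editor's addition, not part of the original numpy
# source; assumes ``import numpy.ma as ma``): dumps/loads are thin pickle
# wrappers, so the mask survives the round trip.
#     >>> s = ma.dumps(ma.array([1, 2, 3], mask=[0, 1, 0]))
#     >>> print(ma.loads(s))    # -> [1 -- 3]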
################################################################################
def fromfile(file, dtype=float, count= -1, sep=''):
raise NotImplementedError("Not yet implemented. Sorry")
def fromflex(fxarray):
"""
Build a masked array from a suitable flexible-type array.
The input array has to have a data-type with ``_data`` and ``_mask``
fields. This type of array is output by `MaskedArray.toflex`.
Parameters
----------
fxarray : ndarray
The structured input array, containing ``_data`` and ``_mask``
fields. If present, other fields are discarded.
Returns
-------
result : MaskedArray
The constructed masked array.
See Also
--------
MaskedArray.toflex : Build a flexible-type array from a masked array.
Examples
--------
>>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[0] + [1, 0] * 4)
>>> rec = x.toflex()
>>> rec
array([[(0, False), (1, True), (2, False)],
[(3, True), (4, False), (5, True)],
[(6, False), (7, True), (8, False)]],
dtype=[('_data', '<i4'), ('_mask', '|b1')])
>>> x2 = np.ma.fromflex(rec)
>>> x2
masked_array(data =
[[0 -- 2]
[-- 4 --]
[6 -- 8]],
mask =
[[False True False]
[ True False True]
[False True False]],
fill_value = 999999)
Extra fields can be present in the structured array but are discarded:
>>> dt = [('_data', '<i4'), ('_mask', '|b1'), ('field3', '<f4')]
>>> rec2 = np.zeros((2, 2), dtype=dt)
>>> rec2
array([[(0, False, 0.0), (0, False, 0.0)],
[(0, False, 0.0), (0, False, 0.0)]],
dtype=[('_data', '<i4'), ('_mask', '|b1'), ('field3', '<f4')])
>>> y = np.ma.fromflex(rec2)
>>> y
masked_array(data =
[[0 0]
[0 0]],
mask =
[[False False]
[False False]],
fill_value = 999999)
"""
return masked_array(fxarray['_data'], mask=fxarray['_mask'])
class _convert2ma:
"""
Convert functions from numpy to numpy.ma.
Parameters
----------
funcname : str
Name of the numpy function to wrap.
"""
__doc__ = None
#
def __init__(self, funcname, params=None):
self._func = getattr(np, funcname)
self.__doc__ = self.getdoc()
self._extras = params or {}
#
def getdoc(self):
"Return the doc of the function (from the doc of the method)."
doc = getattr(self._func, '__doc__', None)
sig = get_object_signature(self._func)
if doc:
# Add the signature of the function at the beginning of the doc
if sig:
sig = "%s%s\n" % (self._func.__name__, sig)
doc = sig + doc
return doc
#
def __call__(self, a, *args, **params):
# Find the common parameters to the call and the definition
_extras = self._extras
common_params = set(params).intersection(_extras)
# Drop the common parameters from the call
for p in common_params:
_extras[p] = params.pop(p)
# Get the result
result = self._func.__call__(a, *args, **params).view(MaskedArray)
if "fill_value" in common_params:
result.fill_value = _extras.get("fill_value", None)
if "hardmask" in common_params:
result._hardmask = bool(_extras.get("hard_mask", False))
return result
arange = _convert2ma('arange', params=dict(fill_value=None, hardmask=False))
clip = np.clip
diff = np.diff
empty = _convert2ma('empty', params=dict(fill_value=None, hardmask=False))
empty_like = _convert2ma('empty_like')
frombuffer = _convert2ma('frombuffer')
fromfunction = _convert2ma('fromfunction')
identity = _convert2ma('identity', params=dict(fill_value=None, hardmask=False))
indices = np.indices
ones = _convert2ma('ones', params=dict(fill_value=None, hardmask=False))
ones_like = np.ones_like
squeeze = np.squeeze
zeros = _convert2ma('zeros', params=dict(fill_value=None, hardmask=False))
zeros_like = np.zeros_like
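# Hedged illustration (added comment, not part of the original module): the wrappers
# above forward the extra keywords declared in `params` to the masked result, e.g.
# (assuming this module is the one exposed as numpy.ma):
#   >>> m = zeros((2, 2), fill_value=-999)   # np.zeros result viewed as a MaskedArray
#   >>> m.fill_value                          # -> -999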
###############################################################################
def append(a, b, axis=None):
"""Append values to the end of an array.
.. versionadded:: 1.9.0
Parameters
----------
arr : array_like
Values are appended to a copy of this array.
values : array_like
These values are appended to a copy of `arr`. It must be of the
correct shape (the same shape as `arr`, excluding `axis`). If `axis`
is not specified, `values` can be any shape and will be flattened
before use.
axis : int, optional
The axis along which `values` are appended. If `axis` is not given,
both `arr` and `values` are flattened before use.
Returns
-------
append : MaskedArray
A copy of `arr` with `values` appended to `axis`. Note that `append`
does not occur in-place: a new array is allocated and filled. If
`axis` is None, the result is a flattened array.
See Also
--------
numpy.append : Equivalent function in the top-level NumPy module.
Examples
--------
>>> import numpy.ma as ma
>>> a = ma.masked_values([1, 2, 3], 2)
>>> b = ma.masked_values([[4, 5, 6], [7, 8, 9]], 7)
>>> print(ma.append(a, b))
[1 -- 3 4 5 6 -- 8 9]
"""
return concatenate([a, b], axis)
| bsd-3-clause | -4,298,558,549,913,678,000 | 30.91784 | 86 | 0.532446 | false |
rjw57/openni-skeleton-export | examples/labelbones.py | 1 | 4166 | #!/usr/bin/env python
#
# An example script for extracting labelled images by associating points with
# their closest bone.
"""
Usage:
labelbones.py (-h | --help)
labelbones.py [--verbose] <logfile> <frame-prefix>
Options:
-h, --help Show a brief usage summary.
-v, --verbose Increase verbosity of output.
"""
import logging
import docopt
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import tables
LOG = logging.getLogger()
def main():
"""Main entry point."""
opts = docopt.docopt(__doc__)
logging.basicConfig(
level=logging.INFO if opts['--verbose'] else logging.WARN
)
LOG.info('Opening log file {0}'.format(opts['<logfile>']))
log_root = tables.open_file(opts['<logfile>']).root
for frame in log_root.frames:
frame_idx = frame._v_attrs.idx
if frame_idx % 30 == 0:
LOG.info('Processing frame {0}...'.format(frame_idx))
user = None
for tracked_user in frame.users:
try:
if tracked_user.joints.shape[0] > 0:
user = tracked_user
except AttributeError:
# it's ok
pass
# If we have a user, detect labels
if user is None:
label_im = np.asanyarray(frame.label)
else:
label_im = bone_labels(frame, user)
label_im = label_im / float(max(1.0, label_im.max()))
label_color_im = (plt.cm.jet(label_im)[...,:3] * 255).astype(np.uint8)
Image.fromarray(label_color_im).save(
'{0}-{1:05d}.png'.format(opts['<frame-prefix>'], frame_idx))
def distances_to_line_segment(pts, line):
"""pts is a Nx3 array of 3d points.
line = (p1, p2) where p1 and p2 are 3-vectors.
"""
p1, p2 = line
p1, p2 = np.asarray(p1), np.asarray(p2)
# Let bone line be a + t * n
# Compute n
n = p2 - p1
line_len = np.sqrt(np.sum(n**2))
n /= line_len
# Compute points using p1 and p2 as origin
# Note, x = p - a
x, y = np.copy(pts), np.copy(pts)
for i in range(3):
x[:,i] -= p1[i]
y[:,i] -= p2[i]
# Squared distances to p1 and p2
d1 = np.sum(x**2, axis=1)
d2 = np.sum(y**2, axis=1)
# Compute t = (p - a) . n
xdotn = np.copy(x)
for i in range(3):
xdotn[:,i] *= n[i]
xdotn = np.sum(xdotn, axis=1)
# Compute squared distance to line
dl = np.zeros_like(xdotn)
for i in range(3):
dl += (x[:,i] - xdotn * n[i]) ** 2
# Compute length along line
norm_len = xdotn / line_len
# Which distance should we use?
d = np.where(norm_len < 0, d1, np.where(norm_len > 1, d2, dl))
return np.sqrt(d)
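# Hedged example (added comment, not part of the original script), using made-up
# points against the unit segment from (0, 0, 0) to (1, 0, 0): a point one unit above
# the middle of the segment and a point one unit beyond its far end are both at
# distance 1.
#   >>> pts = np.array([[0.5, 1.0, 0.0], [2.0, 0.0, 0.0]])
#   >>> distances_to_line_segment(pts, (np.zeros(3), np.array([1.0, 0.0, 0.0])))
#   # -> both distances equal 1.0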
def bone_labels(frame, user):
# Get points for this user
pts = frame.points[:]
pt_labels = frame.point_labels[:]
user_pts = pts[pt_labels == user._v_attrs.idx, :]
joint_map = {}
for joint in user.joints:
joint_map[joint['id']] = (joint['x'], joint['y'], joint['z'])
# Get bones
bones = dict(
neck = (1,2),
left_forearm = (9,7), left_arm = (7,6),
right_forearm = (13,15), right_arm = (12,13),
left_chest = (6,17), right_chest = (12,21),
left_thigh = (17,18), left_calf = (18,20),
right_thigh = (21,22), right_calf = (22,24),
left_collar = (2,6), right_collar = (2,12),
# chest = (2,3)
)
bone_lines = {}
for bone_name, bone_joints in bones.items():
j1, j2 = bone_joints
if j1 not in joint_map or j2 not in joint_map:
continue
j1_loc, j2_loc = tuple(joint_map[j] for j in (j1,j2))
bone_lines[bone_name] = np.array((j1_loc, j2_loc))
bone_names = sorted(bone_lines.keys())
bone_dists = np.zeros((user_pts.shape[0], len(bone_names)))
for i, n in enumerate(bone_names):
bone_dists[:,i] = distances_to_line_segment(user_pts, bone_lines[n])
closest_bone_indices = np.argmin(bone_dists, axis=1)
label_image = np.zeros_like(frame.depth)
label_image[frame.label == user._v_attrs.idx] = closest_bone_indices + 1
return label_image
if __name__ == '__main__':
main()
| apache-2.0 | -3,595,863,162,251,792,400 | 27.930556 | 78 | 0.56097 | false |
eriksore/sdn | Old/addflow.py | 1 | 5988 | import json
import networkx as nx
from networkx.readwrite import json_graph
import httplib2
baseUrl = 'http://192.168.231.246:8080/controller/nb/v2'
containerName = 'default'
h = httplib2.Http(".cache")
h.add_credentials('admin', 'admin')
def find_edge(edges, headNode, tailNode):
    for edge in edges:
if edge['edge']['headNodeConnector']['node']['id'] == headNode and edge['edge']['tailNodeConnector']['node']['id'] == tailNode:
return edge
return None
def find_ports(edges, headNode, tailNode):
    for edge in edges:
if edge['edge']['headNodeConnector']['node']['id'] == headNode and edge['edge']['tailNodeConnector']['node']['id'] == tailNode:
portId = edge['properties']['name']['value']
return portId
return None
def put_path(path, odlEdges, srcIP, dstIP, baseUrl):
for i, node in enumerate(path[1:-1]):
flowName = "fromIP" + srcIP[-1:] + "Po" + str(i)
        ingressEdge = find_edge(odlEdges, path[i], node)
        egressEdge = find_edge(odlEdges, node, path[i+2])
newFlow = build_flow_entry(flowName, ingressEdge, egressEdge, node, srcIP, dstIP)
switchType = newFlow['node']['type']
putUrl = build_flow_url(baseUrl, 'default', switchType, node, flowName)
# PUT the flow to the controller
resp, content = put_dict(h, putUrl, newFlow)
def build_flow_entry(flowName, ingressEdge, egressEdge, node, srcIP, dstIP):
defaultPriority = "500"
newFlow = {"installInHw":"false"}
ingressPort = ingressEdge['edge']['tailNodeConnector']['id']
egressPort = egressEdge['edge']['headNodeConnector']['id']
switchType = egressEdge['edge']['headNodeConnector']['node']['type']
newFlow.update({"name":flowName})
newFlow.update({"node":ingressEdge['edge']['tailNodeConnector']['node']})
newFlow.update({"ingressPort":ingressPort, "priority":defaultPriority})
newFlow.update({"actions":"OUTPUT=" + egressPort})
return newFlow
#Second level URL build
def build_url(baseUrl, service, containerName):
putUrl = '/'.join([baseUrl, service, containerName])
return putUrl
#Build URL to work with flows on nodes
def build_flow_url(baseUrl, containerName, switchType, switchId, flowName):
putUrl = build_url(baseUrl, 'flowprogrammer', containerName) +'/node'+ '/'.join(['', switchType, switchId,'staticFlow', flowName])
return putUrl
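# Hedged illustration (added comment, not part of the original script): with the
# baseUrl defined above, build_flow_url(baseUrl, 'default', 'OF',
# '00:00:00:00:00:00:00:03', 'flow1') should yield
# 'http://192.168.231.246:8080/controller/nb/v2/flowprogrammer/default/node/OF/00:00:00:00:00:00:00:03/staticFlow/flow1'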
def put_dict(h, url, d):
resp, content = h.request(
uri = url,
method = 'PUT',
headers={'Content-Type' : 'application/json'},
body=json.dumps(d),
)
return resp, content
def build_flow_rule_for_node():
return None
# Get all the edges/links
resp, content = h.request(build_url(baseUrl, 'topology', containerName), "GET")
edgeProperties = json.loads(content)
odlEdges = edgeProperties['edgeProperties']
#print json.dumps(odlEdges, indent = 2)
# Get all the nodes/switches
resp, content = h.request(build_url(baseUrl, 'switchmanager', containerName) + '/nodes/', "GET")
nodeProperties = json.loads(content)
odlNodes = nodeProperties['nodeProperties']
#print json.dumps(odlNodes, indent = 2)
#Print information about one specific node
resp, content = h.request(build_url(baseUrl, 'switchmanager',containerName) + '/node/OF/00:00:00:00:00:00:00:03', "GET")
nodeParam = json.loads(content)
nodeParameters = nodeParam['nodeConnectorProperties']
#print json.dumps(nodeParameters, indent = 2)
# Put nodes and edges into a graph
graph = nx.Graph()
for node in odlNodes:
graph.add_node(node['node']['id'])
for edge in odlEdges:
e = (edge['edge']['headNodeConnector']['node']['id'], edge['edge']['tailNodeConnector']['node']['id'])
graph.add_edge(*e)
#print "graph.edges()"
print graph.edges()
# Print out graph info as a sanity check
#print "shortest path from 3 to 7"
shortest_path = nx.shortest_path(graph, "00:00:00:00:00:00:00:03", "00:00:00:00:00:00:00:07")
#print shortest_path
srcIP = "10.0.0.1" #raw_input('What is the source IP?> ')
dstIP = "10.0.0.8" #raw_input('What is the destination IP?> ')
put_path(shortest_path, odlEdges, srcIP, dstIP, baseUrl)
put_path(shortest_path, odlEdges, dstIP, srcIP, baseUrl)
#print h.request(build_url(baseUrl, 'topology', containerName), "GET")
#Test to GET out the flows from a node
resp, content = h.request(build_url(baseUrl, 'flowprogrammer', containerName) + '/node/OF/00:00:00:00:00:00:00:03', "GET")
flowConfig = json.loads(content)
flowConf = flowConfig['flowConfig']
#print json.dumps(flowConf, indent = 2)
#Print out the topology
resp, content = h.request(build_url(baseUrl,'topology',containerName),"GET")
allTopology = json.loads(content)
allTopo = allTopology['edgeProperties']
#print json.dumps(allTopo, indent = 2)
#headNode = "00:00:00:00:00:00:00:03"
#tailNode = "00:00:00:00:00:00:00:02"
def add_sp_flows(shortest_path):
for i in range(len(shortest_path)-1):
headNode = shortest_path[i]
tailNode = shortest_path[i+1]
#Forward flow
flowName = headNode[21:23] + 'to' + tailNode[21:23] + 'IPto' + dstIP
        outPutPort = find_ports(odlEdges, shortest_path[i], shortest_path[i+1])
flowRule = {"node":{"type":"OF", "id":headNode},"installInHw":"true","name":flowName,"etherType":"0x800", "actions":["OUTPUT="+outPutPort[-1]],"priority":"500","nwDst":dstIP}
putUrl = build_flow_url(baseUrl, 'default',"OF", headNode, flowName)
resp, content = put_dict(h, putUrl, flowRule)
#Backward flow
flowName = tailNode[21:23] + 'to' + headNode[21:23] + 'IPto' + srcIP
        outPutPort = find_ports(odlEdges, shortest_path[i+1], shortest_path[i])
flowRule = {"node":{"type":"OF", "id":tailNode},"installInHw":"true","name":flowName,"etherType":"0x800", "actions":["OUTPUT="+outPutPort[-1]],"priority":"500","nwDst":srcIP}
putUrl = build_flow_url(baseUrl, 'default',"OF", tailNode, flowName)
resp, content = put_dict(h, putUrl, flowRule)
print flowRule
print "Flows have been added!"
add_sp_flows(shortest_path)
| mit | -1,614,679,354,172,069,600 | 37.140127 | 182 | 0.679192 | false |
dbarbier/privot | python/test/t_MarginalTransformationHessian_std.py | 1 | 3320 | #! /usr/bin/env python
from openturns import *
from math import *
TESTPREAMBLE()
RandomGenerator().SetSeed(0)
try :
coll1 = DistributionCollection(0)
coll1.add(Normal(1.0, 2.5))
coll1.add(Gamma(1.5, 3.0))
pointLow = NumericalPoint(0)
pointLow.add(coll1[0].computeQuantile(0.25)[0])
pointLow.add(coll1[1].computeQuantile(0.25)[0])
pointHigh = NumericalPoint(0)
pointHigh.add(coll1[0].computeQuantile(0.75)[0])
pointHigh.add(coll1[1].computeQuantile(0.75)[0])
coll2 = DistributionCollection(0)
coll2.add(Gamma(2.5, 2.0))
coll2.add(Normal(3.0, 1.5))
# First, check the old constructor
evaluation = MarginalTransformationEvaluation(coll1)
transformation = MarginalTransformationHessian(evaluation)
print "transformation=", repr(transformation)
print "transformation.hessian(", repr(pointLow), ")=", repr(transformation.hessian(pointLow))
print "finite difference hessian(", repr(pointLow), ")=", repr(CenteredFiniteDifferenceHessian(1.0e-4, evaluation).hessian(pointLow))
print "transformation.hessian(", repr(pointHigh), ")=", repr(transformation.hessian(pointHigh))
print "finite difference hessian(", repr(pointHigh), ")=", repr(CenteredFiniteDifferenceHessian(1.0e-4, evaluation).hessian(pointHigh))
print "input dimension=", transformation.getInputDimension()
print "output dimension=", transformation.getOutputDimension()
# Second, check the constructor for old inverse transformation
evaluation = MarginalTransformationEvaluation(coll1, MarginalTransformationEvaluation.TO)
transformation = MarginalTransformationHessian(evaluation)
print "transformation=", repr(transformation)
uLow = NumericalPoint(coll1.getSize(), 0.25)
uHigh = NumericalPoint(coll1.getSize(), 0.75)
print "transformation.hessian(", repr(uLow), ")=", repr(transformation.hessian(uLow))
print "finite difference hessian(", repr(uLow), ")=", repr(CenteredFiniteDifferenceHessian(1.0e-4, evaluation).hessian(uLow))
print "transformation.hessian(", repr(uHigh), ")=", repr(transformation.hessian(uHigh))
print "finite difference hessian(", repr(uHigh), ")=", repr(CenteredFiniteDifferenceHessian(1.0e-4, evaluation).hessian(uHigh))
print "input dimension=", transformation.getInputDimension()
print "output dimension=", transformation.getOutputDimension()
# Third, check the constructor for the new transformation
evaluation = MarginalTransformationEvaluation(coll1, coll2)
transformation = MarginalTransformationHessian(evaluation)
print "transformation=", repr(transformation)
print "transformation.hessian(", repr(pointLow), ")=", repr(transformation.hessian(pointLow))
print "finite difference hessian(", repr(pointLow), ")=", repr(CenteredFiniteDifferenceHessian(1.0e-4, evaluation).hessian(pointLow))
print "transformation.hessian(", repr(pointHigh), ")=", repr(transformation.hessian(pointHigh))
print "finite difference hessian(", repr(pointHigh), ")=", repr(CenteredFiniteDifferenceHessian(1.0e-4, evaluation).hessian(pointHigh))
print "input dimension=", transformation.getInputDimension()
print "output dimension=", transformation.getOutputDimension()
except :
import sys
print "t_MarginalTransformationHessian_std.py", sys.exc_type, sys.exc_value
| lgpl-3.0 | 2,855,770,965,371,125,000 | 53.42623 | 139 | 0.740964 | false |
linuxlewis/channels | channels/generic/base.py | 1 | 2261 | from __future__ import unicode_literals
from ..routing import route_class
from ..sessions import channel_session
from ..auth import channel_session_user
class BaseConsumer(object):
"""
Base class-based consumer class. Provides the mechanisms to be a direct
routing object and a few other things.
Class-based consumers should be used with route_class in routing, like so::
from channels import route_class
routing = [
route_class(JsonWebsocketConsumer, path=r"^/liveblog/(?P<slug>[^/]+)/"),
]
"""
method_mapping = {}
channel_session = False
channel_session_user = False
def __init__(self, message, **kwargs):
"""
Constructor, called when a new message comes in (the consumer is
the uninstantiated class, so calling it creates it)
"""
self.message = message
self.kwargs = kwargs
self.dispatch(message, **kwargs)
@classmethod
def channel_names(cls):
"""
Returns a list of channels this consumer will respond to, in our case
derived from the method_mapping class attribute.
"""
return set(cls.method_mapping.keys())
@classmethod
def as_route(cls, attrs=None, **kwargs):
"""
Shortcut function to create route with filters (kwargs)
to direct to a class-based consumer with given class attributes (attrs)
"""
_cls = cls
if attrs:
assert isinstance(attrs, dict), 'attrs must be a dict'
_cls = type(cls.__name__, (cls,), attrs)
return route_class(_cls, **kwargs)
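    # Hedged usage sketch (added comment; MyConsumer is a hypothetical subclass):
    #   routing = [
    #       MyConsumer.as_route(attrs={'channel_session': True}, path=r'^/chat/'),
    #   ]
    # is equivalent to route_class() with a dynamically created subclass that has
    # channel_session enabled.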
def get_handler(self, message, **kwargs):
"""
Return handler uses method_mapping to return the right method to call.
"""
handler = getattr(self, self.method_mapping[message.channel.name])
if self.channel_session_user:
return channel_session_user(handler)
elif self.channel_session:
return channel_session(handler)
else:
return handler
def dispatch(self, message, **kwargs):
"""
Call handler with the message and all keyword arguments.
"""
return self.get_handler(message, **kwargs)(message, **kwargs)
| bsd-3-clause | 5,622,295,942,873,659,000 | 31.768116 | 84 | 0.616099 | false |
gr33ndata/dysl | dysl/corpora/corpuslib/train.py | 1 | 3488 | import os
import codecs
import time
from datetime import datetime
class Train:
def __init__(self, root=''):
# Setting root directory for training data
if root:
self.root = root
self.using_builtin_training = False
else:
#self.root = 'corpora/corpus-esaren'
self.root = __file__.rsplit('/',2)[0] + '/corpus-esaren'
self.using_builtin_training = True
#print self.root
self.root_depth = len(self.root.split('/'))
# Set of languages
self.lang_set = set()
# Temp Training Samples
# These are sample adding in run-time
# self.temp_train_data = {
# 'en': ['hello world', 'this is sparta'],
# 'es': ['hasta la vista', 'hola amigos']
# }
self.temp_train_data = {}
def get_corpus(self):
self.corpus = []
self.load()
return self.corpus
def get_corpus_path(self):
return self.root
def get_lang_set(self):
return list(self.lang_set)
def add(self, text=u'', lang=''):
if self.using_builtin_training:
print "Warning: Cannot add training samples to builtin training-set."
return
elif not text or not lang:
raise Exception("Error: No input text given!")
if not lang in self.temp_train_data:
self.temp_train_data[lang] = [text]
else:
self.temp_train_data[lang].append(text)
def save(self, domain='', filename=''):
if self.using_builtin_training:
raise Exception("Failed to save data, use custom training-set instead.")
if not domain:
timestamp = datetime.now().strftime("%y%m%d%H%M%S")
folder_path = self.root + '/batchTS' + timestamp
else:
folder_path = self.root + '/' + domain
try:
os.mkdir(folder_path)
except:
pass
for lang in self.temp_train_data:
lang_folder_path = folder_path + '/' + lang
try:
os.mkdir(lang_folder_path)
except:
pass
if not filename:
filename_and_path = lang_folder_path + '/file.txt'
else:
filename_and_path = lang_folder_path + '/' + filename
f = codecs.open(filename_and_path, mode='w', encoding='utf-8')
for sample in self.temp_train_data[lang]:
text = sample + u'\n'
f.write(text)
f.close()
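    # Hedged usage sketch (added comment; paths and texts are made up):
    #   t = Train(root='my-corpus')
    #   t.add(text=u'hola amigos', lang='es')
    #   t.save(domain='demo', filename='samples.txt')
    # would write the sample to my-corpus/demo/es/samples.txt.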
def get_last_modified(self):
# Get corpus last modified timestamp
if self.using_builtin_training:
return 0
else:
return os.path.getmtime(self.root)
def visit(self, arg, dirname, names):
#print dirname
path = dirname.split('/')
#print 'path:', path, len(path)
if len(path) == self.root_depth + 2:
lang = path[-1]
# Update Language Set
self.lang_set.add(lang)
# Ignore hidden files
names = [name for name in names if not name.startswith('.')]
for name in names:
self.corpus.append((lang, dirname + '/' + name))
#print lang, path, dirname + '/' + name
def load(self):
os.path.walk(self.root, self.visit, '') | mit | -8,860,003,400,784,678,000 | 28.820513 | 84 | 0.509461 | false |
JeffRoy/mi-dataset | mi/dataset/driver/cg_dcl_eng/dcl/test/test_cg_dcl_eng_dcl_recovered_driver.py | 1 | 1070 | #!/home/mworden/uframes/ooi/uframe-1.0/python/bin/python
__author__ = 'mworden'
from mi.core.log import get_logger
log = get_logger()
from mi.idk.config import Config
import unittest
import os
from mi.dataset.driver.cg_dcl_eng.dcl.cg_dcl_eng_dcl_recovered_driver import parse
from mi.dataset.dataset_driver import ParticleDataHandler
class DriverTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_one(self):
sourceFilePath = os.path.join('mi','dataset','driver','cg_dcl_eng','dcl','resource',
'20140915.syslog.log')
particle_data_hdlr_obj = ParticleDataHandler()
particle_data_hdlr_obj = parse(Config().base_dir(), sourceFilePath, particle_data_hdlr_obj)
log.debug("SAMPLES: %s", particle_data_hdlr_obj._samples)
log.debug("FAILURE: %s", particle_data_hdlr_obj._failure)
self.assertEquals(particle_data_hdlr_obj._failure, False)
if __name__ == '__main__':
test = DriverTest('test_one')
test.test_one() | bsd-2-clause | -7,268,057,656,039,230,000 | 24.5 | 99 | 0.654206 | false |
sjorek/geoanonymizer.py | geoanonymizer/spatial/projection.py | 1 | 2642 | # -*- coding: utf-8 -*-
u"""
Functions dealing with geodesic projection systems.
WGS84 (EPSG 4326) projection system
“OpenStreetMap uses the WGS84 spatial reference system used by the
Global Positioning System (GPS). It uses geographic coordinates
between -180° and 180° longitude and -90° and 90° latitude. So
this is the "native" OSM format.
This is the right choice for you if you need geographical coordinates
or want to transform the coordinates into some other spatial reference
system or projection.”
-- from `Projections/Spatial reference systems: WGS84 (EPSG 4326)
<http://openstreetmapdata.com/info/projections#wgs84>`_
Mercator (EPSG 3857) projection system
“Most tiled web maps (such as the standard OSM maps and Google Maps)
use this Mercator projection.
The map area of such maps is a square with x and y coordiates both
between -20,037,508.34 and 20,037,508.34 meters. As a result data
north of about 85.1° and south of about -85.1° latitude can not be
shown and has been cut off. …
This is the right choice for you if you are creating tiled web maps.”
-- from `Projections/Spatial reference systems: Mercator (EPSG 3857)
<http://openstreetmapdata.com/info/projections#mercator>`_
Hint: Apple™ iOS or Google™ Android tracked coordinates use WGS84 (EPSG 4326)
projection and nearly all geomap-services, like google-maps, return this too,
although they're utilizing Mercator (EPSG 3857) projection internally.
"""
import math
def _generate_epsg_4326_to_epsg_3857_converter():
factor1 = 20037508.34 / 180
factor2 = math.pi / 360
factor3 = math.pi / 180
def convert_epsg_4326_to_epsg_3857(latitude, longitude):
"""
Convert WGS84 (EPSG 4326) to Mercator (EPSG 3857) projection.
"""
x = longitude * factor1
y = (math.log(math.tan((90 + latitude) * factor2)) / factor3) * factor1
return x, y
return convert_epsg_4326_to_epsg_3857
convert_gps_to_map_coordinates = _generate_epsg_4326_to_epsg_3857_converter()
def _generate_epsg_3857_to_epsg_4326_converter():
factor1 = 180 / 20037508.34
factor2 = 360 / math.pi
factor3 = math.pi / 20037508.34
def convert_epsg_3857_to_epsg_4326(x, y):
"""
Convert Mercator (EPSG 3857) to WGS84 (EPSG 4326) projection.
"""
longitude = x * factor1
latitude = factor2 * math.atan(math.exp(y * factor3)) - 90
return latitude, longitude
return convert_epsg_3857_to_epsg_4326
convert_map_to_gps_coordinates = _generate_epsg_3857_to_epsg_4326_converter()
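# Hedged example (added comment, not part of the original module): latitude 0,
# longitude 90 maps to a quarter of the Mercator map width, and converting back
# recovers the input (up to floating point noise).
#   >>> convert_gps_to_map_coordinates(0.0, 90.0)         # -> (~10018754.17, 0.0)
#   >>> convert_map_to_gps_coordinates(10018754.17, 0.0)  # -> (~0.0, ~90.0)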
| mit | -186,810,838,438,010,940 | 33.051948 | 79 | 0.692601 | false |
minlexx/pyevemon | esi_client/apis/__init__.py | 1 | 1221 | from __future__ import absolute_import
# import apis into api package
from .alliance_api import AllianceApi
from .assets_api import AssetsApi
from .bookmarks_api import BookmarksApi
from .calendar_api import CalendarApi
from .character_api import CharacterApi
from .clones_api import ClonesApi
from .contacts_api import ContactsApi
from .corporation_api import CorporationApi
from .dogma_api import DogmaApi
from .fittings_api import FittingsApi
from .fleets_api import FleetsApi
from .incursions_api import IncursionsApi
from .industry_api import IndustryApi
from .insurance_api import InsuranceApi
from .killmails_api import KillmailsApi
from .location_api import LocationApi
from .loyalty_api import LoyaltyApi
from .mail_api import MailApi
from .market_api import MarketApi
from .opportunities_api import OpportunitiesApi
from .planetary_interaction_api import PlanetaryInteractionApi
from .routes_api import RoutesApi
from .search_api import SearchApi
from .skills_api import SkillsApi
from .sovereignty_api import SovereigntyApi
from .status_api import StatusApi
from .universe_api import UniverseApi
from .user_interface_api import UserInterfaceApi
from .wallet_api import WalletApi
from .wars_api import WarsApi
| gpl-3.0 | -7,706,126,048,192,302,000 | 36 | 62 | 0.837019 | false |
widdowquinn/THAPBI | ITS_region_genomic_coverage/get_genes_from_GFF.py | 1 | 1976 | #!/usr/bin/env python
#author: Peter Thorpe September 2016. The James Hutton Insitute,Dundee,UK.
#Title:
#script to get the gene columns only from GFF"
#imports
import os
import sys
from sys import stdin,argv
import sys
import datetime
from optparse import OptionParser
###########################################################################
def write_out_ITS_GFF(gff, out):
"""function parse and print GFF lines that
correspond to gene only """
gff_file = open(gff, "r")
out_file = open(out, "w")
for line in gff_file:
if line.startswith("#"):
continue
assert len(line.split("\t")) ==9 ,"GFF fields wrong length should be 9"
scaffold,aug,cds_type,start,stop,e,f,g,gene_info = line.split("\t")
if cds_type =="gene":
out_file.write(line)
gff_file.close()
out_file.close()
###########################################################################
if "-v" in sys.argv or "--version" in sys.argv:
print ("v0.0.1")
sys.exit(0)
usage = """Use as follows:
Title:
script to get the gene columns only from GFF
$ get_genes_from_GFF.py --gff gff.out -o out.gff
"""
parser = OptionParser(usage=usage)
parser.add_option("-g", "--gff", dest="gff", default=None,
help="predicted gene in gff3 format",
metavar="FILE")
parser.add_option("-o", "--out_file", dest="out_file",
default="ITS_GFF.out",
help="output line corresponding to genes only.")
(options, args) = parser.parse_args()
gff = options.gff
out_file = options.out_file
#run the program
if not os.path.isfile(gff):
print("sorry, couldn't open the file: " + ex.strerror + "\n")
print ("current working directory is :", os.getcwd() + "\n")
print ("files are :", [f for f in os.listdir('.')])
sys_exit("\n\nInput blast file not found: %s" % gff)
# call the top function
write_out_ITS_GFF(gff, out_file)
| mit | 6,784,813,885,300,704,000 | 23.395062 | 79 | 0.566296 | false |
discoapi/discotech | discotech/discoAPI/keywordManager.py | 1 | 3203 | __package__ = 'discotech.discoAPI'
import json
from discotech import discotechError
class KeywordManager(object):
"""
    Simple object to store and queue keywords to search for in social media providers
"""
def __init__(self,keywords = [],convertToSearchPhrases = False):
"""
@type keywords: list
        @param keywords: the keywords you want to search for
@type convertToSearchPhrases: bool
        @param convertToSearchPhrases: whether keywords should be converted to matching search phrases, for example 'spider man' => ['spider','man','spiderman','spider_man']
"""
if keywords:
self.keywords = self._keyworsToSearchPhrases(keywords) if convertToSearchPhrases else list(keywords)
self._keywordCount = len(self.keywords)
self._headLocation = 0
else:
self.keywords = keywords
def dequque(self):
"""
        Dequeue a keyword from the queue; the keyword is then moved to the end of the queue
@return: the next keyword in queue
"""
if not self.keywords:
            raise discotechError("you don't have any keywords")
retValue = self.keywords[self._headLocation]
# move head next
self._headLocation = (self._headLocation + 1) % self._keywordCount
return retValue
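        # Hedged illustration (added comment): with KeywordManager(['spider man',
        # 'batman']) and convertToSearchPhrases left False, successive dequque()
        # calls return 'spider man', 'batman', 'spider man', ... in round-robin order.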
def _updateFromList(self,keywords):
self.keywords = list(keywords)
self._keywordCount = len(self.keywords)
self._headLocation = 0
def _updateFromDict(self,config):
if 'keywords' in config:
convertToSearchPhrases = False
if 'search_phrase' in config and config['search_phrase'] is True:
convertToSearchPhrases = True
self.keywords = self._keyworsToSearchPhrases(config['keywords']) if convertToSearchPhrases else list(config['keywords'])
self._keywordCount = len(self.keywords)
self._headLocation = 0
else:
raise discotechError("no keywords were given")
def _keyworToSearchPhrases(self,keyword):
words = keyword.split(' ')
#edge case
if len(words) == 1:
return words
cleanWords = []
#cleanup stage
for word in words:
word = word.strip()
if word != '':
cleanWords.append(word)
#combinator stage
combinators = ['','_']
combinedWords = []
for combinator in combinators:
combinedWords.append(combinator.join(cleanWords))
return cleanWords + combinedWords
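    # Illustration of the helper above (added comment):
    #   self._keyworToSearchPhrases('spider man')
    #   -> ['spider', 'man', 'spiderman', 'spider_man']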
def _keyworsToSearchPhrases(self,keywords):
retList = []
for keyword in keywords:
retList += self._keyworToSearchPhrases(keyword)
return retList
def loadConfig(self,config):
"""
        load keywords from a configuration
        @type config: list | str
        @param config: a list of keywords, or a path or URL of a JSON configuration file
"""
#if it's list
if type(config) is list:
self._updateFromList(config)
#if it's a dict
if type(config) is dict:
self._updateFromDict(config)
#if it's string
if type(config) is str:
#could be an address
if config.startswith('http://') or config.startswith('https://'):
configFile = getUrlContents(config)
confList = json.loads(configFile['response_text'])
                #recursively call yourself
return self.loadConfig(confList)
            #could be a file name
            confFile = open(config,'r')
            confList = json.loads(confFile.read())
            #recursively call yourself
            return self.loadConfig(confList)
| gpl-2.0 | -1,382,077,788,170,863,000 | 27.345133 | 163 | 0.70153 | false |
Kortemme-Lab/covariation | analysis/profile_similarity/profile_similarity.py | 1 | 6125 | # The MIT License (MIT)
#
# Copyright (c) 2015 Noah Ollikainen
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
example usage:
python profile_similarity.py ../../output/fixed_backbone/ ../../output/backrub/ > profile_similarity.txt
"""
import os
import sys
import math
import operator
import numpy
from scipy.stats import hypergeom
def get_natural_sequences(fastafile):
f = open(fastafile, 'r')
gapped_sequences = []
gapped_length = 0
length = 0
aa = 'ACDEFGHIKLMNPQRSTVWY'
name = ''
seq = ''
test = []
for line in f:
if line[0] == ">":
if name != '':
gapped_sequences.append(seq)
#print seq[:10]
name = line[1:].strip()
seq = ''
else:
seq += line.strip()
gapped_sequences.append(seq)
gapped_length = len(seq)
return gapped_sequences
def get_rosetta_sequence(fastafile, indicefile):
f = open(indicefile, 'r')
pfam_indices = {}
for line in f:
pfam_indices[int(line.split()[2])-1] = line.split()[0]
f.close()
f = open(fastafile, 'r')
gapped_sequences = set()
gapped_length = 0
length = 0
aa = 'ACDEFGHIKLMNPQRSTVWY'
name = ''
seq = ''
test = []
for line in f:
if line[0] == ">":
if name != '':
gapped_sequences.add(seq)
#print seq[:10]
name = line[1:].strip()
seq = ''
else:
seq += line.strip()
gapped_sequences.add(seq)
gapped_length = len(seq)
sequences = []
ungapped = []
to_remove = set()
for seq in gapped_sequences:
if len(seq) != gapped_length:
to_remove.add(seq)
for seq in to_remove:
gapped_sequences.remove(seq)
for i in range(0, gapped_length):
counter = 0
for seq in gapped_sequences:
char = seq[i]
if char not in aa:
counter += 1
if counter < 1:
ungapped.append(i)
length = len(ungapped)
ungapped_indices = {}
designed_indices = {}
for seq in gapped_sequences:
ungapped_seq = ''
count = 0
for i in ungapped:
if i in pfam_indices:
ungapped_seq += seq[i]
ungapped_indices[count] = pfam_indices[i]
designed_indices[int(pfam_indices[i])] = count
count += 1
sequences.append(ungapped_seq)
length = len(ungapped_seq)
return sequences, designed_indices
def get_covarying_pairs(nature_mi, design_mi):
pairs1 = {}
pairs2 = {}
f = open(nature_mi)
threshold1 = 0
to_sort = []
dists = {}
natural_scores = []
for line in f:
i = line.split()[3]
j = line.split()[4]
z = float(line.split()[7])
dist = float(line.split()[8])
if z > 0:
z = math.sqrt(z)
else:
z = math.sqrt(z*-1)*-1
#if dist <= 6:
dists[i+" "+j] = dist
pairs1[i+" "+j] = z
natural_scores.append(z)
f = open(design_mi, 'r')
threshold2 = 0
designed_positions = set()
for line in f:
i = line.split()[0]
j = line.split()[1]
z = float(line.split()[4])
designed_positions.add(i)
designed_positions.add(j)
if z > 0:
z = math.sqrt(z)
else:
z = math.sqrt(z*-1)*-1
pairs2[i+" "+j] = z
a_count = 0
b_count = 0
overlap = 0
total = 0
success = set()
failure = set()
to_sort1 = []
to_sort2 = []
positions = set()
for p in pairs1:
if p in pairs2:
i = p.split()[0]
j = p.split()[1]
to_sort1.append(pairs1[p])
to_sort2.append(pairs2[p])
threshold1 = numpy.mean(to_sort1) + numpy.std(to_sort1)*2
threshold2 = numpy.mean(to_sort2) + numpy.std(to_sort2)*2
designed = []
natural = []
overlapping = []
for p in pairs1:
if p in pairs2:
i = p.split()[0]
j = p.split()[1]
if pairs1[p] > threshold1:
a_count += 1
natural.append(p)
positions.add(int(i))
positions.add(int(j))
if pairs2[p] > threshold2:
b_count += 1
designed.append(p)
if pairs1[p] > threshold1 and pairs2[p] > threshold2:
overlap += 1
overlapping.append(p)
total += 1
return natural, positions
background = {
'A' : 0.0853130414059,
'C' : 0.0145091808885,
'E' : 0.0697042211031,
'D' : 0.0576610517405,
'G' : 0.0677683836625,
'F' : 0.0368651011894,
'I' : 0.0658157819481,
'H' : 0.0211289643495,
'K' : 0.0581917850968,
'M' : 0.0190262038642,
'L' : 0.0958638899794,
'N' : 0.0369395202374,
'Q' : 0.036293485414,
'P' : 0.0391082335344,
'S' : 0.0594265039867,
'R' : 0.0562652852139,
'T' : 0.0541996845528,
'W' : 0.0108604669712,
'V' : 0.0866667775459,
'Y' : 0.0283924373158
}
| mit | 3,561,183,417,832,896,000 | 26.466368 | 104 | 0.552327 | false |
Clinical-Genomics/scout | scout/adapter/mongo/panel.py | 1 | 18087 | """Code to handle panels in the mongo database"""
import datetime as dt
import logging
import math
from copy import deepcopy
import pymongo
from bson import ObjectId
from scout.build import build_panel
from scout.exceptions import IntegrityError
from scout.parse.panel import get_omim_panel_genes
from scout.utils.date import get_date
LOG = logging.getLogger(__name__)
class PanelHandler:
"""Code to handle interactions with the mongo database for panels"""
def load_panel(self, parsed_panel):
"""Load a gene panel based on the info sent
A panel object is built and integrity checks are made.
The panel object is then loaded into the database.
Args:
path(str): Path to panel file
institute(str): Name of institute that owns the panel
panel_id(str): Panel id
date(datetime.datetime): Date of creation
version(float)
full_name(str): Option to have a long name
panel_info(dict): {
'file': <path to panel file>(str),
'institute': <institute>(str),
'type': <panel type>(str),
'date': date,
'version': version,
'panel_name': panel_id,
'full_name': name,
}
"""
panel_obj = build_panel(parsed_panel, self)
self.add_gene_panel(panel_obj)
def load_omim_panel(self, genemap2_lines, mim2gene_lines, institute=None):
"""Create and load the OMIM-AUTO panel
If the panel already exists, update with new information and increase version
Args:
            genemap2_lines(iterable(str)): The genemap2 file information
            mim2gene_lines(iterable(str)): The mim2genes file information
            institute(str): Which institute is responsible. Default: 'cust002'
"""
institute = institute or "cust002"
existing_panel = self.gene_panel(panel_id="OMIM-AUTO")
if not existing_panel:
LOG.warning("OMIM-AUTO does not exists in database")
LOG.info("Creating a first version")
version = 1.0
if existing_panel:
version = float(math.floor(existing_panel["version"]) + 1)
LOG.info("Setting version to %s", version)
date_string = None
# Get the correct date when omim files where released
for line in genemap2_lines:
if "Generated" in line:
date_string = line.split(":")[-1].strip()
break
date_obj = get_date(date_string)
if existing_panel:
if existing_panel["date"] == date_obj:
LOG.warning("There is no new version of OMIM")
return
panel_data = {
"path": None,
"type": "clinical",
"date": date_obj,
"panel_id": "OMIM-AUTO",
"institute": institute,
"version": version,
"display_name": "OMIM-AUTO",
"genes": [],
}
alias_genes = self.genes_by_alias()
genes = get_omim_panel_genes(
genemap2_lines=genemap2_lines,
mim2gene_lines=mim2gene_lines,
alias_genes=alias_genes,
)
for gene in genes:
panel_data["genes"].append(gene)
panel_obj = build_panel(panel_data, self)
if existing_panel:
new_genes = self.compare_mim_panels(existing_panel, panel_obj)
if not new_genes:
LOG.info("The new version of omim does not differ from the old one")
LOG.info("No update is added")
return
self.update_mim_version(new_genes, panel_obj, old_version=existing_panel["version"])
self.add_gene_panel(panel_obj)
@staticmethod
def compare_mim_panels(existing_panel, new_panel):
"""Check if the latest version of OMIM differs from the most recent in database
        Return all genes that were not in the previous version.
Args:
existing_panel(dict)
new_panel(dict)
Returns:
            new_genes(set(int))
"""
existing_genes = {gene["hgnc_id"] for gene in existing_panel["genes"]}
new_genes = {gene["hgnc_id"] for gene in new_panel["genes"]}
return new_genes.difference(existing_genes)
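    # Hedged illustration (added comment): if the stored panel contains genes with
    # hgnc_ids {1, 2, 3} and the freshly built panel contains {2, 3, 4}, the call
    # returns {4}, i.e. only the ids introduced by the new OMIM release.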
@staticmethod
def update_mim_version(new_genes, new_panel, old_version):
"""Set the correct version for each gene
Loop over the genes in the new panel
Args:
            new_genes(set(int)): Set with the hgnc ids of the new genes
new_panel(dict)
"""
LOG.info("Updating versions for new genes")
version = new_panel["version"]
nr_genes = 0
for nr_genes, gene in enumerate(new_panel["genes"]):
gene_symbol = gene["hgnc_id"]
# If the gene is new we add the version
if gene_symbol in new_genes:
gene["database_entry_version"] = version
continue
# If the gene is old it will have the previous version
gene["database_entry_version"] = old_version
LOG.info("Updated %s genes", nr_genes)
def add_gene_panel(self, panel_obj):
"""Add a gene panel to the database
Args:
panel_obj(dict)
"""
panel_name = panel_obj["panel_name"]
panel_version = panel_obj["version"]
display_name = panel_obj.get("display_name", panel_name)
if self.gene_panel(panel_name, panel_version):
raise IntegrityError(
"Panel {0} with version {1} already"
" exist in database".format(panel_name, panel_version)
)
LOG.info("loading panel %s, version %s to database", display_name, panel_version)
LOG.info("Nr genes in panel: %s", len(panel_obj.get("genes", [])))
result = self.panel_collection.insert_one(panel_obj)
LOG.debug("Panel saved")
return result.inserted_id
def panel(self, panel_id):
"""Fetch a gene panel by '_id'.
Args:
panel_id (str, ObjectId): str or ObjectId of document ObjectId
Returns:
dict: panel object or `None` if panel not found
"""
if not isinstance(panel_id, ObjectId):
panel_id = ObjectId(panel_id)
panel_obj = self.panel_collection.find_one({"_id": panel_id})
return panel_obj
def delete_panel(self, panel_obj):
"""Delete a panel by '_id'.
Args:
panel_obj(dict)
Returns:
res(pymongo.DeleteResult)
"""
res = self.panel_collection.delete_one({"_id": panel_obj["_id"]})
LOG.warning(
"Deleting panel %s, version %s" % (panel_obj["panel_name"], panel_obj["version"])
)
return res
def gene_panel(self, panel_id, version=None):
"""Fetch a gene panel.
If no panel is sent return all panels
Args:
panel_id (str): unique id for the panel
version (str): version of the panel. If 'None' latest version will be returned
Returns:
gene_panel: gene panel object
"""
query = {"panel_name": panel_id}
if version:
LOG.info("Fetch gene panel {0}, version {1} from database".format(panel_id, version))
query["version"] = version
return self.panel_collection.find_one(query)
LOG.info("Fetching gene panels %s from database", panel_id)
res = self.panel_collection.find(query).sort("version", -1)
for panel in res:
return panel
LOG.info("No gene panel found")
return None
def gene_panels(self, panel_id=None, institute_id=None, version=None):
"""Return all gene panels
If panel_id return all versions of panels by that panel name
Args:
panel_id(str)
Returns:
cursor(pymongo.cursor)
"""
query = {}
if panel_id:
query["panel_name"] = panel_id
if version:
query["version"] = version
if institute_id:
query["institute"] = institute_id
return self.panel_collection.find(query)
def hgnc_to_panels(self, hgnc_id):
"""Get a list of gene panel objects for a hgnc_id
Args:
hgnc_id(int)
Returns:
hgnc_panels(dict): A dictionary with hgnc as keys and lists of
gene panel objects as values
"""
return self.panel_collection.find({"genes.hgnc_id": hgnc_id})
def gene_to_panels(self, case_obj):
"""Fetch all gene panels and group them by gene
Args:
case_obj(scout.models.Case)
Returns:
gene_dict(dict): A dictionary with gene as keys and a set of
panel names as value
"""
LOG.info("Building gene to panels")
gene_dict = {}
for panel_info in case_obj.get("panels", []):
panel_name = panel_info["panel_name"]
panel_version = panel_info["version"]
panel_obj = self.gene_panel(panel_name, version=panel_version)
if not panel_obj:
## Raise exception here???
LOG.warning(
"Panel: {0}, version {1} does not exist in database".format(
panel_name, panel_version
)
)
for gene in panel_obj["genes"]:
hgnc_id = gene["hgnc_id"]
if hgnc_id not in gene_dict:
gene_dict[hgnc_id] = set([panel_name])
continue
gene_dict[hgnc_id].add(panel_name)
LOG.info("Gene to panels done")
return gene_dict
def panel_to_genes(self, panel_id=None, panel_name=None, gene_format="symbol"):
"""Return all hgnc_ids for a given gene panel
Args:
panel_id(ObjectId): _id of a gene panel (to collect specific version of a panel)
panel_name(str): Name of a gene panel (to collect latest version of a panel)
gene_format(str): either "symbol" or "hgnc_id"
Returns:
gene_list(list): a list of hgnc terms (either symbols or HGNC ids)
"""
panel_obj = None
if panel_id:
panel_obj = self.panel(panel_id)
elif panel_name:
panel_obj = self.gene_panel(panel_name)
if panel_obj is None:
return []
gene_list = [gene_obj.get(gene_format, "") for gene_obj in panel_obj.get("genes", [])]
return gene_list
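    # Hedged usage sketch (added comment; the panel name is hypothetical):
    #   adapter.panel_to_genes(panel_name="OMIM-AUTO", gene_format="hgnc_id")
    # returns a plain list of hgnc ids for the latest version of that panel.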
def update_panel(self, panel_obj, version=None, date_obj=None, maintainer=None):
"""Replace a existing gene panel with a new one
Keeps the object id
Args:
panel_obj(dict)
version(float)
date_obj(datetime.datetime)
maintainer(list(user._id))
Returns:
updated_panel(dict)
"""
LOG.info("Updating panel %s", panel_obj["panel_name"])
# update date of panel to "today"
date = panel_obj["date"]
if version:
LOG.info("Updating version from %s to version %s", panel_obj["version"], version)
panel_obj["version"] = version
# Updating version should not update date
if date_obj:
date = date_obj
elif maintainer is not None:
LOG.info(
"Updating maintainer from {} to {}".format(panel_obj.get("maintainer"), maintainer)
)
panel_obj["maintainer"] = maintainer
else:
date = date_obj or dt.datetime.now()
panel_obj["date"] = date
updated_panel = self.panel_collection.find_one_and_replace(
{"_id": panel_obj["_id"]},
panel_obj,
return_document=pymongo.ReturnDocument.AFTER,
)
return updated_panel
def add_pending(self, panel_obj, hgnc_gene, action, info=None):
"""Add a pending action to a gene panel
Store the pending actions in panel.pending
Args:
panel_obj(dict): The panel that is about to be updated
hgnc_gene(dict)
action(str): choices=['add','delete','edit']
info(dict): additional gene info (disease_associated_transcripts,
reduced_penetrance, mosaicism, database_entry_version,
inheritance_models, custom_inheritance_models, comment)
Returns:
updated_panel(dict):
"""
valid_actions = ["add", "delete", "edit"]
if action not in valid_actions:
raise ValueError("Invalid action {0}".format(action))
info = info or {}
pending_action = {
"hgnc_id": hgnc_gene["hgnc_id"],
"action": action,
"info": info,
"symbol": hgnc_gene["hgnc_symbol"],
}
updated_panel = self.panel_collection.find_one_and_update(
{"_id": panel_obj["_id"]},
{"$addToSet": {"pending": pending_action}},
return_document=pymongo.ReturnDocument.AFTER,
)
return updated_panel
def reset_pending(self, panel_obj):
"""Reset the pending status of a gene panel
Args:
panel_obj(dict): panel in database to update
Returns:
updated_panel(dict): the updated gene panel
"""
if "pending" in panel_obj:
del panel_obj["pending"]
updated_panel = self.panel_collection.find_one_and_replace(
{"_id": panel_obj["_id"]},
panel_obj,
return_document=pymongo.ReturnDocument.AFTER,
)
return updated_panel
def apply_pending(self, panel_obj, version):
"""Apply the pending changes to an existing gene panel or create a new version of the same panel.
Args:
panel_obj(dict): panel in database to update
version(double): panel version to update
Returns:
inserted_id(str): id of updated panel or the new one
"""
updates = {}
new_panel = deepcopy(panel_obj)
new_panel["pending"] = []
new_panel["date"] = dt.datetime.now()
info_fields = [
"disease_associated_transcripts",
"inheritance_models",
"custom_inheritance_models",
"reduced_penetrance",
"mosaicism",
"database_entry_version",
"comment",
]
new_genes = []
for update in panel_obj.get("pending", []):
hgnc_id = update["hgnc_id"]
# If action is add we create a new gene object
if update["action"] != "add":
updates[hgnc_id] = update
continue
info = update.get("info", {})
gene_obj = {"hgnc_id": hgnc_id, "symbol": update["symbol"]}
for field in info_fields:
if field in info:
gene_obj[field] = info[field]
new_genes.append(gene_obj)
for gene in panel_obj.get("genes", []):
hgnc_id = gene["hgnc_id"]
if hgnc_id not in updates:
new_genes.append(gene)
continue
current_update = updates[hgnc_id]
action = current_update["action"]
info = current_update["info"]
# If action is delete we do not add the gene to new genes
if action == "delete":
continue
if action == "edit":
for field in info_fields:
if field in info:
gene[field] = info[field]
new_genes.append(gene)
new_panel["genes"] = new_genes
new_panel["version"] = float(version)
inserted_id = None
# if the same version of the panel should be updated
if new_panel["version"] == panel_obj["version"]:
# replace panel_obj with new_panel
result = self.panel_collection.find_one_and_replace(
{"_id": panel_obj["_id"]},
new_panel,
return_document=pymongo.ReturnDocument.AFTER,
)
inserted_id = result["_id"]
else: # create a new version of the same panel
new_panel.pop("_id")
# archive the old panel
panel_obj["is_archived"] = True
self.update_panel(panel_obj=panel_obj, date_obj=panel_obj["date"])
# insert the new panel
inserted_id = self.panel_collection.insert_one(new_panel).inserted_id
return inserted_id
def latest_panels(self, institute_id):
"""Return the latest version of each panel."""
panel_names = self.gene_panels(institute_id=institute_id).distinct("panel_name")
for panel_name in panel_names:
panel_obj = self.gene_panel(panel_name)
yield panel_obj
def clinical_symbols(self, case_obj):
"""Return all the clinical gene symbols for a case."""
panel_ids = [panel["panel_id"] for panel in case_obj["panels"]]
query = self.panel_collection.aggregate(
[
{"$match": {"_id": {"$in": panel_ids}}},
{"$unwind": "$genes"},
{"$group": {"_id": "$genes.symbol"}},
]
)
return set(item["_id"] for item in query)
def clinical_hgnc_ids(self, case_obj):
"""Return all the clinical gene hgnc IDs for a case."""
panel_ids = [panel["panel_id"] for panel in case_obj["panels"]]
query = self.panel_collection.aggregate(
[
{"$match": {"_id": {"$in": panel_ids}}},
{"$unwind": "$genes"},
{"$group": {"_id": "$genes.hgnc_id"}},
]
)
return set(item["_id"] for item in query)
| bsd-3-clause | 5,755,674,329,866,806,000 | 32.126374 | 105 | 0.545475 | false |
YetAnotherTimeTracker/yatt | components/keyboard_builder.py | 1 | 8578 | """
Created by anthony on 09.12.2017
keyboard_builder
"""
import json
import logging
from telegram import InlineKeyboardButton, InlineKeyboardMarkup
from emoji import emojize
from abc import ABC, abstractmethod
from components.message_source import message_source
from config.state_config import Action, CallbackData, Language, CommandType
from models.project import ProjectType
log = logging.getLogger(__name__)
BTN_LABEL = 'button_label'
BTN_DATA = 'button_data'
BTN_COMMAND = 'button_command_analogue'
# Actions describe the fact that something happened, but don't specify how the app's state changes in response.
# This is the job of reducers. (c) React Redux
BTN_ACTION = 'button_action'
DATA_LIMIT_IN_BYTES = 64
class Button:
def __init__(self, label, lang, action, command, data=None):
if type(action) is not Action:
raise ValueError('Action provided to button is not of Action type: ' + action)
if type(command) is not CommandType:
raise ValueError('Command provided to button is not of Command type: ' + command)
self.label = str(label) if lang is None else message_source[lang][label] # in case of task as button
self.data = str(data) if data is not None else 'sao'
self.action = action.value
self.command = command.value
def set_label(self, new_label):
self.label = new_label
def set_data(self, new_data):
if new_data is None or '' == new_data:
raise ValueError('Explicitly defined data cannot be empty or null')
self.data = new_data
def build(self):
# this data is passed to callback and is accepted by action reducer
data = {
CallbackData.ACTION.value: self.action,
CallbackData.DATA.value: self.data,
CallbackData.COMMAND.value: self.command
}
serialized_data = json.dumps(data)
encoded = serialized_data.encode('utf-8')
if len(encoded) > DATA_LIMIT_IN_BYTES:
raise ValueError(f'Too large data is going to be passed to to callback: '
f'{len(encoded)} bytes. Limit: {DATA_LIMIT_IN_BYTES} bytes')
new_button = InlineKeyboardButton(emojize(self.label, use_aliases=True), callback_data=serialized_data)
return new_button
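    # Hedged sketch (added comment; assumes 'btn.all_tasks.home' exists in
    # message_source for the chosen language):
    #   StartCommandButton('btn.all_tasks.home', Language.ENG).build()
    # returns an InlineKeyboardButton whose callback_data is a JSON string carrying
    # the action, data and command values keyed by the CallbackData enum values.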
class ViewTaskCommandButton(Button):
def __init__(self, label, lang, action, data):
super().__init__(label, lang, action, CommandType.VIEW, data)
class StartCommandButton(Button):
def __init__(self, label, lang):
super().__init__(label, lang, Action.START, CommandType.START)
class AllTasksCommandButton(Button):
def __init__(self, label, lang, action):
super().__init__(label, lang, action, CommandType.ALL)
class ViewLangCommandButton(Button):
def __init__(self, label, lang, action):
super().__init__(label, lang, action, CommandType.LANG)
class SelectLangButton(ViewLangCommandButton):
"""
Command type and action are already set up
"""
def __init__(self, label, lang, lang_data):
super().__init__(label, lang, Action.SELECTED_LANG)
self.set_data(lang_data.value)
class SelectProjectButton(ViewTaskCommandButton):
"""
Command type, data and action are already set up
"""
def __init__(self, lang, project):
def build_title(str):
return f'btn.new_task.project.{str}.label'
super().__init__(build_title(project.value), lang, Action.TASK_PROJECT_SELECTED, data=project.value)
class Keyboard(ABC):
def __init__(self, button_grid):
self.button_grid = button_grid
def set_button_grid(self, new_button_grid):
self.button_grid = new_button_grid
def set_button_at_position(self, new_button, row, col=None):
if list == type(self.button_grid[row]):
if col is None or col >= len(self.button_grid[row]):
raise ValueError('Column is null or out of range')
self.button_grid[row][col] = new_button
else:
self.button_grid[row] = new_button
def build(self):
"""
        Creates _inline_ keyboard and returns its markup with grid of buttons like this:
[
{ English },
{ Русский }
],
{ Exit },
[
{ X },
{ AB },
{ Y }
]
-->
[ English ][ Русский ]
[ Exit ]
[ X ][ AB ][ Y ]
"""
def is_singleton_list(obj):
return list == type(obj) and 1 == len(obj)
if self.button_grid is None:
raise ValueError('Button grid is empty. Cannot build keyboard')
buttons = []
for grid_element in self.button_grid:
# nested element can be a sub-grid (list with buttons)
if list == type(grid_element) and 1 < len(grid_element):
button_row = []
for element in grid_element:
new_button = element.build()
button_row.append(new_button)
buttons.append(button_row)
# or single button (dict or singleton list of single button)
elif issubclass(grid_element.__class__, Button) or is_singleton_list(grid_element):
if list == type(grid_element):
grid_element = grid_element[0]
new_button = grid_element.build()
buttons.append([new_button])
else:
raise ValueError('Incorrect type of grid or sub-grid provided')
kb = InlineKeyboardMarkup(buttons)
return kb
class ViewTaskKb(Keyboard):
def __init__(self, task_id, lang):
super().__init__([
[
ViewTaskCommandButton('btn.view_task.mark_as_done.label', lang, Action.TASK_MARK_AS_DONE, data=task_id),
ViewTaskCommandButton('btn.view_task.disable_notify.label', lang, Action.TASK_DISABLE, data=task_id),
ViewTaskCommandButton('btn.view_task.delete_task.label', lang, Action.TASK_DELETE, data=task_id)
],
[
AllTasksCommandButton('btn.view_task.upcoming.label', lang, Action.LIST_UPCOMING),
AllTasksCommandButton('btn.all_tasks.completed', lang, Action.LIST_COMPLETED),
StartCommandButton('btn.all_tasks.home', lang)
]
])
class SelectLangKb(Keyboard):
def __init__(self, lang):
super().__init__([
[
SelectLangButton('btn.select_lang.eng.label', lang, lang_data=Language.ENG),
SelectLangButton('btn.select_lang.rus.label', lang, lang_data=Language.RUS)
]
])
class StartStateKb(Keyboard):
def __init__(self, lang):
super().__init__([
AllTasksCommandButton('btn.start_state.to_tasks.upcoming.label', lang, Action.LIST_UPCOMING),
AllTasksCommandButton('btn.start_state.to_tasks.completed.label', lang, Action.LIST_COMPLETED),
AllTasksCommandButton('btn.start_state.to_tasks.all.label', lang, Action.LIST_ALL),
ViewLangCommandButton('btn.start_state.select_lang.label', lang, Action.VIEW_LANG)
])
class SelectProjectKb(Keyboard):
def __init__(self, lang):
super().__init__([
[
SelectProjectButton(lang, ProjectType.PERSONAL),
SelectProjectButton(lang, ProjectType.STUDY),
SelectProjectButton(lang, ProjectType.WORK),
SelectProjectButton(lang, ProjectType.OTHER),
]
])
class TasksAsButtons(Keyboard):
def __init__(self, tasks, lang):
super().__init__(None)
button_grid = []
for task in tasks:
btn_label = (':white_check_mark: ' if task.is_task_completed() else ':black_square_button: ') + task.get_description()
task_as_button = ViewTaskCommandButton(btn_label, None, Action.TASK_VIEW, task.get_id())
button_grid.append(task_as_button)
# add navigation buttons
# TODO add refresh button
button_grid.append([
AllTasksCommandButton('btn.all_tasks.upcoming', lang, Action.LIST_UPCOMING),
AllTasksCommandButton('btn.all_tasks.completed', lang, Action.LIST_COMPLETED),
StartCommandButton('btn.all_tasks.home', lang)
])
self.set_button_grid(button_grid)
| mit | 322,103,586,581,024,400 | 34.098361 | 130 | 0.59645 | false |
santisiri/popego | envs/ALPHA-POPEGO/lib/python2.5/site-packages/numpy-1.0.4-py2.5-linux-x86_64.egg/numpy/lib/shape_base.py | 1 | 22670 | __all__ = ['atleast_1d','atleast_2d','atleast_3d','vstack','hstack',
'column_stack','row_stack', 'dstack','array_split','split','hsplit',
'vsplit','dsplit','apply_over_axes','expand_dims',
'apply_along_axis', 'kron', 'tile', 'get_array_wrap']
import numpy.core.numeric as _nx
from numpy.core.numeric import asarray, zeros, newaxis, outer, \
concatenate, isscalar, array, asanyarray
from numpy.core.fromnumeric import product, reshape
def apply_along_axis(func1d,axis,arr,*args):
""" Execute func1d(arr[i],*args) where func1d takes 1-D arrays
and arr is an N-d array. i varies so as to apply the function
along the given axis for each 1-d subarray in arr.
"""
arr = asarray(arr)
nd = arr.ndim
if axis < 0:
axis += nd
if (axis >= nd):
raise ValueError("axis must be less than arr.ndim; axis=%d, rank=%d."
% (axis,nd))
ind = [0]*(nd-1)
i = zeros(nd,'O')
indlist = range(nd)
indlist.remove(axis)
i[axis] = slice(None,None)
outshape = asarray(arr.shape).take(indlist)
i.put(indlist, ind)
res = func1d(arr[tuple(i.tolist())],*args)
# if res is a number, then we have a smaller output array
if isscalar(res):
outarr = zeros(outshape,asarray(res).dtype)
outarr[tuple(ind)] = res
Ntot = product(outshape)
k = 1
while k < Ntot:
# increment the index
ind[-1] += 1
n = -1
while (ind[n] >= outshape[n]) and (n > (1-nd)):
ind[n-1] += 1
ind[n] = 0
n -= 1
i.put(indlist,ind)
res = func1d(arr[tuple(i.tolist())],*args)
outarr[tuple(ind)] = res
k += 1
return outarr
else:
Ntot = product(outshape)
holdshape = outshape
outshape = list(arr.shape)
outshape[axis] = len(res)
outarr = zeros(outshape,asarray(res).dtype)
outarr[tuple(i.tolist())] = res
k = 1
while k < Ntot:
# increment the index
ind[-1] += 1
n = -1
while (ind[n] >= holdshape[n]) and (n > (1-nd)):
ind[n-1] += 1
ind[n] = 0
n -= 1
i.put(indlist, ind)
res = func1d(arr[tuple(i.tolist())],*args)
outarr[tuple(i.tolist())] = res
k += 1
return outarr
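# Hedged example (added comment, not part of the original docstring):
#   >>> import numpy as np
#   >>> np.apply_along_axis(sum, 0, np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]))
#   array([12, 15, 18])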
def apply_over_axes(func, a, axes):
"""Apply a function repeatedly over multiple axes, keeping the same shape
for the resulting array.
func is called as res = func(a, axis). The result is assumed
to be either the same shape as a or have one less dimension.
This call is repeated for each axis in the axes sequence.
"""
val = asarray(a)
N = a.ndim
if array(axes).ndim == 0:
axes = (axes,)
for axis in axes:
if axis < 0: axis = N + axis
args = (val, axis)
res = func(*args)
if res.ndim == val.ndim:
val = res
else:
res = expand_dims(res,axis)
if res.ndim == val.ndim:
val = res
else:
raise ValueError, "function is not returning"\
" an array of correct shape"
return val
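# Illustrative usage sketch (not part of the upstream module): reduces over
# two axes of a small 3-d array while keeping the number of dimensions, using
# the already-imported `product` as the reducing function.
def _example_apply_over_axes():
    a = _nx.array([[[1, 2], [3, 4]],
                   [[5, 6], [7, 8]]])
    # multiply over axes 0 and 2; the result keeps ndim=3 with shape (1, 2, 1)
    return apply_over_axes(product, a, [0, 2])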
def expand_dims(a, axis):
"""Expand the shape of a by including newaxis before given axis.
"""
a = asarray(a)
shape = a.shape
if axis < 0:
axis = axis + len(shape) + 1
return a.reshape(shape[:axis] + (1,) + shape[axis:])
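# Illustrative usage sketch (not part of the upstream module): inserts a new
# leading axis, turning a length-3 vector into a 1x3 row.
def _example_expand_dims():
    a = _nx.array([1, 2, 3])
    # result has shape (1, 3); expand_dims(a, 1) would give shape (3, 1)
    return expand_dims(a, 0)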
def atleast_1d(*arys):
""" Force a sequence of arrays to each be at least 1D.
Description:
Force an array to be at least 1D. If an array is 0D, the
array is converted to a single row of values. Otherwise,
the array is unaltered.
Arguments:
*arys -- arrays to be converted to 1 or more dimensional array.
Returns:
input array converted to at least 1D array.
"""
res = []
for ary in arys:
res.append(array(ary,copy=False,subok=True,ndmin=1))
if len(res) == 1:
return res[0]
else:
return res
def atleast_2d(*arys):
""" Force a sequence of arrays to each be at least 2D.
Description:
        Force each array to be at least 2D. If the array
is 0D or 1D, the array is converted to a single
row of values. Otherwise, the array is unaltered.
Arguments:
arys -- arrays to be converted to 2 or more dimensional array.
Returns:
input array converted to at least 2D array.
"""
res = []
for ary in arys:
res.append(array(ary,copy=False,subok=True,ndmin=2))
if len(res) == 1:
return res[0]
else:
return res
def atleast_3d(*arys):
""" Force a sequence of arrays to each be at least 3D.
Description:
        Force each array to be at least 3D. If the array is 0D or 1D,
        the array is converted to a single 1xNx1 array of values where
        N is the original length of the array. If the array is 2D, the
        array is converted to a single MxNx1 array of values where MxN
        is the original shape of the array. Otherwise, the array is
unaltered.
Arguments:
arys -- arrays to be converted to 3 or more dimensional array.
Returns:
input array converted to at least 3D array.
"""
res = []
for ary in arys:
ary = asarray(ary)
if len(ary.shape) == 0:
result = ary.reshape(1,1,1)
elif len(ary.shape) == 1:
result = ary[newaxis,:,newaxis]
elif len(ary.shape) == 2:
result = ary[:,:,newaxis]
else:
result = ary
res.append(result)
if len(res) == 1:
return res[0]
else:
return res
def vstack(tup):
""" Stack arrays in sequence vertically (row wise)
Description:
Take a sequence of arrays and stack them vertically
to make a single array. All arrays in the sequence
must have the same shape along all but the first axis.
vstack will rebuild arrays divided by vsplit.
Arguments:
tup -- sequence of arrays. All arrays must have the same
shape.
Examples:
>>> import numpy
>>> a = array((1,2,3))
>>> b = array((2,3,4))
>>> numpy.vstack((a,b))
array([[1, 2, 3],
[2, 3, 4]])
>>> a = array([[1],[2],[3]])
>>> b = array([[2],[3],[4]])
>>> numpy.vstack((a,b))
array([[1],
[2],
[3],
[2],
[3],
[4]])
"""
return _nx.concatenate(map(atleast_2d,tup),0)
def hstack(tup):
""" Stack arrays in sequence horizontally (column wise)
Description:
Take a sequence of arrays and stack them horizontally
to make a single array. All arrays in the sequence
must have the same shape along all but the second axis.
hstack will rebuild arrays divided by hsplit.
Arguments:
tup -- sequence of arrays. All arrays must have the same
shape.
Examples:
>>> import numpy
>>> a = array((1,2,3))
>>> b = array((2,3,4))
>>> numpy.hstack((a,b))
array([1, 2, 3, 2, 3, 4])
>>> a = array([[1],[2],[3]])
>>> b = array([[2],[3],[4]])
>>> numpy.hstack((a,b))
array([[1, 2],
[2, 3],
[3, 4]])
"""
return _nx.concatenate(map(atleast_1d,tup),1)
row_stack = vstack
def column_stack(tup):
""" Stack 1D arrays as columns into a 2D array
Description:
Take a sequence of 1D arrays and stack them as columns
to make a single 2D array. All arrays in the sequence
must have the same first dimension. 2D arrays are
stacked as-is, just like with hstack. 1D arrays are turned
into 2D columns first.
Arguments:
tup -- sequence of 1D or 2D arrays. All arrays must have the same
first dimension.
Examples:
>>> import numpy
>>> a = array((1,2,3))
>>> b = array((2,3,4))
>>> numpy.column_stack((a,b))
array([[1, 2],
[2, 3],
[3, 4]])
"""
arrays = []
for v in tup:
arr = array(v,copy=False,subok=True)
if arr.ndim < 2:
arr = array(arr,copy=False,subok=True,ndmin=2).T
arrays.append(arr)
return _nx.concatenate(arrays,1)
def dstack(tup):
""" Stack arrays in sequence depth wise (along third dimension)
Description:
Take a sequence of arrays and stack them along the third axis.
All arrays in the sequence must have the same shape along all
but the third axis. This is a simple way to stack 2D arrays
(images) into a single 3D array for processing.
dstack will rebuild arrays divided by dsplit.
Arguments:
tup -- sequence of arrays. All arrays must have the same
shape.
Examples:
>>> import numpy
>>> a = array((1,2,3))
>>> b = array((2,3,4))
>>> numpy.dstack((a,b))
array([[[1, 2],
[2, 3],
[3, 4]]])
>>> a = array([[1],[2],[3]])
>>> b = array([[2],[3],[4]])
>>> numpy.dstack((a,b))
array([[[1, 2]],
<BLANKLINE>
[[2, 3]],
<BLANKLINE>
[[3, 4]]])
"""
return _nx.concatenate(map(atleast_3d,tup),2)
def _replace_zero_by_x_arrays(sub_arys):
for i in range(len(sub_arys)):
if len(_nx.shape(sub_arys[i])) == 0:
sub_arys[i] = _nx.array([])
elif _nx.sometrue(_nx.equal(_nx.shape(sub_arys[i]),0)):
sub_arys[i] = _nx.array([])
return sub_arys
def array_split(ary,indices_or_sections,axis = 0):
""" Divide an array into a list of sub-arrays.
Description:
Divide ary into a list of sub-arrays along the
specified axis. If indices_or_sections is an integer,
ary is divided into that many equally sized arrays.
If it is impossible to make an equal split, each of the
            leading arrays in the list has one additional member. If
indices_or_sections is a list of sorted integers, its
entries define the indexes where ary is split.
Arguments:
ary -- N-D array.
Array to be divided into sub-arrays.
indices_or_sections -- integer or 1D array.
If integer, defines the number of (close to) equal sized
sub-arrays. If it is a 1D array of sorted indices, it
defines the indexes at which ary is divided. Any empty
list results in a single sub-array equal to the original
array.
axis -- integer. default=0.
Specifies the axis along which to split ary.
Caveats:
Currently, the default for axis is 0. This
means a 2D array is divided into multiple groups
            of rows.  This seems like the appropriate default.
"""
try:
Ntotal = ary.shape[axis]
except AttributeError:
Ntotal = len(ary)
try: # handle scalar case.
Nsections = len(indices_or_sections) + 1
div_points = [0] + list(indices_or_sections) + [Ntotal]
except TypeError: #indices_or_sections is a scalar, not an array.
Nsections = int(indices_or_sections)
if Nsections <= 0:
raise ValueError, 'number sections must be larger than 0.'
Neach_section,extras = divmod(Ntotal,Nsections)
section_sizes = [0] + \
extras * [Neach_section+1] + \
(Nsections-extras) * [Neach_section]
div_points = _nx.array(section_sizes).cumsum()
sub_arys = []
sary = _nx.swapaxes(ary,axis,0)
for i in range(Nsections):
st = div_points[i]; end = div_points[i+1]
sub_arys.append(_nx.swapaxes(sary[st:end],axis,0))
    # there is a weird issue with array slicing that allows
    # 0x10 arrays and other such things. The following kludge is needed
    # to get around this issue.
    sub_arys = _replace_zero_by_x_arrays(sub_arys)
    # end kludge.
return sub_arys
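# Illustrative usage sketch (not part of the upstream module): splits a
# length-5 array into three pieces; unlike split(), the pieces need not be
# equal, so the leading sub-arrays receive the extra elements.
def _example_array_split():
    a = _nx.array([0, 1, 2, 3, 4])
    # -> [array([0, 1]), array([2, 3]), array([4])]
    return array_split(a, 3)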
def split(ary,indices_or_sections,axis=0):
""" Divide an array into a list of sub-arrays.
Description:
Divide ary into a list of sub-arrays along the
specified axis. If indices_or_sections is an integer,
ary is divided into that many equally sized arrays.
If it is impossible to make an equal split, an error is
raised. This is the only way this function differs from
the array_split() function. If indices_or_sections is a
list of sorted integers, its entries define the indexes
where ary is split.
Arguments:
ary -- N-D array.
Array to be divided into sub-arrays.
indices_or_sections -- integer or 1D array.
If integer, defines the number of (close to) equal sized
sub-arrays. If it is a 1D array of sorted indices, it
defines the indexes at which ary is divided. Any empty
list results in a single sub-array equal to the original
array.
axis -- integer. default=0.
Specifies the axis along which to split ary.
Caveats:
Currently, the default for axis is 0. This
means a 2D array is divided into multiple groups
of rows. This seems like the appropriate default
"""
try: len(indices_or_sections)
except TypeError:
sections = indices_or_sections
N = ary.shape[axis]
if N % sections:
raise ValueError, 'array split does not result in an equal division'
res = array_split(ary,indices_or_sections,axis)
return res
def hsplit(ary,indices_or_sections):
""" Split ary into multiple columns of sub-arrays
Description:
Split a single array into multiple sub arrays. The array is
divided into groups of columns. If indices_or_sections is
an integer, ary is divided into that many equally sized sub arrays.
If it is impossible to make the sub-arrays equally sized, the
operation throws a ValueError exception. See array_split and
split for other options on indices_or_sections.
Arguments:
ary -- N-D array.
Array to be divided into sub-arrays.
indices_or_sections -- integer or 1D array.
If integer, defines the number of (close to) equal sized
sub-arrays. If it is a 1D array of sorted indices, it
defines the indexes at which ary is divided. Any empty
list results in a single sub-array equal to the original
array.
Returns:
sequence of sub-arrays. The returned arrays have the same
number of dimensions as the input array.
Related:
hstack, split, array_split, vsplit, dsplit.
Examples:
>>> import numpy
>>> a= array((1,2,3,4))
>>> numpy.hsplit(a,2)
[array([1, 2]), array([3, 4])]
>>> a = array([[1,2,3,4],[1,2,3,4]])
>>> hsplit(a,2)
[array([[1, 2],
[1, 2]]), array([[3, 4],
[3, 4]])]
"""
if len(_nx.shape(ary)) == 0:
raise ValueError, 'hsplit only works on arrays of 1 or more dimensions'
if len(ary.shape) > 1:
return split(ary,indices_or_sections,1)
else:
return split(ary,indices_or_sections,0)
def vsplit(ary,indices_or_sections):
""" Split ary into multiple rows of sub-arrays
Description:
Split a single array into multiple sub arrays. The array is
divided into groups of rows. If indices_or_sections is
an integer, ary is divided into that many equally sized sub arrays.
If it is impossible to make the sub-arrays equally sized, the
operation throws a ValueError exception. See array_split and
split for other options on indices_or_sections.
Arguments:
ary -- N-D array.
Array to be divided into sub-arrays.
indices_or_sections -- integer or 1D array.
If integer, defines the number of (close to) equal sized
sub-arrays. If it is a 1D array of sorted indices, it
defines the indexes at which ary is divided. Any empty
list results in a single sub-array equal to the original
array.
Returns:
sequence of sub-arrays. The returned arrays have the same
number of dimensions as the input array.
Caveats:
How should we handle 1D arrays here? I am currently raising
an error when I encounter them. Any better approach?
        Should we reduce the returned arrays to their minimum dimensions
        by getting rid of any dimensions that are 1?
Related:
vstack, split, array_split, hsplit, dsplit.
Examples:
import numpy
>>> a = array([[1,2,3,4],
... [1,2,3,4]])
>>> numpy.vsplit(a,2)
[array([[1, 2, 3, 4]]), array([[1, 2, 3, 4]])]
"""
if len(_nx.shape(ary)) < 2:
raise ValueError, 'vsplit only works on arrays of 2 or more dimensions'
return split(ary,indices_or_sections,0)
def dsplit(ary,indices_or_sections):
""" Split ary into multiple sub-arrays along the 3rd axis (depth)
Description:
Split a single array into multiple sub arrays. The array is
divided into groups along the 3rd axis. If indices_or_sections is
an integer, ary is divided into that many equally sized sub arrays.
If it is impossible to make the sub-arrays equally sized, the
operation throws a ValueError exception. See array_split and
split for other options on indices_or_sections.
Arguments:
ary -- N-D array.
Array to be divided into sub-arrays.
indices_or_sections -- integer or 1D array.
If integer, defines the number of (close to) equal sized
sub-arrays. If it is a 1D array of sorted indices, it
defines the indexes at which ary is divided. Any empty
list results in a single sub-array equal to the original
array.
Returns:
sequence of sub-arrays. The returned arrays have the same
number of dimensions as the input array.
Caveats:
See vsplit caveats.
Related:
dstack, split, array_split, hsplit, vsplit.
Examples:
>>> a = array([[[1,2,3,4],[1,2,3,4]]])
>>> dsplit(a,2)
[array([[[1, 2],
[1, 2]]]), array([[[3, 4],
[3, 4]]])]
"""
if len(_nx.shape(ary)) < 3:
        raise ValueError, 'dsplit only works on arrays of 3 or more dimensions'
return split(ary,indices_or_sections,2)
def get_array_wrap(*args):
"""Find the wrapper for the array with the highest priority.
In case of ties, leftmost wins. If no wrapper is found, return None
"""
wrappers = [(getattr(x, '__array_priority__', 0), -i,
x.__array_wrap__) for i, x in enumerate(args)
if hasattr(x, '__array_wrap__')]
wrappers.sort()
if wrappers:
return wrappers[-1][-1]
return None
def kron(a,b):
"""kronecker product of a and b
Kronecker product of two arrays is block array
[[ a[ 0 ,0]*b, a[ 0 ,1]*b, ... , a[ 0 ,n-1]*b ],
[ ... ... ],
[ a[m-1,0]*b, a[m-1,1]*b, ... , a[m-1,n-1]*b ]]
"""
wrapper = get_array_wrap(a, b)
b = asanyarray(b)
a = array(a,copy=False,subok=True,ndmin=b.ndim)
ndb, nda = b.ndim, a.ndim
if (nda == 0 or ndb == 0):
return _nx.multiply(a,b)
as_ = a.shape
bs = b.shape
if not a.flags.contiguous:
a = reshape(a, as_)
if not b.flags.contiguous:
b = reshape(b, bs)
nd = ndb
if (ndb != nda):
if (ndb > nda):
as_ = (1,)*(ndb-nda) + as_
else:
bs = (1,)*(nda-ndb) + bs
nd = nda
result = outer(a,b).reshape(as_+bs)
axis = nd-1
for _ in xrange(nd):
result = concatenate(result, axis=axis)
if wrapper is not None:
result = wrapper(result)
return result
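# Illustrative usage sketch (not part of the upstream module): Kronecker
# product of two 2x2 matrices, matching the block layout described in the
# docstring above.
def _example_kron():
    a = _nx.array([[1, 2], [3, 4]])
    b = _nx.array([[0, 1], [1, 0]])
    # -> array([[0, 1, 0, 2],
    #           [1, 0, 2, 0],
    #           [0, 3, 0, 4],
    #           [3, 0, 4, 0]])
    return kron(a, b)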
def tile(A, reps):
"""Repeat an array the number of times given in the integer tuple, reps.
If reps has length d, the result will have dimension of max(d, A.ndim).
If reps is scalar it is treated as a 1-tuple.
If A.ndim < d, A is promoted to be d-dimensional by prepending new axes.
So a shape (3,) array is promoted to (1,3) for 2-D replication,
or shape (1,1,3) for 3-D replication.
If this is not the desired behavior, promote A to d-dimensions manually
before calling this function.
If d < A.ndim, tup is promoted to A.ndim by pre-pending 1's to it. Thus
for an A.shape of (2,3,4,5), a tup of (2,2) is treated as (1,1,2,2)
Examples:
>>> a = array([0,1,2])
>>> tile(a,2)
array([0, 1, 2, 0, 1, 2])
>>> tile(a,(1,2))
array([[0, 1, 2, 0, 1, 2]])
>>> tile(a,(2,2))
array([[0, 1, 2, 0, 1, 2],
[0, 1, 2, 0, 1, 2]])
>>> tile(a,(2,1,2))
array([[[0, 1, 2, 0, 1, 2]],
<BLANKLINE>
[[0, 1, 2, 0, 1, 2]]])
See Also:
repeat
"""
try:
tup = tuple(reps)
except TypeError:
tup = (reps,)
d = len(tup)
c = _nx.array(A,copy=False,subok=True,ndmin=d)
shape = list(c.shape)
n = max(c.size,1)
if (d < c.ndim):
tup = (1,)*(c.ndim-d) + tup
for i, nrep in enumerate(tup):
if nrep!=1:
c = c.reshape(-1,n).repeat(nrep,0)
dim_in = shape[i]
dim_out = dim_in*nrep
shape[i] = dim_out
n /= max(dim_in,1)
return c.reshape(shape)
| bsd-3-clause | 3,260,722,240,737,224,700 | 34.813586 | 80 | 0.545126 | false |
kizbitz/train | train/vpc/config.py | 2 | 3380 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import amis
def check_env(env, default=None):
"""Check/Set environment variables"""
if not os.environ.get(env) and not default:
print "Error: '{0}' environment variable not set".format(env)
sys.exit()
return os.environ.get(env, default)
def check_user_file(VPC, user_file):
"""Check/create USER_FILE"""
if user_file:
return user_file
elif os.path.exists('/host/{0}/users.cfg'.format(VPC)):
return '/host/{0}/users.cfg'.format(VPC)
else:
if not os.path.exists('/host/{0}'.format(VPC)):
os.makedirs('/host/{0}'.format(VPC))
with open('/host/{0}/users.cfg'.format(VPC), 'w') as f:
f.write(TRAINER + '\n')
return '/host/{0}/users.cfg'.format(VPC)
def get_email_template(VPC, template):
"""Check EMAIL_TEMPLATE"""
if template:
return template
elif os.path.exists('/host/{0}/email.py'.format(VPC)):
return '/host/{0}/email.py'.format(VPC)
else:
return '/home/train/train/templates/email.py'
def check_ses_region(env):
"""Check/Set SES_REGION environment variable"""
# Available SES Regions: http://docs.aws.amazon.com/general/latest/gr/rande.html#ses_region
SES_REGIONS = ['us-east-1', 'us-west-2', 'eu-west-1']
if not os.environ.get(env):
print "Error: '{0}' environment variable not set".format(env)
sys.exit()
else:
if not os.environ.get(env) in SES_REGIONS:
print "Error: The '{0}' region specified is not one of the available SES regions".format(os.environ.get(env))
print " See: http://docs.aws.amazon.com/general/latest/gr/rande.html#ses_region"
sys.exit()
else:
return os.environ.get(env)
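# Illustrative sketch (not part of the original module): shows how the helper
# functions above are typically combined.  The variable names are hypothetical,
# and the calls exit or create files exactly as documented above, so this only
# demonstrates the call pattern.
def _example_usage():
    trainer = check_env('TRAINER')                   # required, exits if unset
    vpc = check_env('VPC', 'train')                  # optional with a default
    user_file = check_user_file(vpc, os.environ.get('USER_FILE'))
    template = get_email_template(vpc, os.environ.get('EMAIL_TEMPLATE'))
    return trainer, vpc, user_file, template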
# Required environment variables
# ==============================
# Trainer name. Used to tag VPC, Security Groups, etc...
TRAINER = check_env('TRAINER')
# AWS region, id, and key
AWS_REGION = check_env('AWS_REGION')
AWS_ACCESS_KEY_ID = check_env('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = check_env('AWS_SECRET_ACCESS_KEY')
# Optional environment variables
# ==============================
# Tag for VPC, labs, instances, etc...
VPC = check_env('VPC', 'train')
# Root lab directory
LAB_DIR = check_env('LAB_DIR', '/home/train/train/labs/')
# Full path to user configuration file
USER_FILE = check_user_file(VPC, os.environ.get('USER_FILE'))
# Email Template
EMAIL_TEMPLATE = get_email_template(VPC, os.environ.get('EMAIL_TEMPLATE'))
# Note: Checked in ses.py
# SES_REGION
# SES_FROM_EMAIL
# SES_FROM_NAME
# Other
# =====
# AWS AMI dictionary
AMIS = getattr(amis, AWS_REGION.upper().replace('-', '_'))
# AWS IAM Profile
IAM_PROFILE = TRAINER + '-{0}'.format(VPC)
# AWS Gateway
IGW = TRAINER + '-{0}-igw'.format(VPC)
# IAM Policy
POLICY = """{
"Statement": [
{
"Effect": "Allow",
"Action": [
"ec2:DescribeAvailabilityZones",
"ec2:DescribeTags"
],
"Resource": [
"*"
]
}
]
}"""
# AWS Network ACL
NETWORK_ACL = TRAINER + '-{0}-network-acl'.format(VPC)
# AWS Route Table
ROUTE_TABLE = TRAINER + '-{0}-route-table'.format(VPC)
# AWS VPC CIDR
VPC_CIDR = "10.0.0.0/16"
# AWS VPC Tag
VPC_TAG = TRAINER + '-{0}'.format(VPC)
# AWS Zones
ZONES=['a', 'b', 'c', 'd', 'e', 'f']
| apache-2.0 | -8,407,919,099,661,274,000 | 23.671533 | 121 | 0.602959 | false |
yosshy/bergenholm | tests/views/test_hosts.py | 1 | 3286 | import copy
from flask import json
from tests import base
from bergenholm.views import hosts
API = "/api/1.0/hosts/"
class HostsViewTestCase(base.TestCase):
def test_get_hosts(self):
expected = {u"hosts": [self.host_id]}
result = self.client.get(API)
self.assertEqual(result.status_code, 200)
self.assertEqual(json.loads(result.data), expected)
def test_get_host(self):
result = self.client.get(API + self.host_id)
self.assertEqual(result.status_code, 200)
self.assertEqual(json.loads(result.data), self.host_params)
def test_create_host(self):
result = self.client.post(API + self.host_id2,
data=json.dumps(self.host_params2),
headers=self.headers)
self.assertEqual(result.status_code, 201)
result = self.client.get(API + self.host_id2)
self.assertEqual(result.status_code, 200)
self.assertEqual(json.loads(result.data), self.host_params2)
def test_update_host(self):
result = self.client.put(API + self.host_id,
data=json.dumps(self.host_params2),
headers=self.headers)
self.assertEqual(result.status_code, 202)
result = self.client.get(API + self.host_id)
self.assertEqual(result.status_code, 200)
self.assertEqual(json.loads(result.data), self.host_params2)
def test_delete_host(self):
result = self.client.delete(API + self.host_id)
self.assertEqual(result.status_code, 204)
result = self.client.get(API + self.host_id)
self.assertEqual(result.status_code, 404)
def test_get_host_params(self):
expected = {
u'base_url': u'http://127.0.0.1',
u'groups': [u'ubuntu', u'default'],
u'hostname': u'test-200',
u'image_base_url': u'http://127.0.0.1/images',
u'ipaddr': u'192.168.10.200',
u'ipxe_script': u'ubuntu.temp',
u'kernel': u'http://127.0.0.1/images/linux',
u'kernel_opts': u'quiet',
u'mirror_host': u'jp.archive.ubuntu.com',
u'mirror_path': u'/ubuntu',
u'mirror_scheme': u'http',
u'module': u'http://127.0.0.1/images/initrd.gz',
u'module1': u'http://127.0.0.1/images/initrd1.gz',
u'power_driver': 'dummy',
u'test': u'test',
u'uuid': self.host_id}
result = self.client.get(API + self.host_id + "?params=all")
self.assertEqual(result.status_code, 200)
self.assertEqual(json.loads(result.data), expected)
def test_mark_host_installed(self):
params = copy.deepcopy(self.host_params)
params["groups"].append("installed")
result = self.client.get(API + self.host_id + "?installed=mark")
self.assertEqual(result.status_code, 200)
result = self.client.get(API + self.host_id)
self.assertEqual(json.loads(result.data), params)
result = self.client.get(API + self.host_id + "?installed=unmark")
self.assertEqual(result.status_code, 200)
result = self.client.get(API + self.host_id)
self.assertEqual(json.loads(result.data), self.host_params)
| apache-2.0 | -1,911,109,451,051,491,000 | 38.590361 | 74 | 0.593427 | false |
sveetch/boussole | tests/010_inspector/007_parents.py | 1 | 4810 | # -*- coding: utf-8 -*-
import os
def test_001_basic(settings, inspector):
"""Looking for parents of basic sample"""
sources = [
os.path.join(settings.sample_path, 'main_basic.scss'),
os.path.join(settings.sample_path, 'main_depth_import-3.scss'),
os.path.join(settings.sample_path, 'main_with_subimports.scss'),
os.path.join(settings.sample_path, 'main_using_libs.scss'),
]
sourcepath = os.path.join(settings.sample_path, 'main_basic.scss')
inspector.inspect(*sources, library_paths=settings.libraries_fixture_paths)
parents = inspector.parents(sourcepath)
assert parents == set([
os.path.join(settings.sample_path, 'main_depth_import-1.scss'),
os.path.join(settings.sample_path, 'main_depth_import-2.scss'),
os.path.join(settings.sample_path, 'main_depth_import-3.scss'),
os.path.join(settings.sample_path, 'main_with_subimports.scss'),
os.path.join(settings.sample_path, 'main_using_libs.scss'),
])
def test_002_vendor(settings, inspector):
"""Looking for parents of vendor component"""
sources = [
os.path.join(settings.sample_path, 'main_syntax.scss'),
os.path.join(settings.sample_path, 'main_commented.scss'),
os.path.join(settings.sample_path, 'main_basic.scss'),
os.path.join(settings.sample_path, 'main_depth_import-1.scss'),
os.path.join(settings.sample_path, 'main_depth_import-2.scss'),
os.path.join(settings.sample_path, 'main_depth_import-3.scss'),
os.path.join(settings.sample_path, 'main_with_subimports.scss'),
os.path.join(settings.sample_path, 'main_using_libs.scss'),
os.path.join(settings.sample_path, 'main_circular_0.scss'),
os.path.join(settings.sample_path, 'main_circular_1.scss'),
os.path.join(settings.sample_path, 'main_circular_2.scss'),
os.path.join(settings.sample_path, 'main_circular_3.scss'),
os.path.join(settings.sample_path, 'main_circular_4.scss'),
os.path.join(settings.sample_path, 'main_circular_bridge.scss'),
os.path.join(settings.sample_path, 'main_circular_5.scss'),
]
sourcepath = os.path.join(settings.sample_path, '_vendor.scss')
inspector.inspect(*sources, library_paths=settings.libraries_fixture_paths)
parents = inspector.parents(sourcepath)
assert parents == set([
os.path.join(settings.sample_path, '_sass_filetest.sass'),
os.path.join(settings.sample_path, 'main_depth_import-1.scss'),
os.path.join(settings.sample_path, 'main_depth_import-2.scss'),
os.path.join(settings.sample_path, 'main_circular_4.scss'),
os.path.join(settings.sample_path, 'main_circular_5.scss'),
os.path.join(settings.sample_path, 'main_circular_bridge.scss'),
os.path.join(settings.sample_path, 'main_commented.scss'),
os.path.join(settings.sample_path, 'main_depth_import-3.scss'),
os.path.join(settings.sample_path, 'main_with_subimports.scss'),
os.path.join(settings.sample_path, 'main_basic.scss'),
os.path.join(settings.sample_path, 'main_syntax.scss'),
os.path.join(settings.sample_path, 'main_circular_3.scss'),
os.path.join(settings.sample_path, 'main_using_libs.scss'),
])
def test_003_library(settings, inspector):
"""Looking for parents of a library component"""
sources = [
os.path.join(settings.sample_path, 'main_syntax.scss'),
os.path.join(settings.sample_path, 'main_commented.scss'),
os.path.join(settings.sample_path, 'main_basic.scss'),
os.path.join(settings.sample_path, 'main_depth_import-1.scss'),
os.path.join(settings.sample_path, 'main_depth_import-2.scss'),
os.path.join(settings.sample_path, 'main_depth_import-3.scss'),
os.path.join(settings.sample_path, 'main_with_subimports.scss'),
os.path.join(settings.sample_path, 'main_using_libs.scss'),
os.path.join(settings.sample_path, 'main_circular_0.scss'),
os.path.join(settings.sample_path, 'main_circular_1.scss'),
os.path.join(settings.sample_path, 'main_circular_2.scss'),
os.path.join(settings.sample_path, 'main_circular_3.scss'),
os.path.join(settings.sample_path, 'main_circular_4.scss'),
os.path.join(settings.sample_path, 'main_circular_bridge.scss'),
os.path.join(settings.sample_path, 'main_circular_5.scss'),
]
sourcepath = os.path.join(settings.lib1_path, 'components/_panels.scss')
inspector.inspect(*sources, library_paths=settings.libraries_fixture_paths)
parents = inspector.parents(sourcepath)
assert parents == set([
os.path.join(settings.lib1_path, 'library_1_fullstack.scss'),
os.path.join(settings.sample_path, 'main_using_libs.scss'),
])
| mit | -5,602,037,356,110,481,000 | 49.631579 | 79 | 0.668815 | false |
wpreimes/gldas | src/gldas/grid.py | 1 | 3720 | # The MIT License (MIT)
#
# Copyright (c) 2018, TU Wien
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
from pygeogrids.grids import BasicGrid
from netCDF4 import Dataset
import os
def subgrid4bbox(grid, min_lon, min_lat, max_lon, max_lat):
gpis, lons, lats, _ = grid.get_grid_points()
assert len(gpis) == len(lats) == len(lons)
bbox_gpis = gpis[np.where((lons <= max_lon) & (lons >= min_lon) &
(lats <= max_lat) & (lats >= min_lat))]
return grid.subgrid_from_gpis(bbox_gpis)
def GLDAS025Grids(only_land=False):
"""
Create global 0.25 DEG gldas grids (origin in bottom left)
Parameters
---------
only_land : bool, optional (default: False)
Uses the land mask to reduce the GLDAS 0.25DEG land grid to land points
only.
Returns
--------
grid : pygeogrids.CellGrid
Either a land grid or a global grid
"""
resolution = 0.25
glob_lons = np.arange(-180 + resolution / 2, 180 + resolution / 2, resolution)
glob_lats = np.arange(-90 + resolution / 2, 90 + resolution / 2, resolution)
lon, lat = np.meshgrid(glob_lons, glob_lats)
glob_grid = BasicGrid(lon.flatten(), lat.flatten()).to_cell_grid(cellsize=5.)
if only_land:
ds = Dataset(os.path.join(os.path.abspath(os.path.dirname(__file__)),
'GLDASp4_landmask_025d.nc4'))
land_lats = ds.variables['lat'][:]
land_mask = ds.variables['GLDAS_mask'][:].flatten().filled() == 0.
dlat = glob_lats.size - land_lats.size
land_mask = np.concatenate((np.ones(dlat * glob_lons.size), land_mask))
land_points = np.ma.masked_array(glob_grid.get_grid_points()[0], land_mask)
land_grid = glob_grid.subgrid_from_gpis(land_points[~land_points.mask].filled())
return land_grid
else:
return glob_grid
def GLDAS025Cellgrid():
return GLDAS025Grids(only_land=False)
def GLDAS025LandGrid():
return GLDAS025Grids(only_land=True)
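# Illustrative sketch (not part of the original module): builds the global
# 0.25 deg grid and clips it to a hypothetical bounding box; the bbox values
# below are assumptions chosen purely for demonstration.
def _example_bbox_subgrid():
    grid = GLDAS025Cellgrid()  # global grid, no land mask file required
    # bbox order expected by subgrid4bbox: (min_lon, min_lat, max_lon, max_lat)
    return subgrid4bbox(grid, 5.0, 45.0, 15.0, 55.0)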
if __name__ == '__main__':
GLDAS025LandGrid()
def load_grid(land_points=True, bbox=None):
"""
Load gldas grid.
Parameters
----------
land_points : bool, optional (default: True)
Reshuffle only land points
bbox : tuple, optional (default: True)
(min_lat, min_lon, max_lat, max_lon)
Bounding box to limit reshuffling to.
"""
if land_points:
subgrid = GLDAS025LandGrid()
if bbox is not None:
subgrid = subgrid4bbox(subgrid, *bbox)
else:
if bbox is not None:
subgrid = subgrid4bbox(GLDAS025Cellgrid(), *bbox)
else:
subgrid = None
    return subgrid
 | bsd-3-clause | 4,018,085,457,937,867,300 | 32.827273 | 88 | 0.654839 | false |
gr3yman/TileControl | TileCtrl.py | 1 | 2772 | #!/usr/env/python3
import subprocess
import tinydb
from tinydb.storages import MemoryStorage
import tkinter as tk
## who knows if this is working or not?
def keypress(event):
if event.keysym == 'Escape':
root.destroy()
x = event.char
if x == "Ctrl-Z":
print('c z')
elif x == "Ctrl-X":
print('c x')
else:
pass
## function to get all active windows
## see man wmctrl for info on wmctrl command
def buildwindb():
windodb = tinydb.TinyDB(storage=MemoryStorage)
windodb.purge()
with subprocess.Popen(['wmctrl -lG | tr -s " "'], shell = True, stdout=subprocess.PIPE, universal_newlines=True) as wmctrlg:
winout = wmctrlg.stdout.read().splitlines()
for line in winout:
winline = line.split(' ')
hexid = winline[0]
dsktp = winline[1]
xpos = winline[2]
ypos = winline[3]
width = winline[4]
height = winline[5]
windodb.insert({'hexid': hexid, 'desktop': dsktp, 'xpos': xpos, 'ypos': ypos, 'width': width, 'height': height})
print(windodb.all())
return windodb
## Function to get screen dimensions and active desktop
## see man wmctrl for info on wmctrl command
## this is also working
def getscreen():
with subprocess.Popen(['wmctrl', '-d'], stdout=subprocess.PIPE, universal_newlines=True) as wmctrld:
wmctrlout = wmctrld.stdout.read().splitlines()
for line in wmctrlout:
if "*" in line:
values = line.split(' ')
deskid = values[0]
print (deskid)
screensize = values[11]
screenx, screeny = screensize.split('x')
return deskid, screenx, screeny
def move(winhxid, sizstrng):
    subprocess.call(['wmctrl', '-i', '-r', winhxid, '-e', sizstrng])
##this is being called and working
##returns int
def tilemeasure(strngxdim, strngydim, strngpnlhght):
screeny = int(strngydim)
panelheight = int(strngpnlhght)
screeny = (screeny - panelheight) - 30
mastery = screeny + 10
screenx = int(strngxdim)
screenx = screenx - 30
masterx = int(round(screenx / 5 * 3))
subx = screenx - masterx
suby = int(round(screeny / 5 * 3))
return screeny, mastery, screenx, masterx, subx, suby
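## Illustrative sketch (not part of the original script): computes the tile
## geometry for an assumed 1920x1080 screen with a 24px panel.  Arguments are
## strings because tilemeasure() converts them with int().
def _example_tilemeasure():
    # -> (screeny, mastery, screenx, masterx, subx, suby)
    return tilemeasure('1920', '1080', '24')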
root = tk.Tk()
root.bind_all('<Key>', keypress)
# don't show the tk window
root.withdraw()
root.mainloop()
while True:
windodb = buildwindb()
deskid, screenx, screeny = getscreen()
panelquery = tinydb.Query()
panel = windodb.get(panelquery.desktop == '-1')
panelheight = panel['height']
    screeny, mastery, screenx, masterx, subx, suby = tilemeasure(screenx, screeny, panelheight)
print(screeny, mastery, screenx, masterx, subx, suby)
| gpl-2.0 | -4,507,359,509,074,542,000 | 29.461538 | 128 | 0.621212 | false |
drtuxwang/system-config | bin/fls.py | 1 | 5121 | #!/usr/bin/env python3
"""
Show full list of files.
"""
import argparse
import glob
import os
import signal
import sys
from typing import Iterator, List, Union
import file_mod
class Options:
"""
Options class
"""
def __init__(self) -> None:
self._args: argparse.Namespace = None
self.parse(sys.argv)
def get_files(self) -> List[str]:
"""
Return list of files.
"""
return self._files
def get_order(self) -> str:
"""
Return display order.
"""
return self._args.order
def get_recursive_flag(self) -> bool:
"""
Return recursive flag.
"""
return self._args.recursive_flag
def get_reverse_flag(self) -> bool:
"""
Return reverse flag.
"""
return self._args.reverse_flag
def _parse_args(self, args: List[str]) -> None:
parser = argparse.ArgumentParser(
description='Show full list of files.',
)
parser.add_argument(
'-R',
dest='recursive_flag',
action='store_true',
help='Show directories recursively.'
)
parser.add_argument(
'-s',
action='store_const',
const='size',
dest='order',
default='name',
help='Sort by size of file.'
)
parser.add_argument(
'-t',
action='store_const',
const='mtime',
dest='order',
default='name',
help='Sort by modification time of file.'
)
parser.add_argument(
'-c',
action='store_const',
const='ctime',
dest='order',
default='name',
help='Sort by meta data change time of file.'
)
parser.add_argument(
'-r',
dest='reverse_flag',
action='store_true',
help='Reverse order.'
)
parser.add_argument(
'files',
nargs='*',
metavar='file',
help='File or directory.'
)
self._args = parser.parse_args(args)
def parse(self, args: List[str]) -> None:
"""
Parse arguments
"""
self._parse_args(args[1:])
if self._args.files:
self._files = self._args.files
else:
self._files = sorted(os.listdir())
class Main:
"""
Main class
"""
def __init__(self) -> None:
try:
self.config()
sys.exit(self.run())
except (EOFError, KeyboardInterrupt):
sys.exit(114)
except SystemExit as exception:
sys.exit(exception)
@staticmethod
def config() -> None:
"""
Configure program
"""
if hasattr(signal, 'SIGPIPE'):
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
if os.name == 'nt':
argv = []
for arg in sys.argv:
files = glob.glob(arg) # Fixes Windows globbing bug
if files:
argv.extend(files)
else:
argv.append(arg)
sys.argv = argv
def _list(self, options: Options, files: List[str]) -> None:
file_stats = []
for file in files:
if os.path.islink(file):
file_stats.append(file_mod.FileStat(file, size=0))
elif os.path.isdir(file):
file_stats.append(file_mod.FileStat(file + os.sep))
elif os.path.isfile(file):
file_stats.append(file_mod.FileStat(file))
for file_stat in self._sorted(options, file_stats):
print("{0:10d} [{1:s}] {2:s}".format(
file_stat.get_size(),
file_stat.get_time_local(),
file_stat.get_file()
))
if (options.get_recursive_flag() and
file_stat.get_file().endswith(os.sep)):
self._list(options, sorted(
glob.glob(file_stat.get_file() + '.*') +
glob.glob(file_stat.get_file() + '*')
))
@staticmethod
def _sorted(
options: Options,
file_stats: List[file_mod.FileStat],
) -> Union[Iterator[file_mod.FileStat], List[file_mod.FileStat]]:
order = options.get_order()
if order == 'ctime':
file_stats = sorted(file_stats, key=lambda s: s.get_time_change())
elif order == 'mtime':
file_stats = sorted(file_stats, key=lambda s: s.get_time())
elif order == 'size':
file_stats = sorted(file_stats, key=lambda s: s.get_size())
if options.get_reverse_flag():
return reversed(file_stats)
return file_stats
def run(self) -> int:
"""
Start program
"""
options = Options()
self._list(options, options.get_files())
return 0
if __name__ == '__main__':
if '--pydoc' in sys.argv:
help(__name__)
else:
Main()
| gpl-2.0 | 926,564,172,749,786,800 | 25.261538 | 78 | 0.486428 | false |
SamiHiltunen/invenio-accounts | examples/app.py | 1 | 4081 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015, 2016 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
u"""Minimal Flask application example for development.
Install the Invenio default theme
You should execute these commands in the examples-directory.
.. code-block:: console
$ pip install invenio-theme
$ pip install invenio-assets
$ flask -a app.py npm
$ cd static
$ npm install
$ cd ..
$ flask -a app.py collect -v
$ flask -a app.py assets build
Create database and tables:
.. code-block:: console
$ flask -a app.py db init
$ flask -a app.py db create
Create a user:
.. code-block:: console
$ flask -a app.py users create info@invenio-software.org -a
$ flask -a app.py users activate info@invenio-software.org
Run the development server:
.. code-block:: console
$ flask -a app.py --debug run
$ flask -a app.py shell
"""
from __future__ import absolute_import, print_function
import os
import pkg_resources
from flask import Flask, render_template
from flask.ext.menu import Menu
from flask_babelex import Babel
from flask_cli import FlaskCLI
from flask_mail import Mail
from flask_security import current_user
from invenio_db import InvenioDB
from invenio_accounts import InvenioAccounts
from invenio_accounts.views import blueprint
try:
pkg_resources.get_distribution('invenio_assets')
from invenio_assets import InvenioAssets
INVENIO_ASSETS_AVAILABLE = True
except pkg_resources.DistributionNotFound:
INVENIO_ASSETS_AVAILABLE = False
try:
pkg_resources.get_distribution('invenio_theme')
from invenio_theme import InvenioTheme
INVENIO_THEME_AVAILABLE = True
except pkg_resources.DistributionNotFound:
INVENIO_THEME_AVAILABLE = False
try:
pkg_resources.get_distribution('invenio_admin')
from invenio_admin import InvenioAdmin
INVENIO_ADMIN_AVAILABLE = True
except pkg_resources.DistributionNotFound:
INVENIO_ADMIN_AVAILABLE = False
# Create Flask application
app = Flask(__name__)
app.config.update(
ACCOUNTS_USE_CELERY=False,
CELERY_ALWAYS_EAGER=True,
CELERY_CACHE_BACKEND="memory",
CELERY_EAGER_PROPAGATES_EXCEPTIONS=True,
CELERY_RESULT_BACKEND="cache",
MAIL_SUPPRESS_SEND=True,
SECRET_KEY="CHANGE_ME",
SECURITY_PASSWORD_SALT="CHANGE_ME_ALSO",
)
if os.environ.get('RECAPTCHA_PUBLIC_KEY') is not None \
and os.environ.get('RECAPTCHA_PRIVATE_KEY') is not None:
app.config.setdefault('RECAPTCHA_PUBLIC_KEY',
os.environ['RECAPTCHA_PUBLIC_KEY'])
app.config.setdefault('RECAPTCHA_PRIVATE_KEY',
os.environ['RECAPTCHA_PRIVATE_KEY'])
FlaskCLI(app)
Babel(app)
Mail(app)
InvenioDB(app)
Menu(app)
InvenioAccounts(app)
if INVENIO_ASSETS_AVAILABLE:
InvenioAssets(app)
if INVENIO_THEME_AVAILABLE:
InvenioTheme(app)
if INVENIO_ADMIN_AVAILABLE:
InvenioAdmin(app, permission_factory=lambda x: x,
view_class_factory=lambda x: x)
app.register_blueprint(blueprint)
@app.route("/")
def index():
"""Basic test view."""
if current_user.is_authenticated:
return render_template("authenticated.html")
else:
return render_template("anonymous.html")
| gpl-2.0 | -6,010,426,815,830,053,000 | 26.567568 | 76 | 0.725 | false |
akkana/pytopo | pytopo/__init__.py | 1 | 1157 | #!/usr/bin/env python
'''pytopo module: display tiled maps from a variety of sources,
along with trackpoints, waypoints and other useful information.
Copyright 2005-2021 by Akkana Peck.
Feel free to use, distribute or modify this program under the terms
of the GPL v2 or, at your option, a later GPL version.
I'd appreciate hearing about it if you make any changes.
'''
__version__ = "1.7"
__author__ = "Akkana Peck <akkana@shallowsky.com>"
__license__ = "GPL v2+"
# Hack to make relative imports work in Python 3 as well as Python 2:
import os, sys; sys.path.append(os.path.dirname(os.path.realpath(__file__)))
from .MapCollection import MapCollection
from .GenericMapCollection import GenericMapCollection
from .TopoMapCollection import TopoMapCollection
from .TopoMapCollection import Topo1MapCollection
from .TopoMapCollection import Topo2MapCollection
from .TiledMapCollection import TiledMapCollection
from .OSMMapCollection import OSMMapCollection
from .MapWindow import MapWindow
from .TrackPoints import TrackPoints
from .MapViewer import MapViewer, ArgParseException
# import trackstats
user_agent = "PyTopo " + __version__
| gpl-2.0 | -806,987,653,849,583,600 | 34.060606 | 76 | 0.779602 | false |
Eric89GXL/scikit-learn | doc/sphinxext/gen_rst.py | 1 | 39133 | """
Example generation for the scikit learn
Generate the rst files for the examples by iterating over the python
example files.
Files that generate images should start with 'plot'
"""
from time import time
import os
import re
import shutil
import traceback
import glob
import sys
import warnings
from StringIO import StringIO
import cPickle
import urllib2
import gzip
import posixpath
try:
    from PIL import Image
except ImportError:
    import Image
import matplotlib
matplotlib.use('Agg')
import token
import tokenize
import numpy as np
###############################################################################
# A tee object to redict streams to multiple outputs
class Tee(object):
def __init__(self, file1, file2):
self.file1 = file1
self.file2 = file2
def write(self, data):
self.file1.write(data)
self.file2.write(data)
def flush(self):
self.file1.flush()
self.file2.flush()
###############################################################################
# Documentation link resolver objects
def get_data(url):
"""Helper function to get data over http or from a local file"""
if url.startswith('http://'):
resp = urllib2.urlopen(url)
encoding = resp.headers.dict.get('content-encoding', 'plain')
data = resp.read()
if encoding == 'plain':
pass
elif encoding == 'gzip':
data = StringIO(data)
data = gzip.GzipFile(fileobj=data).read()
else:
raise RuntimeError('unknown encoding')
else:
with open(url, 'r') as fid:
data = fid.read()
fid.close()
return data
def parse_sphinx_searchindex(searchindex):
"""Parse a Sphinx search index
Parameters
----------
searchindex : str
The Sphinx search index (contents of searchindex.js)
Returns
-------
filenames : list of str
The file names parsed from the search index.
objects : dict
The objects parsed from the search index.
"""
def _select_block(str_in, start_tag, end_tag):
"""Select first block delimited by start_tag and end_tag"""
start_pos = str_in.find(start_tag)
if start_pos < 0:
raise ValueError('start_tag not found')
depth = 0
for pos in range(start_pos, len(str_in)):
if str_in[pos] == start_tag:
depth += 1
elif str_in[pos] == end_tag:
depth -= 1
if depth == 0:
break
sel = str_in[start_pos + 1:pos]
return sel
def _parse_dict_recursive(dict_str):
"""Parse a dictionary from the search index"""
dict_out = dict()
pos_last = 0
pos = dict_str.find(':')
while pos >= 0:
key = dict_str[pos_last:pos]
if dict_str[pos + 1] == '[':
# value is a list
pos_tmp = dict_str.find(']', pos + 1)
if pos_tmp < 0:
raise RuntimeError('error when parsing dict')
value = dict_str[pos + 2: pos_tmp].split(',')
# try to convert elements to int
for i in range(len(value)):
try:
value[i] = int(value[i])
except ValueError:
pass
elif dict_str[pos + 1] == '{':
# value is another dictionary
subdict_str = _select_block(dict_str[pos:], '{', '}')
value = _parse_dict_recursive(subdict_str)
pos_tmp = pos + len(subdict_str)
else:
raise ValueError('error when parsing dict: unknown elem')
key = key.strip('"')
if len(key) > 0:
dict_out[key] = value
pos_last = dict_str.find(',', pos_tmp)
if pos_last < 0:
break
pos_last += 1
pos = dict_str.find(':', pos_last)
return dict_out
# parse objects
query = 'objects:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"objects:" not found in search index')
sel = _select_block(searchindex[pos:], '{', '}')
objects = _parse_dict_recursive(sel)
# parse filenames
query = 'filenames:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"filenames:" not found in search index')
filenames = searchindex[pos + len(query) + 1:]
filenames = filenames[:filenames.find(']')]
filenames = [f.strip('"') for f in filenames.split(',')]
return filenames, objects
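# Illustrative sketch (not part of the original helpers): parses a tiny,
# hand-written search index string shaped like the ones Sphinx emits.  The
# index content below is made up for demonstration and is not a real index.
def _example_parse_searchindex():
    fake_index = ('Search.setIndex({objects:{"sklearn.svm":{SVC:[0,1,1,""]}},'
                  'filenames:["modules/svm"]})')
    filenames, objects = parse_sphinx_searchindex(fake_index)
    # filenames -> ['modules/svm']
    # objects   -> {'sklearn.svm': {'SVC': [0, 1, 1, '""']}}
    return filenames, objects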
class SphinxDocLinkResolver(object):
""" Resolve documentation links using searchindex.js generated by Sphinx
Parameters
----------
doc_url : str
The base URL of the project website.
searchindex : str
Filename of searchindex, relative to doc_url.
extra_modules_test : list of str
List of extra module names to test.
relative : bool
Return relative links (only useful for links to documentation of this
package).
"""
def __init__(self, doc_url, searchindex='searchindex.js',
extra_modules_test=None, relative=False):
self.doc_url = doc_url
self.relative = relative
self._link_cache = {}
self.extra_modules_test = extra_modules_test
self._page_cache = {}
if doc_url.startswith('http://'):
if relative:
raise ValueError('Relative links are only supported for local '
'URLs (doc_url cannot start with "http://)"')
searchindex_url = doc_url + '/' + searchindex
else:
searchindex_url = os.path.join(doc_url, searchindex)
# detect if we are using relative links on a Windows system
if os.name.lower() == 'nt' and not doc_url.startswith('http://'):
if not relative:
raise ValueError('You have to use relative=True for the local'
' package on a Windows system.')
self._is_windows = True
else:
self._is_windows = False
# download and initialize the search index
sindex = get_data(searchindex_url)
filenames, objects = parse_sphinx_searchindex(sindex)
self._searchindex = dict(filenames=filenames, objects=objects)
def _get_link(self, cobj):
"""Get a valid link, False if not found"""
fname_idx = None
full_name = cobj['module_short'] + '.' + cobj['name']
if full_name in self._searchindex['objects']:
value = self._searchindex['objects'][full_name]
if isinstance(value, dict):
value = value[value.keys()[0]]
fname_idx = value[0]
elif cobj['module_short'] in self._searchindex['objects']:
value = self._searchindex['objects'][cobj['module_short']]
if cobj['name'] in value.keys():
fname_idx = value[cobj['name']][0]
if fname_idx is not None:
fname = self._searchindex['filenames'][fname_idx] + '.html'
if self._is_windows:
fname = fname.replace('/', '\\')
link = os.path.join(self.doc_url, fname)
else:
link = posixpath.join(self.doc_url, fname)
if link in self._page_cache:
html = self._page_cache[link]
else:
html = get_data(link)
self._page_cache[link] = html
# test if cobj appears in page
comb_names = [cobj['module_short'] + '.' + cobj['name']]
if self.extra_modules_test is not None:
for mod in self.extra_modules_test:
comb_names.append(mod + '.' + cobj['name'])
url = False
for comb_name in comb_names:
if html.find(comb_name) >= 0:
url = link + '#' + comb_name
link = url
else:
link = False
return link
def resolve(self, cobj, this_url):
"""Resolve the link to the documentation, returns None if not found
Parameters
----------
cobj : dict
Dict with information about the "code object" for which we are
resolving a link.
cobi['name'] : function or class name (str)
cobj['module_short'] : shortened module name (str)
cobj['module'] : module name (str)
this_url: str
URL of the current page. Needed to construct relative URLs
(only used if relative=True in constructor).
Returns
-------
link : str | None
The link (URL) to the documentation.
"""
full_name = cobj['module_short'] + '.' + cobj['name']
link = self._link_cache.get(full_name, None)
if link is None:
# we don't have it cached
link = self._get_link(cobj)
# cache it for the future
self._link_cache[full_name] = link
if link is False or link is None:
# failed to resolve
return None
if self.relative:
link = os.path.relpath(link, start=this_url)
if self._is_windows:
# replace '\' with '/' so it on the web
link = link.replace('\\', '/')
# for some reason, the relative link goes one directory too high up
link = link[3:]
return link
###############################################################################
rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
"""
plot_rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
%(image_list)s
%(stdout)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
**Total running time of the example:** %(time_elapsed) .2f seconds
"""
# The following strings are used when we have several pictures: we use
# an html div tag that our CSS uses to turn the lists into horizontal
# lists.
HLIST_HEADER = """
.. rst-class:: horizontal
"""
HLIST_IMAGE_TEMPLATE = """
*
.. image:: images/%s
:scale: 47
"""
SINGLE_IMAGE = """
.. image:: images/%s
:align: center
"""
# The following dictionary contains the information used to create the
# thumbnails for the front page of the scikit-learn home page.
# key: first image in set
# values: (number of plot in set, height of thumbnail)
carousel_thumbs = {'plot_classifier_comparison_1.png': (1, 600),
'plot_outlier_detection_1.png': (3, 372),
'plot_gp_regression_1.png': (2, 250),
'plot_adaboost_twoclass_1.png': (1, 372),
'plot_compare_methods_1.png': (1, 349)}
def extract_docstring(filename, ignore_heading=False):
""" Extract a module-level docstring, if any
"""
lines = file(filename).readlines()
start_row = 0
if lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
docstring = ''
first_par = ''
tokens = tokenize.generate_tokens(iter(lines).next)
for tok_type, tok_content, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif tok_type == 'STRING':
docstring = eval(tok_content)
# If the docstring is formatted with several paragraphs, extract
# the first one:
paragraphs = '\n'.join(
line.rstrip() for line
in docstring.split('\n')).split('\n\n')
if paragraphs:
if ignore_heading:
if len(paragraphs) > 1:
first_par = re.sub('\n', ' ', paragraphs[1])
first_par = ((first_par[:95] + '...')
if len(first_par) > 95 else first_par)
else:
raise ValueError("Docstring not found by gallery",
"Please check your example's layout",
" and make sure it's correct")
else:
first_par = paragraphs[0]
break
return docstring, first_par, erow + 1 + start_row
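# Illustrative sketch (not part of the original helpers): writes a tiny
# example script to a temporary file and extracts its module docstring.  The
# script content is made up purely for demonstration.
def _example_extract_docstring():
    import tempfile
    code = '"""Plot something\n\nLonger description.\n"""\nprint "hi"\n'
    tmp = tempfile.NamedTemporaryFile(suffix='.py', delete=False)
    tmp.write(code)
    tmp.close()
    docstring, first_par, end_row = extract_docstring(tmp.name)
    # first_par -> 'Plot something'; end_row points just past the docstring
    return docstring, first_par, end_row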
def generate_example_rst(app):
""" Generate the list of examples, as well as the contents of
examples.
"""
root_dir = os.path.join(app.builder.srcdir, 'auto_examples')
example_dir = os.path.abspath(app.builder.srcdir + '/../' + 'examples')
try:
plot_gallery = eval(app.builder.config.plot_gallery)
except TypeError:
plot_gallery = bool(app.builder.config.plot_gallery)
if not os.path.exists(example_dir):
os.makedirs(example_dir)
if not os.path.exists(root_dir):
os.makedirs(root_dir)
# we create an index.rst with all examples
fhindex = file(os.path.join(root_dir, 'index.rst'), 'w')
#Note: The sidebar button has been removed from the examples page for now
# due to how it messes up the layout. Will be fixed at a later point
fhindex.write("""\
.. raw:: html
<style type="text/css">
div#sidebarbutton {
display: none;
}
.figure {
float: left;
margin: 10px;
-webkit-border-radius: 10px; /* Saf3-4, iOS 1-3.2, Android <1.6 */
-moz-border-radius: 10px; /* FF1-3.6 */
border-radius: 10px; /* Opera 10.5, IE9, Saf5, Chrome, FF4, iOS 4, Android 2.1+ */
border: 2px solid #fff;
background-color: white;
/* --> Thumbnail image size */
width: 150px;
height: 100px;
-webkit-background-size: 150px 100px; /* Saf3-4 */
-moz-background-size: 150px 100px; /* FF3.6 */
}
.figure img {
display: inline;
}
div.docstringWrapper p.caption {
display: block;
-webkit-box-shadow: 0px 0px 20px rgba(0, 0, 0, 0.0);
-moz-box-shadow: 0px 0px 20px rgba(0, 0, 0, .0); /* FF3.5 - 3.6 */
box-shadow: 0px 0px 20px rgba(0, 0, 0, 0.0); /* Opera 10.5, IE9, FF4+, Chrome 10+ */
padding: 0px;
border: white;
}
div.docstringWrapper p {
display: none;
background-color: white;
-webkit-box-shadow: 0px 0px 20px rgba(0, 0, 0, 1.00);
-moz-box-shadow: 0px 0px 20px rgba(0, 0, 0, 1.00); /* FF3.5 - 3.6 */
box-shadow: 0px 0px 20px rgba(0, 0, 0, 1.00); /* Opera 10.5, IE9, FF4+, Chrome 10+ */
padding: 13px;
margin-top: 0px;
border-style: solid;
border-width: 1px;
}
</style>
.. raw:: html
<script type="text/javascript">
function animateClone(e){
var position;
position = $(this).position();
var clone = $(this).closest('.thumbnailContainer').find('.clonedItem');
var clone_fig = clone.find('.figure');
clone.css("left", position.left - 70).css("top", position.top - 70).css("position", "absolute").css("z-index", 1000).css("background-color", "white");
var cloneImg = clone_fig.find('img');
clone.show();
clone.animate({
height: "270px",
width: "320px"
}, 0
);
cloneImg.css({
'max-height': "200px",
'max-width': "280px"
});
cloneImg.animate({
height: "200px",
width: "280px"
}, 0
);
clone_fig.css({
'margin-top': '20px',
});
clone_fig.show();
clone.find('p').css("display", "block");
clone_fig.css({
height: "240",
width: "305px"
});
cloneP_height = clone.find('p.caption').height();
clone_fig.animate({
height: (200 + cloneP_height)
}, 0
);
clone.bind("mouseleave", function(e){
clone.animate({
height: "100px",
width: "150px"
}, 10, function(){$(this).hide();});
clone_fig.animate({
height: "100px",
width: "150px"
}, 10, function(){$(this).hide();});
});
} //end animateClone()
$(window).load(function () {
$(".figure").css("z-index", 1);
$(".docstringWrapper").each(function(i, obj){
var clone;
var $obj = $(obj);
clone = $obj.clone();
clone.addClass("clonedItem");
clone.appendTo($obj.closest(".thumbnailContainer"));
clone.hide();
$obj.bind("mouseenter", animateClone);
}); // end each
}); // end
</script>
Examples
========
.. _examples-index:
""")
# Here we don't use an os.walk, but we recurse only twice: flat is
# better than nested.
generate_dir_rst('.', fhindex, example_dir, root_dir, plot_gallery)
for dir in sorted(os.listdir(example_dir)):
if os.path.isdir(os.path.join(example_dir, dir)):
generate_dir_rst(dir, fhindex, example_dir, root_dir, plot_gallery)
fhindex.flush()
def extract_line_count(filename, target_dir):
# Extract the line count of a file
example_file = os.path.join(target_dir, filename)
lines = file(example_file).readlines()
start_row = 0
if lines and lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
tokens = tokenize.generate_tokens(lines.__iter__().next)
check_docstring = True
erow_docstring = 0
for tok_type, _, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif ((tok_type == 'STRING') and check_docstring):
erow_docstring = erow
check_docstring = False
return erow_docstring+1+start_row, erow+1+start_row
def line_count_sort(file_list, target_dir):
# Sort the list of examples by line-count
new_list = filter(lambda x: x.endswith('.py'), file_list)
unsorted = np.zeros(shape=(len(new_list), 2))
unsorted = unsorted.astype(np.object)
for count, exmpl in enumerate(new_list):
docstr_lines, total_lines = extract_line_count(exmpl, target_dir)
unsorted[count][1] = total_lines - docstr_lines
unsorted[count][0] = exmpl
index = np.lexsort((unsorted[:, 0].astype(np.str),
unsorted[:, 1].astype(np.float)))
if not len(unsorted):
return []
return np.array(unsorted[index][:, 0]).tolist()
def generate_dir_rst(dir, fhindex, example_dir, root_dir, plot_gallery):
""" Generate the rst file for an example directory.
"""
if not dir == '.':
target_dir = os.path.join(root_dir, dir)
src_dir = os.path.join(example_dir, dir)
else:
target_dir = root_dir
src_dir = example_dir
if not os.path.exists(os.path.join(src_dir, 'README.txt')):
print 80 * '_'
print ('Example directory %s does not have a README.txt file' %
src_dir)
print 'Skipping this directory'
print 80 * '_'
return
fhindex.write("""
%s
""" % file(os.path.join(src_dir, 'README.txt')).read())
if not os.path.exists(target_dir):
os.makedirs(target_dir)
sorted_listdir = line_count_sort(os.listdir(src_dir),
src_dir)
for fname in sorted_listdir:
if fname.endswith('py'):
generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery)
new_fname = os.path.join(src_dir, fname)
_, fdocstring, _ = extract_docstring(new_fname, True)
thumb = os.path.join(dir, 'images', 'thumb', fname[:-3] + '.png')
link_name = os.path.join(dir, fname).replace(os.path.sep, '_')
fhindex.write("""
.. raw:: html
<div class="thumbnailContainer">
<div class="docstringWrapper">
""")
fhindex.write('.. figure:: %s\n' % thumb)
if link_name.startswith('._'):
link_name = link_name[2:]
if dir != '.':
fhindex.write(' :target: ./%s/%s.html\n\n' % (dir,
fname[:-3]))
else:
fhindex.write(' :target: ./%s.html\n\n' % link_name[:-3])
fhindex.write(""" :ref:`example_%s`
.. raw:: html
<p>%s
</p></div>
</div>
.. toctree::
:hidden:
%s/%s
""" % (link_name, fdocstring, dir, fname[:-3]))
fhindex.write("""
.. raw:: html
<div style="clear: both"></div>
""") # clear at the end of the section
# modules for which we embed links into example code
DOCMODULES = ['sklearn', 'matplotlib', 'numpy', 'scipy']
def make_thumbnail(in_fname, out_fname, width, height):
"""Make a thumbnail with the same aspect ratio centered in an
image with a given width and height
"""
img = Image.open(in_fname)
width_in, height_in = img.size
scale_w = width / float(width_in)
scale_h = height / float(height_in)
if height_in * scale_w <= height:
scale = scale_w
else:
scale = scale_h
width_sc = int(round(scale * width_in))
height_sc = int(round(scale * height_in))
# resize the image
img.thumbnail((width_sc, height_sc), Image.ANTIALIAS)
# insert centered
thumb = Image.new('RGB', (width, height), (255, 255, 255))
pos_insert = ((width - width_sc) / 2, (height - height_sc) / 2)
thumb.paste(img, pos_insert)
thumb.save(out_fname)
# Use optipng to perform lossless compression on the resized image if
# software is installed
if os.environ.get('SKLEARN_DOC_OPTIPNG', False):
try:
os.system("optipng -quiet -o 9 '{0}'".format(out_fname))
except Exception:
warnings.warn('Install optipng to reduce the size of the generated images')
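# Illustrative usage sketch (hypothetical paths, not part of the original
# module): shrink the first figure of an example into a 400x280 gallery
# thumbnail, padding with white where the aspect ratios differ.
#
#   make_thumbnail('images/plot_demo_1.png',
#                  'images/thumb/plot_demo.png', 400, 280)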
def get_short_module_name(module_name, obj_name):
""" Get the shortest possible module name """
parts = module_name.split('.')
short_name = module_name
for i in range(len(parts) - 1, 0, -1):
short_name = '.'.join(parts[:i])
try:
exec('from %s import %s' % (short_name, obj_name))
except ImportError:
# get the last working module name
short_name = '.'.join(parts[:(i + 1)])
break
return short_name
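# Illustrative sketch (module/object names are assumptions for the example):
# the helper keeps the shortest dotted prefix from which the object can still
# be imported, so something like
#
#   get_short_module_name('sklearn.cluster.k_means_', 'KMeans')
#
# would typically return 'sklearn.cluster', because
# `from sklearn.cluster import KMeans` works while `from sklearn import KMeans`
# does not.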
def generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery):
""" Generate the rst file for a given example.
"""
base_image_name = os.path.splitext(fname)[0]
image_fname = '%s_%%s.png' % base_image_name
this_template = rst_template
last_dir = os.path.split(src_dir)[-1]
# to avoid leading . in file names, and wrong names in links
if last_dir == '.' or last_dir == 'examples':
last_dir = ''
else:
last_dir += '_'
short_fname = last_dir + fname
src_file = os.path.join(src_dir, fname)
example_file = os.path.join(target_dir, fname)
shutil.copyfile(src_file, example_file)
# The following is a list containing all the figure names
figure_list = []
image_dir = os.path.join(target_dir, 'images')
thumb_dir = os.path.join(image_dir, 'thumb')
if not os.path.exists(image_dir):
os.makedirs(image_dir)
if not os.path.exists(thumb_dir):
os.makedirs(thumb_dir)
image_path = os.path.join(image_dir, image_fname)
stdout_path = os.path.join(image_dir,
'stdout_%s.txt' % base_image_name)
time_path = os.path.join(image_dir,
'time_%s.txt' % base_image_name)
thumb_file = os.path.join(thumb_dir, fname[:-3] + '.png')
time_elapsed = 0
if plot_gallery and fname.startswith('plot'):
        # generate the plot as a png image if the file name starts with
        # 'plot' and there is no up-to-date image for it (i.e. the image
        # is missing or older than the source file).
first_image_file = image_path % 1
if os.path.exists(stdout_path):
stdout = open(stdout_path).read()
else:
stdout = ''
if os.path.exists(time_path):
time_elapsed = float(open(time_path).read())
if not os.path.exists(first_image_file) or \
os.stat(first_image_file).st_mtime <= os.stat(src_file).st_mtime:
# We need to execute the code
print 'plotting %s' % fname
t0 = time()
import matplotlib.pyplot as plt
plt.close('all')
cwd = os.getcwd()
try:
                # First cd into the original example dir, so that any file
                # created by the example gets created in this directory
orig_stdout = sys.stdout
os.chdir(os.path.dirname(src_file))
my_buffer = StringIO()
my_stdout = Tee(sys.stdout, my_buffer)
sys.stdout = my_stdout
my_globals = {'pl': plt}
execfile(os.path.basename(src_file), my_globals)
time_elapsed = time() - t0
sys.stdout = orig_stdout
my_stdout = my_buffer.getvalue()
# get variables so we can later add links to the documentation
example_code_obj = {}
for var_name, var in my_globals.iteritems():
if not hasattr(var, '__module__'):
continue
if not isinstance(var.__module__, basestring):
continue
if var.__module__.split('.')[0] not in DOCMODULES:
continue
# get the type as a string with other things stripped
tstr = str(type(var))
tstr = (tstr[tstr.find('\'')
+ 1:tstr.rfind('\'')].split('.')[-1])
# get shortened module name
module_short = get_short_module_name(var.__module__,
tstr)
cobj = {'name': tstr, 'module': var.__module__,
'module_short': module_short,
'obj_type': 'object'}
example_code_obj[var_name] = cobj
# find functions so we can later add links to the documentation
funregex = re.compile('[\w.]+\(')
with open(src_file, 'rt') as fid:
for line in fid.readlines():
if line.startswith('#'):
continue
for match in funregex.findall(line):
fun_name = match[:-1]
try:
exec('this_fun = %s' % fun_name, my_globals)
except Exception as err:
                                # Here, we were not able to execute the
                                # previous statement, either because
                                # fun_name was not a function but a
                                # statement (e.g. print), or because the
                                # regexp didn't catch the whole function
                                # name, e.g. for
                                #   X = something().blah()
                                # it will work for something, but not blah.
continue
this_fun = my_globals['this_fun']
if not callable(this_fun):
continue
if not hasattr(this_fun, '__module__'):
continue
if not isinstance(this_fun.__module__, basestring):
continue
if (this_fun.__module__.split('.')[0]
not in DOCMODULES):
continue
# get shortened module name
fun_name_short = fun_name.split('.')[-1]
module_short = get_short_module_name(
this_fun.__module__, fun_name_short)
cobj = {'name': fun_name_short,
'module': this_fun.__module__,
'module_short': module_short,
'obj_type': 'function'}
example_code_obj[fun_name] = cobj
fid.close()
if len(example_code_obj) > 0:
# save the dictionary, so we can later add hyperlinks
codeobj_fname = example_file[:-3] + '_codeobj.pickle'
with open(codeobj_fname, 'wb') as fid:
cPickle.dump(example_code_obj, fid,
cPickle.HIGHEST_PROTOCOL)
fid.close()
if '__doc__' in my_globals:
                    # The __doc__ is often printed in the example; we
                    # don't wish to echo it
my_stdout = my_stdout.replace(
my_globals['__doc__'],
'')
my_stdout = my_stdout.strip()
if my_stdout:
stdout = '**Script output**::\n\n %s\n\n' % (
'\n '.join(my_stdout.split('\n')))
open(stdout_path, 'w').write(stdout)
open(time_path, 'w').write('%f' % time_elapsed)
os.chdir(cwd)
                # In order to save every figure we have two solutions:
# * iterate from 1 to infinity and call plt.fignum_exists(n)
# (this requires the figures to be numbered
# incrementally: 1, 2, 3 and not 1, 2, 5)
# * iterate over [fig_mngr.num for fig_mngr in
# matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
for fig_num in (fig_mngr.num for fig_mngr in
matplotlib._pylab_helpers.Gcf.get_all_fig_managers()):
# Set the fig_num figure as the current figure as we can't
# save a figure that's not the current figure.
plt.figure(fig_num)
plt.savefig(image_path % fig_num)
figure_list.append(image_fname % fig_num)
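                # A rough sketch of the first (unused) alternative mentioned
                # above, assuming consecutively numbered figures:
                #
                #   fig_num = 1
                #   while plt.fignum_exists(fig_num):
                #       plt.figure(fig_num)
                #       plt.savefig(image_path % fig_num)
                #       figure_list.append(image_fname % fig_num)
                #       fig_num += 1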
except:
print 80 * '_'
print '%s is not compiling:' % fname
traceback.print_exc()
print 80 * '_'
finally:
os.chdir(cwd)
sys.stdout = orig_stdout
print " - time elapsed : %.2g sec" % time_elapsed
else:
figure_list = [f[len(image_dir):]
for f in glob.glob(image_path % '[1-9]')]
#for f in glob.glob(image_path % '*')]
# generate thumb file
this_template = plot_rst_template
car_thumb_path = os.path.join(os.path.split(root_dir)[0], '_build/html/stable/_images/')
    # Note: normally, make_thumbnail is used to write to the path contained in
    # `thumb_file`, which is within `auto_examples/../images/thumbs` depending
    # on the example. Because the carousel has different dimensions than those
    # of the examples gallery, I did not simply reuse them all, as some
    # contained whitespace due to their default gallery thumbnail size. Below,
    # for a few cases, separate thumbnails are created (the originals can't
    # just be overwritten with the carousel dimensions, as that messes up the
    # examples gallery layout). The special carousel thumbnails are written
    # directly to _build/html/stable/_images/, as for some reason unknown to
    # me, Sphinx refuses to copy my 'extra' thumbnails from the auto examples
    # gallery to the _build folder. This works fine as is, but it would be
    # cleaner to have it happen with the rest. Ideally they should be written
    # to 'thumb_file' as well, and then copied to the _images folder during
    # the `Copying Downloadable Files` step like the rest.
if not os.path.exists(car_thumb_path):
os.makedirs(car_thumb_path)
if os.path.exists(first_image_file):
# We generate extra special thumbnails for the carousel
carousel_tfile = os.path.join(car_thumb_path, fname[:-3] + '_carousel.png')
first_img = image_fname % 1
if first_img in carousel_thumbs:
make_thumbnail((image_path % carousel_thumbs[first_img][0]),
carousel_tfile, carousel_thumbs[first_img][1], 190)
make_thumbnail(first_image_file, thumb_file, 400, 280)
if not os.path.exists(thumb_file):
# create something to replace the thumbnail
make_thumbnail('images/no_image.png', thumb_file, 200, 140)
docstring, short_desc, end_row = extract_docstring(example_file)
# Depending on whether we have one or more figures, we're using a
# horizontal list or a single rst call to 'image'.
if len(figure_list) == 1:
figure_name = figure_list[0]
image_list = SINGLE_IMAGE % figure_name.lstrip('/')
else:
image_list = HLIST_HEADER
for figure_name in figure_list:
image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')
f = open(os.path.join(target_dir, fname[:-2] + 'rst'), 'w')
f.write(this_template % locals())
f.flush()
def embed_code_links(app, exception):
"""Embed hyperlinks to documentation into example code"""
try:
if exception is not None:
return
print 'Embedding documentation hyperlinks in examples..'
# Add resolvers for the packages for which we want to show links
doc_resolvers = {}
doc_resolvers['sklearn'] = SphinxDocLinkResolver(app.builder.outdir,
relative=True)
doc_resolvers['matplotlib'] = SphinxDocLinkResolver(
'http://matplotlib.org')
doc_resolvers['numpy'] = SphinxDocLinkResolver(
'http://docs.scipy.org/doc/numpy-1.6.0')
doc_resolvers['scipy'] = SphinxDocLinkResolver(
'http://docs.scipy.org/doc/scipy-0.11.0/reference')
example_dir = os.path.join(app.builder.srcdir, 'auto_examples')
html_example_dir = os.path.abspath(os.path.join(app.builder.outdir,
'auto_examples'))
# patterns for replacement
link_pattern = '<a href="%s">%s</a>'
orig_pattern = '<span class="n">%s</span>'
period = '<span class="o">.</span>'
for dirpath, _, filenames in os.walk(html_example_dir):
for fname in filenames:
print '\tprocessing: %s' % fname
full_fname = os.path.join(html_example_dir, dirpath, fname)
subpath = dirpath[len(html_example_dir) + 1:]
pickle_fname = os.path.join(example_dir, subpath,
fname[:-5] + '_codeobj.pickle')
if os.path.exists(pickle_fname):
# we have a pickle file with the objects to embed links for
with open(pickle_fname, 'rb') as fid:
example_code_obj = cPickle.load(fid)
fid.close()
str_repl = {}
# generate replacement strings with the links
for name, cobj in example_code_obj.iteritems():
this_module = cobj['module'].split('.')[0]
if this_module not in doc_resolvers:
continue
link = doc_resolvers[this_module].resolve(cobj,
full_fname)
if link is not None:
parts = name.split('.')
name_html = orig_pattern % parts[0]
for part in parts[1:]:
name_html += period + orig_pattern % part
str_repl[name_html] = link_pattern % (link, name_html)
# do the replacement in the html file
if len(str_repl) > 0:
with open(full_fname, 'rb') as fid:
lines_in = fid.readlines()
with open(full_fname, 'wb') as fid:
for line in lines_in:
line = line.decode('utf-8')
for name, link in str_repl.iteritems():
line = line.replace(name, link)
fid.write(line.encode('utf-8'))
except urllib2.HTTPError, e:
print ("The following HTTP Error has occurred:\n")
print e.code
except urllib2.URLError, e:
print ("\n...\n"
"Warning: Embedding the documentation hyperlinks requires "
"internet access.\nPlease check your network connection.\n"
"Unable to continue embedding due to a URL Error: \n")
print e.args
print '[done]'
def setup(app):
app.connect('builder-inited', generate_example_rst)
app.add_config_value('plot_gallery', True, 'html')
# embed links after build is finished
app.connect('build-finished', embed_code_links)
# Sphinx hack: sphinx copies generated images to the build directory
# each time the docs are made. If the desired image name already
# exists, it appends a digit to prevent overwrites. The problem is,
# the directory is never cleared. This means that each time you build
# the docs, the number of images in the directory grows.
#
# This question has been asked on the sphinx development list, but there
# was no response: http://osdir.com/ml/sphinx-dev/2011-02/msg00123.html
#
# The following is a hack that prevents this behavior by clearing the
    # image build directory each time the docs are built. If Sphinx
    # changes its layout between versions, this will not work (though
    # it should probably not cause a crash). Tested successfully
    # on Sphinx 1.0.7.
build_image_dir = '_build/html/_images'
if os.path.exists(build_image_dir):
filelist = os.listdir(build_image_dir)
for filename in filelist:
if filename.endswith('png'):
os.remove(os.path.join(build_image_dir, filename))
| bsd-3-clause | -8,002,446,403,779,385,000 | 35.33519 | 160 | 0.523727 | false |
namboy94/kudubot | kudubot/helper.py | 1 | 3631 | """LICENSE
Copyright 2015 Hermann Krumrey <hermann@krumreyh.com>
This file is part of kudubot.
kudubot is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
kudubot is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with kudubot. If not, see <http://www.gnu.org/licenses/>.
LICENSE"""
import os
import argparse
import logging
import traceback
from typing import Type, Optional
from sentry_sdk import init as init_sentry
from sentry_sdk.integrations.logging import ignore_logger
from bokkichat.connection.Connection import Connection
from kudubot.Bot import Bot
from kudubot.exceptions import ConfigurationError
def cli_bot_start(
bot_cls: Type[Bot],
connection_cls: Type[Connection],
sentry_dsn: Optional[str] = None
):
"""
Implements a standard CLI interface for kudubot implementations
:param bot_cls: The class of the bot to start
:param connection_cls: The connection to use with the bot
:param sentry_dsn: Optional sentry DSN for exception logging
:return: None
"""
if sentry_dsn is not None:
init_sentry(sentry_dsn)
default_config_path = os.path.join(
os.path.expanduser("~"),
".config/{}".format(bot_cls.name())
)
parser = argparse.ArgumentParser()
parser.add_argument("--initialize", action="store_true",
help="Initializes the {} bot".format(bot_cls.name()))
parser.add_argument("--verbose", "-v", action="store_true",
help="Shows more output (INFO level)")
parser.add_argument("--debug", "-d", action="store_true",
help="Shows even more output (DEBUG level)")
parser.add_argument("--custom-dir", default=default_config_path,
help="Specifies a custom configuration directory")
args = parser.parse_args()
config_path = args.custom_dir
if args.verbose:
logging.basicConfig(level=logging.INFO)
if args.debug:
logging.basicConfig(level=logging.DEBUG)
if args.initialize:
if not os.path.isdir(config_path):
os.makedirs(config_path)
bot_cls.create_config(connection_cls, config_path)
print("Successfully generated configuration in " + config_path)
elif not os.path.isdir(config_path):
print("Missing Configuration directory " + config_path)
else:
try:
bot = bot_cls.load(connection_cls, config_path)
except ConfigurationError as e:
print("Invalid Configuration: {}".format(e))
return
# Disable sentry notifications for error-level logging messages
# in kudubot, those will be sent another way
if sentry_dsn is not None:
init_sentry(sentry_dsn, release="{}-{}".format(
bot.name(), bot.version()
))
ignore_logger(bot.logger.name)
try:
bot.start()
except KeyboardInterrupt:
print("Execution aborted")
except BaseException as e:
bot.logger.error(
"Fatal Exception: {}\n{}".format(
e,
"\n".join(traceback.format_tb(e.__traceback__))
)
)
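# Illustrative usage sketch (class and module names below are assumptions, not
# part of kudubot or bokkichat): a concrete bot would typically expose an entry
# point that hands its Bot subclass and a bokkichat Connection subclass to
# cli_bot_start, for example:
#
#   from mybot.MyBot import MyBot
#   from bokkichat.connection.impl.CliConnection import CliConnection
#
#   def main():
#       cli_bot_start(MyBot, CliConnection, sentry_dsn=None)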
| gpl-3.0 | 2,426,591,795,977,430,500 | 33.580952 | 77 | 0.646654 | false |