code | repo_name | path | language | license | size |
---|---|---|---|---|---|
import itertools
from django import forms
from django.core.exceptions import ValidationError
from django.core.validators import validate_slug
from django.db import models
from django.utils import simplejson as json
from django.utils.text import capfirst
from django.utils.translation import ugettext_lazy as _
from philo.forms.fields import JSONFormField
from philo.utils.registry import RegistryIterator
from philo.validators import TemplateValidator, json_validator
#from philo.models.fields.entities import *
class TemplateField(models.TextField):
"""A :class:`TextField` which is validated with a :class:`.TemplateValidator`. ``allow``, ``disallow``, and ``secure`` will be passed into the validator's construction."""
def __init__(self, allow=None, disallow=None, secure=True, *args, **kwargs):
super(TemplateField, self).__init__(*args, **kwargs)
self.validators.append(TemplateValidator(allow, disallow, secure))
class JSONDescriptor(object):
def __init__(self, field):
self.field = field
def __get__(self, instance, owner):
if instance is None:
raise AttributeError # ?
if self.field.name not in instance.__dict__:
json_string = getattr(instance, self.field.attname)
instance.__dict__[self.field.name] = json.loads(json_string)
return instance.__dict__[self.field.name]
def __set__(self, instance, value):
instance.__dict__[self.field.name] = value
setattr(instance, self.field.attname, json.dumps(value))
def __delete__(self, instance):
del(instance.__dict__[self.field.name])
setattr(instance, self.field.attname, json.dumps(None))
class JSONField(models.TextField):
"""A :class:`TextField` which stores its value on the model instance as a python object and stores its value in the database as JSON. Validated with :func:`.json_validator`."""
default_validators = [json_validator]
def get_attname(self):
return "%s_json" % self.name
def contribute_to_class(self, cls, name):
super(JSONField, self).contribute_to_class(cls, name)
setattr(cls, name, JSONDescriptor(self))
models.signals.pre_init.connect(self.fix_init_kwarg, sender=cls)
def fix_init_kwarg(self, sender, args, kwargs, **signal_kwargs):
# Anything passed in as self.name is assumed to come from a serializer and
# will be treated as a json string.
if self.name in kwargs:
value = kwargs.pop(self.name)
# Hack to handle the xml serializer's handling of "null"
if value is None:
value = 'null'
kwargs[self.attname] = value
def formfield(self, *args, **kwargs):
kwargs["form_class"] = JSONFormField
return super(JSONField, self).formfield(*args, **kwargs)
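# Illustrative usage sketch (not part of the original module); the "Widget"
# model and its field are hypothetical:
#
#     class Widget(models.Model):
#         data = JSONField(blank=True)
#
# Assigning widget.data = {"a": 1} keeps the dict on the instance, while the
# hidden "data_json" column (see get_attname) stores the serialized string
# '{"a": 1}', courtesy of JSONDescriptor.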
class SlugMultipleChoiceField(models.Field):
"""Stores a selection of multiple items with unique slugs in the form of a comma-separated list. Also knows how to correctly handle :class:`RegistryIterator`\ s passed in as choices."""
__metaclass__ = models.SubfieldBase
description = _("Comma-separated slug field")
def get_internal_type(self):
return "TextField"
def to_python(self, value):
if not value:
return []
if isinstance(value, list):
return value
return value.split(',')
def get_prep_value(self, value):
return ','.join(value)
def formfield(self, **kwargs):
# This is necessary because django hard-codes TypedChoiceField for things with choices.
defaults = {
'widget': forms.CheckboxSelectMultiple,
'choices': self.get_choices(include_blank=False),
'label': capfirst(self.verbose_name),
'required': not self.blank,
'help_text': self.help_text
}
if self.has_default():
if callable(self.default):
defaults['initial'] = self.default
defaults['show_hidden_initial'] = True
else:
defaults['initial'] = self.get_default()
for k in kwargs.keys():
if k not in ('coerce', 'empty_value', 'choices', 'required',
'widget', 'label', 'initial', 'help_text',
'error_messages', 'show_hidden_initial'):
del kwargs[k]
defaults.update(kwargs)
form_class = forms.TypedMultipleChoiceField
return form_class(**defaults)
def validate(self, value, model_instance):
invalid_values = []
for val in value:
try:
validate_slug(val)
except ValidationError:
invalid_values.append(val)
if invalid_values:
# should really make a custom message.
raise ValidationError(self.error_messages['invalid_choice'] % invalid_values)
def _get_choices(self):
if isinstance(self._choices, RegistryIterator):
return self._choices.copy()
elif hasattr(self._choices, 'next'):
choices, self._choices = itertools.tee(self._choices)
return choices
else:
return self._choices
choices = property(_get_choices)
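# Round-trip sketch (illustrative values): get_prep_value(['news', 'blog'])
# returns the stored string 'news,blog', and to_python('news,blog') parses it
# back into ['news', 'blog']; empty values come back as [].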
try:
from south.modelsinspector import add_introspection_rules
except ImportError:
pass
else:
add_introspection_rules([], ["^philo\.models\.fields\.SlugMultipleChoiceField"])
add_introspection_rules([], ["^philo\.models\.fields\.TemplateField"])
add_introspection_rules([], ["^philo\.models\.fields\.JSONField"]) | ithinksw/philo | philo/models/fields/__init__.py | Python | isc | 4,971 |
import hashlib
import json
import logging
import os
import subprocess
import sys
import time
from collections import defaultdict
from shutil import copy
from shutil import copyfile
from shutil import copystat
from shutil import copytree
from tempfile import mkdtemp
import boto3
import botocore
import yaml
from .helpers import archive
from .helpers import get_environment_variable_value
from .helpers import LambdaContext
from .helpers import mkdir
from .helpers import read
from .helpers import timestamp
ARN_PREFIXES = {
"cn-north-1": "aws-cn",
"cn-northwest-1": "aws-cn",
"us-gov-west-1": "aws-us-gov",
}
log = logging.getLogger(__name__)
def load_source(module_name, module_path):
"""Loads a python module from the path of the corresponding file."""
if sys.version_info[0] == 3 and sys.version_info[1] >= 5:
import importlib.util
spec = importlib.util.spec_from_file_location(module_name, module_path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
elif sys.version_info[0] == 3 and sys.version_info[1] < 5:
import importlib.machinery
loader = importlib.machinery.SourceFileLoader(module_name, module_path)
module = loader.load_module()
return module
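# Illustrative example (hypothetical path): the return value is an ordinary
# module object, so the handler can be fetched with getattr:
#
#     service = load_source("service", "/tmp/project/service.py")
#     handler = getattr(service, "handler")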
def cleanup_old_versions(
src, keep_last_versions, config_file="config.yaml", profile_name=None,
):
"""Deletes old deployed versions of the function in AWS Lambda.
It will not delete $LATEST or any aliased versions.
:param str src:
The path to your Lambda-ready project (the folder must contain a
valid config.yaml and a handler module, e.g. service.py).
:param int keep_last_versions:
The number of recent versions to keep and not delete
"""
if keep_last_versions <= 0:
print("Won't delete all versions. Please do this manually")
else:
path_to_config_file = os.path.join(src, config_file)
cfg = read_cfg(path_to_config_file, profile_name)
profile_name = cfg.get("profile")
aws_access_key_id = cfg.get("aws_access_key_id")
aws_secret_access_key = cfg.get("aws_secret_access_key")
client = get_client(
"lambda",
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region"),
)
response = client.list_versions_by_function(
FunctionName=cfg.get("function_name"),
)
versions = response.get("Versions")
if len(response.get("Versions")) < keep_last_versions:
print("Nothing to delete. (Too few versions published)")
else:
version_numbers = [
elem.get("Version") for elem in versions[1:-keep_last_versions]
]
for version_number in version_numbers:
try:
client.delete_function(
FunctionName=cfg.get("function_name"),
Qualifier=version_number,
)
except botocore.exceptions.ClientError as e:
print(f"Skipping Version {version_number}: {e}")
def deploy(
src,
requirements=None,
local_package=None,
config_file="config.yaml",
profile_name=None,
preserve_vpc=False,
):
"""Deploys a new function to AWS Lambda.
:param str src:
The path to your Lambda-ready project (the folder must contain a
valid config.yaml and a handler module, e.g. service.py).
:param str local_package:
The path to a local package that should be included in the deploy as
well (and/or is not available on PyPI).
"""
# Load and parse the config file.
path_to_config_file = os.path.join(src, config_file)
cfg = read_cfg(path_to_config_file, profile_name)
# Copy all the pip dependencies required to run your code into a temporary
# folder then add the handler file in the root of this directory.
# Zip the contents of this folder into a single file and output to the dist
# directory.
path_to_zip_file = build(
src,
config_file=config_file,
requirements=requirements,
local_package=local_package,
)
existing_config = get_function_config(cfg)
if existing_config:
update_function(
cfg, path_to_zip_file, existing_config, preserve_vpc=preserve_vpc
)
else:
create_function(cfg, path_to_zip_file)
def deploy_s3(
src,
requirements=None,
local_package=None,
config_file="config.yaml",
profile_name=None,
preserve_vpc=False,
):
"""Deploys a new function via AWS S3.
:param str src:
The path to your Lambda-ready project (the folder must contain a
valid config.yaml and a handler module, e.g. service.py).
:param str local_package:
The path to a local package that should be included in the deploy as
well (and/or is not available on PyPI).
"""
# Load and parse the config file.
path_to_config_file = os.path.join(src, config_file)
cfg = read_cfg(path_to_config_file, profile_name)
# Copy all the pip dependencies required to run your code into a temporary
# folder then add the handler file in the root of this directory.
# Zip the contents of this folder into a single file and output to the dist
# directory.
path_to_zip_file = build(
src,
config_file=config_file,
requirements=requirements,
local_package=local_package,
)
use_s3 = True
s3_file = upload_s3(cfg, path_to_zip_file, use_s3)
existing_config = get_function_config(cfg)
if existing_config:
update_function(
cfg,
path_to_zip_file,
existing_config,
use_s3=use_s3,
s3_file=s3_file,
preserve_vpc=preserve_vpc,
)
else:
create_function(cfg, path_to_zip_file, use_s3=use_s3, s3_file=s3_file)
def upload(
src,
requirements=None,
local_package=None,
config_file="config.yaml",
profile_name=None,
):
"""Uploads a new function to AWS S3.
:param str src:
The path to your Lambda-ready project (the folder must contain a
valid config.yaml and a handler module, e.g. service.py).
:param str local_package:
The path to a local package that should be included in the deploy as
well (and/or is not available on PyPI).
"""
# Load and parse the config file.
path_to_config_file = os.path.join(src, config_file)
cfg = read_cfg(path_to_config_file, profile_name)
# Copy all the pip dependencies required to run your code into a temporary
# folder then add the handler file in the root of this directory.
# Zip the contents of this folder into a single file and output to the dist
# directory.
path_to_zip_file = build(
src,
config_file=config_file,
requirements=requirements,
local_package=local_package,
)
upload_s3(cfg, path_to_zip_file)
def invoke(
src,
event_file="event.json",
config_file="config.yaml",
profile_name=None,
verbose=False,
):
"""Simulates a call to your function.
:param str src:
The path to your Lambda-ready project (the folder must contain a
valid config.yaml and a handler module, e.g. service.py).
:param str event_file:
An optional argument to override which event file to use.
:param bool verbose:
Whether to print out verbose details.
"""
# Load and parse the config file.
path_to_config_file = os.path.join(src, config_file)
cfg = read_cfg(path_to_config_file, profile_name)
# Set AWS_PROFILE environment variable based on `--profile` option.
if profile_name:
os.environ["AWS_PROFILE"] = profile_name
# Load environment variables from the config file into the actual
# environment.
env_vars = cfg.get("environment_variables")
if env_vars:
for key, value in env_vars.items():
os.environ[key] = get_environment_variable_value(value)
# Load and parse event file.
path_to_event_file = os.path.join(src, event_file)
event = read(path_to_event_file, loader=json.loads)
# Tweak to allow module to import local modules
try:
sys.path.index(src)
except ValueError:
sys.path.append(src)
handler = cfg.get("handler")
# Inspect the handler string (<module>.<function name>) and translate it
# into a function we can execute.
fn = get_callable_handler_function(src, handler)
timeout = cfg.get("timeout")
if timeout:
context = LambdaContext(cfg.get("function_name"), timeout)
else:
context = LambdaContext(cfg.get("function_name"))
start = time.time()
results = fn(event, context)
end = time.time()
print("{0}".format(results))
if verbose:
print(
"\nexecution time: {:.8f}s\nfunction execution "
"timeout: {:2}s".format(end - start, cfg.get("timeout", 15))
)
def init(src, minimal=False):
"""Copies template files to a given directory.
:param str src:
The path to output the template lambda project files.
:param bool minimal:
Minimal possible template files (excludes event.json).
"""
templates_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "project_templates",
)
for filename in os.listdir(templates_path):
if (minimal and filename == "event.json") or filename.endswith(".pyc"):
continue
dest_path = os.path.join(templates_path, filename)
if not os.path.isdir(dest_path):
copy(dest_path, src)
def build(
src,
requirements=None,
local_package=None,
config_file="config.yaml",
profile_name=None,
):
"""Builds the file bundle.
:param str src:
The path to your Lambda-ready project (the folder must contain a
valid config.yaml and a handler module, e.g. service.py).
:param str local_package:
The path to a local package that should be included in the deploy as
well (and/or is not available on PyPI).
"""
# Load and parse the config file.
path_to_config_file = os.path.join(src, config_file)
cfg = read_cfg(path_to_config_file, profile_name)
# Get the absolute path to the output directory and create it if it doesn't
# already exist.
dist_directory = cfg.get("dist_directory", "dist")
path_to_dist = os.path.join(src, dist_directory)
mkdir(path_to_dist)
# Combine the name of the Lambda function with the current timestamp to use
# for the output filename.
function_name = cfg.get("function_name")
output_filename = "{0}-{1}.zip".format(timestamp(), function_name)
path_to_temp = mkdtemp(prefix="aws-lambda")
pip_install_to_target(
path_to_temp, requirements=requirements, local_package=local_package,
)
# Hack for Zope.
if "zope" in os.listdir(path_to_temp):
print(
"Zope packages detected; fixing Zope package paths to "
"make them importable.",
)
# Touch.
with open(os.path.join(path_to_temp, "zope/__init__.py"), "wb"):
pass
# Gracefully handle whether ".zip" was included in the filename or not.
output_filename = (
"{0}.zip".format(output_filename)
if not output_filename.endswith(".zip")
else output_filename
)
# Allow definition of source code directories we want to build into our
# zipped package.
build_config = defaultdict(**cfg.get("build", {}))
build_source_directories = build_config.get("source_directories", "")
build_source_directories = (
build_source_directories
if build_source_directories is not None
else ""
)
source_directories = [
d.strip() for d in build_source_directories.split(",")
]
files = []
for filename in os.listdir(src):
if os.path.isfile(filename):
if filename == ".DS_Store":
continue
if filename == config_file:
continue
print("Bundling: %r" % filename)
files.append(os.path.join(src, filename))
elif os.path.isdir(filename) and filename in source_directories:
print("Bundling directory: %r" % filename)
files.append(os.path.join(src, filename))
# "cd" into `temp_path` directory.
os.chdir(path_to_temp)
for f in files:
if os.path.isfile(f):
_, filename = os.path.split(f)
# Copy handler file into root of the packages folder.
copyfile(f, os.path.join(path_to_temp, filename))
copystat(f, os.path.join(path_to_temp, filename))
elif os.path.isdir(f):
src_path_length = len(src) + 1
destination_folder = os.path.join(
path_to_temp, f[src_path_length:]
)
copytree(f, destination_folder)
# Zip them together into a single file.
# TODO: Delete temp directory created once the archive has been compiled.
path_to_zip_file = archive("./", path_to_dist, output_filename)
return path_to_zip_file
def get_callable_handler_function(src, handler):
"""Translate a string of the form "module.function" into a callable
function.
:param str src:
The path to your Lambda project containing a valid handler file.
:param str handler:
A dot delimited string representing the `<module>.<function name>`.
"""
# "cd" into `src` directory.
os.chdir(src)
module_name, function_name = handler.split(".")
filename = get_handler_filename(handler)
path_to_module_file = os.path.join(src, filename)
module = load_source(module_name, path_to_module_file)
return getattr(module, function_name)
def get_handler_filename(handler):
"""Shortcut to get the filename from the handler string.
:param str handler:
A dot delimited string representing the `<module>.<function name>`.
"""
module_name, _ = handler.split(".")
return "{0}.py".format(module_name)
def _install_packages(path, packages):
"""Install all packages listed to the target directory.
Ignores Python itself and python-lambda, since they are only needed
for deploying and not for running the code.
:param str path:
Path to copy installed pip packages to.
:param list packages:
A list of packages to be installed via pip.
"""
def _filter_blacklist(package):
blacklist = ["-i", "#", "Python==", "python-lambda=="]
return all(package.startswith(entry) is False for entry in blacklist)
filtered_packages = filter(_filter_blacklist, packages)
for package in filtered_packages:
if package.startswith("-e "):
package = package.replace("-e ", "")
print("Installing {package}".format(package=package))
subprocess.check_call(
[
sys.executable,
"-m",
"pip",
"install",
package,
"-t",
path,
"--ignore-installed",
]
)
print(
"Install directory contents are now: {directory}".format(
directory=os.listdir(path)
)
)
def pip_install_to_target(path, requirements=None, local_package=None):
"""For a given active virtualenv, gather all installed pip packages then
copy (re-install) them to the path provided.
:param str path:
Path to copy installed pip packages to.
:param str requirements:
If set, only the packages in the supplied requirements file are
installed.
If not set then installs all packages found via pip freeze.
:param str local_package:
The path to a local package that should be included in the deploy as
well (and/or is not available on PyPI).
"""
packages = []
if not requirements:
print("Gathering pip packages")
pkgStr = subprocess.check_output(
[sys.executable, "-m", "pip", "freeze"]
)
packages.extend(pkgStr.decode("utf-8").splitlines())
else:
if os.path.exists(requirements):
print("Gathering requirement packages")
data = read(requirements)
packages.extend(data.splitlines())
if not packages:
print("No dependency packages installed!")
if local_package is not None:
if not isinstance(local_package, (list, tuple)):
local_package = [local_package]
for l_package in local_package:
packages.append(l_package)
_install_packages(path, packages)
def get_role_name(region, account_id, role):
"""Shortcut to insert the `account_id` and `role` into the iam string."""
prefix = ARN_PREFIXES.get(region, "aws")
return "arn:{0}:iam::{1}:role/{2}".format(prefix, account_id, role)
def get_account_id(
profile_name, aws_access_key_id, aws_secret_access_key, region=None,
):
"""Query STS for a users' account_id"""
client = get_client(
"sts", profile_name, aws_access_key_id, aws_secret_access_key, region,
)
return client.get_caller_identity().get("Account")
def get_client(
client,
profile_name,
aws_access_key_id,
aws_secret_access_key,
region=None,
):
"""Shortcut for getting an initialized instance of the boto3 client."""
boto3.setup_default_session(
profile_name=profile_name,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
region_name=region,
)
return boto3.client(client)
def create_function(cfg, path_to_zip_file, use_s3=False, s3_file=None):
"""Register and upload a function to AWS Lambda."""
print("Creating your new Lambda function")
byte_stream = read(path_to_zip_file, binary_file=True)
profile_name = cfg.get("profile")
aws_access_key_id = cfg.get("aws_access_key_id")
aws_secret_access_key = cfg.get("aws_secret_access_key")
account_id = get_account_id(
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region",),
)
role = get_role_name(
cfg.get("region"),
account_id,
cfg.get("role", "lambda_basic_execution"),
)
client = get_client(
"lambda",
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region"),
)
# Do we prefer development variable over config?
buck_name = os.environ.get("S3_BUCKET_NAME") or cfg.get("bucket_name")
func_name = os.environ.get("LAMBDA_FUNCTION_NAME") or cfg.get(
"function_name"
)
print("Creating lambda function with name: {}".format(func_name))
if use_s3:
kwargs = {
"FunctionName": func_name,
"Runtime": cfg.get("runtime", "python2.7"),
"Role": role,
"Handler": cfg.get("handler"),
"Code": {
"S3Bucket": "{}".format(buck_name),
"S3Key": "{}".format(s3_file),
},
"Description": cfg.get("description", ""),
"Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512),
"VpcConfig": {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
},
"Publish": True,
}
else:
kwargs = {
"FunctionName": func_name,
"Runtime": cfg.get("runtime", "python2.7"),
"Role": role,
"Handler": cfg.get("handler"),
"Code": {"ZipFile": byte_stream},
"Description": cfg.get("description", ""),
"Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512),
"VpcConfig": {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
},
"Publish": True,
}
if "tags" in cfg:
kwargs.update(
Tags={key: str(value) for key, value in cfg.get("tags").items()}
)
if "environment_variables" in cfg:
kwargs.update(
Environment={
"Variables": {
key: get_environment_variable_value(value)
for key, value in cfg.get("environment_variables").items()
},
},
)
client.create_function(**kwargs)
concurrency = get_concurrency(cfg)
if concurrency > 0:
client.put_function_concurrency(
FunctionName=func_name, ReservedConcurrentExecutions=concurrency
)
def update_function(
cfg,
path_to_zip_file,
existing_cfg,
use_s3=False,
s3_file=None,
preserve_vpc=False,
):
"""Updates the code of an existing Lambda function"""
print("Updating your Lambda function")
byte_stream = read(path_to_zip_file, binary_file=True)
profile_name = cfg.get("profile")
aws_access_key_id = cfg.get("aws_access_key_id")
aws_secret_access_key = cfg.get("aws_secret_access_key")
account_id = get_account_id(
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region",),
)
role = get_role_name(
cfg.get("region"),
account_id,
cfg.get("role", "lambda_basic_execution"),
)
client = get_client(
"lambda",
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region"),
)
# Do we prefer development variable over config?
buck_name = os.environ.get("S3_BUCKET_NAME") or cfg.get("bucket_name")
if use_s3:
client.update_function_code(
FunctionName=cfg.get("function_name"),
S3Bucket="{}".format(buck_name),
S3Key="{}".format(s3_file),
Publish=True,
)
else:
client.update_function_code(
FunctionName=cfg.get("function_name"),
ZipFile=byte_stream,
Publish=True,
)
kwargs = {
"FunctionName": cfg.get("function_name"),
"Role": role,
"Runtime": cfg.get("runtime"),
"Handler": cfg.get("handler"),
"Description": cfg.get("description", ""),
"Timeout": cfg.get("timeout", 15),
"MemorySize": cfg.get("memory_size", 512),
}
if preserve_vpc:
kwargs["VpcConfig"] = existing_cfg.get("Configuration", {}).get(
"VpcConfig"
)
if kwargs["VpcConfig"] is None:
kwargs["VpcConfig"] = {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
}
else:
del kwargs["VpcConfig"]["VpcId"]
else:
kwargs["VpcConfig"] = {
"SubnetIds": cfg.get("subnet_ids", []),
"SecurityGroupIds": cfg.get("security_group_ids", []),
}
if "environment_variables" in cfg:
kwargs.update(
Environment={
"Variables": {
key: str(get_environment_variable_value(value))
for key, value in cfg.get("environment_variables").items()
},
},
)
ret = client.update_function_configuration(**kwargs)
concurrency = get_concurrency(cfg)
if concurrency > 0:
client.put_function_concurrency(
FunctionName=cfg.get("function_name"),
ReservedConcurrentExecutions=concurrency,
)
elif "Concurrency" in existing_cfg:
client.delete_function_concurrency(
FunctionName=cfg.get("function_name")
)
if "tags" in cfg:
tags = {key: str(value) for key, value in cfg.get("tags").items()}
if tags != existing_cfg.get("Tags"):
if existing_cfg.get("Tags"):
client.untag_resource(
Resource=ret["FunctionArn"],
TagKeys=list(existing_cfg["Tags"].keys()),
)
client.tag_resource(Resource=ret["FunctionArn"], Tags=tags)
def upload_s3(cfg, path_to_zip_file, *use_s3):
"""Upload a function to AWS S3."""
print("Uploading your new Lambda function")
profile_name = cfg.get("profile")
aws_access_key_id = cfg.get("aws_access_key_id")
aws_secret_access_key = cfg.get("aws_secret_access_key")
client = get_client(
"s3",
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region"),
)
byte_stream = b""
with open(path_to_zip_file, mode="rb") as fh:
byte_stream = fh.read()
s3_key_prefix = cfg.get("s3_key_prefix", "/dist")
checksum = hashlib.new("md5", byte_stream).hexdigest()
timestamp = str(time.time())
filename = "{prefix}{checksum}-{ts}.zip".format(
prefix=s3_key_prefix, checksum=checksum, ts=timestamp,
)
# Do we prefer development variable over config?
buck_name = os.environ.get("S3_BUCKET_NAME") or cfg.get("bucket_name")
func_name = os.environ.get("LAMBDA_FUNCTION_NAME") or cfg.get(
"function_name"
)
kwargs = {
"Bucket": "{}".format(buck_name),
"Key": "{}".format(filename),
"Body": byte_stream,
}
client.put_object(**kwargs)
print("Finished uploading {} to S3 bucket {}".format(func_name, buck_name))
if use_s3:
return filename
def get_function_config(cfg):
"""Check whether a function exists or not and return its config"""
function_name = cfg.get("function_name")
profile_name = cfg.get("profile")
aws_access_key_id = cfg.get("aws_access_key_id")
aws_secret_access_key = cfg.get("aws_secret_access_key")
client = get_client(
"lambda",
profile_name,
aws_access_key_id,
aws_secret_access_key,
cfg.get("region"),
)
try:
return client.get_function(FunctionName=function_name)
except client.exceptions.ResourceNotFoundException as e:
if "Function not found" in str(e):
return False
def get_concurrency(cfg):
"""Return the Reserved Concurrent Executions if present in the config"""
concurrency = int(cfg.get("concurrency", 0))
return max(0, concurrency)
def read_cfg(path_to_config_file, profile_name):
cfg = read(path_to_config_file, loader=yaml.full_load)
if profile_name is not None:
cfg["profile"] = profile_name
elif "AWS_PROFILE" in os.environ:
cfg["profile"] = os.environ["AWS_PROFILE"]
return cfg
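# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). It assumes the
# package is importable as aws_lambda.aws_lambda and that "./myfunc" is a
# project directory containing a valid config.yaml and service.py:
#
#     from aws_lambda import aws_lambda
#     aws_lambda.init("./myfunc")                  # copy the template files
#     aws_lambda.invoke("./myfunc", verbose=True)  # run the handler locally
#     aws_lambda.deploy("./myfunc")                # build the zip and upload
#
# deploy() additionally needs working AWS credentials via the profile or keys
# named in config.yaml.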
| nficano/python-lambda | aws_lambda/aws_lambda.py | Python | isc | 26,779 |
# Copyright (c) 2015, Max Fillinger <max@max-fillinger.net>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
# OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
# The epub format specification is available at http://idpf.org/epub/201
'''Contains the EpubBuilder class to build epub2.0.1 files with the getebook
module.'''
import html
import re
import datetime
import getebook
import os.path
import zipfile
__all__ = ['EpubBuilder', 'EpubTOC', 'Author']
def _normalize(name):
'''Transform "Firstname [Middlenames] Lastname" into
"Lastname, Firstname [Middlenames]".'''
split = name.split()
if len(split) == 1:
return name
return split[-1] + ', ' + ' '.join(split[0:-1])
def _make_starttag(tag, attrs):
'Write a starttag.'
out = '<' + tag
for key in attrs:
out += ' {}="{}"'.format(key, html.escape(attrs[key]))
out += '>'
return out
def _make_xml_elem(tag, text, attr = []):
'Write a flat xml element.'
out = ' <' + tag
for (key, val) in attr:
out += ' {}="{}"'.format(key, val)
if text:
out += '>{}</{}>\n'.format(text, tag)
else:
out += ' />\n'
return out
class EpubTOC(getebook.TOC):
'Table of contents.'
_head = ((
'<?xml version="1.0" encoding="UTF-8"?>\n'
'<ncx xmlns="http://www.daisy.org/z3986/2005/ncx/" version="2005-1" xml:lang="en-US">\n'
' <head>\n'
' <meta name="dtb:uid" content="{}" />\n'
' <meta name="dtb:depth" content="{}" />\n'
' <meta name="dtb:totalPageCount" content="0" />\n'
' <meta name="dtb:maxPageNumber" content="0" />\n'
' </head>\n'
' <docTitle>\n'
' <text>{}</text>\n'
' </docTitle>\n'
))
_doc_author = ((
' <docAuthor>\n'
' <text>{}</text>\n'
' </docAuthor>\n'
))
_navp = ((
'{0}<navPoint id="nav{1}">\n'
'{0} <navLabel>\n'
'{0} <text>{2}</text>\n'
'{0} </navLabel>\n'
'{0} <content src="{3}" />\n'
))
def _navp_xml(self, entry, indent_lvl):
'Write xml for an entry and all its subentries.'
xml = self._navp.format(' '*indent_lvl, str(entry.no), entry.text,
entry.target)
for sub in entry.entries:
xml += self._navp_xml(sub, indent_lvl+1)
xml += ' '*indent_lvl + '</navPoint>\n'
return xml
def write_xml(self, uid, title, authors):
'Write the xml code for the table of contents.'
xml = self._head.format(uid, self.max_depth, title)
for aut in authors:
xml += self._doc_author.format(aut)
xml += ' <navMap>\n'
for entry in self.entries:
xml += self._navp_xml(entry, 2)
xml += ' </navMap>\n</ncx>'
return xml
class _Fileinfo:
'Information about a component file of an epub.'
def __init__(self, name, in_spine = True, guide_title = None,
guide_type = None):
'''Initialize the object. If the file does not belong in the
reading order, in_spine should be set to False. If it should
appear in the guide, set guide_title and guide_type.'''
self.name = name
(self.ident, ext) = os.path.splitext(name)
name_split = name.rsplit('.', 1)
self.ident = name_split[0]
self.in_spine = in_spine
self.guide_title = guide_title
self.guide_type = guide_type
# Infer media-type from file extension
ext = ext.lower()
if ext in ('.htm', '.html', '.xhtml'):
self.media_type = 'application/xhtml+xml'
elif ext in ('.png', '.gif', '.jpeg'):
self.media_type = 'image/' + ext[1:]  # drop the leading dot
elif ext == '.jpg':
self.media_type = 'image/jpeg'
elif ext == '.css':
self.media_type = 'text/css'
elif ext == '.ncx':
self.media_type = 'application/x-dtbncx+xml'
else:
raise ValueError('Can\'t infer media-type from extension: %s' % ext)
def manifest_entry(self):
'Write the XML element for the manifest.'
return _make_xml_elem('item', '',
[
('href', self.name),
('id', self.ident),
('media-type', self.media_type)
])
def spine_entry(self):
'''Write the XML element for the spine.
(Empty string if in_spine is False.)'''
if self.in_spine:
return _make_xml_elem('itemref', '', [('idref', self.ident)])
else:
return ''
def guide_entry(self):
'''Write the XML element for the guide.
(Empty string if no guide title and type are given.)'''
if self.guide_title and self.guide_type:
return _make_xml_elem('reference', '',
[
('title', self.guide_title),
('type', self.guide_type),
('href', self.name)
])
else:
return ''
class _EpubMeta:
'Metadata entry for an epub file.'
def __init__(self, tag, text, *args):
'''The metadata entry is an XML element. *args is used for
supplying the XML element's attributes as (key, value) pairs.'''
self.tag = tag
self.text = text
self.attr = args
def write_xml(self):
'Write the XML element.'
return _make_xml_elem(self.tag, self.text, self.attr)
def __repr__(self):
'Returns the text.'
return self.text
def __str__(self):
'Returns the text.'
return self.text
class _EpubDate(_EpubMeta):
'Metadata element for the publication date.'
_date_re = re.compile('^([0-9]{4})(-[0-9]{2}(-[0-9]{2})?)?$')
def __init__(self, date):
'''date must be a string of the form "YYYY[-MM[-DD]]". If it is
not of this form, or if the date is invalid, ValueError is
raised.'''
m = self._date_re.match(date)
if not m:
raise ValueError('invalid date format')
year = int(m.group(1))
try:
mon = int(m.group(2)[1:])
if mon < 0 or mon > 12:
raise ValueError('month must be in 1..12')
except (TypeError, IndexError):
# month not given
pass
try:
day = int(m.group(3)[1:])
datetime.date(year, mon, day) # raises ValueError if invalid
except (TypeError, IndexError):
# day not given
pass
self.tag = 'dc:date'
self.text = date
self.attr = ()
class _EpubLang(_EpubMeta):
'Metadata element for the language of the book.'
_lang_re = re.compile('^[a-z]{2}(-[A-Z]{2})?$')
def __init__(self, lang):
'''lang must be a lower-case two-letter language code,
optionally followed by a "-" and a upper-case two-letter country
code. (e.g., "en", "en-US", "en-UK", "de", "de-DE", "de-AT")'''
if self._lang_re.match(lang):
self.tag = 'dc:language'
self.text = lang
self.attr = ()
else:
raise ValueError('invalid language format')
class Author(_EpubMeta):
'''To control the file-as and role attribute for the authors, pass
an Author object to the EpubBuilder instead of a string. The file-as
attribute is a form of the name used for sorting. The role attribute
describes how the person was involved in the work.
You ONLY need this if an author's name is not of the form
"Given-name Family-name", or if you want to specify a role other
than author. Otherwise, you can just pass a string.
The value of role should be a MARC relator, e.g., "aut" for author
or "edt" for editor. See http://www.loc.gov/marc/relators/ for a
full list.'''
def __init__(self, name, fileas = None, role = 'aut'):
'''Initialize the object. If the argument "fileas" is not given,
"Last-name, First-name" is used for the file-as attribute. If
the argument "role" is not given, "aut" is used for the role
attribute.'''
if not fileas:
fileas = _normalize(name)
self.tag = 'dc:creator'
self.text = name
self.attr = (('opf:file-as', fileas), ('opf:role', role))
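# Illustrative examples (made-up names): Author('Jane Doe') files as
# 'Doe, Jane' with the default role 'aut'; Author('John Smith', role='edt')
# marks an editor; Author('Ursula K. Le Guin', fileas='Le Guin, Ursula K.')
# overrides the default "Lastname, Firstname" file-as form.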
class _OPFfile:
'''Class for writing the OPF (Open Packaging Format) file for an
epub file. The OPF file contains the metadata, a manifest of all
component files in the epub, a "spine" which specifies the reading
order and a guide which points to important components of the book
such as the title page.'''
_opf = (
'<?xml version="1.0" encoding="UTF-8"?>\n'
'<package version="2.0" xmlns="http://www.idpf.org/2007/opf" unique_identifier="uid_id">\n'
' <metadata xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:opf="http://www.idpf.org/2007/opf">\n'
'{}'
' </metadata>\n'
' <manifest>\n'
'{}'
' </manifest>\n'
' <spine toc="toc">\n'
'{}'
' </spine>\n'
' <guide>\n'
'{}'
' </guide>\n'
'</package>\n'
)
def __init__(self):
'Initialize.'
self.meta = []
self.filelist = []
def write_xml(self):
'Write the XML code for the OPF file.'
metadata = ''
for elem in self.meta:
metadata += elem.write_xml()
manif = ''
spine = ''
guide = ''
for finfo in self.filelist:
manif += finfo.manifest_entry()
spine += finfo.spine_entry()
guide += finfo.guide_entry()
return self._opf.format(metadata, manif, spine, guide)
class EpubBuilder:
'''Builds an epub2.0.1 file. Some of the attributes of this class
(title, uid, lang) are marked as "mandatory" because they represent
metadata that is required by the epub specification. If these
attributes are left unset, default values will be used.'''
_style_css = (
'h1, h2, h3, h4, h5, h6 {\n'
' text-align: center;\n'
'}\n'
'p {\n'
' text-align: justify;\n'
' margin-top: 0.125em;\n'
' margin-bottom: 0em;\n'
' text-indent: 1.0em;\n'
'}\n'
'.getebook-tp {\n'
' margin-top: 8em;\n'
'}\n'
'.getebook-tp-authors {\n'
' font-size: 2em;\n'
' text-align: center;\n'
' margin-bottom: 1em;\n'
'}\n'
'.getebook-tp-title {\n'
' font-weight: bold;\n'
' font-size: 3em;\n'
' text-align: center;\n'
'}\n'
'.getebook-tp-sub {\n'
' text-align: center;\n'
' font-weight: normal;\n'
' font-size: 0.8em;\n'
' margin-top: 1em;\n'
'}\n'
'.getebook-false-h {\n'
' font-weight: bold;\n'
' font-size: 1.5em;\n'
'}\n'
'.getebook-small-h {\n'
' font-style: normal;\n'
' font-weight: normal;\n'
' font-size: 0.8em;\n'
'}\n'
)
_container_xml = (
'<?xml version="1.0"?>\n'
'<container version="1.0" xmlns="urn:oasis:names:tc:opendocument:xmlns:container">\n'
' <rootfiles>\n'
' <rootfile full-path="package.opf" media-type="application/oebps-package+xml"/>\n'
' </rootfiles>\n'
'</container>\n'
)
_html = (
'<?xml version="1.0" encoding="utf-8"?>\n'
'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n'
'<html xmlns="http://www.w3.org/1999/xhtml">\n'
' <head>\n'
' <title>{}</title>\n'
' <meta http-equiv="content-type" content="application/xhtml+xml; charset=utf-8" />\n'
' <link href="style.css" rel="stylesheet" type="text/css" />\n'
' </head>\n'
' <body>\n{}'
' </body>\n'
'</html>\n'
)
_finalized = False
def __init__(self, epub_file):
'''Initialize the EpubBuilder instance. "epub_file" is the
filename of the epub to be created.'''
self.epub_f = zipfile.ZipFile(epub_file, 'w', zipfile.ZIP_DEFLATED)
self.epub_f.writestr('mimetype', 'application/epub+zip')
self.epub_f.writestr('META-INF/container.xml', self._container_xml)
self.toc = EpubTOC()
self.opf = _OPFfile()
self.opf.filelist.append(_Fileinfo('toc.ncx', False))
self.opf.filelist.append(_Fileinfo('style.css', False))
self._authors = []
self.opt_meta = {} # Optional metadata (other than authors)
self.content = ''
self.part_no = 0
self.cont_filename = 'part%03d.html' % self.part_no
def __enter__(self):
'Return self for use in with ... as ... statement.'
return self
def __exit__(self, except_type, except_val, traceback):
'Call finalize() and close the file.'
try:
self.finalize()
finally:
# Close again in case an exception happened in finalize()
self.epub_f.close()
return False
@property
def uid(self):
'''Unique identifier of the ebook. (mandatory)
If this property is left unset, a pseudo-random string will be
generated which is long enough for collisions with existing
ebooks to be extremely unlikely.'''
try:
return self._uid
except AttributeError:
import random
from string import (ascii_letters, digits)
alnum = ascii_letters + digits
self.uid = ''.join([random.choice(alnum) for i in range(15)])
return self._uid
@uid.setter
def uid(self, val):
self._uid = _EpubMeta('dc:identifier', str(val), ('id', 'uid_id'))
@property
def title(self):
'''Title of the ebook. (mandatory)
If this property is left unset, it defaults to "Untitled".'''
try:
return self._title
except AttributeError:
self.title = 'Untitled'
return self._title
@title.setter
def title(self, val):
# If val is not a string, raise TypeError now rather than later.
self._title = _EpubMeta('dc:title', '' + val)
@property
def lang(self):
'''Language of the ebook. (mandatory)
The language must be given as a lower-case two-letter code, optionally
followed by a "-" and an upper-case two-letter country code.
(e.g., "en", "en-US", "en-UK", "de", "de-DE", "de-AT")
If this property is left unset, it defaults to "en".'''
try:
return self._lang
except AttributeError:
self.lang = 'en'
return self._lang
@lang.setter
def lang(self, val):
self._lang = _EpubLang(val)
@property
def author(self):
'''Name of the author. (optional)
If there are multiple authors, pass a list of strings.
To control the file-as and role attribute, use author objects instead
of strings; file-as is an alternate form of the name used for sorting.
For a description of the role attribute, see the docstring of the
author class.'''
if len(self._authors) == 1:
return self._authors[0]
return tuple([aut for aut in self._authors])
@author.setter
def author(self, val):
if isinstance(val, Author) or isinstance(val, str):
authors = [val]
else:
authors = val
for aut in authors:
try:
self._authors.append(Author('' + aut))
except TypeError:
# aut is not a string, so it should be an Author object
self._authors.append(aut)
@author.deleter
def author(self):
self._authors = []
@property
def date(self):
'''Publication date. (optional)
Must be given in "YYYY[-MM[-DD]]" format.'''
try:
return self.opt_meta['date']
except KeyError:
return None
@date.setter
def date(self, val):
self.opt_meta['date'] = _EpubDate(val)
@date.deleter
def date(self):
del self._date
@property
def rights(self):
'Copyright/licensing information. (optional)'
try:
return self.opt_meta['rights']
except KeyError:
return None
@rights.setter
def rights(self, val):
self.opt_meta['rights'] = _EpubMeta('dc:rights', '' + val)
@rights.deleter
def rights(self):
del self._rights
@property
def publisher(self):
'Publisher name. (optional)'
try:
return self.opt_meta['publisher']
except KeyError:
return None
@publisher.setter
def publisher(self, val):
self.opt_meta['publisher'] = _EpubMeta('dc:publisher', '' + val)
@publisher.deleter
def publisher(self):
del self._publisher
@property
def style_css(self):
'''CSS stylesheet for the files that are generated by the EpubBuilder
instance. Can be overwritten or extended, but not deleted.'''
return self._style_css
@style_css.setter
def style_css(self, val):
self._style_css = '' + val
def titlepage(self, main_title = None, subtitle = None):
'''Create a title page for the ebook. If no main_title is given,
the title attribute of the EpubBuilder instance is used.'''
tp = '<div class="getebook-tp">\n'
if len(self._authors) >= 1:
if len(self._authors) == 1:
aut_str = str(self._authors[0])
else:
aut_str = ', '.join(str(aut) for aut in self._authors[0:-1]) \
+ ', and ' + str(self._authors[-1])
tp += '<div class="getebook-tp-authors">%s</div>\n' % aut_str
if not main_title:
main_title = str(self.title)
tp += '<div class="getebook-tp-title">%s' % main_title
if subtitle:
tp += '<div class="getebook-tp-sub">%s</div>' % subtitle
tp += '</div>\n</div>\n'
self.opf.filelist.insert(0, _Fileinfo('title.html',
guide_title = 'Titlepage', guide_type = 'title-page'))
self.epub_f.writestr('title.html', self._html.format(self.title, tp))
def headingpage(self, heading, subtitle = None, toc_text = None):
'''Create a page containing only a (large) heading, optionally
with a smaller subtitle. If toc_text is not given, it defaults
to the heading.'''
self.new_part()
tag = 'h%d' % min(6, self.toc.depth)
self.content += '<div class="getebook-tp">'
self.content += '<{} class="getebook-tp-title">{}'.format(tag, heading)
if subtitle:
self.content += '<div class="getebook-tp-sub">%s</div>' % subtitle
self.content += '</%s>\n' % tag
if not toc_text:
toc_text = heading
self.toc.new_entry(toc_text, self.cont_filename)
self.new_part()
def insert_file(self, name, in_spine = False, guide_title = None,
guide_type = None, arcname = None):
'''Include an external file into the ebook. By default, it will
be added to the archive under its basename; the argument
"arcname" can be used to specify a different name.'''
if not arcname:
arcname = os.path.basename(name)
self.opf.filelist.append(_Fileinfo(arcname, in_spine, guide_title,
guide_type))
self.epub_f.write(name, arcname)
def add_file(self, arcname, str_or_bytes, in_spine = False,
guide_title = None, guide_type = None):
'''Add the string or bytes instance str_or_bytes to the archive
under the name arcname.'''
self.opf.filelist.append(_Fileinfo(arcname, in_spine, guide_title,
guide_type))
self.epub_f.writestr(arcname, str_or_bytes)
def false_heading(self, elem):
'''Handle a "false heading", i.e., text that appears in heading
tags in the source even though it is not a chapter heading.'''
elem.attrs['class'] = 'getebook-false-h'
elem.tag = 'p'
self.handle_elem(elem)
def _heading(self, elem):
'''Write a heading.'''
# Handle paragraph heading if we have one waiting (see the
# par_heading method). We don't use _handle_par_h here because
# we merge it with the subsequent proper heading.
try:
par_h = self.par_h
del self.par_h
except AttributeError:
toc_text = elem.text
else:
# There is a waiting paragraph heading, we merge it with the
# new heading.
toc_text = par_h.text + '. ' + elem.text
par_h.tag = 'div'
par_h.attrs['class'] = 'getebook-small-h'
elem.children.insert(0, par_h)
# Set the class attribute value.
elem.attrs['class'] = 'getebook-chapter-h'
self.toc.new_entry(toc_text, self.cont_filename)
# Add heading to the epub.
tag = 'h%d' % min(self.toc.depth, 6)
self.content += _make_starttag(tag, elem.attrs)
for elem in elem.children:
self.handle_elem(elem)
self.content += '</%s>\n' % tag
def par_heading(self, elem):
'''Handle a "paragraph heading", i.e., a chaper heading or part
of a chapter heading inside paragraph tags. If it is immediately
followed by a heading, they will be merged into one.'''
self.par_h = elem
def _handle_par_h(self):
'Check if there is a waiting paragraph heading and handle it.'
try:
self._heading(self.par_h)
except AttributeError:
pass
def handle_elem(self, elem):
'Handle html element as supplied by getebook.EbookParser.'
try:
tag = elem.tag
except AttributeError:
# elem should be a string
is_string = True
tag = None
else:
is_string = False
if tag in getebook._headings:
self._heading(elem)
else:
# Handle waiting par_h if necessary (see par_heading)
try:
self._heading(self.par_h)
except AttributeError:
pass
if is_string:
self.content += elem
elif tag == 'br':
self.content += '<br />\n'
elif tag == 'img':
self.content += self._handle_image(elem.attrs) + '\n'
elif tag == 'a' or tag == 'noscript':
# Ignore tag, just write child elements
for child in elem.children:
self.handle_elem(child)
else:
self.content += _make_starttag(tag, elem.attrs)
for child in elem.children:
self.handle_elem(child)
self.content += '</%s>' % tag
if tag == 'p':
self.content += '\n'
def _handle_image(self, attrs):
'Returns the alt text of an image tag.'
try:
return attrs['alt']
except KeyError:
return ''
def new_part(self):
'''Begin a new part of the epub. Write the current html document
to the archive and begin a new one.'''
# Handle waiting par_h (see par_heading)
try:
self._heading(self.par_h)
except AttributeError:
pass
if self.content:
html = self._html.format(self.title, self.content)
self.epub_f.writestr(self.cont_filename, html)
self.part_no += 1
self.content = ''
self.cont_filename = 'part%03d.html' % self.part_no
self.opf.filelist.append(_Fileinfo(self.cont_filename))
def finalize(self):
'Complete and close the epub file.'
# Handle waiting par_h (see par_heading)
if self._finalized:
# Avoid finalizing twice. Otherwise, calling finalize inside
# a with-block would lead to an exception when __exit__
# calls finalize again.
return
try:
self._heading(self.par_h)
except AttributeError:
pass
if self.content:
html = self._html.format(self.title, self.content)
self.epub_f.writestr(self.cont_filename, html)
self.opf.meta = [self.uid, self.lang, self.title] + self._authors
self.opf.meta += self.opt_meta.values()
self.epub_f.writestr('package.opf', self.opf.write_xml())
self.epub_f.writestr('toc.ncx',
self.toc.write_xml(self.uid, self.title, self._authors))
self.epub_f.writestr('style.css', self._style_css)
self.epub_f.close()
self._finalized = True
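# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module); the filename
# and metadata are made up, and only the public API defined above is used.
if __name__ == '__main__':
    with EpubBuilder('example.epub') as book:
        book.title = 'An Example Book'
        book.author = 'Jane Doe'
        book.lang = 'en'
        book.titlepage()
        book.headingpage('Chapter One')
    # finalize() runs automatically when the with-block exits.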
| mfil/getebook | getebook/epub.py | Python | isc | 25,314 |
import numpy as np
import pandas as pd
from pandas import Series, DataFrame
from scipy.spatial import distance
import matplotlib.pyplot as plt
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
from sklearn import decomposition # PCA
from sklearn.metrics import confusion_matrix
import json
import ml.Features as ft
from utils import Utils
class Identifier(object):
def __init__(self):
columns = ['mean_height', 'min_height', 'max_height', 'mean_width', 'min_width', 'max_width', 'time', 'girth','id']
self.data = DataFrame(columns=columns)
self.event = []
@staticmethod
def subscribe(ch, method, properties, body):
"""
prints the body message. It's the default callback method
:param ch: keep null
:param method: keep null
:param properties: keep null
:param body: the message
:return:
"""
#first we get the JSON from body
#we check if it's part of the walking event
#if walking event is completed, we
if __name__ == '__main__':
# we setup needed params
MAX_HEIGHT = 203
MAX_WIDTH = 142
SPEED = 3
SAMPLING_RATE = 8
mq_host = '172.26.56.122'
queue_name = 'door_data'
# setting up MQTT subscriber
Utils.sub(queue_name=queue_name, callback=Identifier.subscribe, host=mq_host) | banacer/door-wiz | src/identification/Identifier.py | Python | mit | 1,449 |
#!-*- coding:utf-8 -*-
import time
def retries(times=3, timeout=1):
"""对未捕获异常进行重试"""
def decorator(func):
def _wrapper(*args, **kw):
att, retry = 0, 0
while retry < times:
retry += 1
try:
return func(*args, **kw)
except:
att += timeout
if retry < times:
time.sleep(att)
return _wrapper
return decorator
def empty_content_retries(times=3, timeout=2):
"""响应为空的进行重试"""
def decorator(func):
def _wrapper(*args, **kw):
att, retry = 0, 0
while retry < times:
retry += 1
ret = func(*args, **kw)
if ret:
return ret
att += timeout
time.sleep(att)
return _wrapper
return decorator
def use_logging(level):
"""带参数的装饰器"""
def decorator(func):
print(func.__name__)
def wrapper(*args, **kwargs):
if level == "warn":
print ("level:%s, %s is running" % (level, func.__name__))
elif level == "info":
print ("level:%s, %s is running" % (level, func.__name__))
return func(*args, **kwargs)
return wrapper
return decorator
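# Illustrative sketch (not in the original file): applying `retries` to a
# flaky call; `fetch` and its URL are hypothetical.
#
#     @retries(times=3, timeout=1)
#     def fetch(url):
#         return urllib.urlopen(url).read()
#
# Each uncaught exception sleeps a little longer (timeout, 2*timeout, ...)
# before the next attempt; after `times` failed attempts the call returns None.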
if __name__ == "__main__":
@use_logging(level="warn")
def foo(name='foo'):
print("i am %s" % name)
foo() | wanghuafeng/spider_tools | decorator.py | Python | mit | 1,524 |
"""
********************************************************************
Test file for implementation check of CR3BP library.
********************************************************************
Last update: 21/01/2022
Description
-----------
Contains a few sample orbit propagations to test the CR3BP library.
The orbits currently found in test file include:
- L2 southern NRHO (9:2 NRHO of Lunar Gateway Station)
- Distant Retrograde Orbit (DRO)
- Butterfly Orbit
- L2 Vertical Orbit
"""
# Testing CR3BP implementation
import matplotlib.pyplot as plt
import numpy as np
from astropy import units as u
from CR3BP import getChar_CR3BP, propagate, propagateSTM
from poliastro.bodies import Earth, Moon
# Earth-Moon system properties
k1 = Earth.k.to(u.km**3 / u.s**2).value
k2 = Moon.k.to(u.km**3 / u.s**2).value
r12 = 384747.99198 # Earth-Moon distance
# Compute CR3BP characterisitic values
mu, kstr, lstr, tstr, vstr, nstr = getChar_CR3BP(k1, k2, r12)
# -- Lunar Gateway Station Orbit - 9:2 NRHO
"""
The orbit is a Near-Rectilinear Halo Orbit (NRHO) around the L2 Lagrangian
point of the Earth-Moon system. The orbit presented here is a southern
sub-family of the L2-NRHO. This 9:2 resonant orbit is currently set
as the candidate orbit for the Lunar Gateway Station (LOP-G). It is called
9:2 resonant since a spacecraft completes 9 orbits in the NRHO for
every 2 lunar months (slightly different from the lunar orbit period).
The exact orbital elements presented here are from the author's simulations.
The orbit states were obtained starting from guess solutions given in various
references. A few are provided below:
Ref: White Paper: Gateway Destination Orbit Model: A Continuous 15 Year NRHO
Reference Trajectory - NASA, 2019
Ref: Strategies for Low-Thrust Transfer Design Based on Direct Collocation
Techniques - Park, Howell and Folta
The NRHOs are a subfamily of the Halo orbits. The 'Near-Rectilinear' term comes
from the very elongated shape of the orbit compared to a regular Halo. Halo
orbits occur at all three co-linear equilibrium points L1, L2 and L3. They occur
in a pair of variants (northern and southern) due to the symmetry of the CR3BP.
"""
# 9:2 L2 southern NRHO orbit
r0 = np.array([[1.021881345465263, 0, -0.182000000000000]])
v0 = np.array([0, -0.102950816739606, 0])
tf = 1.509263667286943
# number of points to plot
Nplt = 300
tofs = np.linspace(0, tf, Nplt)
# propagate the base trajectory
rf, vf = propagate(mu, r0, v0, tofs, rtol=1e-11)
# plotting orbit
rf = np.array(rf)
fig = plt.figure()
ax = plt.axes(projection="3d")
ax.set_box_aspect(
(np.ptp(rf[:, 0]), np.ptp(rf[:, 1]), np.ptp(rf[:, 2]))
) # aspect ratio is 1:1:1 in data space
# plotting the moon
ax.plot3D(1 - mu, 0, 0, "ok")
ax.set_title("L2 Southern NRHO")
ax.set_xlabel("x-axis [nd]")
ax.set_ylabel("y-axis [nd]")
ax.set_zlabel("z-axis [nd]")
ax.plot3D(rf[:, 0], rf[:, 1], rf[:, 2], "b")
plt.show()
"""
All other orbits in this section are computed from guess solutions available
in Grebow's Master and PhD thesis. He lists a quite detailed set of methods
to compute most of the major periodic orbits I have presented here. All of
them use differential correction methods which are not yet implemented in this
library.
Ref: GENERATING PERIODIC ORBITS IN THE CIRCULAR RESTRICTED THREE-BODY PROBLEM
WITH APPLICATIONS TO LUNAR SOUTH POLE COVERAGE
- D.Grebow 2006 (Master thesis)
Ref: TRAJECTORY DESIGN IN THE EARTH-MOON SYSTEM
AND LUNAR SOUTH POLE COVERAGE
- D.Grebow 2010 (PhD desertation)
"""
# -- DRO orbit
# DRO orbit states
r0 = np.array([0.783390492345344, 0, 0])
v0 = np.array([0, 0.548464515316651, 0])
tf = 3.63052604667440
# number of points to plot
Nplt = 300
tofs = np.linspace(0, tf, Nplt)
# propagate the base trajectory
rf, vf = propagate(mu, r0, v0, tofs, rtol=1e-11)
# plotting orbit
rf = np.array(rf)
fig = plt.figure()
ax = plt.axes(projection="3d")
ax.set_box_aspect(
(np.ptp(rf[:, 0]), np.ptp(rf[:, 1]), np.ptp(rf[:, 2]))
) # aspect ratio is 1:1:1 in data space
# plotting the moon
ax.plot3D(1 - mu, 0, 0, "ok")
ax.set_title("Distant Restrograde orbit (DRO)")
ax.set_xlabel("x-axis [nd]")
ax.set_ylabel("y-axis [nd]")
ax.set_zlabel("z-axis [nd]")
ax.plot3D(rf[:, 0], rf[:, 1], rf[:, 2], "m")
plt.show()
# -- Butterfly orbit
# Butterfly orbit states
r0 = np.array([1.03599510774957, 0, 0.173944812752286])
v0 = np.array([0, -0.0798042160573269, 0])
tf = 2.78676904546834
# number of points to plot
Nplt = 300
tofs = np.linspace(0, tf, Nplt)
# propagate the base trajectory
rf, vf = propagate(mu, r0, v0, tofs, rtol=1e-11)
# plotting orbit
rf = np.array(rf)
fig = plt.figure()
ax = plt.axes(projection="3d")
ax.set_box_aspect(
(np.ptp(rf[:, 0]), np.ptp(rf[:, 1]), np.ptp(rf[:, 2]))
) # aspect ratio is 1:1:1 in data space
# plotting the moon
ax.plot3D(1 - mu, 0, 0, "ok")
ax.set_title("Butterfly orbit")
ax.set_xlabel("x-axis [nd]")
ax.set_ylabel("y-axis [nd]")
ax.set_zlabel("z-axis [nd]")
ax.plot3D(rf[:, 0], rf[:, 1], rf[:, 2], "r")
plt.show()
# -- Vertical orbit
# Vertical orbit states
r0 = np.array([0.504689989562366, 0, 0.836429774762193])
v0 = np.array([0, 0.552722840538063, 0])
tf = 6.18448756121754
# number of points to plot
Nplt = 300
tofs = np.linspace(0, tf, Nplt)
# propagate the base trajectory
rf, vf = propagate(mu, r0, v0, tofs, rtol=1e-11)
# plotting orbit
rf = np.array(rf)
fig = plt.figure()
ax = plt.axes(projection="3d")
ax.set_box_aspect(
(np.ptp(rf[:, 0]), np.ptp(rf[:, 1]), np.ptp(rf[:, 2]))
) # aspect ratio is 1:1:1 in data space
# plotting the moon
ax.plot3D(1 - mu, 0, 0, "ok")
ax.set_title("L2 Vertical orbit")
ax.set_xlabel("x-axis [nd]")
ax.set_ylabel("y-axis [nd]")
ax.set_zlabel("z-axis [nd]")
ax.plot3D(rf[:, 0], rf[:, 1], rf[:, 2], "g")
plt.show()
# -- Propagate STM
# propagate base trajectory with state-transition-matrix
STM0 = np.eye(6)
rf, vf, STM = propagateSTM(mu, r0, v0, STM0, tofs, rtol=1e-11)
# STM is a matrix of partial derivatives which are used in Newton-Raphson
# methods for trajectory design
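# Illustrative sketch (not from the original script): one differential-
# correction (Newton) step built from the final STM. It assumes STM[-1] is
# the 6x6 state-transition matrix Phi(tf, 0) for the trajectory above.
Phi = np.asarray(STM[-1]).reshape(6, 6)
x0 = np.concatenate([np.asarray(r0).ravel(), np.asarray(v0).ravel()])
xf = np.concatenate([np.asarray(rf[-1]).ravel(), np.asarray(vf[-1]).ravel()])
defect = xf - x0  # periodicity defect for a (near-)periodic orbit
# First-order update: solve (Phi - I) dx0 = -defect in a least-squares sense,
# since (Phi - I) is singular for an exactly periodic orbit.
dx0 = np.linalg.lstsq(Phi - np.eye(6), -defect, rcond=None)[0]
print("Suggested initial-state correction:", dx0)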
| poliastro/poliastro | contrib/CR3BP/test_run_CR3BP.py | Python | mit | 6,277 |
# -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Operation failed.
FAILEDOPERATION = 'FailedOperation'
# Failed to create the API Gateway trigger.
FAILEDOPERATION_APIGATEWAY = 'FailedOperation.ApiGateway'
# Failed to create the trigger.
FAILEDOPERATION_APIGW = 'FailedOperation.Apigw'
# Failed to get the APM InstanceId.
FAILEDOPERATION_APMCONFIGINSTANCEID = 'FailedOperation.ApmConfigInstanceId'
# The current async event status does not support this operation. Please try again later.
FAILEDOPERATION_ASYNCEVENTSTATUS = 'FailedOperation.AsyncEventStatus'
# Failed to copy the function.
FAILEDOPERATION_COPYFAILED = 'FailedOperation.CopyFailed'
# Copying to this region is not supported.
FAILEDOPERATION_COPYFUNCTION = 'FailedOperation.CopyFunction'
# Failed to operate on the COS resource.
FAILEDOPERATION_COS = 'FailedOperation.Cos'
# Failed to create the alias.
FAILEDOPERATION_CREATEALIAS = 'FailedOperation.CreateAlias'
# Operation failed.
FAILEDOPERATION_CREATEFUNCTION = 'FailedOperation.CreateFunction'
# Failed to create the namespace.
FAILEDOPERATION_CREATENAMESPACE = 'FailedOperation.CreateNamespace'
# This operation cannot be performed in the current function state.
FAILEDOPERATION_CREATETRIGGER = 'FailedOperation.CreateTrigger'
# This operation cannot be performed in the current debugging state.
FAILEDOPERATION_DEBUGMODESTATUS = 'FailedOperation.DebugModeStatus'
# The execution timeout cannot be updated while in debug mode.
FAILEDOPERATION_DEBUGMODEUPDATETIMEOUTFAIL = 'FailedOperation.DebugModeUpdateTimeOutFail'
# Failed to delete the alias.
FAILEDOPERATION_DELETEALIAS = 'FailedOperation.DeleteAlias'
# This operation cannot be performed in the current function state. Please retry when the function state is normal.
FAILEDOPERATION_DELETEFUNCTION = 'FailedOperation.DeleteFunction'
# Failed to delete the layer version.
FAILEDOPERATION_DELETELAYERVERSION = 'FailedOperation.DeleteLayerVersion'
# The default namespace cannot be deleted.
FAILEDOPERATION_DELETENAMESPACE = 'FailedOperation.DeleteNamespace'
# Failed to delete the trigger.
FAILEDOPERATION_DELETETRIGGER = 'FailedOperation.DeleteTrigger'
# The code cannot be updated in the current function state. Please update it when the state is normal.
FAILEDOPERATION_FUNCTIONNAMESTATUSERROR = 'FailedOperation.FunctionNameStatusError'
# The function is being deployed, so this operation cannot be performed.
FAILEDOPERATION_FUNCTIONSTATUSERROR = 'FailedOperation.FunctionStatusError'
# This operation cannot be performed in the current function version state. Please retry when the version state is normal.
FAILEDOPERATION_FUNCTIONVERSIONSTATUSNOTACTIVE = 'FailedOperation.FunctionVersionStatusNotActive'
# Failed to get the alias information.
FAILEDOPERATION_GETALIAS = 'FailedOperation.GetAlias'
# Failed to get the function code address.
FAILEDOPERATION_GETFUNCTIONADDRESS = 'FailedOperation.GetFunctionAddress'
# The current account or namespace has an overdue balance. Please retry when it is available.
FAILEDOPERATION_INSUFFICIENTBALANCE = 'FailedOperation.InsufficientBalance'
# Failed to invoke the function.
FAILEDOPERATION_INVOKEFUNCTION = 'FailedOperation.InvokeFunction'
# The namespace already exists. Please do not create it again.
FAILEDOPERATION_NAMESPACE = 'FailedOperation.Namespace'
# Failed to activate the service.
FAILEDOPERATION_OPENSERVICE = 'FailedOperation.OpenService'
# Operation conflict.
FAILEDOPERATION_OPERATIONCONFLICT = 'FailedOperation.OperationConflict'
# Failed to create the scheduled provisioned concurrency task.
FAILEDOPERATION_PROVISIONCREATETIMER = 'FailedOperation.ProvisionCreateTimer'
# Failed to delete the scheduled provisioned concurrency task.
FAILEDOPERATION_PROVISIONDELETETIMER = 'FailedOperation.ProvisionDeleteTimer'
# The current function version already has a provisioned concurrency task in progress. Please try again later.
FAILEDOPERATION_PROVISIONEDINPROGRESS = 'FailedOperation.ProvisionedInProgress'
# Failed to publish the layer version.
FAILEDOPERATION_PUBLISHLAYERVERSION = 'FailedOperation.PublishLayerVersion'
# A version cannot be published in the current function state. Please publish when the state is normal.
FAILEDOPERATION_PUBLISHVERSION = 'FailedOperation.PublishVersion'
# The role does not exist.
FAILEDOPERATION_QCSROLENOTFOUND = 'FailedOperation.QcsRoleNotFound'
# The current function already has a reserved concurrency setting task in progress. Please try again later.
FAILEDOPERATION_RESERVEDINPROGRESS = 'FailedOperation.ReservedInProgress'
# The topic does not exist.
FAILEDOPERATION_TOPICNOTEXIST = 'FailedOperation.TopicNotExist'
# A user concurrency memory quota setting task is in progress. Please try again later.
FAILEDOPERATION_TOTALCONCURRENCYMEMORYINPROGRESS = 'FailedOperation.TotalConcurrencyMemoryInProgress'
# The specified service has not been activated. You can submit a ticket to request activation.
FAILEDOPERATION_UNOPENEDSERVICE = 'FailedOperation.UnOpenedService'
# Failed to update the alias.
FAILEDOPERATION_UPDATEALIAS = 'FailedOperation.UpdateAlias'
# The code cannot be updated in the current function state. Please update it when the state is normal.
FAILEDOPERATION_UPDATEFUNCTIONCODE = 'FailedOperation.UpdateFunctionCode'
# The UpdateFunctionConfiguration operation failed.
FAILEDOPERATION_UPDATEFUNCTIONCONFIGURATION = 'FailedOperation.UpdateFunctionConfiguration'
# Internal error.
INTERNALERROR = 'InternalError'
# Internal error while creating the API Gateway trigger.
INTERNALERROR_APIGATEWAY = 'InternalError.ApiGateway'
# The CKafka API failed.
INTERNALERROR_CKAFKA = 'InternalError.Ckafka'
# Failed to delete the CMQ trigger.
INTERNALERROR_CMQ = 'InternalError.Cmq'
# Failed to update the trigger.
INTERNALERROR_COS = 'InternalError.Cos'
# ES error.
INTERNALERROR_ES = 'InternalError.ES'
# Internal service exception.
INTERNALERROR_EXCEPTION = 'InternalError.Exception'
# Internal service error.
INTERNALERROR_GETROLEERROR = 'InternalError.GetRoleError'
# Internal system error.
INTERNALERROR_SYSTEM = 'InternalError.System'
# Internal service error.
INTERNALERROR_SYSTEMERROR = 'InternalError.SystemError'
# The FunctionName value does not conform to the specification. Please fix it and try again. See: https://tencentcs.com/5jXKFnBW.
INVALIDPARAMETER_FUNCTIONNAME = 'InvalidParameter.FunctionName'
# Invalid request parameter.
INVALIDPARAMETER_PAYLOAD = 'InvalidParameter.Payload'
# The RoutingConfig parameter was passed in incorrectly.
INVALIDPARAMETER_ROUTINGCONFIG = 'InvalidParameter.RoutingConfig'
# Incorrect parameter value.
INVALIDPARAMETERVALUE = 'InvalidParameterValue'
# The Action value does not conform to the specification. Please fix it and try again. See: https://tencentcs.com/5jXKFnBW.
INVALIDPARAMETERVALUE_ACTION = 'InvalidParameterValue.Action'
# The AdditionalVersionWeights parameter was passed in incorrectly.
INVALIDPARAMETERVALUE_ADDITIONALVERSIONWEIGHTS = 'InvalidParameterValue.AdditionalVersionWeights'
# Deleting the default alias is not supported. Please fix the request and retry.
INVALIDPARAMETERVALUE_ALIAS = 'InvalidParameterValue.Alias'
# Incorrect ApiGateway parameter.
INVALIDPARAMETERVALUE_APIGATEWAY = 'InvalidParameterValue.ApiGateway'
# The ApmConfig parameter was passed in incorrectly.
INVALIDPARAMETERVALUE_APMCONFIG = 'InvalidParameterValue.ApmConfig'
# The ApmConfigInstanceId parameter was passed in incorrectly.
INVALIDPARAMETERVALUE_APMCONFIGINSTANCEID = 'InvalidParameterValue.ApmConfigInstanceId'
# The ApmConfigRegion parameter was passed in incorrectly.
INVALIDPARAMETERVALUE_APMCONFIGREGION = 'InvalidParameterValue.ApmConfigRegion'
# Incorrect Args parameter value.
INVALIDPARAMETERVALUE_ARGS = 'InvalidParameterValue.Args'
# Invalid async retry configuration parameter for the function.
INVALIDPARAMETERVALUE_ASYNCTRIGGERCONFIG = 'InvalidParameterValue.AsyncTriggerConfig'
# Cdn was passed in incorrectly.
INVALIDPARAMETERVALUE_CDN = 'InvalidParameterValue.Cdn'
# Duplicate CFS configuration item.
INVALIDPARAMETERVALUE_CFSPARAMETERDUPLICATE = 'InvalidParameterValue.CfsParameterDuplicate'
# The CFS configuration value does not conform to the specification.
INVALIDPARAMETERVALUE_CFSPARAMETERERROR = 'InvalidParameterValue.CfsParameterError'
# The CFS parameter format does not conform to the specification.
INVALIDPARAMETERVALUE_CFSSTRUCTIONERROR = 'InvalidParameterValue.CfsStructionError'
# Ckafka was passed in incorrectly.
INVALIDPARAMETERVALUE_CKAFKA = 'InvalidParameterValue.Ckafka'
# Incorrect parameter passed in when running the function.
INVALIDPARAMETERVALUE_CLIENTCONTEXT = 'InvalidParameterValue.ClientContext'
# Cls was passed in incorrectly.
INVALIDPARAMETERVALUE_CLS = 'InvalidParameterValue.Cls'
# The Role parameter is required to modify the CLS configuration. Please fix the request and retry.
INVALIDPARAMETERVALUE_CLSROLE = 'InvalidParameterValue.ClsRole'
# Cmq was passed in incorrectly.
INVALIDPARAMETERVALUE_CMQ = 'InvalidParameterValue.Cmq'
# Code was passed in incorrectly.
INVALIDPARAMETERVALUE_CODE = 'InvalidParameterValue.Code'
# CodeSecret was passed in incorrectly.
INVALIDPARAMETERVALUE_CODESECRET = 'InvalidParameterValue.CodeSecret'
# CodeSource was passed in incorrectly.
INVALIDPARAMETERVALUE_CODESOURCE = 'InvalidParameterValue.CodeSource'
# Incorrect Command[Entrypoint] parameter value.
INVALIDPARAMETERVALUE_COMMAND = 'InvalidParameterValue.Command'
# The CompatibleRuntimes parameter was passed in incorrectly.
INVALIDPARAMETERVALUE_COMPATIBLERUNTIMES = 'InvalidParameterValue.CompatibleRuntimes'
# The Content parameter was passed in incorrectly.
INVALIDPARAMETERVALUE_CONTENT = 'InvalidParameterValue.Content'
# Cos was passed in incorrectly.
INVALIDPARAMETERVALUE_COS = 'InvalidParameterValue.Cos'
# CosBucketName does not conform to the specification.
INVALIDPARAMETERVALUE_COSBUCKETNAME = 'InvalidParameterValue.CosBucketName'
# The CosBucketRegion value does not conform to the specification. Please fix it and try again. See: https://tencentcs.com/5jXKFnBW.
INVALIDPARAMETERVALUE_COSBUCKETREGION = 'InvalidParameterValue.CosBucketRegion'
# CosObjectName does not conform to the specification.
INVALIDPARAMETERVALUE_COSOBJECTNAME = 'InvalidParameterValue.CosObjectName'
# The CustomArgument parameter exceeds the length limit.
INVALIDPARAMETERVALUE_CUSTOMARGUMENT = 'InvalidParameterValue.CustomArgument'
# DateTime was passed in incorrectly.
INVALIDPARAMETERVALUE_DATETIME = 'InvalidParameterValue.DateTime'
# The DeadLetterConfig value does not conform to the specification. Please fix it and try again. See: https://tencentcs.com/5jXKFnBW.
INVALIDPARAMETERVALUE_DEADLETTERCONFIG = 'InvalidParameterValue.DeadLetterConfig'
# The default namespace cannot be created.
INVALIDPARAMETERVALUE_DEFAULTNAMESPACE = 'InvalidParameterValue.DefaultNamespace'
# Description was passed in incorrectly.
INVALIDPARAMETERVALUE_DESCRIPTION = 'InvalidParameterValue.Description'
# The DNS[OS_NAMESERVER] environment variable is configured incorrectly.
INVALIDPARAMETERVALUE_DNSINFO = 'InvalidParameterValue.DnsInfo'
# Incorrect EipConfig parameter.
INVALIDPARAMETERVALUE_EIPCONFIG = 'InvalidParameterValue.EipConfig'
# The Enable value does not conform to the specification. Please fix it and try again. See: https://tencentcs.com/5jXKFnBW.
INVALIDPARAMETERVALUE_ENABLE = 'InvalidParameterValue.Enable'
# Environment was passed in incorrectly.
INVALIDPARAMETERVALUE_ENVIRONMENT = 'InvalidParameterValue.Environment'
# The environment variables exceed the size limit. Please keep them within 4 KB.
INVALIDPARAMETERVALUE_ENVIRONMENTEXCEEDEDLIMIT = 'InvalidParameterValue.EnvironmentExceededLimit'
# Modifying the function's system and runtime environment variables is not supported.
INVALIDPARAMETERVALUE_ENVIRONMENTSYSTEMPROTECT = 'InvalidParameterValue.EnvironmentSystemProtect'
# Incorrect Filters parameter.
INVALIDPARAMETERVALUE_FILTERS = 'InvalidParameterValue.Filters'
# The Function value does not conform to the specification. Please fix it and try again. See: https://tencentcs.com/5jXKFnBW.
INVALIDPARAMETERVALUE_FUNCTION = 'InvalidParameterValue.Function'
# The function does not exist.
INVALIDPARAMETERVALUE_FUNCTIONNAME = 'InvalidParameterValue.FunctionName'
# GitBranch does not conform to the specification.
INVALIDPARAMETERVALUE_GITBRANCH = 'InvalidParameterValue.GitBranch'
# The GitCommitId value does not conform to the specification. Please fix it and try again. See: https://tencentcs.com/5jXKFnBW.
INVALIDPARAMETERVALUE_GITCOMMITID = 'InvalidParameterValue.GitCommitId'
# GitDirectory does not conform to the specification.
INVALIDPARAMETERVALUE_GITDIRECTORY = 'InvalidParameterValue.GitDirectory'
# GitPassword does not conform to the specification.
INVALIDPARAMETERVALUE_GITPASSWORD = 'InvalidParameterValue.GitPassword'
# GitUrl does not conform to the specification.
INVALIDPARAMETERVALUE_GITURL = 'InvalidParameterValue.GitUrl'
# GitUserName does not conform to the specification.
INVALIDPARAMETERVALUE_GITUSERNAME = 'InvalidParameterValue.GitUserName'
# Handler was passed in incorrectly.
INVALIDPARAMETERVALUE_HANDLER = 'InvalidParameterValue.Handler'
# The IdleTimeOut parameter was passed in incorrectly.
INVALIDPARAMETERVALUE_IDLETIMEOUT = 'InvalidParameterValue.IdleTimeOut'
# imageUri was passed in incorrectly.
INVALIDPARAMETERVALUE_IMAGEURI = 'InvalidParameterValue.ImageUri'
# Invalid InlineZipFile.
INVALIDPARAMETERVALUE_INLINEZIPFILE = 'InvalidParameterValue.InlineZipFile'
# The InvokeType value does not conform to the specification. Please fix it and try again.
INVALIDPARAMETERVALUE_INVOKETYPE = 'InvalidParameterValue.InvokeType'
# The L5Enable value does not conform to the specification. Please fix it and try again.
INVALIDPARAMETERVALUE_L5ENABLE = 'InvalidParameterValue.L5Enable'
# The LayerName parameter was passed in incorrectly.
INVALIDPARAMETERVALUE_LAYERNAME = 'InvalidParameterValue.LayerName'
# The Layers parameter was passed in incorrectly.
INVALIDPARAMETERVALUE_LAYERS = 'InvalidParameterValue.Layers'
# Limit was passed in incorrectly.
INVALIDPARAMETERVALUE_LIMIT = 'InvalidParameterValue.Limit'
# The parameter exceeds the length limit.
INVALIDPARAMETERVALUE_LIMITEXCEEDED = 'InvalidParameterValue.LimitExceeded'
# The Memory value does not conform to the specification. Please fix it and try again. See: https://tencentcs.com/5jXKFnBW.
INVALIDPARAMETERVALUE_MEMORY = 'InvalidParameterValue.Memory'
# Incorrect MemorySize.
INVALIDPARAMETERVALUE_MEMORYSIZE = 'InvalidParameterValue.MemorySize'
# The MinCapacity parameter was passed in incorrectly.
INVALIDPARAMETERVALUE_MINCAPACITY = 'InvalidParameterValue.MinCapacity'
# The Name parameter was passed in incorrectly.
INVALIDPARAMETERVALUE_NAME = 'InvalidParameterValue.Name'
# The Namespace parameter was passed in incorrectly.
INVALIDPARAMETERVALUE_NAMESPACE = 'InvalidParameterValue.Namespace'
# Invalid format: Namespace must consist of letters, digits, and the symbols - and _, with a maximum length of 30.
INVALIDPARAMETERVALUE_NAMESPACEINVALID = 'InvalidParameterValue.NamespaceInvalid'
# The NodeSpec parameter was passed in incorrectly.
INVALIDPARAMETERVALUE_NODESPEC = 'InvalidParameterValue.NodeSpec'
# The NodeType parameter was passed in incorrectly.
INVALIDPARAMETERVALUE_NODETYPE = 'InvalidParameterValue.NodeType'
# Invalid offset.
INVALIDPARAMETERVALUE_OFFSET = 'InvalidParameterValue.Offset'
# Order was passed in incorrectly.
INVALIDPARAMETERVALUE_ORDER = 'InvalidParameterValue.Order'
# The OrderBy value does not conform to the specification. Please fix it and try again. See: https://tencentcs.com/5jXKFnBW.
INVALIDPARAMETERVALUE_ORDERBY = 'InvalidParameterValue.OrderBy'
# The input parameter is not valid JSON.
INVALIDPARAMETERVALUE_PARAM = 'InvalidParameterValue.Param'
# The ProtocolType parameter was passed in incorrectly.
INVALIDPARAMETERVALUE_PROTOCOLTYPE = 'InvalidParameterValue.ProtocolType'
# Duplicate cron configuration for scheduled provisioned concurrency.
INVALIDPARAMETERVALUE_PROVISIONTRIGGERCRONCONFIGDUPLICATE = 'InvalidParameterValue.ProvisionTriggerCronConfigDuplicate'
# The TriggerName parameter was passed in incorrectly.
INVALIDPARAMETERVALUE_PROVISIONTRIGGERNAME = 'InvalidParameterValue.ProvisionTriggerName'
# Duplicate TriggerName.
INVALIDPARAMETERVALUE_PROVISIONTRIGGERNAMEDUPLICATE = 'InvalidParameterValue.ProvisionTriggerNameDuplicate'
# The ProvisionType parameter was passed in incorrectly.
INVALIDPARAMETERVALUE_PROVISIONTYPE = 'InvalidParameterValue.ProvisionType'
# Incorrect PublicNetConfig parameter.
INVALIDPARAMETERVALUE_PUBLICNETCONFIG = 'InvalidParameterValue.PublicNetConfig'
# Unsupported function version.
INVALIDPARAMETERVALUE_QUALIFIER = 'InvalidParameterValue.Qualifier'
# Incorrect value passed for the Enterprise Edition image instance ID [RegistryId].
INVALIDPARAMETERVALUE_REGISTRYID = 'InvalidParameterValue.RegistryId'
# Invalid RetCode.
INVALIDPARAMETERVALUE_RETCODE = 'InvalidParameterValue.RetCode'
# The RoutingConfig value does not conform to the specification. Please fix it and try again. See: https://tencentcs.com/5jXKFnBW.
INVALIDPARAMETERVALUE_ROUTINGCONFIG = 'InvalidParameterValue.RoutingConfig'
# Runtime was passed in incorrectly.
INVALIDPARAMETERVALUE_RUNTIME = 'InvalidParameterValue.Runtime'
# searchkey is not Keyword, Tag, or Runtime.
INVALIDPARAMETERVALUE_SEARCHKEY = 'InvalidParameterValue.SearchKey'
# Incorrect SecretInfo.
INVALIDPARAMETERVALUE_SECRETINFO = 'InvalidParameterValue.SecretInfo'
# ServiceName does not conform to the naming convention.
INVALIDPARAMETERVALUE_SERVICENAME = 'InvalidParameterValue.ServiceName'
# The Stamp value does not conform to the specification. Please fix it and try again.
INVALIDPARAMETERVALUE_STAMP = 'InvalidParameterValue.Stamp'
# The start time was passed in incorrectly.
INVALIDPARAMETERVALUE_STARTTIME = 'InvalidParameterValue.StartTime'
# The start date and end date must be specified together.
INVALIDPARAMETERVALUE_STARTTIMEORENDTIME = 'InvalidParameterValue.StartTimeOrEndTime'
# The Status value does not conform to the specification. Please fix it and try again.
INVALIDPARAMETERVALUE_STATUS = 'InvalidParameterValue.Status'
# Incorrect system environment variable.
INVALIDPARAMETERVALUE_SYSTEMENVIRONMENT = 'InvalidParameterValue.SystemEnvironment'
# Invalid TempCosObjectName.
INVALIDPARAMETERVALUE_TEMPCOSOBJECTNAME = 'InvalidParameterValue.TempCosObjectName'
# The TraceEnable value does not conform to the specification. Please fix it and try again.
INVALIDPARAMETERVALUE_TRACEENABLE = 'InvalidParameterValue.TraceEnable'
# The TrackingTarget parameter was entered incorrectly.
INVALIDPARAMETERVALUE_TRACKINGTARGET = 'InvalidParameterValue.TrackingTarget'
# The TriggerCronConfig parameter was passed in incorrectly.
INVALIDPARAMETERVALUE_TRIGGERCRONCONFIG = 'InvalidParameterValue.TriggerCronConfig'
# The scheduled trigger interval in TriggerCronConfig is less than the required value.
INVALIDPARAMETERVALUE_TRIGGERCRONCONFIGTIMEINTERVAL = 'InvalidParameterValue.TriggerCronConfigTimeInterval'
# The TriggerDesc parameter was passed in incorrectly.
INVALIDPARAMETERVALUE_TRIGGERDESC = 'InvalidParameterValue.TriggerDesc'
# TriggerName was passed in incorrectly.
INVALIDPARAMETERVALUE_TRIGGERNAME = 'InvalidParameterValue.TriggerName'
# The TriggerProvisionedConcurrencyNum parameter was passed in incorrectly.
INVALIDPARAMETERVALUE_TRIGGERPROVISIONEDCONCURRENCYNUM = 'InvalidParameterValue.TriggerProvisionedConcurrencyNum'
# Type was passed in incorrectly.
INVALIDPARAMETERVALUE_TYPE = 'InvalidParameterValue.Type'
# VPC must be enabled when the CFS configuration is enabled.
INVALIDPARAMETERVALUE_VPCNOTSETWHENOPENCFS = 'InvalidParameterValue.VpcNotSetWhenOpenCfs'
# The WebSocketsParams parameter was passed in incorrectly.
INVALIDPARAMETERVALUE_WEBSOCKETSPARAMS = 'InvalidParameterValue.WebSocketsParams'
# The file is not a standard zip file. Please re-compress it and try again.
INVALIDPARAMETERVALUE_ZIPFILE = 'InvalidParameterValue.ZipFile'
# Base64 decoding of the compressed file failed: `Incorrect padding`. Please fix it and try again.
INVALIDPARAMETERVALUE_ZIPFILEBASE64BINASCIIERROR = 'InvalidParameterValue.ZipFileBase64BinasciiError'
# The number of aliases exceeds the maximum limit.
LIMITEXCEEDED_ALIAS = 'LimitExceeded.Alias'
# CDN usage exceeds the maximum limit.
LIMITEXCEEDED_CDN = 'LimitExceeded.Cdn'
# EIP resources exceed the limit.
LIMITEXCEEDED_EIP = 'LimitExceeded.Eip'
# The number of functions exceeds the maximum limit. You can [submit a ticket](https://cloud.tencent.com/act/event/Online_service?from=scf%7Cindex) to request a higher limit.
LIMITEXCEEDED_FUNCTION = 'LimitExceeded.Function'
# The number of functions under the same topic exceeds the maximum limit.
LIMITEXCEEDED_FUNCTIONONTOPIC = 'LimitExceeded.FunctionOnTopic'
# FunctionProvisionedConcurrencyMemory has reached the limit. You can submit a ticket to request a higher limit: https://tencentcs.com/7Fixwt63.
LIMITEXCEEDED_FUNCTIONPROVISIONEDCONCURRENCYMEMORY = 'LimitExceeded.FunctionProvisionedConcurrencyMemory'
# The function's reserved concurrency memory exceeds the limit.
LIMITEXCEEDED_FUNCTIONRESERVEDCONCURRENCYMEMORY = 'LimitExceeded.FunctionReservedConcurrencyMemory'
# FunctionTotalProvisionedConcurrencyMemory has reached the limit. You can submit a ticket to request a higher limit: https://tencentcs.com/7Fixwt63.
LIMITEXCEEDED_FUNCTIONTOTALPROVISIONEDCONCURRENCYMEMORY = 'LimitExceeded.FunctionTotalProvisionedConcurrencyMemory'
# The function's total provisioned concurrency has reached the limit.
LIMITEXCEEDED_FUNCTIONTOTALPROVISIONEDCONCURRENCYNUM = 'LimitExceeded.FunctionTotalProvisionedConcurrencyNum'
# InitTimeout has reached the limit. You can submit a ticket to request a higher limit: https://tencentcs.com/7Fixwt63.
LIMITEXCEEDED_INITTIMEOUT = 'LimitExceeded.InitTimeout'
# The number of layer versions exceeds the maximum limit.
LIMITEXCEEDED_LAYERVERSIONS = 'LimitExceeded.LayerVersions'
# The number of layers exceeds the maximum limit.
LIMITEXCEEDED_LAYERS = 'LimitExceeded.Layers'
# Memory exceeds the maximum limit.
LIMITEXCEEDED_MEMORY = 'LimitExceeded.Memory'
# The message retention time in the function's async retry configuration exceeds the limit.
LIMITEXCEEDED_MSGTTL = 'LimitExceeded.MsgTTL'
# The number of namespaces exceeds the maximum limit. You can [submit a ticket](https://cloud.tencent.com/act/event/Online_service?from=scf%7Cindex) to request a higher limit.
LIMITEXCEEDED_NAMESPACE = 'LimitExceeded.Namespace'
# Offset exceeds the limit.
LIMITEXCEEDED_OFFSET = 'LimitExceeded.Offset'
# The number of scheduled provisioned concurrency actions exceeds the maximum limit.
LIMITEXCEEDED_PROVISIONTRIGGERACTION = 'LimitExceeded.ProvisionTriggerAction'
# The scheduled trigger interval is below the allowed limit.
LIMITEXCEEDED_PROVISIONTRIGGERINTERVAL = 'LimitExceeded.ProvisionTriggerInterval'
# Quota exceeded.
LIMITEXCEEDED_QUOTA = 'LimitExceeded.Quota'
# The async retry count in the function's async retry configuration exceeds the limit.
LIMITEXCEEDED_RETRYNUM = 'LimitExceeded.RetryNum'
# Timeout exceeds the maximum limit.
LIMITEXCEEDED_TIMEOUT = 'LimitExceeded.Timeout'
# The user's concurrency memory quota has been exceeded.
LIMITEXCEEDED_TOTALCONCURRENCYMEMORY = 'LimitExceeded.TotalConcurrencyMemory'
# The number of triggers exceeds the maximum limit. You can [submit a ticket](https://cloud.tencent.com/act/event/Online_service?from=scf%7Cindex) to request a higher limit.
LIMITEXCEEDED_TRIGGER = 'LimitExceeded.Trigger'
# UserTotalConcurrencyMemory has reached the limit. You can submit a ticket to request a higher limit: https://tencentcs.com/7Fixwt63.
LIMITEXCEEDED_USERTOTALCONCURRENCYMEMORY = 'LimitExceeded.UserTotalConcurrencyMemory'
# Missing parameter error.
MISSINGPARAMETER = 'MissingParameter'
# Code was not passed in.
MISSINGPARAMETER_CODE = 'MissingParameter.Code'
# The Runtime field is missing.
MISSINGPARAMETER_RUNTIME = 'MissingParameter.Runtime'
# The resource is in use.
RESOURCEINUSE = 'ResourceInUse'
# The alias is already in use.
RESOURCEINUSE_ALIAS = 'ResourceInUse.Alias'
# Cdn is already in use.
RESOURCEINUSE_CDN = 'ResourceInUse.Cdn'
# Cmq is already in use.
RESOURCEINUSE_CMQ = 'ResourceInUse.Cmq'
# Cos is already in use.
RESOURCEINUSE_COS = 'ResourceInUse.Cos'
# The function already exists.
RESOURCEINUSE_FUNCTION = 'ResourceInUse.Function'
# FunctionName already exists.
RESOURCEINUSE_FUNCTIONNAME = 'ResourceInUse.FunctionName'
# The layer version is in use.
RESOURCEINUSE_LAYERVERSION = 'ResourceInUse.LayerVersion'
# The namespace already exists.
RESOURCEINUSE_NAMESPACE = 'ResourceInUse.Namespace'
# TriggerName already exists.
RESOURCEINUSE_TRIGGER = 'ResourceInUse.Trigger'
# TriggerName already exists.
RESOURCEINUSE_TRIGGERNAME = 'ResourceInUse.TriggerName'
# Insufficient COS resources.
RESOURCEINSUFFICIENT_COS = 'ResourceInsufficient.COS'
# The resource does not exist.
RESOURCENOTFOUND = 'ResourceNotFound'
# The alias does not exist.
RESOURCENOTFOUND_ALIAS = 'ResourceNotFound.Alias'
# The specified AsyncEvent was not found. Please create it and try again.
RESOURCENOTFOUND_ASYNCEVENT = 'ResourceNotFound.AsyncEvent'
# Cdn does not exist.
RESOURCENOTFOUND_CDN = 'ResourceNotFound.Cdn'
# The specified mount target was not found under the specified CFS instance.
RESOURCENOTFOUND_CFSMOUNTINSNOTMATCH = 'ResourceNotFound.CfsMountInsNotMatch'
# The CFS status was detected as unavailable.
RESOURCENOTFOUND_CFSSTATUSERROR = 'ResourceNotFound.CfsStatusError'
# The CFS instance and the cloud function are not in the same VPC.
RESOURCENOTFOUND_CFSVPCNOTMATCH = 'ResourceNotFound.CfsVpcNotMatch'
# Ckafka does not exist.
RESOURCENOTFOUND_CKAFKA = 'ResourceNotFound.Ckafka'
# Cmq does not exist.
RESOURCENOTFOUND_CMQ = 'ResourceNotFound.Cmq'
# Cos does not exist.
RESOURCENOTFOUND_COS = 'ResourceNotFound.Cos'
# The demo does not exist.
RESOURCENOTFOUND_DEMO = 'ResourceNotFound.Demo'
# The function does not exist.
RESOURCENOTFOUND_FUNCTION = 'ResourceNotFound.Function'
# The function does not exist.
RESOURCENOTFOUND_FUNCTIONNAME = 'ResourceNotFound.FunctionName'
# The function version does not exist.
RESOURCENOTFOUND_FUNCTIONVERSION = 'ResourceNotFound.FunctionVersion'
# Error getting the CFS mount target information.
RESOURCENOTFOUND_GETCFSMOUNTINSERROR = 'ResourceNotFound.GetCfsMountInsError'
# Error getting the CFS information.
RESOURCENOTFOUND_GETCFSNOTMATCH = 'ResourceNotFound.GetCfsNotMatch'
# The specified ImageConfig was not found. Please create it and try again.
RESOURCENOTFOUND_IMAGECONFIG = 'ResourceNotFound.ImageConfig'
# The layer does not exist.
RESOURCENOTFOUND_LAYER = 'ResourceNotFound.Layer'
# The layer version does not exist.
RESOURCENOTFOUND_LAYERVERSION = 'ResourceNotFound.LayerVersion'
# The namespace does not exist.
RESOURCENOTFOUND_NAMESPACE = 'ResourceNotFound.Namespace'
# The version does not exist.
RESOURCENOTFOUND_QUALIFIER = 'ResourceNotFound.Qualifier'
# The role does not exist.
RESOURCENOTFOUND_ROLE = 'ResourceNotFound.Role'
# The role does not exist.
RESOURCENOTFOUND_ROLECHECK = 'ResourceNotFound.RoleCheck'
# The timer does not exist.
RESOURCENOTFOUND_TIMER = 'ResourceNotFound.Timer'
# The concurrency memory quota resource was not found.
RESOURCENOTFOUND_TOTALCONCURRENCYMEMORY = 'ResourceNotFound.TotalConcurrencyMemory'
# The trigger does not exist.
RESOURCENOTFOUND_TRIGGER = 'ResourceNotFound.Trigger'
# The version does not exist.
RESOURCENOTFOUND_VERSION = 'ResourceNotFound.Version'
# The VPC or subnet does not exist.
RESOURCENOTFOUND_VPC = 'ResourceNotFound.Vpc'
# Insufficient balance. Please top up first.
RESOURCEUNAVAILABLE_INSUFFICIENTBALANCE = 'ResourceUnavailable.InsufficientBalance'
# The namespace is unavailable.
RESOURCEUNAVAILABLE_NAMESPACE = 'ResourceUnavailable.Namespace'
# Unauthorized operation.
UNAUTHORIZEDOPERATION = 'UnauthorizedOperation'
# CAM authentication failed.
UNAUTHORIZEDOPERATION_CAM = 'UnauthorizedOperation.CAM'
# No permission to access the code.
UNAUTHORIZEDOPERATION_CODESECRET = 'UnauthorizedOperation.CodeSecret'
# No permission.
UNAUTHORIZEDOPERATION_CREATETRIGGER = 'UnauthorizedOperation.CreateTrigger'
# Unauthorized operation.
UNAUTHORIZEDOPERATION_DELETEFUNCTION = 'UnauthorizedOperation.DeleteFunction'
# No permission.
UNAUTHORIZEDOPERATION_DELETETRIGGER = 'UnauthorizedOperation.DeleteTrigger'
# This API was not called from the console.
UNAUTHORIZEDOPERATION_NOTMC = 'UnauthorizedOperation.NotMC'
# Region error.
UNAUTHORIZEDOPERATION_REGION = 'UnauthorizedOperation.Region'
# No permission to access your COS resources.
UNAUTHORIZEDOPERATION_ROLE = 'UnauthorizedOperation.Role'
# The TempCos Appid does not match the requesting account's APPID.
UNAUTHORIZEDOPERATION_TEMPCOSAPPID = 'UnauthorizedOperation.TempCosAppid'
# This operation cannot be performed.
UNAUTHORIZEDOPERATION_UPDATEFUNCTIONCODE = 'UnauthorizedOperation.UpdateFunctionCode'
# Unsupported operation.
UNSUPPORTEDOPERATION = 'UnsupportedOperation'
# The resource still has aliases bound, so this operation is not supported. Please unbind the aliases and retry.
UNSUPPORTEDOPERATION_ALIASBIND = 'UnsupportedOperation.AliasBind'
# The specified AsyncRunEnable configuration is not supported yet. Please fix it and try again.
UNSUPPORTEDOPERATION_ASYNCRUNENABLE = 'UnsupportedOperation.AsyncRunEnable'
# Cdn is not supported.
UNSUPPORTEDOPERATION_CDN = 'UnsupportedOperation.Cdn'
# The COS operation is not supported.
UNSUPPORTEDOPERATION_COS = 'UnsupportedOperation.Cos'
# The specified EipFixed configuration is not supported yet.
UNSUPPORTEDOPERATION_EIPFIXED = 'UnsupportedOperation.EipFixed'
# This region is not supported.
UNSUPPORTEDOPERATION_REGION = 'UnsupportedOperation.Region'
# The trigger operation is not supported.
UNSUPPORTEDOPERATION_TRIGGER = 'UnsupportedOperation.Trigger'
# The specified configuration is not supported yet. Please fix it and try again.
UNSUPPORTEDOPERATION_UPDATEFUNCTIONEVENTINVOKECONFIG = 'UnsupportedOperation.UpdateFunctionEventInvokeConfig'
# The specified VpcConfig configuration is not supported yet.
UNSUPPORTEDOPERATION_VPCCONFIG = 'UnsupportedOperation.VpcConfig'
| tzpBingo/github-trending | codespace/python/tencentcloud/scf/v20180416/errorcodes.py | Python | mit | 27,390 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutIteration(Koan):
def test_iterators_are_a_type(self):
it = iter(range(1,6))
total = 0
for num in it:
total += num
self.assertEqual(15 , total)
def test_iterating_with_next(self):
stages = iter(['alpha','beta','gamma'])
try:
self.assertEqual('alpha', next(stages))
next(stages)
self.assertEqual('gamma', next(stages))
next(stages)
except StopIteration as ex:
err_msg = 'Ran out of iterations'
self.assertRegex(err_msg, 'Ran out')
# ------------------------------------------------------------------
def add_ten(self, item):
return item + 10
def test_map_transforms_elements_of_a_list(self):
seq = [1, 2, 3]
mapped_seq = list()
mapping = map(self.add_ten, seq)
self.assertNotEqual(list, mapping.__class__)
self.assertEqual(map, mapping.__class__)
# In Python 3 built in iterator funcs return iterable view objects
# instead of lists
for item in mapping:
mapped_seq.append(item)
self.assertEqual([11, 12, 13], mapped_seq)
# Note, iterator methods actually return objects of iter type in
# python 3. In python 2 map() would give you a list.
def test_filter_selects_certain_items_from_a_list(self):
def is_even(item):
return (item % 2) == 0
seq = [1, 2, 3, 4, 5, 6]
even_numbers = list()
for item in filter(is_even, seq):
even_numbers.append(item)
self.assertEqual([2,4,6], even_numbers)
def test_just_return_first_item_found(self):
def is_big_name(item):
return len(item) > 4
names = ["Jim", "Bill", "Clarence", "Doug", "Eli"]
name = None
iterator = filter(is_big_name, names)
try:
name = next(iterator)
except StopIteration:
msg = 'Ran out of big names'
self.assertEqual("Clarence", name)
# ------------------------------------------------------------------
def add(self,accum,item):
return accum + item
def multiply(self,accum,item):
return accum * item
def test_reduce_will_blow_your_mind(self):
import functools
# As of Python 3 reduce() has been demoted from a builtin function
# to the functools module.
result = functools.reduce(self.add, [2, 3, 4])
self.assertEqual(int, result.__class__)
# Reduce() syntax is same as Python 2
self.assertEqual(9, result)
result2 = functools.reduce(self.multiply, [2, 3, 4], 1)
self.assertEqual(24, result2)
# Extra Credit:
# Describe in your own words what reduce does.
# ------------------------------------------------------------------
def test_use_pass_for_iterations_with_no_body(self):
for num in range(1,5):
pass
self.assertEqual(4, num)
# ------------------------------------------------------------------
def test_all_iteration_methods_work_on_any_sequence_not_just_lists(self):
# Ranges are an iterable sequence
result = map(self.add_ten, range(1,4))
self.assertEqual([11, 12, 13], list(result))
try:
file = open("example_file.txt")
try:
def make_upcase(line):
return line.strip().upper()
upcase_lines = map(make_upcase, file.readlines())
self.assertEqual(["THIS", "IS", "A", "TEST"] , list(upcase_lines))
finally:
# Arg, this is ugly.
# We will figure out how to fix this later.
file.close()
except IOError:
# should never happen
self.fail()
| bohdan7/python_koans | python3/koans/about_iteration.py | Python | mit | 3,923 |
from api_request import Api
from util import Util
from twocheckout import Twocheckout
class Sale(Twocheckout):
def __init__(self, dict_):
super(self.__class__, self).__init__(dict_)
@classmethod
def find(cls, params=None):
if params is None:
params = dict()
response = cls(Api.call('sales/detail_sale', params))
return response.sale
@classmethod
def list(cls, params=None):
if params is None:
params = dict()
response = cls(Api.call('sales/list_sales', params))
return response.sale_summary
def refund(self, params=None):
if params is None:
params = dict()
if hasattr(self, 'lineitem_id'):
params['lineitem_id'] = self.lineitem_id
url = 'sales/refund_lineitem'
elif hasattr(self, 'invoice_id'):
params['invoice_id'] = self.invoice_id
url = 'sales/refund_invoice'
else:
params['sale_id'] = self.sale_id
url = 'sales/refund_invoice'
return Sale(Api.call(url, params))
def stop(self, params=None):
if params is None:
params = dict()
if hasattr(self, 'lineitem_id'):
params['lineitem_id'] = self.lineitem_id
return Api.call('sales/stop_lineitem_recurring', params)
elif hasattr(self, 'sale_id'):
active_lineitems = Util.active(self)
if dict(active_lineitems):
result = dict()
i = 0
for k, v in active_lineitems.items():
lineitem_id = v
params = {'lineitem_id': lineitem_id}
result[i] = Api.call('sales/stop_lineitem_recurring', params)
i += 1
response = { "response_code": "OK",
"response_message": str(len(result)) + " lineitems stopped successfully"
}
else:
response = {
"response_code": "NOTICE",
"response_message": "No active recurring lineitems"
}
else:
response = { "response_code": "NOTICE",
"response_message": "This method can only be called on a sale or lineitem"
}
return Sale(response)
def active(self):
active_lineitems = Util.active(self)
if dict(active_lineitems):
result = dict()
i = 0
for k, v in active_lineitems.items():
lineitem_id = v
result[i] = lineitem_id
i += 1
response = { "response_code": "ACTIVE",
"response_message": str(len(result)) + " active recurring lineitems"
}
else:
response = {
"response_code": "NOTICE","response_message":
"No active recurring lineitems"
}
return Sale(response)
def comment(self, params=None):
if params is None:
params = dict()
params['sale_id'] = self.sale_id
return Sale(Api.call('sales/create_comment', params))
def ship(self, params=None):
if params is None:
params = dict()
params['sale_id'] = self.sale_id
return Sale(Api.call('sales/mark_shipped', params))
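# For illustration only (not part of the original module) -- a minimal usage sketch of
# the methods defined above; the sale_id value below is a placeholder, not real data:
# sale = Sale.find({'sale_id': '1234567890'})
# status = sale.active()   # reports whether any recurring lineitems are active
# sale.stop()              # stops all active recurring lineitems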
| 2Checkout/2checkout-python | twocheckout/sale.py | Python | mit | 3,388 |
import json
import os
from flask import request, g, render_template, make_response, jsonify, Response
from helpers.raw_endpoint import get_id, store_json_to_file
from helpers.groups import get_groups
from json_controller import JSONController
from main import app
from pymongo import MongoClient, errors
HERE = os.path.dirname(os.path.abspath(__file__))
# setup database connection
def connect_client():
"""Connects to Mongo client"""
try:
return MongoClient(app.config['DB_HOST'], int(app.config['DB_PORT']))
except errors.ConnectionFailure as e:
raise e
def get_db():
"""Connects to Mongo database"""
if not hasattr(g, 'mongo_client'):
g.mongo_client = connect_client()
g.mongo_db = getattr(g.mongo_client, app.config['DB_NAME'])
g.groups_collection = g.mongo_db[os.environ.get('DB_GROUPS_COLLECTION')]
return g.mongo_db
@app.teardown_appcontext
def close_db(error):
"""Closes connection with Mongo client"""
if hasattr(g, 'mongo_client'):
g.mongo_client.close()
# Begin view routes
@app.route('/')
@app.route('/index/')
def index():
"""Landing page for SciNet"""
return render_template("index.html")
@app.route('/faq/')
def faq():
"""FAQ page for SciNet"""
return render_template("faq.html")
@app.route('/leaderboard/')
def leaderboard():
"""Leaderboard page for SciNet"""
get_db()
groups = get_groups(g.groups_collection)
return render_template("leaderboard.html", groups=groups)
@app.route('/ping', methods=['POST'])
def ping_endpoint():
"""API endpoint determines potential article hash exists in db
:return: status code 204 -- hash not present, continue submission
:return: status code 201 -- hash already exists, drop submission
"""
db = get_db()
target_hash = request.form.get('hash')
if db.raw.find({'hash': target_hash}).count():
return Response(status=201)
else:
return Response(status=204)
@app.route('/articles')
def ArticleEndpoint():
"""Eventual landing page for searching/retrieving articles"""
if request.method == 'GET':
return render_template("articles.html")
@app.route('/raw', methods=['POST'])
def raw_endpoint():
"""API endpoint for submitting raw article data
:return: status code 405 - invalid JSON or invalid request type
:return: status code 400 - unsupported content-type or invalid publisher
:return: status code 201 - successful submission
"""
# Ensure post's content-type is supported
if request.headers['content-type'] == 'application/json':
# Ensure data is a valid JSON
try:
user_submission = json.loads(request.data)
except ValueError:
return Response(status=405)
# generate UID for new entry
uid = get_id()
# store incoming JSON in raw storage
file_path = os.path.join(
HERE,
'raw_payloads',
str(uid)
)
store_json_to_file(user_submission, file_path)
        # hand submission to controller and return Response
db = get_db()
controller_response = JSONController(user_submission, db=db, _id=uid).submit()
return controller_response
# User submitted an unsupported content-type
else:
return Response(status=400)
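# For illustration only (not part of the original module) -- a minimal client-side sketch
# of exercising the /raw endpoint; the host, port, and payload fields are assumptions:
# import requests
# resp = requests.post('http://localhost:5000/raw',
#                      json={'publisher': 'example', 'articles': []},
#                      headers={'content-type': 'application/json'})
# resp.status_code  # 201 on success; 400/405 for bad content-type or invalid JSON (see docstring above)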
#@TODO: Implicit or explicit group additions? See the comments on issue #51 on the issues page
#@TODO: Add form validation
@app.route('/requestnewgroup/', methods=['POST'])
def request_new_group():
# Grab submission form data and prepare email message
data = request.json
msg = "Someone has request that you add {group_name} to the leaderboard \
groups. The groups website is {group_website} and the submitter can \
be reached at {submitter_email}.".format(
group_name=data['new_group_name'],
group_website=data['new_group_website'],
submitter_email=data['submitter_email'])
return Response(status=200)
'''
try:
email(
subject="SciNet: A new group has been requested",
fro="no-reply@scinet.osf.io",
to='harry@scinet.osf.io',
msg=msg)
return Response(status=200)
except:
return Response(status=500)
'''
# Error handlers
@app.errorhandler(404)
def not_found(error):
return make_response(jsonify( { 'error': 'Page Not Found' } ), 404)
@app.errorhandler(405)
def method_not_allowed(error):
return make_response(jsonify( { 'error': 'Method Not Allowed' } ), 405) | CenterForOpenScience/scinet | scinet/views.py | Python | mit | 4,696 |
from corecat.constants import OBJECT_CODES, MODEL_VERSION
from ._sqlalchemy import Base, CoreCatBaseMixin
from ._sqlalchemy import Column, \
Integer, \
String, Text
class Project(CoreCatBaseMixin, Base):
"""Project Model class represent for the 'projects' table
which is used to store project's basic information."""
# Add the real table name here.
# TODO: Add the database prefix here
__tablename__ = 'project'
# Column definition
project_id = Column('id', Integer,
primary_key=True,
autoincrement=True
)
project_name = Column('name', String(100),
nullable=False
)
project_description = Column('description', Text,
nullable=True
)
# Relationship
# TODO: Building relationship
def __init__(self, project_name,
created_by_user_id,
**kwargs):
"""
Constructor of Project Model Class.
:param project_name: Name of the project.
:param created_by_user_id: Project is created under this user ID.
:param project_description: Description of the project.
"""
self.set_up_basic_information(
MODEL_VERSION[OBJECT_CODES['Project']],
created_by_user_id
)
self.project_name = project_name
self.project_description = kwargs.get('project_description', None)
| DanceCats/CoreCat | corecat/models/project.py | Python | mit | 1,533 |
#!/usr/bin/env python
from ansible.module_utils.hashivault import hashivault_argspec
from ansible.module_utils.hashivault import hashivault_auth_client
from ansible.module_utils.hashivault import hashivault_init
from ansible.module_utils.hashivault import hashiwrapper
ANSIBLE_METADATA = {'status': ['stableinterface'], 'supported_by': 'community', 'version': '1.1'}
DOCUMENTATION = '''
---
module: hashivault_approle_role_get
version_added: "3.8.0"
short_description: Hashicorp Vault approle role get module
description:
- Module to get a approle role from Hashicorp Vault.
options:
name:
description:
- role name.
mount_point:
description:
- mount point for role
default: approle
extends_documentation_fragment: hashivault
'''
EXAMPLES = '''
---
- hosts: localhost
tasks:
- hashivault_approle_role_get:
name: 'ashley'
register: 'vault_approle_role_get'
- debug: msg="Role is {{vault_approle_role_get.role}}"
'''
def main():
argspec = hashivault_argspec()
argspec['name'] = dict(required=True, type='str')
argspec['mount_point'] = dict(required=False, type='str', default='approle')
module = hashivault_init(argspec)
result = hashivault_approle_role_get(module.params)
if result.get('failed'):
module.fail_json(**result)
else:
module.exit_json(**result)
@hashiwrapper
def hashivault_approle_role_get(params):
name = params.get('name')
client = hashivault_auth_client(params)
result = client.get_role(name, mount_point=params.get('mount_point'))
return {'role': result}
if __name__ == '__main__':
main()
| TerryHowe/ansible-modules-hashivault | ansible/modules/hashivault/hashivault_approle_role_get.py | Python | mit | 1,659 |
from scrapy.spiders import Spider
from scrapy.selector import Selector
from scrapy.http import HtmlResponse
from FIFAscrape.items import PlayerItem
from urlparse import urlparse, urljoin
from scrapy.http.request import Request
from scrapy.conf import settings
import random
import time
class fifaSpider(Spider):
name = "fifa"
allowed_domains = ["futhead.com"]
start_urls = [
"http://www.futhead.com/16/players/?level=all_nif&bin_platform=ps"
]
def parse(self, response):
#obtains links from page to page and passes links to parse_playerURL
sel = Selector(response) #define selector based on response object (points to urls in start_urls by default)
url_list = sel.xpath('//a[@class="display-block padding-0"]/@href') #obtain a list of href links that contain relative links of players
for i in url_list:
relative_url = self.clean_str(i.extract()) #i is a selector and hence need to extract it to obtain unicode object
print urljoin(response.url, relative_url) #urljoin is able to merge absolute and relative paths to form 1 coherent link
req = Request(urljoin(response.url, relative_url),callback=self.parse_playerURL) #pass on request with new urls to parse_playerURL
req.headers["User-Agent"] = self.random_ua()
yield req
next_url=sel.xpath('//div[@class="right-nav pull-right"]/a[@rel="next"]/@href').extract_first()
if(next_url): #checks if next page exists
clean_next_url = self.clean_str(next_url)
reqNext = Request(urljoin(response.url, clean_next_url),callback=self.parse) #calls back this function to repeat process on new list of links
yield reqNext
def parse_playerURL(self, response):
#parses player specific data into items list
site = Selector(response)
items = []
item = PlayerItem()
item['1name'] = (response.url).rsplit("/")[-2].replace("-"," ")
title = self.clean_str(site.xpath('/html/head/title/text()').extract_first())
item['OVR'] = title.partition("FIFA 16 -")[1].split("-")[0]
item['POS'] = self.clean_str(site.xpath('//div[@class="playercard-position"]/text()').extract_first())
#stats = site.xpath('//div[@class="row player-center-container"]/div/a')
stat_names = site.xpath('//span[@class="player-stat-title"]')
stat_values = site.xpath('//span[contains(@class, "player-stat-value")]')
for index in range(len(stat_names)):
attr_name = stat_names[index].xpath('.//text()').extract_first()
item[attr_name] = stat_values[index].xpath('.//text()').extract_first()
items.append(item)
return items
def clean_str(self,ustring):
		#removes weird unicode chars (/u102 bla), whitespaces, tabspaces, etc to form clean string
return str(ustring.encode('ascii', 'replace')).strip()
def random_ua(self):
#randomise user-agent from list to reduce chance of being banned
ua = random.choice(settings.get('USER_AGENT_LIST'))
		if not ua:
			#fall back to a fixed user-agent if the list is missing or empty
			ua='Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2226.0 Safari/537.36'
return ua
| HashirZahir/FIFA-Player-Ratings | FIFAscrape/spiders/fifa_spider.py | Python | mit | 3,458 |
print("hello!!!!") | coolralf/KaggleTraining | HELP.py | Python | mit | 18 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, Awaitable, Optional, TYPE_CHECKING
from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
from .. import models
from ._configuration import SqlVirtualMachineManagementClientConfiguration
from .operations import AvailabilityGroupListenersOperations, Operations, SqlVirtualMachineGroupsOperations, SqlVirtualMachinesOperations
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class SqlVirtualMachineManagementClient:
"""The SQL virtual machine management API provides a RESTful set of web APIs that interact with Azure Compute, Network & Storage services to manage your SQL Server virtual machine. The API enables users to create, delete and retrieve a SQL virtual machine, SQL virtual machine group or availability group listener.
:ivar availability_group_listeners: AvailabilityGroupListenersOperations operations
:vartype availability_group_listeners:
azure.mgmt.sqlvirtualmachine.aio.operations.AvailabilityGroupListenersOperations
:ivar operations: Operations operations
:vartype operations: azure.mgmt.sqlvirtualmachine.aio.operations.Operations
:ivar sql_virtual_machine_groups: SqlVirtualMachineGroupsOperations operations
:vartype sql_virtual_machine_groups:
azure.mgmt.sqlvirtualmachine.aio.operations.SqlVirtualMachineGroupsOperations
:ivar sql_virtual_machines: SqlVirtualMachinesOperations operations
:vartype sql_virtual_machines:
azure.mgmt.sqlvirtualmachine.aio.operations.SqlVirtualMachinesOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: Subscription ID that identifies an Azure subscription.
:type subscription_id: str
:param base_url: Service URL. Default value is 'https://management.azure.com'.
:type base_url: str
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
base_url: str = "https://management.azure.com",
**kwargs: Any
) -> None:
self._config = SqlVirtualMachineManagementClientConfiguration(credential=credential, subscription_id=subscription_id, **kwargs)
self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
self.availability_group_listeners = AvailabilityGroupListenersOperations(self._client, self._config, self._serialize, self._deserialize)
self.operations = Operations(self._client, self._config, self._serialize, self._deserialize)
self.sql_virtual_machine_groups = SqlVirtualMachineGroupsOperations(self._client, self._config, self._serialize, self._deserialize)
self.sql_virtual_machines = SqlVirtualMachinesOperations(self._client, self._config, self._serialize, self._deserialize)
def _send_request(
self,
request: HttpRequest,
**kwargs: Any
) -> Awaitable[AsyncHttpResponse]:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = await client._send_request(request)
<AsyncHttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.AsyncHttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "SqlVirtualMachineManagementClient":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._client.__aexit__(*exc_details)
| Azure/azure-sdk-for-python | sdk/sql/azure-mgmt-sqlvirtualmachine/azure/mgmt/sqlvirtualmachine/aio/_sql_virtual_machine_management_client.py | Python | mit | 5,342 |
from flask import Blueprint, request, render_template
from ..load import processing_results
from ..abbr import get_abbr_map
abbr_map = get_abbr_map()
liner_mod = Blueprint('liner', __name__, template_folder='templates', static_folder='static')
@liner_mod.route('/liner', methods=['GET', 'POST'])
def liner():
if request.method == 'POST':
query = request.form['liner-text']
text = query.split('.')[:-1]
if len(text) == 0:
return render_template('projects/line.html', message='Please separate each line with "."')
abbr_expanded_text = ""
for word in query.split():
if word in abbr_map:
abbr_expanded_text += abbr_map[word]
else:
abbr_expanded_text += word
abbr_expanded_text += " "
data, emotion_sents, score, line_sentiment, text, length = processing_results(text)
return render_template('projects/line.html', data=[data, emotion_sents, score, zip(text, line_sentiment), length, abbr_expanded_text])
else:
return render_template('projects/line.html')
| griimick/feature-mlsite | app/liner/views.py | Python | mit | 1,108 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Dimension(Model):
"""Dimension of a resource metric. For e.g. instance specific HTTP requests
for a web app,
where instance name is dimension of the metric HTTP request.
:param name:
:type name: str
:param display_name:
:type display_name: str
:param internal_name:
:type internal_name: str
:param to_be_exported_for_shoebox:
:type to_be_exported_for_shoebox: bool
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'internal_name': {'key': 'internalName', 'type': 'str'},
'to_be_exported_for_shoebox': {'key': 'toBeExportedForShoebox', 'type': 'bool'},
}
def __init__(self, name=None, display_name=None, internal_name=None, to_be_exported_for_shoebox=None):
super(Dimension, self).__init__()
self.name = name
self.display_name = display_name
self.internal_name = internal_name
self.to_be_exported_for_shoebox = to_be_exported_for_shoebox
| lmazuel/azure-sdk-for-python | azure-mgmt-web/azure/mgmt/web/models/dimension.py | Python | mit | 1,562 |
import asyncio
import discord
import datetime
import pytz
from discord.ext import commands
from Cogs import FuzzySearch
from Cogs import Settings
from Cogs import DisplayName
from Cogs import Message
from Cogs import Nullify
class Time:
# Init with the bot reference, and a reference to the settings var
def __init__(self, bot, settings):
self.bot = bot
self.settings = settings
@commands.command(pass_context=True)
async def settz(self, ctx, *, tz : str = None):
"""Sets your TimeZone - Overrides your UTC offset - and accounts for DST."""
usage = 'Usage: `{}settz [Region/City]`\nYou can get a list of available TimeZones with `{}listtz`'.format(ctx.prefix, ctx.prefix)
if not tz:
self.settings.setGlobalUserStat(ctx.author, "TimeZone", None)
await ctx.channel.send("*{}*, your TimeZone has been removed!".format(DisplayName.name(ctx.author)))
return
# Let's get the timezone list
tz_list = FuzzySearch.search(tz, pytz.all_timezones, None, 3)
if not tz_list[0]['Ratio'] == 1:
# We didn't find a complete match
msg = "I couldn't find that TimeZone!\n\nMaybe you meant one of the following?\n```"
for tz in tz_list:
msg += tz['Item'] + "\n"
msg += '```'
await ctx.channel.send(msg)
return
# We got a time zone
self.settings.setGlobalUserStat(ctx.author, "TimeZone", tz_list[0]['Item'])
await ctx.channel.send("TimeZone set to *{}!*".format(tz_list[0]['Item']))
@commands.command(pass_context=True)
async def listtz(self, ctx, *, tz_search = None):
"""List all the supported TimeZones in PM."""
if not tz_search:
msg = "__Available TimeZones:__\n\n"
for tz in pytz.all_timezones:
msg += tz + "\n"
else:
tz_list = FuzzySearch.search(tz_search, pytz.all_timezones)
msg = "__Top 3 TimeZone Matches:__\n\n"
for tz in tz_list:
msg += tz['Item'] + "\n"
await Message.say(self.bot, msg, ctx.channel, ctx.author, 1)
@commands.command(pass_context=True)
async def tz(self, ctx, *, member = None):
"""See a member's TimeZone."""
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(ctx.message.guild, "SuppressMentions").lower() == "yes":
suppress = True
else:
suppress = False
if member == None:
member = ctx.message.author
if type(member) == str:
# Try to get a user first
memberName = member
member = DisplayName.memberForName(memberName, ctx.message.guild)
if not member:
msg = 'Couldn\'t find user *{}*.'.format(memberName)
# Check for suppress
if suppress:
msg = Nullify.clean(msg)
await ctx.channel.send(msg)
return
# We got one
timezone = self.settings.getGlobalUserStat(member, "TimeZone")
if timezone == None:
msg = '*{}* hasn\'t set their TimeZone yet - they can do so with the `{}settz [Region/City]` command.'.format(DisplayName.name(member), ctx.prefix)
await ctx.channel.send(msg)
return
msg = '*{}\'s* TimeZone is *{}*'.format(DisplayName.name(member), timezone)
await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def setoffset(self, ctx, *, offset : str = None):
"""Set your UTC offset."""
if offset == None:
self.settings.setGlobalUserStat(ctx.message.author, "UTCOffset", None)
msg = '*{}*, your UTC offset has been removed!'.format(DisplayName.name(ctx.message.author))
await ctx.channel.send(msg)
return
offset = offset.replace('+', '')
# Split time string by : and get hour/minute values
try:
hours, minutes = map(int, offset.split(':'))
except Exception:
try:
hours = int(offset)
minutes = 0
except Exception:
await ctx.channel.send('Offset has to be in +-H:M!')
return
off = "{}:{}".format(hours, minutes)
self.settings.setGlobalUserStat(ctx.message.author, "UTCOffset", off)
msg = '*{}*, your UTC offset has been set to *{}!*'.format(DisplayName.name(ctx.message.author), off)
await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def offset(self, ctx, *, member = None):
"""See a member's UTC offset."""
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(ctx.message.guild, "SuppressMentions").lower() == "yes":
suppress = True
else:
suppress = False
if member == None:
member = ctx.message.author
if type(member) == str:
# Try to get a user first
memberName = member
member = DisplayName.memberForName(memberName, ctx.message.guild)
if not member:
msg = 'Couldn\'t find user *{}*.'.format(memberName)
# Check for suppress
if suppress:
msg = Nullify.clean(msg)
await ctx.channel.send(msg)
return
# We got one
offset = self.settings.getGlobalUserStat(member, "UTCOffset")
if offset == None:
msg = '*{}* hasn\'t set their offset yet - they can do so with the `{}setoffset [+-offset]` command.'.format(DisplayName.name(member), ctx.prefix)
await ctx.channel.send(msg)
return
# Split time string by : and get hour/minute values
try:
hours, minutes = map(int, offset.split(':'))
except Exception:
try:
hours = int(offset)
minutes = 0
except Exception:
await ctx.channel.send('Offset has to be in +-H:M!')
return
msg = 'UTC'
# Apply offset
if hours > 0:
# Apply positive offset
msg += '+{}'.format(offset)
elif hours < 0:
# Apply negative offset
msg += '{}'.format(offset)
msg = '*{}\'s* offset is *{}*'.format(DisplayName.name(member), msg)
await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def time(self, ctx, *, offset : str = None):
"""Get UTC time +- an offset."""
timezone = None
if offset == None:
member = ctx.message.author
else:
# Try to get a user first
member = DisplayName.memberForName(offset, ctx.message.guild)
if member:
# We got one
# Check for timezone first
offset = self.settings.getGlobalUserStat(member, "TimeZone")
if offset == None:
offset = self.settings.getGlobalUserStat(member, "UTCOffset")
if offset == None:
msg = '*{}* hasn\'t set their TimeZone or offset yet - they can do so with the `{}setoffset [+-offset]` or `{}settz [Region/City]` command.\nThe current UTC time is *{}*.'.format(DisplayName.name(member), ctx.prefix, ctx.prefix, datetime.datetime.utcnow().strftime("%I:%M %p"))
await ctx.channel.send(msg)
return
# At this point - we need to determine if we have an offset - or possibly a timezone passed
t = self.getTimeFromTZ(offset)
if t == None:
# We did not get an offset
t = self.getTimeFromOffset(offset)
if t == None:
await ctx.channel.send("I couldn't find that TimeZone or offset!")
return
if member:
msg = '{}; where *{}* is, it\'s currently *{}*'.format(t["zone"], DisplayName.name(member), t["time"])
else:
msg = '{} is currently *{}*'.format(t["zone"], t["time"])
# Say message
await ctx.channel.send(msg)
def getTimeFromOffset(self, offset):
offset = offset.replace('+', '')
# Split time string by : and get hour/minute values
try:
hours, minutes = map(int, offset.split(':'))
except Exception:
try:
hours = int(offset)
minutes = 0
except Exception:
return None
# await ctx.channel.send('Offset has to be in +-H:M!')
# return
msg = 'UTC'
# Get the time
t = datetime.datetime.utcnow()
# Apply offset
if hours > 0:
# Apply positive offset
msg += '+{}'.format(offset)
td = datetime.timedelta(hours=hours, minutes=minutes)
newTime = t + td
elif hours < 0:
# Apply negative offset
msg += '{}'.format(offset)
td = datetime.timedelta(hours=(-1*hours), minutes=(-1*minutes))
newTime = t - td
else:
# No offset
newTime = t
return { "zone" : msg, "time" : newTime.strftime("%I:%M %p") }
def getTimeFromTZ(self, tz):
# Assume sanitized zones - as they're pulled from pytz
# Let's get the timezone list
tz_list = FuzzySearch.search(tz, pytz.all_timezones, None, 3)
if not tz_list[0]['Ratio'] == 1:
# We didn't find a complete match
return None
zone = pytz.timezone(tz_list[0]['Item'])
zone_now = datetime.datetime.now(zone)
return { "zone" : tz_list[0]['Item'], "time" : zone_now.strftime("%I:%M %p") } | TheMasterGhost/CorpBot | Cogs/Time.py | Python | mit | 8,457 |
import unittest
from katas.beta.what_color_is_your_name import string_color
class StringColorTestCase(unittest.TestCase):
def test_equal_1(self):
self.assertEqual(string_color('Jack'), '79CAE5')
def test_equal_2(self):
self.assertEqual(string_color('Joshua'), '6A10D6')
def test_equal_3(self):
self.assertEqual(string_color('Joshua Smith'), '8F00FB')
def test_equal_4(self):
self.assertEqual(string_color('Hayden Smith'), '7E00EE')
def test_equal_5(self):
self.assertEqual(string_color('Mathew Smith'), '8B00F1')
def test_is_none_1(self):
self.assertIsNone(string_color('a'))
| the-zebulan/CodeWars | tests/beta_tests/test_what_color_is_your_name.py | Python | mit | 656 |
# coding: utf-8
from sqlalchemy.testing import eq_, assert_raises, assert_raises_message, \
config, is_
import re
from sqlalchemy.testing.util import picklers
from sqlalchemy.interfaces import ConnectionProxy
from sqlalchemy import MetaData, Integer, String, INT, VARCHAR, func, \
bindparam, select, event, TypeDecorator, create_engine, Sequence
from sqlalchemy.sql import column, literal
from sqlalchemy.testing.schema import Table, Column
import sqlalchemy as tsa
from sqlalchemy import testing
from sqlalchemy.testing import engines
from sqlalchemy import util
from sqlalchemy.testing.engines import testing_engine
import logging.handlers
from sqlalchemy.dialects.oracle.zxjdbc import ReturningParam
from sqlalchemy.engine import result as _result, default
from sqlalchemy.engine.base import Engine
from sqlalchemy.testing import fixtures
from sqlalchemy.testing.mock import Mock, call, patch
from contextlib import contextmanager
users, metadata, users_autoinc = None, None, None
class ExecuteTest(fixtures.TestBase):
__backend__ = True
@classmethod
def setup_class(cls):
global users, users_autoinc, metadata
metadata = MetaData(testing.db)
users = Table('users', metadata,
Column('user_id', INT, primary_key=True, autoincrement=False),
Column('user_name', VARCHAR(20)),
)
users_autoinc = Table('users_autoinc', metadata,
Column('user_id', INT, primary_key=True,
test_needs_autoincrement=True),
Column('user_name', VARCHAR(20)),
)
metadata.create_all()
@engines.close_first
def teardown(self):
testing.db.execute(users.delete())
@classmethod
def teardown_class(cls):
metadata.drop_all()
@testing.fails_on("postgresql+pg8000",
"pg8000 still doesn't allow single % without params")
def test_no_params_option(self):
stmt = "SELECT '%'" + testing.db.dialect.statement_compiler(
testing.db.dialect, None).default_from()
conn = testing.db.connect()
result = conn.\
execution_options(no_parameters=True).\
scalar(stmt)
eq_(result, '%')
@testing.fails_on_everything_except('firebird',
'sqlite', '+pyodbc',
'+mxodbc', '+zxjdbc', 'mysql+oursql')
def test_raw_qmark(self):
def go(conn):
conn.execute('insert into users (user_id, user_name) '
'values (?, ?)', (1, 'jack'))
conn.execute('insert into users (user_id, user_name) '
'values (?, ?)', [2, 'fred'])
conn.execute('insert into users (user_id, user_name) '
'values (?, ?)', [3, 'ed'], [4, 'horse'])
conn.execute('insert into users (user_id, user_name) '
'values (?, ?)', (5, 'barney'), (6, 'donkey'))
conn.execute('insert into users (user_id, user_name) '
'values (?, ?)', 7, 'sally')
res = conn.execute('select * from users order by user_id')
assert res.fetchall() == [
(1, 'jack'),
(2, 'fred'),
(3, 'ed'),
(4, 'horse'),
(5, 'barney'),
(6, 'donkey'),
(7, 'sally'),
]
for multiparam, param in [
(("jack", "fred"), {}),
((["jack", "fred"],), {})
]:
res = conn.execute(
"select * from users where user_name=? or "
"user_name=? order by user_id",
*multiparam, **param)
assert res.fetchall() == [
(1, 'jack'),
(2, 'fred')
]
res = conn.execute("select * from users where user_name=?",
"jack"
)
assert res.fetchall() == [(1, 'jack')]
conn.execute('delete from users')
go(testing.db)
conn = testing.db.connect()
try:
go(conn)
finally:
conn.close()
# some psycopg2 versions bomb this.
@testing.fails_on_everything_except('mysql+mysqldb', 'mysql+pymysql',
'mysql+cymysql', 'mysql+mysqlconnector', 'postgresql')
@testing.fails_on('postgresql+zxjdbc', 'sprintf not supported')
def test_raw_sprintf(self):
def go(conn):
conn.execute('insert into users (user_id, user_name) '
'values (%s, %s)', [1, 'jack'])
conn.execute('insert into users (user_id, user_name) '
'values (%s, %s)', [2, 'ed'], [3, 'horse'])
conn.execute('insert into users (user_id, user_name) '
'values (%s, %s)', 4, 'sally')
conn.execute('insert into users (user_id) values (%s)', 5)
res = conn.execute('select * from users order by user_id')
assert res.fetchall() == [(1, 'jack'), (2, 'ed'), (3,
'horse'), (4, 'sally'), (5, None)]
for multiparam, param in [
(("jack", "ed"), {}),
((["jack", "ed"],), {})
]:
res = conn.execute(
"select * from users where user_name=%s or "
"user_name=%s order by user_id",
*multiparam, **param)
assert res.fetchall() == [
(1, 'jack'),
(2, 'ed')
]
res = conn.execute("select * from users where user_name=%s",
"jack"
)
assert res.fetchall() == [(1, 'jack')]
conn.execute('delete from users')
go(testing.db)
conn = testing.db.connect()
try:
go(conn)
finally:
conn.close()
# pyformat is supported for mysql, but skipping because a few driver
# versions have a bug that bombs out on this test. (1.2.2b3,
# 1.2.2c1, 1.2.2)
@testing.skip_if(lambda : testing.against('mysql+mysqldb'),
'db-api flaky')
@testing.fails_on_everything_except('postgresql+psycopg2',
'postgresql+pypostgresql', 'mysql+mysqlconnector',
'mysql+pymysql', 'mysql+cymysql')
def test_raw_python(self):
def go(conn):
conn.execute('insert into users (user_id, user_name) '
'values (%(id)s, %(name)s)', {'id': 1, 'name'
: 'jack'})
conn.execute('insert into users (user_id, user_name) '
'values (%(id)s, %(name)s)', {'id': 2, 'name'
: 'ed'}, {'id': 3, 'name': 'horse'})
conn.execute('insert into users (user_id, user_name) '
'values (%(id)s, %(name)s)', id=4, name='sally'
)
res = conn.execute('select * from users order by user_id')
assert res.fetchall() == [(1, 'jack'), (2, 'ed'), (3,
'horse'), (4, 'sally')]
conn.execute('delete from users')
go(testing.db)
conn = testing.db.connect()
try:
go(conn)
finally:
conn.close()
@testing.fails_on_everything_except('sqlite', 'oracle+cx_oracle')
def test_raw_named(self):
def go(conn):
conn.execute('insert into users (user_id, user_name) '
'values (:id, :name)', {'id': 1, 'name': 'jack'
})
conn.execute('insert into users (user_id, user_name) '
'values (:id, :name)', {'id': 2, 'name': 'ed'
}, {'id': 3, 'name': 'horse'})
conn.execute('insert into users (user_id, user_name) '
'values (:id, :name)', id=4, name='sally')
res = conn.execute('select * from users order by user_id')
assert res.fetchall() == [(1, 'jack'), (2, 'ed'), (3,
'horse'), (4, 'sally')]
conn.execute('delete from users')
go(testing.db)
        conn = testing.db.connect()
try:
go(conn)
finally:
conn.close()
@testing.engines.close_open_connections
def test_exception_wrapping_dbapi(self):
conn = testing.db.connect()
for _c in testing.db, conn:
assert_raises_message(
tsa.exc.DBAPIError,
r"not_a_valid_statement",
_c.execute, 'not_a_valid_statement'
)
@testing.requires.sqlite
def test_exception_wrapping_non_dbapi_error(self):
e = create_engine('sqlite://')
e.dialect.is_disconnect = is_disconnect = Mock()
with e.connect() as c:
c.connection.cursor = Mock(
return_value=Mock(
execute=Mock(
side_effect=TypeError("I'm not a DBAPI error")
))
)
assert_raises_message(
TypeError,
"I'm not a DBAPI error",
c.execute, "select "
)
eq_(is_disconnect.call_count, 0)
def test_exception_wrapping_non_dbapi_statement(self):
class MyType(TypeDecorator):
impl = Integer
def process_bind_param(self, value, dialect):
raise Exception("nope")
def _go(conn):
assert_raises_message(
tsa.exc.StatementError,
r"nope \(original cause: Exception: nope\) u?'SELECT 1 ",
conn.execute,
select([1]).\
where(
column('foo') == literal('bar', MyType())
)
)
_go(testing.db)
conn = testing.db.connect()
try:
_go(conn)
finally:
conn.close()
def test_stmt_exception_non_ascii(self):
name = util.u('méil')
with testing.db.connect() as conn:
assert_raises_message(
tsa.exc.StatementError,
util.u(
"A value is required for bind parameter 'uname'"
r'.*SELECT users.user_name AS .m\\xe9il.') if util.py2k
else
util.u(
"A value is required for bind parameter 'uname'"
'.*SELECT users.user_name AS .méil.')
,
conn.execute,
select([users.c.user_name.label(name)]).where(
users.c.user_name == bindparam("uname")),
{'uname_incorrect': 'foo'}
)
def test_stmt_exception_pickleable_no_dbapi(self):
self._test_stmt_exception_pickleable(Exception("hello world"))
@testing.crashes("postgresql+psycopg2",
"Older versions don't support cursor pickling, newer ones do")
@testing.fails_on("mysql+oursql",
"Exception doesn't come back exactly the same from pickle")
@testing.fails_on("mysql+mysqlconnector",
"Exception doesn't come back exactly the same from pickle")
@testing.fails_on("oracle+cx_oracle",
"cx_oracle exception seems to be having "
"some issue with pickling")
def test_stmt_exception_pickleable_plus_dbapi(self):
raw = testing.db.raw_connection()
the_orig = None
try:
try:
cursor = raw.cursor()
cursor.execute("SELECTINCORRECT")
except testing.db.dialect.dbapi.DatabaseError as orig:
# py3k has "orig" in local scope...
the_orig = orig
finally:
raw.close()
self._test_stmt_exception_pickleable(the_orig)
def _test_stmt_exception_pickleable(self, orig):
for sa_exc in (
tsa.exc.StatementError("some error",
"select * from table",
{"foo":"bar"},
orig),
tsa.exc.InterfaceError("select * from table",
{"foo":"bar"},
orig),
tsa.exc.NoReferencedTableError("message", "tname"),
tsa.exc.NoReferencedColumnError("message", "tname", "cname"),
tsa.exc.CircularDependencyError("some message", [1, 2, 3], [(1, 2), (3, 4)]),
):
for loads, dumps in picklers():
repickled = loads(dumps(sa_exc))
eq_(repickled.args[0], sa_exc.args[0])
if isinstance(sa_exc, tsa.exc.StatementError):
eq_(repickled.params, {"foo":"bar"})
eq_(repickled.statement, sa_exc.statement)
if hasattr(sa_exc, "connection_invalidated"):
eq_(repickled.connection_invalidated,
sa_exc.connection_invalidated)
eq_(repickled.orig.args[0], orig.args[0])
def test_dont_wrap_mixin(self):
class MyException(Exception, tsa.exc.DontWrapMixin):
pass
class MyType(TypeDecorator):
impl = Integer
def process_bind_param(self, value, dialect):
raise MyException("nope")
def _go(conn):
assert_raises_message(
MyException,
"nope",
conn.execute,
select([1]).\
where(
column('foo') == literal('bar', MyType())
)
)
_go(testing.db)
conn = testing.db.connect()
try:
_go(conn)
finally:
conn.close()
def test_empty_insert(self):
"""test that execute() interprets [] as a list with no params"""
testing.db.execute(users_autoinc.insert().
values(user_name=bindparam('name', None)), [])
eq_(testing.db.execute(users_autoinc.select()).fetchall(), [(1, None)])
@testing.requires.ad_hoc_engines
def test_engine_level_options(self):
eng = engines.testing_engine(options={'execution_options':
{'foo': 'bar'}})
with eng.contextual_connect() as conn:
eq_(conn._execution_options['foo'], 'bar')
eq_(conn.execution_options(bat='hoho')._execution_options['foo'
], 'bar')
eq_(conn.execution_options(bat='hoho')._execution_options['bat'
], 'hoho')
eq_(conn.execution_options(foo='hoho')._execution_options['foo'
], 'hoho')
eng.update_execution_options(foo='hoho')
conn = eng.contextual_connect()
eq_(conn._execution_options['foo'], 'hoho')
@testing.requires.ad_hoc_engines
def test_generative_engine_execution_options(self):
eng = engines.testing_engine(options={'execution_options':
{'base': 'x1'}})
eng1 = eng.execution_options(foo="b1")
eng2 = eng.execution_options(foo="b2")
eng1a = eng1.execution_options(bar="a1")
eng2a = eng2.execution_options(foo="b3", bar="a2")
eq_(eng._execution_options,
{'base': 'x1'})
eq_(eng1._execution_options,
{'base': 'x1', 'foo': 'b1'})
eq_(eng2._execution_options,
{'base': 'x1', 'foo': 'b2'})
eq_(eng1a._execution_options,
{'base': 'x1', 'foo': 'b1', 'bar': 'a1'})
eq_(eng2a._execution_options,
{'base': 'x1', 'foo': 'b3', 'bar': 'a2'})
is_(eng1a.pool, eng.pool)
# test pool is shared
eng2.dispose()
is_(eng1a.pool, eng2.pool)
is_(eng.pool, eng2.pool)
@testing.requires.ad_hoc_engines
def test_generative_engine_event_dispatch(self):
canary = []
def l1(*arg, **kw):
canary.append("l1")
def l2(*arg, **kw):
canary.append("l2")
def l3(*arg, **kw):
canary.append("l3")
eng = engines.testing_engine(options={'execution_options':
{'base': 'x1'}})
event.listen(eng, "before_execute", l1)
eng1 = eng.execution_options(foo="b1")
event.listen(eng, "before_execute", l2)
event.listen(eng1, "before_execute", l3)
eng.execute(select([1])).close()
eng1.execute(select([1])).close()
eq_(canary, ["l1", "l2", "l3", "l1", "l2"])
@testing.requires.ad_hoc_engines
def test_generative_engine_event_dispatch_hasevents(self):
def l1(*arg, **kw):
pass
eng = create_engine(testing.db.url)
assert not eng._has_events
event.listen(eng, "before_execute", l1)
eng2 = eng.execution_options(foo='bar')
assert eng2._has_events
def test_unicode_test_fails_warning(self):
class MockCursor(engines.DBAPIProxyCursor):
def execute(self, stmt, params=None, **kw):
if "test unicode returns" in stmt:
raise self.engine.dialect.dbapi.DatabaseError("boom")
else:
return super(MockCursor, self).execute(stmt, params, **kw)
eng = engines.proxying_engine(cursor_cls=MockCursor)
assert_raises_message(
tsa.exc.SAWarning,
"Exception attempting to detect unicode returns",
eng.connect
)
assert eng.dialect.returns_unicode_strings in (True, False)
eng.dispose()
def test_works_after_dispose(self):
eng = create_engine(testing.db.url)
for i in range(3):
eq_(eng.scalar(select([1])), 1)
eng.dispose()
def test_works_after_dispose_testing_engine(self):
eng = engines.testing_engine()
for i in range(3):
eq_(eng.scalar(select([1])), 1)
eng.dispose()
class ConvenienceExecuteTest(fixtures.TablesTest):
__backend__ = True
@classmethod
def define_tables(cls, metadata):
cls.table = Table('exec_test', metadata,
Column('a', Integer),
Column('b', Integer),
test_needs_acid=True
)
def _trans_fn(self, is_transaction=False):
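        # the returned callable takes (conn, x, value); when is_transaction=True
        # it is handed a Transaction, so the Connection is pulled from .connection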
def go(conn, x, value=None):
if is_transaction:
conn = conn.connection
conn.execute(self.table.insert().values(a=x, b=value))
return go
def _trans_rollback_fn(self, is_transaction=False):
def go(conn, x, value=None):
if is_transaction:
conn = conn.connection
conn.execute(self.table.insert().values(a=x, b=value))
raise Exception("breakage")
return go
def _assert_no_data(self):
eq_(
testing.db.scalar(self.table.count()), 0
)
def _assert_fn(self, x, value=None):
eq_(
testing.db.execute(self.table.select()).fetchall(),
[(x, value)]
)
def test_transaction_engine_ctx_commit(self):
fn = self._trans_fn()
ctx = testing.db.begin()
testing.run_as_contextmanager(ctx, fn, 5, value=8)
self._assert_fn(5, value=8)
def test_transaction_engine_ctx_begin_fails(self):
engine = engines.testing_engine()
mock_connection = Mock(
return_value=Mock(
begin=Mock(side_effect=Exception("boom"))
)
)
engine._connection_cls = mock_connection
assert_raises(
Exception,
engine.begin
)
eq_(
mock_connection.return_value.close.mock_calls,
[call()]
)
def test_transaction_engine_ctx_rollback(self):
fn = self._trans_rollback_fn()
ctx = testing.db.begin()
assert_raises_message(
Exception,
"breakage",
testing.run_as_contextmanager, ctx, fn, 5, value=8
)
self._assert_no_data()
def test_transaction_tlocal_engine_ctx_commit(self):
fn = self._trans_fn()
engine = engines.testing_engine(options=dict(
strategy='threadlocal',
pool=testing.db.pool))
ctx = engine.begin()
testing.run_as_contextmanager(ctx, fn, 5, value=8)
self._assert_fn(5, value=8)
def test_transaction_tlocal_engine_ctx_rollback(self):
fn = self._trans_rollback_fn()
engine = engines.testing_engine(options=dict(
strategy='threadlocal',
pool=testing.db.pool))
ctx = engine.begin()
assert_raises_message(
Exception,
"breakage",
testing.run_as_contextmanager, ctx, fn, 5, value=8
)
self._assert_no_data()
def test_transaction_connection_ctx_commit(self):
fn = self._trans_fn(True)
conn = testing.db.connect()
ctx = conn.begin()
testing.run_as_contextmanager(ctx, fn, 5, value=8)
self._assert_fn(5, value=8)
def test_transaction_connection_ctx_rollback(self):
fn = self._trans_rollback_fn(True)
conn = testing.db.connect()
ctx = conn.begin()
assert_raises_message(
Exception,
"breakage",
testing.run_as_contextmanager, ctx, fn, 5, value=8
)
self._assert_no_data()
def test_connection_as_ctx(self):
fn = self._trans_fn()
ctx = testing.db.connect()
testing.run_as_contextmanager(ctx, fn, 5, value=8)
# autocommit is on
self._assert_fn(5, value=8)
@testing.fails_on('mysql+oursql', "oursql bug ? getting wrong rowcount")
def test_connect_as_ctx_noautocommit(self):
fn = self._trans_fn()
self._assert_no_data()
ctx = testing.db.connect().execution_options(autocommit=False)
testing.run_as_contextmanager(ctx, fn, 5, value=8)
# autocommit is off
self._assert_no_data()
def test_transaction_engine_fn_commit(self):
fn = self._trans_fn()
testing.db.transaction(fn, 5, value=8)
self._assert_fn(5, value=8)
def test_transaction_engine_fn_rollback(self):
fn = self._trans_rollback_fn()
assert_raises_message(
Exception,
"breakage",
testing.db.transaction, fn, 5, value=8
)
self._assert_no_data()
def test_transaction_connection_fn_commit(self):
fn = self._trans_fn()
conn = testing.db.connect()
conn.transaction(fn, 5, value=8)
self._assert_fn(5, value=8)
def test_transaction_connection_fn_rollback(self):
fn = self._trans_rollback_fn()
conn = testing.db.connect()
assert_raises(
Exception,
conn.transaction, fn, 5, value=8
)
self._assert_no_data()
class CompiledCacheTest(fixtures.TestBase):
__backend__ = True
@classmethod
def setup_class(cls):
global users, metadata
metadata = MetaData(testing.db)
users = Table('users', metadata,
Column('user_id', INT, primary_key=True,
test_needs_autoincrement=True),
Column('user_name', VARCHAR(20)),
)
metadata.create_all()
@engines.close_first
def teardown(self):
testing.db.execute(users.delete())
@classmethod
def teardown_class(cls):
metadata.drop_all()
def test_cache(self):
conn = testing.db.connect()
cache = {}
cached_conn = conn.execution_options(compiled_cache=cache)
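        # compiled_cache stores the compiled form of the insert() construct in
        # the supplied dict, so the three executions below share one cache entry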
ins = users.insert()
cached_conn.execute(ins, {'user_name':'u1'})
cached_conn.execute(ins, {'user_name':'u2'})
cached_conn.execute(ins, {'user_name':'u3'})
assert len(cache) == 1
eq_(conn.execute("select count(*) from users").scalar(), 3)
class MockStrategyTest(fixtures.TestBase):
def _engine_fixture(self):
buf = util.StringIO()
def dump(sql, *multiparams, **params):
buf.write(util.text_type(sql.compile(dialect=engine.dialect)))
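        # the 'mock' strategy produces an Engine that hands compiled statements
        # to the executor callable instead of a real DBAPI connection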
engine = create_engine('postgresql://', strategy='mock', executor=dump)
return engine, buf
def test_sequence_not_duped(self):
engine, buf = self._engine_fixture()
metadata = MetaData()
t = Table('testtable', metadata,
Column('pk', Integer, Sequence('testtable_pk_seq'), primary_key=True)
)
t.create(engine)
t.drop(engine)
eq_(
re.findall(r'CREATE (\w+)', buf.getvalue()),
["SEQUENCE", "TABLE"]
)
eq_(
re.findall(r'DROP (\w+)', buf.getvalue()),
["SEQUENCE", "TABLE"]
)
class ResultProxyTest(fixtures.TestBase):
__backend__ = True
def test_nontuple_row(self):
"""ensure the C version of BaseRowProxy handles
duck-type-dependent rows."""
from sqlalchemy.engine import RowProxy
class MyList(object):
def __init__(self, l):
self.l = l
def __len__(self):
return len(self.l)
def __getitem__(self, i):
return list.__getitem__(self.l, i)
proxy = RowProxy(object(), MyList(['value']), [None], {'key'
: (None, None, 0), 0: (None, None, 0)})
eq_(list(proxy), ['value'])
eq_(proxy[0], 'value')
eq_(proxy['key'], 'value')
@testing.provide_metadata
def test_no_rowcount_on_selects_inserts(self):
"""assert that rowcount is only called on deletes and updates.
        This is because cursor.rowcount may be expensive on some dialects
        such as Firebird; however, many dialects require that it be called
        before the cursor is closed.
"""
metadata = self.metadata
engine = engines.testing_engine()
t = Table('t1', metadata,
Column('data', String(10))
)
metadata.create_all(engine)
with patch.object(engine.dialect.execution_ctx_cls, "rowcount") as mock_rowcount:
mock_rowcount.__get__ = Mock()
engine.execute(t.insert(),
{'data': 'd1'},
{'data': 'd2'},
{'data': 'd3'})
eq_(len(mock_rowcount.__get__.mock_calls), 0)
eq_(
engine.execute(t.select()).fetchall(),
[('d1', ), ('d2', ), ('d3', )]
)
eq_(len(mock_rowcount.__get__.mock_calls), 0)
engine.execute(t.update(), {'data': 'd4'})
eq_(len(mock_rowcount.__get__.mock_calls), 1)
engine.execute(t.delete())
eq_(len(mock_rowcount.__get__.mock_calls), 2)
def test_rowproxy_is_sequence(self):
import collections
from sqlalchemy.engine import RowProxy
row = RowProxy(object(), ['value'], [None], {'key'
: (None, None, 0), 0: (None, None, 0)})
assert isinstance(row, collections.Sequence)
@testing.requires.cextensions
def test_row_c_sequence_check(self):
import csv
import collections
metadata = MetaData()
metadata.bind = 'sqlite://'
users = Table('users', metadata,
Column('id', Integer, primary_key=True),
Column('name', String(40)),
)
users.create()
users.insert().execute(name='Test')
row = users.select().execute().fetchone()
s = util.StringIO()
writer = csv.writer(s)
# csv performs PySequenceCheck call
writer.writerow(row)
assert s.getvalue().strip() == '1,Test'
@testing.requires.selectone
def test_empty_accessors(self):
statements = [
(
"select 1",
[
lambda r: r.last_inserted_params(),
lambda r: r.last_updated_params(),
lambda r: r.prefetch_cols(),
lambda r: r.postfetch_cols(),
lambda r : r.inserted_primary_key
],
"Statement is not a compiled expression construct."
),
(
select([1]),
[
lambda r: r.last_inserted_params(),
lambda r : r.inserted_primary_key
],
r"Statement is not an insert\(\) expression construct."
),
(
select([1]),
[
lambda r: r.last_updated_params(),
],
r"Statement is not an update\(\) expression construct."
),
(
select([1]),
[
lambda r: r.prefetch_cols(),
lambda r : r.postfetch_cols()
],
r"Statement is not an insert\(\) "
r"or update\(\) expression construct."
),
]
for stmt, meths, msg in statements:
r = testing.db.execute(stmt)
try:
for meth in meths:
assert_raises_message(
tsa.exc.InvalidRequestError,
msg,
meth, r
)
finally:
r.close()
class ExecutionOptionsTest(fixtures.TestBase):
def test_dialect_conn_options(self):
engine = testing_engine("sqlite://", options=dict(_initialize=False))
engine.dialect = Mock()
conn = engine.connect()
c2 = conn.execution_options(foo="bar")
eq_(
engine.dialect.set_connection_execution_options.mock_calls,
[call(c2, {"foo": "bar"})]
)
def test_dialect_engine_options(self):
engine = testing_engine("sqlite://")
engine.dialect = Mock()
e2 = engine.execution_options(foo="bar")
eq_(
engine.dialect.set_engine_execution_options.mock_calls,
[call(e2, {"foo": "bar"})]
)
def test_dialect_engine_construction_options(self):
dialect = Mock()
engine = Engine(Mock(), dialect, Mock(),
execution_options={"foo": "bar"})
eq_(
dialect.set_engine_execution_options.mock_calls,
[call(engine, {"foo": "bar"})]
)
def test_propagate_engine_to_connection(self):
engine = testing_engine("sqlite://",
options=dict(execution_options={"foo": "bar"}))
conn = engine.connect()
eq_(conn._execution_options, {"foo": "bar"})
def test_propagate_option_engine_to_connection(self):
e1 = testing_engine("sqlite://",
options=dict(execution_options={"foo": "bar"}))
e2 = e1.execution_options(bat="hoho")
c1 = e1.connect()
c2 = e2.connect()
eq_(c1._execution_options, {"foo": "bar"})
eq_(c2._execution_options, {"foo": "bar", "bat": "hoho"})
class AlternateResultProxyTest(fixtures.TestBase):
__requires__ = ('sqlite', )
@classmethod
def setup_class(cls):
from sqlalchemy.engine import base, default
cls.engine = engine = testing_engine('sqlite://')
m = MetaData()
cls.table = t = Table('test', m,
Column('x', Integer, primary_key=True),
Column('y', String(50, convert_unicode='force'))
)
m.create_all(engine)
engine.execute(t.insert(), [
{'x':i, 'y':"t_%d" % i} for i in range(1, 12)
])
def _test_proxy(self, cls):
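        # install an execution context whose get_result_proxy() returns the
        # result proxy class under test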
class ExcCtx(default.DefaultExecutionContext):
def get_result_proxy(self):
return cls(self)
self.engine.dialect.execution_ctx_cls = ExcCtx
rows = []
r = self.engine.execute(select([self.table]))
assert isinstance(r, cls)
for i in range(5):
rows.append(r.fetchone())
eq_(rows, [(i, "t_%d" % i) for i in range(1, 6)])
rows = r.fetchmany(3)
eq_(rows, [(i, "t_%d" % i) for i in range(6, 9)])
rows = r.fetchall()
eq_(rows, [(i, "t_%d" % i) for i in range(9, 12)])
r = self.engine.execute(select([self.table]))
rows = r.fetchmany(None)
eq_(rows[0], (1, "t_1"))
# number of rows here could be one, or the whole thing
assert len(rows) == 1 or len(rows) == 11
r = self.engine.execute(select([self.table]).limit(1))
r.fetchone()
eq_(r.fetchone(), None)
r = self.engine.execute(select([self.table]).limit(5))
rows = r.fetchmany(6)
eq_(rows, [(i, "t_%d" % i) for i in range(1, 6)])
def test_plain(self):
self._test_proxy(_result.ResultProxy)
def test_buffered_row_result_proxy(self):
self._test_proxy(_result.BufferedRowResultProxy)
def test_fully_buffered_result_proxy(self):
self._test_proxy(_result.FullyBufferedResultProxy)
def test_buffered_column_result_proxy(self):
self._test_proxy(_result.BufferedColumnResultProxy)
class EngineEventsTest(fixtures.TestBase):
__requires__ = 'ad_hoc_engines',
__backend__ = True
def tearDown(self):
Engine.dispatch._clear()
Engine._has_events = False
def _assert_stmts(self, expected, received):
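        # each expected (stmt, params, posn) tuple must be matched, in order, by
        # a received statement that starts with stmt and whose parameters equal
        # either the dict form or the positional form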
orig = list(received)
for stmt, params, posn in expected:
if not received:
assert False, "Nothing available for stmt: %s" % stmt
while received:
teststmt, testparams, testmultiparams = \
received.pop(0)
teststmt = re.compile(r'[\n\t ]+', re.M).sub(' ',
teststmt).strip()
if teststmt.startswith(stmt) and (testparams
== params or testparams == posn):
break
def test_per_engine_independence(self):
e1 = testing_engine(config.db_url)
e2 = testing_engine(config.db_url)
canary = Mock()
event.listen(e1, "before_execute", canary)
s1 = select([1])
s2 = select([2])
e1.execute(s1)
e2.execute(s2)
eq_(
[arg[1][1] for arg in canary.mock_calls], [s1]
)
event.listen(e2, "before_execute", canary)
e1.execute(s1)
e2.execute(s2)
eq_([arg[1][1] for arg in canary.mock_calls], [s1, s1, s2])
def test_per_engine_plus_global(self):
canary = Mock()
event.listen(Engine, "before_execute", canary.be1)
e1 = testing_engine(config.db_url)
e2 = testing_engine(config.db_url)
event.listen(e1, "before_execute", canary.be2)
event.listen(Engine, "before_execute", canary.be3)
e1.connect()
e2.connect()
e1.execute(select([1]))
eq_(canary.be1.call_count, 1)
eq_(canary.be2.call_count, 1)
e2.execute(select([1]))
eq_(canary.be1.call_count, 2)
eq_(canary.be2.call_count, 1)
eq_(canary.be3.call_count, 2)
def test_per_connection_plus_engine(self):
canary = Mock()
e1 = testing_engine(config.db_url)
event.listen(e1, "before_execute", canary.be1)
conn = e1.connect()
event.listen(conn, "before_execute", canary.be2)
conn.execute(select([1]))
eq_(canary.be1.call_count, 1)
eq_(canary.be2.call_count, 1)
conn._branch().execute(select([1]))
eq_(canary.be1.call_count, 2)
eq_(canary.be2.call_count, 2)
def test_add_event_after_connect(self):
# new feature as of #2978
canary = Mock()
e1 = create_engine(config.db_url)
assert not e1._has_events
conn = e1.connect()
event.listen(e1, "before_execute", canary.be1)
conn.execute(select([1]))
eq_(canary.be1.call_count, 1)
conn._branch().execute(select([1]))
eq_(canary.be1.call_count, 2)
def test_force_conn_events_false(self):
canary = Mock()
e1 = create_engine(config.db_url)
assert not e1._has_events
event.listen(e1, "before_execute", canary.be1)
conn = e1._connection_cls(e1, connection=e1.raw_connection(),
_has_events=False)
conn.execute(select([1]))
eq_(canary.be1.call_count, 0)
conn._branch().execute(select([1]))
eq_(canary.be1.call_count, 0)
def test_cursor_events_ctx_execute_scalar(self):
canary = Mock()
e1 = testing_engine(config.db_url)
event.listen(e1, "before_cursor_execute", canary.bce)
event.listen(e1, "after_cursor_execute", canary.ace)
stmt = str(select([1]).compile(dialect=e1.dialect))
with e1.connect() as conn:
dialect = conn.dialect
ctx = dialect.execution_ctx_cls._init_statement(
dialect, conn, conn.connection, stmt, {})
ctx._execute_scalar(stmt, Integer())
eq_(canary.bce.mock_calls,
[call(conn, ctx.cursor, stmt, ctx.parameters[0], ctx, False)])
eq_(canary.ace.mock_calls,
[call(conn, ctx.cursor, stmt, ctx.parameters[0], ctx, False)])
def test_cursor_events_execute(self):
canary = Mock()
e1 = testing_engine(config.db_url)
event.listen(e1, "before_cursor_execute", canary.bce)
event.listen(e1, "after_cursor_execute", canary.ace)
stmt = str(select([1]).compile(dialect=e1.dialect))
with e1.connect() as conn:
result = conn.execute(stmt)
ctx = result.context
eq_(canary.bce.mock_calls,
[call(conn, ctx.cursor, stmt, ctx.parameters[0], ctx, False)])
eq_(canary.ace.mock_calls,
[call(conn, ctx.cursor, stmt, ctx.parameters[0], ctx, False)])
def test_argument_format_execute(self):
def before_execute(conn, clauseelement, multiparams, params):
assert isinstance(multiparams, (list, tuple))
assert isinstance(params, dict)
def after_execute(conn, clauseelement, multiparams, params, result):
assert isinstance(multiparams, (list, tuple))
assert isinstance(params, dict)
e1 = testing_engine(config.db_url)
event.listen(e1, 'before_execute', before_execute)
event.listen(e1, 'after_execute', after_execute)
e1.execute(select([1]))
e1.execute(select([1]).compile(dialect=e1.dialect).statement)
e1.execute(select([1]).compile(dialect=e1.dialect))
e1._execute_compiled(select([1]).compile(dialect=e1.dialect), (), {})
@testing.fails_on('firebird', 'Data type unknown')
def test_execute_events(self):
stmts = []
cursor_stmts = []
def execute(conn, clauseelement, multiparams,
params ):
stmts.append((str(clauseelement), params, multiparams))
def cursor_execute(conn, cursor, statement, parameters,
context, executemany):
cursor_stmts.append((str(statement), parameters, None))
for engine in [
engines.testing_engine(options=dict(implicit_returning=False)),
engines.testing_engine(options=dict(implicit_returning=False,
strategy='threadlocal')),
engines.testing_engine(options=dict(implicit_returning=False)).\
connect()
]:
event.listen(engine, 'before_execute', execute)
event.listen(engine, 'before_cursor_execute', cursor_execute)
m = MetaData(engine)
t1 = Table('t1', m,
Column('c1', Integer, primary_key=True),
Column('c2', String(50), default=func.lower('Foo'),
primary_key=True)
)
m.create_all()
try:
t1.insert().execute(c1=5, c2='some data')
t1.insert().execute(c1=6)
eq_(engine.execute('select * from t1').fetchall(), [(5,
'some data'), (6, 'foo')])
finally:
m.drop_all()
compiled = [('CREATE TABLE t1', {}, None),
('INSERT INTO t1 (c1, c2)',
{'c2': 'some data', 'c1': 5}, None),
('INSERT INTO t1 (c1, c2)',
{'c1': 6}, None),
('select * from t1', {}, None),
('DROP TABLE t1', {}, None)]
# or engine.dialect.preexecute_pk_sequences:
if not testing.against('oracle+zxjdbc'):
cursor = [
('CREATE TABLE t1', {}, ()),
('INSERT INTO t1 (c1, c2)', {
'c2': 'some data', 'c1': 5},
(5, 'some data')),
('SELECT lower', {'lower_2': 'Foo'},
('Foo', )),
('INSERT INTO t1 (c1, c2)',
{'c2': 'foo', 'c1': 6},
(6, 'foo')),
('select * from t1', {}, ()),
('DROP TABLE t1', {}, ()),
]
else:
insert2_params = 6, 'Foo'
if testing.against('oracle+zxjdbc'):
insert2_params += (ReturningParam(12), )
cursor = [('CREATE TABLE t1', {}, ()),
('INSERT INTO t1 (c1, c2)',
{'c2': 'some data', 'c1': 5}, (5, 'some data')),
('INSERT INTO t1 (c1, c2)', {'c1': 6,
'lower_2': 'Foo'}, insert2_params),
('select * from t1', {}, ()),
('DROP TABLE t1', {}, ())]
            # bind param name 'lower_2' might be incorrect
self._assert_stmts(compiled, stmts)
self._assert_stmts(cursor, cursor_stmts)
def test_options(self):
canary = []
def execute(conn, *args, **kw):
canary.append('execute')
def cursor_execute(conn, *args, **kw):
canary.append('cursor_execute')
engine = engines.testing_engine()
event.listen(engine, 'before_execute', execute)
event.listen(engine, 'before_cursor_execute', cursor_execute)
conn = engine.connect()
c2 = conn.execution_options(foo='bar')
eq_(c2._execution_options, {'foo':'bar'})
c2.execute(select([1]))
c3 = c2.execution_options(bar='bat')
eq_(c3._execution_options, {'foo':'bar', 'bar':'bat'})
eq_(canary, ['execute', 'cursor_execute'])
def test_retval_flag(self):
canary = []
def tracker(name):
def go(conn, *args, **kw):
canary.append(name)
return go
def execute(conn, clauseelement, multiparams, params):
canary.append('execute')
return clauseelement, multiparams, params
def cursor_execute(conn, cursor, statement,
parameters, context, executemany):
canary.append('cursor_execute')
return statement, parameters
engine = engines.testing_engine()
assert_raises(
tsa.exc.ArgumentError,
event.listen, engine, "begin", tracker("begin"), retval=True
)
event.listen(engine, "before_execute", execute, retval=True)
event.listen(engine, "before_cursor_execute", cursor_execute, retval=True)
engine.execute(select([1]))
eq_(
canary, ['execute', 'cursor_execute']
)
def test_engine_connect(self):
engine = engines.testing_engine()
tracker = Mock()
event.listen(engine, "engine_connect", tracker)
c1 = engine.connect()
c2 = c1._branch()
c1.close()
eq_(
tracker.mock_calls,
[call(c1, False), call(c2, True)]
)
def test_execution_options(self):
engine = engines.testing_engine()
engine_tracker = Mock()
conn_tracker = Mock()
event.listen(engine, "set_engine_execution_options", engine_tracker)
event.listen(engine, "set_connection_execution_options", conn_tracker)
e2 = engine.execution_options(e1='opt_e1')
c1 = engine.connect()
c2 = c1.execution_options(c1='opt_c1')
c3 = e2.connect()
c4 = c3.execution_options(c3='opt_c3')
eq_(
engine_tracker.mock_calls,
[call(e2, {'e1': 'opt_e1'})]
)
eq_(
conn_tracker.mock_calls,
[call(c2, {"c1": "opt_c1"}), call(c4, {"c3": "opt_c3"})]
)
@testing.requires.sequences
@testing.provide_metadata
def test_cursor_execute(self):
canary = []
def tracker(name):
def go(conn, cursor, statement, parameters, context, executemany):
canary.append((statement, context))
return go
engine = engines.testing_engine()
t = Table('t', self.metadata,
Column('x', Integer, Sequence('t_id_seq'), primary_key=True),
implicit_returning=False
)
self.metadata.create_all(engine)
with engine.begin() as conn:
event.listen(conn, 'before_cursor_execute', tracker('cursor_execute'))
conn.execute(t.insert())
# we see the sequence pre-executed in the first call
assert "t_id_seq" in canary[0][0]
assert "INSERT" in canary[1][0]
# same context
is_(
canary[0][1], canary[1][1]
)
def test_transactional(self):
canary = []
def tracker(name):
def go(conn, *args, **kw):
canary.append(name)
return go
engine = engines.testing_engine()
event.listen(engine, 'before_execute', tracker('execute'))
event.listen(engine, 'before_cursor_execute', tracker('cursor_execute'))
event.listen(engine, 'begin', tracker('begin'))
event.listen(engine, 'commit', tracker('commit'))
event.listen(engine, 'rollback', tracker('rollback'))
conn = engine.connect()
trans = conn.begin()
conn.execute(select([1]))
trans.rollback()
trans = conn.begin()
conn.execute(select([1]))
trans.commit()
eq_(canary, [
'begin', 'execute', 'cursor_execute', 'rollback',
'begin', 'execute', 'cursor_execute', 'commit',
])
@testing.requires.savepoints
@testing.requires.two_phase_transactions
def test_transactional_advanced(self):
canary1 = []
def tracker1(name):
def go(*args, **kw):
canary1.append(name)
return go
canary2 = []
def tracker2(name):
def go(*args, **kw):
canary2.append(name)
return go
engine = engines.testing_engine()
for name in ['begin', 'savepoint',
'rollback_savepoint', 'release_savepoint',
'rollback', 'begin_twophase',
'prepare_twophase', 'commit_twophase']:
event.listen(engine, '%s' % name, tracker1(name))
conn = engine.connect()
for name in ['begin', 'savepoint',
'rollback_savepoint', 'release_savepoint',
'rollback', 'begin_twophase',
'prepare_twophase', 'commit_twophase']:
event.listen(conn, '%s' % name, tracker2(name))
trans = conn.begin()
trans2 = conn.begin_nested()
conn.execute(select([1]))
trans2.rollback()
trans2 = conn.begin_nested()
conn.execute(select([1]))
trans2.commit()
trans.rollback()
trans = conn.begin_twophase()
conn.execute(select([1]))
trans.prepare()
trans.commit()
eq_(canary1, ['begin', 'savepoint',
'rollback_savepoint', 'savepoint', 'release_savepoint',
'rollback', 'begin_twophase',
'prepare_twophase', 'commit_twophase']
)
eq_(canary2, ['begin', 'savepoint',
'rollback_savepoint', 'savepoint', 'release_savepoint',
'rollback', 'begin_twophase',
'prepare_twophase', 'commit_twophase']
)
class HandleErrorTest(fixtures.TestBase):
__requires__ = 'ad_hoc_engines',
__backend__ = True
def tearDown(self):
Engine.dispatch._clear()
Engine._has_events = False
def test_legacy_dbapi_error(self):
engine = engines.testing_engine()
canary = Mock()
event.listen(engine, "dbapi_error", canary)
with engine.connect() as conn:
try:
conn.execute("SELECT FOO FROM I_DONT_EXIST")
assert False
except tsa.exc.DBAPIError as e:
eq_(canary.mock_calls[0][1][5], e.orig)
eq_(canary.mock_calls[0][1][2], "SELECT FOO FROM I_DONT_EXIST")
def test_legacy_dbapi_error_no_ad_hoc_context(self):
engine = engines.testing_engine()
listener = Mock(return_value=None)
event.listen(engine, 'dbapi_error', listener)
nope = Exception("nope")
class MyType(TypeDecorator):
impl = Integer
def process_bind_param(self, value, dialect):
raise nope
with engine.connect() as conn:
assert_raises_message(
tsa.exc.StatementError,
r"nope \(original cause: Exception: nope\) u?'SELECT 1 ",
conn.execute,
select([1]).where(
column('foo') == literal('bar', MyType()))
)
# no legacy event
eq_(listener.mock_calls, [])
def test_legacy_dbapi_error_non_dbapi_error(self):
engine = engines.testing_engine()
listener = Mock(return_value=None)
event.listen(engine, 'dbapi_error', listener)
nope = TypeError("I'm not a DBAPI error")
with engine.connect() as c:
c.connection.cursor = Mock(
return_value=Mock(
execute=Mock(
side_effect=nope
))
)
assert_raises_message(
TypeError,
"I'm not a DBAPI error",
c.execute, "select "
)
# no legacy event
eq_(listener.mock_calls, [])
def test_handle_error(self):
engine = engines.testing_engine()
canary = Mock(return_value=None)
event.listen(engine, "handle_error", canary)
with engine.connect() as conn:
try:
conn.execute("SELECT FOO FROM I_DONT_EXIST")
assert False
except tsa.exc.DBAPIError as e:
ctx = canary.mock_calls[0][1][0]
eq_(ctx.original_exception, e.orig)
is_(ctx.sqlalchemy_exception, e)
eq_(ctx.statement, "SELECT FOO FROM I_DONT_EXIST")
def test_exception_event_reraise(self):
engine = engines.testing_engine()
class MyException(Exception):
pass
@event.listens_for(engine, 'handle_error', retval=True)
def err(context):
stmt = context.statement
exception = context.original_exception
if "ERROR ONE" in str(stmt):
return MyException("my exception")
elif "ERROR TWO" in str(stmt):
return exception
else:
return None
conn = engine.connect()
# case 1: custom exception
assert_raises_message(
MyException,
"my exception",
conn.execute, "SELECT 'ERROR ONE' FROM I_DONT_EXIST"
)
# case 2: return the DBAPI exception we're given;
# no wrapping should occur
assert_raises(
conn.dialect.dbapi.Error,
conn.execute, "SELECT 'ERROR TWO' FROM I_DONT_EXIST"
)
# case 3: normal wrapping
assert_raises(
tsa.exc.DBAPIError,
conn.execute, "SELECT 'ERROR THREE' FROM I_DONT_EXIST"
)
def test_exception_event_reraise_chaining(self):
engine = engines.testing_engine()
class MyException1(Exception):
pass
class MyException2(Exception):
pass
class MyException3(Exception):
pass
@event.listens_for(engine, 'handle_error', retval=True)
def err1(context):
stmt = context.statement
if "ERROR ONE" in str(stmt) or "ERROR TWO" in str(stmt) \
or "ERROR THREE" in str(stmt):
return MyException1("my exception")
elif "ERROR FOUR" in str(stmt):
raise MyException3("my exception short circuit")
@event.listens_for(engine, 'handle_error', retval=True)
def err2(context):
stmt = context.statement
if ("ERROR ONE" in str(stmt) or "ERROR FOUR" in str(stmt)) \
and isinstance(context.chained_exception, MyException1):
raise MyException2("my exception chained")
elif "ERROR TWO" in str(stmt):
return context.chained_exception
else:
return None
conn = engine.connect()
        with patch.object(engine.dialect.execution_ctx_cls,
                          "handle_dbapi_exception") as patched:
assert_raises_message(
MyException2,
"my exception chained",
conn.execute, "SELECT 'ERROR ONE' FROM I_DONT_EXIST"
)
eq_(patched.call_count, 1)
        with patch.object(engine.dialect.execution_ctx_cls,
                          "handle_dbapi_exception") as patched:
assert_raises(
MyException1,
conn.execute, "SELECT 'ERROR TWO' FROM I_DONT_EXIST"
)
eq_(patched.call_count, 1)
        with patch.object(engine.dialect.execution_ctx_cls,
                          "handle_dbapi_exception") as patched:
# test that non None from err1 isn't cancelled out
# by err2
assert_raises(
MyException1,
conn.execute, "SELECT 'ERROR THREE' FROM I_DONT_EXIST"
)
eq_(patched.call_count, 1)
        with patch.object(engine.dialect.execution_ctx_cls,
                          "handle_dbapi_exception") as patched:
assert_raises(
tsa.exc.DBAPIError,
conn.execute, "SELECT 'ERROR FIVE' FROM I_DONT_EXIST"
)
eq_(patched.call_count, 1)
        with patch.object(engine.dialect.execution_ctx_cls,
                          "handle_dbapi_exception") as patched:
assert_raises_message(
MyException3,
"my exception short circuit",
conn.execute, "SELECT 'ERROR FOUR' FROM I_DONT_EXIST"
)
eq_(patched.call_count, 1)
def test_exception_event_ad_hoc_context(self):
"""test that handle_error is called with a context in
cases where _handle_dbapi_error() is normally called without
any context.
"""
engine = engines.testing_engine()
listener = Mock(return_value=None)
event.listen(engine, 'handle_error', listener)
nope = Exception("nope")
class MyType(TypeDecorator):
impl = Integer
def process_bind_param(self, value, dialect):
raise nope
with engine.connect() as conn:
assert_raises_message(
tsa.exc.StatementError,
r"nope \(original cause: Exception: nope\) u?'SELECT 1 ",
conn.execute,
select([1]).where(
column('foo') == literal('bar', MyType()))
)
ctx = listener.mock_calls[0][1][0]
assert ctx.statement.startswith("SELECT 1 ")
is_(ctx.is_disconnect, False)
is_(ctx.original_exception, nope)
def test_exception_event_non_dbapi_error(self):
"""test that dbapi_error is called with a context in
cases where DBAPI raises an exception that is not a DBAPI
exception, e.g. internal errors or encoding problems.
"""
engine = engines.testing_engine()
listener = Mock(return_value=None)
event.listen(engine, 'handle_error', listener)
nope = TypeError("I'm not a DBAPI error")
with engine.connect() as c:
c.connection.cursor = Mock(
return_value=Mock(
execute=Mock(
side_effect=nope
))
)
assert_raises_message(
TypeError,
"I'm not a DBAPI error",
c.execute, "select "
)
ctx = listener.mock_calls[0][1][0]
eq_(ctx.statement, "select ")
is_(ctx.is_disconnect, False)
is_(ctx.original_exception, nope)
def _test_alter_disconnect(self, orig_error, evt_value):
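        # force the dialect's is_disconnect() to return orig_error, let the
        # handle_error listener override ctx.is_disconnect, and check that the
        # override is what ends up on connection_invalidated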
engine = engines.testing_engine()
@event.listens_for(engine, "handle_error")
def evt(ctx):
ctx.is_disconnect = evt_value
with patch.object(engine.dialect, "is_disconnect",
Mock(return_value=orig_error)):
with engine.connect() as c:
try:
c.execute("SELECT x FROM nonexistent")
assert False
except tsa.exc.StatementError as st:
eq_(st.connection_invalidated, evt_value)
def test_alter_disconnect_to_true(self):
self._test_alter_disconnect(False, True)
self._test_alter_disconnect(True, True)
def test_alter_disconnect_to_false(self):
self._test_alter_disconnect(True, False)
self._test_alter_disconnect(False, False)
class ProxyConnectionTest(fixtures.TestBase):
"""These are the same tests as EngineEventsTest, except using
the deprecated ConnectionProxy interface.
"""
__requires__ = 'ad_hoc_engines',
__prefer_requires__ = 'two_phase_transactions',
@testing.uses_deprecated(r'.*Use event.listen')
@testing.fails_on('firebird', 'Data type unknown')
def test_proxy(self):
stmts = []
cursor_stmts = []
class MyProxy(ConnectionProxy):
def execute(
self,
conn,
execute,
clauseelement,
*multiparams,
**params
):
stmts.append((str(clauseelement), params, multiparams))
return execute(clauseelement, *multiparams, **params)
def cursor_execute(
self,
execute,
cursor,
statement,
parameters,
context,
executemany,
):
cursor_stmts.append((str(statement), parameters, None))
return execute(cursor, statement, parameters, context)
def assert_stmts(expected, received):
for stmt, params, posn in expected:
if not received:
assert False, "Nothing available for stmt: %s" % stmt
while received:
teststmt, testparams, testmultiparams = \
received.pop(0)
teststmt = re.compile(r'[\n\t ]+', re.M).sub(' ',
teststmt).strip()
if teststmt.startswith(stmt) and (testparams
== params or testparams == posn):
break
for engine in \
engines.testing_engine(options=dict(implicit_returning=False,
proxy=MyProxy())), \
engines.testing_engine(options=dict(implicit_returning=False,
proxy=MyProxy(),
strategy='threadlocal')):
m = MetaData(engine)
t1 = Table('t1', m,
Column('c1', Integer, primary_key=True),
Column('c2', String(50), default=func.lower('Foo'),
primary_key=True)
)
m.create_all()
try:
t1.insert().execute(c1=5, c2='some data')
t1.insert().execute(c1=6)
eq_(engine.execute('select * from t1').fetchall(), [(5,
'some data'), (6, 'foo')])
finally:
m.drop_all()
engine.dispose()
compiled = [('CREATE TABLE t1', {}, None),
('INSERT INTO t1 (c1, c2)', {'c2': 'some data',
'c1': 5}, None), ('INSERT INTO t1 (c1, c2)',
{'c1': 6}, None), ('select * from t1', {},
None), ('DROP TABLE t1', {}, None)]
            # or engine.dialect.preexecute_pk_sequences:
            if not testing.against('oracle+zxjdbc'):
cursor = [
('CREATE TABLE t1', {}, ()),
('INSERT INTO t1 (c1, c2)', {'c2': 'some data', 'c1'
: 5}, (5, 'some data')),
('SELECT lower', {'lower_2': 'Foo'},
('Foo', )),
('INSERT INTO t1 (c1, c2)', {'c2': 'foo', 'c1': 6},
(6, 'foo')),
('select * from t1', {}, ()),
('DROP TABLE t1', {}, ()),
]
else:
insert2_params = 6, 'Foo'
if testing.against('oracle+zxjdbc'):
insert2_params += (ReturningParam(12), )
cursor = [('CREATE TABLE t1', {}, ()),
('INSERT INTO t1 (c1, c2)', {'c2': 'some data'
, 'c1': 5}, (5, 'some data')),
('INSERT INTO t1 (c1, c2)', {'c1': 6,
'lower_2': 'Foo'}, insert2_params),
                          ('select * from t1', {}, ()),
                          ('DROP TABLE t1', {}, ())]
                # bind param name 'lower_2' might be incorrect
assert_stmts(compiled, stmts)
assert_stmts(cursor, cursor_stmts)
@testing.uses_deprecated(r'.*Use event.listen')
def test_options(self):
canary = []
class TrackProxy(ConnectionProxy):
def __getattribute__(self, key):
fn = object.__getattribute__(self, key)
def go(*arg, **kw):
canary.append(fn.__name__)
return fn(*arg, **kw)
return go
engine = engines.testing_engine(options={'proxy':TrackProxy()})
conn = engine.connect()
c2 = conn.execution_options(foo='bar')
eq_(c2._execution_options, {'foo':'bar'})
c2.execute(select([1]))
c3 = c2.execution_options(bar='bat')
eq_(c3._execution_options, {'foo':'bar', 'bar':'bat'})
eq_(canary, ['execute', 'cursor_execute'])
@testing.uses_deprecated(r'.*Use event.listen')
def test_transactional(self):
canary = []
class TrackProxy(ConnectionProxy):
def __getattribute__(self, key):
fn = object.__getattribute__(self, key)
def go(*arg, **kw):
canary.append(fn.__name__)
return fn(*arg, **kw)
return go
engine = engines.testing_engine(options={'proxy':TrackProxy()})
conn = engine.connect()
trans = conn.begin()
conn.execute(select([1]))
trans.rollback()
trans = conn.begin()
conn.execute(select([1]))
trans.commit()
eq_(canary, [
'begin', 'execute', 'cursor_execute', 'rollback',
'begin', 'execute', 'cursor_execute', 'commit',
])
@testing.uses_deprecated(r'.*Use event.listen')
@testing.requires.savepoints
@testing.requires.two_phase_transactions
def test_transactional_advanced(self):
canary = []
class TrackProxy(ConnectionProxy):
def __getattribute__(self, key):
fn = object.__getattribute__(self, key)
def go(*arg, **kw):
canary.append(fn.__name__)
return fn(*arg, **kw)
return go
engine = engines.testing_engine(options={'proxy':TrackProxy()})
conn = engine.connect()
trans = conn.begin()
trans2 = conn.begin_nested()
conn.execute(select([1]))
trans2.rollback()
trans2 = conn.begin_nested()
conn.execute(select([1]))
trans2.commit()
trans.rollback()
trans = conn.begin_twophase()
conn.execute(select([1]))
trans.prepare()
trans.commit()
canary = [t for t in canary if t not in ('cursor_execute', 'execute')]
eq_(canary, ['begin', 'savepoint',
'rollback_savepoint', 'savepoint', 'release_savepoint',
'rollback', 'begin_twophase',
'prepare_twophase', 'commit_twophase']
)
class DialectEventTest(fixtures.TestBase):
@contextmanager
def _run_test(self, retval):
m1 = Mock()
m1.do_execute.return_value = retval
m1.do_executemany.return_value = retval
m1.do_execute_no_params.return_value = retval
e = engines.testing_engine(options={"_initialize": False})
event.listen(e, "do_execute", m1.do_execute)
event.listen(e, "do_executemany", m1.do_executemany)
event.listen(e, "do_execute_no_params", m1.do_execute_no_params)
e.dialect.do_execute = m1.real_do_execute
e.dialect.do_executemany = m1.real_do_executemany
e.dialect.do_execute_no_params = m1.real_do_execute_no_params
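        # the real_do_* mocks stand in for the dialect-level methods; per
        # _assert() below, they are skipped whenever the corresponding event
        # listener returns a true value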
def mock_the_cursor(cursor, *arg):
arg[-1].get_result_proxy = Mock(return_value=Mock(context=arg[-1]))
return retval
m1.real_do_execute.side_effect = m1.do_execute.side_effect = mock_the_cursor
m1.real_do_executemany.side_effect = m1.do_executemany.side_effect = mock_the_cursor
m1.real_do_execute_no_params.side_effect = m1.do_execute_no_params.side_effect = mock_the_cursor
with e.connect() as conn:
yield conn, m1
def _assert(self, retval, m1, m2, mock_calls):
eq_(m1.mock_calls, mock_calls)
if retval:
eq_(m2.mock_calls, [])
else:
eq_(m2.mock_calls, mock_calls)
def _test_do_execute(self, retval):
with self._run_test(retval) as (conn, m1):
result = conn.execute("insert into table foo", {"foo": "bar"})
self._assert(
retval,
m1.do_execute, m1.real_do_execute,
[call(
result.context.cursor,
"insert into table foo",
{"foo": "bar"}, result.context)]
)
def _test_do_executemany(self, retval):
with self._run_test(retval) as (conn, m1):
result = conn.execute("insert into table foo",
[{"foo": "bar"}, {"foo": "bar"}])
self._assert(
retval,
m1.do_executemany, m1.real_do_executemany,
[call(
result.context.cursor,
"insert into table foo",
[{"foo": "bar"}, {"foo": "bar"}], result.context)]
)
def _test_do_execute_no_params(self, retval):
with self._run_test(retval) as (conn, m1):
result = conn.execution_options(no_parameters=True).\
execute("insert into table foo")
self._assert(
retval,
m1.do_execute_no_params, m1.real_do_execute_no_params,
[call(
result.context.cursor,
"insert into table foo", result.context)]
)
def _test_cursor_execute(self, retval):
with self._run_test(retval) as (conn, m1):
dialect = conn.dialect
stmt = "insert into table foo"
params = {"foo": "bar"}
ctx = dialect.execution_ctx_cls._init_statement(
dialect, conn, conn.connection, stmt, [params])
conn._cursor_execute(ctx.cursor, stmt, params, ctx)
self._assert(
retval,
m1.do_execute, m1.real_do_execute,
[call(
ctx.cursor,
"insert into table foo",
{"foo": "bar"}, ctx)]
)
def test_do_execute_w_replace(self):
self._test_do_execute(True)
def test_do_execute_wo_replace(self):
self._test_do_execute(False)
def test_do_executemany_w_replace(self):
self._test_do_executemany(True)
def test_do_executemany_wo_replace(self):
self._test_do_executemany(False)
def test_do_execute_no_params_w_replace(self):
self._test_do_execute_no_params(True)
def test_do_execute_no_params_wo_replace(self):
self._test_do_execute_no_params(False)
def test_cursor_execute_w_replace(self):
self._test_cursor_execute(True)
def test_cursor_execute_wo_replace(self):
self._test_cursor_execute(False)
| michaelBenin/sqlalchemy | test/engine/test_execute.py | Python | mit | 70,582 |
"""
``editquality generate_make -h``
::
Code-generate Makefile from template and configuration
:Usage:
generate_make -h | --help
generate_make
[--config=<path>]
[--main=<filename>]
[--output=<path>]
[--templates=<path>]
[--debug]
:Options:
--config=<path> Directory to search for configuration files
[default: config/]
--main=<filename> Override to use a main template other than the
default [default: Makefile.j2]
--output=<path> Where to write the Makefile output.
[default: <stdout>]
--templates=<path> Directory to search for input templates.
[default: templates/]
--debug Print debug logging
"""
# TODO:
# * make API calls to learn things
# * ores/config has dict merge
# * survey dependency solvers
# https://github.com/ninja-build/ninja/wiki/List-of-generators-producing-ninja-build-files
# ** Still considering: scons, doit, drake, ninja, meson
# ** Don't like so far: waf
# * Where can we store information about samples?
# Original population rates; how we've distorted them.
import logging
import os.path
import sys
import docopt
from .. import config
from ..codegen import generate
logger = logging.getLogger(__name__)
def main(argv=None):
args = docopt.docopt(__doc__, argv=argv)
logging.basicConfig(
level=logging.DEBUG if args['--debug'] else logging.WARNING,
format='%(asctime)s %(levelname)s:%(name)s -- %(message)s'
)
config_path = args["--config"]
output_f = sys.stdout \
if args["--output"] == "<stdout>" \
else open(args["--output"], "w")
templates_path = args["--templates"]
main_template_path = args["--main"]
if not os.path.isabs(main_template_path):
# Join a filename to the default templates dir.
main_template_path = os.path.join(templates_path, main_template_path)
with open(main_template_path, "r") as f:
main_template = f.read()
variables = config.load_config(config_path)
output = generate.generate(variables, templates_path, main_template)
output_f.write(output)
| wiki-ai/editquality | editquality/utilities/generate_make.py | Python | mit | 2,362 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Product(Model):
_required = []
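    # _attribute_map pairs each Python attribute with its wire-format key and
    # its msrest serialization type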
_attribute_map = {
'integer': {'key': 'integer', 'type': 'int'},
'string': {'key': 'string', 'type': 'str'},
}
def __init__(self, *args, **kwargs):
"""Product
:param int integer
:param str string
"""
self.integer = None
self.string = None
super(Product, self).__init__(*args, **kwargs)
| vulcansteel/autorest | AutoRest/Generators/Python/Python.Tests/Expected/AcceptanceTests/BodyArray/auto_rest_swagger_bat_array_service/models/product.py | Python | mit | 931 |
# coding=utf8
"""
Parser for todo format string.
from todo.parser import parser
parser.parse(string) # return an Todo instance
"""
from models import Task
from models import Todo
from ply import lex
from ply import yacc
class TodoLexer(object):
"""
Lexer for Todo format string.
Tokens
ID e.g. '1.'
DONE e.g. '(x)'
TASK e.g. 'This is a task'
"""
tokens = (
"ID",
"DONE",
"TASK",
)
t_ignore = "\x20\x09" # ignore spaces and tabs
def t_ID(self, t):
r'\d+\.([uU]|[lL]|[uU][lL]|[lL][uU])?'
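        # matches a task id such as "1."; the optional integer-suffix letters
        # look vestigial (a suffix would break the int() conversion below), and
        # for plain ids [:-1] strips the trailing '.'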
t.value = int(t.value[:-1])
return t
def t_DONE(self, t):
r'(\(x\))'
return t
def t_TASK(self, t):
r'((?!\(x\))).+'
return t
def t_newline(self, t):
r'\n+'
t.lexer.lineno += len(t.value)
def t_error(self, t):
raise SyntaxError(
"Illegal character: '%s' at Line %d" % (t.value[0], t.lineno)
)
def __init__(self):
self.lexer = lex.lex(module=self)
class TodoParser(object):
"""
    Parser for Todo format string; works with a TodoLexer.
    Parses a string into a Todo instance:
todo_str = "1. (x) Write email to tom"
TodoParser().parse(todo_str)
"""
tokens = TodoLexer.tokens
def p_error(self, p):
if p:
raise SyntaxError(
"Character '%s' at line %d" % (p.value[0], p.lineno)
)
else:
raise SyntaxError("SyntaxError at EOF")
def p_start(self, p):
"start : translation_unit"
p[0] = self.todo
def p_translation_unit(self, p):
"""
translation_unit : translate_task
| translation_unit translate_task
|
"""
pass
def p_translation_task(self, p):
"""
translate_task : ID DONE TASK
| ID TASK
"""
if len(p) == 4:
done = True
content = p[3]
elif len(p) == 3:
done = False
content = p[2]
task = Task(p[1], content, done)
self.todo.append(task)
def __init__(self):
self.parser = yacc.yacc(module=self, debug=0, write_tables=0)
def parse(self, data):
# reset list
self.todo = Todo()
return self.parser.parse(data)
lexer = TodoLexer() # build lexer
parser = TodoParser() # build parser
| guori12321/todo | todo/parser.py | Python | mit | 2,473 |
import time
import pymemcache.client
import pytest
from limits import RateLimitItemPerMinute, RateLimitItemPerSecond
from limits.storage import MemcachedStorage, storage_from_string
from limits.strategies import (
FixedWindowElasticExpiryRateLimiter,
FixedWindowRateLimiter,
)
from tests.utils import fixed_start
@pytest.mark.memcached
@pytest.mark.flaky
class TestMemcachedStorage:
@pytest.fixture(autouse=True)
def setup(self, memcached, memcached_cluster):
self.storage_url = "memcached://localhost:22122"
def test_init_options(self, mocker):
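        # spy on pymemcache's PooledClient constructor to verify that extra
        # keyword options (connect_timeout) are forwarded to it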
constructor = mocker.spy(pymemcache.client, "PooledClient")
assert storage_from_string(self.storage_url, connect_timeout=1).check()
assert constructor.call_args[1]["connect_timeout"] == 1
@fixed_start
def test_fixed_window(self):
storage = MemcachedStorage("memcached://localhost:22122")
limiter = FixedWindowRateLimiter(storage)
per_min = RateLimitItemPerSecond(10)
start = time.time()
count = 0
while time.time() - start < 0.5 and count < 10:
assert limiter.hit(per_min)
count += 1
assert not limiter.hit(per_min)
while time.time() - start <= 1:
time.sleep(0.1)
assert limiter.hit(per_min)
@fixed_start
def test_fixed_window_cluster(self):
storage = MemcachedStorage("memcached://localhost:22122,localhost:22123")
limiter = FixedWindowRateLimiter(storage)
per_min = RateLimitItemPerSecond(10)
start = time.time()
count = 0
while time.time() - start < 0.5 and count < 10:
assert limiter.hit(per_min)
count += 1
assert not limiter.hit(per_min)
while time.time() - start <= 1:
time.sleep(0.1)
assert limiter.hit(per_min)
@fixed_start
def test_fixed_window_with_elastic_expiry(self):
storage = MemcachedStorage("memcached://localhost:22122")
limiter = FixedWindowElasticExpiryRateLimiter(storage)
per_sec = RateLimitItemPerSecond(2, 2)
assert limiter.hit(per_sec)
time.sleep(1)
assert limiter.hit(per_sec)
assert not limiter.test(per_sec)
time.sleep(1)
assert not limiter.test(per_sec)
time.sleep(1)
assert limiter.test(per_sec)
@fixed_start
def test_fixed_window_with_elastic_expiry_cluster(self):
storage = MemcachedStorage("memcached://localhost:22122,localhost:22123")
limiter = FixedWindowElasticExpiryRateLimiter(storage)
per_sec = RateLimitItemPerSecond(2, 2)
assert limiter.hit(per_sec)
time.sleep(1)
assert limiter.hit(per_sec)
assert not limiter.test(per_sec)
time.sleep(1)
assert not limiter.test(per_sec)
time.sleep(1)
assert limiter.test(per_sec)
def test_clear(self):
storage = MemcachedStorage("memcached://localhost:22122")
limiter = FixedWindowRateLimiter(storage)
per_min = RateLimitItemPerMinute(1)
limiter.hit(per_min)
assert not limiter.hit(per_min)
limiter.clear(per_min)
assert limiter.hit(per_min)
| alisaifee/limits | tests/storage/test_memcached.py | Python | mit | 3,218 |
import os
import sys
import tempfile
from fabric.api import run, sudo, env, local, hide, settings
from fabric.contrib.files import append, sed, exists, contains
from fabric.context_managers import prefix
from fabric.operations import get, put
from fabric.context_managers import cd
from fabric.tasks import Task
from fab_deploy.functions import random_password
from fab_deploy.base import postgres as base_postgres
class JoyentMixin(object):
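    # overrides the package-install and service-control hooks to use SmartOS
    # pkg_add and svcadm rather than the base class implementations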
version_directory_join = ''
def _get_data_dir(self, db_version):
# Try to get from svc first
output = run('svcprop -p config/data postgresql')
if output.stdout and exists(output.stdout, use_sudo=True):
return output.stdout
return base_postgres.PostgresInstall._get_data_dir(self, db_version)
def _install_package(self, db_version):
sudo("pkg_add postgresql%s-server" %db_version)
sudo("pkg_add postgresql%s-replicationtools" %db_version)
sudo("svcadm enable postgresql")
def _restart_db_server(self, db_version):
sudo('svcadm restart postgresql')
def _stop_db_server(self, db_version):
sudo('svcadm disable postgresql')
def _start_db_server(self, db_version):
sudo('svcadm enable postgresql')
class PostgresInstall(JoyentMixin, base_postgres.PostgresInstall):
"""
    Install postgresql on the server:
    install the postgresql package;
    enable postgres access from localhost without a password;
    enable access for all other users from other machines with a password;
    set up a few parameters related to streaming replication;
    make the database server listen on all addresses ('*');
    create a database user with a password.
"""
name = 'master_setup'
db_version = '9.1'
class SlaveSetup(JoyentMixin, base_postgres.SlaveSetup):
"""
Set up master-slave streaming replication: slave node
"""
name = 'slave_setup'
class PGBouncerInstall(Task):
"""
Set up PGBouncer on a database server
"""
name = 'setup_pgbouncer'
pgbouncer_src = 'http://pkgsrc.smartos.org/packages/SmartOS/2012Q2/databases/pgbouncer-1.4.2.tgz'
pkg_name = 'pgbouncer-1.4.2.tgz'
config_dir = '/etc/opt/pkg'
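    # settings written into pgbouncer.ini; the '*' entry points every database
    # at the local PostgreSQL instance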
config = {
'*': 'host=127.0.0.1',
'logfile': '/var/log/pgbouncer/pgbouncer.log',
'listen_addr': '*',
'listen_port': '6432',
'unix_socket_dir': '/tmp',
'auth_type': 'md5',
'auth_file': '%s/pgbouncer.userlist' %config_dir,
'pool_mode': 'session',
'admin_users': 'postgres',
'stats_users': 'postgres',
}
def install_package(self):
sudo('pkg_add libevent')
with cd('/tmp'):
run('wget %s' %self.pgbouncer_src)
sudo('pkg_add %s' %self.pkg_name)
def _setup_parameter(self, file_name, **kwargs):
for key, value in kwargs.items():
origin = "%s =" %key
new = "%s = %s" %(key, value)
sudo('sed -i "/%s/ c\%s" %s' %(origin, new, file_name))
def _get_passwd(self, username):
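        # read the role's password hash from pg_shadow via psql and write it to
        # the pgbouncer userlist in '"user" "passwd" ""' format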
with hide('output'):
string = run('echo "select usename, passwd from pg_shadow where '
'usename=\'%s\' order by 1" | sudo su postgres -c '
'"psql"' %username)
user, passwd = string.split('\n')[2].split('|')
user = user.strip()
passwd = passwd.strip()
__, tmp_name = tempfile.mkstemp()
fn = open(tmp_name, 'w')
fn.write('"%s" "%s" ""\n' %(user, passwd))
fn.close()
put(tmp_name, '%s/pgbouncer.userlist'%self.config_dir, use_sudo=True)
local('rm %s' %tmp_name)
def _get_username(self, section=None):
try:
names = env.config_object.get_list(section, env.config_object.USERNAME)
username = names[0]
except:
print ('You must first set up a database server on this machine, '
'and create a database user')
raise
return username
def run(self, section=None):
"""
"""
sudo('mkdir -p /opt/pkg/bin')
sudo("ln -sf /opt/local/bin/awk /opt/pkg/bin/nawk")
sudo("ln -sf /opt/local/bin/sed /opt/pkg/bin/nbsed")
self.install_package()
svc_method = os.path.join(env.configs_dir, 'pgbouncer.xml')
put(svc_method, self.config_dir, use_sudo=True)
home = run('bash -c "echo ~postgres"')
bounce_home = os.path.join(home, 'pgbouncer')
pidfile = os.path.join(bounce_home, 'pgbouncer.pid')
self._setup_parameter('%s/pgbouncer.ini' %self.config_dir,
pidfile=pidfile, **self.config)
if not section:
section = 'db-server'
username = self._get_username(section)
self._get_passwd(username)
# postgres should be the owner of these config files
sudo('chown -R postgres:postgres %s' %self.config_dir)
sudo('mkdir -p %s' % bounce_home)
sudo('chown postgres:postgres %s' % bounce_home)
sudo('mkdir -p /var/log/pgbouncer')
sudo('chown postgres:postgres /var/log/pgbouncer')
# set up log
sudo('logadm -C 3 -p1d -c -w /var/log/pgbouncer/pgbouncer.log -z 1')
run('svccfg import %s/pgbouncer.xml' %self.config_dir)
# start pgbouncer
sudo('svcadm enable pgbouncer')
setup = PostgresInstall()
slave_setup = SlaveSetup()
setup_pgbouncer = PGBouncerInstall()
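# Illustrative usage sketch (an assumption, not part of the original module):
# with the task instances above exposed through a project fabfile, deployment
# would be driven roughly like
#
#   fab -H db-master.example.com master_setup
#   fab -H db-slave.example.com slave_setup
#   fab -H db-master.example.com setup_pgbouncer
#
# Hostnames are placeholders; Fabric picks the task names up from the `name`
# attribute of each Task instance.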
| ff0000/red-fab-deploy | fab_deploy/joyent/postgres.py | Python | mit | 5,514 |
"""
Gauged
https://github.com/chriso/gauged (MIT Licensed)
Copyright 2014 (c) Chris O'Hara <cohara87@gmail.com>
"""
from urlparse import urlparse, parse_qsl
from urllib import unquote
from .mysql import MySQLDriver
from .sqlite import SQLiteDriver
from .postgresql import PostgreSQLDriver
def parse_dsn(dsn_string):
"""Parse a connection string and return the associated driver"""
dsn = urlparse(dsn_string)
scheme = dsn.scheme.split('+')[0]
username = password = host = port = None
host = dsn.netloc
if '@' in host:
username, host = host.split('@')
if ':' in username:
username, password = username.split(':')
password = unquote(password)
username = unquote(username)
if ':' in host:
host, port = host.split(':')
port = int(port)
database = dsn.path.split('?')[0][1:]
query = dsn.path.split('?')[1] if '?' in dsn.path else dsn.query
kwargs = dict(parse_qsl(query, True))
if scheme == 'sqlite':
return SQLiteDriver, [dsn.path], {}
elif scheme == 'mysql':
kwargs['user'] = username or 'root'
kwargs['db'] = database
if port:
kwargs['port'] = port
if host:
kwargs['host'] = host
if password:
kwargs['passwd'] = password
return MySQLDriver, [], kwargs
elif scheme == 'postgresql':
kwargs['user'] = username or 'postgres'
kwargs['database'] = database
if port:
kwargs['port'] = port
if 'unix_socket' in kwargs:
kwargs['host'] = kwargs.pop('unix_socket')
elif host:
kwargs['host'] = host
if password:
kwargs['password'] = password
return PostgreSQLDriver, [], kwargs
else:
raise ValueError('Unknown driver %s' % dsn_string)
def get_driver(dsn_string):
driver, args, kwargs = parse_dsn(dsn_string)
return driver(*args, **kwargs)
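# Illustrative sketch (not part of the original module): given the parsing
# rules above, DSNs would be expected to resolve roughly as follows; the
# concrete hosts and credentials are placeholder assumptions.
#
#   parse_dsn('mysql://user:secret@localhost:3306/gauged')
#   # -> (MySQLDriver, [], {'user': 'user', 'passwd': 'secret',
#   #                       'host': 'localhost', 'port': 3306, 'db': 'gauged'})
#
#   parse_dsn('sqlite:///tmp/gauged.db')
#   # -> (SQLiteDriver, ['/tmp/gauged.db'], {})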
| chriso/gauged | gauged/drivers/__init__.py | Python | mit | 1,960 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ibtokin.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| ibtokin/ibtokin | manage.py | Python | mit | 805 |
import boto
import mock
import moto
import tempfile
import unittest
from click.testing import CliRunner
from rubberjackcli.click import rubberjack
class CLITests(unittest.TestCase):
@moto.mock_s3_deprecated
@mock.patch('boto.beanstalk.layer1.Layer1.create_application_version')
@mock.patch('boto.beanstalk.layer1.Layer1.update_environment')
def test_deploy(self, cav, ue):
s3 = boto.connect_s3()
s3.create_bucket("laterpay-rubberjack-ebdeploy") # FIXME Remove hardcoded bucket name
with tempfile.NamedTemporaryFile() as tmp:
result = CliRunner().invoke(rubberjack, ['deploy', tmp.name], catch_exceptions=False)
self.assertEquals(result.exit_code, 0, result.output)
@moto.mock_s3_deprecated
@mock.patch('boto.beanstalk.layer1.Layer1.describe_environments')
@mock.patch('boto.beanstalk.layer1.Layer1.update_environment')
def test_promote(self, ue, de):
de.return_value = {
'DescribeEnvironmentsResponse': {
'DescribeEnvironmentsResult': {
'Environments': [
{
'EnvironmentName': 'laterpay-devnull-live', # FIXME Remove hardcoded EnvName
'VersionLabel': 'old',
},
{
'EnvironmentName': 'laterpay-devnull-dev', # FIXME Remove hardcoded EnvName
'VersionLabel': 'new',
},
],
},
},
}
CliRunner().invoke(rubberjack, ['promote'], catch_exceptions=False)
@moto.mock_s3_deprecated
@mock.patch('sys.exit')
@mock.patch('boto.beanstalk.layer1.Layer1.describe_environments')
@mock.patch('boto.beanstalk.layer1.Layer1.update_environment')
def test_promoting_same_version(self, ue, de, se):
de.return_value = {
'DescribeEnvironmentsResponse': {
'DescribeEnvironmentsResult': {
'Environments': [
{
'EnvironmentName': 'laterpay-devnull-live', # FIXME Remove hardcoded EnvName
'VersionLabel': 'same',
},
{
'EnvironmentName': 'laterpay-devnull-dev', # FIXME Remove hardcoded EnvName
'VersionLabel': 'same',
},
],
},
},
}
CliRunner().invoke(rubberjack, ['promote'], catch_exceptions=False)
self.assertTrue(se.called)
@moto.mock_s3_deprecated
def test_sigv4(self):
CliRunner().invoke(rubberjack, ['--sigv4-host', 'foo', 'deploy'], catch_exceptions=False)
@moto.mock_s3_deprecated
@mock.patch('boto.beanstalk.layer1.Layer1.create_application_version')
@mock.patch('boto.beanstalk.layer1.Layer1.update_environment')
def test_deploy_to_custom_environment(self, ue, cav):
s3 = boto.connect_s3()
s3.create_bucket("laterpay-rubberjack-ebdeploy") # FIXME Remove hardcoded bucket name
with tempfile.NamedTemporaryFile() as tmp:
result = CliRunner().invoke(rubberjack, ['deploy', '--environment', 'wibble', tmp.name], catch_exceptions=False)
self.assertEquals(result.exit_code, 0, result.output)
self.assertEqual(cav.call_count, 1, "create_application_version wasn't called, but it should")
self.assertEqual(ue.call_count, 1, "update_environment wasn't called, but it should")
@moto.mock_s3_deprecated
@mock.patch('boto.beanstalk.layer1.Layer1.create_application_version')
@mock.patch('boto.beanstalk.layer1.Layer1.update_environment')
def test_deploy_without_updating_the_environment(self, ue, cav):
s3 = boto.connect_s3()
s3.create_bucket("laterpay-rubberjack-ebdeploy") # FIXME Remove hardcoded bucket name
with tempfile.NamedTemporaryFile() as tmp:
result = CliRunner().invoke(rubberjack, ['deploy', '--no-update-environment', tmp.name], catch_exceptions=False)
self.assertEquals(result.exit_code, 0, result.output)
self.assertEqual(cav.call_count, 1, "create_application_version wasn't called, but it should")
self.assertEqual(ue.call_count, 0, "update_environment was called, but it shouldn't")
@moto.mock_s3_deprecated
@mock.patch('boto.beanstalk.layer1.Layer1.create_application_version')
@mock.patch('boto.beanstalk.layer1.Layer1.update_environment')
def test_deploy_to_custom_bucket(self, ue, cav):
bucket_name = 'rbbrjck-test'
s3 = boto.connect_s3()
s3.create_bucket(bucket_name)
with tempfile.NamedTemporaryFile() as tmp:
result = CliRunner().invoke(rubberjack, ['--bucket', bucket_name, 'deploy', tmp.name], catch_exceptions=False)
self.assertEquals(result.exit_code, 0, result.output)
self.assertEqual(cav.call_count, 1, "create_application_version wasn't called, but it should")
self.assertEqual(ue.call_count, 1, "update_environment wasn't called, but it should")
_, cav_kwargs = cav.call_args
self.assertEqual(bucket_name, cav_kwargs['s3_bucket'])
@moto.mock_s3_deprecated
@mock.patch('boto.beanstalk.layer1.Layer1.update_environment')
@mock.patch('boto.beanstalk.layer1.Layer1.describe_environments')
def test_promote_to_custom_environment(self, de, ue):
CUSTOM_TO_ENVIRONMENT = "loremipsum"
de.return_value = {
'DescribeEnvironmentsResponse': {
'DescribeEnvironmentsResult': {
'Environments': [
{
'EnvironmentName': CUSTOM_TO_ENVIRONMENT,
'VersionLabel': 'old',
},
{
'EnvironmentName': 'laterpay-devnull-dev', # FIXME Remove hardcoded EnvName
'VersionLabel': 'new',
},
],
},
},
}
result = CliRunner().invoke(rubberjack, ['promote', '--to-environment', CUSTOM_TO_ENVIRONMENT], catch_exceptions=False)
self.assertEquals(result.exit_code, 0, result.output)
| laterpay/rubberjack-cli | tests/test_cli.py | Python | mit | 6,380 |
from __future__ import absolute_import, division, print_function
# note: py.io capture tests where copied from
# pylib 1.4.20.dev2 (rev 13d9af95547e)
from __future__ import with_statement
import pickle
import os
import sys
from io import UnsupportedOperation
import _pytest._code
import py
import pytest
import contextlib
from _pytest import capture
from _pytest.capture import CaptureManager
from _pytest.main import EXIT_NOTESTSCOLLECTED
needsosdup = pytest.mark.xfail("not hasattr(os, 'dup')")
if sys.version_info >= (3, 0):
def tobytes(obj):
if isinstance(obj, str):
obj = obj.encode('UTF-8')
assert isinstance(obj, bytes)
return obj
def totext(obj):
if isinstance(obj, bytes):
obj = str(obj, 'UTF-8')
assert isinstance(obj, str)
return obj
else:
def tobytes(obj):
if isinstance(obj, unicode):
obj = obj.encode('UTF-8')
assert isinstance(obj, str)
return obj
def totext(obj):
if isinstance(obj, str):
obj = unicode(obj, 'UTF-8')
assert isinstance(obj, unicode)
return obj
def oswritebytes(fd, obj):
os.write(fd, tobytes(obj))
def StdCaptureFD(out=True, err=True, in_=True):
return capture.MultiCapture(out, err, in_, Capture=capture.FDCapture)
def StdCapture(out=True, err=True, in_=True):
return capture.MultiCapture(out, err, in_, Capture=capture.SysCapture)
class TestCaptureManager(object):
def test_getmethod_default_no_fd(self, monkeypatch):
from _pytest.capture import pytest_addoption
from _pytest.config import Parser
parser = Parser()
pytest_addoption(parser)
default = parser._groups[0].options[0].default
assert default == "fd" if hasattr(os, "dup") else "sys"
parser = Parser()
monkeypatch.delattr(os, 'dup', raising=False)
pytest_addoption(parser)
assert parser._groups[0].options[0].default == "sys"
@needsosdup
@pytest.mark.parametrize("method",
['no', 'sys', pytest.mark.skipif('not hasattr(os, "dup")', 'fd')])
def test_capturing_basic_api(self, method):
capouter = StdCaptureFD()
old = sys.stdout, sys.stderr, sys.stdin
try:
capman = CaptureManager(method)
capman.start_global_capturing()
outerr = capman.suspend_global_capture()
assert outerr == ("", "")
outerr = capman.suspend_global_capture()
assert outerr == ("", "")
print("hello")
out, err = capman.suspend_global_capture()
if method == "no":
assert old == (sys.stdout, sys.stderr, sys.stdin)
else:
assert not out
capman.resume_global_capture()
print("hello")
out, err = capman.suspend_global_capture()
if method != "no":
assert out == "hello\n"
capman.stop_global_capturing()
finally:
capouter.stop_capturing()
@needsosdup
def test_init_capturing(self):
capouter = StdCaptureFD()
try:
capman = CaptureManager("fd")
capman.start_global_capturing()
pytest.raises(AssertionError, "capman.start_global_capturing()")
capman.stop_global_capturing()
finally:
capouter.stop_capturing()
@pytest.mark.parametrize("method", ['fd', 'sys'])
def test_capturing_unicode(testdir, method):
if hasattr(sys, "pypy_version_info") and sys.pypy_version_info < (2, 2):
pytest.xfail("does not work on pypy < 2.2")
if sys.version_info >= (3, 0):
obj = "'b\u00f6y'"
else:
obj = "u'\u00f6y'"
testdir.makepyfile("""
# coding=utf8
# taken from issue 227 from nosetests
def test_unicode():
import sys
print (sys.stdout)
print (%s)
""" % obj)
result = testdir.runpytest("--capture=%s" % method)
result.stdout.fnmatch_lines([
"*1 passed*"
])
@pytest.mark.parametrize("method", ['fd', 'sys'])
def test_capturing_bytes_in_utf8_encoding(testdir, method):
testdir.makepyfile("""
def test_unicode():
print ('b\\u00f6y')
""")
result = testdir.runpytest("--capture=%s" % method)
result.stdout.fnmatch_lines([
"*1 passed*"
])
def test_collect_capturing(testdir):
p = testdir.makepyfile("""
print ("collect %s failure" % 13)
import xyz42123
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
"*Captured stdout*",
"*collect 13 failure*",
])
class TestPerTestCapturing(object):
def test_capture_and_fixtures(self, testdir):
p = testdir.makepyfile("""
def setup_module(mod):
print ("setup module")
def setup_function(function):
print ("setup " + function.__name__)
def test_func1():
print ("in func1")
assert 0
def test_func2():
print ("in func2")
assert 0
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
"setup module*",
"setup test_func1*",
"in func1*",
"setup test_func2*",
"in func2*",
])
@pytest.mark.xfail(reason="unimplemented feature")
def test_capture_scope_cache(self, testdir):
p = testdir.makepyfile("""
import sys
def setup_module(func):
print ("module-setup")
def setup_function(func):
print ("function-setup")
def test_func():
print ("in function")
assert 0
def teardown_function(func):
print ("in teardown")
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
"*test_func():*",
"*Captured stdout during setup*",
"module-setup*",
"function-setup*",
"*Captured stdout*",
"in teardown*",
])
def test_no_carry_over(self, testdir):
p = testdir.makepyfile("""
def test_func1():
print ("in func1")
def test_func2():
print ("in func2")
assert 0
""")
result = testdir.runpytest(p)
s = result.stdout.str()
assert "in func1" not in s
assert "in func2" in s
def test_teardown_capturing(self, testdir):
p = testdir.makepyfile("""
def setup_function(function):
print ("setup func1")
def teardown_function(function):
print ("teardown func1")
assert 0
def test_func1():
print ("in func1")
pass
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
'*teardown_function*',
'*Captured stdout*',
"setup func1*",
"in func1*",
"teardown func1*",
# "*1 fixture failure*"
])
def test_teardown_capturing_final(self, testdir):
p = testdir.makepyfile("""
def teardown_module(mod):
print ("teardown module")
assert 0
def test_func():
pass
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
"*def teardown_module(mod):*",
"*Captured stdout*",
"*teardown module*",
"*1 error*",
])
def test_capturing_outerr(self, testdir):
p1 = testdir.makepyfile("""
import sys
def test_capturing():
print (42)
sys.stderr.write(str(23))
def test_capturing_error():
print (1)
sys.stderr.write(str(2))
raise ValueError
""")
result = testdir.runpytest(p1)
result.stdout.fnmatch_lines([
"*test_capturing_outerr.py .F*",
"====* FAILURES *====",
"____*____",
"*test_capturing_outerr.py:8: ValueError",
"*--- Captured stdout *call*",
"1",
"*--- Captured stderr *call*",
"2",
])
class TestLoggingInteraction(object):
def test_logging_stream_ownership(self, testdir):
p = testdir.makepyfile("""
def test_logging():
import logging
import pytest
stream = capture.CaptureIO()
logging.basicConfig(stream=stream)
stream.close() # to free memory/release resources
""")
result = testdir.runpytest_subprocess(p)
assert result.stderr.str().find("atexit") == -1
def test_logging_and_immediate_setupteardown(self, testdir):
p = testdir.makepyfile("""
import logging
def setup_function(function):
logging.warn("hello1")
def test_logging():
logging.warn("hello2")
assert 0
def teardown_function(function):
logging.warn("hello3")
assert 0
""")
for optargs in (('--capture=sys',), ('--capture=fd',)):
print(optargs)
result = testdir.runpytest_subprocess(p, *optargs)
s = result.stdout.str()
result.stdout.fnmatch_lines([
"*WARN*hello3", # errors show first!
"*WARN*hello1",
"*WARN*hello2",
])
# verify proper termination
assert "closed" not in s
def test_logging_and_crossscope_fixtures(self, testdir):
p = testdir.makepyfile("""
import logging
def setup_module(function):
logging.warn("hello1")
def test_logging():
logging.warn("hello2")
assert 0
def teardown_module(function):
logging.warn("hello3")
assert 0
""")
for optargs in (('--capture=sys',), ('--capture=fd',)):
print(optargs)
result = testdir.runpytest_subprocess(p, *optargs)
s = result.stdout.str()
result.stdout.fnmatch_lines([
"*WARN*hello3", # errors come first
"*WARN*hello1",
"*WARN*hello2",
])
# verify proper termination
assert "closed" not in s
def test_conftestlogging_is_shown(self, testdir):
testdir.makeconftest("""
import logging
logging.basicConfig()
logging.warn("hello435")
""")
# make sure that logging is still captured in tests
result = testdir.runpytest_subprocess("-s", "-p", "no:capturelog")
assert result.ret == EXIT_NOTESTSCOLLECTED
result.stderr.fnmatch_lines([
"WARNING*hello435*",
])
assert 'operation on closed file' not in result.stderr.str()
def test_conftestlogging_and_test_logging(self, testdir):
testdir.makeconftest("""
import logging
logging.basicConfig()
""")
# make sure that logging is still captured in tests
p = testdir.makepyfile("""
def test_hello():
import logging
logging.warn("hello433")
assert 0
""")
result = testdir.runpytest_subprocess(p, "-p", "no:capturelog")
assert result.ret != 0
result.stdout.fnmatch_lines([
"WARNING*hello433*",
])
assert 'something' not in result.stderr.str()
assert 'operation on closed file' not in result.stderr.str()
class TestCaptureFixture(object):
@pytest.mark.parametrize("opt", [[], ["-s"]])
def test_std_functional(self, testdir, opt):
reprec = testdir.inline_runsource("""
def test_hello(capsys):
print (42)
out, err = capsys.readouterr()
assert out.startswith("42")
""", *opt)
reprec.assertoutcome(passed=1)
def test_capsyscapfd(self, testdir):
p = testdir.makepyfile("""
def test_one(capsys, capfd):
pass
def test_two(capfd, capsys):
pass
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
"*ERROR*setup*test_one*",
"E*capfd*capsys*same*time*",
"*ERROR*setup*test_two*",
"E*capsys*capfd*same*time*",
"*2 error*"])
def test_capturing_getfixturevalue(self, testdir):
"""Test that asking for "capfd" and "capsys" using request.getfixturevalue
in the same test is an error.
"""
testdir.makepyfile("""
def test_one(capsys, request):
request.getfixturevalue("capfd")
def test_two(capfd, request):
request.getfixturevalue("capsys")
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines([
"*test_one*",
"*capsys*capfd*same*time*",
"*test_two*",
"*capfd*capsys*same*time*",
"*2 failed in*",
])
def test_capsyscapfdbinary(self, testdir):
p = testdir.makepyfile("""
def test_one(capsys, capfdbinary):
pass
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
"*ERROR*setup*test_one*",
"E*capfdbinary*capsys*same*time*",
"*1 error*"])
@pytest.mark.parametrize("method", ["sys", "fd"])
def test_capture_is_represented_on_failure_issue128(self, testdir, method):
p = testdir.makepyfile("""
def test_hello(cap%s):
print ("xxx42xxx")
assert 0
""" % method)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
"xxx42xxx",
])
@needsosdup
def test_stdfd_functional(self, testdir):
reprec = testdir.inline_runsource("""
def test_hello(capfd):
import os
os.write(1, "42".encode('ascii'))
out, err = capfd.readouterr()
assert out.startswith("42")
capfd.close()
""")
reprec.assertoutcome(passed=1)
@needsosdup
def test_capfdbinary(self, testdir):
reprec = testdir.inline_runsource("""
def test_hello(capfdbinary):
import os
# some likely un-decodable bytes
os.write(1, b'\\xfe\\x98\\x20')
out, err = capfdbinary.readouterr()
assert out == b'\\xfe\\x98\\x20'
assert err == b''
""")
reprec.assertoutcome(passed=1)
@pytest.mark.skipif(
sys.version_info < (3,),
reason='only have capsysbinary in python 3',
)
def test_capsysbinary(self, testdir):
reprec = testdir.inline_runsource("""
def test_hello(capsysbinary):
import sys
# some likely un-decodable bytes
sys.stdout.buffer.write(b'\\xfe\\x98\\x20')
out, err = capsysbinary.readouterr()
assert out == b'\\xfe\\x98\\x20'
assert err == b''
""")
reprec.assertoutcome(passed=1)
@pytest.mark.skipif(
sys.version_info >= (3,),
reason='only have capsysbinary in python 3',
)
def test_capsysbinary_forbidden_in_python2(self, testdir):
testdir.makepyfile("""
def test_hello(capsysbinary):
pass
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines([
"*test_hello*",
"*capsysbinary is only supported on python 3*",
"*1 error in*",
])
def test_partial_setup_failure(self, testdir):
p = testdir.makepyfile("""
def test_hello(capsys, missingarg):
pass
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
"*test_partial_setup_failure*",
"*1 error*",
])
@needsosdup
def test_keyboardinterrupt_disables_capturing(self, testdir):
p = testdir.makepyfile("""
def test_hello(capfd):
import os
os.write(1, str(42).encode('ascii'))
raise KeyboardInterrupt()
""")
result = testdir.runpytest_subprocess(p)
result.stdout.fnmatch_lines([
"*KeyboardInterrupt*"
])
assert result.ret == 2
@pytest.mark.issue14
def test_capture_and_logging(self, testdir):
p = testdir.makepyfile("""
import logging
def test_log(capsys):
logging.error('x')
""")
result = testdir.runpytest_subprocess(p)
assert 'closed' not in result.stderr.str()
@pytest.mark.parametrize('fixture', ['capsys', 'capfd'])
@pytest.mark.parametrize('no_capture', [True, False])
def test_disabled_capture_fixture(self, testdir, fixture, no_capture):
testdir.makepyfile("""
def test_disabled({fixture}):
print('captured before')
with {fixture}.disabled():
print('while capture is disabled')
print('captured after')
assert {fixture}.readouterr() == ('captured before\\ncaptured after\\n', '')
def test_normal():
print('test_normal executed')
""".format(fixture=fixture))
args = ('-s',) if no_capture else ()
result = testdir.runpytest_subprocess(*args)
result.stdout.fnmatch_lines("""
*while capture is disabled*
""")
assert 'captured before' not in result.stdout.str()
assert 'captured after' not in result.stdout.str()
if no_capture:
assert 'test_normal executed' in result.stdout.str()
else:
assert 'test_normal executed' not in result.stdout.str()
@pytest.mark.parametrize('fixture', ['capsys', 'capfd'])
def test_fixture_use_by_other_fixtures(self, testdir, fixture):
"""
Ensure that capsys and capfd can be used by other fixtures during setup and teardown.
"""
testdir.makepyfile("""
from __future__ import print_function
import sys
import pytest
@pytest.fixture
def captured_print({fixture}):
print('stdout contents begin')
print('stderr contents begin', file=sys.stderr)
out, err = {fixture}.readouterr()
yield out, err
print('stdout contents end')
print('stderr contents end', file=sys.stderr)
out, err = {fixture}.readouterr()
assert out == 'stdout contents end\\n'
assert err == 'stderr contents end\\n'
def test_captured_print(captured_print):
out, err = captured_print
assert out == 'stdout contents begin\\n'
assert err == 'stderr contents begin\\n'
""".format(fixture=fixture))
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines("*1 passed*")
assert 'stdout contents begin' not in result.stdout.str()
assert 'stderr contents begin' not in result.stdout.str()
def test_setup_failure_does_not_kill_capturing(testdir):
sub1 = testdir.mkpydir("sub1")
sub1.join("conftest.py").write(_pytest._code.Source("""
def pytest_runtest_setup(item):
raise ValueError(42)
"""))
sub1.join("test_mod.py").write("def test_func1(): pass")
result = testdir.runpytest(testdir.tmpdir, '--traceconfig')
result.stdout.fnmatch_lines([
"*ValueError(42)*",
"*1 error*"
])
def test_fdfuncarg_skips_on_no_osdup(testdir):
testdir.makepyfile("""
import os
if hasattr(os, 'dup'):
del os.dup
def test_hello(capfd):
pass
""")
result = testdir.runpytest_subprocess("--capture=no")
result.stdout.fnmatch_lines([
"*1 skipped*"
])
def test_capture_conftest_runtest_setup(testdir):
testdir.makeconftest("""
def pytest_runtest_setup():
print ("hello19")
""")
testdir.makepyfile("def test_func(): pass")
result = testdir.runpytest()
assert result.ret == 0
assert 'hello19' not in result.stdout.str()
def test_capture_badoutput_issue412(testdir):
testdir.makepyfile("""
import os
def test_func():
omg = bytearray([1,129,1])
os.write(1, omg)
assert 0
""")
result = testdir.runpytest('--cap=fd')
result.stdout.fnmatch_lines('''
*def test_func*
*assert 0*
*Captured*
*1 failed*
''')
def test_capture_early_option_parsing(testdir):
testdir.makeconftest("""
def pytest_runtest_setup():
print ("hello19")
""")
testdir.makepyfile("def test_func(): pass")
result = testdir.runpytest("-vs")
assert result.ret == 0
assert 'hello19' in result.stdout.str()
def test_capture_binary_output(testdir):
testdir.makepyfile(r"""
import pytest
def test_a():
import sys
import subprocess
subprocess.call([sys.executable, __file__])
def test_foo():
import os;os.write(1, b'\xc3')
if __name__ == '__main__':
test_foo()
""")
result = testdir.runpytest('--assert=plain')
result.assert_outcomes(passed=2)
def test_error_during_readouterr(testdir):
"""Make sure we suspend capturing if errors occur during readouterr"""
testdir.makepyfile(pytest_xyz="""
from _pytest.capture import FDCapture
def bad_snap(self):
raise Exception('boom')
assert FDCapture.snap
FDCapture.snap = bad_snap
""")
result = testdir.runpytest_subprocess(
"-p", "pytest_xyz", "--version", syspathinsert=True
)
result.stderr.fnmatch_lines([
"*in bad_snap",
" raise Exception('boom')",
"Exception: boom",
])
class TestCaptureIO(object):
def test_text(self):
f = capture.CaptureIO()
f.write("hello")
s = f.getvalue()
assert s == "hello"
f.close()
def test_unicode_and_str_mixture(self):
f = capture.CaptureIO()
if sys.version_info >= (3, 0):
f.write("\u00f6")
pytest.raises(TypeError, "f.write(bytes('hello', 'UTF-8'))")
else:
f.write(unicode("\u00f6", 'UTF-8'))
f.write("hello") # bytes
s = f.getvalue()
f.close()
assert isinstance(s, unicode)
@pytest.mark.skipif(
sys.version_info[0] == 2,
reason='python 3 only behaviour',
)
def test_write_bytes_to_buffer(self):
"""In python3, stdout / stderr are text io wrappers (exposing a buffer
property of the underlying bytestream). See issue #1407
"""
f = capture.CaptureIO()
f.buffer.write(b'foo\r\n')
assert f.getvalue() == 'foo\r\n'
def test_bytes_io():
f = py.io.BytesIO()
f.write(tobytes("hello"))
pytest.raises(TypeError, "f.write(totext('hello'))")
s = f.getvalue()
assert s == tobytes("hello")
def test_dontreadfrominput():
from _pytest.capture import DontReadFromInput
f = DontReadFromInput()
assert not f.isatty()
pytest.raises(IOError, f.read)
pytest.raises(IOError, f.readlines)
pytest.raises(IOError, iter, f)
pytest.raises(UnsupportedOperation, f.fileno)
f.close() # just for completeness
@pytest.mark.skipif('sys.version_info < (3,)', reason='python2 has no buffer')
def test_dontreadfrominput_buffer_python3():
from _pytest.capture import DontReadFromInput
f = DontReadFromInput()
fb = f.buffer
assert not fb.isatty()
pytest.raises(IOError, fb.read)
pytest.raises(IOError, fb.readlines)
pytest.raises(IOError, iter, fb)
pytest.raises(ValueError, fb.fileno)
f.close() # just for completeness
@pytest.mark.skipif('sys.version_info >= (3,)', reason='python2 has no buffer')
def test_dontreadfrominput_buffer_python2():
from _pytest.capture import DontReadFromInput
f = DontReadFromInput()
with pytest.raises(AttributeError):
f.buffer
f.close() # just for completeness
@pytest.yield_fixture
def tmpfile(testdir):
f = testdir.makepyfile("").open('wb+')
yield f
if not f.closed:
f.close()
@needsosdup
def test_dupfile(tmpfile):
flist = []
for i in range(5):
nf = capture.safe_text_dupfile(tmpfile, "wb")
assert nf != tmpfile
assert nf.fileno() != tmpfile.fileno()
assert nf not in flist
print(i, end="", file=nf)
flist.append(nf)
fname_open = flist[0].name
assert fname_open == repr(flist[0].buffer)
for i in range(5):
f = flist[i]
f.close()
fname_closed = flist[0].name
assert fname_closed == repr(flist[0].buffer)
assert fname_closed != fname_open
tmpfile.seek(0)
s = tmpfile.read()
assert "01234" in repr(s)
tmpfile.close()
assert fname_closed == repr(flist[0].buffer)
def test_dupfile_on_bytesio():
io = py.io.BytesIO()
f = capture.safe_text_dupfile(io, "wb")
f.write("hello")
assert io.getvalue() == b"hello"
assert 'BytesIO object' in f.name
def test_dupfile_on_textio():
io = py.io.TextIO()
f = capture.safe_text_dupfile(io, "wb")
f.write("hello")
assert io.getvalue() == "hello"
assert not hasattr(f, 'name')
@contextlib.contextmanager
def lsof_check():
pid = os.getpid()
try:
out = py.process.cmdexec("lsof -p %d" % pid)
except (py.process.cmdexec.Error, UnicodeDecodeError):
# about UnicodeDecodeError, see note on pytester
pytest.skip("could not run 'lsof'")
yield
out2 = py.process.cmdexec("lsof -p %d" % pid)
len1 = len([x for x in out.split("\n") if "REG" in x])
len2 = len([x for x in out2.split("\n") if "REG" in x])
assert len2 < len1 + 3, out2
class TestFDCapture(object):
pytestmark = needsosdup
def test_simple(self, tmpfile):
fd = tmpfile.fileno()
cap = capture.FDCapture(fd)
data = tobytes("hello")
os.write(fd, data)
s = cap.snap()
cap.done()
assert not s
cap = capture.FDCapture(fd)
cap.start()
os.write(fd, data)
s = cap.snap()
cap.done()
assert s == "hello"
def test_simple_many(self, tmpfile):
for i in range(10):
self.test_simple(tmpfile)
def test_simple_many_check_open_files(self, testdir):
with lsof_check():
with testdir.makepyfile("").open('wb+') as tmpfile:
self.test_simple_many(tmpfile)
def test_simple_fail_second_start(self, tmpfile):
fd = tmpfile.fileno()
cap = capture.FDCapture(fd)
cap.done()
pytest.raises(ValueError, cap.start)
def test_stderr(self):
cap = capture.FDCapture(2)
cap.start()
print("hello", file=sys.stderr)
s = cap.snap()
cap.done()
assert s == "hello\n"
def test_stdin(self, tmpfile):
cap = capture.FDCapture(0)
cap.start()
x = os.read(0, 100).strip()
cap.done()
assert x == tobytes('')
def test_writeorg(self, tmpfile):
data1, data2 = tobytes("foo"), tobytes("bar")
cap = capture.FDCapture(tmpfile.fileno())
cap.start()
tmpfile.write(data1)
tmpfile.flush()
cap.writeorg(data2)
scap = cap.snap()
cap.done()
assert scap == totext(data1)
with open(tmpfile.name, 'rb') as stmp_file:
stmp = stmp_file.read()
assert stmp == data2
def test_simple_resume_suspend(self, tmpfile):
with saved_fd(1):
cap = capture.FDCapture(1)
cap.start()
data = tobytes("hello")
os.write(1, data)
sys.stdout.write("whatever")
s = cap.snap()
assert s == "hellowhatever"
cap.suspend()
os.write(1, tobytes("world"))
sys.stdout.write("qlwkej")
assert not cap.snap()
cap.resume()
os.write(1, tobytes("but now"))
sys.stdout.write(" yes\n")
s = cap.snap()
assert s == "but now yes\n"
cap.suspend()
cap.done()
pytest.raises(AttributeError, cap.suspend)
@contextlib.contextmanager
def saved_fd(fd):
new_fd = os.dup(fd)
try:
yield
finally:
os.dup2(new_fd, fd)
os.close(new_fd)
class TestStdCapture(object):
captureclass = staticmethod(StdCapture)
@contextlib.contextmanager
def getcapture(self, **kw):
cap = self.__class__.captureclass(**kw)
cap.start_capturing()
try:
yield cap
finally:
cap.stop_capturing()
def test_capturing_done_simple(self):
with self.getcapture() as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
out, err = cap.readouterr()
assert out == "hello"
assert err == "world"
def test_capturing_reset_simple(self):
with self.getcapture() as cap:
print("hello world")
sys.stderr.write("hello error\n")
out, err = cap.readouterr()
assert out == "hello world\n"
assert err == "hello error\n"
def test_capturing_readouterr(self):
with self.getcapture() as cap:
print("hello world")
sys.stderr.write("hello error\n")
out, err = cap.readouterr()
assert out == "hello world\n"
assert err == "hello error\n"
sys.stderr.write("error2")
out, err = cap.readouterr()
assert err == "error2"
def test_capture_results_accessible_by_attribute(self):
with self.getcapture() as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
capture_result = cap.readouterr()
assert capture_result.out == "hello"
assert capture_result.err == "world"
def test_capturing_readouterr_unicode(self):
with self.getcapture() as cap:
print("hx\xc4\x85\xc4\x87")
out, err = cap.readouterr()
assert out == py.builtin._totext("hx\xc4\x85\xc4\x87\n", "utf8")
@pytest.mark.skipif('sys.version_info >= (3,)',
reason='text output different for bytes on python3')
def test_capturing_readouterr_decode_error_handling(self):
with self.getcapture() as cap:
            # triggered an internal error in pytest
print('\xa6')
out, err = cap.readouterr()
assert out == py.builtin._totext('\ufffd\n', 'unicode-escape')
def test_reset_twice_error(self):
with self.getcapture() as cap:
print("hello")
out, err = cap.readouterr()
pytest.raises(ValueError, cap.stop_capturing)
assert out == "hello\n"
assert not err
def test_capturing_modify_sysouterr_in_between(self):
oldout = sys.stdout
olderr = sys.stderr
with self.getcapture() as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
sys.stdout = capture.CaptureIO()
sys.stderr = capture.CaptureIO()
print("not seen")
sys.stderr.write("not seen\n")
out, err = cap.readouterr()
assert out == "hello"
assert err == "world"
assert sys.stdout == oldout
assert sys.stderr == olderr
def test_capturing_error_recursive(self):
with self.getcapture() as cap1:
print("cap1")
with self.getcapture() as cap2:
print("cap2")
out2, err2 = cap2.readouterr()
out1, err1 = cap1.readouterr()
assert out1 == "cap1\n"
assert out2 == "cap2\n"
def test_just_out_capture(self):
with self.getcapture(out=True, err=False) as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
out, err = cap.readouterr()
assert out == "hello"
assert not err
def test_just_err_capture(self):
with self.getcapture(out=False, err=True) as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
out, err = cap.readouterr()
assert err == "world"
assert not out
def test_stdin_restored(self):
old = sys.stdin
with self.getcapture(in_=True):
newstdin = sys.stdin
assert newstdin != sys.stdin
assert sys.stdin is old
def test_stdin_nulled_by_default(self):
print("XXX this test may well hang instead of crashing")
print("XXX which indicates an error in the underlying capturing")
print("XXX mechanisms")
with self.getcapture():
pytest.raises(IOError, "sys.stdin.read()")
class TestStdCaptureFD(TestStdCapture):
pytestmark = needsosdup
captureclass = staticmethod(StdCaptureFD)
def test_simple_only_fd(self, testdir):
testdir.makepyfile("""
import os
def test_x():
os.write(1, "hello\\n".encode("ascii"))
assert 0
""")
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines("""
*test_x*
*assert 0*
*Captured stdout*
""")
def test_intermingling(self):
with self.getcapture() as cap:
oswritebytes(1, "1")
sys.stdout.write(str(2))
sys.stdout.flush()
oswritebytes(1, "3")
oswritebytes(2, "a")
sys.stderr.write("b")
sys.stderr.flush()
oswritebytes(2, "c")
out, err = cap.readouterr()
assert out == "123"
assert err == "abc"
def test_many(self, capfd):
with lsof_check():
for i in range(10):
cap = StdCaptureFD()
cap.stop_capturing()
class TestStdCaptureFDinvalidFD(object):
pytestmark = needsosdup
def test_stdcapture_fd_invalid_fd(self, testdir):
testdir.makepyfile("""
import os
from _pytest import capture
def StdCaptureFD(out=True, err=True, in_=True):
return capture.MultiCapture(out, err, in_,
Capture=capture.FDCapture)
def test_stdout():
os.close(1)
cap = StdCaptureFD(out=True, err=False, in_=False)
cap.stop_capturing()
def test_stderr():
os.close(2)
cap = StdCaptureFD(out=False, err=True, in_=False)
cap.stop_capturing()
def test_stdin():
os.close(0)
cap = StdCaptureFD(out=False, err=False, in_=True)
cap.stop_capturing()
""")
result = testdir.runpytest_subprocess("--capture=fd")
assert result.ret == 0
assert result.parseoutcomes()['passed'] == 3
def test_capture_not_started_but_reset():
capsys = StdCapture()
capsys.stop_capturing()
def test_using_capsys_fixture_works_with_sys_stdout_encoding(capsys):
test_text = 'test text'
print(test_text.encode(sys.stdout.encoding, 'replace'))
(out, err) = capsys.readouterr()
assert out
assert err == ''
def test_capsys_results_accessible_by_attribute(capsys):
sys.stdout.write("spam")
sys.stderr.write("eggs")
capture_result = capsys.readouterr()
assert capture_result.out == "spam"
assert capture_result.err == "eggs"
@needsosdup
@pytest.mark.parametrize('use', [True, False])
def test_fdcapture_tmpfile_remains_the_same(tmpfile, use):
if not use:
tmpfile = True
cap = StdCaptureFD(out=False, err=tmpfile)
try:
cap.start_capturing()
capfile = cap.err.tmpfile
cap.readouterr()
finally:
cap.stop_capturing()
capfile2 = cap.err.tmpfile
assert capfile2 == capfile
@needsosdup
def test_close_and_capture_again(testdir):
testdir.makepyfile("""
import os
def test_close():
os.close(1)
def test_capture_again():
os.write(1, b"hello\\n")
assert 0
""")
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines("""
*test_capture_again*
*assert 0*
*stdout*
*hello*
""")
@pytest.mark.parametrize('method', ['SysCapture', 'FDCapture'])
def test_capturing_and_logging_fundamentals(testdir, method):
if method == "StdCaptureFD" and not hasattr(os, 'dup'):
pytest.skip("need os.dup")
# here we check a fundamental feature
p = testdir.makepyfile("""
import sys, os
import py, logging
from _pytest import capture
cap = capture.MultiCapture(out=False, in_=False,
Capture=capture.%s)
cap.start_capturing()
logging.warn("hello1")
outerr = cap.readouterr()
print ("suspend, captured %%s" %%(outerr,))
logging.warn("hello2")
cap.pop_outerr_to_orig()
logging.warn("hello3")
outerr = cap.readouterr()
print ("suspend2, captured %%s" %% (outerr,))
""" % (method,))
result = testdir.runpython(p)
result.stdout.fnmatch_lines("""
suspend, captured*hello1*
suspend2, captured*WARNING:root:hello3*
""")
result.stderr.fnmatch_lines("""
WARNING:root:hello2
""")
assert "atexit" not in result.stderr.str()
def test_error_attribute_issue555(testdir):
testdir.makepyfile("""
import sys
def test_capattr():
assert sys.stdout.errors == "strict"
assert sys.stderr.errors == "strict"
""")
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
@pytest.mark.skipif(not sys.platform.startswith('win') and sys.version_info[:2] >= (3, 6),
reason='only py3.6+ on windows')
def test_py36_windowsconsoleio_workaround_non_standard_streams():
"""
Ensure _py36_windowsconsoleio_workaround function works with objects that
do not implement the full ``io``-based stream protocol, for example execnet channels (#2666).
"""
from _pytest.capture import _py36_windowsconsoleio_workaround
class DummyStream(object):
def write(self, s):
pass
stream = DummyStream()
_py36_windowsconsoleio_workaround(stream)
def test_dontreadfrominput_has_encoding(testdir):
testdir.makepyfile("""
import sys
def test_capattr():
# should not raise AttributeError
assert sys.stdout.encoding
assert sys.stderr.encoding
""")
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_crash_on_closing_tmpfile_py27(testdir):
testdir.makepyfile('''
from __future__ import print_function
import time
import threading
import sys
def spam():
f = sys.stderr
while True:
print('.', end='', file=f)
def test_silly():
t = threading.Thread(target=spam)
t.daemon = True
t.start()
time.sleep(0.5)
''')
result = testdir.runpytest_subprocess()
assert result.ret == 0
assert 'IOError' not in result.stdout.str()
def test_pickling_and_unpickling_encoded_file():
# See https://bitbucket.org/pytest-dev/pytest/pull-request/194
# pickle.loads() raises infinite recursion if
# EncodedFile.__getattr__ is not implemented properly
ef = capture.EncodedFile(None, None)
ef_as_str = pickle.dumps(ef)
pickle.loads(ef_as_str)
| tareqalayan/pytest | testing/test_capture.py | Python | mit | 40,501 |
import zmq
import datetime
import pytz
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from registrations.models import Registration
from registrations import handlers
from registrations import tasks
class Command(BaseCommand):
def log(self, message):
f = open(settings.TASK_LOG_PATH, 'a')
now = datetime.datetime.utcnow().replace(tzinfo=pytz.utc)
log_message = "%s\t%s\n" % (now, message)
self.stdout.write(log_message)
f.write(log_message)
f.close()
def handle(self, *args, **options):
context = zmq.Context()
pull_socket = context.socket(zmq.PULL)
pull_socket.bind('tcp://*:7002')
self.log("Registration Worker ZMQ Socket Bound to 7002")
while True:
try:
data = pull_socket.recv_json()
task_name = data.pop('task')
task_kwargs = data.pop('kwargs')
self.log("Got task '%s' with kwargs: %s" % (task_name, task_kwargs))
if hasattr(tasks, task_name):
result = getattr(tasks, task_name)(**task_kwargs)
self.log("Task '%s' result: %s" % (task_name, result))
else:
self.log("Received unknown task: %s", task_name)
            except Exception as e:
self.log("Error: %s" % e)
pull_socket.close()
context.term()
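# Illustrative producer sketch (an assumption, not part of the original
# command): another process would be expected to PUSH JSON messages of the
# shape consumed above, where the task name must match a callable defined in
# registrations.tasks ('process_registration' here is hypothetical).
#
#   import zmq
#   ctx = zmq.Context()
#   push = ctx.socket(zmq.PUSH)
#   push.connect('tcp://localhost:7002')
#   push.send_json({'task': 'process_registration',
#                   'kwargs': {'registration_id': 42}})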
| greencoder/hopefullysunny-django | registrations/management/commands/registration_worker.py | Python | mit | 1,481 |
#!/usr/bin/python
from noisemapper.mapper import *
#from collectors.lib import utils
### Define the object mapper and start mapping
def main():
# utils.drop_privileges()
mapper = NoiseMapper()
mapper.run()
if __name__ == "__main__":
main()
| dustlab/noisemapper | scripts/nmcollector.py | Python | mit | 259 |
import numpy as np
from numpy import cumsum, sum, searchsorted
from numpy.random import rand
import math
import utils
import core.sentence as sentence
import core.markovchain as mc
import logging
logger = logging.getLogger(__name__)
# Dialogue making class. Need to review where to return a string, where to return a list of tokens, etc.
# setters: list of speakers, pronouns, priors etc.
# random transitions
# Internal: build list of structures:
# e.g.{:speaker_name "Alice", :speaker_pronoun "she", :speaker_str "she", :speech_verb "said", :position "end"}
# Then end with fn that maps that out to a suitable string
# e.g. "<SPEECH>, she said."
# External bit then replaces <SPEECH> with a markov-chain-generated sentence (or several).
class dialogue_maker(object):
"""Class to handle creating dialogue based on a list of speakers and a sentence generator."""
def __init__(self, names, pronouns, mc):
self.speakers = [{"name": n, "pronoun": p} for n, p in list(zip(names, pronouns))]
self._transitions = self.make_transition_probs()
self._speech_acts = ["said", "whispered", "shouted", "cried"]
self._acts_transitions = [25, 2, 2, 2]
self.mc = mc
# self.seeds = seeds
self.target_len = np.random.randint(5, 50, size=len(names)) # rough words per sentence
def make_transition_probs(self):
"""Make transition matrix between speakers, with random symmetric biases added in"""
        n = len(self.speakers)  # transition matrix is sized by the number of speakers
transitions = np.random.randint(5, size=(n, n)) + 1
transitions += transitions.transpose()
for i in range(0, math.floor(n / 2)):
s1 = np.random.randint(n)
s2 = np.random.randint(n)
transitions[s1][s2] += 10
transitions[s2][s1] += 8
return(transitions)
def after(self, speaker_id):
"""Pick next person to speak"""
row = self._transitions[speaker_id]
        successor = searchsorted(cumsum(row), rand() * sum(row))
        return successor
def speaker_sequence(self, speaker_id, n):
"""Random walk through transitions matrix to produce a sequence of speaker ids"""
seq = []
for i in range(n):
seq.append(speaker_id)
speaker_id = self.after(speaker_id)
return seq
def speech_sequence(self, n):
speech_acts_seq = []
next_speech_id = 0
for i in range(n):
next_speech_id = searchsorted(cumsum(self._acts_transitions), rand() * sum(self._acts_transitions))
speech_acts_seq.append(self._speech_acts[next_speech_id])
return speech_acts_seq
def seq_to_names(self, sequence):
return([self.speakers[id] for id in sequence])
def make_speech_bits(self, seeds):
n = len(seeds)
speaker_id = self.speaker_sequence(0, n)
speech_acts_seq = self.speech_sequence(n)
bits = []
ss = sentence.SentenceMaker(self.mc)
for i in range(n):
sent_toks = ss.generate_sentence_tokens([seeds[i]], self.target_len[speaker_id[i]])
sent_toks = ss.polish_sentence(sent_toks)
bits.append({'speaker_name': self.speakers[speaker_id[i]]["name"],
'speech_act': speech_acts_seq[speaker_id[i]],
'seq_id': speaker_id[i],
'speech': sent_toks,
'paragraph': True})
return(bits)
def simplify(self, seq_map):
"Take a sequence of speech parts and make more natural by removing name reptition etc."
for i in range(0, len(seq_map)):
seq_map[i]['speaker_str'] = seq_map[i]['speaker_name'] # default
            # Same speaker continues:
if i > 0 and seq_map[i]['seq_id'] == seq_map[i - 1]['seq_id']:
seq_map[i]['speaker_str'] = ""
seq_map[i]['speech_act'] = ""
seq_map[i]['paragraph'] = False
else:
if i > 1 and seq_map[i]['seq_id'] == seq_map[i - 2]['seq_id'] \
and seq_map[i]['seq_id'] != seq_map[i - 1]['seq_id']:
seq_map[i]['speaker_str'] = ""
seq_map[i]['speech_act'] = ""
seq_map[i]['paragraph'] = True
return seq_map
def report_seq(self, seq_map):
"""Convert sequence of speeches to a tokens."""
sents = []
for i in range(0, len(seq_map)):
if seq_map[i]['paragraph']:
# text += "\n "
quote_start = '"'
else:
quote_start = ""
if i > len(seq_map) - 2 or seq_map[i + 1]['paragraph']:
quote_end = '"'
else:
quote_end = " "
if len(seq_map[i]['speech_act']) > 0:
speech_act = seq_map[i]['speech_act'] + ","
else:
speech_act = seq_map[i]['speech_act']
tokens = [utils.START_TOKEN]
tokens.append(seq_map[i]['speaker_str'])
tokens.append(speech_act)
tokens.append(quote_start)
tokens.extend(seq_map[i]['speech'][1:-1])
tokens.append(quote_end)
tokens.append(utils.END_TOKEN)
sents.append(tokens)
return sents
def make_dialogue(self, seeds):
"""Returns a list of sentences, each being a list of tokens."""
acts = self.make_speech_bits(seeds)
seq_map = self.simplify(acts)
sents = self.report_seq(seq_map)
return(sents)
def dev():
import knowledge.names as names
mcW = mc.MarkovChain()
nm = names.NameMaker()
speakers = [nm.random_person() for i in range(1, 4)]
dm = dialogue_maker([n['name'] for n in speakers], [n['pronoun'] for n in speakers], mcW)
dlg = dm.make_dialogue(["dog", "run", "spot"])
print(dlg)
| dcorney/text-generation | core/dialogue.py | Python | mit | 5,911 |
# -*- coding: utf-8 -*-
from django.contrib.admin import TabularInline
from .models import GalleryPhoto
class PhotoInline(TabularInline):
"""
Tabular inline that will be displayed in the gallery form during frontend
editing or in the admin site.
"""
model = GalleryPhoto
fk_name = "gallery"
| izimobil/djangocms-unitegallery | djangocms_unitegallery/admin.py | Python | mit | 318 |
from __future__ import annotations
from collections import defaultdict
from collections.abc import Generator, Iterable, Mapping, MutableMapping
from contextlib import contextmanager
import logging
import re
import textwrap
from types import MappingProxyType
from typing import TYPE_CHECKING, Any, NamedTuple
from markdown_it.rules_block.html_block import HTML_SEQUENCES
from mdformat import codepoints
from mdformat._compat import Literal
from mdformat._conf import DEFAULT_OPTS
from mdformat.renderer._util import (
RE_CHAR_REFERENCE,
decimalify_leading,
decimalify_trailing,
escape_asterisk_emphasis,
escape_underscore_emphasis,
get_list_marker_type,
is_tight_list,
is_tight_list_item,
longest_consecutive_sequence,
maybe_add_link_brackets,
)
from mdformat.renderer.typing import Postprocess, Render
if TYPE_CHECKING:
from mdformat.renderer import RenderTreeNode
LOGGER = logging.getLogger(__name__)
# A marker used to point a location where word wrap is allowed
# to occur.
WRAP_POINT = "\x00"
# A marker used to indicate location of a character that should be preserved
# during word wrap. Should be converted to the actual character after wrap.
PRESERVE_CHAR = "\x00"
def make_render_children(separator: str) -> Render:
def render_children(
node: RenderTreeNode,
context: RenderContext,
) -> str:
return separator.join(child.render(context) for child in node.children)
return render_children
def hr(node: RenderTreeNode, context: RenderContext) -> str:
thematic_break_width = 70
return "_" * thematic_break_width
def code_inline(node: RenderTreeNode, context: RenderContext) -> str:
code = node.content
all_chars_are_whitespace = not code.strip()
longest_backtick_seq = longest_consecutive_sequence(code, "`")
if longest_backtick_seq:
separator = "`" * (longest_backtick_seq + 1)
return f"{separator} {code} {separator}"
if code.startswith(" ") and code.endswith(" ") and not all_chars_are_whitespace:
return f"` {code} `"
return f"`{code}`"
def html_block(node: RenderTreeNode, context: RenderContext) -> str:
content = node.content.rstrip("\n")
# Need to strip leading spaces because we do so for regular Markdown too.
# Without the stripping the raw HTML and Markdown get unaligned and
# semantic may change.
content = content.lstrip()
return content
def html_inline(node: RenderTreeNode, context: RenderContext) -> str:
return node.content
def _in_block(block_name: str, node: RenderTreeNode) -> bool:
while node.parent:
if node.parent.type == block_name:
return True
node = node.parent
return False
def hardbreak(node: RenderTreeNode, context: RenderContext) -> str:
if _in_block("heading", node):
return "<br /> "
return "\\" + "\n"
def softbreak(node: RenderTreeNode, context: RenderContext) -> str:
if context.do_wrap and _in_block("paragraph", node):
return WRAP_POINT
return "\n"
def text(node: RenderTreeNode, context: RenderContext) -> str:
"""Process a text token.
Text should always be a child of an inline token. An inline token
should always be enclosed by a heading or a paragraph.
"""
text = node.content
# Escape backslash to prevent it from making unintended escapes.
# This escape has to be first, else we start multiplying backslashes.
text = text.replace("\\", "\\\\")
text = escape_asterisk_emphasis(text) # Escape emphasis/strong marker.
text = escape_underscore_emphasis(text) # Escape emphasis/strong marker.
text = text.replace("[", "\\[") # Escape link label enclosure
text = text.replace("]", "\\]") # Escape link label enclosure
text = text.replace("<", "\\<") # Escape URI enclosure
text = text.replace("`", "\\`") # Escape code span marker
# Escape "&" if it starts a sequence that can be interpreted as
# a character reference.
text = RE_CHAR_REFERENCE.sub(r"\\\g<0>", text)
# The parser can give us consecutive newlines which can break
# the markdown structure. Replace two or more consecutive newlines
# with newline character's decimal reference.
text = text.replace("\n\n", " ")
# If the last character is a "!" and the token next up is a link, we
# have to escape the "!" or else the link will be interpreted as image.
next_sibling = node.next_sibling
if text.endswith("!") and next_sibling and next_sibling.type == "link":
text = text[:-1] + "\\!"
if context.do_wrap and _in_block("paragraph", node):
text = re.sub(r"\s+", WRAP_POINT, text)
return text
def fence(node: RenderTreeNode, context: RenderContext) -> str:
info_str = node.info.strip()
lang = info_str.split(maxsplit=1)[0] if info_str else ""
code_block = node.content
# Info strings of backtick code fences cannot contain backticks.
# If that is the case, we make a tilde code fence instead.
fence_char = "~" if "`" in info_str else "`"
# Format the code block using enabled codeformatter funcs
if lang in context.options.get("codeformatters", {}):
fmt_func = context.options["codeformatters"][lang]
try:
code_block = fmt_func(code_block, info_str)
except Exception:
# Swallow exceptions so that formatter errors (e.g. due to
# invalid code) do not crash mdformat.
assert node.map is not None, "A fence token must have `map` attribute set"
filename = context.options.get("mdformat", {}).get("filename", "")
warn_msg = (
f"Failed formatting content of a {lang} code block "
f"(line {node.map[0] + 1} before formatting)"
)
if filename:
warn_msg += f". Filename: {filename}"
LOGGER.warning(warn_msg)
# The code block must not include as long or longer sequence of `fence_char`s
# as the fence string itself
fence_len = max(3, longest_consecutive_sequence(code_block, fence_char) + 1)
fence_str = fence_char * fence_len
return f"{fence_str}{info_str}\n{code_block}{fence_str}"
def code_block(node: RenderTreeNode, context: RenderContext) -> str:
return fence(node, context)
def image(node: RenderTreeNode, context: RenderContext) -> str:
description = _render_inline_as_text(node, context)
if context.do_wrap:
# Prevent line breaks
description = description.replace(WRAP_POINT, " ")
ref_label = node.meta.get("label")
if ref_label:
context.env["used_refs"].add(ref_label)
ref_label_repr = ref_label.lower()
if description.lower() == ref_label_repr:
return f"![{description}]"
return f"![{description}][{ref_label_repr}]"
uri = node.attrs["src"]
assert isinstance(uri, str)
uri = maybe_add_link_brackets(uri)
title = node.attrs.get("title")
if title is not None:
return f'![{description}]({uri} "{title}")'
return f"![{description}]({uri})"
def _render_inline_as_text(node: RenderTreeNode, context: RenderContext) -> str:
"""Special kludge for image `alt` attributes to conform CommonMark spec.
Don't try to use it! Spec requires to show `alt` content with
stripped markup, instead of simple escaping.
"""
def text_renderer(node: RenderTreeNode, context: RenderContext) -> str:
return node.content
def image_renderer(node: RenderTreeNode, context: RenderContext) -> str:
return _render_inline_as_text(node, context)
inline_renderers: Mapping[str, Render] = defaultdict(
lambda: make_render_children(""),
{
"text": text_renderer,
"image": image_renderer,
"link": link,
"softbreak": softbreak,
},
)
inline_context = RenderContext(
inline_renderers, context.postprocessors, context.options, context.env
)
return make_render_children("")(node, inline_context)
def link(node: RenderTreeNode, context: RenderContext) -> str:
if node.info == "auto":
autolink_url = node.attrs["href"]
assert isinstance(autolink_url, str)
# The parser adds a "mailto:" prefix to autolink email href. We remove the
# prefix if it wasn't there in the source.
if autolink_url.startswith("mailto:") and not node.children[
0
].content.startswith("mailto:"):
autolink_url = autolink_url[7:]
return "<" + autolink_url + ">"
text = "".join(child.render(context) for child in node.children)
if context.do_wrap:
# Prevent line breaks
text = text.replace(WRAP_POINT, " ")
ref_label = node.meta.get("label")
if ref_label:
context.env["used_refs"].add(ref_label)
ref_label_repr = ref_label.lower()
if text.lower() == ref_label_repr:
return f"[{text}]"
return f"[{text}][{ref_label_repr}]"
uri = node.attrs["href"]
assert isinstance(uri, str)
uri = maybe_add_link_brackets(uri)
title = node.attrs.get("title")
if title is None:
return f"[{text}]({uri})"
assert isinstance(title, str)
title = title.replace('"', '\\"')
return f'[{text}]({uri} "{title}")'
def em(node: RenderTreeNode, context: RenderContext) -> str:
text = make_render_children(separator="")(node, context)
indicator = node.markup
return indicator + text + indicator
def strong(node: RenderTreeNode, context: RenderContext) -> str:
text = make_render_children(separator="")(node, context)
indicator = node.markup
return indicator + text + indicator
def heading(node: RenderTreeNode, context: RenderContext) -> str:
text = make_render_children(separator="")(node, context)
if node.markup == "=":
prefix = "# "
elif node.markup == "-":
prefix = "## "
else: # ATX heading
prefix = node.markup + " "
# There can be newlines in setext headers, but we make an ATX
# header always. Convert newlines to spaces.
text = text.replace("\n", " ")
# If the text ends in a sequence of hashes (#), the hashes will be
# interpreted as an optional closing sequence of the heading, and
# will not be rendered. Escape a line ending hash to prevent this.
if text.endswith("#"):
text = text[:-1] + "\\#"
return prefix + text
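# Illustrative sketch (not part of the original module): setext headings are
# normalised to ATX form while ATX markers are kept, e.g.
#
#   markup "="   + text "Title" -> "# Title"
#   markup "-"   + text "Title" -> "## Title"
#   markup "###" + text "Title" -> "### Title"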
def blockquote(node: RenderTreeNode, context: RenderContext) -> str:
marker = "> "
with context.indented(len(marker)):
text = make_render_children(separator="\n\n")(node, context)
lines = text.splitlines()
if not lines:
return ">"
quoted_lines = (f"{marker}{line}" if line else ">" for line in lines)
quoted_str = "\n".join(quoted_lines)
return quoted_str
def _wrap(text: str, *, width: int | Literal["no"]) -> str:
"""Wrap text at locations pointed by `WRAP_POINT`s.
Converts `WRAP_POINT`s to either a space or newline character, thus
wrapping the text. Already existing whitespace will be preserved as
is.
"""
text, replacements = _prepare_wrap(text)
if width == "no":
return _recover_preserve_chars(text, replacements)
wrapper = textwrap.TextWrapper(
break_long_words=False,
break_on_hyphens=False,
width=width,
expand_tabs=False,
replace_whitespace=False,
)
wrapped = wrapper.fill(text)
wrapped = _recover_preserve_chars(wrapped, replacements)
return " " + wrapped if text.startswith(" ") else wrapped
def _prepare_wrap(text: str) -> tuple[str, str]:
"""Prepare text for wrap.
Convert `WRAP_POINT`s to spaces. Convert whitespace to
`PRESERVE_CHAR`s. Return a tuple with the prepared string, and
another string consisting of replacement characters for
`PRESERVE_CHAR`s.
"""
result = ""
replacements = ""
for c in text:
if c == WRAP_POINT:
if not result or result[-1] != " ":
result += " "
elif c in codepoints.UNICODE_WHITESPACE:
result += PRESERVE_CHAR
replacements += c
else:
result += c
return result, replacements
def _recover_preserve_chars(text: str, replacements: str) -> str:
replacement_iterator = iter(replacements)
return "".join(
next(replacement_iterator) if c == PRESERVE_CHAR else c for c in text
)
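# Brief illustration of the two helpers above (added for clarity; WRAP_POINT
# and PRESERVE_CHAR are the module-level sentinel characters defined elsewhere
# in this file):
#
#     prepared, replacements = _prepare_wrap("foo" + WRAP_POINT + "bar baz")
#     # prepared == "foo bar" + PRESERVE_CHAR + "baz"; replacements == " "
#     _recover_preserve_chars(prepared, replacements)  # -> "foo bar baz"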
def paragraph(node: RenderTreeNode, context: RenderContext) -> str: # noqa: C901
inline_node = node.children[0]
text = inline_node.render(context)
if context.do_wrap:
wrap_mode = context.options["mdformat"]["wrap"]
if isinstance(wrap_mode, int):
wrap_mode -= context.env["indent_width"]
wrap_mode = max(1, wrap_mode)
text = _wrap(text, width=wrap_mode)
# A paragraph can start or end in whitespace e.g. if the whitespace was
# in decimal representation form. We need to re-decimalify it, one reason being
# to enable "empty" paragraphs with whitespace only.
text = decimalify_leading(codepoints.UNICODE_WHITESPACE, text)
text = decimalify_trailing(codepoints.UNICODE_WHITESPACE, text)
lines = text.split("\n")
for i in range(len(lines)):
# Strip whitespace to prevent issues like a line starting tab that is
# interpreted as start of a code block.
lines[i] = lines[i].strip()
# If a line looks like an ATX heading, escape the first hash.
if re.match(r"#{1,6}( |\t|$)", lines[i]):
lines[i] = f"\\{lines[i]}"
# Make sure a paragraph line does not start with ">"
# (otherwise it will be interpreted as a block quote).
if lines[i].startswith(">"):
lines[i] = f"\\{lines[i]}"
# Make sure a paragraph line does not start with "*", "-" or "+"
# followed by a space, tab, or end of line.
# (otherwise it will be interpreted as list item).
if re.match(r"[-*+]( |\t|$)", lines[i]):
lines[i] = f"\\{lines[i]}"
# If a line starts with a number followed by "." or ")" followed by
# a space, tab or end of line, escape the "." or ")" or it will be
# interpreted as ordered list item.
if re.match(r"[0-9]+\)( |\t|$)", lines[i]):
lines[i] = lines[i].replace(")", "\\)", 1)
if re.match(r"[0-9]+\.( |\t|$)", lines[i]):
lines[i] = lines[i].replace(".", "\\.", 1)
# Consecutive "-", "*" or "_" sequences can be interpreted as thematic
# break. Escape them.
space_removed = lines[i].replace(" ", "").replace("\t", "")
if len(space_removed) >= 3:
if all(c == "*" for c in space_removed):
lines[i] = lines[i].replace("*", "\\*", 1) # pragma: no cover
elif all(c == "-" for c in space_removed):
lines[i] = lines[i].replace("-", "\\-", 1)
elif all(c == "_" for c in space_removed):
lines[i] = lines[i].replace("_", "\\_", 1) # pragma: no cover
# A stripped line where all characters are "=" or "-" will be
# interpreted as a setext heading. Escape.
stripped = lines[i].strip(" \t")
if all(c == "-" for c in stripped):
lines[i] = lines[i].replace("-", "\\-", 1)
elif all(c == "=" for c in stripped):
lines[i] = lines[i].replace("=", "\\=", 1)
# Check if the line could be interpreted as an HTML block.
# If yes, prefix it with 4 spaces to prevent this.
for html_seq_tuple in HTML_SEQUENCES:
can_break_paragraph = html_seq_tuple[2]
opening_re = html_seq_tuple[0]
if can_break_paragraph and opening_re.search(lines[i]):
lines[i] = f" {lines[i]}"
break
text = "\n".join(lines)
return text
def list_item(node: RenderTreeNode, context: RenderContext) -> str:
"""Return one list item as string.
This returns just the content. List item markers and indentation are
added in `bullet_list` and `ordered_list` renderers.
"""
block_separator = "\n" if is_tight_list_item(node) else "\n\n"
text = make_render_children(block_separator)(node, context)
if not text.strip():
return ""
return text
def bullet_list(node: RenderTreeNode, context: RenderContext) -> str:
marker_type = get_list_marker_type(node)
first_line_indent = " "
indent = " " * len(marker_type + first_line_indent)
block_separator = "\n" if is_tight_list(node) else "\n\n"
with context.indented(len(indent)):
text = ""
for child_idx, child in enumerate(node.children):
list_item = child.render(context)
formatted_lines = []
line_iterator = iter(list_item.split("\n"))
first_line = next(line_iterator)
formatted_lines.append(
f"{marker_type}{first_line_indent}{first_line}"
if first_line
else marker_type
)
for line in line_iterator:
formatted_lines.append(f"{indent}{line}" if line else "")
text += "\n".join(formatted_lines)
if child_idx != len(node.children) - 1:
text += block_separator
return text
def ordered_list(node: RenderTreeNode, context: RenderContext) -> str:
consecutive_numbering = context.options.get("mdformat", {}).get(
"number", DEFAULT_OPTS["number"]
)
marker_type = get_list_marker_type(node)
first_line_indent = " "
block_separator = "\n" if is_tight_list(node) else "\n\n"
list_len = len(node.children)
starting_number = node.attrs.get("start")
if starting_number is None:
starting_number = 1
assert isinstance(starting_number, int)
if consecutive_numbering:
indent_width = len(
f"{list_len + starting_number - 1}{marker_type}{first_line_indent}"
)
else:
indent_width = len(f"{starting_number}{marker_type}{first_line_indent}")
text = ""
with context.indented(indent_width):
for list_item_index, list_item in enumerate(node.children):
list_item_text = list_item.render(context)
formatted_lines = []
line_iterator = iter(list_item_text.split("\n"))
first_line = next(line_iterator)
if consecutive_numbering:
# Prefix first line of the list item with consecutive numbering,
# padded with zeros to make all markers of even length.
# E.g.
# 002. This is the first list item
# 003. Second item
# ...
# 112. Last item
number = starting_number + list_item_index
pad = len(str(list_len + starting_number - 1))
number_str = str(number).rjust(pad, "0")
formatted_lines.append(
f"{number_str}{marker_type}{first_line_indent}{first_line}"
if first_line
else f"{number_str}{marker_type}"
)
else:
# Prefix first line of first item with the starting number of the
# list. Prefix following list items with the number one
# prefixed by zeros to make the list item marker of even length
# with the first one.
# E.g.
# 5321. This is the first list item
# 0001. Second item
# 0001. Third item
first_item_marker = f"{starting_number}{marker_type}"
other_item_marker = (
"0" * (len(str(starting_number)) - 1) + "1" + marker_type
)
if list_item_index == 0:
formatted_lines.append(
f"{first_item_marker}{first_line_indent}{first_line}"
if first_line
else first_item_marker
)
else:
formatted_lines.append(
f"{other_item_marker}{first_line_indent}{first_line}"
if first_line
else other_item_marker
)
for line in line_iterator:
formatted_lines.append(" " * indent_width + line if line else "")
text += "\n".join(formatted_lines)
if list_item_index != len(node.children) - 1:
text += block_separator
return text
DEFAULT_RENDERERS: Mapping[str, Render] = MappingProxyType(
{
"inline": make_render_children(""),
"root": make_render_children("\n\n"),
"hr": hr,
"code_inline": code_inline,
"html_block": html_block,
"html_inline": html_inline,
"hardbreak": hardbreak,
"softbreak": softbreak,
"text": text,
"fence": fence,
"code_block": code_block,
"link": link,
"image": image,
"em": em,
"strong": strong,
"heading": heading,
"blockquote": blockquote,
"paragraph": paragraph,
"bullet_list": bullet_list,
"ordered_list": ordered_list,
"list_item": list_item,
}
)
class RenderContext(NamedTuple):
"""A collection of data that is passed as input to `Render` and
`Postprocess` functions."""
renderers: Mapping[str, Render]
postprocessors: Mapping[str, Iterable[Postprocess]]
options: Mapping[str, Any]
env: MutableMapping
@contextmanager
def indented(self, width: int) -> Generator[None, None, None]:
self.env["indent_width"] += width
try:
yield
finally:
self.env["indent_width"] -= width
@property
def do_wrap(self) -> bool:
wrap_mode = self.options.get("mdformat", {}).get("wrap", DEFAULT_OPTS["wrap"])
return isinstance(wrap_mode, int) or wrap_mode == "no"
def with_default_renderer_for(self, *syntax_names: str) -> RenderContext:
renderers = dict(self.renderers)
for syntax in syntax_names:
if syntax in DEFAULT_RENDERERS:
renderers[syntax] = DEFAULT_RENDERERS[syntax]
else:
renderers.pop(syntax, None)
return RenderContext(
MappingProxyType(renderers), self.postprocessors, self.options, self.env
)
| executablebooks/mdformat | src/mdformat/renderer/_context.py | Python | mit | 22,558 |
import teca.utils as tecautils
import teca.ConfigHandler as tecaconf
import unittest
class TestFileFilter(unittest.TestCase):
def setUp(self):
self.conf = tecaconf.ConfigHandler(
"tests/test_data/configuration.json",
{"starting_path": "tests/test_data/images"}
)
self.files_list = [
"foo.doc",
"yukinon.jpg",
"cuteflushadoingflushathings.webm"
]
def test_dothefiltering(self):
self.assertTrue("foo.doc" not in
tecautils.filterImages(self.files_list,
self.conf))
self.assertTrue("yukinon.jpg" in
tecautils.filterImages(self.files_list,
self.conf))
def test_nofiles(self):
self.assertEqual(0, len(tecautils.filterImages([], self.conf)))
| alfateam123/Teca | tests/test_utils.py | Python | mit | 902 |
#!/usr/bin/env python
from hdf5handler import HDF5Handler
handler = HDF5Handler('mydata.hdf5')
handler.open()
for i in range(100):
handler.put(i, 'numbers')
handler.close()
| iambernie/hdf5handler | examples/opening.py | Python | mit | 183 |
"""
[2015-07-13] Challenge #223 [Easy] Garland words
https://www.reddit.com/r/dailyprogrammer/comments/3d4fwj/20150713_challenge_223_easy_garland_words/
# Description
A [_garland word_](http://blog.vivekhaldar.com/post/89763722591/garland-words) is one that starts and ends with the
same N letters in the same order, for some N greater than 0, but less than the length of the word. I'll call the
maximum N for which this works the garland word's _degree_. For instance, "onion" is a garland word of degree 2,
because its first 2 letters "on" are the same as its last 2 letters. The name "garland word" comes from the fact that
you can make chains of the word in this manner:
onionionionionionionionionionion...
Today's challenge is to write a function `garland` that, given a lowercase word, returns the degree of the word if it's
a garland word, and 0 otherwise.
# Examples
garland("programmer") -> 0
garland("ceramic") -> 1
garland("onion") -> 2
garland("alfalfa") -> 4
# Optional challenges
1. Given a garland word, print out the chain using that word, as with "onion" above. You can make it as long or short
as you like, even infinite.
1. Find the largest degree of any garland word in the [enable1 English word
list](https://code.google.com/p/dotnetperls-controls/downloads/detail?name=enable1.txt).
1. Find a word list for some other language, and see if you can find a language with a garland word with a higher
degree.
*Thanks to /u/skeeto for submitting this challenge on /r/dailyprogrammer_ideas!*
"""
def main():
pass
if __name__ == "__main__":
main()
| DayGitH/Python-Challenges | DailyProgrammer/DP20150713A.py | Python | mit | 1,597 |
from decimal import Decimal
from django import forms
from django.template.loader import render_to_string
from django.template.defaultfilters import slugify
class BaseWidget(forms.TextInput):
"""
Base widget. Do not use this directly.
"""
template = None
instance = None
def get_parent_id(self, name, attrs):
final_attrs = self.build_attrs(attrs, type=self.input_type, name=name)
return final_attrs['id']
def get_widget_id(self, prefix, name, key=''):
if self.instance:
opts = self.instance._meta
widget_id = '%s-%s-%s_%s-%s' % (prefix, name, opts.app_label, opts.module_name, self.instance.pk)
else:
widget_id = '%s-%s' % (prefix, name)
if key:
widget_id = '%s_%s' % (widget_id, slugify(key))
return widget_id
def get_values(self, min_value, max_value, step=1):
decimal_step = Decimal(str(step))
value = Decimal(str(min_value))
while value <= max_value:
yield value
value += decimal_step
class SliderWidget(BaseWidget):
"""
Slider widget.
In order to use this widget you must load the jQuery.ui slider
javascript.
This widget triggers the following javascript events:
- *slider_change* with the vote value as argument
(fired when the user changes his vote)
- *slider_delete* without arguments
(fired when the user deletes his vote)
It's easy to bind these events using jQuery, e.g.::
$(document).bind('slider_change', function(event, value) {
alert('New vote: ' + value);
});
"""
def __init__(self, min_value, max_value, step, instance=None,
can_delete_vote=True, key='', read_only=False, default='',
template='ratings/slider_widget.html', attrs=None):
"""
The argument *default* is used when the initial value is None.
"""
super(SliderWidget, self).__init__(attrs)
self.min_value = min_value
self.max_value = max_value
self.step = step
self.instance = instance
self.can_delete_vote = can_delete_vote
self.read_only = read_only
self.default = default
self.template = template
self.key = key
def get_context(self, name, value, attrs=None):
# here we convert *min_value*, *max_value*, *step* and *value*
# to string to avoid odd behaviours of Django localization
# in the template (and, for backward compatibility we do not
# want to use the *unlocalize* filter)
attrs['type'] = 'hidden'
return {
'min_value': str(self.min_value),
'max_value': str(self.max_value),
'step': str(self.step),
'can_delete_vote': self.can_delete_vote,
'read_only': self.read_only,
'default': self.default,
'parent': super(SliderWidget, self).render(name, value, attrs),
'parent_id': self.get_parent_id(name, attrs),
'value': str(value),
'has_value': bool(value),
'slider_id': self.get_widget_id('slider', name, self.key),
'label_id': 'slider-label-%s' % name,
'remove_id': 'slider-remove-%s' % name,
}
def render(self, name, value, attrs=None):
context = self.get_context(name, value, attrs or {})
return render_to_string(self.template, context)
class StarWidget(BaseWidget):
"""
Starrating widget.
In order to use this widget you must download the
jQuery Star Rating Plugin available at
http://www.fyneworks.com/jquery/star-rating/#tab-Download
and then load the required javascripts and css, e.g.::
<link href="/path/to/jquery.rating.css" rel="stylesheet" type="text/css" />
<script type="text/javascript" src="/path/to/jquery.MetaData.js"></script>
<script type="text/javascript" src="/path/to/jquery.rating.js"></script>
This widget triggers the following javascript events:
- *star_change* with the vote value as argument
(fired when the user changes his vote)
- *star_delete* without arguments
(fired when the user deletes his vote)
It's easy to bind these events using jQuery, e.g.::
$(document).bind('star_change', function(event, value) {
alert('New vote: ' + value);
});
"""
def __init__(self, min_value, max_value, step, instance=None,
can_delete_vote=True, key='', read_only=False,
template='ratings/star_widget.html', attrs=None):
super(StarWidget, self).__init__(attrs)
self.min_value = min_value
self.max_value = max_value
self.step = step
self.instance = instance
self.can_delete_vote = can_delete_vote
self.read_only = read_only
self.template = template
self.key = key
def get_context(self, name, value, attrs=None):
# here we convert *min_value*, *max_value* and *step*
# to string to avoid odd behaviours of Django localization
# in the template (and, for backward compatibility we do not
# want to use the *unlocalize* filter)
attrs['type'] = 'hidden'
split_value = int(1 / self.step)
if split_value == 1:
values = range(1, self.max_value+1)
split = u''
else:
values = self.get_values(self.min_value, self.max_value, self.step)
split = u' {split:%d}' % split_value
return {
'min_value': str(self.min_value),
'max_value': str(self.max_value),
'step': str(self.step),
'can_delete_vote': self.can_delete_vote,
'read_only': self.read_only,
'values': values,
'split': split,
'parent': super(StarWidget, self).render(name, value, attrs),
'parent_id': self.get_parent_id(name, attrs),
'value': self._get_value(value, split_value),
'star_id': self.get_widget_id('star', name, self.key),
}
def _get_value(self, original, split):
if original:
value = round(original * split) / split
return Decimal(str(value))
def render(self, name, value, attrs=None):
context = self.get_context(name, value, attrs or {})
return render_to_string(self.template, context)
class LikeWidget(BaseWidget):
def __init__(self, min_value, max_value, instance=None,
can_delete_vote=True, template='ratings/like_widget.html', attrs=None):
super(LikeWidget, self).__init__(attrs)
self.min_value = min_value
self.max_value = max_value
self.instance = instance
self.can_delete_vote = can_delete_vote
self.template = template
def get_context(self, name, value, attrs=None):
# here we convert *min_value*, *max_value* and *step*
# to string to avoid odd behaviours of Django localization
# in the template (and, for backward compatibility we do not
# want to use the *unlocalize* filter)
attrs['type'] = 'hidden'
return {
'min_value': str(self.min_value),
'max_value': str(self.max_value),
'can_delete_vote': self.can_delete_vote,
'parent': super(LikeWidget, self).render(name, value, attrs),
'parent_id': self.get_parent_id(name, attrs),
'value': str(value),
'like_id': self.get_widget_id('like', name),
}
def render(self, name, value, attrs=None):
context = self.get_context(name, value, attrs or {})
return render_to_string(self.template, context) | redsolution/django-generic-ratings | ratings/forms/widgets.py | Python | mit | 7,704 |
#!/usr/bin/env python
import sys
import os
from treestore import Treestore
try: taxonomy = sys.argv[1]
except: taxonomy = None
t = Treestore()
treebase_uri = 'http://purl.org/phylo/treebase/phylows/tree/%s'
tree_files = [x for x in os.listdir('trees') if x.endswith('.nex')]
base_uri = 'http://www.phylocommons.org/trees/%s'
tree_list = set(t.list_trees())
for tree_uri in tree_list:
if not 'TB2_' in tree_uri: continue
tree_id = t.id_from_uri(tree_uri)
tb_uri = treebase_uri % (tree_id.replace('_', ':'))
print tree_id, tb_uri
t.annotate(tree_uri, annotations='?tree bibo:cites <%s> .' % tb_uri)
| NESCent/phylocommons | tools/treebase_scraper/annotate_trees.py | Python | mit | 622 |
from .stats_view_base import StatsViewSwagger, StatsViewSwaggerKeyRequired
from .stats_util_dataverses import StatsMakerDataverses
class DataverseCountByMonthView(StatsViewSwaggerKeyRequired):
"""API View - Dataverse counts by Month."""
# Define the swagger attributes
# Note: api_path must match the path in urls.py
#
api_path = '/dataverses/count/monthly'
summary = ('Number of published Dataverses by'
' the month they were created*. (*'
' Not month published)')
description = ('Returns a list of counts and'
' cumulative counts of all Dataverses added in a month')
description_200 = 'A list of Dataverse counts by month'
param_names = StatsViewSwagger.PARAM_DV_API_KEY +\
StatsViewSwagger.BASIC_DATE_PARAMS +\
StatsViewSwagger.PUBLISH_PARAMS +\
StatsViewSwagger.PRETTY_JSON_PARAM +\
StatsViewSwagger.PARAM_AS_CSV
tags = [StatsViewSwagger.TAG_DATAVERSES]
def get_stats_result(self, request):
"""Return the StatsResult object for this statistic"""
stats_datasets = StatsMakerDataverses(**request.GET.dict())
pub_state = self.get_pub_state(request)
if pub_state == self.PUB_STATE_ALL:
stats_result = stats_datasets.get_dataverse_counts_by_month()
elif pub_state == self.PUB_STATE_UNPUBLISHED:
stats_result = stats_datasets.get_dataverse_counts_by_month_unpublished()
else:
stats_result = stats_datasets.get_dataverse_counts_by_month_published()
return stats_result
class DataverseTotalCounts(StatsViewSwaggerKeyRequired):
"""API View - Total count of all Dataverses"""
# Define the swagger attributes
# Note: api_path must match the path in urls.py
#
api_path = '/dataverses/count'
summary = ('Simple count of published Dataverses')
description = ('Returns number of published Dataverses')
description_200 = 'Number of published Dataverses'
param_names = StatsViewSwagger.PARAM_DV_API_KEY + StatsViewSwagger.PUBLISH_PARAMS + StatsViewSwagger.PRETTY_JSON_PARAM
tags = [StatsViewSwagger.TAG_DATAVERSES]
result_name = StatsViewSwagger.RESULT_NAME_TOTAL_COUNT
def get_stats_result(self, request):
"""Return the StatsResult object for this statistic"""
stats_datasets = StatsMakerDataverses(**request.GET.dict())
pub_state = self.get_pub_state(request)
if pub_state == self.PUB_STATE_ALL:
stats_result = stats_datasets.get_dataverse_count()
elif pub_state == self.PUB_STATE_UNPUBLISHED:
stats_result = stats_datasets.get_dataverse_count_unpublished()
else:
stats_result = stats_datasets.get_dataverse_count_published()
return stats_result
class DataverseAffiliationCounts(StatsViewSwaggerKeyRequired):
"""API View - Number of Dataverses by Affiliation"""
# Define the swagger attributes
# Note: api_path must match the path in urls.py
#
api_path = '/dataverses/count/by-affiliation'
summary = ('Number of Dataverses by Affiliation')
description = ('Number of Dataverses by Affiliation.')
description_200 = 'Number of published Dataverses by Affiliation.'
param_names = StatsViewSwagger.PARAM_DV_API_KEY\
+ StatsViewSwagger.PUBLISH_PARAMS\
+ StatsViewSwagger.PRETTY_JSON_PARAM\
+ StatsViewSwagger.PARAM_AS_CSV
result_name = StatsViewSwagger.RESULT_NAME_AFFILIATION_COUNTS
tags = [StatsViewSwagger.TAG_DATAVERSES]
def get_stats_result(self, request):
"""Return the StatsResult object for this statistic"""
stats_datasets = StatsMakerDataverses(**request.GET.dict())
pub_state = self.get_pub_state(request)
if pub_state == self.PUB_STATE_ALL:
stats_result = stats_datasets.get_dataverse_affiliation_counts()
elif pub_state == self.PUB_STATE_UNPUBLISHED:
stats_result = stats_datasets.get_dataverse_affiliation_counts_unpublished()
else:
stats_result = stats_datasets.get_dataverse_affiliation_counts_published()
return stats_result
class DataverseTypeCounts(StatsViewSwaggerKeyRequired):
# Define the swagger attributes
# Note: api_path must match the path in urls.py
#
api_path = '/dataverses/count/by-type'
summary = ('Number of Dataverses by Type')
description = ('Number of Dataverses by Type.')
description_200 = 'Number of published Dataverses by Type.'
param_names = StatsViewSwagger.PARAM_DV_API_KEY + StatsViewSwagger.PUBLISH_PARAMS +\
StatsViewSwagger.PRETTY_JSON_PARAM +\
StatsViewSwagger.DV_TYPE_UNCATEGORIZED_PARAM +\
StatsViewSwagger.PARAM_AS_CSV
result_name = StatsViewSwagger.RESULT_NAME_DATAVERSE_TYPE_COUNTS
tags = [StatsViewSwagger.TAG_DATAVERSES]
def is_show_uncategorized(self, request):
"""Return the result of the "?show_uncategorized" query string param"""
show_uncategorized = request.GET.get('show_uncategorized', False)
if show_uncategorized is True or show_uncategorized == 'true':
return True
return False
def get_stats_result(self, request):
"""Return the StatsResult object for this statistic"""
stats_datasets = StatsMakerDataverses(**request.GET.dict())
if self.is_show_uncategorized(request):
exclude_uncategorized = False
else:
exclude_uncategorized = True
pub_state = self.get_pub_state(request)
if pub_state == self.PUB_STATE_ALL:
stats_result = stats_datasets.get_dataverse_counts_by_type(exclude_uncategorized)
elif pub_state == self.PUB_STATE_UNPUBLISHED:
stats_result = stats_datasets.get_dataverse_counts_by_type_unpublished(exclude_uncategorized)
else:
stats_result = stats_datasets.get_dataverse_counts_by_type_published(exclude_uncategorized)
return stats_result
| IQSS/miniverse | dv_apps/metrics/stats_views_dataverses.py | Python | mit | 6,085 |
import uuid
from django.db import models
from django.conf import settings
from django.contrib.auth.models import AbstractUser
from django.contrib.auth.models import BaseUserManager
from django.utils import timezone
from accelerator_abstract.models import BaseUserRole
from accelerator_abstract.models.base_base_profile import EXPERT_USER_TYPE
MAX_USERNAME_LENGTH = 30
class UserManager(BaseUserManager):
use_in_migrations = True
def _create_user(self, email, password,
is_staff, is_superuser, **extra_fields):
"""
        Creates and saves a User with the given email and password.
"""
now = timezone.now()
if not email:
raise ValueError('An email address must be provided.')
email = self.normalize_email(email)
if "is_active" not in extra_fields:
extra_fields["is_active"] = True
if "username" not in extra_fields:
# For now we need to have a unique id that is at
# most 30 characters long. Using uuid and truncating.
# Ideally username goes away entirely at some point
# since we're really using email. If we have to keep
# username for some reason then we could switch over
# to a string version of the pk which is guaranteed
# be unique.
extra_fields["username"] = str(uuid.uuid4())[:MAX_USERNAME_LENGTH]
user = self.model(email=email,
is_staff=is_staff,
is_superuser=is_superuser,
last_login=None,
date_joined=now,
**extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_user(self, email=None, password=None, **extra_fields):
return self._create_user(email, password, False, False,
**extra_fields)
def create_superuser(self, email, password, **extra_fields):
return self._create_user(email, password, True, True,
**extra_fields)
class User(AbstractUser):
# Override the parent email field to add uniqueness constraint
email = models.EmailField(blank=True, unique=True)
objects = UserManager()
class Meta:
db_table = 'auth_user'
managed = settings.ACCELERATOR_MODELS_ARE_MANAGED
def __init__(self, *args, **kwargs):
super(User, self).__init__(*args, **kwargs)
self.startup = None
self.team_member = None
self.profile = None
self.user_finalist_roles = None
class AuthenticationException(Exception):
pass
def __str__(self):
return self.email
def full_name(self):
fn = self.first_name
ln = self.last_name
if fn and ln:
name = u"%s %s" % (fn, ln)
else:
name = str(self.email)
return name
def user_phone(self):
return self._get_profile().phone
def image_url(self):
return self._get_profile().image_url()
def team_member_id(self):
return self.team_member.id if self._get_member() else ''
def user_title(self):
return self._get_title_and_company()['title']
def user_twitter_handle(self):
return self._get_profile().twitter_handle
def user_linked_in_url(self):
return self._get_profile().linked_in_url
def user_facebook_url(self):
return self._get_profile().facebook_url
def user_personal_website_url(self):
return self._get_profile().personal_website_url
def type(self):
return self._get_profile().user_type
def startup_name(self):
return self._get_title_and_company()['company']
def _get_title_and_company(self):
if self._is_expert() and self._has_expert_details():
profile = self._get_profile()
title = profile.title
company = profile.company
return {
"title": title,
"company": company
}
self._get_member()
title = self.team_member.title if self.team_member else ""
company = self.startup.name if self._get_startup() else None
return {
"title": title,
"company": company
}
def _has_expert_details(self):
if self._is_expert():
profile = self._get_profile()
return True if profile.title or profile.company else False
def startup_industry(self):
return self.startup.primary_industry if self._get_startup() else None
def top_level_startup_industry(self):
industry = (
self.startup.primary_industry if self._get_startup() else None)
return industry.parent if industry and industry.parent else industry
def startup_status_names(self):
if self._get_startup():
return [startup_status.program_startup_status.startup_status
for startup_status in self.startup.startupstatus_set.all()]
def finalist_user_roles(self):
if not self.user_finalist_roles:
finalist_roles = BaseUserRole.FINALIST_USER_ROLES
self.user_finalist_roles = self.programrolegrant_set.filter(
program_role__user_role__name__in=finalist_roles
).values_list('program_role__name', flat=True).distinct()
return list(self.user_finalist_roles)
def program(self):
return self.startup.current_program() if self._get_startup() else None
def location(self):
program = self.program()
return program.program_family.name if program else None
def year(self):
program = self.program()
return program.start_date.year if program else None
def is_team_member(self):
return True if self._get_member() else False
def _get_startup(self):
if not self.startup:
self._get_member()
if self.team_member:
self.startup = self.team_member.startup
return self.startup
def _get_member(self):
if not self.team_member:
self.team_member = self.startupteammember_set.last()
return self.team_member
def _get_profile(self):
if self.profile:
return self.profile
self.profile = self.get_profile()
return self.profile
def has_a_finalist_role(self):
return len(self.finalist_user_roles()) > 0
def _is_expert(self):
profile = self._get_profile()
return profile.user_type == EXPERT_USER_TYPE.lower()
| masschallenge/django-accelerator | simpleuser/models.py | Python | mit | 6,632 |
from setuptools import setup, find_packages
from codecs import open
import os
def read(*paths):
"""Build a file path from *paths* and return the contents."""
with open(os.path.join(*paths), 'r') as f:
return f.read()
setup(
name='transposer',
version='0.0.3',
description='Transposes columns and rows in delimited text files',
long_description=(read('README.rst')),
url='https://github.com/keithhamilton/transposer',
author='Keith Hamilton',
maintainer='Keith Hamilton',
maintainer_email='the.keith.hamilton@gmail.com',
license='BSD License',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Office/Business'
],
keywords='text, csv, tab-delimited, delimited, excel, sheet, spreadsheet',
packages=find_packages(exclude=['contrib', 'docs', 'test*', 'bin', 'include', 'lib', '.idea']),
install_requires=[],
package_data={},
data_files=[],
entry_points={
'console_scripts': [
'transposer=transposer.script.console_script:main'
]
}
)
| keithhamilton/transposer | setup.py | Python | mit | 1,307 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-11-01 20:02
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('phone_numbers', '0001_initial'),
('sims', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='phonenumber',
name='related_sim',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='phone_numbers', to='sims.Sim'),
),
]
| RobSpectre/garfield | garfield/phone_numbers/migrations/0002_phonenumber_related_sim.py | Python | mit | 626 |
def calc():
h, l = input().split(' ')
mapa = []
for i_row in range(int(h)):
mapa.append(input().split(' '))
maior_num = 0
for row in mapa:
for col in row:
n = int(col)
if (n > maior_num):
maior_num = n
qtd = [0 for i in range(maior_num + 1)]
for row in mapa:
for col in row:
n = int(col)
qtd[n] = qtd[n] + 1
menor = 1
for i in range(1, len(qtd)):
if (qtd[i] <= qtd[menor]):
menor = i
print(menor)
calc()
| DestructHub/bcs-contest | 2016/Main/L/Python/solution_1_wrong.py | Python | mit | 471 |
#!/usr/bin/env python
"""
Manage and display experimental results.
"""
__license__ = 'MIT License <http://www.opensource.org/licenses/mit-license.php>'
__author__ = 'Lucas Theis <lucas@theis.io>'
__docformat__ = 'epytext'
__version__ = '0.4.3'
import sys
import os
import numpy
import random
import scipy
import socket
sys.path.append('./code')
from argparse import ArgumentParser
from pickle import Unpickler, dump
from subprocess import Popen, PIPE
from os import path
from warnings import warn
from time import time, strftime, localtime
from numpy import ceil, argsort
from numpy.random import rand, randint
from distutils.version import StrictVersion
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from httplib import HTTPConnection
from getopt import getopt
class Experiment:
"""
@type time: float
@ivar time: time at initialization of experiment
@type duration: float
@ivar duration: time in seconds between initialization and saving
@type script: string
@ivar script: stores the content of the main Python script
@type platform: string
@ivar platform: information about operating system
@type processors: string
@ivar processors: some information about the processors
@type environ: string
@ivar environ: environment variables at point of initialization
@type hostname: string
@ivar hostname: hostname of server running the experiment
@type cwd: string
@ivar cwd: working directory at execution time
@type comment: string
@ivar comment: a comment describing the experiment
@type results: dictionary
@ivar results: container to store experimental results
@type commit: string
@ivar commit: git commit hash
@type modified: boolean
	@ivar modified: indicates uncommitted changes
@type filename: string
@ivar filename: path to stored results
@type seed: int
@ivar seed: random seed used through the experiment
@type versions: dictionary
@ivar versions: versions of Python, numpy and scipy
"""
def __str__(self):
"""
Summarize information about the experiment.
@rtype: string
@return: summary of the experiment
"""
strl = []
# date and duration of experiment
strl.append(strftime('date \t\t %a, %d %b %Y %H:%M:%S', localtime(self.time)))
strl.append('duration \t ' + str(int(self.duration)) + 's')
strl.append('hostname \t ' + self.hostname)
# commit hash
if self.commit:
if self.modified:
strl.append('commit \t\t ' + self.commit + ' (modified)')
else:
strl.append('commit \t\t ' + self.commit)
# results
strl.append('results \t {' + ', '.join(map(str, self.results.keys())) + '}')
# comment
if self.comment:
strl.append('\n' + self.comment)
return '\n'.join(strl)
def __del__(self):
self.status(None)
def __init__(self, filename='', comment='', seed=None, server=None, port=8000):
"""
If the filename is given and points to an existing experiment, load it.
Otherwise store the current timestamp and try to get commit information
from the repository in the current directory.
@type filename: string
@param filename: path to where the experiment will be stored
@type comment: string
@param comment: a comment describing the experiment
@type seed: integer
@param seed: random seed used in the experiment
"""
self.id = 0
self.time = time()
self.comment = comment
self.filename = filename
self.results = {}
self.seed = seed
self.script = ''
self.cwd = ''
self.platform = ''
self.processors = ''
self.environ = ''
self.duration = 0
self.versions = {}
self.server = ''
if self.seed is None:
self.seed = int((time() + 1e6 * rand()) * 1e3) % 4294967295
# set random seed
random.seed(self.seed)
numpy.random.seed(self.seed)
if self.filename:
# load given experiment
self.load()
else:
# identifies the experiment
self.id = randint(1E8)
# check if a comment was passed via the command line
parser = ArgumentParser(add_help=False)
parser.add_argument('--comment')
optlist, argv = parser.parse_known_args(sys.argv[1:])
optlist = vars(optlist)
# remove comment command line argument from argument list
sys.argv[1:] = argv
# comment given as command line argument
self.comment = optlist.get('comment', '')
# get OS information
self.platform = sys.platform
# arguments to the program
self.argv = sys.argv
self.script_path = sys.argv[0]
try:
with open(sys.argv[0]) as handle:
# store python script
self.script = handle.read()
except:
warn('Unable to read Python script.')
# environment variables
self.environ = os.environ
self.cwd = os.getcwd()
self.hostname = socket.gethostname()
# store some information about the processor(s)
if self.platform == 'linux2':
cmd = 'egrep "processor|model name|cpu MHz|cache size" /proc/cpuinfo'
with os.popen(cmd) as handle:
self.processors = handle.read()
elif self.platform == 'darwin':
cmd = 'system_profiler SPHardwareDataType | egrep "Processor|Cores|L2|Bus"'
with os.popen(cmd) as handle:
self.processors = handle.read()
# version information
self.versions['python'] = sys.version
self.versions['numpy'] = numpy.__version__
self.versions['scipy'] = scipy.__version__
# store information about git repository
if path.isdir('.git'):
# get commit hash
pr1 = Popen(['git', 'log', '-1'], stdout=PIPE)
pr2 = Popen(['head', '-1'], stdin=pr1.stdout, stdout=PIPE)
pr3 = Popen(['cut', '-d', ' ', '-f', '2'], stdin=pr2.stdout, stdout=PIPE)
self.commit = pr3.communicate()[0][:-1]
# check if project contains uncommitted changes
pr1 = Popen(['git', 'status', '--porcelain'], stdout=PIPE)
pr2 = Popen(['egrep', '^.M'], stdin=pr1.stdout, stdout=PIPE)
self.modified = pr2.communicate()[0]
if self.modified:
warn('Uncommitted changes.')
else:
# no git repository
self.commit = None
self.modified = False
# server managing experiments
self.server = server
self.port = port
self.status('running')
def status(self, status, **kwargs):
if self.server:
try:
conn = HTTPConnection(self.server, self.port)
conn.request('GET', '/version/')
resp = conn.getresponse()
if not resp.read().startswith('Experiment'):
raise RuntimeError()
HTTPConnection(self.server, self.port).request('POST', '', str(dict({
'id': self.id,
'version': __version__,
'status': status,
'hostname': self.hostname,
'cwd': self.cwd,
'script_path': self.script_path,
'script': self.script,
'comment': self.comment,
'time': self.time,
}, **kwargs)))
except:
warn('Unable to connect to \'{0}:{1}\'.'.format(self.server, self.port))
def progress(self, progress):
self.status('PROGRESS', progress=progress)
def save(self, filename=None, overwrite=False):
"""
Store results. If a filename is given, the default is overwritten.
@type filename: string
@param filename: path to where the experiment will be stored
@type overwrite: boolean
@param overwrite: overwrite existing files
"""
self.duration = time() - self.time
if filename is None:
filename = self.filename
else:
# replace {0} and {1} by date and time
tmp1 = strftime('%d%m%Y', localtime(time()))
tmp2 = strftime('%H%M%S', localtime(time()))
filename = filename.format(tmp1, tmp2)
self.filename = filename
# make sure directory exists
try:
os.makedirs(path.dirname(filename))
except OSError:
pass
# make sure filename is unique
counter = 0
pieces = path.splitext(filename)
if not overwrite:
while path.exists(filename):
counter += 1
filename = pieces[0] + '.' + str(counter) + pieces[1]
if counter:
warn(''.join(pieces) + ' already exists. Saving to ' + filename + '.')
# store experiment
with open(filename, 'wb') as handle:
dump({
'version': __version__,
'id': self.id,
'time': self.time,
'seed': self.seed,
'duration': self.duration,
'environ': self.environ,
'hostname': self.hostname,
'cwd': self.cwd,
'argv': self.argv,
'script': self.script,
'script_path': self.script_path,
'processors': self.processors,
'platform': self.platform,
'comment': self.comment,
'commit': self.commit,
'modified': self.modified,
'versions': self.versions,
'results': self.results}, handle, 1)
self.status('SAVE', filename=filename, duration=self.duration)
def load(self, filename=None):
"""
Loads experimental results from the specified file.
@type filename: string
@param filename: path to where the experiment is stored
"""
if filename:
self.filename = filename
with open(self.filename, 'rb') as handle:
res = load(handle)
self.time = res['time']
self.seed = res['seed']
self.duration = res['duration']
self.processors = res['processors']
self.environ = res['environ']
self.platform = res['platform']
self.comment = res['comment']
self.commit = res['commit']
self.modified = res['modified']
self.versions = res['versions']
self.results = res['results']
self.argv = res['argv'] \
if StrictVersion(res['version']) >= '0.3.1' else None
self.script = res['script'] \
if StrictVersion(res['version']) >= '0.4.0' else None
self.script_path = res['script_path'] \
if StrictVersion(res['version']) >= '0.4.0' else None
self.cwd = res['cwd'] \
if StrictVersion(res['version']) >= '0.4.0' else None
self.hostname = res['hostname'] \
if StrictVersion(res['version']) >= '0.4.0' else None
self.id = res['id'] \
if StrictVersion(res['version']) >= '0.4.0' else None
def __getitem__(self, key):
return self.results[key]
def __setitem__(self, key, value):
self.results[key] = value
def __delitem__(self, key):
del self.results[key]
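# Example usage (an illustrative sketch only, not part of the original module;
# the comment string, result key and filename pattern below are made up):
#
#     experiment = Experiment(comment='baseline run')
#     experiment['accuracy'] = 0.93
#     experiment.save('results/experiment.{0}.{1}.xpck')
#
# When a filename is passed to save(), the '{0}' and '{1}' placeholders are
# replaced by the current date and time, as implemented above.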
class ExperimentRequestHandler(BaseHTTPRequestHandler):
"""
Renders HTML showing running and finished experiments.
"""
xpck_path = ''
running = {}
finished = {}
def do_GET(self):
"""
Renders HTML displaying running and saved experiments.
"""
# number of bars representing progress
max_bars = 20
if self.path == '/version/':
self.send_response(200)
self.send_header('Content-type', 'text/plain')
self.end_headers()
self.wfile.write('Experiment {0}'.format(__version__))
elif self.path.startswith('/running/'):
id = int([s for s in self.path.split('/') if s != ''][-1])
# display running experiment
if id in ExperimentRequestHandler.running:
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write(HTML_HEADER)
self.wfile.write('<h2>Experiment</h2>')
instance = ExperimentRequestHandler.running[id]
num_bars = int(instance['progress']) * max_bars / 100
self.wfile.write('<table>')
self.wfile.write('<tr><th>Experiment:</th><td>{0}</td></tr>'.format(
os.path.join(instance['cwd'], instance['script_path'])))
self.wfile.write('<tr><th>Hostname:</th><td>{0}</td></tr>'.format(instance['hostname']))
self.wfile.write('<tr><th>Status:</th><td class="running">{0}</td></tr>'.format(instance['status']))
self.wfile.write('<tr><th>Progress:</th><td class="progress"><span class="bars">{0}</span>{1}</td></tr>'.format(
'|' * num_bars, '|' * (max_bars - num_bars)))
self.wfile.write('<tr><th>Start:</th><td>{0}</td></tr>'.format(
strftime('%a, %d %b %Y %H:%M:%S', localtime(instance['time']))))
self.wfile.write('<tr><th>Comment:</th><td>{0}</td></tr>'.format(
instance['comment'] if instance['comment'] else '-'))
self.wfile.write('</table>')
self.wfile.write('<h2>Script</h2>')
self.wfile.write('<pre>{0}</pre>'.format(instance['script']))
self.wfile.write(HTML_FOOTER)
elif id in ExperimentRequestHandler.finished:
self.send_response(302)
self.send_header('Location', '/finished/{0}/'.format(id))
self.end_headers()
else:
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write(HTML_HEADER)
self.wfile.write('<h2>404</h2>')
self.wfile.write('Requested experiment not found.')
self.wfile.write(HTML_FOOTER)
elif self.path.startswith('/finished/'):
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write(HTML_HEADER)
id = int([s for s in self.path.split('/') if s != ''][-1])
# display finished experiment
if id in ExperimentRequestHandler.finished:
instance = ExperimentRequestHandler.finished[id]
if id in ExperimentRequestHandler.running:
progress = ExperimentRequestHandler.running[id]['progress']
else:
progress = 100
num_bars = int(progress) * max_bars / 100
self.wfile.write('<h2>Experiment</h2>')
self.wfile.write('<table>')
self.wfile.write('<tr><th>Experiment:</th><td>{0}</td></tr>'.format(
os.path.join(instance['cwd'], instance['script_path'])))
self.wfile.write('<tr><th>Results:</th><td>{0}</td></tr>'.format(
os.path.join(instance['cwd'], instance['filename'])))
self.wfile.write('<tr><th>Status:</th><td class="finished">{0}</td></tr>'.format(instance['status']))
self.wfile.write('<tr><th>Progress:</th><td class="progress"><span class="bars">{0}</span>{1}</td></tr>'.format(
'|' * num_bars, '|' * (max_bars - num_bars)))
self.wfile.write('<tr><th>Start:</th><td>{0}</td></tr>'.format(
strftime('%a, %d %b %Y %H:%M:%S', localtime(instance['time']))))
self.wfile.write('<tr><th>End:</th><td>{0}</td></tr>'.format(
					strftime('%a, %d %b %Y %H:%M:%S', localtime(instance['time'] + instance['duration']))))
self.wfile.write('<tr><th>Comment:</th><td>{0}</td></tr>'.format(
instance['comment'] if instance['comment'] else '-'))
self.wfile.write('</table>')
self.wfile.write('<h2>Results</h2>')
try:
experiment = Experiment(os.path.join(instance['cwd'], instance['filename']))
except:
self.wfile.write('Could not open file.')
else:
self.wfile.write('<table>')
for key, value in experiment.results.items():
self.wfile.write('<tr><th>{0}</th><td>{1}</td></tr>'.format(key, value))
self.wfile.write('</table>')
self.wfile.write('<h2>Script</h2>')
self.wfile.write('<pre>{0}</pre>'.format(instance['script']))
else:
self.wfile.write('<h2>404</h2>')
self.wfile.write('Requested experiment not found.')
self.wfile.write(HTML_FOOTER)
else:
files = []
if 'xpck_path' in ExperimentRequestHandler.__dict__:
if ExperimentRequestHandler.xpck_path != '':
for path in ExperimentRequestHandler.xpck_path.split(':'):
files += [os.path.join(path, f) for f in os.listdir(path) if f.endswith('.xpck')]
if 'XPCK_PATH' in os.environ:
for path in os.environ['XPCK_PATH'].split(':'):
files += [os.path.join(path, f) for f in os.listdir(path) if f.endswith('.xpck')]
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write(HTML_HEADER)
self.wfile.write('<h2>Running</h2>')
# display running experiments
if ExperimentRequestHandler.running:
self.wfile.write('<table>')
self.wfile.write('<tr>')
self.wfile.write('<th>Experiment</th>')
self.wfile.write('<th>Hostname</th>')
self.wfile.write('<th>Status</th>')
self.wfile.write('<th>Progress</th>')
self.wfile.write('<th>Start</th>')
self.wfile.write('<th>Comment</th>')
self.wfile.write('</tr>')
# sort ids by start time of experiment
times = [instance['time'] for instance in ExperimentRequestHandler.running.values()]
ids = ExperimentRequestHandler.running.keys()
ids = [ids[i] for i in argsort(times)][::-1]
for id in ids:
instance = ExperimentRequestHandler.running[id]
num_bars = int(instance['progress']) * max_bars / 100
self.wfile.write('<tr>')
self.wfile.write('<td class="filepath"><a href="/running/{1}/">{0}</a></td>'.format(
instance['script_path'], instance['id']))
self.wfile.write('<td>{0}</td>'.format(instance['hostname']))
self.wfile.write('<td class="running">{0}</td>'.format(instance['status']))
self.wfile.write('<td class="progress"><span class="bars">{0}</span>{1}</td>'.format(
'|' * num_bars, '|' * (max_bars - num_bars)))
self.wfile.write('<td>{0}</td>'.format(strftime('%a, %d %b %Y %H:%M:%S',
localtime(instance['time']))))
self.wfile.write('<td class="comment">{0}</td>'.format(
instance['comment'] if instance['comment'] else '-'))
self.wfile.write('</tr>')
self.wfile.write('</table>')
else:
self.wfile.write('No running experiments.')
self.wfile.write('<h2>Saved</h2>')
# display saved experiments
if ExperimentRequestHandler.finished:
self.wfile.write('<table>')
self.wfile.write('<tr>')
self.wfile.write('<th>Results</th>')
self.wfile.write('<th>Status</th>')
self.wfile.write('<th>Progress</th>')
self.wfile.write('<th>Start</th>')
self.wfile.write('<th>End</th>')
self.wfile.write('<th>Comment</th>')
self.wfile.write('</tr>')
# sort ids by start time of experiment
times = [instance['time'] + instance['duration']
for instance in ExperimentRequestHandler.finished.values()]
ids = ExperimentRequestHandler.finished.keys()
ids = [ids[i] for i in argsort(times)][::-1]
for id in ids:
instance = ExperimentRequestHandler.finished[id]
if id in ExperimentRequestHandler.running:
progress = ExperimentRequestHandler.running[id]['progress']
else:
progress = 100
num_bars = int(progress) * max_bars / 100
self.wfile.write('<tr>')
self.wfile.write('<td class="filepath"><a href="/finished/{1}/">{0}</a></td>'.format(
instance['filename'], instance['id']))
self.wfile.write('<td class="finished">saved</td>')
self.wfile.write('<td class="progress"><span class="bars">{0}</span>{1}</td>'.format(
'|' * num_bars, '|' * (max_bars - num_bars)))
self.wfile.write('<td>{0}</td>'.format(strftime('%a, %d %b %Y %H:%M:%S',
localtime(instance['time']))))
self.wfile.write('<td>{0}</td>'.format(strftime('%a, %d %b %Y %H:%M:%S',
localtime(instance['time'] + instance['duration']))))
self.wfile.write('<td class="comment">{0}</td>'.format(
instance['comment'] if instance['comment'] else '-'))
self.wfile.write('</tr>')
self.wfile.write('</table>')
else:
self.wfile.write('No saved experiments.')
self.wfile.write(HTML_FOOTER)
def do_POST(self):
instances = ExperimentRequestHandler.running
instance = eval(self.rfile.read(int(self.headers['Content-Length'])))
		if instance['status'] == 'PROGRESS':
if instance['id'] not in instances:
instances[instance['id']] = instance
instances[instance['id']]['status'] = 'running'
instances[instance['id']]['progress'] = instance['progress']
		elif instance['status'] == 'SAVE':
ExperimentRequestHandler.finished[instance['id']] = instance
ExperimentRequestHandler.finished[instance['id']]['status'] = 'saved'
else:
if instance['id'] in instances:
progress = instances[instance['id']]['progress']
else:
progress = 0
instances[instance['id']] = instance
instances[instance['id']]['progress'] = progress
if instance['status'] is None:
try:
del instances[instance['id']]
except:
pass
class XUnpickler(Unpickler):
"""
An extension of the Unpickler class which resolves some backwards
compatibility issues of Numpy.
"""
def find_class(self, module, name):
"""
Helps Unpickler to find certain Numpy modules.
"""
try:
numpy_version = StrictVersion(numpy.__version__)
if numpy_version >= '1.5.0':
if module == 'numpy.core.defmatrix':
module = 'numpy.matrixlib.defmatrix'
except ValueError:
pass
return Unpickler.find_class(self, module, name)
def load(file):
return XUnpickler(file).load()
def main(argv):
"""
Load and display experiment information.
"""
if len(argv) < 2:
print 'Usage:', argv[0], '[--server] [--port=<port>] [--path=<path>] [filename]'
return 0
optlist, argv = getopt(argv[1:], '', ['server', 'port=', 'path='])
optlist = dict(optlist)
if '--server' in optlist:
try:
ExperimentRequestHandler.xpck_path = optlist.get('--path', '')
			port = int(optlist.get('--port', 8000))
# start server
server = HTTPServer(('', port), ExperimentRequestHandler)
server.serve_forever()
except KeyboardInterrupt:
server.socket.close()
return 0
# load experiment
experiment = Experiment(sys.argv[1])
if len(argv) > 1:
# print arguments
for arg in argv[1:]:
try:
print experiment[arg]
except:
print experiment[int(arg)]
return 0
# print summary of experiment
print experiment
return 0
HTML_HEADER = '''<html>
<head>
<title>Experiments</title>
<style type="text/css">
body {
font-family: "Helvetica Neue", Helvetica, Arial, sans-serif;
font-size: 11pt;
color: black;
background: white;
padding: 0pt 20pt;
}
h2 {
margin-top: 20pt;
font-size: 16pt;
}
table {
border-collapse: collapse;
}
tr:nth-child(even) {
background: #f4f4f4;
}
th {
font-size: 12pt;
text-align: left;
padding: 2pt 10pt 3pt 0pt;
}
td {
font-size: 10pt;
padding: 3pt 10pt 2pt 0pt;
}
pre {
font-size: 10pt;
background: #f4f4f4;
padding: 5pt;
}
a {
text-decoration: none;
color: #04a;
}
.running {
color: #08b;
}
.finished {
color: #390;
}
.comment {
min-width: 200pt;
font-style: italic;
}
.progress {
color: #ccc;
}
.progress .bars {
color: black;
}
</style>
</head>
<body>'''
HTML_FOOTER = '''
</body>
</html>'''
if __name__ == '__main__':
sys.exit(main(sys.argv))
| jonasrauber/c2s | c2s/experiment.py | Python | mit | 21,962 |
import traceback
class EnsureExceptionHandledGuard:
"""Helper for ensuring that Future's exceptions were handled.
This solves a nasty problem with Futures and Tasks that have an
exception set: if nobody asks for the exception, the exception is
never logged. This violates the Zen of Python: 'Errors should
never pass silently. Unless explicitly silenced.'
However, we don't want to log the exception as soon as
set_exception() is called: if the calling code is written
properly, it will get the exception and handle it properly. But
we *do* want to log it if result() or exception() was never called
-- otherwise developers waste a lot of time wondering why their
buggy code fails silently.
An earlier attempt added a __del__() method to the Future class
itself, but this backfired because the presence of __del__()
prevents garbage collection from breaking cycles. A way out of
this catch-22 is to avoid having a __del__() method on the Future
class itself, but instead to have a reference to a helper object
with a __del__() method that logs the traceback, where we ensure
that the helper object doesn't participate in cycles, and only the
Future has a reference to it.
The helper object is added when set_exception() is called. When
the Future is collected, and the helper is present, the helper
object is also collected, and its __del__() method will log the
traceback. When the Future's result() or exception() method is
    called (and a helper object is present), it removes the helper
object, after calling its clear() method to prevent it from
logging.
One downside is that we do a fair amount of work to extract the
traceback from the exception, even when it is never logged. It
would seem cheaper to just store the exception object, but that
references the traceback, which references stack frames, which may
reference the Future, which references the _EnsureExceptionHandledGuard,
and then the _EnsureExceptionHandledGuard would be included in a cycle,
which is what we're trying to avoid! As an optimization, we don't
immediately format the exception; we only do the work when
activate() is called, which call is delayed until after all the
    Future's callbacks have run. Usually a Future has at least
    one callback (typically set by 'yield from'), and usually that
    callback extracts the exception, thereby removing the need to
    format it.
PS. I don't claim credit for this solution. I first heard of it
in a discussion about closing files when they are collected.
"""
__slots__ = ['exc', 'tb', 'hndl', 'cls']
def __init__(self, exc, handler):
self.exc = exc
self.hndl = handler
self.cls = type(exc)
self.tb = None
def activate(self):
exc = self.exc
if exc is not None:
self.exc = None
self.tb = traceback.format_exception(exc.__class__, exc,
exc.__traceback__)
def clear(self):
self.exc = None
self.tb = None
def __del__(self):
if self.tb:
self.hndl(self.cls, self.tb)
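# Illustrative sketch of how a Future implementation might use this guard; the
# attribute and handler names below are hypothetical, not part of this module:
#
#     def set_exception(self, exc):
#         self._exception = exc
#         # _log_unhandled(exc_class, tb_lines) is a hypothetical handler that
#         # receives the exception class and the formatted traceback lines.
#         self._exc_guard = EnsureExceptionHandledGuard(exc, self._log_unhandled)
#         self._schedule_callbacks()
#         # once all callbacks have run, the Future calls self._exc_guard.activate()
#
#     def exception(self):
#         if self._exc_guard is not None:
#             self._exc_guard.clear()  # exception was retrieved; don't log it
#             self._exc_guard = None
#         return self._exception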
| mikhtonyuk/rxpython | concurrent/futures/cooperative/ensure_exception_handled.py | Python | mit | 3,261 |
__package__ = 'archivebox.core'
| pirate/bookmark-archiver | archivebox/core/__init__.py | Python | mit | 32 |
import logging.handlers
import os
_pabotlog = logging.getLogger('PABot')
_pabotlog.setLevel(logging.DEBUG)
_logPath = os.path.abspath("./logging/pabot.log")
_formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(name)s - %(message)s')
_consoleStreamHandler = logging.StreamHandler()
_consoleStreamHandler.setLevel(logging.DEBUG)
_consoleStreamHandler.setFormatter(_formatter)
_symLogRotFileHandler = logging.handlers.RotatingFileHandler(_logPath, maxBytes=2000000, backupCount=5)
_symLogRotFileHandler.setLevel(logging.DEBUG)
_symLogRotFileHandler.setFormatter(_formatter)
_pabotlog.addHandler(_consoleStreamHandler)
_pabotlog.addHandler(_symLogRotFileHandler)
def LogPABotMessage(message):
_pabotlog.info(message)
def LogPABotError(message):
_pabotlog.error(message)
| KevinJMcGrath/Symphony-Ares | modules/plugins/PABot/logging.py | Python | mit | 796 |
import ast
import heisenberg.library.heisenberg_dynamics_context
import heisenberg.library.orbit_plot
import heisenberg.option_parser
import heisenberg.plot
import heisenberg.util
import matplotlib
import numpy as np
import sys
# https://github.com/matplotlib/matplotlib/issues/5907 says this should fix "Exceeded cell block limit" problems
matplotlib.rcParams['agg.path.chunksize'] = 10000
dynamics_context = heisenberg.library.heisenberg_dynamics_context.Numeric()
op = heisenberg.option_parser.OptionParser(module=heisenberg.plot)
# Add the subprogram-specific options here.
op.add_option(
'--initial-preimage',
dest='initial_preimage',
type='string',
help='Specifies the preimage of the initial conditions with respect to the embedding map specified by the --embedding-dimension and --embedding-solution-sheet-index option values. Should have the form [x_1,...,x_n], where n is the embedding dimension and x_i is a floating point literal for each i.'
)
op.add_option(
'--initial',
dest='initial',
type='string',
help='Specifies the initial conditions [x,y,z,p_x,p_y,p_z], where each of x,y,z,p_x,p_y,p_z are floating point literals.'
)
op.add_option(
'--optimization-iterations',
dest='optimization_iterations',
default=1000,
type='int',
help='Specifies the number of iterations to run the optimization for (if applicable). Default is 1000.'
)
op.add_option(
'--optimize-initial',
dest='optimize_initial',
action='store_true',
default=False,
help='Indicates that the specified initial condition (via whichever of the --initial... options) should be used as the starting point for an optimization to attempt to close the orbit. Default value is False.'
)
op.add_option(
'--output-dir',
dest='output_dir',
default='.',
help='Specifies the directory to write plot images and data files to. Default is current directory.'
)
op.add_option(
'--disable-plot-initial',
dest='disable_plot_initial',
action='store_true',
default=False,
help='Disables plotting the initial curve; only has effect if --optimize-initial is specified.'
)
options,args = op.parse_argv_and_validate()
if options is None:
sys.exit(-1)
num_initial_conditions_specified = sum([
options.initial_preimage is not None,
options.initial is not None,
])
if num_initial_conditions_specified != 1:
    print('Exactly one of the initial condition options --initial-preimage or --initial must be specified; however, {0} of those were specified.'.format(num_initial_conditions_specified))
op.print_help()
sys.exit(-1)
# Validate subprogram-specific options here.
# Attempt to parse initial conditions. Upon success, the attribute options.qp_0 should exist.
if options.initial_preimage is not None:
try:
options.initial_preimage = np.array(ast.literal_eval(options.initial_preimage))
expected_shape = (options.embedding_dimension,)
if options.initial_preimage.shape != expected_shape:
raise ValueError('--initial-preimage value had the wrong number of components (got {0} but expected {1}).'.format(options.initial_preimage.shape, expected_shape))
options.qp_0 = dynamics_context.embedding(N=options.embedding_dimension, sheet_index=options.embedding_solution_sheet_index)(options.initial_preimage)
except Exception as e:
print('error parsing --initial-preimage value; error was {0}'.format(e))
op.print_help()
sys.exit(-1)
elif options.initial is not None:
try:
options.initial = heisenberg.util.csv_as_ndarray(heisenberg.util.pop_brackets_off_of(options.initial), float)
expected_shape = (6,)
if options.initial.shape != expected_shape:
raise ValueError('--initial value had the wrong number of components (got {0} but expected {1}).'.format(options.initial.shape, expected_shape))
options.qp_0 = options.initial.reshape(2,3)
except ValueError as e:
print('error parsing --initial value: {0}'.format(str(e)))
op.print_help()
sys.exit(-1)
else:
assert False, 'this should never happen because of the check with num_initial_conditions_specified'
rng = np.random.RandomState(options.seed)
heisenberg.plot.plot(dynamics_context, options, rng=rng)
| vdods/heisenberg | heisenberg/plot/__main__.py | Python | mit | 4,283 |
import requests
# Send a test message through the Mailgun sandbox domain via the HTTP API.
def send_simple_message():
return requests.post(
"https://api.mailgun.net/v3/sandbox049ff464a4d54974bb0143935f9577ef.mailgun.org/messages",
auth=("api", "key-679dc79b890e700f11f001a6bf86f4a1"),
data={"from": "Mailgun Sandbox <postmaster@sandbox049ff464a4d54974bb0143935f9577ef.mailgun.org>",
"to": "nick <nicorellius@gmail.com>",
"subject": "Hello nick",
"text": "Congratulations nick, you just sent an email with Mailgun! You are truly awesome! You can see a record of this email in your logs: https://mailgun.com/cp/log . You can send up to 300 emails/day from this sandbox server. Next, you should add your own domain so you can send 10,000 emails/month for free."})
# cURL command to send mail aith API key
# curl -s --user 'api:key-679dc79b890e700f11f001a6bf86f4a1' \
# https://api.mailgun.net/v3/mail.pdxpixel.com/messages \
# -F from='Excited User <mailgun@pdxpixel.com>' \
# -F to=nick@pdxpixel.com \
# -F subject='Hello' \
# -F text='Testing some Mailgun awesomness!'
| nicorellius/pdxpixel | pdxpixel/core/mailgun.py | Python | mit | 1,073 |
def load_keys(filepath):
"""
Loads the Twitter API keys into a dict.
:param filepath: file path to config file with Twitter API keys.
:return: keys_dict
:raise: IOError
"""
try:
keys_file = open(filepath, 'rb')
keys = {}
for line in keys_file:
key, value = line.split('=')
keys[key.strip()] = value.strip()
except IOError:
        message = ('File {} cannot be opened.'
                   ' Check that it exists and is readable.')
print message.format(filepath)
raise
except:
print "Error opening or unpickling file."
raise
return keys
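# Illustrative config contents and usage (the key names here are assumptions;
# any "name = value" lines are accepted):
#
#   consumer_key = abc123
#   consumer_secret = def456
#
#   keys = load_keys('twitter_keys.cfg')
#   # -> {'consumer_key': 'abc123', 'consumer_secret': 'def456'}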
| nhatbui/LebronCoin | lebroncoin/key_loader.py | Python | mit | 654 |
"""Main entry points for scripts."""
from __future__ import print_function, division
from argparse import ArgumentParser
from collections import OrderedDict
from copy import copy
from datetime import datetime
import glob
import json
import logging
import math
import os
import scipy.stats
import numpy as np
from .version import __version__
from .psffuncs import gaussian_moffat_psf
from .psf import TabularPSF, GaussianMoffatPSF
from .io import read_datacube, write_results, read_results
from .fitting import (guess_sky, fit_galaxy_single, fit_galaxy_sky_multi,
fit_position_sky, fit_position_sky_sn_multi,
RegularizationPenalty)
from .utils import yxbounds
from .extern import ADR, Hyper_PSF3D_PL
__all__ = ["cubefit", "cubefit_subtract", "cubefit_plot"]
MODEL_SHAPE = (32, 32)
SPAXEL_SIZE = 0.43
MIN_NMAD = 2.5 # Minimum Number of Median Absolute Deviations above
# the minimum spaxel value in fit_position
LBFGSB_FACTOR = 1e10
REFWAVE = 5000. # reference wavelength in Angstroms for PSF params and ADR
POSITION_BOUND = 3. # Bound on fitted positions relative to initial positions
def snfpsf(wave, psfparams, header, psftype):
"""Create a 3-d PSF based on SNFactory-specific parameterization of
Gaussian + Moffat PSF parameters and ADR."""
# Get Gaussian+Moffat parameters at each wavelength.
relwave = wave / REFWAVE - 1.0
ellipticity = abs(psfparams[0]) * np.ones_like(wave)
alpha = np.abs(psfparams[1] +
psfparams[2] * relwave +
psfparams[3] * relwave**2)
# correlated parameters (coefficients determined externally)
sigma = 0.545 + 0.215 * alpha # Gaussian parameter
beta = 1.685 + 0.345 * alpha # Moffat parameter
eta = 1.040 + 0.0 * alpha # gaussian ampl. / moffat ampl.
# Atmospheric differential refraction (ADR): Because of ADR,
# the center of the PSF will be different at each wavelength,
# by an amount that we can determine (pretty well) from the
# atmospheric conditions and the pointing and angle of the
# instrument. We calculate the offsets here as a function of
# observation and wavelength and input these to the model.
# Correction to parallactic angle and airmass for 2nd-order effects
# such as MLA rotation, mechanical flexures or finite-exposure
# corrections. These values have been trained on faint-std star
# exposures.
#
# `predict_adr_params` uses 'AIRMASS', 'PARANG' and 'CHANNEL' keys
# in input dictionary.
delta, theta = Hyper_PSF3D_PL.predict_adr_params(header)
# check for crazy values of pressure and temperature, and assign default
# values.
pressure = header.get('PRESSURE', 617.)
if not 550. < pressure < 650.:
pressure = 617.
temp = header.get('TEMP', 2.)
if not -20. < temp < 20.:
temp = 2.
adr = ADR(pressure, temp, lref=REFWAVE, delta=delta, theta=theta)
adr_refract = adr.refract(0, 0, wave, unit=SPAXEL_SIZE)
# adr_refract[0, :] corresponds to x, adr_refract[1, :] => y
xctr, yctr = adr_refract
if psftype == 'gaussian-moffat':
return GaussianMoffatPSF(sigma, alpha, beta, ellipticity, eta,
yctr, xctr, MODEL_SHAPE, subpix=3)
elif psftype == 'tabular':
A = gaussian_moffat_psf(sigma, alpha, beta, ellipticity, eta,
yctr, xctr, MODEL_SHAPE, subpix=3)
return TabularPSF(A)
else:
raise ValueError("unknown psf type: " + repr(psftype))
def setup_logging(loglevel, logfname=None):
# if loglevel isn't an integer, parse it as "debug", "info", etc:
if not isinstance(loglevel, int):
loglevel = getattr(logging, loglevel.upper(), None)
if not isinstance(loglevel, int):
print('Invalid log level: %s' % loglevel)
exit(1)
# remove logfile if it already exists
if logfname is not None and os.path.exists(logfname):
os.remove(logfname)
logging.basicConfig(filename=logfname, format="%(levelname)s %(message)s",
level=loglevel)
def cubefit(argv=None):
DESCRIPTION = "Fit SN + galaxy model to SNFactory data cubes."
parser = ArgumentParser(prog="cubefit", description=DESCRIPTION)
parser.add_argument("configfile",
help="configuration file name (JSON format)")
parser.add_argument("outfile", help="Output file name (FITS format)")
parser.add_argument("--dataprefix", default="",
help="path prepended to data file names; default is "
"empty string")
parser.add_argument("--logfile", help="Write log to this file "
"(default: print to stdout)", default=None)
parser.add_argument("--loglevel", default="info",
help="one of: debug, info, warning (default is info)")
parser.add_argument("--diagdir", default=None,
help="If given, write intermediate diagnostic results "
"to this directory")
parser.add_argument("--refitgal", default=False, action="store_true",
help="Add an iteration where galaxy model is fit "
"using all epochs and then data/SN positions are "
"refit")
parser.add_argument("--mu_wave", default=0.07, type=float,
help="Wavelength regularization parameter. "
"Default is 0.07.")
parser.add_argument("--mu_xy", default=0.001, type=float,
help="Spatial regularization parameter. "
"Default is 0.001.")
parser.add_argument("--psftype", default="gaussian-moffat",
help="Type of PSF: 'gaussian-moffat' or 'tabular'. "
"Currently, tabular means generate a tabular PSF from "
"gaussian-moffat parameters.")
args = parser.parse_args(argv)
setup_logging(args.loglevel, logfname=args.logfile)
# record start time
tstart = datetime.now()
logging.info("cubefit v%s started at %s", __version__,
tstart.strftime("%Y-%m-%d %H:%M:%S"))
tsteps = OrderedDict() # finish time of each step.
logging.info("parameters: mu_wave={:.3g} mu_xy={:.3g} refitgal={}"
.format(args.mu_wave, args.mu_xy, args.refitgal))
logging.info(" psftype={}".format(args.psftype))
logging.info("reading config file")
with open(args.configfile) as f:
cfg = json.load(f)
# basic checks on config contents.
assert (len(cfg["filenames"]) == len(cfg["xcenters"]) ==
len(cfg["ycenters"]) == len(cfg["psf_params"]))
# -------------------------------------------------------------------------
# Load data cubes from the list of FITS files.
nt = len(cfg["filenames"])
logging.info("reading %d data cubes", nt)
cubes = []
for fname in cfg["filenames"]:
logging.debug(" reading %s", fname)
cubes.append(read_datacube(os.path.join(args.dataprefix, fname)))
wave = cubes[0].wave
nw = len(wave)
# assign some local variables for convenience
refs = cfg["refs"]
master_ref = cfg["master_ref"]
if master_ref not in refs:
raise ValueError("master ref choice must be one of the final refs (" +
" ".join(refs.astype(str)) + ")")
nonmaster_refs = [i for i in refs if i != master_ref]
nonrefs = [i for i in range(nt) if i not in refs]
# Ensure that all cubes have the same wavelengths.
if not all(np.all(cubes[i].wave == wave) for i in range(1, nt)):
raise ValueError("all data must have same wavelengths")
# -------------------------------------------------------------------------
# PSF for each observation
logging.info("setting up PSF for all %d epochs", nt)
psfs = [snfpsf(wave, cfg["psf_params"][i], cubes[i].header, args.psftype)
for i in range(nt)]
# -------------------------------------------------------------------------
# Initialize all model parameters to be fit
yctr0 = np.array(cfg["ycenters"])
xctr0 = np.array(cfg["xcenters"])
galaxy = np.zeros((nw, MODEL_SHAPE[0], MODEL_SHAPE[1]), dtype=np.float64)
sn = np.zeros((nt, nw), dtype=np.float64) # SN spectrum at each epoch
skys = np.zeros((nt, nw), dtype=np.float64) # Sky spectrum at each epoch
yctr = yctr0.copy()
xctr = xctr0.copy()
snctr = (0., 0.)
# For writing out to FITS
modelwcs = {"CRVAL1": -SPAXEL_SIZE * (MODEL_SHAPE[0] - 1) / 2.,
"CRPIX1": 1,
"CDELT1": SPAXEL_SIZE,
"CRVAL2": -SPAXEL_SIZE * (MODEL_SHAPE[1] - 1) / 2.,
"CRPIX2": 1,
"CDELT2": SPAXEL_SIZE,
"CRVAL3": cubes[0].header["CRVAL3"],
"CRPIX3": cubes[0].header["CRPIX3"],
"CDELT3": cubes[0].header["CDELT3"]}
# -------------------------------------------------------------------------
# Position bounds
# Bounds on data position: shape=(nt, 2)
xctrbounds = np.vstack((xctr - POSITION_BOUND, xctr + POSITION_BOUND)).T
yctrbounds = np.vstack((yctr - POSITION_BOUND, yctr + POSITION_BOUND)).T
snctrbounds = (-POSITION_BOUND, POSITION_BOUND)
# For data positions, check that bounds do not extend
# past the edge of the model and adjust the minbound and maxbound.
# This doesn't apply to SN position.
gshape = galaxy.shape[1:3] # model shape
for i in range(nt):
dshape = cubes[i].data.shape[1:3]
(yminabs, ymaxabs), (xminabs, xmaxabs) = yxbounds(gshape, dshape)
yctrbounds[i, 0] = max(yctrbounds[i, 0], yminabs)
yctrbounds[i, 1] = min(yctrbounds[i, 1], ymaxabs)
xctrbounds[i, 0] = max(xctrbounds[i, 0], xminabs)
xctrbounds[i, 1] = min(xctrbounds[i, 1], xmaxabs)
# -------------------------------------------------------------------------
# Guess sky
logging.info("guessing sky for all %d epochs", nt)
for i, cube in enumerate(cubes):
skys[i, :] = guess_sky(cube, npix=30)
# -------------------------------------------------------------------------
# Regularization penalty parameters
# Calculate rough average galaxy spectrum from all final refs.
spectra = np.zeros((len(refs), len(wave)), dtype=np.float64)
for j, i in enumerate(refs):
avg_spec = np.average(cubes[i].data, axis=(1, 2)) - skys[i]
        mean_spec, bins, bn = scipy.stats.binned_statistic(wave, avg_spec,
                                                           bins=len(wave)//10)
spectra[j] = np.interp(wave, bins[:-1] + np.diff(bins)[0]/2.,
mean_spec)
mean_gal_spec = np.average(spectra, axis=0)
# Ensure that there won't be any negative or tiny values in mean:
mean_floor = 0.1 * np.median(mean_gal_spec)
mean_gal_spec[mean_gal_spec < mean_floor] = mean_floor
galprior = np.zeros((nw, MODEL_SHAPE[0], MODEL_SHAPE[1]), dtype=np.float64)
regpenalty = RegularizationPenalty(galprior, mean_gal_spec, args.mu_xy,
args.mu_wave)
tsteps["setup"] = datetime.now()
# -------------------------------------------------------------------------
# Fit just the galaxy model to just the master ref.
data = cubes[master_ref].data - skys[master_ref, :, None, None]
weight = cubes[master_ref].weight
logging.info("fitting galaxy to master ref [%d]", master_ref)
galaxy = fit_galaxy_single(galaxy, data, weight,
(yctr[master_ref], xctr[master_ref]),
psfs[master_ref], regpenalty, LBFGSB_FACTOR)
if args.diagdir:
fname = os.path.join(args.diagdir, 'step1.fits')
write_results(galaxy, skys, sn, snctr, yctr, xctr, yctr0, xctr0,
yctrbounds, xctrbounds, cubes, psfs, modelwcs, fname)
tsteps["fit galaxy to master ref"] = datetime.now()
# -------------------------------------------------------------------------
# Fit the positions of the other final refs
#
# Here we only use spaxels where the *model* has significant flux.
# We define "significant" as some number of median absolute deviations
# (MAD) above the minimum flux in the model. We (temporarily) set the
# weight of "insignificant" spaxels to zero during this process, then
# restore the original weight after we're done.
#
# If there are less than 20 "significant" spaxels, we do not attempt to
# fit the position, but simply leave it as is.
logging.info("fitting position of non-master refs %s", nonmaster_refs)
for i in nonmaster_refs:
cube = cubes[i]
# Evaluate galaxy on this epoch for purpose of masking spaxels.
gal = psfs[i].evaluate_galaxy(galaxy, (cube.ny, cube.nx),
(yctr[i], xctr[i]))
# Set weight of low-valued spaxels to zero.
gal2d = gal.sum(axis=0) # Sum of gal over wavelengths
mad = np.median(np.abs(gal2d - np.median(gal2d)))
mask = gal2d > np.min(gal2d) + MIN_NMAD * mad
if mask.sum() < 20:
continue
weight = cube.weight * mask[None, :, :]
fctr, fsky = fit_position_sky(galaxy, cube.data, weight,
(yctr[i], xctr[i]), psfs[i],
(yctrbounds[i], xctrbounds[i]))
yctr[i], xctr[i] = fctr
skys[i, :] = fsky
tsteps["fit positions of other refs"] = datetime.now()
# -------------------------------------------------------------------------
# Redo model fit, this time including all final refs.
datas = [cubes[i].data for i in refs]
weights = [cubes[i].weight for i in refs]
ctrs = [(yctr[i], xctr[i]) for i in refs]
psfs_refs = [psfs[i] for i in refs]
logging.info("fitting galaxy to all refs %s", refs)
galaxy, fskys = fit_galaxy_sky_multi(galaxy, datas, weights, ctrs,
psfs_refs, regpenalty, LBFGSB_FACTOR)
# put fitted skys back in `skys`
for i,j in enumerate(refs):
skys[j, :] = fskys[i]
if args.diagdir:
fname = os.path.join(args.diagdir, 'step2.fits')
write_results(galaxy, skys, sn, snctr, yctr, xctr, yctr0, xctr0,
yctrbounds, xctrbounds, cubes, psfs, modelwcs, fname)
tsteps["fit galaxy to all refs"] = datetime.now()
# -------------------------------------------------------------------------
# Fit position of data and SN in non-references
#
# Now we think we have a good galaxy model. We fix this and fit
# the relative position of the remaining epochs (which presumably
# all have some SN light). We simultaneously fit the position of
# the SN itself.
logging.info("fitting position of all %d non-refs and SN position",
len(nonrefs))
if len(nonrefs) > 0:
datas = [cubes[i].data for i in nonrefs]
weights = [cubes[i].weight for i in nonrefs]
psfs_nonrefs = [psfs[i] for i in nonrefs]
fyctr, fxctr, snctr, fskys, fsne = fit_position_sky_sn_multi(
galaxy, datas, weights, yctr[nonrefs], xctr[nonrefs],
snctr, psfs_nonrefs, LBFGSB_FACTOR, yctrbounds[nonrefs],
xctrbounds[nonrefs], snctrbounds)
# put fitted results back in parameter lists.
yctr[nonrefs] = fyctr
xctr[nonrefs] = fxctr
for i,j in enumerate(nonrefs):
skys[j, :] = fskys[i]
sn[j, :] = fsne[i]
tsteps["fit positions of nonrefs & SN"] = datetime.now()
# -------------------------------------------------------------------------
# optional step(s)
if args.refitgal and len(nonrefs) > 0:
if args.diagdir:
fname = os.path.join(args.diagdir, 'step3.fits')
write_results(galaxy, skys, sn, snctr, yctr, xctr, yctr0, xctr0,
yctrbounds, xctrbounds, cubes, psfs, modelwcs, fname)
# ---------------------------------------------------------------------
# Redo fit of galaxy, using ALL epochs, including ones with SN
# light. We hold the SN "fixed" simply by subtracting it from the
# data and fitting the remainder.
#
# This is slightly dangerous: any errors in the original SN
# determination, whether due to an incorrect PSF or ADR model
# or errors in the galaxy model will result in residuals. The
# galaxy model will then try to compensate for these.
#
# We should look at the galaxy model at the position of the SN
# before and after this step to see if there is a bias towards
# the galaxy flux increasing.
logging.info("fitting galaxy using all %d epochs", nt)
datas = [cube.data for cube in cubes]
weights = [cube.weight for cube in cubes]
ctrs = [(yctr[i], xctr[i]) for i in range(nt)]
# subtract SN from non-ref cubes.
for i in nonrefs:
s = psfs[i].point_source(snctr, datas[i].shape[1:3], ctrs[i])
# do *not* use in-place operation (-=) here!
datas[i] = cubes[i].data - sn[i, :, None, None] * s
galaxy, fskys = fit_galaxy_sky_multi(galaxy, datas, weights, ctrs,
psfs, regpenalty, LBFGSB_FACTOR)
for i in range(nt):
skys[i, :] = fskys[i] # put fitted skys back in skys
if args.diagdir:
fname = os.path.join(args.diagdir, 'step4.fits')
write_results(galaxy, skys, sn, snctr, yctr, xctr, yctr0, xctr0,
yctrbounds, xctrbounds, cubes, psfs, modelwcs, fname)
# ---------------------------------------------------------------------
# Repeat step before last: fit position of data and SN in
# non-references
logging.info("re-fitting position of all %d non-refs and SN position",
len(nonrefs))
if len(nonrefs) > 0:
datas = [cubes[i].data for i in nonrefs]
weights = [cubes[i].weight for i in nonrefs]
psfs_nonrefs = [psfs[i] for i in nonrefs]
fyctr, fxctr, snctr, fskys, fsne = fit_position_sky_sn_multi(
galaxy, datas, weights, yctr[nonrefs], xctr[nonrefs],
snctr, psfs_nonrefs, LBFGSB_FACTOR, yctrbounds[nonrefs],
xctrbounds[nonrefs], snctrbounds)
# put fitted results back in parameter lists.
yctr[nonrefs] = fyctr
xctr[nonrefs] = fxctr
for i, j in enumerate(nonrefs):
skys[j, :] = fskys[i]
sn[j, :] = fsne[i]
# -------------------------------------------------------------------------
# Write results
logging.info("writing results to %s", args.outfile)
write_results(galaxy, skys, sn, snctr, yctr, xctr, yctr0, xctr0,
yctrbounds, xctrbounds, cubes, psfs, modelwcs, args.outfile)
# time info
logging.info("step times:")
maxlen = max(len(key) for key in tsteps)
fmtstr = " %2dm%02ds - %-" + str(maxlen) + "s"
tprev = tstart
for key, tstep in tsteps.items():
t = (tstep - tprev).seconds
logging.info(fmtstr, t//60, t%60, key)
tprev = tstep
tfinish = datetime.now()
logging.info("finished at %s", tfinish.strftime("%Y-%m-%d %H:%M:%S"))
t = (tfinish - tstart).seconds
logging.info("took %3dm%2ds", t // 60, t % 60)
return 0
def cubefit_subtract(argv=None):
DESCRIPTION = \
"""Subtract model determined by cubefit from the original data.
The "outnames" key in the supplied configuration file is used to
determine the output FITS file names. The input FITS header is passed
unaltered to the output file, with the following additions:
(1) A `HISTORY` entry. (2) `CBFT_SNX` and `CBFT_SNY` records giving
the cubefit-determined position of the SN relative to the center of
the data array (at the reference wavelength).
This script also writes fitted SN spectra to individual FITS files.
The "sn_outnames" configuration field determines the output filenames.
"""
import shutil
import fitsio
prog_name = "cubefit-subtract"
prog_name_ver = "{} v{}".format(prog_name, __version__)
parser = ArgumentParser(prog=prog_name, description=DESCRIPTION)
parser.add_argument("configfile", help="configuration file name "
"(JSON format), same as cubefit input.")
parser.add_argument("resultfile", help="Result FITS file from cubefit")
parser.add_argument("--dataprefix", default="",
help="path prepended to data file names; default is "
"empty string")
parser.add_argument("--outprefix", default="",
help="path prepended to output file names; default is "
"empty string")
args = parser.parse_args(argv)
setup_logging("info")
# get input & output filenames
with open(args.configfile) as f:
cfg = json.load(f)
fnames = [os.path.join(args.dataprefix, fname)
for fname in cfg["filenames"]]
outfnames = [os.path.join(args.outprefix, fname)
for fname in cfg["outnames"]]
# load results
results = read_results(args.resultfile)
epochs = results["epochs"]
sny, snx = results["snctr"]
if not len(epochs) == len(fnames) == len(outfnames):
raise RuntimeError("number of epochs in result file not equal to "
"number of input and output files in config file")
# subtract and write out.
for fname, outfname, epoch in zip(fnames, outfnames, epochs):
logging.info("writing %s", outfname)
shutil.copy(fname, outfname)
f = fitsio.FITS(outfname, "rw")
data = f[0].read()
data -= epoch["galeval"]
f[0].write(data)
f[0].write_history("galaxy subtracted by " + prog_name_ver)
f[0].write_key("CBFT_SNX", snx - epoch['xctr'],
comment="SN x offset from center at {:.0f} A [spaxels]"
.format(REFWAVE))
f[0].write_key("CBFT_SNY", sny - epoch['yctr'],
comment="SN y offset from center at {:.0f} A [spaxels]"
.format(REFWAVE))
f.close()
# output SN spectra to separate files.
sn_outnames = [os.path.join(args.outprefix, fname)
for fname in cfg["sn_outnames"]]
header = {"CRVAL1": results["header"]["CRVAL3"],
"CRPIX1": results["header"]["CRPIX3"],
"CDELT1": results["header"]["CDELT3"]}
for outfname, epoch in zip(sn_outnames, epochs):
logging.info("writing %s", outfname)
if os.path.exists(outfname): # avoid warning from clobber=True
os.remove(outfname)
with fitsio.FITS(outfname, "rw") as f:
f.write(epoch["sn"], extname="sn", header=header)
f[0].write_history("created by " + prog_name_ver)
return 0
def cubefit_plot(argv=None):
DESCRIPTION = """Plot results and diagnostics from cubefit"""
from .plotting import plot_timeseries, plot_epoch, plot_sn, plot_adr
# arguments are the same as cubefit except an output
parser = ArgumentParser(prog="cubefit-plot", description=DESCRIPTION)
parser.add_argument("configfile", help="configuration filename")
parser.add_argument("resultfile", help="Result filename from cubefit")
parser.add_argument("outprefix", help="output prefix")
parser.add_argument("--dataprefix", default="",
help="path prepended to data file names; default is "
"empty string")
parser.add_argument('-b', '--band', help='timeseries band (U, B, V). '
'Default is a 1000 A wide band in middle of cube.',
default=None, dest='band')
parser.add_argument('--idrfiles', nargs='+', default=None,
help='Prefix of IDR. If given, the cubefit SN '
'spectra are plotted against the production values.')
parser.add_argument("--diagdir", default=None,
help="If given, read intermediate diagnostic "
"results from this directory and include in plot(s)")
parser.add_argument("--plotepochs", default=False, action="store_true",
help="Make diagnostic plots for each epoch")
args = parser.parse_args(argv)
# Read in data
with open(args.configfile) as f:
cfg = json.load(f)
cubes = [read_datacube(os.path.join(args.dataprefix, fname), scale=False)
for fname in cfg["filenames"]]
results = OrderedDict()
# Diagnostic results at each step
if args.diagdir is not None:
fnames = sorted(glob.glob(os.path.join(args.diagdir, "step*.fits")))
for fname in fnames:
name = os.path.basename(fname).split(".")[0]
results[name] = read_results(fname)
# Final result (don't fail if not available)
if os.path.exists(args.resultfile):
results["final"] = read_results(args.resultfile)
# plot time series
plot_timeseries(cubes, results, band=args.band,
fname=(args.outprefix + '_timeseries.png'))
# Plot wave slices and sn, galaxy and sky spectra for all epochs.
if 'final' in results and args.plotepochs:
for i_t in range(len(cubes)):
plot_epoch(cubes[i_t], results['final']['epochs'][i_t],
fname=(args.outprefix + '_epoch%02d.png' % i_t))
# Plot result spectra against IDR spectra.
if 'final' in results and args.idrfiles is not None:
plot_sn(cfg['filenames'], results['final']['epochs']['sn'],
results['final']['wave'], args.idrfiles,
args.outprefix + '_sn.png')
# Plot the x-y coordinates of the adr versus wavelength
# (Skip this for now; contains no interesting information)
#plot_adr(cubes, cubes[0].wave, fname=(args.outprefix + '_adr.png'))
return 0
| snfactory/cubefit | cubefit/main.py | Python | mit | 26,267 |
# -*- coding: utf-8 -*-
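# Greedy change decomposition: read a monetary value and print how many
# banknotes (100, 50, 20, 10, 5, 2) and coins (1.00 down to 0.01) compose it.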
def calc_note(count, value):
qnt = 0
if count >= value:
qnt = int(count) / value
print '%d nota(s) de R$ %d.00' % (qnt, value)
return count - qnt * value
n = float(raw_input())
print 'NOTAS:'
n = calc_note(n, 100)
n = calc_note(n, 50)
n = calc_note(n, 20)
n = calc_note(n, 10)
n = calc_note(n, 5)
n = calc_note(n, 2)
print 'MOEDAS:'
print '%d moeda(s) de R$ 1.00' % int(n)
n -= int(n)
m50 = n / 0.50
print '%d moeda(s) de R$ 0.50' % m50
n -= int(m50) * 0.50
m25 = n / 0.25
print '%d moeda(s) de R$ 0.25' % m25
n -= int(m25) * 0.25
m10 = n / 0.10
print '%d moeda(s) de R$ 0.10' % m10
n -= int(m10) * 0.10
if round(n, 2) >= 0.05:
print '1 moeda(s) de R$ 0.05'
m1 = (n - 0.05) * 100
else:
print '0 moeda(s) de R$ 0.05'
m1 = round(n, 2) * 100
if round(m1, 0):
print '%.0f moeda(s) de R$ 0.01' % m1
else:
print '0 moeda(s) de R$ 0.01'
| vicenteneto/online-judge-solutions | URI/1-Beginner/1021.py | Python | mit | 907 |
import datetime
import time
import boto
import redis
import requests
import random
import zlib
from django.shortcuts import get_object_or_404
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.template.loader import render_to_string
from django.db import IntegrityError
from django.db.models import Q
from django.views.decorators.cache import never_cache
from django.core.urlresolvers import reverse
from django.contrib.auth import login as login_user
from django.contrib.auth import logout as logout_user
from django.contrib.auth.models import User
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseForbidden, Http404
from django.conf import settings
from django.core.mail import mail_admins
from django.core.validators import email_re
from django.core.mail import EmailMultiAlternatives
from django.contrib.sites.models import Site
from django.utils import feedgenerator
from mongoengine.queryset import OperationError
from mongoengine.queryset import NotUniqueError
from apps.recommendations.models import RecommendedFeed
from apps.analyzer.models import MClassifierTitle, MClassifierAuthor, MClassifierFeed, MClassifierTag
from apps.analyzer.models import apply_classifier_titles, apply_classifier_feeds
from apps.analyzer.models import apply_classifier_authors, apply_classifier_tags
from apps.analyzer.models import get_classifiers_for_user, sort_classifiers_by_feed
from apps.profile.models import Profile
from apps.reader.models import UserSubscription, UserSubscriptionFolders, RUserStory, Feature
from apps.reader.forms import SignupForm, LoginForm, FeatureForm
from apps.rss_feeds.models import MFeedIcon, MStarredStoryCounts
from apps.search.models import MUserSearch
from apps.statistics.models import MStatistics
# from apps.search.models import SearchStarredStory
try:
from apps.rss_feeds.models import Feed, MFeedPage, DuplicateFeed, MStory, MStarredStory
except:
pass
from apps.social.models import MSharedStory, MSocialProfile, MSocialServices
from apps.social.models import MSocialSubscription, MActivity, MInteraction
from apps.categories.models import MCategory
from apps.social.views import load_social_page
from apps.rss_feeds.tasks import ScheduleImmediateFetches
from utils import json_functions as json
from utils.user_functions import get_user, ajax_login_required
from utils.feed_functions import relative_timesince
from utils.story_functions import format_story_link_date__short
from utils.story_functions import format_story_link_date__long
from utils.story_functions import strip_tags
from utils import log as logging
from utils.view_functions import get_argument_or_404, render_to, is_true
from utils.view_functions import required_params
from utils.ratelimit import ratelimit
from vendor.timezones.utilities import localtime_for_timezone
BANNED_URLS = [
"brentozar.com",
]
@never_cache
@render_to('reader/dashboard.xhtml')
def index(request, **kwargs):
if request.method == "GET" and request.subdomain and request.subdomain not in ['dev', 'www', 'debug']:
username = request.subdomain
try:
if '.' in username:
username = username.split('.')[0]
user = User.objects.get(username__iexact=username)
except User.DoesNotExist:
return HttpResponseRedirect('http://%s%s' % (
Site.objects.get_current().domain,
reverse('index')))
return load_social_page(request, user_id=user.pk, username=request.subdomain, **kwargs)
if request.user.is_anonymous():
return welcome(request, **kwargs)
else:
return dashboard(request, **kwargs)
def dashboard(request, **kwargs):
user = request.user
feed_count = UserSubscription.objects.filter(user=request.user).count()
recommended_feeds = RecommendedFeed.objects.filter(is_public=True,
approved_date__lte=datetime.datetime.now()
).select_related('feed')[:2]
unmoderated_feeds = []
if user.is_staff:
unmoderated_feeds = RecommendedFeed.objects.filter(is_public=False,
declined_date__isnull=True
).select_related('feed')[:2]
statistics = MStatistics.all()
social_profile = MSocialProfile.get_user(user.pk)
start_import_from_google_reader = request.session.get('import_from_google_reader', False)
if start_import_from_google_reader:
del request.session['import_from_google_reader']
if not user.is_active:
url = "https://%s%s" % (Site.objects.get_current().domain,
reverse('stripe-form'))
return HttpResponseRedirect(url)
logging.user(request, "~FBLoading dashboard")
return {
'user_profile' : user.profile,
'feed_count' : feed_count,
'account_images' : range(1, 4),
'recommended_feeds' : recommended_feeds,
'unmoderated_feeds' : unmoderated_feeds,
'statistics' : statistics,
'social_profile' : social_profile,
'start_import_from_google_reader': start_import_from_google_reader,
'debug' : settings.DEBUG,
}, "reader/dashboard.xhtml"
def welcome(request, **kwargs):
user = get_user(request)
statistics = MStatistics.all()
social_profile = MSocialProfile.get_user(user.pk)
if request.method == "POST":
if request.POST.get('submit', '').startswith('log'):
login_form = LoginForm(request.POST, prefix='login')
signup_form = SignupForm(prefix='signup')
else:
login_form = LoginForm(prefix='login')
signup_form = SignupForm(request.POST, prefix='signup')
else:
login_form = LoginForm(prefix='login')
signup_form = SignupForm(prefix='signup')
logging.user(request, "~FBLoading welcome")
return {
'user_profile' : hasattr(user, 'profile') and user.profile,
'login_form' : login_form,
'signup_form' : signup_form,
'statistics' : statistics,
'social_profile' : social_profile,
'post_request' : request.method == 'POST',
}, "reader/welcome.xhtml"
@never_cache
def login(request):
code = -1
message = ""
if request.method == "POST":
form = LoginForm(request.POST, prefix='login')
if form.is_valid():
login_user(request, form.get_user())
if request.POST.get('api'):
logging.user(form.get_user(), "~FG~BB~SKiPhone Login~FW")
code = 1
else:
logging.user(form.get_user(), "~FG~BBLogin~FW")
return HttpResponseRedirect(reverse('index'))
else:
message = form.errors.items()[0][1][0]
if request.POST.get('api'):
return HttpResponse(json.encode(dict(code=code, message=message)), mimetype='application/json')
else:
return index(request)
@never_cache
def signup(request):
if request.method == "POST":
form = SignupForm(prefix='signup', data=request.POST)
if form.is_valid():
new_user = form.save()
login_user(request, new_user)
logging.user(new_user, "~FG~SB~BBNEW SIGNUP: ~FW%s" % new_user.email)
if not new_user.is_active:
url = "https://%s%s" % (Site.objects.get_current().domain,
reverse('stripe-form'))
return HttpResponseRedirect(url)
return index(request)
@never_cache
def logout(request):
logging.user(request, "~FG~BBLogout~FW")
logout_user(request)
if request.GET.get('api'):
return HttpResponse(json.encode(dict(code=1)), mimetype='application/json')
else:
return HttpResponseRedirect(reverse('index'))
def autologin(request, username, secret):
next = request.GET.get('next', '')
if not username or not secret:
return HttpResponseForbidden()
profile = Profile.objects.filter(user__username=username, secret_token=secret)
if not profile:
return HttpResponseForbidden()
user = profile[0].user
user.backend = settings.AUTHENTICATION_BACKENDS[0]
login_user(request, user)
logging.user(user, "~FG~BB~SKAuto-Login. Next stop: %s~FW" % (next if next else 'Homepage',))
if next and not next.startswith('/'):
next = '?next=' + next
return HttpResponseRedirect(reverse('index') + next)
elif next:
return HttpResponseRedirect(next)
else:
return HttpResponseRedirect(reverse('index'))
@ratelimit(minutes=1, requests=24)
@never_cache
@json.json_view
def load_feeds(request):
user = get_user(request)
feeds = {}
include_favicons = request.REQUEST.get('include_favicons', False)
flat = request.REQUEST.get('flat', False)
update_counts = request.REQUEST.get('update_counts', False)
version = int(request.REQUEST.get('v', 1))
if include_favicons == 'false': include_favicons = False
if update_counts == 'false': update_counts = False
if flat == 'false': flat = False
if flat: return load_feeds_flat(request)
try:
folders = UserSubscriptionFolders.objects.get(user=user)
except UserSubscriptionFolders.DoesNotExist:
data = dict(feeds=[], folders=[])
return data
except UserSubscriptionFolders.MultipleObjectsReturned:
UserSubscriptionFolders.objects.filter(user=user)[1:].delete()
folders = UserSubscriptionFolders.objects.get(user=user)
user_subs = UserSubscription.objects.select_related('feed').filter(user=user)
day_ago = datetime.datetime.now() - datetime.timedelta(days=1)
scheduled_feeds = []
for sub in user_subs:
pk = sub.feed_id
if update_counts and sub.needs_unread_recalc:
sub.calculate_feed_scores(silent=True)
feeds[pk] = sub.canonical(include_favicon=include_favicons)
if not sub.active: continue
if not sub.feed.active and not sub.feed.has_feed_exception:
scheduled_feeds.append(sub.feed.pk)
elif sub.feed.active_subscribers <= 0:
scheduled_feeds.append(sub.feed.pk)
elif sub.feed.next_scheduled_update < day_ago:
scheduled_feeds.append(sub.feed.pk)
if len(scheduled_feeds) > 0 and request.user.is_authenticated():
logging.user(request, "~SN~FMTasking the scheduling immediate fetch of ~SB%s~SN feeds..." %
len(scheduled_feeds))
ScheduleImmediateFetches.apply_async(kwargs=dict(feed_ids=scheduled_feeds, user_id=user.pk))
starred_counts, starred_count = MStarredStoryCounts.user_counts(user.pk, include_total=True)
if not starred_count and len(starred_counts):
starred_count = MStarredStory.objects(user_id=user.pk).count()
social_params = {
'user_id': user.pk,
'include_favicon': include_favicons,
'update_counts': update_counts,
}
social_feeds = MSocialSubscription.feeds(**social_params)
social_profile = MSocialProfile.profile(user.pk)
social_services = MSocialServices.profile(user.pk)
categories = None
if not user_subs:
categories = MCategory.serialize()
logging.user(request, "~FB~SBLoading ~FY%s~FB/~FM%s~FB feeds/socials%s" % (
len(feeds.keys()), len(social_feeds), '. ~FCUpdating counts.' if update_counts else ''))
data = {
'feeds': feeds.values() if version == 2 else feeds,
'social_feeds': social_feeds,
'social_profile': social_profile,
'social_services': social_services,
'user_profile': user.profile,
"is_staff": user.is_staff,
'folders': json.decode(folders.folders),
'starred_count': starred_count,
'starred_counts': starred_counts,
'categories': categories
}
return data
@json.json_view
def load_feed_favicons(request):
user = get_user(request)
feed_ids = request.REQUEST.getlist('feed_ids')
if not feed_ids:
user_subs = UserSubscription.objects.select_related('feed').filter(user=user, active=True)
feed_ids = [sub['feed__pk'] for sub in user_subs.values('feed__pk')]
feed_icons = dict([(i.feed_id, i.data) for i in MFeedIcon.objects(feed_id__in=feed_ids)])
return feed_icons
def load_feeds_flat(request):
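    # Flat variant of load_feeds: returns the user's active subscriptions keyed
    # by feed id along with flattened folders, social feeds/profile/services,
    # starred story counts and (for new users) category suggestions.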
user = request.user
include_favicons = is_true(request.REQUEST.get('include_favicons', False))
update_counts = is_true(request.REQUEST.get('update_counts', True))
feeds = {}
day_ago = datetime.datetime.now() - datetime.timedelta(days=1)
scheduled_feeds = []
iphone_version = "2.1"
if include_favicons == 'false': include_favicons = False
if update_counts == 'false': update_counts = False
if not user.is_authenticated():
return HttpResponseForbidden()
try:
folders = UserSubscriptionFolders.objects.get(user=user)
except UserSubscriptionFolders.DoesNotExist:
folders = []
user_subs = UserSubscription.objects.select_related('feed').filter(user=user, active=True)
if not user_subs and folders:
folders.auto_activate()
user_subs = UserSubscription.objects.select_related('feed').filter(user=user, active=True)
for sub in user_subs:
if update_counts and sub.needs_unread_recalc:
sub.calculate_feed_scores(silent=True)
feeds[sub.feed_id] = sub.canonical(include_favicon=include_favicons)
if not sub.feed.active and not sub.feed.has_feed_exception:
scheduled_feeds.append(sub.feed.pk)
elif sub.feed.active_subscribers <= 0:
scheduled_feeds.append(sub.feed.pk)
elif sub.feed.next_scheduled_update < day_ago:
scheduled_feeds.append(sub.feed.pk)
if len(scheduled_feeds) > 0 and request.user.is_authenticated():
logging.user(request, "~SN~FMTasking the scheduling immediate fetch of ~SB%s~SN feeds..." %
len(scheduled_feeds))
ScheduleImmediateFetches.apply_async(kwargs=dict(feed_ids=scheduled_feeds, user_id=user.pk))
flat_folders = []
if folders:
flat_folders = folders.flatten_folders(feeds=feeds)
social_params = {
'user_id': user.pk,
'include_favicon': include_favicons,
'update_counts': update_counts,
}
social_feeds = MSocialSubscription.feeds(**social_params)
social_profile = MSocialProfile.profile(user.pk)
social_services = MSocialServices.profile(user.pk)
starred_counts, starred_count = MStarredStoryCounts.user_counts(user.pk, include_total=True)
if not starred_count and len(starred_counts):
starred_count = MStarredStory.objects(user_id=user.pk).count()
categories = None
if not user_subs:
categories = MCategory.serialize()
logging.user(request, "~FB~SBLoading ~FY%s~FB/~FM%s~FB feeds/socials ~FMflat~FB%s" % (
len(feeds.keys()), len(social_feeds), '. ~FCUpdating counts.' if update_counts else ''))
data = {
"flat_folders": flat_folders,
"feeds": feeds,
"social_feeds": social_feeds,
"social_profile": social_profile,
"social_services": social_services,
"user": user.username,
"is_staff": user.is_staff,
"user_profile": user.profile,
"iphone_version": iphone_version,
"categories": categories,
'starred_count': starred_count,
'starred_counts': starred_counts,
}
return data
@ratelimit(minutes=1, requests=10)
@never_cache
@json.json_view
def refresh_feeds(request):
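    # Recomputes unread counts for the requested feed and social-feed ids,
    # attaches favicon data for feeds whose favicons are still being fetched,
    # and includes the user's unread interaction count.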
user = get_user(request)
feed_ids = request.REQUEST.getlist('feed_id')
check_fetch_status = request.REQUEST.get('check_fetch_status')
favicons_fetching = request.REQUEST.getlist('favicons_fetching')
social_feed_ids = [feed_id for feed_id in feed_ids if 'social:' in feed_id]
feed_ids = list(set(feed_ids) - set(social_feed_ids))
feeds = {}
if feed_ids or (not social_feed_ids and not feed_ids):
feeds = UserSubscription.feeds_with_updated_counts(user, feed_ids=feed_ids,
check_fetch_status=check_fetch_status)
social_feeds = {}
if social_feed_ids or (not social_feed_ids and not feed_ids):
social_feeds = MSocialSubscription.feeds_with_updated_counts(user, social_feed_ids=social_feed_ids)
favicons_fetching = [int(f) for f in favicons_fetching if f]
feed_icons = {}
if favicons_fetching:
feed_icons = dict([(i.feed_id, i) for i in MFeedIcon.objects(feed_id__in=favicons_fetching)])
for feed_id, feed in feeds.items():
if feed_id in favicons_fetching and feed_id in feed_icons:
feeds[feed_id]['favicon'] = feed_icons[feed_id].data
feeds[feed_id]['favicon_color'] = feed_icons[feed_id].color
feeds[feed_id]['favicon_fetching'] = feed.get('favicon_fetching')
user_subs = UserSubscription.objects.filter(user=user, active=True).only('feed')
sub_feed_ids = [s.feed_id for s in user_subs]
if favicons_fetching:
moved_feed_ids = [f for f in favicons_fetching if f not in sub_feed_ids]
for moved_feed_id in moved_feed_ids:
duplicate_feeds = DuplicateFeed.objects.filter(duplicate_feed_id=moved_feed_id)
if duplicate_feeds and duplicate_feeds[0].feed.pk in feeds:
feeds[moved_feed_id] = feeds[duplicate_feeds[0].feed_id]
feeds[moved_feed_id]['dupe_feed_id'] = duplicate_feeds[0].feed_id
if check_fetch_status:
missing_feed_ids = list(set(feed_ids) - set(sub_feed_ids))
if missing_feed_ids:
duplicate_feeds = DuplicateFeed.objects.filter(duplicate_feed_id__in=missing_feed_ids)
for duplicate_feed in duplicate_feeds:
feeds[duplicate_feed.duplicate_feed_id] = {'id': duplicate_feed.feed_id}
interactions_count = MInteraction.user_unread_count(user.pk)
if True or settings.DEBUG or check_fetch_status:
logging.user(request, "~FBRefreshing %s feeds (%s/%s)" % (
len(feeds.keys()), check_fetch_status, len(favicons_fetching)))
return {
'feeds': feeds,
'social_feeds': social_feeds,
'interactions_count': interactions_count,
}
@json.json_view
def interactions_count(request):
user = get_user(request)
interactions_count = MInteraction.user_unread_count(user.pk)
return {
'interactions_count': interactions_count,
}
@never_cache
@ajax_login_required
@json.json_view
def feed_unread_count(request):
user = request.user
feed_ids = request.REQUEST.getlist('feed_id')
force = request.REQUEST.get('force', False)
social_feed_ids = [feed_id for feed_id in feed_ids if 'social:' in feed_id]
feed_ids = list(set(feed_ids) - set(social_feed_ids))
feeds = {}
if feed_ids:
feeds = UserSubscription.feeds_with_updated_counts(user, feed_ids=feed_ids, force=force)
social_feeds = {}
if social_feed_ids:
social_feeds = MSocialSubscription.feeds_with_updated_counts(user, social_feed_ids=social_feed_ids)
if len(feed_ids) == 1:
if settings.DEBUG:
feed_title = Feed.get_by_id(feed_ids[0]).feed_title
else:
feed_title = feed_ids[0]
elif len(social_feed_ids) == 1:
feed_title = MSocialProfile.objects.get(user_id=social_feed_ids[0].replace('social:', '')).username
else:
feed_title = "%s feeds" % (len(feeds) + len(social_feeds))
logging.user(request, "~FBUpdating unread count on: %s" % feed_title)
return {'feeds': feeds, 'social_feeds': social_feeds}
def refresh_feed(request, feed_id):
user = get_user(request)
feed = get_object_or_404(Feed, pk=feed_id)
feed = feed.update(force=True, compute_scores=False)
usersub = UserSubscription.objects.get(user=user, feed=feed)
usersub.calculate_feed_scores(silent=False)
logging.user(request, "~FBRefreshing feed: %s" % feed)
return load_single_feed(request, feed_id)
@never_cache
@json.json_view
def load_single_feed(request, feed_id):
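    # Loads one page of stories for a single feed (optionally via search or the
    # starred/unread filters) and annotates each story with read, starred and
    # shared state plus intelligence classifier scores for the requesting user.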
start = time.time()
user = get_user(request)
# offset = int(request.REQUEST.get('offset', 0))
# limit = int(request.REQUEST.get('limit', 6))
limit = 6
page = int(request.REQUEST.get('page', 1))
offset = limit * (page-1)
order = request.REQUEST.get('order', 'newest')
read_filter = request.REQUEST.get('read_filter', 'all')
query = request.REQUEST.get('query')
include_story_content = is_true(request.REQUEST.get('include_story_content', True))
include_hidden = is_true(request.REQUEST.get('include_hidden', False))
message = None
user_search = None
dupe_feed_id = None
user_profiles = []
now = localtime_for_timezone(datetime.datetime.now(), user.profile.timezone)
if not feed_id: raise Http404
feed_address = request.REQUEST.get('feed_address')
feed = Feed.get_by_id(feed_id, feed_address=feed_address)
if not feed:
raise Http404
try:
usersub = UserSubscription.objects.get(user=user, feed=feed)
except UserSubscription.DoesNotExist:
usersub = None
if query:
if user.profile.is_premium:
user_search = MUserSearch.get_user(user.pk)
user_search.touch_search_date()
stories = feed.find_stories(query, order=order, offset=offset, limit=limit)
else:
stories = []
message = "You must be a premium subscriber to search."
elif read_filter == 'starred':
mstories = MStarredStory.objects(
user_id=user.pk,
story_feed_id=feed_id
).order_by('%sstarred_date' % ('-' if order == 'newest' else ''))[offset:offset+limit]
stories = Feed.format_stories(mstories)
elif usersub and (read_filter == 'unread' or order == 'oldest'):
stories = usersub.get_stories(order=order, read_filter=read_filter, offset=offset, limit=limit,
default_cutoff_date=user.profile.unread_cutoff)
else:
stories = feed.get_stories(offset, limit)
checkpoint1 = time.time()
try:
stories, user_profiles = MSharedStory.stories_with_comments_and_profiles(stories, user.pk)
except redis.ConnectionError:
logging.user(request, "~BR~FK~SBRedis is unavailable for shared stories.")
checkpoint2 = time.time()
# Get intelligence classifier for user
if usersub and usersub.is_trained:
classifier_feeds = list(MClassifierFeed.objects(user_id=user.pk, feed_id=feed_id, social_user_id=0))
classifier_authors = list(MClassifierAuthor.objects(user_id=user.pk, feed_id=feed_id))
classifier_titles = list(MClassifierTitle.objects(user_id=user.pk, feed_id=feed_id))
classifier_tags = list(MClassifierTag.objects(user_id=user.pk, feed_id=feed_id))
else:
classifier_feeds = []
classifier_authors = []
classifier_titles = []
classifier_tags = []
classifiers = get_classifiers_for_user(user, feed_id=feed_id,
classifier_feeds=classifier_feeds,
classifier_authors=classifier_authors,
classifier_titles=classifier_titles,
classifier_tags=classifier_tags)
checkpoint3 = time.time()
unread_story_hashes = []
if stories:
if (read_filter == 'all' or query) and usersub:
unread_story_hashes = UserSubscription.story_hashes(user.pk, read_filter='unread',
feed_ids=[usersub.feed_id],
usersubs=[usersub],
group_by_feed=False,
cutoff_date=user.profile.unread_cutoff)
story_hashes = [story['story_hash'] for story in stories if story['story_hash']]
starred_stories = MStarredStory.objects(user_id=user.pk,
story_feed_id=feed.pk,
story_hash__in=story_hashes)\
.only('story_hash', 'starred_date', 'user_tags')
shared_story_hashes = MSharedStory.check_shared_story_hashes(user.pk, story_hashes)
shared_stories = []
if shared_story_hashes:
shared_stories = MSharedStory.objects(user_id=user.pk,
story_hash__in=shared_story_hashes)\
.only('story_hash', 'shared_date', 'comments')
starred_stories = dict([(story.story_hash, dict(starred_date=story.starred_date,
user_tags=story.user_tags))
for story in starred_stories])
shared_stories = dict([(story.story_hash, dict(shared_date=story.shared_date,
comments=story.comments))
for story in shared_stories])
checkpoint4 = time.time()
for story in stories:
if not include_story_content:
del story['story_content']
story_date = localtime_for_timezone(story['story_date'], user.profile.timezone)
nowtz = localtime_for_timezone(now, user.profile.timezone)
story['short_parsed_date'] = format_story_link_date__short(story_date, nowtz)
story['long_parsed_date'] = format_story_link_date__long(story_date, nowtz)
if usersub:
story['read_status'] = 1
if (read_filter == 'all' or query) and usersub:
story['read_status'] = 1 if story['story_hash'] not in unread_story_hashes else 0
elif read_filter == 'unread' and usersub:
story['read_status'] = 0
if story['story_hash'] in starred_stories:
story['starred'] = True
starred_date = localtime_for_timezone(starred_stories[story['story_hash']]['starred_date'],
user.profile.timezone)
story['starred_date'] = format_story_link_date__long(starred_date, now)
story['starred_timestamp'] = starred_date.strftime('%s')
story['user_tags'] = starred_stories[story['story_hash']]['user_tags']
if story['story_hash'] in shared_stories:
story['shared'] = True
shared_date = localtime_for_timezone(shared_stories[story['story_hash']]['shared_date'],
user.profile.timezone)
story['shared_date'] = format_story_link_date__long(shared_date, now)
story['shared_comments'] = strip_tags(shared_stories[story['story_hash']]['comments'])
else:
story['read_status'] = 1
story['intelligence'] = {
'feed': apply_classifier_feeds(classifier_feeds, feed),
'author': apply_classifier_authors(classifier_authors, story),
'tags': apply_classifier_tags(classifier_tags, story),
'title': apply_classifier_titles(classifier_titles, story),
}
story['score'] = UserSubscription.score_story(story['intelligence'])
# Intelligence
feed_tags = json.decode(feed.data.popular_tags) if feed.data.popular_tags else []
feed_authors = json.decode(feed.data.popular_authors) if feed.data.popular_authors else []
if usersub:
usersub.feed_opens += 1
usersub.needs_unread_recalc = True
usersub.save(update_fields=['feed_opens', 'needs_unread_recalc'])
diff1 = checkpoint1-start
diff2 = checkpoint2-start
diff3 = checkpoint3-start
diff4 = checkpoint4-start
timediff = time.time()-start
last_update = relative_timesince(feed.last_update)
time_breakdown = ""
if timediff > 1 or settings.DEBUG:
time_breakdown = "~SN~FR(~SB%.4s/%.4s/%.4s/%.4s~SN)" % (
diff1, diff2, diff3, diff4)
search_log = "~SN~FG(~SB%s~SN) " % query if query else ""
logging.user(request, "~FYLoading feed: ~SB%s%s (%s/%s) %s%s" % (
feed.feed_title[:22], ('~SN/p%s' % page) if page > 1 else '', order, read_filter, search_log, time_breakdown))
if not include_hidden:
hidden_stories_removed = 0
new_stories = []
for story in stories:
if story['score'] >= 0:
new_stories.append(story)
else:
hidden_stories_removed += 1
stories = new_stories
data = dict(stories=stories,
user_profiles=user_profiles,
feed_tags=feed_tags,
feed_authors=feed_authors,
classifiers=classifiers,
updated=last_update,
user_search=user_search,
feed_id=feed.pk,
elapsed_time=round(float(timediff), 2),
message=message)
if not include_hidden: data['hidden_stories_removed'] = hidden_stories_removed
if dupe_feed_id: data['dupe_feed_id'] = dupe_feed_id
if not usersub:
data.update(feed.canonical())
# if not usersub and feed.num_subscribers <= 1:
# data = dict(code=-1, message="You must be subscribed to this feed.")
# if page <= 3:
# import random
# time.sleep(random.randint(2, 4))
# if page == 2:
# assert False
return data
def load_feed_page(request, feed_id):
if not feed_id:
raise Http404
feed = Feed.get_by_id(feed_id)
if feed and feed.has_page and not feed.has_page_exception:
if settings.BACKED_BY_AWS.get('pages_on_node'):
url = "http://%s/original_page/%s" % (
settings.ORIGINAL_PAGE_SERVER,
feed.pk,
)
page_response = requests.get(url)
if page_response.status_code == 200:
response = HttpResponse(page_response.content, mimetype="text/html; charset=utf-8")
response['Content-Encoding'] = 'gzip'
response['Last-Modified'] = page_response.headers.get('Last-modified')
response['Etag'] = page_response.headers.get('Etag')
response['Content-Length'] = str(len(page_response.content))
logging.user(request, "~FYLoading original page, proxied from node: ~SB%s bytes" %
(len(page_response.content)))
return response
if settings.BACKED_BY_AWS['pages_on_s3'] and feed.s3_page:
if settings.PROXY_S3_PAGES:
key = settings.S3_PAGES_BUCKET.get_key(feed.s3_pages_key)
if key:
compressed_data = key.get_contents_as_string()
response = HttpResponse(compressed_data, mimetype="text/html; charset=utf-8")
response['Content-Encoding'] = 'gzip'
logging.user(request, "~FYLoading original page, proxied: ~SB%s bytes" %
(len(compressed_data)))
return response
else:
logging.user(request, "~FYLoading original page, non-proxied")
return HttpResponseRedirect('//%s/%s' % (settings.S3_PAGES_BUCKET_NAME,
feed.s3_pages_key))
data = MFeedPage.get_data(feed_id=feed_id)
if not data or not feed or not feed.has_page or feed.has_page_exception:
logging.user(request, "~FYLoading original page, ~FRmissing")
return render(request, 'static/404_original_page.xhtml', {},
content_type='text/html',
status=404)
logging.user(request, "~FYLoading original page, from the db")
return HttpResponse(data, mimetype="text/html; charset=utf-8")
@json.json_view
def load_starred_stories(request):
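    # Returns a page of the user's saved (starred) stories, optionally filtered
    # by a search query, a user tag, or an explicit list of story hashes.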
user = get_user(request)
offset = int(request.REQUEST.get('offset', 0))
limit = int(request.REQUEST.get('limit', 10))
page = int(request.REQUEST.get('page', 0))
query = request.REQUEST.get('query')
order = request.REQUEST.get('order', 'newest')
tag = request.REQUEST.get('tag')
story_hashes = request.REQUEST.getlist('h')[:100]
version = int(request.REQUEST.get('v', 1))
now = localtime_for_timezone(datetime.datetime.now(), user.profile.timezone)
message = None
order_by = '-' if order == "newest" else ""
if page: offset = limit * (page - 1)
if query:
# results = SearchStarredStory.query(user.pk, query)
# story_ids = [result.db_id for result in results]
if user.profile.is_premium:
stories = MStarredStory.find_stories(query, user.pk, tag=tag, offset=offset, limit=limit,
order=order)
else:
stories = []
message = "You must be a premium subscriber to search."
elif tag:
if user.profile.is_premium:
mstories = MStarredStory.objects(
user_id=user.pk,
user_tags__contains=tag
).order_by('%sstarred_date' % order_by)[offset:offset+limit]
stories = Feed.format_stories(mstories)
else:
stories = []
message = "You must be a premium subscriber to read saved stories by tag."
elif story_hashes:
mstories = MStarredStory.objects(
user_id=user.pk,
story_hash__in=story_hashes
).order_by('%sstarred_date' % order_by)[offset:offset+limit]
stories = Feed.format_stories(mstories)
else:
mstories = MStarredStory.objects(
user_id=user.pk
).order_by('%sstarred_date' % order_by)[offset:offset+limit]
stories = Feed.format_stories(mstories)
stories, user_profiles = MSharedStory.stories_with_comments_and_profiles(stories, user.pk, check_all=True)
story_hashes = [story['story_hash'] for story in stories]
story_feed_ids = list(set(s['story_feed_id'] for s in stories))
usersub_ids = UserSubscription.objects.filter(user__pk=user.pk, feed__pk__in=story_feed_ids).values('feed__pk')
usersub_ids = [us['feed__pk'] for us in usersub_ids]
unsub_feed_ids = list(set(story_feed_ids).difference(set(usersub_ids)))
unsub_feeds = Feed.objects.filter(pk__in=unsub_feed_ids)
unsub_feeds = dict((feed.pk, feed.canonical(include_favicon=False)) for feed in unsub_feeds)
shared_story_hashes = MSharedStory.check_shared_story_hashes(user.pk, story_hashes)
shared_stories = []
if shared_story_hashes:
shared_stories = MSharedStory.objects(user_id=user.pk,
story_hash__in=shared_story_hashes)\
.only('story_hash', 'shared_date', 'comments')
shared_stories = dict([(story.story_hash, dict(shared_date=story.shared_date,
comments=story.comments))
for story in shared_stories])
nowtz = localtime_for_timezone(now, user.profile.timezone)
for story in stories:
story_date = localtime_for_timezone(story['story_date'], user.profile.timezone)
story['short_parsed_date'] = format_story_link_date__short(story_date, nowtz)
story['long_parsed_date'] = format_story_link_date__long(story_date, nowtz)
starred_date = localtime_for_timezone(story['starred_date'], user.profile.timezone)
story['starred_date'] = format_story_link_date__long(starred_date, nowtz)
story['starred_timestamp'] = starred_date.strftime('%s')
story['read_status'] = 1
story['starred'] = True
story['intelligence'] = {
'feed': 1,
'author': 0,
'tags': 0,
'title': 0,
}
if story['story_hash'] in shared_stories:
story['shared'] = True
story['shared_comments'] = strip_tags(shared_stories[story['story_hash']]['comments'])
search_log = "~SN~FG(~SB%s~SN)" % query if query else ""
logging.user(request, "~FCLoading starred stories: ~SB%s stories %s" % (len(stories), search_log))
return {
"stories": stories,
"user_profiles": user_profiles,
'feeds': unsub_feeds.values() if version == 2 else unsub_feeds,
"message": message,
}
@json.json_view
def starred_story_hashes(request):
user = get_user(request)
include_timestamps = is_true(request.REQUEST.get('include_timestamps', False))
mstories = MStarredStory.objects(
user_id=user.pk
).only('story_hash', 'starred_date').order_by('-starred_date')
if include_timestamps:
story_hashes = [(s.story_hash, s.starred_date.strftime("%s")) for s in mstories]
else:
story_hashes = [s.story_hash for s in mstories]
logging.user(request, "~FYLoading ~FCstarred story hashes~FY: %s story hashes" %
(len(story_hashes)))
return dict(starred_story_hashes=story_hashes)
def starred_stories_rss_feed(request, user_id, secret_token, tag_slug):
try:
user = User.objects.get(pk=user_id)
except User.DoesNotExist:
raise Http404
try:
tag_counts = MStarredStoryCounts.objects.get(user_id=user_id, slug=tag_slug)
except MStarredStoryCounts.MultipleObjectsReturned:
tag_counts = MStarredStoryCounts.objects(user_id=user_id, slug=tag_slug).first()
except MStarredStoryCounts.DoesNotExist:
raise Http404
data = {}
data['title'] = "Saved Stories - %s" % tag_counts.tag
data['link'] = "%s%s" % (
settings.NEWSBLUR_URL,
reverse('saved-stories-tag', kwargs=dict(tag_name=tag_slug)))
data['description'] = "Stories saved by %s on NewsBlur with the tag \"%s\"." % (user.username,
tag_counts.tag)
data['lastBuildDate'] = datetime.datetime.utcnow()
data['generator'] = 'NewsBlur - %s' % settings.NEWSBLUR_URL
data['docs'] = None
data['author_name'] = user.username
data['feed_url'] = "%s%s" % (
settings.NEWSBLUR_URL,
reverse('starred-stories-rss-feed',
kwargs=dict(user_id=user_id, secret_token=secret_token, tag_slug=tag_slug)),
)
rss = feedgenerator.Atom1Feed(**data)
if not tag_counts.tag:
starred_stories = MStarredStory.objects(
user_id=user.pk
).order_by('-starred_date').limit(25)
else:
starred_stories = MStarredStory.objects(
user_id=user.pk,
user_tags__contains=tag_counts.tag
).order_by('-starred_date').limit(25)
for starred_story in starred_stories:
story_data = {
'title': starred_story.story_title,
'link': starred_story.story_permalink,
'description': (starred_story.story_content_z and
zlib.decompress(starred_story.story_content_z)),
'author_name': starred_story.story_author_name,
'categories': starred_story.story_tags,
'unique_id': starred_story.story_guid,
'pubdate': starred_story.starred_date,
}
rss.add_item(**story_data)
logging.user(request, "~FBGenerating ~SB%s~SN's saved story RSS feed (%s, %s stories): ~FM%s" % (
user.username,
tag_counts.tag,
tag_counts.count,
request.META.get('HTTP_USER_AGENT', "")[:24]
))
return HttpResponse(rss.writeString('utf-8'), content_type='application/rss+xml')
@json.json_view
def load_read_stories(request):
user = get_user(request)
offset = int(request.REQUEST.get('offset', 0))
limit = int(request.REQUEST.get('limit', 10))
page = int(request.REQUEST.get('page', 0))
order = request.REQUEST.get('order', 'newest')
query = request.REQUEST.get('query')
now = localtime_for_timezone(datetime.datetime.now(), user.profile.timezone)
message = None
if page: offset = limit * (page - 1)
if query:
stories = []
message = "Not implemented yet."
# if user.profile.is_premium:
# stories = MStarredStory.find_stories(query, user.pk, offset=offset, limit=limit)
# else:
# stories = []
# message = "You must be a premium subscriber to search."
else:
story_hashes = RUserStory.get_read_stories(user.pk, offset=offset, limit=limit, order=order)
mstories = MStory.objects(story_hash__in=story_hashes)
stories = Feed.format_stories(mstories)
stories = sorted(stories, key=lambda story: story_hashes.index(story['story_hash']),
reverse=bool(order=="oldest"))
stories, user_profiles = MSharedStory.stories_with_comments_and_profiles(stories, user.pk, check_all=True)
story_hashes = [story['story_hash'] for story in stories]
story_feed_ids = list(set(s['story_feed_id'] for s in stories))
usersub_ids = UserSubscription.objects.filter(user__pk=user.pk, feed__pk__in=story_feed_ids).values('feed__pk')
usersub_ids = [us['feed__pk'] for us in usersub_ids]
unsub_feed_ids = list(set(story_feed_ids).difference(set(usersub_ids)))
unsub_feeds = Feed.objects.filter(pk__in=unsub_feed_ids)
unsub_feeds = [feed.canonical(include_favicon=False) for feed in unsub_feeds]
shared_stories = MSharedStory.objects(user_id=user.pk,
story_hash__in=story_hashes)\
.only('story_hash', 'shared_date', 'comments')
shared_stories = dict([(story.story_hash, dict(shared_date=story.shared_date,
comments=story.comments))
for story in shared_stories])
starred_stories = MStarredStory.objects(user_id=user.pk,
story_hash__in=story_hashes)\
.only('story_hash', 'starred_date')
starred_stories = dict([(story.story_hash, story.starred_date)
for story in starred_stories])
nowtz = localtime_for_timezone(now, user.profile.timezone)
for story in stories:
story_date = localtime_for_timezone(story['story_date'], user.profile.timezone)
story['short_parsed_date'] = format_story_link_date__short(story_date, nowtz)
story['long_parsed_date'] = format_story_link_date__long(story_date, nowtz)
story['read_status'] = 1
story['intelligence'] = {
'feed': 1,
'author': 0,
'tags': 0,
'title': 0,
}
if story['story_hash'] in starred_stories:
story['starred'] = True
starred_date = localtime_for_timezone(starred_stories[story['story_hash']],
user.profile.timezone)
story['starred_date'] = format_story_link_date__long(starred_date, now)
story['starred_timestamp'] = starred_date.strftime('%s')
if story['story_hash'] in shared_stories:
story['shared'] = True
story['shared_comments'] = strip_tags(shared_stories[story['story_hash']]['comments'])
search_log = "~SN~FG(~SB%s~SN)" % query if query else ""
logging.user(request, "~FCLoading read stories: ~SB%s stories %s" % (len(stories), search_log))
return {
"stories": stories,
"user_profiles": user_profiles,
"feeds": unsub_feeds,
"message": message,
}
@json.json_view
def load_river_stories__redis(request):
limit = 12
start = time.time()
user = get_user(request)
message = None
feed_ids = [int(feed_id) for feed_id in request.REQUEST.getlist('feeds') if feed_id]
if not feed_ids:
feed_ids = [int(feed_id) for feed_id in request.REQUEST.getlist('f') if feed_id]
story_hashes = request.REQUEST.getlist('h')[:100]
original_feed_ids = list(feed_ids)
page = int(request.REQUEST.get('page', 1))
order = request.REQUEST.get('order', 'newest')
read_filter = request.REQUEST.get('read_filter', 'unread')
query = request.REQUEST.get('query')
include_hidden = is_true(request.REQUEST.get('include_hidden', False))
now = localtime_for_timezone(datetime.datetime.now(), user.profile.timezone)
usersubs = []
code = 1
user_search = None
offset = (page-1) * limit
limit = page * limit
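    # Mongo sort key: ascending story_date for "oldest", descending ("-story_date") otherwise.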
story_date_order = "%sstory_date" % ('' if order == 'oldest' else '-')
if story_hashes:
unread_feed_story_hashes = None
read_filter = 'unread'
mstories = MStory.objects(story_hash__in=story_hashes).order_by(story_date_order)
stories = Feed.format_stories(mstories)
elif query:
if user.profile.is_premium:
user_search = MUserSearch.get_user(user.pk)
user_search.touch_search_date()
usersubs = UserSubscription.subs_for_feeds(user.pk, feed_ids=feed_ids,
read_filter='all')
feed_ids = [sub.feed_id for sub in usersubs]
stories = Feed.find_feed_stories(feed_ids, query, order=order, offset=offset, limit=limit)
mstories = stories
unread_feed_story_hashes = UserSubscription.story_hashes(user.pk, feed_ids=feed_ids,
read_filter="unread", order=order,
group_by_feed=False,
cutoff_date=user.profile.unread_cutoff)
else:
stories = []
mstories = []
message = "You must be a premium subscriber to search."
elif read_filter == 'starred':
mstories = MStarredStory.objects(
user_id=user.pk,
story_feed_id__in=feed_ids
).order_by('%sstarred_date' % ('-' if order == 'newest' else ''))[offset:offset+limit]
stories = Feed.format_stories(mstories)
else:
usersubs = UserSubscription.subs_for_feeds(user.pk, feed_ids=feed_ids,
read_filter=read_filter)
all_feed_ids = [f for f in feed_ids]
feed_ids = [sub.feed_id for sub in usersubs]
if feed_ids:
params = {
"user_id": user.pk,
"feed_ids": feed_ids,
"all_feed_ids": all_feed_ids,
"offset": offset,
"limit": limit,
"order": order,
"read_filter": read_filter,
"usersubs": usersubs,
"cutoff_date": user.profile.unread_cutoff,
}
story_hashes, unread_feed_story_hashes = UserSubscription.feed_stories(**params)
else:
story_hashes = []
unread_feed_story_hashes = []
mstories = MStory.objects(story_hash__in=story_hashes).order_by(story_date_order)
stories = Feed.format_stories(mstories)
found_feed_ids = list(set([story['story_feed_id'] for story in stories]))
stories, user_profiles = MSharedStory.stories_with_comments_and_profiles(stories, user.pk)
if not usersubs:
usersubs = UserSubscription.subs_for_feeds(user.pk, feed_ids=found_feed_ids,
read_filter=read_filter)
trained_feed_ids = [sub.feed_id for sub in usersubs if sub.is_trained]
found_trained_feed_ids = list(set(trained_feed_ids) & set(found_feed_ids))
# Find starred stories
if found_feed_ids:
if read_filter == 'starred':
starred_stories = mstories
else:
starred_stories = MStarredStory.objects(
user_id=user.pk,
story_feed_id__in=found_feed_ids
).only('story_hash', 'starred_date')
starred_stories = dict([(story.story_hash, dict(starred_date=story.starred_date,
user_tags=story.user_tags))
for story in starred_stories])
else:
starred_stories = {}
# Intelligence classifiers for all feeds involved
if found_trained_feed_ids:
classifier_feeds = list(MClassifierFeed.objects(user_id=user.pk,
feed_id__in=found_trained_feed_ids,
social_user_id=0))
classifier_authors = list(MClassifierAuthor.objects(user_id=user.pk,
feed_id__in=found_trained_feed_ids))
classifier_titles = list(MClassifierTitle.objects(user_id=user.pk,
feed_id__in=found_trained_feed_ids))
classifier_tags = list(MClassifierTag.objects(user_id=user.pk,
feed_id__in=found_trained_feed_ids))
else:
classifier_feeds = []
classifier_authors = []
classifier_titles = []
classifier_tags = []
classifiers = sort_classifiers_by_feed(user=user, feed_ids=found_feed_ids,
classifier_feeds=classifier_feeds,
classifier_authors=classifier_authors,
classifier_titles=classifier_titles,
classifier_tags=classifier_tags)
# Just need to format stories
nowtz = localtime_for_timezone(now, user.profile.timezone)
for story in stories:
if read_filter == 'starred':
story['read_status'] = 1
else:
story['read_status'] = 0
if read_filter == 'all' or query:
if (unread_feed_story_hashes is not None and
story['story_hash'] not in unread_feed_story_hashes):
story['read_status'] = 1
story_date = localtime_for_timezone(story['story_date'], user.profile.timezone)
story['short_parsed_date'] = format_story_link_date__short(story_date, nowtz)
story['long_parsed_date'] = format_story_link_date__long(story_date, nowtz)
if story['story_hash'] in starred_stories:
story['starred'] = True
starred_date = localtime_for_timezone(starred_stories[story['story_hash']]['starred_date'],
user.profile.timezone)
story['starred_date'] = format_story_link_date__long(starred_date, now)
story['starred_timestamp'] = starred_date.strftime('%s')
story['user_tags'] = starred_stories[story['story_hash']]['user_tags']
story['intelligence'] = {
'feed': apply_classifier_feeds(classifier_feeds, story['story_feed_id']),
'author': apply_classifier_authors(classifier_authors, story),
'tags': apply_classifier_tags(classifier_tags, story),
'title': apply_classifier_titles(classifier_titles, story),
}
story['score'] = UserSubscription.score_story(story['intelligence'])
if not user.profile.is_premium:
message = "The full River of News is a premium feature."
code = 0
# if page > 1:
# stories = []
# else:
# stories = stories[:5]
diff = time.time() - start
timediff = round(float(diff), 2)
logging.user(request, "~FYLoading ~FCriver stories~FY: ~SBp%s~SN (%s/%s "
"stories, ~SN%s/%s/%s feeds, %s/%s)" %
(page, len(stories), len(mstories), len(found_feed_ids),
len(feed_ids), len(original_feed_ids), order, read_filter))
if not include_hidden:
hidden_stories_removed = 0
new_stories = []
for story in stories:
if story['score'] >= 0:
new_stories.append(story)
else:
hidden_stories_removed += 1
stories = new_stories
# if page <= 1:
# import random
# time.sleep(random.randint(0, 6))
data = dict(code=code,
message=message,
stories=stories,
classifiers=classifiers,
elapsed_time=timediff,
user_search=user_search,
user_profiles=user_profiles)
if not include_hidden: data['hidden_stories_removed'] = hidden_stories_removed
return data
@json.json_view
def unread_story_hashes__old(request):
user = get_user(request)
feed_ids = [int(feed_id) for feed_id in request.REQUEST.getlist('feed_id') if feed_id]
include_timestamps = is_true(request.REQUEST.get('include_timestamps', False))
usersubs = {}
if not feed_ids:
usersubs = UserSubscription.objects.filter(Q(unread_count_neutral__gt=0) |
Q(unread_count_positive__gt=0),
user=user, active=True)
feed_ids = [sub.feed_id for sub in usersubs]
else:
usersubs = UserSubscription.objects.filter(Q(unread_count_neutral__gt=0) |
Q(unread_count_positive__gt=0),
user=user, active=True, feed__in=feed_ids)
unread_feed_story_hashes = {}
story_hash_count = 0
usersubs = dict((sub.feed_id, sub) for sub in usersubs)
for feed_id in feed_ids:
if feed_id in usersubs:
us = usersubs[feed_id]
else:
continue
if not us.unread_count_neutral and not us.unread_count_positive:
continue
unread_feed_story_hashes[feed_id] = us.get_stories(read_filter='unread', limit=500,
withscores=include_timestamps,
hashes_only=True,
default_cutoff_date=user.profile.unread_cutoff)
story_hash_count += len(unread_feed_story_hashes[feed_id])
logging.user(request, "~FYLoading ~FCunread story hashes~FY: ~SB%s feeds~SN (%s story hashes)" %
                 (len(feed_ids), story_hash_count))
return dict(unread_feed_story_hashes=unread_feed_story_hashes)
@json.json_view
def unread_story_hashes(request):
user = get_user(request)
feed_ids = [int(feed_id) for feed_id in request.REQUEST.getlist('feed_id') if feed_id]
include_timestamps = is_true(request.REQUEST.get('include_timestamps', False))
order = request.REQUEST.get('order', 'newest')
read_filter = request.REQUEST.get('read_filter', 'unread')
story_hashes = UserSubscription.story_hashes(user.pk, feed_ids=feed_ids,
order=order, read_filter=read_filter,
include_timestamps=include_timestamps,
cutoff_date=user.profile.unread_cutoff)
logging.user(request, "~FYLoading ~FCunread story hashes~FY: ~SB%s feeds~SN (%s story hashes)" %
(len(feed_ids), len(story_hashes)))
return dict(unread_feed_story_hashes=story_hashes)
@ajax_login_required
@json.json_view
def mark_all_as_read(request):
code = 1
try:
days = int(request.REQUEST.get('days', 0))
except ValueError:
return dict(code=-1, message="Days parameter must be an integer, not: %s" %
request.REQUEST.get('days'))
read_date = datetime.datetime.utcnow() - datetime.timedelta(days=days)
feeds = UserSubscription.objects.filter(user=request.user)
socialsubs = MSocialSubscription.objects.filter(user_id=request.user.pk)
for subtype in [feeds, socialsubs]:
for sub in subtype:
if days == 0:
sub.mark_feed_read()
else:
if sub.mark_read_date < read_date:
sub.needs_unread_recalc = True
sub.mark_read_date = read_date
sub.save()
logging.user(request, "~FMMarking all as read: ~SB%s days" % (days,))
return dict(code=code)
@ajax_login_required
@json.json_view
def mark_story_as_read(request):
story_ids = request.REQUEST.getlist('story_id')
try:
feed_id = int(get_argument_or_404(request, 'feed_id'))
except ValueError:
return dict(code=-1, errors=["You must pass a valid feed_id: %s" %
request.REQUEST.get('feed_id')])
try:
usersub = UserSubscription.objects.select_related('feed').get(user=request.user, feed=feed_id)
except Feed.DoesNotExist:
duplicate_feed = DuplicateFeed.objects.filter(duplicate_feed_id=feed_id)
if duplicate_feed:
feed_id = duplicate_feed[0].feed_id
try:
usersub = UserSubscription.objects.get(user=request.user,
feed=duplicate_feed[0].feed)
except (Feed.DoesNotExist):
return dict(code=-1, errors=["No feed exists for feed_id %d." % feed_id])
else:
return dict(code=-1, errors=["No feed exists for feed_id %d." % feed_id])
except UserSubscription.DoesNotExist:
usersub = None
if usersub:
data = usersub.mark_story_ids_as_read(story_ids, request=request)
else:
data = dict(code=-1, errors=["User is not subscribed to this feed."])
r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
r.publish(request.user.username, 'feed:%s' % feed_id)
return data
@ajax_login_required
@json.json_view
def mark_story_hashes_as_read(request):
r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
story_hashes = request.REQUEST.getlist('story_hash')
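    # mark_story_hashes_read returns the affected feed ids plus the friend ids whose social subscriptions need an unread recount.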
feed_ids, friend_ids = RUserStory.mark_story_hashes_read(request.user.pk, story_hashes)
if friend_ids:
socialsubs = MSocialSubscription.objects.filter(
user_id=request.user.pk,
subscription_user_id__in=friend_ids)
for socialsub in socialsubs:
if not socialsub.needs_unread_recalc:
socialsub.needs_unread_recalc = True
socialsub.save()
r.publish(request.user.username, 'social:%s' % socialsub.subscription_user_id)
# Also count on original subscription
for feed_id in feed_ids:
usersubs = UserSubscription.objects.filter(user=request.user.pk, feed=feed_id)
if usersubs:
usersub = usersubs[0]
if not usersub.needs_unread_recalc:
usersub.needs_unread_recalc = True
usersub.save(update_fields=['needs_unread_recalc'])
r.publish(request.user.username, 'feed:%s' % feed_id)
hash_count = len(story_hashes)
logging.user(request, "~FYRead %s %s in feed/socialsubs: %s/%s" % (
hash_count, 'story' if hash_count == 1 else 'stories', feed_ids, friend_ids))
return dict(code=1, story_hashes=story_hashes,
feed_ids=feed_ids, friend_user_ids=friend_ids)
@ajax_login_required
@json.json_view
def mark_feed_stories_as_read(request):
r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
feeds_stories = request.REQUEST.get('feeds_stories', "{}")
feeds_stories = json.decode(feeds_stories)
data = {
'code': -1,
'message': 'Nothing was marked as read'
}
for feed_id, story_ids in feeds_stories.items():
try:
feed_id = int(feed_id)
except ValueError:
continue
try:
usersub = UserSubscription.objects.select_related('feed').get(user=request.user, feed=feed_id)
data = usersub.mark_story_ids_as_read(story_ids, request=request)
except UserSubscription.DoesNotExist:
return dict(code=-1, error="You are not subscribed to this feed_id: %d" % feed_id)
except Feed.DoesNotExist:
duplicate_feed = DuplicateFeed.objects.filter(duplicate_feed_id=feed_id)
try:
if not duplicate_feed: raise Feed.DoesNotExist
usersub = UserSubscription.objects.get(user=request.user,
feed=duplicate_feed[0].feed)
data = usersub.mark_story_ids_as_read(story_ids, request=request)
except (UserSubscription.DoesNotExist, Feed.DoesNotExist):
return dict(code=-1, error="No feed exists for feed_id: %d" % feed_id)
r.publish(request.user.username, 'feed:%s' % feed_id)
return data
@ajax_login_required
@json.json_view
def mark_social_stories_as_read(request):
code = 1
errors = []
data = {}
r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
users_feeds_stories = request.REQUEST.get('users_feeds_stories', "{}")
users_feeds_stories = json.decode(users_feeds_stories)
for social_user_id, feeds in users_feeds_stories.items():
for feed_id, story_ids in feeds.items():
feed_id = int(feed_id)
try:
socialsub = MSocialSubscription.objects.get(user_id=request.user.pk,
subscription_user_id=social_user_id)
data = socialsub.mark_story_ids_as_read(story_ids, feed_id, request=request)
except OperationError, e:
code = -1
errors.append("Already read story: %s" % e)
except MSocialSubscription.DoesNotExist:
MSocialSubscription.mark_unsub_story_ids_as_read(request.user.pk, social_user_id,
story_ids, feed_id,
request=request)
except Feed.DoesNotExist:
duplicate_feed = DuplicateFeed.objects.filter(duplicate_feed_id=feed_id)
if duplicate_feed:
try:
socialsub = MSocialSubscription.objects.get(user_id=request.user.pk,
subscription_user_id=social_user_id)
data = socialsub.mark_story_ids_as_read(story_ids, duplicate_feed[0].feed.pk, request=request)
except (UserSubscription.DoesNotExist, Feed.DoesNotExist):
code = -1
errors.append("No feed exists for feed_id %d." % feed_id)
else:
continue
r.publish(request.user.username, 'feed:%s' % feed_id)
r.publish(request.user.username, 'social:%s' % social_user_id)
data.update(code=code, errors=errors)
return data
@required_params('story_id', feed_id=int)
@ajax_login_required
@json.json_view
def mark_story_as_unread(request):
story_id = request.REQUEST.get('story_id', None)
feed_id = int(request.REQUEST.get('feed_id', 0))
try:
usersub = UserSubscription.objects.select_related('feed').get(user=request.user, feed=feed_id)
feed = usersub.feed
except UserSubscription.DoesNotExist:
usersub = None
feed = Feed.get_by_id(feed_id)
if usersub and not usersub.needs_unread_recalc:
usersub.needs_unread_recalc = True
usersub.save(update_fields=['needs_unread_recalc'])
data = dict(code=0, payload=dict(story_id=story_id))
story, found_original = MStory.find_story(feed_id, story_id)
if not story:
logging.user(request, "~FY~SBUnread~SN story in feed: %s (NOT FOUND)" % (feed))
return dict(code=-1, message="Story not found.")
if usersub:
data = usersub.invert_read_stories_after_unread_story(story, request)
message = RUserStory.story_can_be_marked_read_by_user(story, request.user)
if message:
data['code'] = -1
data['message'] = message
return data
social_subs = MSocialSubscription.mark_dirty_sharing_story(user_id=request.user.pk,
story_feed_id=feed_id,
story_guid_hash=story.guid_hash)
dirty_count = social_subs and social_subs.count()
dirty_count = ("(%s social_subs)" % dirty_count) if dirty_count else ""
RUserStory.mark_story_hash_unread(user_id=request.user.pk, story_hash=story.story_hash)
r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
r.publish(request.user.username, 'feed:%s' % feed_id)
logging.user(request, "~FY~SBUnread~SN story in feed: %s %s" % (feed, dirty_count))
return data
@ajax_login_required
@json.json_view
@required_params('story_hash')
def mark_story_hash_as_unread(request):
r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
story_hash = request.REQUEST.get('story_hash')
feed_id, _ = MStory.split_story_hash(story_hash)
story, _ = MStory.find_story(feed_id, story_hash)
if not story:
data = dict(code=-1, message="That story has been removed from the feed, no need to mark it unread.")
return data
message = RUserStory.story_can_be_marked_read_by_user(story, request.user)
if message:
data = dict(code=-1, message=message)
return data
# Also count on original subscription
usersubs = UserSubscription.objects.filter(user=request.user.pk, feed=feed_id)
if usersubs:
usersub = usersubs[0]
if not usersub.needs_unread_recalc:
usersub.needs_unread_recalc = True
usersub.save(update_fields=['needs_unread_recalc'])
data = usersub.invert_read_stories_after_unread_story(story, request)
r.publish(request.user.username, 'feed:%s' % feed_id)
feed_id, friend_ids = RUserStory.mark_story_hash_unread(request.user.pk, story_hash)
if friend_ids:
socialsubs = MSocialSubscription.objects.filter(
user_id=request.user.pk,
subscription_user_id__in=friend_ids)
for socialsub in socialsubs:
if not socialsub.needs_unread_recalc:
socialsub.needs_unread_recalc = True
socialsub.save()
r.publish(request.user.username, 'social:%s' % socialsub.subscription_user_id)
logging.user(request, "~FYUnread story in feed/socialsubs: %s/%s" % (feed_id, friend_ids))
return dict(code=1, story_hash=story_hash, feed_id=feed_id, friend_user_ids=friend_ids)
@ajax_login_required
@json.json_view
def mark_feed_as_read(request):
r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
feed_ids = request.REQUEST.getlist('feed_id')
cutoff_timestamp = int(request.REQUEST.get('cutoff_timestamp', 0))
direction = request.REQUEST.get('direction', 'older')
multiple = len(feed_ids) > 1
code = 1
errors = []
cutoff_date = datetime.datetime.fromtimestamp(cutoff_timestamp) if cutoff_timestamp else None
for feed_id in feed_ids:
if 'social:' in feed_id:
user_id = int(feed_id.replace('social:', ''))
try:
sub = MSocialSubscription.objects.get(user_id=request.user.pk,
subscription_user_id=user_id)
except MSocialSubscription.DoesNotExist:
logging.user(request, "~FRCouldn't find socialsub: %s" % user_id)
continue
if not multiple:
sub_user = User.objects.get(pk=sub.subscription_user_id)
logging.user(request, "~FMMarking social feed as read: ~SB%s" % (sub_user.username,))
else:
try:
feed = Feed.objects.get(id=feed_id)
sub = UserSubscription.objects.get(feed=feed, user=request.user)
if not multiple:
logging.user(request, "~FMMarking feed as read: ~SB%s" % (feed,))
except (Feed.DoesNotExist, UserSubscription.DoesNotExist), e:
errors.append("User not subscribed: %s" % e)
continue
except (ValueError), e:
errors.append("Invalid feed_id: %s" % e)
continue
if not sub:
errors.append("User not subscribed: %s" % feed_id)
continue
try:
if direction == "older":
marked_read = sub.mark_feed_read(cutoff_date=cutoff_date)
else:
marked_read = sub.mark_newer_stories_read(cutoff_date=cutoff_date)
if marked_read and not multiple:
r.publish(request.user.username, 'feed:%s' % feed_id)
except IntegrityError, e:
errors.append("Could not mark feed as read: %s" % e)
code = -1
if multiple:
logging.user(request, "~FMMarking ~SB%s~SN feeds as read" % len(feed_ids))
r.publish(request.user.username, 'refresh:%s' % ','.join(feed_ids))
if errors:
logging.user(request, "~FMMarking read had errors: ~FR%s" % errors)
return dict(code=code, errors=errors, cutoff_date=cutoff_date, direction=direction)
def _parse_user_info(user):
return {
'user_info': {
'is_anonymous': json.encode(user.is_anonymous()),
'is_authenticated': json.encode(user.is_authenticated()),
'username': json.encode(user.username if user.is_authenticated() else 'Anonymous')
}
}
@ajax_login_required
@json.json_view
def add_url(request):
code = 0
url = request.POST['url']
folder = request.POST.get('folder', '')
new_folder = request.POST.get('new_folder')
auto_active = is_true(request.POST.get('auto_active', 1))
skip_fetch = is_true(request.POST.get('skip_fetch', False))
feed = None
if not url:
code = -1
message = 'Enter in the website address or the feed URL.'
elif any([(banned_url in url) for banned_url in BANNED_URLS]):
code = -1
message = "The publisher of this website has banned NewsBlur."
else:
if new_folder:
usf, _ = UserSubscriptionFolders.objects.get_or_create(user=request.user)
usf.add_folder(folder, new_folder)
folder = new_folder
code, message, us = UserSubscription.add_subscription(user=request.user, feed_address=url,
folder=folder, auto_active=auto_active,
skip_fetch=skip_fetch)
feed = us and us.feed
if feed:
r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
r.publish(request.user.username, 'reload:%s' % feed.pk)
MUserSearch.schedule_index_feeds_for_search(feed.pk, request.user.pk)
return dict(code=code, message=message, feed=feed)
@ajax_login_required
@json.json_view
def add_folder(request):
folder = request.POST['folder']
parent_folder = request.POST.get('parent_folder', '')
folders = None
logging.user(request, "~FRAdding Folder: ~SB%s (in %s)" % (folder, parent_folder))
if folder:
code = 1
message = ""
user_sub_folders_object, _ = UserSubscriptionFolders.objects.get_or_create(user=request.user)
user_sub_folders_object.add_folder(parent_folder, folder)
folders = json.decode(user_sub_folders_object.folders)
r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
r.publish(request.user.username, 'reload:feeds')
else:
code = -1
message = "Gotta write in a folder name."
return dict(code=code, message=message, folders=folders)
@ajax_login_required
@json.json_view
def delete_feed(request):
feed_id = int(request.POST['feed_id'])
in_folder = request.POST.get('in_folder', None)
if not in_folder or in_folder == ' ':
in_folder = ""
user_sub_folders = get_object_or_404(UserSubscriptionFolders, user=request.user)
user_sub_folders.delete_feed(feed_id, in_folder)
feed = Feed.objects.filter(pk=feed_id)
if feed:
feed[0].count_subscribers()
r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
r.publish(request.user.username, 'reload:feeds')
return dict(code=1, message="Removed %s from '%s'." % (feed, in_folder))
@ajax_login_required
@json.json_view
def delete_feed_by_url(request):
message = ""
code = 0
url = request.POST['url']
in_folder = request.POST.get('in_folder', '')
if in_folder == ' ':
in_folder = ""
feed = Feed.get_feed_from_url(url, create=False)
if feed:
user_sub_folders = get_object_or_404(UserSubscriptionFolders, user=request.user)
user_sub_folders.delete_feed(feed.pk, in_folder)
code = 1
feed = Feed.objects.filter(pk=feed.pk)
if feed:
feed[0].count_subscribers()
else:
code = -1
message = "URL not found."
return dict(code=code, message=message)
@ajax_login_required
@json.json_view
def delete_folder(request):
folder_to_delete = request.POST.get('folder_name') or request.POST.get('folder_to_delete')
in_folder = request.POST.get('in_folder', None)
feed_ids_in_folder = [int(f) for f in request.REQUEST.getlist('feed_id') if f]
request.user.profile.send_opml_export_email(reason="You have deleted an entire folder of feeds, so here's a backup just in case.")
# Works piss poor with duplicate folder titles, if they are both in the same folder.
# Deletes all, but only in the same folder parent. But nobody should be doing that, right?
user_sub_folders = get_object_or_404(UserSubscriptionFolders, user=request.user)
user_sub_folders.delete_folder(folder_to_delete, in_folder, feed_ids_in_folder)
folders = json.decode(user_sub_folders.folders)
r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
r.publish(request.user.username, 'reload:feeds')
return dict(code=1, folders=folders)
@required_params('feeds_by_folder')
@ajax_login_required
@json.json_view
def delete_feeds_by_folder(request):
feeds_by_folder = json.decode(request.POST['feeds_by_folder'])
request.user.profile.send_opml_export_email(reason="You have deleted a number of feeds at once, so here's a backup just in case.")
# Works piss poor with duplicate folder titles, if they are both in the same folder.
# Deletes all, but only in the same folder parent. But nobody should be doing that, right?
user_sub_folders = get_object_or_404(UserSubscriptionFolders, user=request.user)
user_sub_folders.delete_feeds_by_folder(feeds_by_folder)
folders = json.decode(user_sub_folders.folders)
r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
r.publish(request.user.username, 'reload:feeds')
return dict(code=1, folders=folders)
@ajax_login_required
@json.json_view
def rename_feed(request):
feed = get_object_or_404(Feed, pk=int(request.POST['feed_id']))
user_sub = UserSubscription.objects.get(user=request.user, feed=feed)
feed_title = request.POST['feed_title']
logging.user(request, "~FRRenaming feed '~SB%s~SN' to: ~SB%s" % (
feed.feed_title, feed_title))
user_sub.user_title = feed_title
user_sub.save()
return dict(code=1)
@ajax_login_required
@json.json_view
def rename_folder(request):
folder_to_rename = request.POST.get('folder_name') or request.POST.get('folder_to_rename')
new_folder_name = request.POST['new_folder_name']
in_folder = request.POST.get('in_folder', '')
code = 0
# Works piss poor with duplicate folder titles, if they are both in the same folder.
# renames all, but only in the same folder parent. But nobody should be doing that, right?
if folder_to_rename and new_folder_name:
user_sub_folders = get_object_or_404(UserSubscriptionFolders, user=request.user)
user_sub_folders.rename_folder(folder_to_rename, new_folder_name, in_folder)
code = 1
else:
code = -1
return dict(code=code)
@ajax_login_required
@json.json_view
def move_feed_to_folders(request):
feed_id = int(request.POST['feed_id'])
in_folders = request.POST.getlist('in_folders', '')
to_folders = request.POST.getlist('to_folders', '')
user_sub_folders = get_object_or_404(UserSubscriptionFolders, user=request.user)
user_sub_folders = user_sub_folders.move_feed_to_folders(feed_id, in_folders=in_folders,
to_folders=to_folders)
r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
r.publish(request.user.username, 'reload:feeds')
return dict(code=1, folders=json.decode(user_sub_folders.folders))
@ajax_login_required
@json.json_view
def move_feed_to_folder(request):
feed_id = int(request.POST['feed_id'])
in_folder = request.POST.get('in_folder', '')
to_folder = request.POST.get('to_folder', '')
user_sub_folders = get_object_or_404(UserSubscriptionFolders, user=request.user)
user_sub_folders = user_sub_folders.move_feed_to_folder(feed_id, in_folder=in_folder,
to_folder=to_folder)
r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
r.publish(request.user.username, 'reload:feeds')
return dict(code=1, folders=json.decode(user_sub_folders.folders))
@ajax_login_required
@json.json_view
def move_folder_to_folder(request):
folder_name = request.POST['folder_name']
in_folder = request.POST.get('in_folder', '')
to_folder = request.POST.get('to_folder', '')
user_sub_folders = get_object_or_404(UserSubscriptionFolders, user=request.user)
user_sub_folders = user_sub_folders.move_folder_to_folder(folder_name, in_folder=in_folder, to_folder=to_folder)
r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
r.publish(request.user.username, 'reload:feeds')
return dict(code=1, folders=json.decode(user_sub_folders.folders))
@required_params('feeds_by_folder', 'to_folder')
@ajax_login_required
@json.json_view
def move_feeds_by_folder_to_folder(request):
feeds_by_folder = json.decode(request.POST['feeds_by_folder'])
to_folder = request.POST['to_folder']
new_folder = request.POST.get('new_folder', None)
request.user.profile.send_opml_export_email(reason="You have moved a number of feeds at once, so here's a backup just in case.")
user_sub_folders = get_object_or_404(UserSubscriptionFolders, user=request.user)
if new_folder:
user_sub_folders.add_folder(to_folder, new_folder)
to_folder = new_folder
user_sub_folders = user_sub_folders.move_feeds_by_folder_to_folder(feeds_by_folder, to_folder)
r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
r.publish(request.user.username, 'reload:feeds')
return dict(code=1, folders=json.decode(user_sub_folders.folders))
@login_required
def add_feature(request):
if not request.user.is_staff:
return HttpResponseForbidden()
code = -1
form = FeatureForm(request.POST)
if form.is_valid():
form.save()
code = 1
return HttpResponseRedirect(reverse('index'))
return dict(code=code)
@json.json_view
def load_features(request):
user = get_user(request)
page = max(int(request.REQUEST.get('page', 0)), 0)
logging.user(request, "~FBBrowse features: ~SBPage #%s" % (page+1))
features = Feature.objects.all()[page*3:(page+1)*3+1].values()
features = [{
'description': f['description'],
'date': localtime_for_timezone(f['date'], user.profile.timezone).strftime("%b %d, %Y")
} for f in features]
return features
@ajax_login_required
@json.json_view
def save_feed_order(request):
folders = request.POST.get('folders')
if folders:
# Test that folders can be JSON decoded
folders_list = json.decode(folders)
assert folders_list is not None
logging.user(request, "~FBFeed re-ordering: ~SB%s folders/feeds" % (len(folders_list)))
user_sub_folders = UserSubscriptionFolders.objects.get(user=request.user)
user_sub_folders.folders = folders
user_sub_folders.save()
return {}
@json.json_view
def feeds_trainer(request):
classifiers = []
feed_id = request.REQUEST.get('feed_id')
user = get_user(request)
usersubs = UserSubscription.objects.filter(user=user, active=True)
if feed_id:
feed = get_object_or_404(Feed, pk=feed_id)
usersubs = usersubs.filter(feed=feed)
usersubs = usersubs.select_related('feed').order_by('-feed__stories_last_month')
for us in usersubs:
if (not us.is_trained and us.feed.stories_last_month > 0) or feed_id:
classifier = dict()
classifier['classifiers'] = get_classifiers_for_user(user, feed_id=us.feed.pk)
classifier['feed_id'] = us.feed_id
classifier['stories_last_month'] = us.feed.stories_last_month
classifier['num_subscribers'] = us.feed.num_subscribers
classifier['feed_tags'] = json.decode(us.feed.data.popular_tags) if us.feed.data.popular_tags else []
classifier['feed_authors'] = json.decode(us.feed.data.popular_authors) if us.feed.data.popular_authors else []
classifiers.append(classifier)
user.profile.has_trained_intelligence = True
user.profile.save()
logging.user(user, "~FGLoading Trainer: ~SB%s feeds" % (len(classifiers)))
return classifiers
@ajax_login_required
@json.json_view
def save_feed_chooser(request):
is_premium = request.user.profile.is_premium
approved_feeds = [int(feed_id) for feed_id in request.POST.getlist('approved_feeds') if feed_id]
if not is_premium:
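        # Free accounts are capped at 64 active feeds; premium accounts keep the full list.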
approved_feeds = approved_feeds[:64]
activated = 0
usersubs = UserSubscription.objects.filter(user=request.user)
for sub in usersubs:
try:
if sub.feed_id in approved_feeds:
activated += 1
if not sub.active:
sub.active = True
sub.save()
if sub.feed.active_subscribers <= 0:
sub.feed.count_subscribers()
elif sub.active:
sub.active = False
sub.save()
except Feed.DoesNotExist:
pass
request.user.profile.queue_new_feeds()
request.user.profile.refresh_stale_feeds(exclude_new=True)
r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
r.publish(request.user.username, 'reload:feeds')
logging.user(request, "~BB~FW~SBFeed chooser: ~FC%s~SN/~SB%s" % (
activated,
usersubs.count()
))
return {'activated': activated}
@ajax_login_required
def retrain_all_sites(request):
for sub in UserSubscription.objects.filter(user=request.user):
sub.is_trained = False
sub.save()
return feeds_trainer(request)
@login_required
def activate_premium_account(request):
try:
usersubs = UserSubscription.objects.select_related('feed').filter(user=request.user)
for sub in usersubs:
sub.active = True
sub.save()
if sub.feed.premium_subscribers <= 0:
sub.feed.count_subscribers()
sub.feed.schedule_feed_fetch_immediately()
except Exception, e:
subject = "Premium activation failed"
message = "%s -- %s\n\n%s" % (request.user, usersubs, e)
mail_admins(subject, message, fail_silently=True)
request.user.profile.is_premium = True
request.user.profile.save()
return HttpResponseRedirect(reverse('index'))
@login_required
def login_as(request):
if not request.user.is_staff:
logging.user(request, "~SKNON-STAFF LOGGING IN AS ANOTHER USER!")
assert False
return HttpResponseForbidden()
username = request.GET['user']
user = get_object_or_404(User, username__iexact=username)
user.backend = settings.AUTHENTICATION_BACKENDS[0]
login_user(request, user)
return HttpResponseRedirect(reverse('index'))
def iframe_buster(request):
logging.user(request, "~FB~SBiFrame bust!")
return HttpResponse(status=204)
@required_params('story_id', feed_id=int)
@ajax_login_required
@json.json_view
def mark_story_as_starred(request):
return _mark_story_as_starred(request)
@required_params('story_hash')
@ajax_login_required
@json.json_view
def mark_story_hash_as_starred(request):
return _mark_story_as_starred(request)
def _mark_story_as_starred(request):
code = 1
feed_id = int(request.REQUEST.get('feed_id', 0))
story_id = request.REQUEST.get('story_id', None)
story_hash = request.REQUEST.get('story_hash', None)
user_tags = request.REQUEST.getlist('user_tags')
message = ""
if story_hash:
story, _ = MStory.find_story(story_hash=story_hash)
feed_id = story and story.story_feed_id
else:
story, _ = MStory.find_story(story_feed_id=feed_id, story_id=story_id)
if not story:
return {'code': -1, 'message': "Could not find story to save."}
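    # Copy the story's fields, dropping empty values and user-specific fields, to seed this user's starred copy.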
story_db = dict([(k, v) for k, v in story._data.items()
if k is not None and v is not None])
story_db.pop('user_id', None)
story_db.pop('starred_date', None)
story_db.pop('id', None)
story_db.pop('user_tags', None)
now = datetime.datetime.now()
story_values = dict(starred_date=now, user_tags=user_tags, **story_db)
params = dict(story_guid=story.story_guid, user_id=request.user.pk)
starred_story = MStarredStory.objects(**params).limit(1)
created = False
removed_user_tags = []
if not starred_story:
params.update(story_values)
starred_story = MStarredStory.objects.create(**params)
created = True
MActivity.new_starred_story(user_id=request.user.pk,
story_title=story.story_title,
story_feed_id=feed_id,
story_id=starred_story.story_guid)
new_user_tags = user_tags
MStarredStoryCounts.adjust_count(request.user.pk, feed_id=feed_id, amount=1)
else:
starred_story = starred_story[0]
new_user_tags = list(set(user_tags) - set(starred_story.user_tags or []))
removed_user_tags = list(set(starred_story.user_tags or []) - set(user_tags))
starred_story.user_tags = user_tags
starred_story.save()
for tag in new_user_tags:
MStarredStoryCounts.adjust_count(request.user.pk, tag=tag, amount=1)
for tag in removed_user_tags:
MStarredStoryCounts.adjust_count(request.user.pk, tag=tag, amount=-1)
if random.random() < 0.01:
MStarredStoryCounts.schedule_count_tags_for_user(request.user.pk)
MStarredStoryCounts.count_for_user(request.user.pk, total_only=True)
starred_counts, starred_count = MStarredStoryCounts.user_counts(request.user.pk, include_total=True)
if not starred_count and len(starred_counts):
starred_count = MStarredStory.objects(user_id=request.user.pk).count()
if created:
logging.user(request, "~FCStarring: ~SB%s (~FM~SB%s~FC~SN)" % (story.story_title[:32], starred_story.user_tags))
else:
logging.user(request, "~FCUpdating starred:~SN~FC ~SB%s~SN (~FM~SB%s~FC~SN)" % (story.story_title[:32], starred_story.user_tags))
return {'code': code, 'message': message, 'starred_count': starred_count, 'starred_counts': starred_counts}
@required_params('story_id')
@ajax_login_required
@json.json_view
def mark_story_as_unstarred(request):
return _mark_story_as_unstarred(request)
@required_params('story_hash')
@ajax_login_required
@json.json_view
def mark_story_hash_as_unstarred(request):
return _mark_story_as_unstarred(request)
def _mark_story_as_unstarred(request):
code = 1
story_id = request.POST.get('story_id', None)
story_hash = request.REQUEST.get('story_hash', None)
starred_counts = None
starred_story = None
if story_id:
starred_story = MStarredStory.objects(user_id=request.user.pk, story_guid=story_id)
if not story_id or not starred_story:
starred_story = MStarredStory.objects(user_id=request.user.pk, story_hash=story_hash or story_id)
if starred_story:
starred_story = starred_story[0]
logging.user(request, "~FCUnstarring: ~SB%s" % (starred_story.story_title[:50]))
user_tags = starred_story.user_tags
feed_id = starred_story.story_feed_id
MActivity.remove_starred_story(user_id=request.user.pk,
story_feed_id=starred_story.story_feed_id,
story_id=starred_story.story_guid)
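        # Disown the record rather than deleting it outright; if another disowned copy already exists, fall back to deleting this one.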
starred_story.user_id = 0
try:
starred_story.save()
except NotUniqueError:
starred_story.delete()
MStarredStoryCounts.adjust_count(request.user.pk, feed_id=feed_id, amount=-1)
for tag in user_tags:
try:
MStarredStoryCounts.adjust_count(request.user.pk, tag=tag, amount=-1)
except MStarredStoryCounts.DoesNotExist:
pass
# MStarredStoryCounts.schedule_count_tags_for_user(request.user.pk)
MStarredStoryCounts.count_for_user(request.user.pk, total_only=True)
starred_counts = MStarredStoryCounts.user_counts(request.user.pk)
else:
code = -1
return {'code': code, 'starred_counts': starred_counts}
@ajax_login_required
@json.json_view
def send_story_email(request):
code = 1
message = 'OK'
story_id = request.POST['story_id']
feed_id = request.POST['feed_id']
    to_addresses = request.POST.get('to', '').replace(',', ' ').replace('  ', ' ').strip().split(' ')
from_name = request.POST['from_name']
from_email = request.POST['from_email']
email_cc = is_true(request.POST.get('email_cc', 'true'))
comments = request.POST['comments']
comments = comments[:2048] # Separated due to PyLint
from_address = 'share@newsblur.com'
share_user_profile = MSocialProfile.get_user(request.user.pk)
if not to_addresses:
code = -1
message = 'Please provide at least one email address.'
elif not all(email_re.match(to_address) for to_address in to_addresses if to_addresses):
code = -1
message = 'You need to send the email to a valid email address.'
elif not email_re.match(from_email):
code = -1
message = 'You need to provide your email address.'
elif not from_name:
code = -1
message = 'You need to provide your name.'
else:
story, _ = MStory.find_story(feed_id, story_id)
story = Feed.format_story(story, feed_id, text=True)
feed = Feed.get_by_id(story['story_feed_id'])
params = {
"to_addresses": to_addresses,
"from_name": from_name,
"from_email": from_email,
"email_cc": email_cc,
"comments": comments,
"from_address": from_address,
"story": story,
"feed": feed,
"share_user_profile": share_user_profile,
}
text = render_to_string('mail/email_story.txt', params)
html = render_to_string('mail/email_story.xhtml', params)
subject = '%s' % (story['story_title'])
cc = None
if email_cc:
cc = ['%s <%s>' % (from_name, from_email)]
subject = subject.replace('\n', ' ')
msg = EmailMultiAlternatives(subject, text,
from_email='NewsBlur <%s>' % from_address,
to=to_addresses,
cc=cc,
headers={'Reply-To': '%s <%s>' % (from_name, from_email)})
msg.attach_alternative(html, "text/html")
try:
msg.send()
except boto.ses.connection.ResponseError, e:
code = -1
message = "Email error: %s" % str(e)
logging.user(request, '~BMSharing story by email to %s recipient%s: ~FY~SB%s~SN~BM~FY/~SB%s' %
(len(to_addresses), '' if len(to_addresses) == 1 else 's',
story['story_title'][:50], feed and feed.feed_title[:50]))
return {'code': code, 'message': message}
@json.json_view
def load_tutorial(request):
if request.REQUEST.get('finished'):
logging.user(request, '~BY~FW~SBFinishing Tutorial')
return {}
else:
newsblur_feed = Feed.objects.filter(feed_address__icontains='blog.newsblur.com').order_by('-pk')[0]
logging.user(request, '~BY~FW~SBLoading Tutorial')
return {
'newsblur_feed': newsblur_feed.canonical()
}
| slava-sh/NewsBlur | apps/reader/views.py | Python | mit | 95,205 |
__author__ = "Guillaume"
__license__ = "MIT"
__copyright__ = "2015, ESRF"
import numpy
from freesas.model import SASModel
class Grid:
"""
This class is used to create a grid which include all the input models
"""
def __init__(self, inputfiles):
"""
:param inputfiles: list of pdb files needed for averaging
"""
self.inputs = inputfiles
self.size = []
self.nbknots = None
self.radius = None
self.coordknots = []
def __repr__(self):
return "Grid with %i knots"%self.nbknots
def spatial_extent(self):
"""
Calculate the maximal extent of input models
:return self.size: 6-list with x,y,z max and then x,y,z min
"""
atoms = []
models_fineness = []
for files in self.inputs:
m = SASModel(files)
if len(atoms)==0:
atoms = m.atoms
else:
atoms = numpy.append(atoms, m.atoms, axis=0)
models_fineness.append(m.fineness)
mean_fineness = sum(models_fineness) / len(models_fineness)
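        # Pad the bounding box by the mean fineness so atoms on the surface remain inside the grid.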
coordmin = atoms.min(axis=0) - mean_fineness
coordmax = atoms.max(axis=0) + mean_fineness
self.size = [coordmax[0],coordmax[1],coordmax[2],coordmin[0],coordmin[1],coordmin[2]]
return self.size
def calc_radius(self, nbknots=None):
"""
Calculate the radius of each point of a hexagonal close-packed grid,
knowing the total volume and the number of knots in this grid.
:param nbknots: number of knots wanted for the grid
:return radius: the radius of each knot of the grid
"""
if len(self.size)==0:
self.spatial_extent()
nbknots = nbknots if nbknots is not None else 5000
size = self.size
dx = size[0] - size[3]
dy = size[1] - size[4]
dz = size[2] - size[5]
volume = dx * dy * dz
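        # density is the close-packing fraction pi/(3*sqrt(2)) ~ 0.74; sharing the packed volume equally
        # among nbknots spheres and solving 4/3*pi*r**3 = density*volume/nbknots for r gives the knot radius.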
density = numpy.pi / (3*2**0.5)
radius = ((3 /( 4 * numpy.pi)) * density * volume / nbknots)**(1.0/3)
self.radius = radius
return radius
def make_grid(self):
"""
Create a grid using the maximal size and the radius previously computed.
The geometry used is a face-centered cubic lattice (fcc).
:return knots: 2d-array, coordinates of each dot of the grid. Saved as self.coordknots.
"""
if len(self.size)==0:
self.spatial_extent()
if self.radius is None:
self.calc_radius()
radius = self.radius
a = numpy.sqrt(2.0)*radius
xmax = self.size[0]
xmin = self.size[3]
ymax = self.size[1]
ymin = self.size[4]
zmax = self.size[2]
zmin = self.size[5]
x = 0.0
y = 0.0
z = 0.0
xlist = []
ylist = []
zlist = []
knots = numpy.empty((1,4), dtype="float")
while (zmin + z) <= zmax:
zlist.append(z)
z += a
while (ymin + y) <= ymax:
ylist.append(y)
y += a
while (xmin + x) <= xmax:
xlist.append(x)
x += a
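        # Build the fcc lattice layer by layer: points are spaced a = sqrt(2)*radius along each axis,
        # and successive z-layers swap which y-rows are used for even/odd x-columns.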
for i in range(len(zlist)):
z = zlist[i]
if i % 2 ==0:
for j in range(len(xlist)):
x = xlist[j]
if j % 2 == 0:
for y in ylist[0:-1:2]:
knots = numpy.append(knots, [[xmin+x, ymin+y, zmin+z, 0.0]], axis=0)
else:
for y in ylist[1:-1:2]:
knots = numpy.append(knots, [[xmin+x, ymin+y, zmin+z, 0.0]], axis=0)
else:
for j in range(len(xlist)):
x = xlist[j]
if j % 2 == 0:
for y in ylist[1:-1:2]:
knots = numpy.append(knots, [[xmin+x, ymin+y, zmin+z, 0.0]], axis=0)
else:
for y in ylist[0:-1:2]:
knots = numpy.append(knots, [[xmin+x, ymin+y, zmin+z, 0.0]], axis=0)
knots = numpy.delete(knots, 0, axis=0)
self.nbknots = knots.shape[0]
self.coordknots = knots
return knots
class AverModels():
"""
    Provides tools to create an averaged model using several aligned dummy atom models
"""
def __init__(self, inputfiles, grid):
"""
:param inputfiles: list of pdb files of aligned models
:param grid: 2d-array coordinates of each point of a grid, fourth column full of zeros
"""
self.inputfiles = inputfiles
self.models = []
self.header = []
self.radius = None
self.atoms = []
self.grid = grid
def __repr__(self):
return "Average SAS model with %i atoms"%len(self.atoms)
def read_files(self, reference=None):
"""
        Read all the pdb files in the inputfiles list, creating SASModels.
        The SASModels created are saved in a list; the reference model is the first model in the list.
:param reference: position of the reference model file in the inputfiles list
"""
ref = reference if reference is not None else 0
inputfiles = self.inputfiles
models = []
models.append(SASModel(inputfiles[ref]))
for i in range(len(inputfiles)):
if i==ref:
continue
else:
models.append(SASModel(inputfiles[i]))
self.models = models
return models
def calc_occupancy(self, griddot):
"""
Assign an occupancy and a contribution factor to the point of the grid.
:param griddot: 1d-array, coordinates of a point of the grid
:return tuple: 2-tuple containing (occupancy, contribution)
"""
occ = 0.0
contrib = 0
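        # Each model atom closer (in squared distance) than the model's fineness adds a weight that decays
        # linearly from 1 to 0; contrib counts how many atoms contributed at all.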
for model in self.models:
f = model.fineness
for i in range(model.atoms.shape[0]):
dx = model.atoms[i, 0] - griddot[0]
dy = model.atoms[i, 1] - griddot[1]
dz = model.atoms[i, 2] - griddot[2]
dist = dx * dx + dy * dy + dz * dz
add = max(1 - (dist / f), 0)
if add != 0:
contrib += 1
occ += add
return occ, contrib
def assign_occupancy(self):
"""
For each point of the grid, total occupancy and contribution factor are computed and saved.
        The grid is then ordered by decreasing occupancy.
        The fourth column of the array corresponds to the occupancy of the point and the fifth to
the contribution for this point.
:return sortedgrid: 2d-array, coordinates of each point of the grid
"""
grid = self.grid
nbknots = grid.shape[0]
grid = numpy.append(grid, numpy.zeros((nbknots, 1), dtype="float"), axis=1)
for i in range(nbknots):
occ, contrib = self.calc_occupancy(grid[i, 0:3])
grid[i, 3] = occ
grid[i, 4] = contrib
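        # argsort on the occupancy column (index 3, i.e. -2) gives ascending order;
        # the loop below copies rows in reverse so the grid ends up sorted by decreasing occupancy.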
order = numpy.argsort(grid, axis=0)[:, -2]
sortedgrid = numpy.empty_like(grid)
for i in range(nbknots):
sortedgrid[nbknots - i - 1, :] = grid[order[i], :]
return sortedgrid
def make_header(self):
"""
Create the layout of the pdb file for the averaged model.
"""
header = []
header.append("Number of files averaged : %s\n"%len(self.inputfiles))
for i in self.inputfiles:
header.append(i + "\n")
header.append("Total number of dots in the grid : %s\n"%self.grid.shape[0])
decade = 1
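        # The ATOM line is a fixed-width template: the serial and residue numbers are patched in by slicing
        # here, and coordinates/occupancy/contribution are patched in later in save_aver, so column positions matter.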
for i in range(self.grid.shape[0]):
line = "ATOM CA ASP 1 20.00 2 201\n"
line = line[:7] + "%4.i"%(i + 1) + line[11:]
if not (i + 1) % 10:
decade += 1
line = line[:21] + "%4.i"%decade + line[25:]
header.append(line)
self.header = header
return header
def save_aver(self, filename):
"""
Save the position of each occupied dot of the grid, its occupancy and its contribution
in a pdb file.
:param filename: name of the pdb file to write
"""
if len(self.header) == 0:
self.make_header()
assert self.grid.shape[-1] == 5
nr = 0
with open(filename, "w") as pdbout:
for line in self.header:
if line.startswith("ATOM"):
if nr < self.grid.shape[0] and self.grid[nr, 4] != 0:
coord = "%8.3f%8.3f%8.3f" % tuple(self.grid[nr, 0:3])
occ = "%6.2f" % self.grid[nr, 3]
contrib = "%2.f" % self.grid[nr, 4]
line = line[:30] + coord + occ + line[60:66] + contrib + line[68:]
else:
line = ""
nr += 1
pdbout.write(line)
| kif/freesas | freesas/average.py | Python | mit | 9,116 |
#!/usr/bin/env python
"""
Project-wide application configuration.
DO NOT STORE SECRETS, PASSWORDS, ETC. IN THIS FILE.
They will be exposed to users. Use environment variables instead.
See get_secrets() below for a fast way to access them.
"""
import os
"""
NAMES
"""
# Project name used for display
PROJECT_NAME = 'quotable'
# Project name in urls
# Use dashes, not underscores!
PROJECT_SLUG = 'quotable'
# The name of the repository containing the source
REPOSITORY_NAME = 'quotable'
REPOSITORY_URL = 'git@github.com:nprapps/%s.git' % REPOSITORY_NAME
REPOSITORY_ALT_URL = None # 'git@bitbucket.org:nprapps/%s.git' % REPOSITORY_NAME'
# The name to be used in paths on the server
PROJECT_FILENAME = 'quotable'
"""
DEPLOYMENT
"""
FILE_SERVER = 'tools.apps.npr.org'
S3_BUCKET = 'tools.apps.npr.org'
ASSETS_S3_BUCKET = 'assets.apps.npr.org'
# These variables will be set at runtime. See configure_targets() below
DEBUG = True
"""
COPY EDITING
"""
COPY_GOOGLE_DOC_KEY = '0AlXMOHKxzQVRdHZuX1UycXplRlBfLVB0UVNldHJYZmc'
"""
SHARING
"""
PROJECT_DESCRIPTION = 'An opinionated project template for (mostly) server-less apps.'
SHARE_URL = 'http://%s/%s/' % (S3_BUCKET, PROJECT_SLUG)
TWITTER = {
'TEXT': PROJECT_NAME,
'URL': SHARE_URL,
# Will be resized to 120x120, can't be larger than 1MB
'IMAGE_URL': ''
}
FACEBOOK = {
'TITLE': PROJECT_NAME,
'URL': SHARE_URL,
'DESCRIPTION': PROJECT_DESCRIPTION,
# Should be square. No documented restrictions on size
'IMAGE_URL': TWITTER['IMAGE_URL'],
'APP_ID': '138837436154588'
}
GOOGLE = {
# Thumbnail image for Google News / Search.
# No documented restrictions on resolution or size
'IMAGE_URL': TWITTER['IMAGE_URL']
}
NPR_DFP = {
'STORY_ID': '203618536',
'TARGET': 'News_NPR_News_Investigations',
'ENVIRONMENT': 'NPRTEST',
'TESTSERVER': 'true'
}
"""
SERVICES
"""
GOOGLE_ANALYTICS_ID = 'UA-5828686-4'
| 18F/quotable | app_config.py | Python | mit | 1,915 |
import os
import logging
from django.core.management.base import BaseCommand
from django.core.mail import send_mail
from django.template.loader import get_template
from workshops.models import Badge, Person, Role
logger = logging.getLogger()
class Command(BaseCommand):
help = 'Report instructors activity.'
def add_arguments(self, parser):
parser.add_argument(
'--send-out-for-real', action='store_true', default=False,
help='Send information to the instructors.',
)
parser.add_argument(
'--no-may-contact-only', action='store_true', default=False,
help='Include instructors not willing to be contacted.',
)
parser.add_argument(
'--django-mailing', action='store_true', default=False,
help='Use Django mailing system. This requires some environmental '
'variables to be set, see `settings.py`.',
)
parser.add_argument(
'-s', '--sender', action='store',
default='workshops@carpentries.org',
help='E-mail used in "from:" field.',
)
def foreign_tasks(self, tasks, person, roles):
"""List of other instructors' tasks, per event."""
return [
task.event.task_set.filter(role__in=roles)
.exclude(person=person)
.select_related('person')
for task in tasks
]
def fetch_activity(self, may_contact_only=True):
roles = Role.objects.filter(name__in=['instructor', 'helper'])
instructor_badges = Badge.objects.instructor_badges()
instructors = Person.objects.filter(badges__in=instructor_badges)
instructors = instructors.exclude(email__isnull=True)
if may_contact_only:
instructors = instructors.exclude(may_contact=False)
# let's get some things faster
instructors = instructors.select_related('airport') \
.prefetch_related('task_set', 'lessons',
'award_set', 'badges')
# don't repeat the records
instructors = instructors.distinct()
result = []
for person in instructors:
tasks = person.task_set.filter(role__in=roles) \
.select_related('event', 'role')
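            # Pair each of this person's tasks with the other instructors'/helpers' tasks on the same events.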
record = {
'person': person,
'lessons': person.lessons.all(),
'instructor_awards': person.award_set.filter(
badge__in=person.badges.instructor_badges()
),
'tasks': zip(tasks,
self.foreign_tasks(tasks, person, roles)),
}
result.append(record)
return result
def make_message(self, record):
tmplt = get_template('mailing/instructor_activity.txt')
return tmplt.render(context=record)
def subject(self, record):
# in future we can vary the subject depending on the record details
return 'Updating your Software Carpentry information'
def recipient(self, record):
return record['person'].email
def send_message(self, subject, message, sender, recipient, for_real=False,
django_mailing=False):
if for_real:
if django_mailing:
send_mail(subject, message, sender, [recipient])
else:
command = 'mail -s "{subject}" -r {sender} {recipient}'.format(
subject=subject,
sender=sender,
recipient=recipient,
)
writer = os.popen(command, 'w')
writer.write(message)
writer.close()
if self.verbosity >= 2:
# write only a header
self.stdout.write('-' * 40 + '\n')
self.stdout.write('To: {}\n'.format(recipient))
self.stdout.write('Subject: {}\n'.format(subject))
self.stdout.write('From: {}\n'.format(sender))
if self.verbosity >= 3:
# write whole message out
self.stdout.write(message + '\n')
def handle(self, *args, **options):
# default is dummy run - only actually send mail if told to
send_for_real = options['send_out_for_real']
# by default include only instructors who have `may_contact==True`
no_may_contact_only = options['no_may_contact_only']
# use mailing options from settings.py or the `mail` system command?
django_mailing = options['django_mailing']
# verbosity option is added by Django
self.verbosity = int(options['verbosity'])
sender = options['sender']
results = self.fetch_activity(not no_may_contact_only)
for result in results:
message = self.make_message(result)
subject = self.subject(result)
recipient = self.recipient(result)
self.send_message(subject, message, sender, recipient,
for_real=send_for_real,
django_mailing=django_mailing)
if self.verbosity >= 1:
self.stdout.write('Sent {} emails.\n'.format(len(results)))
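# Example invocations (hedged: the flag names are the ones defined in add_arguments
# above; the command name follows Django's module-name convention):
#
#     python manage.py instructors_activity --verbosity=2              # dry run, print mail headers
#     python manage.py instructors_activity --send-out-for-real --django-mailing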
| swcarpentry/amy | amy/workshops/management/commands/instructors_activity.py | Python | mit | 5,305 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_delete_request(
scope: str,
policy_assignment_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2016-12-01"
accept = "application/json, text/json"
# Construct URL
url = kwargs.pop("template_url", '/{scope}/providers/Microsoft.Authorization/policyAssignments/{policyAssignmentName}')
path_format_arguments = {
"scope": _SERIALIZER.url("scope", scope, 'str', skip_quote=True),
"policyAssignmentName": _SERIALIZER.url("policy_assignment_name", policy_assignment_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_create_request(
scope: str,
policy_assignment_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2016-12-01"
accept = "application/json, text/json"
# Construct URL
url = kwargs.pop("template_url", '/{scope}/providers/Microsoft.Authorization/policyAssignments/{policyAssignmentName}')
path_format_arguments = {
"scope": _SERIALIZER.url("scope", scope, 'str', skip_quote=True),
"policyAssignmentName": _SERIALIZER.url("policy_assignment_name", policy_assignment_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_get_request(
scope: str,
policy_assignment_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2016-12-01"
accept = "application/json, text/json"
# Construct URL
url = kwargs.pop("template_url", '/{scope}/providers/Microsoft.Authorization/policyAssignments/{policyAssignmentName}')
path_format_arguments = {
"scope": _SERIALIZER.url("scope", scope, 'str', skip_quote=True),
"policyAssignmentName": _SERIALIZER.url("policy_assignment_name", policy_assignment_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_for_resource_group_request(
resource_group_name: str,
subscription_id: str,
*,
filter: Optional[str] = None,
**kwargs: Any
) -> HttpRequest:
api_version = "2016-12-01"
accept = "application/json, text/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Authorization/policyAssignments')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if filter is not None:
query_parameters['$filter'] = _SERIALIZER.query("filter", filter, 'str', skip_quote=True)
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_for_resource_request(
resource_group_name: str,
resource_provider_namespace: str,
parent_resource_path: str,
resource_type: str,
resource_name: str,
subscription_id: str,
*,
filter: Optional[str] = None,
**kwargs: Any
) -> HttpRequest:
api_version = "2016-12-01"
accept = "application/json, text/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}/providers/Microsoft.Authorization/policyAssignments')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
"resourceProviderNamespace": _SERIALIZER.url("resource_provider_namespace", resource_provider_namespace, 'str'),
"parentResourcePath": _SERIALIZER.url("parent_resource_path", parent_resource_path, 'str', skip_quote=True),
"resourceType": _SERIALIZER.url("resource_type", resource_type, 'str', skip_quote=True),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if filter is not None:
query_parameters['$filter'] = _SERIALIZER.query("filter", filter, 'str')
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_request(
subscription_id: str,
*,
filter: Optional[str] = None,
**kwargs: Any
) -> HttpRequest:
api_version = "2016-12-01"
accept = "application/json, text/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policyAssignments')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if filter is not None:
query_parameters['$filter'] = _SERIALIZER.query("filter", filter, 'str')
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_delete_by_id_request(
policy_assignment_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2016-12-01"
accept = "application/json, text/json"
# Construct URL
url = kwargs.pop("template_url", '/{policyAssignmentId}')
path_format_arguments = {
"policyAssignmentId": _SERIALIZER.url("policy_assignment_id", policy_assignment_id, 'str', skip_quote=True),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_create_by_id_request(
policy_assignment_id: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2016-12-01"
accept = "application/json, text/json"
# Construct URL
url = kwargs.pop("template_url", '/{policyAssignmentId}')
path_format_arguments = {
"policyAssignmentId": _SERIALIZER.url("policy_assignment_id", policy_assignment_id, 'str', skip_quote=True),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_get_by_id_request(
policy_assignment_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2016-12-01"
accept = "application/json, text/json"
# Construct URL
url = kwargs.pop("template_url", '/{policyAssignmentId}')
path_format_arguments = {
"policyAssignmentId": _SERIALIZER.url("policy_assignment_id", policy_assignment_id, 'str', skip_quote=True),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
class PolicyAssignmentsOperations(object):
"""PolicyAssignmentsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.resource.policy.v2016_12_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def delete(
self,
scope: str,
policy_assignment_name: str,
**kwargs: Any
) -> Optional["_models.PolicyAssignment"]:
"""Deletes a policy assignment.
:param scope: The scope of the policy assignment.
:type scope: str
:param policy_assignment_name: The name of the policy assignment to delete.
:type policy_assignment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PolicyAssignment, or the result of cls(response)
:rtype: ~azure.mgmt.resource.policy.v2016_12_01.models.PolicyAssignment or None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.PolicyAssignment"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request(
scope=scope,
policy_assignment_name=policy_assignment_name,
template_url=self.delete.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PolicyAssignment', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
delete.metadata = {'url': '/{scope}/providers/Microsoft.Authorization/policyAssignments/{policyAssignmentName}'} # type: ignore
@distributed_trace
def create(
self,
scope: str,
policy_assignment_name: str,
parameters: "_models.PolicyAssignment",
**kwargs: Any
) -> "_models.PolicyAssignment":
"""Creates a policy assignment.
Policy assignments are inherited by child resources. For example, when you apply a policy to a
        resource group, that policy is assigned to all resources in the group.
:param scope: The scope of the policy assignment.
:type scope: str
:param policy_assignment_name: The name of the policy assignment.
:type policy_assignment_name: str
:param parameters: Parameters for the policy assignment.
:type parameters: ~azure.mgmt.resource.policy.v2016_12_01.models.PolicyAssignment
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PolicyAssignment, or the result of cls(response)
:rtype: ~azure.mgmt.resource.policy.v2016_12_01.models.PolicyAssignment
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PolicyAssignment"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'PolicyAssignment')
request = build_create_request(
scope=scope,
policy_assignment_name=policy_assignment_name,
content_type=content_type,
json=_json,
template_url=self.create.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PolicyAssignment', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create.metadata = {'url': '/{scope}/providers/Microsoft.Authorization/policyAssignments/{policyAssignmentName}'} # type: ignore
@distributed_trace
def get(
self,
scope: str,
policy_assignment_name: str,
**kwargs: Any
) -> "_models.PolicyAssignment":
"""Gets a policy assignment.
:param scope: The scope of the policy assignment.
:type scope: str
:param policy_assignment_name: The name of the policy assignment to get.
:type policy_assignment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PolicyAssignment, or the result of cls(response)
:rtype: ~azure.mgmt.resource.policy.v2016_12_01.models.PolicyAssignment
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PolicyAssignment"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
scope=scope,
policy_assignment_name=policy_assignment_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PolicyAssignment', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/{scope}/providers/Microsoft.Authorization/policyAssignments/{policyAssignmentName}'} # type: ignore
@distributed_trace
def list_for_resource_group(
self,
resource_group_name: str,
filter: Optional[str] = None,
**kwargs: Any
) -> Iterable["_models.PolicyAssignmentListResult"]:
"""Gets policy assignments for the resource group.
:param resource_group_name: The name of the resource group that contains policy assignments.
:type resource_group_name: str
:param filter: The filter to apply on the operation.
:type filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PolicyAssignmentListResult or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.resource.policy.v2016_12_01.models.PolicyAssignmentListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PolicyAssignmentListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_for_resource_group_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
filter=filter,
template_url=self.list_for_resource_group.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_for_resource_group_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
filter=filter,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("PolicyAssignmentListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_for_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Authorization/policyAssignments'} # type: ignore
@distributed_trace
def list_for_resource(
self,
resource_group_name: str,
resource_provider_namespace: str,
parent_resource_path: str,
resource_type: str,
resource_name: str,
filter: Optional[str] = None,
**kwargs: Any
) -> Iterable["_models.PolicyAssignmentListResult"]:
"""Gets policy assignments for a resource.
:param resource_group_name: The name of the resource group containing the resource. The name is
case insensitive.
:type resource_group_name: str
:param resource_provider_namespace: The namespace of the resource provider.
:type resource_provider_namespace: str
:param parent_resource_path: The parent resource path.
:type parent_resource_path: str
:param resource_type: The resource type.
:type resource_type: str
:param resource_name: The name of the resource with policy assignments.
:type resource_name: str
:param filter: The filter to apply on the operation.
:type filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PolicyAssignmentListResult or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.resource.policy.v2016_12_01.models.PolicyAssignmentListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PolicyAssignmentListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_for_resource_request(
resource_group_name=resource_group_name,
resource_provider_namespace=resource_provider_namespace,
parent_resource_path=parent_resource_path,
resource_type=resource_type,
resource_name=resource_name,
subscription_id=self._config.subscription_id,
filter=filter,
template_url=self.list_for_resource.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_for_resource_request(
resource_group_name=resource_group_name,
resource_provider_namespace=resource_provider_namespace,
parent_resource_path=parent_resource_path,
resource_type=resource_type,
resource_name=resource_name,
subscription_id=self._config.subscription_id,
filter=filter,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("PolicyAssignmentListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_for_resource.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}/providers/Microsoft.Authorization/policyAssignments'} # type: ignore
@distributed_trace
def list(
self,
filter: Optional[str] = None,
**kwargs: Any
) -> Iterable["_models.PolicyAssignmentListResult"]:
"""Gets all the policy assignments for a subscription.
:param filter: The filter to apply on the operation.
:type filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PolicyAssignmentListResult or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.resource.policy.v2016_12_01.models.PolicyAssignmentListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PolicyAssignmentListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
filter=filter,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
filter=filter,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("PolicyAssignmentListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policyAssignments'} # type: ignore
@distributed_trace
def delete_by_id(
self,
policy_assignment_id: str,
**kwargs: Any
) -> "_models.PolicyAssignment":
"""Deletes a policy assignment by ID.
When providing a scope for the assignment, use '/subscriptions/{subscription-id}/' for
subscriptions, '/subscriptions/{subscription-id}/resourceGroups/{resource-group-name}' for
resource groups, and
'/subscriptions/{subscription-id}/resourceGroups/{resource-group-name}/providers/{resource-provider-namespace}/{resource-type}/{resource-name}'
for resources.
:param policy_assignment_id: The ID of the policy assignment to delete. Use the format
'/{scope}/providers/Microsoft.Authorization/policyAssignments/{policy-assignment-name}'.
:type policy_assignment_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PolicyAssignment, or the result of cls(response)
:rtype: ~azure.mgmt.resource.policy.v2016_12_01.models.PolicyAssignment
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PolicyAssignment"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_by_id_request(
policy_assignment_id=policy_assignment_id,
template_url=self.delete_by_id.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PolicyAssignment', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
delete_by_id.metadata = {'url': '/{policyAssignmentId}'} # type: ignore
@distributed_trace
def create_by_id(
self,
policy_assignment_id: str,
parameters: "_models.PolicyAssignment",
**kwargs: Any
) -> "_models.PolicyAssignment":
"""Creates a policy assignment by ID.
Policy assignments are inherited by child resources. For example, when you apply a policy to a
        resource group, that policy is assigned to all resources in the group. When providing a scope
for the assignment, use '/subscriptions/{subscription-id}/' for subscriptions,
'/subscriptions/{subscription-id}/resourceGroups/{resource-group-name}' for resource groups,
and
'/subscriptions/{subscription-id}/resourceGroups/{resource-group-name}/providers/{resource-provider-namespace}/{resource-type}/{resource-name}'
for resources.
:param policy_assignment_id: The ID of the policy assignment to create. Use the format
'/{scope}/providers/Microsoft.Authorization/policyAssignments/{policy-assignment-name}'.
:type policy_assignment_id: str
:param parameters: Parameters for policy assignment.
:type parameters: ~azure.mgmt.resource.policy.v2016_12_01.models.PolicyAssignment
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PolicyAssignment, or the result of cls(response)
:rtype: ~azure.mgmt.resource.policy.v2016_12_01.models.PolicyAssignment
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PolicyAssignment"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'PolicyAssignment')
request = build_create_by_id_request(
policy_assignment_id=policy_assignment_id,
content_type=content_type,
json=_json,
template_url=self.create_by_id.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PolicyAssignment', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_by_id.metadata = {'url': '/{policyAssignmentId}'} # type: ignore
@distributed_trace
def get_by_id(
self,
policy_assignment_id: str,
**kwargs: Any
) -> "_models.PolicyAssignment":
"""Gets a policy assignment by ID.
When providing a scope for the assignment, use '/subscriptions/{subscription-id}/' for
subscriptions, '/subscriptions/{subscription-id}/resourceGroups/{resource-group-name}' for
resource groups, and
'/subscriptions/{subscription-id}/resourceGroups/{resource-group-name}/providers/{resource-provider-namespace}/{resource-type}/{resource-name}'
for resources.
:param policy_assignment_id: The ID of the policy assignment to get. Use the format
'/{scope}/providers/Microsoft.Authorization/policyAssignments/{policy-assignment-name}'.
:type policy_assignment_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PolicyAssignment, or the result of cls(response)
:rtype: ~azure.mgmt.resource.policy.v2016_12_01.models.PolicyAssignment
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PolicyAssignment"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_by_id_request(
policy_assignment_id=policy_assignment_id,
template_url=self.get_by_id.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PolicyAssignment', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_by_id.metadata = {'url': '/{policyAssignmentId}'} # type: ignore
| Azure/azure-sdk-for-python | sdk/resources/azure-mgmt-resource/azure/mgmt/resource/policy/v2016_12_01/operations/_policy_assignments_operations.py | Python | mit | 38,695 |
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 25 16:20:12 2015
@author: Balázs Hidasi
@lastmodified: Loreto Parisi (loretoparisi at gmail dot com)
"""
import sys
import os
import numpy as np
import pandas as pd
import datetime as dt
# To redirect output to file
class Logger(object):
def __init__(self, filename="Default.log"):
self.terminal = sys.stdout
self.log = open(filename, "a")
def write(self, message):
self.terminal.write(message)
self.log.write(message)
def flush(self):
pass
sys.stdout = Logger(os.environ['HOME'] + '/theano.log')
PATH_TO_ORIGINAL_DATA = os.environ['HOME'] + '/'
PATH_TO_PROCESSED_DATA = os.environ['HOME'] + '/'
data = pd.read_csv(PATH_TO_ORIGINAL_DATA + 'yoochoose-clicks.dat', sep=',', header=None, usecols=[0,1,2], dtype={0:np.int32, 1:str, 2:np.int64})
data.columns = ['SessionId', 'TimeStr', 'ItemId']
data['Time'] = data.TimeStr.apply(lambda x: dt.datetime.strptime(x, '%Y-%m-%dT%H:%M:%S.%fZ').timestamp()) #This is not UTC. It does not really matter.
del(data['TimeStr'])
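# Keep only sessions with more than one event and items with at least 5 occurrences,
# then drop sessions that fall under two events after the item filter.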
session_lengths = data.groupby('SessionId').size()
data = data[np.in1d(data.SessionId, session_lengths[session_lengths>1].index)]
item_supports = data.groupby('ItemId').size()
data = data[np.in1d(data.ItemId, item_supports[item_supports>=5].index)]
session_lengths = data.groupby('SessionId').size()
data = data[np.in1d(data.SessionId, session_lengths[session_lengths>=2].index)]
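# Time-based split: sessions that end within the last day (86400 s) of the log form
# the test set; everything earlier is the full training set.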
tmax = data.Time.max()
session_max_times = data.groupby('SessionId').Time.max()
session_train = session_max_times[session_max_times < tmax-86400].index
session_test = session_max_times[session_max_times >= tmax-86400].index
train = data[np.in1d(data.SessionId, session_train)]
test = data[np.in1d(data.SessionId, session_test)]
test = test[np.in1d(test.ItemId, train.ItemId)]
tslength = test.groupby('SessionId').size()
test = test[np.in1d(test.SessionId, tslength[tslength>=2].index)]
print('Full train set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}'.format(len(train), train.SessionId.nunique(), train.ItemId.nunique()))
train.to_csv(PATH_TO_PROCESSED_DATA + 'rsc15_train_full.txt', sep='\t', index=False)
print('Test set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}'.format(len(test), test.SessionId.nunique(), test.ItemId.nunique()))
test.to_csv(PATH_TO_PROCESSED_DATA + 'rsc15_test.txt', sep='\t', index=False)
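# Repeat the same last-day split within the training data to carve out a validation set.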
tmax = train.Time.max()
session_max_times = train.groupby('SessionId').Time.max()
session_train = session_max_times[session_max_times < tmax-86400].index
session_valid = session_max_times[session_max_times >= tmax-86400].index
train_tr = train[np.in1d(train.SessionId, session_train)]
valid = train[np.in1d(train.SessionId, session_valid)]
valid = valid[np.in1d(valid.ItemId, train_tr.ItemId)]
tslength = valid.groupby('SessionId').size()
valid = valid[np.in1d(valid.SessionId, tslength[tslength>=2].index)]
print('Train set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}'.format(len(train_tr), train_tr.SessionId.nunique(), train_tr.ItemId.nunique()))
train_tr.to_csv(PATH_TO_PROCESSED_DATA + 'rsc15_train_tr.txt', sep='\t', index=False)
print('Validation set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}'.format(len(valid), valid.SessionId.nunique(), valid.ItemId.nunique()))
valid.to_csv(PATH_TO_PROCESSED_DATA + 'rsc15_train_valid.txt', sep='\t', index=False) | loretoparisi/docker | theano/rsc15/preprocess.py | Python | mit | 3,325 |
from attributes import *
from constants import *
# ------------------------------------------------------------------------------
#
class UnitManager (Attributes) :
"""
UnitManager class -- manages a pool
"""
# --------------------------------------------------------------------------
#
def __init__ (self, url=None, scheduler='default', session=None) :
Attributes.__init__ (self)
# --------------------------------------------------------------------------
#
def add_pilot (self, pid) :
"""
add (Compute or Data)-Pilot(s) to the pool
"""
raise Exception ("%s.add_pilot() is not implemented" % self.__class__.__name__)
# --------------------------------------------------------------------------
#
def list_pilots (self, ptype=ANY) :
"""
List IDs of data and/or compute pilots
"""
raise Exception ("%s.list_pilots() is not implemented" % self.__class__.__name__)
# --------------------------------------------------------------------------
#
def remove_pilot (self, pid, drain=False) :
"""
Remove pilot(s) (does not cancel the pilot(s), but removes all units
        from the pilot(s)).
`drain` determines what happens to the units which are managed by the
removed pilot(s). If `True`, the pilot removal is delayed until all
units reach a final state. If `False` (the default), then `RUNNING`
        units will be canceled, and `PENDING` units will be re-assigned to the
unit managers for re-scheduling to other pilots.
"""
raise Exception ("%s.remove_pilot() is not implemented" % self.__class__.__name__)
# --------------------------------------------------------------------------
#
def submit_unit (self, description) :
"""
Instantiate and return (Compute or Data)-Unit object(s)
"""
raise Exception ("%s.submit_unit() is not implemented" % self.__class__.__name__)
# --------------------------------------------------------------------------
#
def list_units (self, utype=ANY) :
"""
List IDs of data and/or compute units
"""
raise Exception ("%s.list_units() is not implemented" % self.__class__.__name__)
# --------------------------------------------------------------------------
#
def get_unit (self, uids) :
"""
Reconnect to and return (Compute or Data)-Unit object(s)
"""
raise Exception ("%s.get_unit() is not implemented" % self.__class__.__name__)
# --------------------------------------------------------------------------
#
def wait_unit (self, uids, state=[DONE, FAILED, CANCELED], timeout=-1.0) :
"""
Wait for given unit(s) to enter given state
"""
raise Exception ("%s.wait_unit() is not implemented" % self.__class__.__name__)
# --------------------------------------------------------------------------
#
def cancel_units (self, uids) :
"""
Cancel given unit(s)
"""
raise Exception ("%s.cancel_unit() is not implemented" % self.__class__.__name__)
# ------------------------------------------------------------------------------
#
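# Rough usage sketch for this draft API (identifiers such as pilot_id, unit_id and
# unit_description are illustrative; every method above is intentionally
# unimplemented at this stage):
#
#     um = UnitManager(scheduler='default')
#     um.add_pilot(pilot_id)
#     unit = um.submit_unit(unit_description)
#     um.wait_unit([unit_id], state=[DONE, FAILED, CANCELED], timeout=60.0)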
| JensTimmerman/radical.pilot | docs/architecture/api_draft/unit_manager.py | Python | mit | 3,311 |
import datetime
from django.contrib.contenttypes.models import ContentType
from django.utils import timezone
from .models import Action
def create_action(user, verb, target=None):
now = timezone.now()
last_minute = now - datetime.timedelta(seconds=60)
similar_actions = Action.objects.filter(user_id=user.id, verb=verb, created__gte=last_minute)
if target:
target_ct = ContentType.objects.get_for_model(target)
        # filter the existing queryset so the user/verb/last-minute constraints
        # above still apply to the duplicate check
        similar_actions = similar_actions.filter(target_ct=target_ct, target_id=target.id)
if not similar_actions:
action = Action(user=user, verb=verb, target=target)
action.save()
return True
return False
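# Hypothetical call site (e.g. inside a view): a second identical call within the
# same minute finds a similar action and returns False without saving a duplicate.
#
#     create_action(request.user, 'bookmarked image', image)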
| EssaAlshammri/django-by-example | bookmarks/bookmarks/actions/utils.py | Python | mit | 679 |
# -*- coding: utf-8 -*-
#
# RedPipe documentation build configuration file, created by
# sphinx-quickstart on Wed Apr 19 13:22:45 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
import os
import sys
from os import path
ROOTDIR = path.abspath(os.path.dirname(os.path.dirname(__file__)))
sys.path.insert(0, ROOTDIR)
import redpipe # noqa
extensions = [
'alabaster',
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'RedPipe'
copyright = u'2017, John Loehrer'
author = u'John Loehrer'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = redpipe.__version__
# The full version, including alpha/beta/rc tags.
release = redpipe.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'logo': 'redpipe-logo.gif',
'github_banner': True,
'github_user': '72squared',
'github_repo': 'redpipe',
'travis_button': True,
'analytics_id': 'UA-98626018-1',
}
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html',
'searchbox.html',
]
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'RedPipedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'RedPipe.tex', u'%s Documentation' % project,
u'John Loehrer', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, project, u'%s Documentation' % project,
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, project, u'%s Documentation' % project,
author, project, 'making redis pipelines easy in python',
'Miscellaneous'),
]
suppress_warnings = ['image.nonlocal_uri']
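# Typical local build of these docs (assumed invocation, run from the repository
# root; not something this file defines):
#     sphinx-build -b html docs docs/_build/html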
| 72squared/redpipe | docs/conf.py | Python | mit | 5,400 |
from src.tools.dictionaries import PostLoadedDict
# Utility class
################################################
class ServerImplementationDict(PostLoadedDict):
def __missing__(self, key):
try:
return super().__missing__(key)
except KeyError:
return NotImplemented
################################################
class Server():
def __init__(self, shortname, loader):
# Not preloaded
# loaders must produce dictionaries (or an appropriate iterable)
# with the required keys.
# The reason for this is that code for certain servers need not be loaded
# if it's not going to be used at all
# It also prevents import loop collisions.
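        # Illustrative loader sketch (hypothetical names and values; the keys are
        # the ones consumed by the properties below):
        #     def example_loader():
        #         return {'str_name': 'Example Server', 'str_shortname': 'EX',
        #                 'bool_tester': False, 'cls_auth': ExampleAuth,
        #                 'list_authkeys': ['username', 'password'],
        #                 'cls_player': ExamplePlayer, 'cls_tournament': ExampleTournament}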
global __ServerImplementationDict
self.__data = ServerImplementationDict(loader)
self.__shortname = shortname
@property
def shortname(self):
# This is the only property provided from above
return self.__shortname
def __str__(self):
return str(self.__shortname)
# All other properties must come from canonical sources
# provided by the server loader
# CONSTANTS (STRINGS, BOOLEANS, INTS, ETC.)
@property
def name(self):
return self.__data['str_name']
@property
def internal_shortname(self):
return self.__data['str_shortname']
@property
def beta(self):
return self.__data['bool_tester']
# CLASSES
# 1- Credentials:
@property
def Auth(self): # I really don't know how to call this.
return self.__data['cls_auth']
@property
def auth_fields(self):
return self.__data['list_authkeys']
# 2- Server Elements:
@property
def Player(self):
return self.__data['cls_player']
@property
def Tournament(self):
return self.__data['cls_tournament'] | juanchodepisa/sbtk | SBTK_League_Helper/src/interfacing/servers.py | Python | mit | 1,946 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2008 John Paulett (john -at- paulett.org)
# Copyright (C) 2009, 2011, 2013 David Aguilar (davvid -at- gmail.com)
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
"""Python library for serializing any arbitrary object graph into JSON.
jsonpickle can take almost any Python object and turn the object into JSON.
Additionally, it can reconstitute the object back into Python.
The object must be accessible globally via a module and must
inherit from object (AKA new-style classes).
Create an object::
class Thing(object):
def __init__(self, name):
self.name = name
obj = Thing('Awesome')
Use jsonpickle to transform the object into a JSON string::
import jsonpickle
frozen = jsonpickle.encode(obj)
Use jsonpickle to recreate a Python object from a JSON string::
thawed = jsonpickle.decode(frozen)
.. warning::
Loading a JSON string from an untrusted source represents a potential
security vulnerability. jsonpickle makes no attempt to sanitize the input.
The new object has the same type and data, but essentially is now a copy of
the original.
.. code-block:: python
assert obj.name == thawed.name
If you will never need to load (regenerate the Python class from JSON), you can
pass in the keyword unpicklable=False to prevent extra information from being
added to JSON::
oneway = jsonpickle.encode(obj, unpicklable=False)
result = jsonpickle.decode(oneway)
assert obj.name == result['name'] == 'Awesome'
"""
import sys, os
from music21 import common
sys.path.append(common.getSourceFilePath() + os.path.sep + 'ext')
from jsonpickle import pickler
from jsonpickle import unpickler
from jsonpickle.backend import JSONBackend
from jsonpickle.version import VERSION
# ensure built-in handlers are loaded
__import__('jsonpickle.handlers')
__all__ = ('encode', 'decode')
__version__ = VERSION
json = JSONBackend()
# Export specific JSONPluginMgr methods into the jsonpickle namespace
set_preferred_backend = json.set_preferred_backend
set_encoder_options = json.set_encoder_options
load_backend = json.load_backend
remove_backend = json.remove_backend
enable_fallthrough = json.enable_fallthrough
def encode(value,
unpicklable=True,
make_refs=True,
keys=False,
max_depth=None,
backend=None,
warn=False,
max_iter=None):
"""Return a JSON formatted representation of value, a Python object.
:param unpicklable: If set to False then the output will not contain the
information necessary to turn the JSON data back into Python objects,
but a simpler JSON stream is produced.
:param max_depth: If set to a non-negative integer then jsonpickle will
not recurse deeper than 'max_depth' steps into the object. Anything
deeper than 'max_depth' is represented using a Python repr() of the
object.
:param make_refs: If set to False jsonpickle's referencing support is
disabled. Objects that are id()-identical won't be preserved across
encode()/decode(), but the resulting JSON stream will be conceptually
simpler. jsonpickle detects cyclical objects and will break the cycle
by calling repr() instead of recursing when make_refs is set False.
:param keys: If set to True then jsonpickle will encode non-string
dictionary keys instead of coercing them into strings via `repr()`.
:param warn: If set to True then jsonpickle will warn when it
returns None for an object which it cannot pickle
(e.g. file descriptors).
:param max_iter: If set to a non-negative integer then jsonpickle will
consume at most `max_iter` items when pickling iterators.
>>> encode('my string')
'"my string"'
>>> encode(36)
'36'
>>> encode({'foo': True})
'{"foo": true}'
>>> encode({'foo': True}, max_depth=0)
'"{\\'foo\\': True}"'
>>> encode({'foo': True}, max_depth=1)
'{"foo": "True"}'
"""
if backend is None:
backend = json
return pickler.encode(value,
backend=backend,
unpicklable=unpicklable,
make_refs=make_refs,
keys=keys,
max_depth=max_depth,
warn=warn)
def decode(string, backend=None, keys=False):
"""Convert a JSON string into a Python object.
The keyword argument 'keys' defaults to False.
If set to True then jsonpickle will decode non-string dictionary keys
into python objects via the jsonpickle protocol.
>>> str(decode('"my string"'))
'my string'
>>> decode('36')
36
"""
if backend is None:
backend = json
return unpickler.decode(string, backend=backend, keys=keys)
# json.load(),loads(), dump(), dumps() compatibility
dumps = encode
loads = decode
| arnavd96/Cinemiezer | myvenv/lib/python3.4/site-packages/music21/ext/jsonpickle/__init__.py | Python | mit | 5,049 |
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 09 13:04:53 2015
Measures the resolution of time.clock, time.time and datetime.datetime.now by
taking ~100 samples of each at ~1 ms intervals, then writes the mean, median
and standard deviation of the inter-sample differences (in milliseconds) to a
timestamped text file in C:\Share\sync_clocks.
@author: marcus
"""
import time, datetime
from socket import gethostname, gethostbyname
import os
import numpy as np
def main():
my_path = os.path.join('C:',os.sep,'Share','sync_clocks')
os.chdir(my_path)
# Initial timestamps
t1 = time.clock()
t2 = time.time()
t3 = datetime.datetime.now()
td1 = []
td2 = []
td3 = []
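    # Sample each clock ~100 times at ~1 ms intervals to estimate its resolution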
for i in xrange(100):
td1.append(time.clock()-t1)
td2.append(time.time() -t2)
td3.append((datetime.datetime.now()-t3).total_seconds())
time.sleep(0.001)
# Create text file and write header
t = datetime.datetime.now()
ip = gethostbyname(gethostname()).split('.')[-1]
f_name = '_'.join([ip,'test_clock_res',str(t.year),str(t.month),str(t.day),
str(t.hour),str(t.minute),str(t.second)])
f = open(f_name+'.txt','w')
f.write('%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n' %
('mean_clock','median_clock','sd_clock',
'mean_time','median_time','sd_time',
'mean_datetime','median_datetime','sd_datetime',))
# Write results to text file
f.write('%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\n' %
(np.mean(np.diff(td1))*1000, np.median(np.diff(td1))*1000,np.std(np.diff(td1))*1000,
np.mean(np.diff(td2))*1000, np.median(np.diff(td2))*1000,np.std(np.diff(td2))*1000,
np.mean(np.diff(td3))*1000, np.median(np.diff(td3))*1000,np.std(np.diff(td3))*1000))
f.close()
if __name__ == "__main__":
main() | marcus-nystrom/share-gaze | sync_clocks/test_clock_resolution.py | Python | mit | 1,930 |
blocklevel = ["blockquote", "div", "form", "p", "table", "video", "h1", "h2", "h3", "h4", "h5", "h6", "hr", "details", "article", "header", "main"]
def normalizeEnter(src):
	# Deletes line breaks that the user added for readability, since they cause problems in the HTML output
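	# e.g. "<p>Hi</p>\r<div>x</div>" becomes "<p>Hi</p><div>x</div>"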
for elem in blocklevel:
while src.find("\r<" + elem) > -1:
src = src.replace("\r<" + elem, "<" + elem)
while src.find("</" + elem + ">\r") > -1:
src = src.replace("</" + elem + ">\r", "</" + elem + ">")
while src.find(">\r") > -1:
src = src.replace(">\r", ">") #It is really needed, it created some other bugs?!
while src.find("\r</") > -1:
src = src.replace("\r</", "</") ##It is really needed, it created some other bugs?!
return src
def main(islinput, inputfile, pluginData, globalData):
currentIndex = 0
for item in islinput:
item = normalizeEnter(item) #Deletes not wanted line breaks in order to prevent the problem we have with Markdown.
islinput[currentIndex] = item
currentIndex += 1
return islinput, pluginData, globalData
| ValorNaram/isl | inputchangers/002.py | Python | mit | 1,044 |
import cairo
from gi.repository import Gtk
from gi.repository import Gdk
from pylsner import plugin
class Window(Gtk.Window):
def __init__(self):
super(Window, self).__init__(skip_pager_hint=True,
skip_taskbar_hint=True,
)
self.set_title('Pylsner')
screen = self.get_screen()
self.width = screen.get_width()
self.height = screen.get_height()
self.set_size_request(self.width, self.height)
self.set_position(Gtk.WindowPosition.CENTER)
rgba = screen.get_rgba_visual()
self.set_visual(rgba)
self.override_background_color(Gtk.StateFlags.NORMAL,
Gdk.RGBA(0, 0, 0, 0),
)
self.set_wmclass('pylsner', 'pylsner')
self.set_type_hint(Gdk.WindowTypeHint.DOCK)
self.stick()
self.set_keep_below(True)
drawing_area = Gtk.DrawingArea()
drawing_area.connect('draw', self.redraw)
self.refresh_cnt = 0
self.add(drawing_area)
self.connect('destroy', lambda q: Gtk.main_quit())
self.widgets = []
self.show_all()
def refresh(self, force=False):
self.refresh_cnt += 1
if self.refresh_cnt >= 60000:
self.refresh_cnt = 0
redraw_required = False
for wid in self.widgets:
if (self.refresh_cnt % wid.metric.refresh_rate == 0) or force:
wid.refresh()
redraw_required = True
if redraw_required:
self.queue_draw()
return True
def redraw(self, _, ctx):
ctx.set_antialias(cairo.ANTIALIAS_SUBPIXEL)
for wid in self.widgets:
wid.redraw(ctx)
class Widget:
def __init__(self,
name='default',
metric={'plugin': 'time'},
indicator={'plugin': 'arc'},
fill={'plugin': 'rgba_255'},
):
self.name = name
MetricPlugin = plugin.load_plugin('metrics', metric['plugin'])
self.metric = MetricPlugin(**metric)
IndicatorPlugin = plugin.load_plugin('indicators', indicator['plugin'])
self.indicator = IndicatorPlugin(**indicator)
FillPlugin = plugin.load_plugin('fills', fill['plugin'])
self.fill = FillPlugin(**fill)
def refresh(self):
self.metric.refresh()
self.fill.refresh(self.metric.value)
def redraw(self, ctx):
ctx.set_source(self.fill.pattern)
self.indicator.redraw(ctx, self.metric.value)
| mrmrwat/pylsner | pylsner/gui.py | Python | mit | 2,624 |
# Given the list values = [] , write code that fills the list with each set of numbers below.
# a.1 2 3 4 5 6 7 8 9 10
values = []
for i in range(1, 11):
    values.append(i)
print(values) | futurepr0n/Books-solutions | Python-For-Everyone-Horstmann/Chapter6-Lists/R6.1A.py | Python | mit | 203 |
"""
Tests for L{monotone}.
"""
from hypothesis import given, strategies as st
import errno
from monotone import get_clock_info, monotonic
from monotone import _api, _bindings
import os
import platform
import pytest
needs_posix = pytest.mark.skipif(
os.name == "posix" and platform.system() == "Darwin",
reason="POSIX-only tests (clock_gettime(3))",
)
needs_macos = pytest.mark.skipif(
platform.system() != "Darwin",
reason="macOS-only tests (mach_absolute_time(3))",
)
@pytest.fixture
def errno_value():
"""
A particular errno.
"""
return errno.EINVAL
@pytest.fixture
def strerror(errno_value):
"""
The string representation of a particular errno
"""
return "[Errno {}] Invalid argument".format(errno_value)
@pytest.fixture
def apply_failing_clock_call(monkeypatch):
"""
    Return a callable that patches in a fake system call that fails,
    and returns a list of calls to that fake.
"""
def _apply_failing_clock_call(name, errno_value):
calls = []
def _failing_clock_call(clock_id, timespec):
calls.append((clock_id, timespec))
monkeypatch.setattr(_api.ffi, "errno", errno.EINVAL)
return -1
monkeypatch.setattr(_api, name, _failing_clock_call)
return calls
return _apply_failing_clock_call
@pytest.fixture
def apply_timespec(monkeypatch):
"""
Return a callable that patches in a fake over the specified clock
call that sets the specified resolution and returns a list of
calls to that fake.
"""
def _apply_timespec(name, goal_timespec):
calls = []
def _fake_clock_call(clock_id, timespec):
calls.append((clock_id, timespec))
timespec[0] = goal_timespec[0]
return 0
monkeypatch.setattr(_api, name, _fake_clock_call)
return calls
return _apply_timespec
class TestSimpleNamespace(object):
"""
Tests for L{_SimpleNamespace}.
"""
def test_init(self):
"""
The initializer updates the instance's C{__dict__} with its
keyword arguments.
"""
namespace = _api._SimpleNamespace(x=1)
assert namespace.x == 1
def test_repr(self):
"""
The instance's repr reflects its C{__dict__}
"""
namespace = _api._SimpleNamespace()
namespace.y = 2
assert repr(namespace) == "namespace(y=2)"
def test_eq(self):
"""
Two instances with equal C{__dict__}s are equal.
"""
assert _api._SimpleNamespace(a=1) == _api._SimpleNamespace(a=1)
@needs_posix
class TestGetClockInfoPosix(object):
"""
Tests for L{get_clock_info}.
"""
def test_non_monotonic(self):
"""
L{get_clock_info} only knows about the monotonic clock.
"""
with pytest.raises(ValueError):
get_clock_info("not monotonic")
def test_failure(self, apply_failing_clock_call, errno_value, strerror):
"""
A failure in C{clock_getres} results in an L{OSError} that
presents the failure's errno.
"""
calls = apply_failing_clock_call('_clock_getres', errno_value)
with pytest.raises(OSError) as exc:
get_clock_info("monotonic")
assert len(calls) == 1
assert calls[0][0] == _bindings.lib.CLOCK_MONOTONIC
assert str(exc.value) == strerror
@given(
clock_getres_spec=st.fixed_dictionaries({
"tv_sec": st.sampled_from([0, 1]),
"tv_nsec": st.sampled_from([0, 1]),
}),
)
def test_info(self, clock_getres_spec, apply_timespec):
"""
The reported info always includes a nanosecond resolution when
C{clock_getres} indicates nanosecond resolution.
"""
calls = apply_timespec(
"_clock_getres",
_bindings.ffi.new("struct timespec *", clock_getres_spec),
)
expected_info = _api._SimpleNamespace(
adjustable=False,
implementation="clock_gettime(MONOTONIC)",
monotonic=True,
resolution=None, # checked separately
)
if clock_getres_spec['tv_nsec']:
expected_resolution = 1e-09
else:
expected_resolution = 1.0
info = get_clock_info("monotonic")
resolution, info.resolution = info.resolution, None
assert info == expected_info
assert resolution - expected_resolution == pytest.approx(0.0)
assert len(calls) == 1
assert calls[0][0] == _bindings.lib.CLOCK_MONOTONIC
@needs_macos
class TestGetClockInfoMacOS(object):
"""
Tests for L{get_clock_info}.
"""
def test_non_monotonic(self):
"""
L{get_clock_info} only knows about the monotonic clock.
"""
with pytest.raises(ValueError):
get_clock_info("not monotonic")
def test_info(self):
"""
The reported info always includes a nanosecond resolution.
"""
expected_info = _api._SimpleNamespace(
adjustable=False,
implementation="mach_absolute_time()",
monotonic=True,
resolution=None, # checked separately
)
expected_resolution = 1e-09
info = get_clock_info("monotonic")
resolution, info.resolution = info.resolution, None
assert info == expected_info
assert resolution - expected_resolution == pytest.approx(0.0)
@needs_posix
def test_monotonic_fails_posix(apply_failing_clock_call,
errno_value,
strerror):
"""
A failure in C{clock_gettime} results in an L{OSError} that
presents the failure's errno.
"""
calls = apply_failing_clock_call('_clock_gettime', errno_value)
with pytest.raises(OSError) as exc:
monotonic()
assert len(calls) == 1
assert calls[0][0] == _bindings.lib.CLOCK_MONOTONIC
assert str(exc.value) == strerror
@needs_posix
@given(
clock_gettime_spec=st.fixed_dictionaries({
"tv_sec": st.integers(min_value=0, max_value=2 ** 32 - 1),
"tv_nsec": st.integers(min_value=0, max_value=2 ** 32 - 1),
}),
)
def test_clock(clock_gettime_spec, apply_timespec):
"""
For any given time resolution, the monotonic time equals the
sum of the seconds and nanoseconds.
"""
clock_gettime_calls = apply_timespec(
'_clock_gettime',
_bindings.ffi.new("struct timespec *", clock_gettime_spec),
)
    # we expect a float, representing the current seconds plus the
    # nanoseconds (scaled down by a billion) iff the resolution is accurate
    # to the nanosecond.
expected = float(clock_gettime_spec['tv_sec']) + (
clock_gettime_spec['tv_nsec'] * 1e-09)
result = monotonic()
assert result - expected == pytest.approx(0.0)
assert clock_gettime_calls[0][0] == _bindings.lib.CLOCK_MONOTONIC
def test_clock_increases():
"""
A monotonic moment is never greater than a succeeding monotonic
moment.
"""
assert monotonic() <= monotonic()
| mrwsr/monotone | test/test_monotone.py | Python | mit | 7,143 |
"""
http://community.topcoder.com/stat?c=problem_statement&pm=1667
Single Round Match 147 Round 1 - Division II, Level One
"""
class CCipher:
def decode(self, cipherText, shift):
a = ord('A')
decoder = [a + (c - shift if c >= shift else c - shift + 26) for c in range(26)]
plain = [chr(decoder[ord(c) - a]) for c in cipherText]
return ''.join(plain)
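# Hedged usage sketch, not part of the original solution: decoding the sample
# ciphertext "VHFUHWPHVVDJH" with shift 3 should give "SECRETMESSAGE".
if __name__ == '__main__':
    print(CCipher().decode("VHFUHWPHVVDJH", 3))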
| warmsea/tc-srm | srm147/CCipher.py | Python | mit | 389 |
from hwt.synthesizer.rtlLevel.extract_part_drivers import extract_part_drivers
from hwt.synthesizer.rtlLevel.remove_unconnected_signals import removeUnconnectedSignals
from hwt.synthesizer.rtlLevel.mark_visibility_of_signals_and_check_drivers import markVisibilityOfSignalsAndCheckDrivers
class DummyPlatform():
"""
    :note: all processors have to be callable with only one parameter,
    which is the actual Unit/RtlNetlist instance
"""
def __init__(self):
self.beforeToRtl = []
self.beforeToRtlImpl = []
self.afterToRtlImpl = []
self.beforeHdlArchGeneration = [
extract_part_drivers,
removeUnconnectedSignals,
markVisibilityOfSignalsAndCheckDrivers,
]
self.afterToRtl = []
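# Hedged usage sketch: processors are plain callables taking the netlist/unit,
# as the class docstring notes; the no-op lambda is made up for illustration.
if __name__ == "__main__":
    plat = DummyPlatform()
    plat.beforeToRtlImpl.append(lambda netlist: None)
    print(len(plat.beforeHdlArchGeneration))  # the three built-in passes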
| Nic30/HWToolkit | hwt/synthesizer/dummyPlatform.py | Python | mit | 775 |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
RJUST = 12
def format_fans(fans):
return format_line(prefix='fans'.rjust(RJUST), values=fans)
def format_rpms(rpms):
return format_line(prefix='rpms'.rjust(RJUST), values=rpms)
def format_pwms(pwms):
return format_line(prefix='pwms'.rjust(RJUST), values=pwms)
def format_tmps(tmps):
return format_line(prefix='temps'.rjust(RJUST), values=tmps)
def format_names(names):
return format_line(prefix='names'.rjust(RJUST), values=names)
def format_ports(ports):
return format_line(prefix='ports'.rjust(RJUST), values=ports)
def format_temps(temps):
return format_line(prefix='temps'.rjust(RJUST), values=temps)
def format_ambients(ambients):
return format_line(prefix='ambients'.rjust(RJUST), values=ambients)
def format_limits(limits):
return format_line(prefix='limits'.rjust(RJUST), values=limits)
def format_buffers(buffers):
return format_line(prefix='buffers'.rjust(RJUST), values=buffers)
def format_headrooms(headrooms):
return format_line(prefix='headrooms'.rjust(RJUST), values=headrooms)
def format_directions(directions):
return format_line(prefix='directions'.rjust(RJUST), values=directions)
def format_differences(differences):
return format_line(prefix='differences'.rjust(RJUST), values=differences)
def format_pwms_new(pwms_new):
return format_line(prefix='new pwms'.rjust(RJUST), values=pwms_new)
def format_line(prefix, values):
line = ''
line += prefix
line += ': '
line += '['
for value in values:
try:
if value >= 1:
value = int(round(value, 0))
if 1 > value != 0:
value = str(value)[1:4].ljust(3, '0')
except TypeError:
# value is None
pass
value = str(value) if value is not None else ''
line += value.rjust(6)
line += ', '
line = line[:-len(', ')]
line += ']'
return line
| Bengt/AL-FanControl | python/fancontrol/ui/cli_util.py | Python | mit | 2,042 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Package'
db.create_table(u'api_package', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=500, db_index=True)),
('url', self.gf('django.db.models.fields.CharField')(unique=True, max_length=500)),
('created_at', self.gf('django.db.models.fields.DateField')(auto_now_add=True, blank=True)),
))
db.send_create_signal(u'api', ['Package'])
# Adding unique constraint on 'Package', fields ['name', 'url']
db.create_unique(u'api_package', ['name', 'url'])
def backwards(self, orm):
# Removing unique constraint on 'Package', fields ['name', 'url']
db.delete_unique(u'api_package', ['name', 'url'])
# Deleting model 'Package'
db.delete_table(u'api_package')
models = {
u'api.package': {
'Meta': {'unique_together': "(('name', 'url'),)", 'object_name': 'Package'},
'created_at': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '500', 'db_index': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '500'})
}
}
complete_apps = ['api'] | toranb/django-bower-registry | api/migrations/0001_initial.py | Python | mit | 1,703 |
from __future__ import absolute_import
import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from plotly.tests.utils import compare_dict
from plotly.tests.test_optional.optional_utils import run_fig
from plotly.tests.test_optional.test_matplotlylib.data.annotations import *
def test_annotations():
fig, ax = plt.subplots()
ax.plot([1, 2, 3], 'b-')
ax.plot([3, 2, 1], 'b-')
ax.text(0.001, 0.999,
'top-left', transform=ax.transAxes, va='top', ha='left')
ax.text(0.001, 0.001,
'bottom-left', transform=ax.transAxes, va='baseline', ha='left')
ax.text(0.999, 0.999,
'top-right', transform=ax.transAxes, va='top', ha='right')
ax.text(0.999, 0.001,
'bottom-right', transform=ax.transAxes, va='baseline', ha='right')
renderer = run_fig(fig)
for data_no, data_dict in enumerate(renderer.plotly_fig['data']):
equivalent, msg = compare_dict(data_dict,
ANNOTATIONS['data'][data_no])
assert equivalent, msg
for no, note in enumerate(renderer.plotly_fig['layout']['annotations']):
equivalent, msg = compare_dict(note,
ANNOTATIONS['layout']['annotations'][no])
assert equivalent, msg
| ee-in/python-api | plotly/tests/test_optional/test_matplotlylib/test_annotations.py | Python | mit | 1,342 |
from distutils.core import setup
setup(
# Application name:
name="streaker",
# Version number (initial):
version="0.0.1",
# Application author details:
author="Aldi Alimucaj",
author_email="aldi.alimucaj@gmail.com",
# Packages
packages=["streaker"],
scripts=['bin/streaker'],
# Include additional files into the package
include_package_data=True,
# Details
url="http://pypi.python.org/pypi/Streaker_v001/",
#
license="MIT",
description="GitHub streak manipulator",
# long_description=open("README.txt").read(),
# Dependent packages (distributions)
install_requires=[
# "",
],
)
| aldialimucaj/Streaker | setup.py | Python | mit | 680 |
from __future__ import absolute_import, division, print_function, unicode_literals
# Statsd client. Loosely based on the version by Steve Ivy <steveivy@gmail.com>
import logging
import random
import socket
import time
from contextlib import contextmanager
log = logging.getLogger(__name__)
class StatsD(object):
def __init__(self, host='localhost', port=8125, enabled=True, prefix=''):
self.addr = None
self.enabled = enabled
if enabled:
self.set_address(host, port)
self.prefix = prefix
self.udp_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def set_address(self, host, port=8125):
try:
self.addr = (socket.gethostbyname(host), port)
except socket.gaierror:
self.addr = None
self.enabled = False
@contextmanager
def timed(self, stat, sample_rate=1):
log.debug('Entering timed context for %r' % (stat,))
start = time.time()
yield
duration = int((time.time() - start) * 1000)
log.debug('Exiting timed context for %r' % (stat,))
self.timing(stat, duration, sample_rate)
def timing(self, stats, time, sample_rate=1):
"""
Log timing information
"""
unit = 'ms'
log.debug('%r took %s %s' % (stats, time, unit))
self.update_stats(stats, "%s|%s" % (time, unit), sample_rate)
def increment(self, stats, sample_rate=1):
"""
Increments one or more stats counters
"""
self.update_stats(stats, 1, sample_rate)
def decrement(self, stats, sample_rate=1):
"""
Decrements one or more stats counters
"""
self.update_stats(stats, -1, sample_rate)
def update_stats(self, stats, delta=1, sampleRate=1):
"""
Updates one or more stats counters by arbitrary amounts
"""
if not self.enabled or self.addr is None:
return
if type(stats) is not list:
stats = [stats]
data = {}
for stat in stats:
data["%s%s" % (self.prefix, stat)] = "%s|c" % delta
self.send(data, sampleRate)
def send(self, data, sample_rate):
sampled_data = {}
if sample_rate < 1:
if random.random() <= sample_rate:
for stat, value in data.items():
sampled_data[stat] = "%s|@%s" % (value, sample_rate)
else:
sampled_data = data
try:
for stat, value in sampled_data.items():
self.udp_sock.sendto("%s:%s" % (stat, value), self.addr)
except Exception as e:
log.exception('Failed to send data to the server: %r', e)
if __name__ == '__main__':
sd = StatsD()
for i in range(1, 100):
sd.increment('test')
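    # Hedged usage sketch (not in the original demo): timed() measures how long
    # the wrapped block takes and reports it via timing(); the stat name is made up.
    with sd.timed('example.sleep'):
        time.sleep(0.01)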
| smarkets/smk_python_sdk | smarkets/statsd.py | Python | mit | 2,824 |
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='fcit',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='1.2.0',
description='A decision-tree based conditional independence test',
long_description=long_description,
# The project's main homepage.
url = 'https://github.com/kjchalup/fcit',
# Author details
author = 'Krzysztof Chalupka',
author_email = 'janchatko@gmail.com',
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
# What does your project relate to?
keywords='machine learning statistics decision trees',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
# py_modules=["my_module"],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['numpy', 'sklearn', 'scipy', 'joblib'],
)
| kjchalup/dtit | setup.py | Python | mit | 2,610 |
import pandas as pd
import os
import time
from datetime import datetime
import re
from time import mktime
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import style
style.use("dark_background")
# path = "X:/Backups/intraQuarter" # for Windows with X files :)
# if git clone'ed then use relative path,
# assuming you extracted the downloaded zip into this project's folder:
path = "intraQuarter"
def Key_Stats(gather="Total Debt/Equity (mrq)"):
statspath = path+'/_KeyStats'
stock_list = [x[0] for x in os.walk(statspath)]
df = pd.DataFrame(
columns = [
'Date',
'Unix',
'Ticker',
'DE Ratio',
'Price',
'stock_p_change',
'SP500',
'sp500_p_change',
'Difference',
'Status'
]
)
sp500_df = pd.DataFrame.from_csv("YAHOO-INDEX_GSPC.csv")
ticker_list = []
for each_dir in stock_list[1:25]:
each_file = os.listdir(each_dir)
# ticker = each_dir.split("\\")[1] # Windows only
# ticker = each_dir.split("/")[1] # this didn't work so do this:
ticker = os.path.basename(os.path.normpath(each_dir))
# print(ticker) # uncomment to verify
ticker_list.append(ticker)
starting_stock_value = False
starting_sp500_value = False
if len(each_file) > 0:
for file in each_file:
date_stamp = datetime.strptime(file, '%Y%m%d%H%M%S.html')
unix_time = time.mktime(date_stamp.timetuple())
full_file_path = each_dir + '/' + file
source = open(full_file_path,'r').read()
try:
try:
value = float(source.split(gather+':</td><td class="yfnc_tabledata1">')[1].split('</td>')[0])
except:
value = float(source.split(gather+':</td>\n<td class="yfnc_tabledata1">')[1].split('</td>')[0])
try:
sp500_date = datetime.fromtimestamp(unix_time).strftime('%Y-%m-%d')
row = sp500_df[(sp500_df.index == sp500_date)]
sp500_value = float(row['Adjusted Close'])
except:
sp500_date = datetime.fromtimestamp(unix_time-259200).strftime('%Y-%m-%d')
row = sp500_df[(sp500_df.index == sp500_date)]
sp500_value = float(row['Adjusted Close'])
try:
stock_price = float(source.split('</small><big><b>')[1].split('</b></big>')[0])
except:
try:
stock_price = (source.split('</small><big><b>')[1].split('</b></big>')[0])
#print(stock_price)
stock_price = re.search(r'(\d{1,8}\.\d{1,8})', stock_price)
stock_price = float(stock_price.group(1))
#print(stock_price)
except:
try:
stock_price = (source.split('<span class="time_rtq_ticker">')[1].split('</span>')[0])
#print(stock_price)
stock_price = re.search(r'(\d{1,8}\.\d{1,8})', stock_price)
stock_price = float(stock_price.group(1))
#print(stock_price)
except:
print('wtf stock price lol',ticker,file, value)
time.sleep(5)
if not starting_stock_value:
starting_stock_value = stock_price
if not starting_sp500_value:
starting_sp500_value = sp500_value
stock_p_change = ((stock_price - starting_stock_value) / starting_stock_value) * 100
sp500_p_change = ((sp500_value - starting_sp500_value) / starting_sp500_value) * 100
location = len(df['Date'])
difference = stock_p_change-sp500_p_change
if difference > 0:
status = "outperform"
else:
status = "underperform"
df = df.append({'Date':date_stamp,
'Unix':unix_time,
'Ticker':ticker,
'DE Ratio':value,
'Price':stock_price,
'stock_p_change':stock_p_change,
'SP500':sp500_value,
'sp500_p_change':sp500_p_change,
############################
'Difference':difference,
'Status':status},
ignore_index=True)
except Exception as e:
pass
#print(ticker,e,file, value)
#print(ticker_list)
#print(df)
for each_ticker in ticker_list:
try:
plot_df = df[(df['Ticker'] == each_ticker)]
plot_df = plot_df.set_index(['Date'])
if plot_df['Status'][-1] == 'underperform':
color = 'r'
else:
color = 'g'
plot_df['Difference'].plot(label=each_ticker, color=color)
plt.legend()
except Exception as e:
print(str(e))
plt.show()
save = gather.replace(' ','').replace(')','').replace('(','').replace('/','')+str('.csv')
print(save)
df.to_csv(save)
Key_Stats()
| PythonProgramming/Support-Vector-Machines---Basics-and-Fundamental-Investing-Project | p10.py | Python | mit | 4,949 |
from __future__ import absolute_import
from .base import WhiteNoise
__version__ = '2.0.3'
__all__ = ['WhiteNoise']
| KnockSoftware/whitenoise | whitenoise/__init__.py | Python | mit | 118 |
import struct
''' Refer to docs for all the exact formats. There are many so check them out before converting things yourself '''
''' If there's a specific offset you want to do things from, use pack_into and unpack_from from the docs '''
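''' A hedged sketch of the offset-based variants (buffer size and offset below are arbitrary, purely illustrative) '''
buf = bytearray(16)
struct.pack_into("<Q", buf, 8, 1234) #Write an 8 byte little endian int starting at offset 8
print "Read back from offset 8", struct.unpack_from("<Q", buf, 8)[0]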
#Integer to string
i1= 1234
print "Int to string as 8 byte little endian", repr(struct.pack("<Q",i1))
print "Int to string as 8 byte big endian", repr(struct.pack(">Q",i1))
#String to integer. Make sure size of destination matches the length of the string
s1= '1234'
print "String to 4 byte integer little endian", struct.unpack("<i", s1)
print "String to 4 byte integer big endian", struct.unpack(">i", s1)
''' Whenever you want to convert to and from binary, think of binascii '''
import binascii
h1= binascii.b2a_hex(s1)
print "String to hex", h1
uh1= binascii.a2b_hex(h1)
print "Hex to string, even a binary string", uh1
| arvinddoraiswamy/LearnPython | 17.py | Python | mit | 867 |
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Jérémie DECOCK (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
TODO...
"""
__all__ = ['GreedyPlayer']
import random
from jdhp.tictactoe.player.abstract import Player
class GreedyPlayer(Player):
"""
TODO...
"""
def play(self, game, state):
"""
TODO...
"""
action_list = game.getSetOfValidActions(state)
choosen_action = None
# Choose actions that lead to immediate victory...
for action in action_list:
next_state = game.nextState(state, action, self)
if game.hasWon(self, next_state):
choosen_action = action
break
# ... otherwise choose randomly
if choosen_action is None:
#print("randomly choose action") # debug
choosen_action = random.choice(action_list)
return choosen_action
| jeremiedecock/tictactoe-py | jdhp/tictactoe/player/greedy.py | Python | mit | 1,951 |
__author__ = 'besta'
class BestaPlayer:
def __init__(self, fichier, player):
self.fichier = fichier
self.grille = self.getFirstGrid()
self.best_hit = 0
self.players = player
def getFirstGrid(self):
"""
Implements function to get the first grid.
:return: the grid.
"""
li = []
with open(self.fichier, 'r') as fi:
for line in fi.readlines():
li.append(line)
return li
    def updateGrid(self):
        """
        Implements function to refresh the grid with the values written to the file since round n-1.
        """
        # Strings are immutable, so re-read the whole file instead of assigning chars in place.
        self.grille = self.getFirstGrid()
def grilleEmpty(self):
"""
Implement function to check if the grid is empty.
"""
for line in self.grille:
for car in line[:len(line) - 1]:
if car != '0':
return False
return True
def checkLines(self, player, inARow):
"""
        Implements function to check the current lines setup to evaluate the best combination.
:param player: check for your numbers (your player number) or those of your opponent.
:param inARow: how many tokens in a row (3 or 2).
:return: true or false
"""
count = 0
flag = False
for line_number, line in enumerate(self.grille):
count = 0
for car_pos, car in enumerate(line[:len(line) - 1]):
if int(car) == player and not flag:
count = 1
flag = True
elif int(car) == player and flag:
count += 1
if count == inARow:
if car_pos - inARow >= 0 and self.canPlayLine(line_number, car_pos - inARow):
return True, car_pos - inARow
if car_pos + 1 <= 6 and self.canPlayLine(line_number, car_pos + 1):
return True, car_pos + 1
else:
count = 0
return False, 0
def canPlayLine(self, line, col):
"""
Function to check if we can fill the line with a token.
:param line: which line
:param col: which column
:return: true or false
"""
if line == 5:
return self.grille[line][col] == '0'
else:
return self.grille[line][col] == '0' and self.grille[line + 1][col] != '0'
def changeColumnInLines(self):
"""
        Implements function to transform columns into lines to make tests easier.
        :return: a reversed matrix
"""
column = []
for x in xrange(7):
col = ''
for y in xrange(6):
col += self.grille[y][x]
column.append(col)
return column
def checkColumns(self, player, inARow):
"""
        Implements function to check the current columns setup to evaluate the best combination.
:param player: check for your numbers (your player number) or those of your opponent.
:param inARow: how many tokens in a row (3 or 2).
:return: true or false
"""
column = self.changeColumnInLines()
count = 0
flag = False
for col_number, line in enumerate(column):
count = 0
for car_pos, car in enumerate(line):
if int(car) == player and not flag:
count = 1
flag = True
elif int(car) == player and flag:
count += 1
if count == inARow and car_pos - inARow >= 0 and self.grille[car_pos - inARow][col_number] == '0':
return True, col_number
else:
count = 0
return False, 0
def checkDiagonalLeftToRight(self, player, inARow):
"""
        Implements function to check the current diagonal to evaluate the best combination.
:param player: check for your numbers or opponent ones.
:param inARow: how many tokens in a row (3 or 2).
:return:
"""
x = 3
flag = False
while x < 6:
count = 0
x_int = x
y_int = 0
while x_int >= 0:
if int(self.grille[x_int][y_int]) == player and not flag:
count = 1
flag = True
elif int(self.grille[x_int][y_int]) == player and flag:
count += 1
if count == inARow and y_int + 1 <= 6 and x_int - 1 >= 0 and self.grille[x_int][y_int + 1] != '0':
return True, y_int + 1
else:
count = 0
flag = False
x_int -= 1
y_int += 1
x += 1
y = 1
flag = False
while y <= 3:
count = 0
x_int = 5
y_int = y
while y_int <= 6 and x_int >= 0:
if int(self.grille[x_int][y_int]) == player and not flag:
count = 1
flag = True
elif int(self.grille[x_int][y_int]) == player and flag:
count += 1
                    if count == inARow and y_int + 1 <= 6 and x_int - 1 >= 0 and self.grille[x_int][y_int + 1] != '0':
return True, y_int + 1
else:
count = 0
                    flag = False
x_int -= 1
y_int += 1
y += 1
return False, 0
def checkDiagonalRightToLeft(self, player, inARow):
"""
        Implements function to check the current diagonal to evaluate the best combination.
:param player: check for your numbers or opponent ones.
:param inARow: how many tokens in a row (3 or 2).
:return:
"""
x = 3
flag = False
while x < 6:
count = 0
x_int = x
y_int = 6
while x_int >= 0:
if int(self.grille[x_int][y_int]) == player and not flag:
count = 1
flag = True
elif int(self.grille[x_int][y_int]) == player and flag:
count += 1
if count == inARow and y_int - 1 >= 0 and x_int - 1 >= 0 and self.grille[x_int][y_int - 1] != '0':
return True, y_int - 1
else:
count = 0
flag = False
x_int -= 1
y_int -= 1
x += 1
y = 5
flag = False
        while y >= 3:
count = 0
x_int = 5
y_int = y
while y_int >= 3 and x_int >= 0:
if int(self.grille[x_int][y_int]) == player and not flag:
count = 1
flag = True
elif int(self.grille[x_int][y_int]) == player and flag:
count += 1
                    if count == inARow and y_int - 1 >= 0 and x_int - 1 >= 0 and self.grille[x_int][y_int - 1] != '0':
return True, y_int - 1
else:
count = 0
                    flag = False
x_int -= 1
y_int -= 1
y -= 1
return False, 0
def checkDiagonals(self, player, inARow):
"""
        Calls the two diagonal check functions.
        :return: (True, column) where to play, or (False, 0) if the searched pattern is not found.
"""
check = self.checkDiagonalLeftToRight(player, inARow)
if check[0]:
return check
else:
return self.checkDiagonalRightToLeft(player, inARow)
def playSomeColumn(self, player, inARow):
"""
        Calls all check functions for a given player and number of tokens.
:param player: which player
:param inARow: how many token
:return: true or false (col number if true)
"""
methods = {'checklines': self.checkLines, 'checkcolumn': self.checkColumns, 'checkdiagonal': self.checkDiagonals}
for key, function in methods.items():
which_col = function(player, inARow)
if which_col[0]:
return which_col
return False, 0
def findFirstColumnEmpty(self):
"""
        Implements function to get the first column where a slot remains.
:return: the column
"""
for col in xrange(7):
if self.grille[0][col] == '0':
return col
return -1
def decideColumn(self):
"""
        Implements the main function: decides the best column to play.
:return: an int, representing the column where we play
"""
if self.grilleEmpty():
return 3
li_sequence = [3, 2, 1]
li_players = [self.players[0], self.players[1]]
for sequence in li_sequence:
for player in li_players:
choosen_col = self.playSomeColumn(player, sequence)
if choosen_col[0]:
return choosen_col[1]
return self.findFirstColumnEmpty()
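# Hedged, self-contained usage sketch: the demo file name below is made up; the
# real game supplies the path to the shared grid file and the two player numbers.
if __name__ == '__main__':
    with open('grille_demo.txt', 'w') as fi:
        for _ in range(6):
            fi.write('0000000\n')
    demo = BestaPlayer('grille_demo.txt', (1, 2))
    print(demo.decideColumn())  # empty grid -> plays the middle column (3)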
| KeserOner/puissance4 | bestaplayer.py | Python | mit | 9,518 |
# User info wrapper object
import logging
class User(object):
"""
Wrapper object around an entry in users.json. Behaves like a read-only dictionary if
asked, but adds some useful logic to decouple the front end from the JSON structure.
"""
_NAME_KEYS = ["display_name", "real_name"]
_DEFAULT_IMAGE_KEY = "image_512"
def __init__(self, raw_data):
self._raw = raw_data
def __getitem__(self, key):
return self._raw[key]
@property
def display_name(self):
"""
Find the most appropriate display name for a user: look for a "display_name", then
a "real_name", and finally fall back to the always-present "name".
"""
for k in self._NAME_KEYS:
if self._raw.get(k):
return self._raw[k]
if "profile" in self._raw and self._raw["profile"].get(k):
return self._raw["profile"][k]
return self._raw["name"]
@property
def email(self):
"""
Shortcut property for finding the e-mail address or bot URL.
"""
if "profile" in self._raw:
email = self._raw["profile"].get("email")
elif "bot_url" in self._raw:
email = self._raw["bot_url"]
else:
email = None
if not email:
logging.debug("No email found for %s", self._raw.get("name"))
return email
def image_url(self, pixel_size=None):
"""
Get the URL for the user icon in the desired pixel size, if it exists. If no
size is supplied, give the URL for the full-size image.
"""
if "profile" not in self._raw:
return
profile = self._raw["profile"]
if (pixel_size):
img_key = "image_%s" % pixel_size
if img_key in profile:
return profile[img_key]
return profile[self._DEFAULT_IMAGE_KEY]
def deleted_user(id):
"""
Create a User object for a deleted user.
"""
deleted_user = {
"id": id,
"name": "deleted-" + id,
"deleted": True,
"is_bot": False,
"is_app_user": False,
}
return User(deleted_user)
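# Hedged usage sketch: the entry below mimics the shape of a users.json record,
# but every value is made up for illustration.
if __name__ == "__main__":
    raw = {
        "id": "U123",
        "name": "jdoe",
        "profile": {
            "real_name": "Jane Doe",
            "email": "jane@example.com",
            "image_512": "https://example.com/jdoe_512.png",
        },
    }
    user = User(raw)
    print(user.display_name)    # falls back to profile["real_name"] -> "Jane Doe"
    print(user.email)           # "jane@example.com"
    print(user.image_url(512))  # the image_512 URL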
| hfaran/slack-export-viewer | slackviewer/user.py | Python | mit | 2,188 |
#!/hpf/largeprojects/ccmbio/naumenko/tools/bcbio/anaconda/bin/python
"""
Looks for a specific sample
"""
import re
import sys
import os
import os.path
sample = sys.argv[1]
family,sample_only = sample.split("_")
match = re.match(r'\d+', family)
if match:
prefix=str(int(match.group(0))/100)
report_path = prefix+'x/'+family
report=0
bam=0
errors = []
if os.path.isfile(report_path+'/'+family+'.csv'):
#print("Report exists")
report=1
else:
errors.append('Error: no report')
if os.path.isfile(report_path+'/'+sample+'.bam'):
#print("Bam exists")
bam=1
else:
errors.append(' ERROR: no bam')
if (bam==1 and report==1):
print(sample+'\t'+os.getcwd()+"/"+report_path+"\t"+os.getcwd()+"/"+report_path+'/'+sample+'.bam')
else:
print(sample+'\t'+' '.join(errors))
else:
    print("Family ID does not start with a digit")
| naumenko-sa/cre | cre.locate_sample.py | Python | mit | 895 |
# -*- coding: utf-8 -*-
"""
Date: 2/2/2017
Team: Satoshi Nakamoto
@Authors: Alex Levering and Hector Muro
Non-standard dependencies:
* Twython
* NLTK
* Folium
* Geocoder
* psycopg2
TO DO BEFOREHAND:
The following steps are non-automatable and have to be performed manually.
* Have the NLTK vader lexicon locally (nltk.download("vader_lexicon"))
* Have PostGIS installed on PostgreSQL
* Set the file paths specified below to wherever your folder is
* Upgrade folium to the latest version (0.2.1)
"""
# Naming options for tables, intermediates and outputs are available in the wrapper.
if __name__ == "__main__":
"""
The tool is not supplied with Tweets out-of-the-box. Set 'gather_data' to True and leave it
    running for a while. If 'loop_gathering' is False it will terminate in a minute or so and create a map from the results automatically.
This tool was tested and intended for OSGeo Live installs used in the GeoScripting course.
"""
import tweetAnalysisWrapper
tweetAnalysisWrapper.performTweetResearch(folder_path = r"/home/user/git/SatoshiNakamotoGeoscripting/Final_assignment",
defaultdb = "postgres", # Making a new database requires connecting to an existing database
user = "user", # PostgreSQL username (user is default value on OSGeo Live)
password = "user", # PostgreSQL password (user is default on OSGeo Live)
ouputdb = "tweet_research", # Specify the output database that is to be created
tweet_table_name = "tweets", # Output table where the Tweets are stored
gather_data = True, # When True: Will gather data from the Twitter stream
search_terms = ["Trump"], # Twitter terms to search for
loop_gathering = False, # When True: Will not stop gathering when terminated - use for prolonged gathering
APP_KEY = "", # Get these from developer.twitter.com when you make your application
APP_SECRET = "",
OAUTH_TOKEN = "",
OAUTH_TOKEN_SECRET = "")
| SatoshiNakamotoGeoscripting/SatoshiNakamotoGeoscripting | Final_assignment/main.py | Python | mit | 2,489 |
#!/usr/bin/python
#coding: utf-8
from __future__ import unicode_literals
import os
import unittest
import xlrd
import msp.schedule_parser as schedule_parser
__author__ = "Andrey Konovalov"
__copyright__ = "Copyright (C) 2014 Andrey Konovalov"
__license__ = "MIT"
__version__ = "0.1"
this_dir, this_filename = os.path.split(__file__)
SCHEDULE_PATH = os.path.join(this_dir, "..", "data", "2013_fall", "4kurs.xls")
class WeekdayRangeTest(unittest.TestCase):
def setUp(self):
self.schedule = schedule_parser.Schedule()
self.schedule.Parse(SCHEDULE_PATH)
def runTest(self):
self.assertEqual(self.schedule.GetWeekdayRange(0), (4, 11))
self.assertEqual(self.schedule.GetWeekdayRange(1), (12, 19))
self.assertEqual(self.schedule.GetWeekdayRange(2), (20, 27))
self.assertEqual(self.schedule.GetWeekdayRange(3), (28, 37))
self.assertEqual(self.schedule.GetWeekdayRange(4), (38, 47))
self.assertEqual(self.schedule.GetWeekdayRange(5), (48, 57))
class DepartmentCountTest(unittest.TestCase):
def setUp(self):
self.schedule = schedule_parser.Schedule()
self.schedule.Parse(SCHEDULE_PATH)
def runTest(self):
self.assertEqual(self.schedule.GetDepartmentCount(), 9)
class DepartmentRangeTest(unittest.TestCase):
def setUp(self):
self.schedule = schedule_parser.Schedule()
self.schedule.Parse(SCHEDULE_PATH)
def runTest(self):
self.assertEqual(self.schedule.GetDepartmentRange(0), (2, 11))
self.assertEqual(self.schedule.GetDepartmentRange(1), (13, 20))
self.assertEqual(self.schedule.GetDepartmentRange(2), (22, 32))
self.assertEqual(self.schedule.GetDepartmentRange(3), (34, 36))
self.assertEqual(self.schedule.GetDepartmentRange(4), (38, 43))
self.assertEqual(self.schedule.GetDepartmentRange(5), (45, 53))
self.assertEqual(self.schedule.GetDepartmentRange(6), (55, 62))
self.assertEqual(self.schedule.GetDepartmentRange(7), (64, 71))
self.assertEqual(self.schedule.GetDepartmentRange(8), (73, 77))
class DepartmentsRowTest(unittest.TestCase):
def setUp(self):
self.schedule = schedule_parser.Schedule()
self.schedule.Parse(SCHEDULE_PATH)
def runTest(self):
self.assertEqual(self.schedule.GetDepartmentsRow(), 3)
class HoursColumnTest(unittest.TestCase):
def setUp(self):
self.schedule = schedule_parser.Schedule()
self.schedule.Parse(SCHEDULE_PATH)
def runTest(self):
self.assertEqual(self.schedule.GetHoursColumn(), 1)
class HoursRangesTest(unittest.TestCase):
def setUp(self):
self.schedule = schedule_parser.Schedule()
self.schedule.Parse(SCHEDULE_PATH)
def runTest(self):
self.assertEqual(self.schedule.GetHoursRanges(0), [(4, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 10), (10, 11)])
self.assertEqual(self.schedule.GetHoursRanges(3), [(28, 30), (30, 31), (31, 32), (32, 34), (34, 35), (35, 36), (36, 37)])
self.assertEqual(self.schedule.GetHoursRanges(5), [(48, 49), (49, 50), (50, 52), (52, 53), (53, 54), (54, 56), (56, 57)])
class GroupCountTest(unittest.TestCase):
def setUp(self):
self.schedule = schedule_parser.Schedule()
self.schedule.Parse(SCHEDULE_PATH)
def runTest(self):
self.assertEqual(self.schedule.GetGroupCount(0), 9)
self.assertEqual(self.schedule.GetGroupCount(1), 7)
self.assertEqual(self.schedule.GetGroupCount(2), 8)
self.assertEqual(self.schedule.GetGroupCount(3), 2)
self.assertEqual(self.schedule.GetGroupCount(4), 5)
self.assertEqual(self.schedule.GetGroupCount(5), 8)
self.assertEqual(self.schedule.GetGroupCount(6), 7)
self.assertEqual(self.schedule.GetGroupCount(7), 7)
self.assertEqual(self.schedule.GetGroupCount(8), 4)
class GroupListTest(unittest.TestCase):
def setUp(self):
self.schedule = schedule_parser.Schedule()
self.schedule.Parse(SCHEDULE_PATH)
def runTest(self):
self.assertEqual(self.schedule.GetGroupList(0), ['011', '012', '013', '014', '015', '016', '017', '018', '019'])
self.assertEqual(self.schedule.GetGroupList(1), ['021', '022', '023', '024', '025', '026', '028'])
self.assertEqual(self.schedule.GetGroupList(3), ['041', '042'])
self.assertEqual(self.schedule.GetGroupList(8), ['0111', '0112', '0113', '0114'])
class GroupRangeTest(unittest.TestCase):
def setUp(self):
self.schedule = schedule_parser.Schedule()
self.schedule.Parse(SCHEDULE_PATH)
def runTest(self):
self.assertEqual(self.schedule.GetGroupRange(0, 0), (2, 3))
self.assertEqual(self.schedule.GetGroupRange(0, 1), (3, 4))
self.assertEqual(self.schedule.GetGroupRange(2, 1), (23, 25))
self.assertEqual(self.schedule.GetGroupRange(2, 2), (25, 26))
self.assertEqual(self.schedule.GetGroupRange(2, 3), (26, 28))
self.assertEqual(self.schedule.GetGroupRange(5, 3), (48, 49))
self.assertEqual(self.schedule.GetGroupRange(8, 0), (73, 74))
self.assertEqual(self.schedule.GetGroupRange(8, 3), (76, 77))
class WeekdayByRowTest(unittest.TestCase):
def setUp(self):
self.schedule = schedule_parser.Schedule()
self.schedule.Parse(SCHEDULE_PATH)
def runTest(self):
self.assertEqual(self.schedule.GetWeekdayByRow(4), 0)
self.assertEqual(self.schedule.GetWeekdayByRow(5), 0)
self.assertEqual(self.schedule.GetWeekdayByRow(10), 0)
self.assertEqual(self.schedule.GetWeekdayByRow(13), 1)
self.assertEqual(self.schedule.GetWeekdayByRow(25), 2)
self.assertEqual(self.schedule.GetWeekdayByRow(26), 2)
self.assertEqual(self.schedule.GetWeekdayByRow(28), 3)
self.assertEqual(self.schedule.GetWeekdayByRow(44), 4)
self.assertEqual(self.schedule.GetWeekdayByRow(48), 5)
self.assertEqual(self.schedule.GetWeekdayByRow(56), 5)
class PairByRowTest(unittest.TestCase):
def setUp(self):
self.schedule = schedule_parser.Schedule()
self.schedule.Parse(SCHEDULE_PATH)
def runTest(self):
self.assertEqual(self.schedule.GetPairByRow(4), (0, 0))
self.assertEqual(self.schedule.GetPairByRow(5), (1, 0))
self.assertEqual(self.schedule.GetPairByRow(10), (6, 0))
self.assertEqual(self.schedule.GetPairByRow(12), (0, 0))
self.assertEqual(self.schedule.GetPairByRow(28), (0, 0))
self.assertEqual(self.schedule.GetPairByRow(29), (0, 1))
self.assertEqual(self.schedule.GetPairByRow(30), (1, 0))
self.assertEqual(self.schedule.GetPairByRow(33), (3, 1))
self.assertEqual(self.schedule.GetPairByRow(56), (6, 0))
class DepartmentByColumnTest(unittest.TestCase):
def setUp(self):
self.schedule = schedule_parser.Schedule()
self.schedule.Parse(SCHEDULE_PATH)
def runTest(self):
self.assertEqual(self.schedule.GetDepartmentIndexByColumn(2), 0)
self.assertEqual(self.schedule.GetDepartmentIndexByColumn(3), 0)
self.assertEqual(self.schedule.GetDepartmentIndexByColumn(10), 0)
self.assertEqual(self.schedule.GetDepartmentIndexByColumn(13), 1)
self.assertEqual(self.schedule.GetDepartmentIndexByColumn(18), 1)
self.assertEqual(self.schedule.GetDepartmentIndexByColumn(19), 1)
self.assertEqual(self.schedule.GetDepartmentIndexByColumn(22), 2)
self.assertEqual(self.schedule.GetDepartmentIndexByColumn(24), 2)
self.assertEqual(self.schedule.GetDepartmentIndexByColumn(31), 2)
self.assertEqual(self.schedule.GetDepartmentIndexByColumn(39), 4)
self.assertEqual(self.schedule.GetDepartmentIndexByColumn(64), 7)
self.assertEqual(self.schedule.GetDepartmentIndexByColumn(70), 7)
self.assertEqual(self.schedule.GetDepartmentIndexByColumn(73), 8)
self.assertEqual(self.schedule.GetDepartmentIndexByColumn(76), 8)
class GroupByColumnTest(unittest.TestCase):
def setUp(self):
self.schedule = schedule_parser.Schedule()
self.schedule.Parse(SCHEDULE_PATH)
def runTest(self):
self.assertEqual(self.schedule.GetGroupIndexByColumn(2), (0, 0))
self.assertEqual(self.schedule.GetGroupIndexByColumn(3), (1, 0))
self.assertEqual(self.schedule.GetGroupIndexByColumn(10), (8, 0))
self.assertEqual(self.schedule.GetGroupIndexByColumn(23), (1, 0))
self.assertEqual(self.schedule.GetGroupIndexByColumn(24), (1, 1))
self.assertEqual(self.schedule.GetGroupIndexByColumn(25), (2, 0))
self.assertEqual(self.schedule.GetGroupIndexByColumn(26), (3, 0))
self.assertEqual(self.schedule.GetGroupIndexByColumn(27), (3, 1))
self.assertEqual(self.schedule.GetGroupIndexByColumn(76), (3, 0))
def suite():
loader = unittest.TestLoader()
suite = unittest.TestSuite()
suite.addTest(WeekdayRangeTest())
suite.addTest(DepartmentCountTest())
suite.addTest(DepartmentRangeTest())
suite.addTest(DepartmentsRowTest())
suite.addTest(HoursColumnTest())
suite.addTest(HoursRangesTest())
suite.addTest(GroupCountTest())
suite.addTest(GroupListTest())
suite.addTest(GroupRangeTest())
suite.addTest(WeekdayByRowTest())
suite.addTest(PairByRowTest())
suite.addTest(DepartmentByColumnTest())
suite.addTest(GroupByColumnTest())
return suite
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite())
| xairy/mipt-schedule-parser | msp/test/schedule_tests.py | Python | mit | 8,974 |
import gzip
import glob
import numpy as np
import pandas as pd
from scipy.stats import pearsonr
from scipy.stats import spearmanr
def get_num_lines_gz(filename):
num_lines = 0
with gzip.open(filename, "r") as fp:
for line in fp:
num_lines += 1
return num_lines
def main():
    """get QC stats from ChIP-seq
    - num nodup reads and library complexity (NRF/PBC1/PBC2) per file
    - MACS2, overlap, and IDR peak counts
    """
# files
DATA_DIR = "/mnt/lab_data/kundaje/projects/skin/data/bds/processed.chipseq.2017-01-23.histones"
# params
marks = ["H3K27ac", "H3K4me1", "H3K27me3", "CTCF"]
days = np.arange(0, 7, 3)
days = ["d{}".format(day).replace(".", "") for day in days]
reps = ["1", "2"]
# results
results = {}
results["mark_or_tf"] = []
results["timepoint"] = []
results["replicate"] = []
#results["num_input_reads"] = []
results["num_nodup_reads"] = []
results["NRF"] = []
results["PBC1"] = []
results["PBC2"] = []
results["num_macs2_peaks"] = []
results["num_overlap_peaks"] = []
results["num_idr_peaks"] = []
for mark in marks:
print mark
for day in days:
for rep in reps:
# timepoint, rep
results["mark_or_tf"].append(mark)
results["timepoint"].append(day)
results["replicate"].append(rep)
# nodup reads
nodup_log = glob.glob(
"{}/*{}*{}*/qc/rep{}/*nodup.flagstat.qc".format(
DATA_DIR, day, mark, rep))[0]
with open(nodup_log, "r") as fp:
for line in fp:
if "in total" in line:
num_nodup_reads = line.split("+")[0].strip()
results["num_nodup_reads"].append(num_nodup_reads)
# NRF/PBC1/PBC2
lib_log = glob.glob(
"{}/*{}*{}*/qc/rep{}/*nodup.pbc.qc".format(
DATA_DIR, day, mark, rep))[0]
with open(lib_log, "r") as fp:
# cols 5,6,7 is NRF/PBC1/PBC2
for line in fp:
fields = line.strip().split()
results["NRF"].append(fields[4])
results["PBC1"].append(fields[5])
results["PBC2"].append(fields[6])
# peak files
macs2_peaks = glob.glob(
"{}/*{}*{}*/peak/macs2/rep{}/*narrowPeak.gz".format(
DATA_DIR, day, mark, rep))[0]
num_macs2 = get_num_lines_gz(macs2_peaks)
results["num_macs2_peaks"].append(num_macs2)
if "CTCF" in mark:
idr_peaks = glob.glob(
"{}/*{}*{}*/peak/idr/true_reps/rep1-rep2/*filt.narrowPeak.gz".format(
DATA_DIR, day, mark))[0]
num_idr = get_num_lines_gz(idr_peaks)
results["num_idr_peaks"].append(num_idr)
results["num_overlap_peaks"].append("NA")
else:
results["num_idr_peaks"].append("NA")
overlap_peaks = glob.glob(
"{}/*{}*{}*/peak/macs2/overlap/*filt.narrowPeak.gz".format(
DATA_DIR, day, mark, rep))[0]
num_overlap = get_num_lines_gz(overlap_peaks)
results["num_overlap_peaks"].append(num_overlap)
# dataframe
results = pd.DataFrame(results)
ordered_headers = [
"mark_or_tf",
"timepoint",
"replicate",
#"num_input_reads",
"num_nodup_reads",
"NRF",
"PBC1",
"PBC2",
"num_macs2_peaks",
"num_overlap_peaks",
"num_idr_peaks"]
results = results[ordered_headers]
out_file = "ggr.ChIP-seq.QC.summary.txt"
results.to_csv(out_file, sep="\t", header=True, index=False)
return
main()
| vervacity/ggr-project | scripts/data_qc/summarize_chipseq_qc.py | Python | mit | 4,085 |
student_phoneNumber_name = {1: 'a', 3: 'c', 2: 'b'}
def Handler() :
while (1) :
choice = eval(input("Enter :\t 1 - to search student name \n \t 2 - to insert new student record \n \t 0 - to quit\n"))
print(choice)
if (choice == 1) :
if (student_phoneNumber_name) :
phone_number = input("Enter student's phone number : ")
name = SearchRecord(phone_number)
if (name) :
print("name : " + name )
else :
print(str(phone_number) + "Does not exist in record" + str(name))
else :
print("Record is empty ")
elif (choice == 2) :
phone_number = input("Enter student's phone number : ")
name = input("Enter student's name : ") #best example to understand input() and raw_input()
InsertRecord(phone_number, name)
elif (choice == 0) :
break
else:
print("Enter correct choice")
def InsertRecord(x, y):
student_phoneNumber_name[x] = y
return;
def SearchRecord(x):
print(x)
if (x in student_phoneNumber_name) :
return student_phoneNumber_name[x]
return False
Handler()
print(student_phoneNumber_name) | ajitghz582/PythonLearning | DAY_1_ASSIGNMENTS/1_name_phone_number.py | Python | mit | 1,070 |
""" Tests for Dynamo3 """
import sys
import unittest
from decimal import Decimal
from pickle import dumps, loads
from urllib.parse import urlparse
from botocore.exceptions import ClientError
from mock import ANY, MagicMock, patch
from dynamo3 import (
Binary,
Dynamizer,
DynamoDBConnection,
DynamoDBError,
DynamoKey,
GlobalIndex,
Limit,
Table,
ThroughputException,
)
from dynamo3.constants import STRING
from dynamo3.result import Capacity, ConsumedCapacity, Count, ResultSet, add_dicts
class BaseSystemTest(unittest.TestCase):
"""Base class for system tests"""
dynamo: DynamoDBConnection = None # type: ignore
def setUp(self):
super(BaseSystemTest, self).setUp()
# Clear out any pre-existing tables
for tablename in self.dynamo.list_tables():
self.dynamo.delete_table(tablename)
def tearDown(self):
super(BaseSystemTest, self).tearDown()
for tablename in self.dynamo.list_tables():
self.dynamo.delete_table(tablename)
self.dynamo.clear_hooks()
class TestMisc(BaseSystemTest):
"""Tests that don't fit anywhere else"""
def tearDown(self):
super(TestMisc, self).tearDown()
self.dynamo.default_return_capacity = False
def test_connection_host(self):
"""Connection can access host of endpoint"""
urlparse(self.dynamo.host)
def test_connection_region(self):
"""Connection can access name of connected region"""
self.assertTrue(isinstance(self.dynamo.region, str))
def test_connect_to_region(self):
"""Can connect to a dynamo region"""
conn = DynamoDBConnection.connect("us-west-1")
self.assertIsNotNone(conn.host)
def test_connect_to_region_creds(self):
"""Can connect to a dynamo region with credentials"""
conn = DynamoDBConnection.connect(
"us-west-1", access_key="abc", secret_key="12345"
)
self.assertIsNotNone(conn.host)
def test_connect_to_host_without_session(self):
"""Can connect to a dynamo host without passing in a session"""
conn = DynamoDBConnection.connect("us-west-1", host="localhost")
self.assertIsNotNone(conn.host)
@patch("dynamo3.connection.time")
def test_retry_on_throughput_error(self, time):
"""Throughput exceptions trigger a retry of the request"""
def call(*_, **__):
"""Dummy service call"""
response = {
"ResponseMetadata": {
"HTTPStatusCode": 400,
},
"Error": {
"Code": "ProvisionedThroughputExceededException",
"Message": "Does not matter",
},
}
raise ClientError(response, "list_tables")
with patch.object(self.dynamo, "client") as client:
client.list_tables.side_effect = call
with self.assertRaises(ThroughputException):
self.dynamo.call("list_tables")
self.assertEqual(len(time.sleep.mock_calls), self.dynamo.request_retries - 1)
self.assertTrue(time.sleep.called)
def test_describe_missing(self):
"""Describing a missing table returns None"""
ret = self.dynamo.describe_table("foobar")
self.assertIsNone(ret)
def test_magic_table_props(self):
"""Table can look up properties on response object"""
hash_key = DynamoKey("id")
self.dynamo.create_table("foobar", hash_key=hash_key)
ret = self.dynamo.describe_table("foobar")
assert ret is not None
self.assertEqual(ret.item_count, ret["ItemCount"])
with self.assertRaises(KeyError):
self.assertIsNotNone(ret["Missing"])
def test_magic_index_props(self):
"""Index can look up properties on response object"""
index = GlobalIndex.all("idx-name", DynamoKey("id"))
index.response = {"FooBar": 2}
self.assertEqual(index["FooBar"], 2)
with self.assertRaises(KeyError):
self.assertIsNotNone(index["Missing"])
def test_describe_during_delete(self):
"""Describing a table during a delete operation should not crash"""
response = {
"ItemCount": 0,
"ProvisionedThroughput": {
"NumberOfDecreasesToday": 0,
"ReadCapacityUnits": 5,
"WriteCapacityUnits": 5,
},
"TableName": "myTableName",
"TableSizeBytes": 0,
"TableStatus": "DELETING",
}
table = Table.from_response(response)
self.assertEqual(table.status, "DELETING")
def test_delete_missing(self):
"""Deleting a missing table returns False"""
ret = self.dynamo.delete_table("foobar")
self.assertTrue(not ret)
def test_re_raise_passthrough(self):
"""DynamoDBError can re-raise itself if missing original exception"""
err = DynamoDBError(400, Code="ErrCode", Message="Ouch", args={})
caught = False
try:
err.re_raise()
except DynamoDBError as e:
caught = True
self.assertEqual(err, e)
self.assertTrue(caught)
def test_re_raise(self):
"""DynamoDBError can re-raise itself with stacktrace of original exc"""
caught = False
try:
try:
raise Exception("Hello")
except Exception as e1:
err = DynamoDBError(
400,
Code="ErrCode",
Message="Ouch",
args={},
exc_info=sys.exc_info(),
)
err.re_raise()
except DynamoDBError as e:
caught = True
import traceback
tb = traceback.format_tb(e.__traceback__)
self.assertIn("Hello", tb[-1])
self.assertEqual(e.status_code, 400)
self.assertTrue(caught)
def test_default_return_capacity(self):
"""When default_return_capacity=True, always return capacity"""
self.dynamo.default_return_capacity = True
with patch.object(self.dynamo, "call") as call:
call().get.return_value = None
rs = self.dynamo.scan("foobar")
list(rs)
call.assert_called_with(
"scan",
TableName="foobar",
ReturnConsumedCapacity="INDEXES",
ConsistentRead=False,
)
def test_list_tables_page(self):
"""Call to ListTables should page results"""
hash_key = DynamoKey("id")
for i in range(120):
self.dynamo.create_table("table%d" % i, hash_key=hash_key)
tables = list(self.dynamo.list_tables(110))
self.assertEqual(len(tables), 110)
def test_limit_complete(self):
"""A limit with item_capacity = 0 is 'complete'"""
limit = Limit(item_limit=0)
self.assertTrue(limit.complete)
def test_wait_create_table(self):
"""Create table shall wait for the table to come online."""
tablename = "foobar_wait"
hash_key = DynamoKey("id")
self.dynamo.create_table(tablename, hash_key=hash_key, wait=True)
self.assertIsNotNone(self.dynamo.describe_table(tablename))
def test_wait_delete_table(self):
"""Delete table shall wait for the table to go offline."""
tablename = "foobar_wait"
hash_key = DynamoKey("id")
self.dynamo.create_table(tablename, hash_key=hash_key, wait=True)
result = self.dynamo.delete_table(tablename, wait=True)
self.assertTrue(result)
class TestDataTypes(BaseSystemTest):
"""Tests for Dynamo data types"""
def make_table(self):
"""Convenience method for making a table"""
hash_key = DynamoKey("id")
self.dynamo.create_table("foobar", hash_key=hash_key)
def test_string(self):
"""Store and retrieve a string"""
self.make_table()
self.dynamo.put_item("foobar", {"id": "abc"})
item = list(self.dynamo.scan("foobar"))[0]
self.assertEqual(item["id"], "abc")
self.assertTrue(isinstance(item["id"], str))
def test_int(self):
"""Store and retrieve an int"""
self.make_table()
self.dynamo.put_item("foobar", {"id": "a", "num": 1})
item = list(self.dynamo.scan("foobar"))[0]
self.assertEqual(item["num"], 1)
def test_float(self):
"""Store and retrieve a float"""
self.make_table()
self.dynamo.put_item("foobar", {"id": "a", "num": 1.1})
item = list(self.dynamo.scan("foobar"))[0]
self.assertAlmostEqual(float(item["num"]), 1.1)
def test_decimal(self):
"""Store and retrieve a Decimal"""
self.make_table()
self.dynamo.put_item("foobar", {"id": "a", "num": Decimal("1.1")})
item = list(self.dynamo.scan("foobar"))[0]
self.assertEqual(item["num"], Decimal("1.1"))
def test_binary(self):
"""Store and retrieve a binary"""
self.make_table()
self.dynamo.put_item("foobar", {"id": "a", "data": Binary("abc")})
item = list(self.dynamo.scan("foobar"))[0]
self.assertEqual(item["data"].value, b"abc")
def test_binary_bytes(self):
"""Store and retrieve bytes as a binary"""
self.make_table()
data = {"a": 1, "b": 2}
self.dynamo.put_item("foobar", {"id": "a", "data": Binary(dumps(data))})
item = list(self.dynamo.scan("foobar"))[0]
self.assertEqual(loads(item["data"].value), data)
def test_string_set(self):
"""Store and retrieve a string set"""
self.make_table()
item = {
"id": "a",
"datas": set(["a", "b"]),
}
self.dynamo.put_item("foobar", item)
ret = list(self.dynamo.scan("foobar"))[0]
self.assertEqual(ret, item)
def test_number_set(self):
"""Store and retrieve a number set"""
self.make_table()
item = {
"id": "a",
"datas": set([1, 2, 3]),
}
self.dynamo.put_item("foobar", item)
ret = list(self.dynamo.scan("foobar"))[0]
self.assertEqual(ret, item)
def test_binary_set(self):
"""Store and retrieve a binary set"""
self.make_table()
item = {
"id": "a",
"datas": set([Binary("a"), Binary("b")]),
}
self.dynamo.put_item("foobar", item)
ret = list(self.dynamo.scan("foobar"))[0]
self.assertEqual(ret, item)
def test_binary_equal(self):
"""Binary should eq other Binaries and also raw bytestrings"""
self.assertEqual(Binary("a"), Binary("a"))
self.assertEqual(Binary("a"), b"a")
self.assertFalse(Binary("a") != Binary("a"))
def test_binary_repr(self):
"""Binary repr should wrap the contained value"""
self.assertEqual(repr(Binary("a")), "Binary(%r)" % b"a")
def test_binary_converts_unicode(self):
"""Binary will convert unicode to bytes"""
b = Binary("a")
self.assertTrue(isinstance(b.value, bytes))
def test_binary_force_string(self):
"""Binary must wrap a string type"""
with self.assertRaises(TypeError):
Binary(2) # type: ignore
def test_bool(self):
"""Store and retrieve a boolean"""
self.make_table()
self.dynamo.put_item("foobar", {"id": "abc", "b": True})
item = list(self.dynamo.scan("foobar"))[0]
self.assertEqual(item["b"], True)
self.assertTrue(isinstance(item["b"], bool))
def test_list(self):
"""Store and retrieve a list"""
self.make_table()
self.dynamo.put_item("foobar", {"id": "abc", "l": ["a", 1, False]})
item = list(self.dynamo.scan("foobar"))[0]
self.assertEqual(item["l"], ["a", 1, False])
def test_dict(self):
"""Store and retrieve a dict"""
self.make_table()
data = {
"i": 1,
"s": "abc",
"n": None,
"l": ["a", 1, True],
"b": False,
}
self.dynamo.put_item("foobar", {"id": "abc", "d": data})
item = list(self.dynamo.scan("foobar"))[0]
self.assertEqual(item["d"], data)
def test_nested_dict(self):
"""Store and retrieve a nested dict"""
self.make_table()
data = {
"s": "abc",
"d": {
"i": 42,
},
}
self.dynamo.put_item("foobar", {"id": "abc", "d": data})
item = list(self.dynamo.scan("foobar"))[0]
self.assertEqual(item["d"], data)
def test_nested_list(self):
"""Store and retrieve a nested list"""
self.make_table()
data = [
1,
[
True,
None,
"abc",
],
]
self.dynamo.put_item("foobar", {"id": "abc", "l": data})
item = list(self.dynamo.scan("foobar"))[0]
self.assertEqual(item["l"], data)
def test_unrecognized_type(self):
"""Dynamizer throws error on unrecognized type"""
value = {
"ASDF": "abc",
}
with self.assertRaises(TypeError):
self.dynamo.dynamizer.decode(value)
class TestDynamizer(unittest.TestCase):
"""Tests for the Dynamizer"""
def test_register_encoder(self):
"""Can register a custom encoder"""
from datetime import datetime
dynamizer = Dynamizer()
dynamizer.register_encoder(datetime, lambda d, v: (STRING, v.isoformat()))
now = datetime.utcnow()
self.assertEqual(dynamizer.raw_encode(now), (STRING, now.isoformat()))
def test_encoder_missing(self):
"""If no encoder is found, raise ValueError"""
from datetime import datetime
dynamizer = Dynamizer()
with self.assertRaises(ValueError):
dynamizer.encode(datetime.utcnow())
class TestResultModels(unittest.TestCase):
"""Tests for the model classes in results.py"""
def test_add_dicts_base_case(self):
"""add_dict where one argument is None returns the other"""
f = object()
self.assertEqual(add_dicts(f, None), f)
self.assertEqual(add_dicts(None, f), f)
def test_add_dicts(self):
"""Merge two dicts of values together"""
a = {
"a": 1,
"b": 2,
}
b = {
"a": 3,
"c": 4,
}
ret = add_dicts(a, b)
self.assertEqual(
ret,
{
"a": 4,
"b": 2,
"c": 4,
},
)
def test_count_repr(self):
"""Count repr"""
count = Count(0, 0)
self.assertEqual(repr(count), "Count(0)")
def test_count_addition(self):
"""Count addition"""
count = Count(4, 2)
self.assertEqual(count + 5, 9)
def test_count_subtraction(self):
"""Count subtraction"""
count = Count(4, 2)
self.assertEqual(count - 2, 2)
def test_count_multiplication(self):
"""Count multiplication"""
count = Count(4, 2)
self.assertEqual(2 * count, 8)
def test_count_division(self):
"""Count division"""
count = Count(4, 2)
self.assertEqual(count / 2, 2)
def test_count_add_none_capacity(self):
"""Count addition with one None consumed_capacity"""
cap = Capacity(3, 0)
count = Count(4, 2)
count2 = Count(5, 3, cap)
ret = count + count2
self.assertEqual(ret, 9)
self.assertEqual(ret.scanned_count, 5)
self.assertEqual(ret.consumed_capacity, cap)
def test_count_add_capacity(self):
"""Count addition with consumed_capacity"""
count = Count(4, 2, Capacity(3, 0))
count2 = Count(5, 3, Capacity(2, 0))
ret = count + count2
self.assertEqual(ret, 9)
self.assertEqual(ret.scanned_count, 5)
self.assertEqual(ret.consumed_capacity.read, 5)
def test_capacity_math(self):
"""Capacity addition and equality"""
cap = Capacity(2, 4)
s = set([cap])
self.assertIn(Capacity(2, 4), s)
self.assertNotEqual(Capacity(1, 4), cap)
self.assertEqual(Capacity(1, 1) + Capacity(2, 2), Capacity(3, 3))
def test_capacity_format(self):
"""String formatting for Capacity"""
c = Capacity(1, 3)
self.assertEqual(str(c), "R:1.0 W:3.0")
c = Capacity(0, 0)
self.assertEqual(str(c), "0")
def test_total_consumed_capacity(self):
"""ConsumedCapacity can parse results with only Total"""
response = {
"TableName": "foobar",
"ReadCapacityUnits": 4,
"WriteCapacityUnits": 5,
}
cap = ConsumedCapacity.from_response(response)
self.assertEqual(cap.total, (4, 5))
self.assertIsNone(cap.table_capacity)
def test_consumed_capacity_equality(self):
"""ConsumedCapacity addition and equality"""
cap = ConsumedCapacity(
"foobar",
Capacity(0, 10),
Capacity(0, 2),
{
"l-index": Capacity(0, 4),
},
{
"g-index": Capacity(0, 3),
},
)
c2 = ConsumedCapacity(
"foobar",
Capacity(0, 10),
Capacity(0, 2),
{
"l-index": Capacity(0, 4),
"l-index2": Capacity(0, 7),
},
)
self.assertNotEqual(cap, c2)
c3 = ConsumedCapacity(
"foobar",
Capacity(0, 10),
Capacity(0, 2),
{
"l-index": Capacity(0, 4),
},
{
"g-index": Capacity(0, 3),
},
)
self.assertIn(cap, set([c3]))
combined = cap + c2
self.assertEqual(
cap + c2,
ConsumedCapacity(
"foobar",
Capacity(0, 20),
Capacity(0, 4),
{
"l-index": Capacity(0, 8),
"l-index2": Capacity(0, 7),
},
{
"g-index": Capacity(0, 3),
},
),
)
self.assertIn(str(Capacity(0, 3)), str(combined))
def test_add_different_tables(self):
"""Cannot add ConsumedCapacity of two different tables"""
c1 = ConsumedCapacity("foobar", Capacity(1, 28))
c2 = ConsumedCapacity("boofar", Capacity(3, 0))
with self.assertRaises(TypeError):
c1 += c2
def test_always_continue_query(self):
"""Regression test.
If result has no items but does have LastEvaluatedKey, keep querying.
"""
conn = MagicMock()
conn.dynamizer.decode_keys.side_effect = lambda x: x
items = ["a", "b"]
results = [
{"Items": [], "LastEvaluatedKey": {"foo": 1, "bar": 2}},
{"Items": [], "LastEvaluatedKey": {"foo": 1, "bar": 2}},
{"Items": items},
]
conn.call.side_effect = lambda *_, **__: results.pop(0)
rs = ResultSet(conn, Limit())
results = list(rs)
self.assertEqual(results, items)
class TestHooks(BaseSystemTest):
"""Tests for connection callback hooks"""
def tearDown(self):
super(TestHooks, self).tearDown()
for hooks in self.dynamo._hooks.values():
while hooks:
hooks.pop()
def test_precall(self):
"""precall hooks are called before an API call"""
hook = MagicMock()
self.dynamo.subscribe("precall", hook)
def throw(**_):
"""Throw an exception to terminate the request"""
raise Exception()
with patch.object(self.dynamo, "client") as client:
client.describe_table.side_effect = throw
with self.assertRaises(Exception):
self.dynamo.describe_table("foobar")
hook.assert_called_with(self.dynamo, "describe_table", {"TableName": "foobar"})
def test_postcall(self):
"""postcall hooks are called after API call"""
hash_key = DynamoKey("id")
self.dynamo.create_table("foobar", hash_key=hash_key)
calls = []
def hook(*args):
"""Log the call into a list"""
calls.append(args)
self.dynamo.subscribe("postcall", hook)
self.dynamo.describe_table("foobar")
self.assertEqual(len(calls), 1)
args = calls[0]
self.assertEqual(len(args), 4)
conn, command, kwargs, response = args
self.assertEqual(conn, self.dynamo)
self.assertEqual(command, "describe_table")
self.assertEqual(kwargs["TableName"], "foobar")
self.assertEqual(response["Table"]["TableName"], "foobar")
def test_capacity(self):
"""capacity hooks are called whenever response has ConsumedCapacity"""
hash_key = DynamoKey("id")
self.dynamo.create_table("foobar", hash_key=hash_key)
hook = MagicMock()
self.dynamo.subscribe("capacity", hook)
with patch.object(self.dynamo, "client") as client:
client.scan.return_value = {
"Items": [],
"ConsumedCapacity": {
"TableName": "foobar",
"ReadCapacityUnits": 4,
},
}
rs = self.dynamo.scan("foobar")
list(rs)
cap = ConsumedCapacity("foobar", Capacity(4, 0))
hook.assert_called_with(self.dynamo, "scan", ANY, ANY, cap)
def test_subscribe(self):
"""Can subscribe and unsubscribe from hooks"""
hook = lambda: None
self.dynamo.subscribe("precall", hook)
self.assertEqual(len(self.dynamo._hooks["precall"]), 1)
self.dynamo.unsubscribe("precall", hook)
self.assertEqual(len(self.dynamo._hooks["precall"]), 0)
| stevearc/dynamo3 | tests/__init__.py | Python | mit | 22,100 |
Alchemy sentiment analysis: fb12d2c55fff36e1e268584e261b6b010b37279f
Africa Is Talking: 676dbd926bbb04fa69ce90ee81d3f5ffee2692aaf80eb5793bd70fe93e77dc2e
| crakama/bc_7_twitment | keys.py | Python | mit | 156 |
#!/usr/bin/env python3
"""
Categorize and analyze user sessions.
Read in ecfs_obfuscated_filtered.gz file, output some fancy results.
"""
from collections import defaultdict
from collections import Counter
import sys
import time
import os
import resource
import json
import fnmatch
from pipes import Pipes
import operator
from operation import Operation
KB = 1024
MB = KB * 1024
GB = MB * 1024
TB = GB * 1024
PB = TB * 1024
MONITOR_LINES = 100000
class UserSession():
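    """Aggregate the activity of one user within a single session window.

    Operations are fed in timestamp order via add_op(); finish() derives the
    per-session summary fields and returns them as one semicolon-separated
    record matching the csv_header written out in __main__.
    """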
def __init__(self, user_id):
self.user_id = user_id
self.from_ts = 0
self.till_ts = 0
self.get_requests = 0
self.reget_requests = 0
self.put_requests = 0
self.get_bytes = 0
self.put_bytes = 0
self.rename_requests = 0
self.del_requests = 0
self.get_dirs = 0
self.put_dirs = 0
self.put_files_per_dir = 0.0
self.get_files_per_dir = 0.0
self.window_seconds = 0
self.file_cnt_gets = Counter()
self.file_cnt_puts = Counter()
self.dir_cnt_gets = Counter()
self.dir_cnt_puts = Counter()
self.num_ops = 0
self.last_ts = 0
def add_op(self, op):
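        """Fold one Operation into the session counters.

        Expects ops in non-decreasing timestamp order; optype 'g'/'p'/'d'/'r'
        maps to get/put/delete/rename accounting.
        """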
self.num_ops += 1
if op.ts < self.last_ts:
raise Exception("Timestamp too old")
else:
self.last_ts = op.ts
if op.optype == 'g':
self.get_requests += 1
self.get_bytes += op.size
self.file_cnt_gets[op.obj_id] += 1
self.dir_cnt_gets[op.parent_dir_id] += 1
elif op.optype == 'p':
self.put_requests += 1
self.put_bytes += op.size
self.file_cnt_puts[op.obj_id] += 1
self.dir_cnt_puts[op.parent_dir_id] += 1
elif op.optype == 'd':
self.del_requests += 1
elif op.optype == 'r':
self.rename_requests += 1
#update last time stamp in the session
self.till_ts = op.ts + op.execution_time
def finish(self):
self.get_dirs = len(self.dir_cnt_gets)
if self.get_dirs > 0:
self.get_files_per_dir = float(self.get_requests) / self.get_dirs
self.put_dirs = len(self.dir_cnt_puts)
if self.put_dirs > 0:
self.put_files_per_dir = float(self.put_requests) / self.put_dirs
"""
set reget_counter
:param counter: contains [ 1, 1, 5] counts of objects. value > 1 is a re-retrieval.
:return:
"""
for c in self.file_cnt_gets.values():
if c > 1:
self.reget_requests += (c - 1)
# self.announce()
return ";".join([str(x) for x in [
self.user_id,
self.from_ts,
self.till_ts,
self.till_ts - self.from_ts,
self.get_requests,
self.reget_requests,
self.put_requests,
self.get_bytes,
self.put_bytes,
self.rename_requests,
self.del_requests,
self.get_dirs,
self.put_dirs,
self.put_files_per_dir,
self.get_files_per_dir,
self.window_seconds
]]
)
def announce(self):
print("closed session. gets: %r, regets: %r, puts: %r, dels: %r, renames: %r get_dirs: %r, put_dirs: %r, get_bytes: %r put_bytes: %r window_seconds: %d" % \
(self.get_requests, self.reget_requests, self.put_requests, self.del_requests, self.rename_requests, self.get_dirs, self.put_dirs, self.get_bytes, self.put_bytes, self.window_seconds))
def find_clusters(atimes):
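    """Choose a session window (in seconds) for a list of access times.

    Tries gap thresholds from 120s up to 3650s in steps of 10s, counts how
    many clusters each threshold yields, and returns the smallest threshold
    that produced the most common cluster count.
    """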
foo = Counter()
bar = dict()
    for i in range(120, 3660, 10):
clusters = get_clusters(atimes, i)
cs = len(clusters)
foo[cs] += 1
        # note first occurrence of this cluster size.
if cs not in bar:
bar[cs] = i
# print(len(atimes), i, cs)
return bar[foo.most_common()[0][0]]
def get_clusters(data, maxgap):
'''Arrange data into groups where successive elements
differ by no more than *maxgap*
    >>> get_clusters([1, 6, 9, 100, 102, 105, 109, 134, 139], maxgap=10)
[[1, 6, 9], [100, 102, 105, 109], [134, 139]]
    >>> get_clusters([1, 6, 9, 99, 100, 102, 105, 134, 139, 141], maxgap=10)
[[1, 6, 9], [99, 100, 102, 105], [134, 139, 141]]
'''
data.sort()
groups = [[data[0]]]
for x in data[1:]:
if abs(x - groups[-1][-1]) <= maxgap:
groups[-1].append(x)
else:
groups.append([x])
return groups
def analyze_user_session(user_session_file, out_pipeline, target_file_name):
with open(user_session_file, 'r') as sf:
ops = list()
atimes = list()
for line in sf:
op = Operation()
op.init(line.strip())
ops.append(op)
atimes.append(op.ts)
ops.sort(key=operator.attrgetter('ts'))
atimes.sort()
window_seconds = find_clusters(atimes)
session_counter = 1
uf = os.path.basename(user_session_file)
user_id = uf[:uf.find(".user_session.csv")]
session = UserSession(user_id)
session.window_seconds = window_seconds
for op in ops:
if session.from_ts == 0:
session.from_ts = op.ts
session.till_ts = op.ts + op.execution_time
if (session.till_ts + window_seconds) < op.ts:
# this session is over, so archive it.
out_pipeline.write_to(target_file_name, session.finish())
del session
session = UserSession(user_id)
session.window_seconds = window_seconds
session_counter += 1
session.add_op(op)
if session.num_ops > 0:
out_pipeline.write_to(target_file_name, session.finish())
print("sessions: %d with window_seconds: %d" %(session_counter, window_seconds))
if __name__ == "__main__":
source_dir = os.path.abspath(sys.argv[1])
result = os.path.abspath(sys.argv[2])
results_dir = os.path.dirname(result)
target_file_name = os.path.basename(result)
users_session_files = [os.path.join(dirpath, f)
for dirpath, dirnames, files in os.walk(source_dir)
for f in fnmatch.filter(files, '*.user_session.csv')]
#remove the old log file, as outpipe is append only.
if os.path.exists(os.path.join(results_dir, target_file_name)):
os.remove(os.path.join(results_dir, target_file_name))
out_pipe = Pipes(results_dir)
csv_header = ";".join(["user_id",
"from_ts",
"till_ts",
"session_lifetime",
"get_requests",
"reget_requests",
"put_requests",
"get_bytes",
"put_bytes",
"rename_requests",
"del_requests",
"get_dirs",
"put_dirs",
"put_files_per_dir",
"get_files_per_dir",
"window_seconds"
])
out_pipe.write_to(target_file_name, csv_header)
cnt = 0
for sf in users_session_files:
cnt += 1
print ("working on %d/%d" % (cnt, len(users_session_files)))
analyze_user_session(sf, out_pipe, target_file_name)
# if cnt >=20:
# break
out_pipe.close()
print("wrote results to %s: " % (os.path.join(results_dir, target_file_name)))
| zdvresearch/fast15-paper-extras | ecfs_user_sessions/src/analyze_user_sessions.py | Python | mit | 7,526 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author Stephan Reith
@date 31.08.2016
This is a simple example to demonstrate how the ROS Spinnaker Interface can be used.
You will also need a ROS Listener and a ROS Talker to send and receive data.
Make sure they communicate over the same ROS topics and std_msgs.Int64 ROS Messages used here.
"""
import spynnaker.pyNN as pynn
from ros_spinnaker_interface import ROS_Spinnaker_Interface
# import transfer_functions as tf
from ros_spinnaker_interface import SpikeSourcePoisson
from ros_spinnaker_interface import SpikeSinkSmoothing
ts = 0.1
n_neurons = 1
simulation_time = 10000 # ms
pynn.setup(timestep=ts, min_delay=ts, max_delay=2.0*ts)
pop = pynn.Population(size=n_neurons, cellclass=pynn.IF_curr_exp, cellparams={}, label='pop')
# The ROS_Spinnaker_Interface just needs to be initialised. The following parameters are possible:
ros_interface = ROS_Spinnaker_Interface(
n_neurons_source=n_neurons, # number of neurons of the injector population
Spike_Source_Class=SpikeSourcePoisson, # the transfer function ROS Input -> Spikes you want to use.
Spike_Sink_Class=SpikeSinkSmoothing, # the transfer function Spikes -> ROS Output you want to use.
# You can choose from the transfer_functions module
# or write one yourself.
output_population=pop, # the pynn population you wish to receive the
# live spikes from.
ros_topic_send='to_spinnaker', # the ROS topic used for the incoming ROS values.
ros_topic_recv='from_spinnaker', # the ROS topic used for the outgoing ROS values.
clk_rate=1000, # mainloop clock (update) rate in Hz.
ros_output_rate=10) # number of ROS messages send out per second.
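# A minimal sketch of the ROS side (assumed, not part of this example): a
# talker publishing std_msgs/Int64 on 'to_spinnaker' and a listener on
# 'from_spinnaker', e.g. with rospy:
#   import rospy
#   from std_msgs.msg import Int64
#   rospy.init_node('spinnaker_talker')
#   pub = rospy.Publisher('to_spinnaker', Int64, queue_size=10)
#   pub.publish(Int64(data=42))
#   rospy.Subscriber('from_spinnaker', Int64, lambda msg: None)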
# Build your network, run the simulation and optionally record the spikes and voltages.
pynn.Projection(ros_interface, pop, pynn.OneToOneConnector(weights=5, delays=1))
pop.record()
pop.record_v()
pynn.run(simulation_time)
spikes = pop.getSpikes()
pynn.end()
# Plot
import pylab
spike_times = [spike[1] for spike in spikes]
spike_ids = [spike[0] for spike in spikes]
pylab.plot(spike_times, spike_ids, ".")
pylab.xlabel('Time (ms)')
pylab.ylabel('Neuron ID')
pylab.title('Spike Plot')
pylab.xlim(xmin=0)
pylab.show()
| reiths/ros_spinnaker_interface | examples/example_ros_spinnaker_interface.py | Python | mit | 2,533 |
# -*- coding: utf-8 -*-
""" Resource Import Tools
@copyright: 2011-12 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
# @todo: remove all interactive error reporting out of the _private methods, and raise exceptions instead.
__all__ = ["S3Importer", "S3ImportJob", "S3ImportItem"]
import os
import sys
import cPickle
import tempfile
from datetime import datetime
from copy import deepcopy
try:
from cStringIO import StringIO # Faster, where available
except:
from StringIO import StringIO
try:
from lxml import etree
except ImportError:
print >> sys.stderr, "ERROR: lxml module needed for XML handling"
raise
try:
import json # try stdlib (Python 2.6)
except ImportError:
try:
import simplejson as json # try external module
except:
import gluon.contrib.simplejson as json # fallback to pure-Python module
from gluon import *
from gluon.serializers import json as jsons
from gluon.storage import Storage, Messages
from gluon.tools import callback
from s3utils import SQLTABLES3
from s3crud import S3CRUD
from s3xml import S3XML
from s3utils import s3_mark_required, s3_has_foreign_key, s3_get_foreign_key
DEBUG = False
if DEBUG:
print >> sys.stderr, "S3IMPORTER: DEBUG MODE"
def _debug(m):
print >> sys.stderr, m
else:
_debug = lambda m: None
# =============================================================================
class S3Importer(S3CRUD):
"""
Transformable formats (XML, JSON, CSV) import handler
"""
UPLOAD_TABLE_NAME = "s3_import_upload"
# -------------------------------------------------------------------------
def apply_method(self, r, **attr):
"""
Apply CRUD methods
@param r: the S3Request
@param attr: dictionary of parameters for the method handler
@returns: output object to send to the view
Known means of communicating with this module:
It expects a URL of the form: /prefix/name/import
It will interpret the http requests as follows:
GET will trigger the upload
POST will trigger either commits or display the import details
DELETE will trigger deletes
It will accept one of the following control vars:
item: to specify a single item in the import job
job: to specify a job
            It should not receive both, so job takes precedence over item
For CSV imports, the calling controller can add extra fields
to the upload form to add columns to each row in the CSV. To add
the extra fields, pass a named parameter "csv_extra_fields" to the
s3_rest_controller call (or the S3Request call, respectively):
s3_rest_controller(module, resourcename,
csv_extra_fields=[
dict(label="ColumnLabelInTheCSV",
field=field_instance)
])
The Field instance "field" will be added to the upload form, and
the user input will be added to each row of the CSV under the
label as specified. If the "field" validator has options, the
input value will be translated into the option representation,
otherwise the value will be used as-is.
Note that the "label" in the dict is the column label in the CSV,
whereas the field label for the form is to be set in the Field
instance passed as "field".
You can add any arbitrary number of csv_extra_fields to the list.
Additionally, you may want to allow the user to choose whether
the import shall first remove all existing data in the target
table. To do so, pass a label for the "replace_option" to the
request:
s3_rest_controller(module, resourcename,
replace_option=T("Remove existing data before import"))
This will add the respective checkbox to the upload form.
You may also want to provide a link to download a CSV template from
the upload form. To do that, add the resource name to the request
attributes:
s3_rest_controller(module, resourcename,
csv_template="<resourcename>")
This will provide a link to:
- static/formats/s3csv/<controller>/<resourcename>.csv
at the top of the upload form.
"""
_debug("S3Importer.apply_method(%s)" % r)
# Messages
T = current.T
messages = self.messages = Messages(T)
messages.download_template = "Download Template"
messages.invalid_file_format = "Invalid File Format"
messages.unsupported_file_type = "Unsupported file type of %s"
messages.stylesheet_not_found = "No Stylesheet %s could be found to manage the import file."
messages.no_file = "No file submitted"
messages.file_open_error = "Unable to open the file %s"
messages.file_not_found = "The file to upload is missing"
messages.no_records_to_import = "No records to import"
messages.no_job_to_delete = "No job to delete, maybe it has already been deleted."
messages.title_job_read = "Details of the selected import job"
messages.title_job_list = "List of import items"
messages.file_uploaded = "Import file uploaded"
messages.upload_submit_btn = "Upload Data File"
messages.open_btn = "Open"
messages.view_btn = "View"
messages.delete_btn = "Delete"
messages.item_show_details = "Display Details"
messages.job_total_records = "Total records in the Import Job"
messages.job_records_selected = "Records selected"
messages.job_deleted = "Import job deleted"
messages.job_completed = "Job run on %s. With result of (%s)"
messages.import_file = "Import File"
messages.import_file_comment = "Upload a file formatted according to the Template."
messages.user_name = "User Name"
messages.commit_total_records_imported = "%s records imported"
messages.commit_total_records_ignored = "%s records ignored"
messages.commit_total_errors = "%s records in error"
try:
self.uploadTitle = current.response.s3.crud_strings[self.tablename].title_upload
except:
self.uploadTitle = T("Upload a %s import file" % r.function)
# @todo: correct to switch this off for the whole session?
current.session.s3.ocr_enabled = False
# Reset all errors/warnings
self.error = None
self.warning = None
# CSV upload configuration
if "csv_stylesheet" in attr:
self.csv_stylesheet = attr["csv_stylesheet"]
else:
self.csv_stylesheet = None
self.csv_extra_fields = None
self.csv_extra_data = None
# Environment
self.controller = r.controller
self.function = r.function
# Target table for the data import
self.controller_resource = self.resource
self.controller_table = self.table
self.controller_tablename = self.tablename
# Table for uploads
self.__define_table()
self.upload_resource = None
self.item_resource = None
# XSLT Path
self.xslt_path = os.path.join(r.folder, r.XSLT_PATH)
self.xslt_extension = r.XSLT_EXTENSION
# Check authorization
authorised = self.permit("create", self.upload_tablename) and \
self.permit("create", self.controller_tablename)
if not authorised:
if r.method is not None:
r.unauthorised()
else:
return dict(form=None)
# @todo: clean this up
source = None
transform = None
upload_id = None
items = None
# @todo get the data from either get_vars or post_vars appropriately
# for post -> commit_items would need to add the uploadID
if "transform" in r.get_vars:
transform = r.get_vars["transform"]
if "filename" in r.get_vars:
source = r.get_vars["filename"]
if "job" in r.post_vars:
upload_id = r.post_vars["job"]
elif "job" in r.get_vars:
upload_id = r.get_vars["job"]
items = self._process_item_list(upload_id, r.vars)
if "delete" in r.get_vars:
r.http = "DELETE"
# If we have an upload ID, then get upload and import job
self.upload_id = upload_id
query = (self.upload_table.id == upload_id)
self.upload_job = current.db(query).select(limitby=(0, 1)).first()
if self.upload_job:
self.job_id = self.upload_job.job_id
else:
self.job_id = None
# Now branch off to the appropriate controller function
if r.http == "GET":
if source != None:
self.commit(source, transform)
output = self.upload(r, **attr)
if upload_id != None:
output = self.display_job(upload_id)
else:
output = self.upload(r, **attr)
elif r.http == "POST":
if items != None:
output = self.commit_items(upload_id, items)
else:
output = self.generate_job(r, **attr)
elif r.http == "DELETE":
if upload_id != None:
output = self.delete_job(upload_id)
else:
r.error(405, current.manager.ERROR.BAD_METHOD)
return output
# -------------------------------------------------------------------------
def upload(self, r, **attr):
"""
This will display the upload form
It will ask for a file to be uploaded or for a job to be selected.
If a file is uploaded then it will guess at the file type and
ask for the transform file to be used. The transform files will
be in a dataTable with the module specific files shown first and
after those all other known transform files. Once the transform
file is selected the import process can be started which will
generate an importJob, and a "POST" method will occur
If a job is selected it will have two actions, open and delete.
Open will mean that a "GET" method will occur, with the job details
passed in.
Whilst the delete action will trigger a "DELETE" method.
"""
_debug("S3Importer.upload()")
request = self.request
form = self._upload_form(r, **attr)
output = self._create_upload_dataTable()
if request.representation == "aadata":
return output
output.update(form=form, title=self.uploadTitle)
return output
# -------------------------------------------------------------------------
def generate_job(self, r, **attr):
"""
Generate an ImportJob from the submitted upload form
"""
_debug("S3Importer.display()")
response = current.response
s3 = response.s3
db = current.db
table = self.upload_table
title=self.uploadTitle
form = self._upload_form(r, **attr)
r = self.request
r.read_body()
sfilename = form.vars.file
try:
ofilename = r.post_vars["file"].filename
except:
form.errors.file = self.messages.no_file
if form.errors:
response.flash = ""
output = self._create_upload_dataTable()
output.update(form=form, title=title)
elif not sfilename or \
ofilename not in r.files or r.files[ofilename] is None:
response.flash = ""
response.error = self.messages.file_not_found
output = self._create_upload_dataTable()
output.update(form=form, title=title)
else:
output = dict()
query = (table.file == sfilename)
db(query).update(controller=self.controller,
function=self.function,
filename=ofilename,
user_id=current.session.auth.user.id)
# must commit here to separate this transaction from
# the trial import phase which will be rolled back.
db.commit()
extension = ofilename.rsplit(".", 1).pop()
if extension not in ("csv", "xls"):
response.flash = None
response.error = self.messages.invalid_file_format
return self.upload(r, **attr)
upload_file = r.files[ofilename]
if extension == "xls":
if "xls_parser" in s3:
upload_file.seek(0)
upload_file = s3.xls_parser(upload_file.read())
extension = "csv"
if upload_file is None:
response.flash = None
response.error = self.messages.file_not_found
return self.upload(r, **attr)
else:
upload_file.seek(0)
row = db(query).select(table.id, limitby=(0, 1)).first()
upload_id = row.id
if "single_pass" in r.vars:
single_pass = r.vars["single_pass"]
else:
single_pass = None
self._generate_import_job(upload_id,
upload_file,
extension,
commit_job = single_pass)
if upload_id is None:
row = db(query).update(status = 2) # in error
if self.error != None:
response.error = self.error
if self.warning != None:
response.warning = self.warning
response.flash = ""
return self.upload(r, **attr)
else:
if single_pass:
current.session.flash = self.messages.file_uploaded
# For a single pass retain the vars from the original URL
next_URL = URL(r=self.request,
f=self.function,
args=["import"],
vars=current.request.get_vars
)
redirect(next_URL)
s3.dataTable_vars = {"job" : upload_id}
return self.display_job(upload_id)
return output
# -------------------------------------------------------------------------
def display_job(self, upload_id):
"""
@todo: docstring?
"""
_debug("S3Importer.display_job()")
request = self.request
response = current.response
db = current.db
table = self.upload_table
job_id = self.job_id
output = dict()
if job_id == None:
# redirect to the start page (removes all vars)
query = (table.id == upload_id)
row = db(query).update(status = 2) # in error
current.session.warning = self.messages.no_records_to_import
redirect(URL(r=request, f=self.function, args=["import"]))
# Get the status of the upload job
query = (table.id == upload_id)
row = db(query).select(table.status,
table.modified_on,
table.summary_added,
table.summary_error,
table.summary_ignored,
limitby=(0, 1)).first()
status = row.status
# completed display details
if status == 3: # Completed
# @todo currently this is an unnecessary server call,
# change for completed records to be a display details
# and thus avoid the round trip.
# but keep this code to protect against hand-crafted URLs
# (and the 'go back' syndrome on the browser)
result = (row.summary_added,
row.summary_error,
row.summary_ignored,
)
self._display_completed_job(result, row.modified_on)
redirect(URL(r=request, f=self.function, args=["import"]))
# otherwise display import items
response.view = self._view(request, "list.html")
output = self._create_import_item_dataTable(upload_id, job_id)
if request.representation == "aadata":
return output
if response.s3.error_report:
error_report = "Errors|" + "|".join(response.s3.error_report)
error_tip = A("All Errors",
_class="errortip",
_title=error_report)
else:
# @todo: restore the error tree from all items?
error_tip = ""
rowcount = len(self._get_all_items(upload_id))
rheader = DIV(TABLE(
TR(
TH("%s: " % self.messages.job_total_records),
TD(rowcount, _id="totalAvaliable"),
TH("%s: " % self.messages.job_records_selected),
TD(0, _id="totalSelected"),
TH(error_tip)
),
))
output["title"] = self.messages.title_job_read
output["rheader"] = rheader
output["subtitle"] = self.messages.title_job_list
return output
# -------------------------------------------------------------------------
def commit(self, source, transform):
"""
@todo: docstring?
"""
_debug("S3Importer.commit(%s, %s)" % (source, transform))
db = current.db
session = current.session
request = self.request
try:
openFile = open(source, "r")
except:
session.error = self.messages.file_open_error % source
redirect(URL(r=request, f=self.function))
# @todo: manage different file formats
# @todo: find file format from request.extension
fileFormat = "csv"
# insert data in the table and get the ID
try:
user = session.auth.user.id
except:
user = None
upload_id = self.upload_table.insert(controller=self.controller,
function=self.function,
filename = source,
user_id = user,
status = 1)
db.commit()
# create the import job
result = self._generate_import_job(upload_id,
openFile,
fileFormat,
stylesheet=transform
)
if result == None:
if self.error != None:
if session.error == None:
session.error = self.error
else:
session.error += self.error
if self.warning != None:
if session.warning == None:
session.warning = self.warning
else:
session.warning += self.warning
else:
items = self._get_all_items(upload_id, True)
# commit the import job
self._commit_import_job(upload_id, items)
result = self._update_upload_job(upload_id)
# get the results and display
msg = "%s : %s %s %s" % (source,
self.messages.commit_total_records_imported,
self.messages.commit_total_errors,
self.messages.commit_total_records_ignored)
msg = msg % result
if session.flash == None:
session.flash = msg
else:
session.flash += msg
# @todo: return the upload_id?
# -------------------------------------------------------------------------
def commit_items(self, upload_id, items):
"""
@todo: docstring?
"""
_debug("S3Importer.commit_items(%s, %s)" % (upload_id, items))
# Save the import items
self._commit_import_job(upload_id, items)
# Update the upload table
# change the status to completed
# record the summary details
# delete the upload file
result = self._update_upload_job(upload_id)
# redirect to the start page (removes all vars)
self._display_completed_job(result)
redirect(URL(r=self.request, f=self.function, args=["import"]))
# -------------------------------------------------------------------------
def delete_job(self, upload_id):
"""
Delete an uploaded file and the corresponding import job
@param upload_id: the upload ID
"""
_debug("S3Importer.delete_job(%s)" % (upload_id))
db = current.db
request = self.request
resource = request.resource # use self.resource?
response = current.response
# Get the import job ID
job_id = self.job_id
# Delete the import job (if any)
if job_id:
result = resource.import_xml(None,
id = None,
tree = None,
job_id = job_id,
delete_job = True)
# @todo: check result
# now delete the upload entry
query = (self.upload_table.id == upload_id)
count = db(query).delete()
# @todo: check that the record has been deleted
# Now commit the changes
db.commit()
result = count
# return to the main import screen
# @todo: check result properly
if result == False:
response.warning = self.messages.no_job_to_delete
else:
response.flash = self.messages.job_deleted
# redirect to the start page (remove all vars)
self.next = self.request.url(vars=dict())
return
# ========================================================================
# Utility methods
# ========================================================================
def _upload_form(self, r, **attr):
"""
Create and process the upload form, including csv_extra_fields
"""
EXTRA_FIELDS = "csv_extra_fields"
TEMPLATE = "csv_template"
REPLACE_OPTION = "replace_option"
session = current.session
response = current.response
s3 = response.s3
request = self.request
table = self.upload_table
formstyle = s3.crud.formstyle
response.view = self._view(request, "list_create.html")
if REPLACE_OPTION in attr:
replace_option = attr[REPLACE_OPTION]
if replace_option is not None:
table.replace_option.readable = True
table.replace_option.writable = True
table.replace_option.label = replace_option
        fields = [f for f in table if f.readable or (f.writable and not f.compute)]
if EXTRA_FIELDS in attr:
extra_fields = attr[EXTRA_FIELDS]
if extra_fields is not None:
fields.extend([f["field"] for f in extra_fields if "field" in f])
self.csv_extra_fields = extra_fields
labels, required = s3_mark_required(fields)
if required:
s3.has_required = True
form = SQLFORM.factory(table_name=self.UPLOAD_TABLE_NAME,
labels=labels,
formstyle=formstyle,
upload = os.path.join(request.folder, "uploads", "imports"),
separator = "",
message=self.messages.file_uploaded,
*fields)
args = ["s3csv"]
template = attr.get(TEMPLATE, True)
if template is True:
args.extend([self.controller, "%s.csv" % self.function])
elif isinstance(template, basestring):
args.extend([self.controller, "%s.csv" % template])
elif isinstance(template, (tuple, list)):
args.extend(template[:-1])
args.append("%s.csv" % template[-1])
else:
template = None
if template is not None:
url = URL(r=request, c="static", f="formats", args=args)
try:
# only add the download link if the template can be opened
open("%s/../%s" % (r.folder, url))
form[0][0].insert(0, TR(TD(A(self.messages.download_template,
_href=url)),
_id="template__row"))
except:
pass
if form.accepts(r.post_vars, session,
formname="upload_form"):
upload_id = table.insert(**table._filter_fields(form.vars))
if self.csv_extra_fields:
self.csv_extra_data = Storage()
for f in self.csv_extra_fields:
label = f.get("label", None)
if not label:
continue
field = f.get("field", None)
value = f.get("value", None)
if field:
if field.name in form.vars:
data = form.vars[field.name]
else:
data = field.default
value = data
requires = field.requires
if not isinstance(requires, (list, tuple)):
requires = [requires]
if requires:
requires = requires[0]
if isinstance(requires, IS_EMPTY_OR):
requires = requires.other
try:
options = requires.options()
except:
pass
else:
for k, v in options:
if k == str(data):
value = v
elif value is None:
continue
self.csv_extra_data[label] = value
s3.no_formats = True
return form
# -------------------------------------------------------------------------
def _create_upload_dataTable(self):
"""
List of previous Import jobs
"""
db = current.db
request = self.request
controller = self.controller
function = self.function
s3 = current.response.s3
table = self.upload_table
s3.filter = (table.controller == controller) & \
(table.function == function)
fields = ["id",
"filename",
"created_on",
"user_id",
"replace_option",
"status"]
self._use_upload_table()
# Hide the list of prior uploads for now
#output = self._dataTable(fields, sort_by = [[2,"desc"]])
output = dict()
self._use_controller_table()
if request.representation == "aadata":
return output
query = (table.status != 3) # Status of Pending or in-Error
rows = db(query).select(table.id)
restrictOpen = [str(row.id) for row in rows]
query = (table.status == 3) # Status of Completed
rows = db(query).select(table.id)
restrictView = [str(row.id) for row in rows]
s3.actions = [
dict(label=str(self.messages.open_btn),
_class="action-btn",
url=URL(r=request,
c=controller,
f=function,
args=["import"],
vars={"job":"[id]"}),
restrict = restrictOpen
),
dict(label=str(self.messages.view_btn),
_class="action-btn",
url=URL(r=request,
c=controller,
f=function,
args=["import"],
vars={"job":"[id]"}),
restrict = restrictView
),
dict(label=str(self.messages.delete_btn),
_class="delete-btn",
url=URL(r=request,
c=controller,
f=function,
args=["import"],
vars={"job":"[id]",
"delete":"True"
}
)
),
]
# Display an Error if no job is attached with this record
query = (table.status == 1) # Pending
rows = db(query).select(table.id)
s3.dataTableStyleAlert = [str(row.id) for row in rows]
query = (table.status == 2) # in error
rows = db(query).select(table.id)
s3.dataTableStyleWarning = [str(row.id) for row in rows]
return output
# -------------------------------------------------------------------------
def _create_import_item_dataTable(self, upload_id, job_id):
"""
@todo: docstring?
"""
s3 = current.response.s3
represent = {"element" : self._item_element_represent}
self._use_import_item_table(job_id)
# Add a filter to the dataTable query
s3.filter = (self.table.job_id == job_id) & \
(self.table.tablename == self.controller_tablename)
# Get a list of the records that have an error of None
query = (self.table.job_id == job_id) & \
(self.table.tablename == self.controller_tablename)
rows = current.db(query).select(self.table.id, self.table.error)
select_list = []
error_list = []
for row in rows:
if row.error:
error_list.append(str(row.id))
else:
select_list.append("%s" % row.id)
select_id = ",".join(select_list)
output = self._dataTable(["id", "element", "error"],
sort_by = [[1, "asc"]],
represent=represent)
self._use_controller_table()
if self.request.representation == "aadata":
return output
# Highlight rows in error in red
s3.dataTableStyleWarning = error_list
s3.dataTableSelectable = True
s3.dataTablePostMethod = True
table = output["items"]
job = INPUT(_type="hidden", _id="importUploadID", _name="job",
_value="%s" % upload_id)
mode = INPUT(_type="hidden", _id="importMode", _name="mode",
_value="Inclusive")
# only select the rows with no errors
selected = INPUT(_type="hidden", _id="importSelected",
_name="selected", _value="[%s]" % select_id)
form = FORM(table, job, mode, selected)
output["items"] = form
s3.dataTableSelectSubmitURL = "import?job=%s&" % upload_id
s3.actions = [
dict(label= str(self.messages.item_show_details),
_class="action-btn",
_jqclick="$('.importItem.'+id).toggle();",
),
]
return output
# -------------------------------------------------------------------------
def _generate_import_job(self,
upload_id,
openFile,
fileFormat,
stylesheet=None,
commit_job=False):
"""
This will take a s3_import_upload record and
generate the importJob
@param uploadFilename: The name of the uploaded file
@todo: complete parameter descriptions
"""
_debug("S3Importer._generate_import_job(%s, %s, %s, %s)" % (upload_id,
openFile,
fileFormat,
stylesheet
)
)
db = current.db
request = self.request
resource = request.resource
# ---------------------------------------------------------------------
# CSV
if fileFormat == "csv" or fileFormat == "comma-separated-values":
fmt = "csv"
src = openFile
# ---------------------------------------------------------------------
# XML
# @todo: implement
#elif fileFormat == "xml":
# ---------------------------------------------------------------------
# S3JSON
# @todo: implement
#elif fileFormat == "s3json":
# ---------------------------------------------------------------------
# PDF
# @todo: implement
#elif fileFormat == "pdf":
# ---------------------------------------------------------------------
# Unsupported Format
else:
msg = self.messages.unsupported_file_type % fileFormat
self.error = msg
_debug(msg)
return None
# Get the stylesheet
if stylesheet == None:
stylesheet = self._get_stylesheet()
if stylesheet == None:
return None
# before calling import tree ensure the db.table is the controller_table
self.table = self.controller_table
self.tablename = self.controller_tablename
# Pass stylesheet arguments
args = Storage()
mode = request.get_vars.get("xsltmode", None)
if mode is not None:
args.update(mode=mode)
# Generate the import job
resource.import_xml(src,
format=fmt,
extra_data=self.csv_extra_data,
stylesheet=stylesheet,
ignore_errors = True,
commit_job = commit_job,
**args)
job = resource.job
if job is None:
if resource.error:
# Error
self.error = resource.error
return None
else:
# Nothing to import
self.warning = self.messages.no_records_to_import
return None
else:
# Job created
job_id = job.job_id
errors = current.xml.collect_errors(job)
if errors:
current.response.s3.error_report = errors
query = (self.upload_table.id == upload_id)
result = db(query).update(job_id=job_id)
# @todo: add check that result == 1, if not we are in error
# Now commit the changes
db.commit()
self.job_id = job_id
return True
# -------------------------------------------------------------------------
def _get_stylesheet(self, file_format="csv"):
"""
Get the stylesheet for transformation of the import
@param file_format: the import source file format
"""
if file_format == "csv":
xslt_path = os.path.join(self.xslt_path, "s3csv")
else:
xslt_path = os.path.join(self.xslt_path, file_format, "import.xsl")
return xslt_path
# Use the "csv_stylesheet" parameter to override the CSV stylesheet subpath
# and filename, e.g.
# s3_rest_controller(module, resourcename,
# csv_stylesheet=("inv", "inv_item.xsl"))
if self.csv_stylesheet:
if isinstance(self.csv_stylesheet, (tuple, list)):
stylesheet = os.path.join(xslt_path,
*self.csv_stylesheet)
else:
stylesheet = os.path.join(xslt_path,
self.controller,
self.csv_stylesheet)
else:
xslt_filename = "%s.%s" % (self.function, self.xslt_extension)
stylesheet = os.path.join(xslt_path,
self.controller,
xslt_filename)
if os.path.exists(stylesheet) is False:
msg = self.messages.stylesheet_not_found % stylesheet
self.error = msg
_debug(msg)
return None
return stylesheet
# -------------------------------------------------------------------------
def _commit_import_job(self, upload_id, items):
"""
This will save all of the selected import items
@todo: parameter descriptions?
"""
_debug("S3Importer._commit_import_job(%s, %s)" % (upload_id, items))
db = current.db
resource = self.request.resource
# Load the items from the s3_import_item table
self.importDetails = dict()
table = self.upload_table
query = (table.id == upload_id)
row = db(query).select(table.job_id,
table.replace_option,
limitby=(0, 1)).first()
if row is None:
return False
else:
job_id = row.job_id
current.response.s3.import_replace = row.replace_option
itemTable = S3ImportJob.define_item_table()
if itemTable != None:
#****************************************************************
# EXPERIMENTAL
# This doesn't delete related items
# but import_tree will tidy it up later
#****************************************************************
# get all the items selected for import
rows = self._get_all_items(upload_id, as_string=True)
# loop through each row and delete the items not required
self._store_import_details(job_id, "preDelete")
for id in rows:
if str(id) not in items:
# @todo: replace with a helper method from the API
_debug("Deleting item.id = %s" % id)
query = (itemTable.id == id)
db(query).delete()
#****************************************************************
# EXPERIMENTAL
#****************************************************************
# set up the table we will import data into
self.table = self.controller_table
self.tablename = self.controller_tablename
self._store_import_details(job_id, "preImportTree")
# Now commit the remaining items
msg = resource.import_xml(None,
job_id = job_id,
ignore_errors = True)
return resource.error is None
# -------------------------------------------------------------------------
def _store_import_details(self, job_id, key):
"""
This will store the details from an importJob
@todo: parameter descriptions?
"""
_debug("S3Importer._store_import_details(%s, %s)" % (job_id, key))
itemTable = S3ImportJob.define_item_table()
query = (itemTable.job_id == job_id) & \
(itemTable.tablename == self.controller_tablename)
rows = current.db(query).select(itemTable.data, itemTable.error)
items = [dict(data=row.data, error=row.error) for row in rows]
self.importDetails[key] = items
# -------------------------------------------------------------------------
def _update_upload_job(self, upload_id):
"""
This will record the results from the import, and change the
status of the upload job
@todo: parameter descriptions?
@todo: report errors in referenced records, too
"""
_debug("S3Importer._update_upload_job(%s)" % (upload_id))
request = self.request
resource = request.resource
db = current.db
totalPreDelete = len(self.importDetails["preDelete"])
totalPreImport = len(self.importDetails["preImportTree"])
totalIgnored = totalPreDelete - totalPreImport
if resource.error_tree is None:
totalErrors = 0
else:
totalErrors = len(resource.error_tree.findall(
"resource[@name='%s']" % resource.tablename))
totalRecords = totalPreImport - totalErrors
if totalRecords < 0:
totalRecords = 0
query = (self.upload_table.id == upload_id)
result = db(query).update(summary_added=totalRecords,
summary_error=totalErrors,
summary_ignored = totalIgnored,
status = 3)
# Now commit the changes
db.commit()
return (totalRecords, totalErrors, totalIgnored)
# -------------------------------------------------------------------------
def _display_completed_job(self, totals, timestmp=None):
"""
Generate a summary flash message for a completed import job
@param totals: the job totals as tuple
(total imported, total errors, total ignored)
@param timestmp: the timestamp of the completion
"""
session = current.session
msg = "%s - %s - %s" % \
(self.messages.commit_total_records_imported,
self.messages.commit_total_errors,
self.messages.commit_total_records_ignored)
msg = msg % totals
if timestmp != None:
session.flash = self.messages.job_completed % \
(self.date_represent(timestmp), msg)
        elif totals[1] != 0:
            session.error = msg
        elif totals[2] != 0:
            session.warning = msg
else:
session.flash = msg
# -------------------------------------------------------------------------
def _dataTable(self,
list_fields = [],
sort_by = [[1, "asc"]],
represent={},
):
"""
Method to get the data for the dataTable
This can be either a raw html representation or
and ajax call update
Additional data will be cached to limit calls back to the server
@param list_fields: list of field names
@param sort_by: list of sort by columns
@param represent: a dict of field callback functions used
to change how the data will be displayed
@return: a dict()
In html representations this will be a table of the data
plus the sortby instructions
In ajax this will be a json response
In addition the following values will be made available:
totalRecords Number of records in the filtered data set
totalDisplayRecords Number of records to display
start Start point in the ordered data set
limit Number of records in the ordered set
NOTE: limit - totalDisplayRecords = total cached
"""
# ********************************************************************
# Common tasks
# ********************************************************************
db = current.db
session = current.session
request = self.request
response = current.response
resource = self.resource
s3 = response.s3
representation = request.representation
table = self.table
tablename = self.tablename
vars = request.get_vars
output = dict()
# Check permission to read this table
authorised = self.permit("read", tablename)
if not authorised:
request.unauthorised()
# List of fields to select from
# fields is a list of Field objects
# list_field is a string list of field names
if list_fields == []:
fields = resource.readable_fields()
else:
fields = [table[f] for f in list_fields if f in table.fields]
if not fields:
fields = []
# attach any represent callbacks
for f in fields:
if f.name in represent:
f.represent = represent[f.name]
# Make sure that we have the table id as the first column
if fields[0].name != table.fields[0]:
fields.insert(0, table[table.fields[0]])
list_fields = [f.name for f in fields]
# Filter
if s3.filter is not None:
self.resource.add_filter(s3.filter)
# ********************************************************************
# ajax call
# ********************************************************************
if representation == "aadata":
start = vars.get("iDisplayStart", None)
limit = vars.get("iDisplayLength", None)
if limit is not None:
try:
start = int(start)
limit = int(limit)
except ValueError:
start = None
limit = None # use default
else:
start = None # use default
# Using the sort variables sent from dataTables
            if vars.iSortingCols:
                orderby = self.ssp_orderby(resource, list_fields)
            else:
                orderby = None
# Echo
sEcho = int(vars.sEcho or 0)
# Get the list
items = resource.sqltable(fields=list_fields,
start=start,
limit=limit,
orderby=orderby,
download_url=self.download_url,
as_page=True) or []
# Ugly hack to change any occurrence of [id] with the true id
# Needed because the represent doesn't know the id
for i in range(len(items)):
id = items[i][0]
for j in range(len(items[i])):
new = items[i][j].replace("[id]",id)
items[i][j] = new
totalrows = self.resource.count()
result = dict(sEcho = sEcho,
iTotalRecords = totalrows,
iTotalDisplayRecords = totalrows,
aaData = items)
output = jsons(result)
# ********************************************************************
# html 'initial' call
# ********************************************************************
else: # catch all
start = 0
limit = 1
# Sort by
vars["iSortingCols"] = len(sort_by)
# generate the dataTables.js variables for sorting
index = 0
for col in sort_by:
colName = "iSortCol_%s" % str(index)
colValue = col[0]
dirnName = "sSortDir_%s" % str(index)
if len(col) > 1:
dirnValue = col[1]
else:
dirnValue = "asc"
                vars[colName] = colValue
                vars[dirnName] = dirnValue
                index += 1
# Now using these sort variables generate the order by statement
orderby = self.ssp_orderby(resource, list_fields)
del vars["iSortingCols"]
            for index in range(len(sort_by)):
                del vars["iSortCol_%s" % str(index)]
                del vars["sSortDir_%s" % str(index)]
# Get the first row for a quick up load
items = resource.sqltable(fields=list_fields,
start=start,
limit=1,
orderby=orderby,
download_url=self.download_url)
totalrows = resource.count()
if items:
if totalrows:
if s3.dataTable_iDisplayLength:
limit = 2 * s3.dataTable_iDisplayLength
else:
limit = 50
# Add a test on the first call here:
# Now get the limit rows for ajax style update of table
sqltable = resource.sqltable(fields=list_fields,
start=start,
limit=limit,
orderby=orderby,
download_url=self.download_url,
as_page=True)
aadata = dict(aaData = sqltable or [])
# Ugly hack to change any occurrence of [id] with the true id
# Needed because the represent doesn't know the id
for i in range(len(aadata["aaData"])):
id = aadata["aaData"][i][0]
for j in range(len(aadata["aaData"][i])):
new = aadata["aaData"][i][j].replace("[id]",id)
aadata["aaData"][i][j] = new
aadata.update(iTotalRecords=totalrows,
iTotalDisplayRecords=totalrows)
response.aadata = jsons(aadata)
s3.start = 0
s3.limit = limit
else: # No items in database
# s3import tables don't have a delete field but kept for the record
if "deleted" in table:
available_records = db(table.deleted == False)
else:
available_records = db(table.id > 0)
# check for any records on an unfiltered table
if available_records.select(table.id,
limitby=(0, 1)).first():
items = self.crud_string(tablename, "msg_no_match")
else:
items = self.crud_string(tablename, "msg_list_empty")
output.update(items=items, sortby=sort_by)
# Value to be added to the dataTable ajax call
s3.dataTable_Method = "import"
return output
# -------------------------------------------------------------------------
def _item_element_represent(self, value):
"""
Represent the element in an import item for dataTable display
@param value: the string containing the element
"""
T = current.T
db = current.db
value = S3XML.xml_decode(value)
try:
element = etree.fromstring(value)
except:
# XMLSyntaxError: return the element as-is
return DIV(value)
tablename = element.get("name")
table = current.db[tablename]
output = DIV()
details = TABLE(_class="importItem [id]")
header, rows = self._add_item_details(element.findall("data"), table)
if header is not None:
output.append(header)
# Add components, if present
components = element.findall("resource")
for component in components:
ctablename = component.get("name")
ctable = db[ctablename]
self._add_item_details(component.findall("data"), ctable,
details=rows, prefix=True)
if rows:
details.append(TBODY(rows))
# Add error messages, if present
errors = current.xml.collect_errors(element)
if errors:
details.append(TFOOT(TR(TH("%s:" % T("Errors")),
TD(UL([LI(e) for e in errors])))))
if rows == [] and components == []:
            # At this stage we don't have anything to display, so see if we
            # can find something to show. This could be the case when a table
            # being imported is a resolver for a many-to-many relationship
refdetail = TABLE(_class="importItem [id]")
references = element.findall("reference")
for reference in references:
tuid = reference.get("tuid")
resource = reference.get("resource")
refdetail.append(TR(TD(resource), TD(tuid)))
output.append(refdetail)
else:
output.append(details)
return str(output)
# -------------------------------------------------------------------------
@staticmethod
def _add_item_details(data, table, details=None, prefix=False):
"""
Add details of the item element
@param data: the list of data elements in the item element
@param table: the table for the data
@param details: the existing details rows list (to append to)
"""
tablename = table._tablename
if details is None:
details = []
first = None
firstString = None
header = None
for child in data:
f = child.get("field", None)
if f not in table.fields:
continue
elif f == "wkt":
# Skip bulky WKT fields
continue
field = table[f]
ftype = str(field.type)
value = child.get("value", None)
if not value:
value = child.text
try:
value = S3Importer._decode_data(field, value)
except:
pass
if value:
value = S3XML.xml_encode(unicode(value))
else:
value = ""
if f != None and value != None:
headerText = P(B("%s: " % f), value)
if not first:
first = headerText
if ftype == "string" and not firstString:
firstString = headerText
if f == "name":
header = headerText
if prefix:
details.append(TR(TH("%s.%s:" % (tablename, f)), TD(value)))
else:
details.append(TR(TH("%s:" % f), TD(value)))
if not header:
if firstString:
header = firstString
else:
header = first
return (header, details)
# -------------------------------------------------------------------------
@staticmethod
def _decode_data(field, value):
"""
Try to decode string data into its original type
@param field: the Field instance
@param value: the stringified value
@todo: replace this by ordinary decoder
"""
if field.type == "string" or \
field.type == "string" or \
field.type == "password" or \
field.type == "upload" or \
field.type == "text":
return value
elif field.type == "integer" or field.type == "id":
return int(value)
elif field.type == "double" or field.type == "decimal":
return float(value)
elif field.type == 'boolean':
if value and str(value)[:1].upper() not in ["F", "0"]:
return "T"
else:
return "F"
elif field.type == "date":
return value # @todo fix this to get a date
elif field.type == "time":
return value # @todo fix this to get a time
elif field.type == "datetime":
return value # @todo fix this to get a datetime
else:
return value
# -------------------------------------------------------------------------
@staticmethod
def date_represent(date_obj):
"""
Represent a datetime object as string
@param date_obj: the datetime object
@todo: replace by S3DateTime method?
"""
return date_obj.strftime("%d %B %Y, %I:%M%p")
# -------------------------------------------------------------------------
def _process_item_list(self, upload_id, vars):
"""
Get the list of IDs for the selected items from the "mode"
and "selected" request variables
@param upload_id: the upload_id
@param vars: the request variables
"""
items = None
if "mode" in vars:
mode = vars["mode"]
if "selected" in vars:
selected = vars["selected"].split(",")
else:
selected = []
if mode == "Inclusive":
items = selected
elif mode == "Exclusive":
all_items = self._get_all_items(upload_id, as_string=True)
items = [i for i in all_items if i not in selected]
return items
# -------------------------------------------------------------------------
def _get_all_items(self, upload_id, as_string=False):
""" Get a list of the record IDs of all import items for
the given upload ID
@param upload_id: the upload ID
@param as_string: represent each ID as string
"""
item_table = S3ImportJob.define_item_table()
upload_table = self.upload_table
query = (upload_table.id == upload_id) & \
(item_table.job_id == upload_table.job_id) & \
(item_table.tablename == self.controller_tablename)
rows = current.db(query).select(item_table.id)
if as_string:
items = [str(row.id) for row in rows]
else:
items = [row.id for row in rows]
return items
# -------------------------------------------------------------------------
def _use_upload_table(self):
"""
Set the resource and the table to be s3_import_upload
"""
if self.upload_resource is None:
from s3resource import S3Resource
(prefix, name) = self.UPLOAD_TABLE_NAME.split("_",1)
self.upload_resource = S3Resource(prefix, name)
self.resource = self.upload_resource
self.table = self.upload_table
self.tablename = self.upload_tablename
# -------------------------------------------------------------------------
def _use_controller_table(self):
"""
Set the resource and the table to be the imported resource
"""
self.resource = self.controller_resource
self.table = self.controller_table
self.tablename = self.controller_tablename
# -------------------------------------------------------------------------
def _use_import_item_table(self, job_id):
"""
Set the resource and the table to be s3_import_item
"""
if self.item_resource is None:
from s3resource import S3Resource
(prefix, name) = S3ImportJob.ITEM_TABLE_NAME.split("_",1)
self.item_resource = S3Resource(prefix, name)
self.resource = self.item_resource
self.tablename = S3ImportJob.ITEM_TABLE_NAME
self.table = S3ImportJob.define_item_table()
# -------------------------------------------------------------------------
def __define_table(self):
""" Configures the upload table """
_debug("S3Importer.__define_table()")
T = current.T
db = current.db
request = current.request
self.upload_tablename = self.UPLOAD_TABLE_NAME
import_upload_status = {
1: T("Pending"),
2: T("In error"),
3: T("Completed"),
}
def user_name_represent(id):
# @todo: use s3_present_user?
rep_str = "-"
table = db.auth_user
query = (table.id == id)
row = db(query).select(table.first_name,
table.last_name,
limitby=(0, 1)).first()
if row:
rep_str = "%s %s" % (row.first_name, row.last_name)
return rep_str
def status_represent(index):
if index is None:
return "Unknown" # @todo: use messages (internationalize)
else:
return import_upload_status[index]
now = request.utcnow
table = self.define_upload_table()
table.file.upload_folder = os.path.join(request.folder,
"uploads",
#"imports"
)
table.file.comment = DIV(_class="tooltip",
_title="%s|%s" %
(self.messages.import_file,
self.messages.import_file_comment))
table.file.label = self.messages.import_file
table.status.requires = IS_IN_SET(import_upload_status, zero=None)
table.status.represent = status_represent
table.user_id.label = self.messages.user_name
table.user_id.represent = user_name_represent
table.created_on.default = now
table.created_on.represent = self.date_represent
table.modified_on.default = now
table.modified_on.update = now
table.modified_on.represent = self.date_represent
table.replace_option.label = T("Replace")
self.upload_table = db[self.UPLOAD_TABLE_NAME]
# -------------------------------------------------------------------------
@classmethod
def define_upload_table(cls):
""" Defines the upload table """
db = current.db
uploadfolder = os.path.join(current.request.folder,
"uploads",
)
if cls.UPLOAD_TABLE_NAME not in db:
upload_table = db.define_table(cls.UPLOAD_TABLE_NAME,
Field("controller",
readable=False,
writable=False),
Field("function",
readable=False,
writable=False),
Field("file", "upload",
uploadfolder=os.path.join(current.request.folder, "uploads", "imports"),
autodelete=True),
Field("filename",
readable=False,
writable=False),
Field("status", "integer",
default=1,
readable=False,
writable=False),
Field("extra_data",
readable=False,
writable=False),
Field("replace_option", "boolean",
default=False,
readable=False,
writable=False),
Field("job_id",
length=128,
readable=False,
writable=False),
Field("user_id", "integer",
readable=False,
writable=False),
Field("created_on", "datetime",
readable=False,
writable=False),
Field("modified_on", "datetime",
readable=False,
writable=False),
Field("summary_added", "integer",
readable=False,
writable=False),
Field("summary_error", "integer",
readable=False,
writable=False),
Field("summary_ignored", "integer",
readable=False,
writable=False),
Field("completed_details", "text",
readable=False,
writable=False))
else:
upload_table = db[cls.UPLOAD_TABLE_NAME]
return upload_table
# =============================================================================
class S3ImportItem(object):
""" Class representing an import item (=a single record) """
METHOD = Storage(
CREATE="create",
UPDATE="update",
DELETE="delete"
)
POLICY = Storage(
THIS="THIS", # keep local instance
OTHER="OTHER", # update unconditionally
NEWER="NEWER", # update if import is newer
MASTER="MASTER" # update if import is master
)
# -------------------------------------------------------------------------
def __init__(self, job):
"""
Constructor
@param job: the import job this item belongs to
"""
self.job = job
self.ERROR = current.manager.ERROR
# Locking and error handling
self.lock = False
self.error = None
# Identification
import uuid
self.item_id = uuid.uuid4() # unique ID for this item
self.id = None
self.uid = None
# Data elements
self.table = None
self.tablename = None
self.element = None
self.data = None
self.original = None
self.components = []
self.references = []
self.load_components = []
self.load_references = []
self.parent = None
self.skip = False
# Conflict handling
self.mci = 2
self.mtime = datetime.utcnow()
self.modified = True
self.conflict = False
# Allowed import methods
self.strategy = job.strategy
# Update and conflict resolution policies
self.update_policy = job.update_policy
self.conflict_policy = job.conflict_policy
# Actual import method
self.method = None
self.onvalidation = None
self.onaccept = None
# Item import status flags
self.accepted = None
self.permitted = False
self.committed = False
# Writeback hook for circular references:
# Items which need a second write to update references
self.update = []
# -------------------------------------------------------------------------
def __repr__(self):
""" Helper method for debugging """
_str = "<S3ImportItem %s {item_id=%s uid=%s id=%s error=%s data=%s}>" % \
(self.table, self.item_id, self.uid, self.id, self.error, self.data)
return _str
# -------------------------------------------------------------------------
def parse(self,
element,
original=None,
table=None,
tree=None,
files=None):
"""
Read data from a <resource> element
@param element: the element
@param original: the original DB record (if already available)
@param table: the DB table
@param tree: the import tree
@param files: uploaded files
@returns: True if successful, False if not (sets self.error)
"""
db = current.db
xml = current.xml
manager = current.manager
validate = manager.validate
s3db = current.s3db
self.element = element
if table is None:
tablename = element.get(xml.ATTRIBUTE.name, None)
try:
table = s3db[tablename]
except:
self.error = self.ERROR.BAD_RESOURCE
element.set(xml.ATTRIBUTE.error, self.error)
return False
self.table = table
self.tablename = table._tablename
if original is None:
original = manager.original(table, element)
data = xml.record(table, element,
files=files,
original=original,
validate=validate)
if data is None:
self.error = self.ERROR.VALIDATION_ERROR
self.accepted = False
if not element.get(xml.ATTRIBUTE.error, False):
element.set(xml.ATTRIBUTE.error, str(self.error))
return False
self.data = data
if original is not None:
self.original = original
self.id = original[table._id.name]
if xml.UID in original:
self.uid = original[xml.UID]
self.data.update({xml.UID:self.uid})
elif xml.UID in data:
self.uid = data[xml.UID]
if xml.MTIME in data:
self.mtime = data[xml.MTIME]
if xml.MCI in data:
self.mci = data[xml.MCI]
_debug("New item: %s" % self)
return True
# -------------------------------------------------------------------------
def deduplicate(self):
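"""
Find out whether this item is a duplicate of an existing record;
if an original record can be found, switch the import method to
UPDATE, otherwise fall back to the table's custom "deduplicate"
resolver (if configured)
"""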
RESOLVER = "deduplicate"
if self.id:
return
table = self.table
if table is None:
return
if self.original is not None:
original = self.original
else:
original = current.manager.original(table, self.data)
if original is not None:
self.original = original
self.id = original[table._id.name]
UID = current.xml.UID
if UID in original:
self.uid = original[UID]
self.data.update({UID:self.uid})
self.method = self.METHOD.UPDATE
else:
resolve = current.s3db.get_config(self.tablename, RESOLVER)
if self.data and resolve:
resolve(self)
return
# -------------------------------------------------------------------------
def authorize(self):
"""
Authorize the import of this item, sets self.permitted
"""
db = current.db
manager = current.manager
authorize = manager.permit
self.permitted = False
if not self.table:
return False
prefix = self.tablename.split("_", 1)[0]
if prefix in manager.PROTECTED:
return False
if not authorize:
self.permitted = True
self.method = self.METHOD.CREATE
if self.id:
if self.data.deleted is True:
self.method = self.METHOD.DELETE
self.accepted = True
else:
if not self.original:
query = (self.table.id == self.id)
self.original = db(query).select(limitby=(0, 1)).first()
if self.original:
self.method = self.METHOD.UPDATE
if self.method == self.METHOD.CREATE:
self.id = 0
if authorize:
self.permitted = authorize(self.method,
self.tablename,
record_id=self.id)
return self.permitted
# -------------------------------------------------------------------------
def validate(self):
"""
Validate this item (=record onvalidation), sets self.accepted
"""
if self.accepted is not None:
return self.accepted
if self.data is None or not self.table:
self.accepted = False
return False
form = Storage()
form.method = self.method
form.vars = self.data
if self.id:
form.vars.id = self.id
form.errors = Storage()
tablename = self.tablename
key = "%s_onvalidation" % self.method
s3db = current.s3db
onvalidation = s3db.get_config(tablename, key,
s3db.get_config(tablename, "onvalidation"))
if onvalidation:
try:
callback(onvalidation, form, tablename=tablename)
except:
pass # @todo need a better handler here.
self.accepted = True
if form.errors:
error = current.xml.ATTRIBUTE.error
for k in form.errors:
e = self.element.findall("data[@field='%s']" % k)
if not e:
e = self.element.findall("reference[@field='%s']" % k)
if not e:
e = self.element
form.errors[k] = "[%s] %s" % (k, form.errors[k])
else:
e = e[0]
e.set(error,
str(form.errors[k]).decode("utf-8"))
self.error = self.ERROR.VALIDATION_ERROR
self.accepted = False
return self.accepted
# -------------------------------------------------------------------------
def commit(self, ignore_errors=False):
"""
Commit this item to the database
@param ignore_errors: skip invalid components
(still reports errors)
"""
db = current.db
s3db = current.s3db
xml = current.xml
manager = current.manager
table = self.table
# Check if already committed
if self.committed:
# already committed
return True
# If the parent item gets skipped, then skip this item as well
if self.parent is not None and self.parent.skip:
return True
_debug("Committing item %s" % self)
# Resolve references
self._resolve_references()
# Validate
if not self.validate():
_debug("Validation error: %s (%s)" % (self.error, xml.tostring(self.element, pretty_print=True)))
self.skip = True
return ignore_errors
elif self.components:
for component in self.components:
if not component.validate():
if hasattr(component, "tablename"):
tn = component.tablename
else:
tn = None
_debug("Validation error, component=%s" % tn)
component.skip = True
# Skip this item on any component validation errors
# unless ignore_errors is True
if ignore_errors:
continue
else:
self.skip = True
return False
# De-duplicate
self.deduplicate()
# Log this item
if manager.log is not None:
manager.log(self)
# Authorize item
if not self.authorize():
_debug("Not authorized - skip")
self.error = manager.ERROR.NOT_PERMITTED
self.skip = True
return ignore_errors
_debug("Method: %s" % self.method)
# Check if import method is allowed in strategy
if not isinstance(self.strategy, (list, tuple)):
self.strategy = [self.strategy]
if self.method not in self.strategy:
_debug("Method not in strategy - skip")
self.error = manager.ERROR.NOT_PERMITTED
self.skip = True
return True
this = self.original
if not this and self.id and \
self.method in (self.METHOD.UPDATE, self.METHOD.DELETE):
query = (table.id == self.id)
this = db(query).select(limitby=(0, 1)).first()
this_mtime = None
this_mci = 0
if this:
if xml.MTIME in table.fields:
this_mtime = xml.as_utc(this[xml.MTIME])
if xml.MCI in table.fields:
this_mci = this[xml.MCI]
self.mtime = xml.as_utc(self.mtime)
# Conflict detection
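# A conflict exists when both the local record and the imported record have been modified since the last synchronization time stamp (only detectable if last_sync is known)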
this_modified = True
self.modified = True
self.conflict = False
last_sync = xml.as_utc(self.job.last_sync)
if last_sync:
if this_mtime and this_mtime < last_sync:
this_modified = False
if self.mtime and self.mtime < last_sync:
self.modified = False
if self.modified and this_modified:
self.conflict = True
if self.conflict and \
self.method in (self.METHOD.UPDATE, self.METHOD.DELETE):
_debug("Conflict: %s" % self)
if self.job.onconflict:
self.job.onconflict(self)
if self.data is not None:
data = Storage(self.data)
else:
data = Storage()
# Update existing record
if self.method == self.METHOD.UPDATE:
if this:
if "deleted" in this and this.deleted:
policy = self._get_update_policy(None)
if (policy == self.POLICY.NEWER and \
this_mtime and this_mtime > self.mtime) or \
(policy == self.POLICY.MASTER and \
(this_mci == 0 or self.mci != 1)):
self.skip = True
return True
fields = data.keys()
for f in fields:
if f not in this:
continue
if isinstance(this[f], datetime):
if xml.as_utc(data[f]) == xml.as_utc(this[f]):
del data[f]
continue
else:
if data[f] == this[f]:
del data[f]
continue
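# Apply the per-field update policy to decide whether the local value is kept (field removed from the update) or overwritten by the imported value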
remove = False
policy = self._get_update_policy(f)
if policy == self.POLICY.THIS:
remove = True
elif policy == self.POLICY.NEWER:
if this_mtime and this_mtime > self.mtime:
remove = True
elif policy == self.POLICY.MASTER:
if this_mci == 0 or self.mci != 1:
remove = True
if remove:
del data[f]
self.data.update({f:this[f]})
if "deleted" in this and this.deleted:
# Undelete re-imported records:
data.update(deleted=False)
if "deleted_fk" in table:
data.update(deleted_fk="")
if "created_by" in table:
data.update(created_by=table.created_by.default)
if "modified_by" in table:
data.update(modified_by=table.modified_by.default)
if not self.skip and not self.conflict and \
(len(data) or self.components or self.references):
if self.uid and xml.UID in table:
data.update({xml.UID:self.uid})
if xml.MTIME in table:
data.update({xml.MTIME: self.mtime})
if xml.MCI in data:
# retain local MCI on updates
del data[xml.MCI]
query = (table._id == self.id)
try:
success = db(query).update(**dict(data))
except:
self.error = sys.exc_info()[1]
self.skip = True
return False
if success:
self.committed = True
else:
# Nothing to update
self.committed = True
# Create new record
elif self.method == self.METHOD.CREATE:
# Do not apply field policy to UID and MCI
UID = xml.UID
if UID in data:
del data[UID]
MCI = xml.MCI
if MCI in data:
del data[MCI]
for f in list(data.keys()):
policy = self._get_update_policy(f)
if policy == self.POLICY.MASTER and self.mci != 1:
del data[f]
if len(data) or self.components or self.references:
# Restore UID and MCI
if self.uid and UID in table.fields:
data.update({UID:self.uid})
if MCI in table.fields:
data.update({MCI:self.mci})
# Insert the new record
try:
success = table.insert(**dict(data))
except:
self.error = sys.exc_info()[1]
self.skip = True
return False
if success:
self.id = success
self.committed = True
else:
# Nothing to create
self.skip = True
return True
# Delete local record
elif self.method == self.METHOD.DELETE:
if this:
if this.deleted:
self.skip = True
policy = self._get_update_policy(None)
if policy == self.POLICY.THIS:
self.skip = True
elif policy == self.POLICY.NEWER and \
(this_mtime and this_mtime > self.mtime):
self.skip = True
elif policy == self.POLICY.MASTER and \
(this_mci == 0 or self.mci != 1):
self.skip = True
else:
self.skip = True
if not self.skip and not self.conflict:
prefix, name = self.tablename.split("_", 1)
resource = manager.define_resource(prefix, name, id=self.id)
ondelete = s3db.get_config(self.tablename, "ondelete")
success = resource.delete(ondelete=ondelete,
cascade=True)
if resource.error:
self.error = resource.error
self.skip = True
return ignore_errors
_debug("Success: %s, id=%s %sd" % (self.tablename, self.id,
self.skip and "skippe" or \
self.method))
return True
# Audit + onaccept on successful commits
if self.committed:
form = Storage()
form.method = self.method
form.vars = self.data
tablename = self.tablename
prefix, name = tablename.split("_", 1)
if self.id:
form.vars.id = self.id
if manager.audit is not None:
manager.audit(self.method, prefix, name,
form=form,
record=self.id,
representation="xml")
s3db.update_super(table, form.vars)
if self.method == self.METHOD.CREATE:
current.auth.s3_set_record_owner(table, self.id)
key = "%s_onaccept" % self.method
onaccept = s3db.get_config(tablename, key,
s3db.get_config(tablename, "onaccept"))
if onaccept:
callback(onaccept, form, tablename=self.tablename)
# Update referencing items
if self.update and self.id:
for u in self.update:
item = u.get("item", None)
if not item:
continue
field = u.get("field", None)
if isinstance(field, (list, tuple)):
pkey, fkey = field
query = table.id == self.id
row = db(query).select(table[pkey],
limitby=(0, 1)).first()
if row:
item._update_reference(fkey, row[pkey])
else:
item._update_reference(field, self.id)
_debug("Success: %s, id=%s %sd" % (self.tablename, self.id,
self.skip and "skippe" or \
self.method))
return True
# -------------------------------------------------------------------------
def _get_update_policy(self, field):
"""
Get the update policy for a field (if the item will
update an existing record)
@param field: the name of the field
"""
if isinstance(self.update_policy, dict):
r = self.update_policy.get(field,
self.update_policy.get("__default__", self.POLICY.THIS))
else:
r = self.update_policy
if r not in self.POLICY.values():
r = self.POLICY.THIS
return r
# -------------------------------------------------------------------------
def _resolve_references(self):
"""
Resolve the references of this item (=look up all foreign
keys from other items of the same job). If a foreign key
is not yet available, it will be scheduled for later update.
"""
if not self.table:
return
items = self.job.items
for reference in self.references:
item = None
field = reference.field
entry = reference.entry
if not entry:
continue
# Resolve key tuples
if isinstance(field, (list,tuple)):
pkey, fkey = field
else:
pkey, fkey = ("id", field)
# Resolve the key table name
ktablename, key, multiple = s3_get_foreign_key(self.table[fkey])
if not ktablename:
if self.tablename == "auth_user" and \
fkey == "organisation_id":
ktablename = "org_organisation"
else:
continue
if entry.tablename:
ktablename = entry.tablename
try:
ktable = current.s3db[ktablename]
except:
continue
# Resolve the foreign key (value)
fk = entry.id
if entry.item_id:
item = items[entry.item_id]
if item:
fk = item.id
if fk and pkey != "id":
row = current.db(ktable._id == fk).select(ktable[pkey],
limitby=(0, 1)).first()
if not row:
fk = None
continue
else:
fk = row[pkey]
# Update record data
if fk:
if multiple:
val = self.data.get(fkey, [])
if fk not in val:
val.append(fk)
self.data[fkey] = val
else:
self.data[fkey] = fk
else:
if fkey in self.data and not multiple:
del self.data[fkey]
if item:
item.update.append(dict(item=self, field=fkey))
# -------------------------------------------------------------------------
def _update_reference(self, field, value):
"""
Helper method to update a foreign key in an already written
record. Will be called by the referenced item after (and only
if) it has been committed. This is only needed if the reference
could not be resolved before commit due to circular references.
@param field: the field name of the foreign key
@param value: the value of the foreign key
"""
if not value or not self.table:
return
db = current.db
if self.id and self.permitted:
fieldtype = str(self.table[field].type)
if fieldtype.startswith("list:reference"):
query = (self.table.id == self.id)
record = db(query).select(self.table[field],
limitby=(0,1)).first()
if record:
values = record[field]
if value not in values:
values.append(value)
db(self.table.id == self.id).update(**{field:values})
else:
db(self.table.id == self.id).update(**{field:value})
# -------------------------------------------------------------------------
def store(self, item_table=None):
"""
Store this item in the DB
"""
_debug("Storing item %s" % self)
if item_table is None:
return None
db = current.db
query = item_table.item_id == self.item_id
row = db(query).select(item_table.id, limitby=(0, 1)).first()
if row:
record_id = row.id
else:
record_id = None
record = Storage(job_id = self.job.job_id,
item_id = self.item_id,
tablename = self.tablename,
record_uid = self.uid,
error = self.error)
if self.element is not None:
element_str = current.xml.tostring(self.element,
xml_declaration=False)
record.update(element=element_str)
if self.data is not None:
data = Storage()
for f in self.data.keys():
table = self.table
if f not in table.fields:
continue
fieldtype = str(self.table[f].type)
if fieldtype == "id" or s3_has_foreign_key(self.table[f]):
continue
data.update({f:self.data[f]})
data_str = cPickle.dumps(data)
record.update(data=data_str)
ritems = []
for reference in self.references:
field = reference.field
entry = reference.entry
store_entry = None
if entry:
if entry.item_id is not None:
store_entry = dict(field=field,
item_id=str(entry.item_id))
elif entry.uid is not None:
store_entry = dict(field=field,
tablename=entry.tablename,
uid=str(entry.uid))
if store_entry is not None:
ritems.append(json.dumps(store_entry))
if ritems:
record.update(ritems=ritems)
citems = [c.item_id for c in self.components]
if citems:
record.update(citems=citems)
if self.parent:
record.update(parent=self.parent.item_id)
if record_id:
db(item_table.id == record_id).update(**record)
else:
record_id = item_table.insert(**record)
_debug("Record ID=%s" % record_id)
return record_id
# -------------------------------------------------------------------------
def restore(self, row):
"""
Restore an item from an item table row. This does not restore
the references (since this cannot be done before all items
are restored); call job.restore_references() to do that
@param row: the item table row
"""
xml = current.xml
self.item_id = row.item_id
self.accepted = None
self.permitted = False
self.committed = False
tablename = row.tablename
self.id = None
self.uid = row.record_uid
if row.data is not None:
self.data = cPickle.loads(row.data)
else:
self.data = Storage()
data = self.data
if xml.MTIME in data:
self.mtime = data[xml.MTIME]
if xml.MCI in data:
self.mci = data[xml.MCI]
UID = xml.UID
if UID in data:
self.uid = data[UID]
self.element = etree.fromstring(row.element)
if row.citems:
self.load_components = row.citems
if row.ritems:
self.load_references = [json.loads(ritem) for ritem in row.ritems]
self.load_parent = row.parent
try:
table = current.s3db[tablename]
except:
self.error = self.ERROR.BAD_RESOURCE
return False
else:
self.table = table
self.tablename = tablename
original = current.manager.original(table, self.data)
if original is not None:
self.original = original
self.id = original[table._id.name]
if UID in original:
self.uid = original[UID]
self.data.update({UID:self.uid})
self.error = row.error
if self.error and not self.data:
# Validation error
return False
return True
# =============================================================================
class S3ImportJob(object):
"""
Class to import an element tree into the database
"""
JOB_TABLE_NAME = "s3_import_job"
ITEM_TABLE_NAME = "s3_import_item"
# -------------------------------------------------------------------------
def __init__(self, manager, table,
tree=None,
files=None,
job_id=None,
strategy=None,
update_policy=None,
conflict_policy=None,
last_sync=None,
onconflict=None):
"""
Constructor
@param manager: the S3RequestManager instance performing this job
@param table: the table of the primary target resource
@param tree: the element tree to import
@param files: files attached to the import (for upload fields)
@param job_id: restore job from database (record ID or job_id)
@param strategy: the import strategy
@param update_policy: the update policy
@param conflict_policy: the conflict resolution policy
@param last_sync: the last synchronization time stamp (datetime)
@param onconflict: custom conflict resolver function
"""
self.error = None # the last error
self.error_tree = etree.Element(current.xml.TAG.root)
self.table = table
self.tree = tree
self.files = files
self.directory = Storage()
self.elements = Storage()
self.items = Storage()
self.references = []
self.job_table = None
self.item_table = None
self.count = 0 # total number of records imported
self.created = [] # IDs of created records
self.updated = [] # IDs of updated records
self.deleted = [] # IDs of deleted records
# Import strategy
self.strategy = strategy
if self.strategy is None:
self.strategy = [S3ImportItem.METHOD.CREATE,
S3ImportItem.METHOD.UPDATE,
S3ImportItem.METHOD.DELETE]
if not isinstance(self.strategy, (tuple, list)):
self.strategy = [self.strategy]
# Update policy (default=always update)
self.update_policy = update_policy
if not self.update_policy:
self.update_policy = S3ImportItem.POLICY.OTHER
# Conflict resolution policy (default=always update)
self.conflict_policy = conflict_policy
if not self.conflict_policy:
self.conflict_policy = S3ImportItem.POLICY.OTHER
# Synchronization settings
self.mtime = None
self.last_sync = last_sync
self.onconflict = onconflict
if job_id:
self.__define_tables()
jobtable = self.job_table
if str(job_id).isdigit():
query = jobtable.id == job_id
else:
query = jobtable.job_id == job_id
row = current.db(query).select(limitby=(0, 1)).first()
if not row:
raise SyntaxError("Job record not found")
self.job_id = row.job_id
if not self.table:
tablename = row.tablename
try:
self.table = current.s3db[tablename]
except:
pass
else:
import uuid
self.job_id = uuid.uuid4() # unique ID for this job
# -------------------------------------------------------------------------
def add_item(self,
element=None,
original=None,
components=None,
parent=None,
joinby=None):
"""
Parse and validate an XML element and add it as new item
to the job.
@param element: the element
@param original: the original DB record (if already available,
will otherwise be looked-up by this function)
@param components: a dictionary of components (as in S3Resource)
to include in the job (defaults to all
defined components)
@param parent: the parent item (if this is a component)
@param joinby: the component join key(s) (if this is a component)
@returns: a unique identifier for the new item, or None if there
was an error. self.error contains the last error, and
self.error_tree an element tree with all failing elements
including error attributes.
"""
if element in self.elements:
# element has already been added to this job
return self.elements[element]
# Parse the main element
item = S3ImportItem(self)
# Update lookup lists
item_id = item.item_id
self.items[item_id] = item
if element is not None:
self.elements[element] = item_id
if not item.parse(element,
original=original,
files=self.files):
self.error = item.error
item.accepted = False
if parent is None:
self.error_tree.append(deepcopy(item.element))
else:
# Now parse the components
table = item.table
components = current.s3db.get_components(table, names=components)
cnames = Storage()
cinfos = Storage()
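# Build lookup maps: tablename -> [aliases] and (tablename, alias) -> component info, so that component elements can be matched unambiguously even when several components share a table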
for alias in components:
component = components[alias]
pkey = component.pkey
if component.linktable:
ctable = component.linktable
fkey = component.lkey
else:
ctable = component.table
fkey = component.fkey
ctablename = ctable._tablename
if ctablename in cnames:
cnames[ctablename].append(alias)
else:
cnames[ctablename] = [alias]
cinfos[(ctablename, alias)] = Storage(component = component,
ctable = ctable,
pkey = pkey,
fkey = fkey,
original = None,
uid = None)
add_item = self.add_item
xml = current.xml
for celement in xml.components(element, names=cnames.keys()):
# Get the component tablename
ctablename = celement.get(xml.ATTRIBUTE.name, None)
if not ctablename:
continue
# Get the component alias (for disambiguation)
calias = celement.get(xml.ATTRIBUTE.alias, None)
if calias is None:
if ctablename not in cnames:
continue
aliases = cnames[ctablename]
if len(aliases) == 1:
calias = aliases[0]
else:
# ambiguous components *must* use alias
continue
if (ctablename, calias) not in cinfos:
continue
else:
cinfo = cinfos[(ctablename, calias)]
component = cinfo.component
original = cinfo.original
ctable = cinfo.ctable
pkey = cinfo.pkey
fkey = cinfo.fkey
if not component.multiple:
if cinfo.uid is not None:
continue
if original is None and item.id:
query = (table.id == item.id) & \
(table[pkey] == ctable[fkey])
original = current.db(query).select(ctable.ALL,
limitby=(0, 1)).first()
if original:
cinfo.uid = uid = original.get(xml.UID, None)
celement.set(xml.UID, uid)
cinfo.original = original
item_id = add_item(element=celement,
original=original,
parent=item,
joinby=(pkey, fkey))
if item_id is None:
item.error = self.error
self.error_tree.append(deepcopy(item.element))
else:
citem = self.items[item_id]
citem.parent = item
item.components.append(citem)
# Handle references
table = item.table
tree = self.tree
if tree is not None:
fields = [table[f] for f in table.fields]
rfields = filter(s3_has_foreign_key, fields)
item.references = self.lookahead(element,
table=table,
fields=rfields,
tree=tree,
directory=self.directory)
for reference in item.references:
entry = reference.entry
if entry and entry.element is not None:
item_id = add_item(element=entry.element)
if item_id:
entry.update(item_id=item_id)
# Parent reference
if parent is not None:
entry = Storage(item_id=parent.item_id,
element=parent.element,
tablename=parent.tablename)
item.references.append(Storage(field=joinby,
entry=entry))
return item.item_id
# -------------------------------------------------------------------------
def lookahead(self,
element,
table=None,
fields=None,
tree=None,
directory=None):
"""
Find referenced elements in the tree
@param element: the element
@param table: the DB table
@param fields: the FK fields in the table
@param tree: the import tree
@param directory: a dictionary to lookup elements in the tree
(will be filled in by this function)
"""
db = current.db
s3db = current.s3db
xml = current.xml
import_uid = xml.import_uid
ATTRIBUTE = xml.ATTRIBUTE
TAG = xml.TAG
UID = xml.UID
reference_list = []
root = None
if tree is not None:
if isinstance(tree, etree._Element):
root = tree
else:
root = tree.getroot()
references = element.findall("reference")
for reference in references:
field = reference.get(ATTRIBUTE.field, None)
# Ignore references without valid field-attribute
if not field or field not in fields:
continue
# Find the key table
multiple = False
fieldtype = str(table[field].type)
if fieldtype.startswith("reference"):
ktablename = fieldtype[10:]
elif fieldtype.startswith("list:reference"):
ktablename = fieldtype[15:]
multiple = True
else:
# ignore if the field is not a reference type
continue
try:
ktable = s3db[ktablename]
except:
# Invalid tablename - skip
continue
tablename = reference.get(ATTRIBUTE.resource, None)
# Ignore references to tables without UID field:
if UID not in ktable.fields:
continue
# Fall back to key table name if tablename is not specified:
if not tablename:
tablename = ktablename
# Super-entity references must use the super-key:
if tablename != ktablename:
field = (ktable._id.name, field)
# Ignore direct references to super-entities:
if tablename == ktablename and ktable._id.name != "id":
continue
# Get the foreign key
uids = reference.get(UID, None)
attr = UID
if not uids:
uids = reference.get(ATTRIBUTE.tuid, None)
attr = ATTRIBUTE.tuid
if uids and multiple:
uids = json.loads(uids)
elif uids:
uids = [uids]
# Find the elements and map to DB records
relements = []
# Create a UID<->ID map
id_map = Storage()
if attr == UID and uids:
_uids = map(import_uid, uids)
query = ktable[UID].belongs(_uids)
records = db(query).select(ktable.id,
ktable[UID])
id_map = dict([(r[UID], r.id) for r in records])
if not uids:
# Anonymous reference: <resource> inside the element
expr = './/%s[@%s="%s"]' % (TAG.resource,
ATTRIBUTE.name,
tablename)
relements = reference.xpath(expr)
if relements and not multiple:
relements = [relements[0]]
elif root is not None:
for uid in uids:
entry = None
# Entry already in directory?
if directory is not None:
entry = directory.get((tablename, attr, uid), None)
if not entry:
expr = ".//%s[@%s='%s' and @%s='%s']" % (
TAG.resource,
ATTRIBUTE.name,
tablename,
attr,
uid)
e = root.xpath(expr)
if e:
# Element in the source => append to relements
relements.append(e[0])
else:
# No element found, see if original record exists
_uid = import_uid(uid)
if _uid and _uid in id_map:
_id = id_map[_uid]
entry = Storage(tablename=tablename,
element=None,
uid=uid,
id=_id,
item_id=None)
reference_list.append(Storage(field=field,
entry=entry))
else:
continue
else:
reference_list.append(Storage(field=field,
entry=entry))
# Create entries for all newly found elements
for relement in relements:
uid = relement.get(attr, None)
if attr == UID:
_uid = import_uid(uid)
id = _uid and id_map and id_map.get(_uid, None) or None
else:
_uid = None
id = None
entry = Storage(tablename=tablename,
element=relement,
uid=uid,
id=id,
item_id=None)
# Add entry to directory
if uid and directory is not None:
directory[(tablename, attr, uid)] = entry
# Append the entry to the reference list
reference_list.append(Storage(field=field, entry=entry))
return reference_list
# -------------------------------------------------------------------------
def load_item(self, row):
"""
Load an item from the item table (counterpart to add_item
when restoring a job from the database)
"""
item = S3ImportItem(self)
if not item.restore(row):
self.error = item.error
if item.load_parent is None:
self.error_tree.append(deepcopy(item.element))
# Update lookup lists
item_id = item.item_id
self.items[item_id] = item
return item_id
# -------------------------------------------------------------------------
def resolve(self, item_id, import_list):
"""
Resolve the reference list of an item
@param item_id: the import item UID
@param import_list: the ordered list of items (UIDs) to import
"""
item = self.items[item_id]
if item.lock or item.accepted is False:
return False
references = []
for reference in item.references:
ritem_id = reference.entry.item_id
if ritem_id and ritem_id not in import_list:
references.append(ritem_id)
for ritem_id in references:
item.lock = True
if self.resolve(ritem_id, import_list):
import_list.append(ritem_id)
item.lock = False
return True
# -------------------------------------------------------------------------
def commit(self, ignore_errors=False):
"""
Commit the import job to the DB
@param ignore_errors: skip any items with errors
(does still report the errors)
"""
ATTRIBUTE = current.xml.ATTRIBUTE
# Resolve references
import_list = []
for item_id in self.items:
self.resolve(item_id, import_list)
if item_id not in import_list:
import_list.append(item_id)
# Commit the items
items = self.items
count = 0
mtime = None
created = []
cappend = created.append
updated = []
deleted = []
tablename = self.table._tablename
for item_id in import_list:
item = items[item_id]
error = None
success = item.commit(ignore_errors=ignore_errors)
error = item.error
if error:
self.error = error
element = item.element
if element is not None:
if not element.get(ATTRIBUTE.error, False):
element.set(ATTRIBUTE.error, str(self.error))
self.error_tree.append(deepcopy(element))
if not ignore_errors:
return False
elif item.tablename == tablename:
count += 1
if mtime is None or item.mtime > mtime:
mtime = item.mtime
if item.id:
if item.method == item.METHOD.CREATE:
cappend(item.id)
elif item.method == item.METHOD.UPDATE:
updated.append(item.id)
elif item.method == item.METHOD.DELETE:
deleted.append(item.id)
self.count = count
self.mtime = mtime
self.created = created
self.updated = updated
self.deleted = deleted
return True
# -------------------------------------------------------------------------
def __define_tables(self):
"""
Define the database tables for jobs and items
"""
self.job_table = self.define_job_table()
self.item_table = self.define_item_table()
# -------------------------------------------------------------------------
@classmethod
def define_job_table(cls):
db = current.db
if cls.JOB_TABLE_NAME not in db:
job_table = db.define_table(cls.JOB_TABLE_NAME,
Field("job_id", length=128,
unique=True,
notnull=True),
Field("tablename"),
Field("timestmp", "datetime",
default=datetime.utcnow()))
else:
job_table = db[cls.JOB_TABLE_NAME]
return job_table
# -------------------------------------------------------------------------
@classmethod
def define_item_table(cls):
db = current.db
if cls.ITEM_TABLE_NAME not in db:
item_table = db.define_table(cls.ITEM_TABLE_NAME,
Field("item_id", length=128,
unique=True,
notnull=True),
Field("job_id", length=128),
Field("tablename", length=128),
#Field("record_id", "integer"),
Field("record_uid"),
Field("error", "text"),
Field("data", "text"),
Field("element", "text"),
Field("ritems", "list:string"),
Field("citems", "list:string"),
Field("parent", length=128))
else:
item_table = db[cls.ITEM_TABLE_NAME]
return item_table
# -------------------------------------------------------------------------
def store(self):
"""
Store this job and all its items in the job table
"""
db = current.db
_debug("Storing Job ID=%s" % self.job_id)
self.__define_tables()
jobtable = self.job_table
query = jobtable.job_id == self.job_id
row = db(query).select(jobtable.id, limitby=(0, 1)).first()
if row:
record_id = row.id
else:
record_id = None
record = Storage(job_id=self.job_id)
try:
tablename = self.table._tablename
except:
pass
else:
record.update(tablename=tablename)
for item in self.items.values():
item.store(item_table=self.item_table)
if record_id:
db(jobtable.id == record_id).update(**record)
else:
record_id = jobtable.insert(**record)
_debug("Job record ID=%s" % record_id)
return record_id
# -------------------------------------------------------------------------
def get_tree(self):
"""
Reconstruct the element tree of this job
"""
if self.tree is not None:
return self.tree
else:
xml = current.xml
root = etree.Element(xml.TAG.root)
for item in self.items.values():
if item.element is not None and not item.parent:
if item.tablename == self.table._tablename or \
item.element.get(xml.UID, None) or \
item.element.get(xml.ATTRIBUTE.tuid, None):
root.append(deepcopy(item.element))
return etree.ElementTree(root)
# -------------------------------------------------------------------------
def delete(self):
"""
Delete this job and all its items from the job table
"""
db = current.db
_debug("Deleting job ID=%s" % self.job_id)
self.__define_tables()
item_table = self.item_table
query = item_table.job_id == self.job_id
db(query).delete()
job_table = self.job_table
query = job_table.job_id == self.job_id
db(query).delete()
# -------------------------------------------------------------------------
def restore_references(self):
"""
Restore the job's reference structure after loading items
from the item table
"""
db = current.db
UID = current.xml.UID
for item in self.items.values():
for citem_id in item.load_components:
if citem_id in self.items:
item.components.append(self.items[citem_id])
item.load_components = []
for ritem in item.load_references:
field = ritem["field"]
if "item_id" in ritem:
item_id = ritem["item_id"]
if item_id in self.items:
_item = self.items[item_id]
entry = Storage(tablename=_item.tablename,
element=_item.element,
uid=_item.uid,
id=_item.id,
item_id=item_id)
item.references.append(Storage(field=field,
entry=entry))
else:
_id = None
uid = ritem.get("uid", None)
tablename = ritem.get("tablename", None)
if tablename and uid:
try:
table = current.s3db[tablename]
except:
continue
if UID not in table.fields:
continue
query = table[UID] == uid
row = db(query).select(table._id,
limitby=(0, 1)).first()
if row:
_id = row[table._id.name]
else:
continue
entry = Storage(tablename = ritem["tablename"],
element=None,
uid = ritem["uid"],
id = _id,
item_id = None)
item.references.append(Storage(field=field,
entry=entry))
item.load_references = []
if item.load_parent is not None:
item.parent = self.items[item.load_parent]
item.load_parent = None
# END =========================================================================
| ashwyn/eden-message_parser | modules/s3/s3import.py | Python | mit | 123,322 |
from __future__ import absolute_import, division, print_function, unicode_literals
import string
import urllib
try:
from urllib.parse import urlparse, urlencode, urljoin, parse_qsl, urlunparse
from urllib.request import urlopen, Request
from urllib.error import HTTPError
except ImportError:
from urlparse import urlparse, urljoin, urlunparse, parse_qsl
from urllib import urlencode
from urllib2 import urlopen, Request, HTTPError
from random import SystemRandom
try:
UNICODE_ASCII_CHARACTERS = (string.ascii_letters +
string.digits)
except AttributeError:
UNICODE_ASCII_CHARACTERS = (string.ascii_letters.decode('ascii') +
string.digits.decode('ascii'))
def random_ascii_string(length):
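"""Return a random string of ASCII letters and digits with the given length."""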
random = SystemRandom()
return ''.join([random.choice(UNICODE_ASCII_CHARACTERS) for x in range(length)])
def url_query_params(url):
"""Return query parameters as a dict from the specified URL.
:param url: URL.
:type url: str
:rtype: dict
"""
return dict(parse_qsl(urlparse(url).query, True))
def url_dequery(url):
"""Return a URL with the query component removed.
:param url: URL to dequery.
:type url: str
:rtype: str
"""
url = urlparse(url)
return urlunparse((url.scheme,
url.netloc,
url.path,
url.params,
'',
url.fragment))
def build_url(base, additional_params=None):
"""Construct a URL based off of base containing all parameters in
the query portion of base plus any additional parameters.
:param base: Base URL
:type base: str
:param additional_params: Additional query parameters to include.
:type additional_params: dict
:rtype: str
"""
url = urlparse(base)
query_params = {}
query_params.update(parse_qsl(url.query, True))
if additional_params is not None:
query_params.update(additional_params)
for k, v in additional_params.items():
if v is None:
query_params.pop(k)
return urlunparse((url.scheme,
url.netloc,
url.path,
url.params,
urlencode(query_params),
url.fragment))
| VulcanTechnologies/oauth2lib | oauth2lib/utils.py | Python | mit | 2,411 |
from otp.ai.AIBaseGlobal import *
import DistributedCCharBaseAI
from direct.directnotify import DirectNotifyGlobal
from direct.fsm import ClassicFSM, State
from direct.fsm import State
from direct.task import Task
import random
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
import CharStateDatasAI
class DistributedGoofySpeedwayAI(DistributedCCharBaseAI.DistributedCCharBaseAI):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedGoofySpeedwayAI')
def __init__(self, air):
DistributedCCharBaseAI.DistributedCCharBaseAI.__init__(self, air, TTLocalizer.Goofy)
self.fsm = ClassicFSM.ClassicFSM('DistributedGoofySpeedwayAI', [State.State('Off', self.enterOff, self.exitOff, ['Lonely', 'TransitionToCostume', 'Walk']),
State.State('Lonely', self.enterLonely, self.exitLonely, ['Chatty', 'Walk', 'TransitionToCostume']),
State.State('Chatty', self.enterChatty, self.exitChatty, ['Lonely', 'Walk', 'TransitionToCostume']),
State.State('Walk', self.enterWalk, self.exitWalk, ['Lonely', 'Chatty', 'TransitionToCostume']),
State.State('TransitionToCostume', self.enterTransitionToCostume, self.exitTransitionToCostume, ['Off'])], 'Off', 'Off')
self.fsm.enterInitialState()
self.handleHolidays()
def delete(self):
self.fsm.requestFinalState()
DistributedCCharBaseAI.DistributedCCharBaseAI.delete(self)
self.lonelyDoneEvent = None
self.lonely = None
self.chattyDoneEvent = None
self.chatty = None
self.walkDoneEvent = None
self.walk = None
return
def generate(self):
DistributedCCharBaseAI.DistributedCCharBaseAI.generate(self)
name = self.getName()
self.lonelyDoneEvent = self.taskName(name + '-lonely-done')
self.lonely = CharStateDatasAI.CharLonelyStateAI(self.lonelyDoneEvent, self)
self.chattyDoneEvent = self.taskName(name + '-chatty-done')
self.chatty = CharStateDatasAI.CharChattyStateAI(self.chattyDoneEvent, self)
self.walkDoneEvent = self.taskName(name + '-walk-done')
if self.diffPath is None:
self.walk = CharStateDatasAI.CharWalkStateAI(self.walkDoneEvent, self)
else:
self.walk = CharStateDatasAI.CharWalkStateAI(self.walkDoneEvent, self, self.diffPath)
return
def walkSpeed(self):
return ToontownGlobals.GoofySpeed
def start(self):
self.fsm.request('Lonely')
def __decideNextState(self, doneStatus):
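# Choose the next FSM state based on which state-data just finished and whether any avatars are nearby; costume holidays trigger a transition to the costume state instead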
if self.transitionToCostume == 1:
curWalkNode = self.walk.getDestNode()
if simbase.air.holidayManager:
if ToontownGlobals.HALLOWEEN_COSTUMES in simbase.air.holidayManager.currentHolidays and simbase.air.holidayManager.currentHolidays[ToontownGlobals.HALLOWEEN_COSTUMES]:
simbase.air.holidayManager.currentHolidays[ToontownGlobals.HALLOWEEN_COSTUMES].triggerSwitch(curWalkNode, self)
self.fsm.request('TransitionToCostume')
elif ToontownGlobals.APRIL_FOOLS_COSTUMES in simbase.air.holidayManager.currentHolidays and simbase.air.holidayManager.currentHolidays[ToontownGlobals.APRIL_FOOLS_COSTUMES]:
simbase.air.holidayManager.currentHolidays[ToontownGlobals.APRIL_FOOLS_COSTUMES].triggerSwitch(curWalkNode, self)
self.fsm.request('TransitionToCostume')
else:
self.notify.warning('transitionToCostume == 1 but no costume holiday')
else:
self.notify.warning('transitionToCostume == 1 but no holiday Manager')
if doneStatus['state'] == 'lonely' and doneStatus['status'] == 'done':
self.fsm.request('Walk')
elif doneStatus['state'] == 'chatty' and doneStatus['status'] == 'done':
self.fsm.request('Walk')
elif doneStatus['state'] == 'walk' and doneStatus['status'] == 'done':
if len(self.nearbyAvatars) > 0:
self.fsm.request('Chatty')
else:
self.fsm.request('Lonely')
def enterOff(self):
pass
def exitOff(self):
DistributedCCharBaseAI.DistributedCCharBaseAI.exitOff(self)
def enterLonely(self):
self.lonely.enter()
self.acceptOnce(self.lonelyDoneEvent, self.__decideNextState)
def exitLonely(self):
self.ignore(self.lonelyDoneEvent)
self.lonely.exit()
def __goForAWalk(self, task):
self.notify.debug('going for a walk')
self.fsm.request('Walk')
return Task.done
def enterChatty(self):
self.chatty.enter()
self.acceptOnce(self.chattyDoneEvent, self.__decideNextState)
def exitChatty(self):
self.ignore(self.chattyDoneEvent)
self.chatty.exit()
def enterWalk(self):
self.notify.debug('going for a walk')
self.walk.enter()
self.acceptOnce(self.walkDoneEvent, self.__decideNextState)
def exitWalk(self):
self.ignore(self.walkDoneEvent)
self.walk.exit()
def avatarEnterNextState(self):
if len(self.nearbyAvatars) == 1:
if self.fsm.getCurrentState().getName() != 'Walk':
self.fsm.request('Chatty')
else:
self.notify.debug('avatarEnterNextState: in walk state')
else:
self.notify.debug('avatarEnterNextState: num avatars: ' + str(len(self.nearbyAvatars)))
def avatarExitNextState(self):
if len(self.nearbyAvatars) == 0:
if self.fsm.getCurrentState().getName() != 'Walk':
self.fsm.request('Lonely')
def handleHolidays(self):
DistributedCCharBaseAI.DistributedCCharBaseAI.handleHolidays(self)
if hasattr(simbase.air, 'holidayManager'):
if ToontownGlobals.APRIL_FOOLS_COSTUMES in simbase.air.holidayManager.currentHolidays:
if simbase.air.holidayManager.currentHolidays[ToontownGlobals.APRIL_FOOLS_COSTUMES] is not None and simbase.air.holidayManager.currentHolidays[ToontownGlobals.APRIL_FOOLS_COSTUMES].getRunningState():
self.diffPath = TTLocalizer.Donald
return
def getCCLocation(self):
if self.diffPath is None:
return 1
else:
return 0
return
def enterTransitionToCostume(self):
pass
def exitTransitionToCostume(self):
pass
| ksmit799/Toontown-Source | toontown/classicchars/DistributedGoofySpeedwayAI.py | Python | mit | 6,450 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from scrapy.spider import Spider
from scrapy.selector import Selector
from my_settings import name_file, test_mode, difference_days
from datetime import datetime, timedelta
print "Run spider NewenglandFilm"
file_output = open(name_file, 'a')
email_current_session = []
email_in_file = open(name_file, 'r').readlines()
if test_mode:
current_date = (datetime.today() - timedelta(days=difference_days)).strftime('%m/%d/%Y')
else:
current_date = datetime.today().strftime('%m/%d/%Y')
class NewenglandFilm(Spider):
name = 'newenglandfilm'
allowed_domains = ["newenglandfilm.com"]
start_urls = ["http://newenglandfilm.com/jobs.htm"]
def parse(self, response):
sel = Selector(response)
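# The jobs page lists postings as numbered divs under #mainContent; scan the first 30 and collect e-mail addresses from postings dated with the current date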
for num_div in xrange(1, 31):
date = sel.xpath('//*[@id="mainContent"]/div[{0}]/span/text()'.format(str(num_div))).re('(\d{1,2}\/\d{1,2}\/\d{4})')[0]
email = sel.xpath('//*[@id="mainContent"]/div[{0}]/div/text()'.format(str(num_div))).re('(\w+@[a-zA-Z0-9_]+?\.[a-zA-Z]{2,6})')
if current_date == date:
for address in email:
if address + "\n" not in email_in_file and address not in email_current_session:
file_output.write(address + "\n")
email_current_session.append(address)
print "Spider: NewenglandFilm. Email {0} added to file".format(address)
else:
print "Spider: NewenglandFilm. Email {0} already in the file".format(address) | dcondrey/scrapy-spiders | dist/spiders/newenglandfilm.py | Python | mit | 1,625 |
import base64
try:
from functools import wraps
except ImportError:
from django.utils.functional import wraps # Python 2.3, 2.4 fallback.
from django import http, template
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login
from django.shortcuts import render_to_response
from django.utils.translation import ugettext_lazy, ugettext as _
ERROR_MESSAGE = ugettext_lazy("Please enter a correct username and password. Note that both fields are case-sensitive.")
LOGIN_FORM_KEY = 'this_is_the_login_form'
def _display_login_form(request, error_message=''):
request.session.set_test_cookie()
return render_to_response('admin/login.html', {
'title': _('Log in'),
'app_path': request.get_full_path(),
'error_message': error_message
}, context_instance=template.RequestContext(request))
def staff_member_required(view_func):
"""
Decorator for views that checks that the user is logged in and is a staff
member, displaying the login page if necessary.
"""
def _checklogin(request, *args, **kwargs):
if request.user.is_authenticated() and request.user.is_staff:
# The user is valid. Continue to the admin page.
return view_func(request, *args, **kwargs)
assert hasattr(request, 'session'), "The Django admin requires session middleware to be installed. Edit your MIDDLEWARE_CLASSES setting to insert 'django.contrib.sessions.middleware.SessionMiddleware'."
# If this isn't already the login page, display it.
if LOGIN_FORM_KEY not in request.POST:
if request.POST:
message = _("Please log in again, because your session has expired.")
else:
message = ""
return _display_login_form(request, message)
# Check that the user accepts cookies.
if not request.session.test_cookie_worked():
message = _("Looks like your browser isn't configured to accept cookies. Please enable cookies, reload this page, and try again.")
return _display_login_form(request, message)
else:
request.session.delete_test_cookie()
# Check the password.
username = request.POST.get('username', None)
password = request.POST.get('password', None)
user = authenticate(username=username, password=password)
if user is None:
message = ERROR_MESSAGE
if '@' in username:
# Mistakenly entered e-mail address instead of username? Look it up.
users = list(User.all().filter('email =', username))
if len(users) == 1 and users[0].check_password(password):
message = _("Your e-mail address is not your username. Try '%s' instead.") % users[0].username
else:
# Either we cannot find the user, or if more than 1
# we cannot guess which user is the correct one.
message = _("Usernames cannot contain the '@' character.")
return _display_login_form(request, message)
# The user data is correct; log in the user in and continue.
else:
if user.is_active and user.is_staff:
login(request, user)
return http.HttpResponseRedirect(request.get_full_path())
else:
return _display_login_form(request, ERROR_MESSAGE)
return wraps(view_func)(_checklogin)
| isaac-philip/loolu | common/django/contrib/admin/views/decorators.py | Python | mit | 3,535 |
#guimporter.py
import sys
from PySide import QtGui, QtCore, QtWebKit
Signal = QtCore.Signal | lazunin/stclient | guimporter.py | Python | mit | 92 |
# -*- coding: utf-8 -*-
# @Author: karthik
# @Date: 2016-12-10 21:40:07
# @Last Modified by: chandan
# @Last Modified time: 2016-12-11 12:55:27
from models.portfolio import Portfolio
from models.company import Company
from models.position import Position
import tenjin
from tenjin.helpers import *
import wikipedia
import matplotlib.pyplot as plt
from data_helpers import *
from stock_data import *
import BeautifulSoup as bs
import urllib2
import re
from datetime import date as dt
engine = tenjin.Engine(path=['templates'])
# info fetch handler
def send_info_handler(bot, update, args):
args = list(parse_args(args))
if len(args) == 0 or "portfolio" in [arg.lower() for arg in args]:
send_portfolio_info(bot, update)
else:
info_companies = get_companies(args)
send_companies_info(bot, update, info_companies)
# get portfolio function
def send_portfolio_info(bot, update):
print "Userid: %d requested portfolio information" %(update.message.chat_id)
context = {
'positions': Portfolio.instance.positions,
'wallet_value': Portfolio.instance.wallet_value,
}
html_str = engine.render('portfolio_info.pyhtml', context)
bot.sendMessage(parse_mode="HTML", chat_id=update.message.chat_id, text=html_str)
# get companies information
def send_companies_info(bot, update, companies):
print "Userid: requested information for following companies %s" %','.join([c.name for c in companies])
for company in companies:
context = {
'company': company,
'current_price': get_current_price(company),
'description': wikipedia.summary(company.name.split()[0], sentences=2)
}
wiki_page = wikipedia.page(company.name.split()[0])
html_page = urllib2.urlopen(wiki_page.url)
soup = bs.BeautifulSoup(html_page)
img_url = 'http:' + soup.find('td', { "class" : "logo" }).find('img')['src']
bot.sendPhoto(chat_id=update.message.chat_id, photo=img_url)
html_str = engine.render('company_template.pyhtml', context)
bot.sendMessage(parse_mode="HTML", chat_id=update.message.chat_id, text=html_str)
symbols = [c.symbol for c in companies]
if len(symbols) >= 2:
symbol_string = ", ".join(symbols[:-1]) + " and " + symbols[-1]
else:
symbol_string = symbols[0]
last_n_days = 10
if len(companies) < 4:
create_graph(companies, last_n_days)
history_text = '''
Here's the price history for {} for the last {} days
'''.format(symbol_string, last_n_days)
bot.sendMessage(chat_id=update.message.chat_id, text=history_text)
bot.sendPhoto(chat_id=update.message.chat_id, photo=open("plots/temp.png",'rb'))
def create_graph(companies, timedel):
fig, ax = plt.subplots()
for company in companies:
dates, lookback_prices = get_lookback_prices(company, timedel)
# dates = [i.strftime('%d/%m') for i in dates]
h = ax.plot(dates, lookback_prices, label=company.symbol)
ax.legend()
plt.xticks(rotation=45)
plt.savefig('plots/temp.png')
| coders-creed/botathon | src/info/fetch_info.py | Python | mit | 2,889 |
#!/usr/bin/python
from typing import List, Optional
"""
16. 3Sum Closest
https://leetcode.com/problems/3sum-closest/
"""
def bsearch(nums, left, right, res, i, j, target):
while left <= right:
middle = (left + right) // 2
candidate = nums[i] + nums[j] + nums[middle]
if res is None or abs(candidate - target) < abs(res - target):
res = candidate
if candidate == target:
return res
elif candidate > target:
right = middle - 1
else:
left = middle + 1
return res
class Solution:
def threeSumClosest(self, nums: List[int], target: int) -> Optional[int]:
res = None
nums = sorted(nums)
for i in range(len(nums)):
for j in range(i + 1, len(nums)):
res = bsearch(nums, j + 1, len(nums) - 1, res, i, j, target)
return res
def main():
sol = Solution()
print(sol.threeSumClosest([-111, -111, 3, 6, 7, 16, 17, 18, 19], 13))
return 0
if __name__ == '__main__':
raise SystemExit(main())
| pisskidney/leetcode | medium/16.py | Python | mit | 1,070 |