repo_name (string, length 6 to 100) | path (string, length 4 to 294) | copies (string, length 1 to 5) | size (string, length 4 to 6) | content (string, length 606 to 896k) | license (string, 15 classes) | var_hash (int64, -9,223,186,179,200,150,000 to 9,223,291,175B) | doc_hash (int64, -9,223,304,365,658,930,000 to 9,223,309,051B) | line_mean (float64, 3.5 to 99.8) | line_max (int64, 13 to 999) | alpha_frac (float64, 0.25 to 0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|---|
abhinavmoudgil95/root | interpreter/llvm/src/utils/abtest/abtest.py | 30 | 8132 | #!/usr/bin/env python
#
# Given a previous good compile, narrow down miscompiles.
# Expects two directories named "before" and "after" each containing a set of
# assembly or object files where the "after" version is assumed to be broken.
# You also have to provide a script called "link_test". It is called with a list
# of files which should be linked together and the result tested. "link_test"
# should return with exitcode 0 if the linking and testing succeeded.
#
# abtest.py operates by taking all files from the "before" directory and
# in each step replacing one of them with the corresponding file from the
# "after" (broken) directory.
#
# Additionally you can perform the same steps with a single .s file. In this
# mode, functions are identified by "# -- Begin FunctionName" and
# "# -- End FunctionName" markers. abtest.py then takes all functions from
# the file in the "before" directory and in each step replaces one function
# with the corresponding function from the "after" file.
#
# Example usage to identify miscompiled files:
# 1. Create a link_test script, make it executable. Simple Example:
# clang "$@" -o /tmp/test && /tmp/test || echo "PROBLEM"
# 2. Run the script to figure out which files are miscompiled:
# > ./abtest.py
# somefile.s: ok
# someotherfile.s: skipped: same content
# anotherfile.s: failed: './link_test' exitcode != 0
# ...
# Example usage to identify miscompiled functions inside a file:
# 3. First you have to mark the beginning and end of the functions.
# The script comes with some examples called mark_xxx.py.
# Unfortunately this is very specific to your environment, so it is likely
# that you will have to write a custom version.
# > for i in before/*.s after/*.s; do mark_xxx.py $i; done
# 4. Run the tests on a single file (assuming before/file.s and
# after/file.s exist)
# > ./abtest.py file.s
# funcname1 [0/XX]: ok
# funcname2 [1/XX]: ok
# funcname3 [2/XX]: skipped: same content
# funcname4 [3/XX]: failed: './link_test' exitcode != 0
# ...
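#
# For illustration only (not part of the original header): a hypothetical
# sketch of what a marked assembly file could look like after running one of
# the mark_xxx.py helpers. The function name and directives below are made up;
# only the "# -- Begin" / "# -- End" marker prefixes are what abtest.py scans
# for, and the exact spacing expected depends on the marking helper used:
#
#     # -- Begin my_function
#     .globl my_function
#     my_function:
#         ... instructions ...
#     # -- End my_function
#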
from fnmatch import filter
from sys import stderr
import argparse
import filecmp
import os
import subprocess
import sys
LINKTEST="./link_test"
ESCAPE="\033[%sm"
BOLD=ESCAPE % "1"
RED=ESCAPE % "31"
NORMAL=ESCAPE % "0"
FAILED=RED+"failed"+NORMAL
def find(dir, file_filter=None):
files = [walkdir[0]+"/"+file for walkdir in os.walk(dir) for file in walkdir[2]]
if file_filter != None:
files = filter(files, file_filter)
return files
def error(message):
stderr.write("Error: %s\n" % (message,))
def warn(message):
stderr.write("Warning: %s\n" % (message,))
def extract_functions(file):
functions = []
in_function = None
for line in open(file):
if line.startswith("# -- Begin "):
if in_function != None:
warn("Missing end of function %s" % (in_function,))
funcname = line[12:-1]
in_function = funcname
text = line
elif line.startswith("# -- End "):
function_name = line[10:-1]
if in_function != function_name:
warn("End %s does not match begin %s" % (function_name, in_function))
else:
text += line
functions.append( (in_function, text) )
in_function = None
elif in_function != None:
text += line
return functions
def replace_function(file, function, replacement, dest):
out = open(dest, "w")
skip = False
found = False
in_function = None
for line in open(file):
if line.startswith("# -- Begin "):
if in_function != None:
warn("Missing end of function %s" % (in_function,))
funcname = line[12:-1]
in_function = funcname
if in_function == function:
out.write(replacement)
skip = True
elif line.startswith("# -- End "):
function_name = line[10:-1]
if in_function != function_name:
warn("End %s does not match begin %s" % (function_name, in_function))
in_function = None
if skip:
skip = False
continue
if not skip:
out.write(line)
def announce_test(name):
stderr.write("%s%s%s: " % (BOLD, name, NORMAL))
stderr.flush()
def announce_result(result, info):
stderr.write(result)
if info != "":
stderr.write(": %s" % info)
stderr.write("\n")
stderr.flush()
def testrun(files):
linkline="%s %s" % (LINKTEST, " ".join(files),)
res = subprocess.call(linkline, shell=True)
if res != 0:
announce_result(FAILED, "'%s' exitcode != 0" % LINKTEST)
return False
else:
announce_result("ok", "")
return True
def check_files():
"""Check files mode"""
for i in range(0, len(NO_PREFIX)):
f = NO_PREFIX[i]
b=baddir+"/"+f
if b not in BAD_FILES:
warn("There is no corresponding file to '%s' in %s" \
% (gooddir+"/"+f, baddir))
continue
announce_test(f + " [%s/%s]" % (i+1, len(NO_PREFIX)))
# combine files (everything from good except f)
testfiles=[]
skip=False
for c in NO_PREFIX:
badfile = baddir+"/"+c
goodfile = gooddir+"/"+c
if c == f:
testfiles.append(badfile)
if filecmp.cmp(goodfile, badfile):
announce_result("skipped", "same content")
skip = True
break
else:
testfiles.append(goodfile)
if skip:
continue
testrun(testfiles)
def check_functions_in_file(base, goodfile, badfile):
functions = extract_functions(goodfile)
if len(functions) == 0:
warn("Couldn't find any function in %s, missing annotations?" % (goodfile,))
return
badfunctions = dict(extract_functions(badfile))
if len(badfunctions) == 0:
warn("Couldn't find any function in %s, missing annotations?" % (badfile,))
return
COMBINED="/tmp/combined.s"
i = 0
for (func,func_text) in functions:
announce_test(func + " [%s/%s]" % (i+1, len(functions)))
i+=1
if func not in badfunctions:
warn("Function '%s' missing from bad file" % func)
continue
if badfunctions[func] == func_text:
announce_result("skipped", "same content")
continue
replace_function(goodfile, func, badfunctions[func], COMBINED)
testfiles=[]
for c in NO_PREFIX:
if c == base:
testfiles.append(COMBINED)
continue
testfiles.append(gooddir + "/" + c)
testrun(testfiles)
parser = argparse.ArgumentParser()
parser.add_argument('--a', dest='dir_a', default='before')
parser.add_argument('--b', dest='dir_b', default='after')
parser.add_argument('--insane', help='Skip sanity check', action='store_true')
parser.add_argument('file', metavar='file', nargs='?')
config = parser.parse_args()
gooddir=config.dir_a
baddir=config.dir_b
BAD_FILES=find(baddir, "*")
GOOD_FILES=find(gooddir, "*")
NO_PREFIX=sorted([x[len(gooddir)+1:] for x in GOOD_FILES])
# "Checking whether build environment is sane ..."
if not config.insane:
announce_test("sanity check")
if not os.access(LINKTEST, os.X_OK):
error("Expect '%s' to be present and executable" % (LINKTEST,))
exit(1)
res = testrun(GOOD_FILES)
if not res:
# "build environment is grinning and holding a spatula. Guess not."
linkline="%s %s" % (LINKTEST, " ".join(GOOD_FILES),)
stderr.write("\n%s\n\n" % linkline)
stderr.write("Returned with exitcode != 0\n")
sys.exit(1)
if config.file is not None:
# Function exchange mode (a single file was given)
goodfile = gooddir+"/"+config.file
badfile = baddir+"/"+config.file
check_functions_in_file(config.file, goodfile, badfile)
else:
# File exchange mode
check_files()
| lgpl-2.1 | -7,577,866,328,425,622,000 | 2,073,058,768,537,670,700 | 33.752137 | 85 | 0.595057 | false |
encukou/pytest-sourceorder | setup.py | 1 | 1256 | #!/usr/bin/python2
#
# Copyright (C) 2014 pytest-sourceorder contributors. See COPYING for license
#
from setuptools import setup
import io
with io.open('README.rst', 'rt', encoding='utf-8') as f:
readme_contents = f.read()
setup_args = dict(
name = "pytest-sourceorder",
version = "0.5.1",
description = "Test-ordering plugin for pytest",
long_description = readme_contents,
url = "https://pagure.io/python-pytest-sourceorder",
license = "GPL",
author = "Petr Viktorin",
author_email = "pviktori@redhat.com",
py_modules = ["pytest_sourceorder"],
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
'Operating System :: POSIX',
'Framework :: Pytest',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Topic :: Software Development :: Quality Assurance',
],
install_requires=['pytest'],
entry_points = {
'pytest11': [
'sourceorder = pytest_sourceorder',
],
},
)
if __name__ == '__main__':
setup(**setup_args)
| gpl-3.0 | 1,743,565,782,433,614,600 | -1,493,649,654,296,137,700 | 28.904762 | 85 | 0.601911 | false |
UXJera/JeremiahNyman.com | node_modules/gulp-sass/node_modules/node-sass/node_modules/pangyp/gyp/pylib/gyp/MSVSToolFile.py | 2736 | 1804 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio project reader/writer."""
import gyp.common
import gyp.easy_xml as easy_xml
class Writer(object):
"""Visual Studio XML tool file writer."""
def __init__(self, tool_file_path, name):
"""Initializes the tool file.
Args:
tool_file_path: Path to the tool file.
name: Name of the tool file.
"""
self.tool_file_path = tool_file_path
self.name = name
self.rules_section = ['Rules']
def AddCustomBuildRule(self, name, cmd, description,
additional_dependencies,
outputs, extensions):
"""Adds a rule to the tool file.
Args:
name: Name of the rule.
description: Description of the rule.
cmd: Command line of the rule.
additional_dependencies: other files which may trigger the rule.
outputs: outputs of the rule.
extensions: extensions handled by the rule.
"""
rule = ['CustomBuildRule',
{'Name': name,
'ExecutionDescription': description,
'CommandLine': cmd,
'Outputs': ';'.join(outputs),
'FileExtensions': ';'.join(extensions),
'AdditionalDependencies':
';'.join(additional_dependencies)
}]
self.rules_section.append(rule)
def WriteIfChanged(self):
"""Writes the tool file."""
content = ['VisualStudioToolFile',
{'Version': '8.00',
'Name': self.name
},
self.rules_section
]
easy_xml.WriteXmlIfChanged(content, self.tool_file_path,
encoding="Windows-1252")
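# Minimal usage sketch (illustration only, not part of the original module).
# The file name, rule name and command below are hypothetical:
#
#   writer = Writer('my_rules.rules', 'MyTools')
#   writer.AddCustomBuildRule(name='Protoc',
#                             cmd='protoc --cpp_out=. $(InputPath)',
#                             description='Compiling $(InputName).proto',
#                             additional_dependencies=['protoc.exe'],
#                             outputs=['$(InputName).pb.cc', '$(InputName).pb.h'],
#                             extensions=['proto'])
#   writer.WriteIfChanged()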
| cc0-1.0 | -2,120,093,474,816,096,000 | -930,476,614,394,377,300 | 30.103448 | 72 | 0.582594 | false |
maelnor/nova | nova/compute/vm_states.py | 99 | 2089 | # Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Possible vm states for instances.
Compute instance vm states represent the state of an instance as it pertains to
a user or administrator.
vm_state describes a VM's current stable (not transition) state. That is, if
there are no ongoing compute API calls (running tasks), vm_state should reflect
what the customer expects the VM to be. When combined with task states
(task_states.py), a better picture can be formed regarding the instance's
health and progress.
See http://wiki.openstack.org/VMState
"""
ACTIVE = 'active' # VM is running
BUILDING = 'building' # VM only exists in DB
PAUSED = 'paused'
SUSPENDED = 'suspended' # VM is suspended to disk.
STOPPED = 'stopped' # VM is powered off, the disk image is still there.
RESCUED = 'rescued' # A rescue image is running with the original VM image
# attached.
RESIZED = 'resized' # a VM with the new size is active. The user is expected
# to manually confirm or revert.
SOFT_DELETED = 'soft-delete' # VM is marked as deleted but the disk images are
# still available to restore.
DELETED = 'deleted' # VM is permanently deleted.
ERROR = 'error'
SHELVED = 'shelved' # VM is powered off, resources still on hypervisor
SHELVED_OFFLOADED = 'shelved_offloaded' # VM and associated resources are
# not on hypervisor
ALLOW_SOFT_REBOOT = [ACTIVE] # states we can soft reboot from
ALLOW_HARD_REBOOT = ALLOW_SOFT_REBOOT + [STOPPED, PAUSED, SUSPENDED, ERROR]
# states we allow hard reboot from
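# Illustrative sketch only (not part of the original module): code elsewhere in
# nova can gate reboot requests against these lists, e.g.:
#
#   def can_reboot(vm_state, reboot_type):
#       allowed = ALLOW_HARD_REBOOT if reboot_type == 'HARD' else ALLOW_SOFT_REBOOT
#       return vm_state in allowed
#
#   can_reboot(ACTIVE, 'SOFT')    # True
#   can_reboot(STOPPED, 'SOFT')   # False; STOPPED only allows a hard reboot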
| apache-2.0 | -5,060,930,392,442,492,000 | 4,849,999,415,345,178,000 | 39.173077 | 79 | 0.741982 | false |
purpleKarrot/fluorescence | gumps/spellbook.py | 12 | 4103 |
import ui
from data import Texture, TextureSource, rgba
import client
def create(args):
g = ui.GumpMenu("spellbook", 100, 100)
g.setFont("Devinne Swash", 16)
g.addPage(0)
g.addImage((0, 0), Texture(TextureSource.GUMPART, 2220))
# circle numbers
g.addPageButton((58, 175), Texture(TextureSource.GUMPART, 2225), 1)
g.addPageButton((92, 175), Texture(TextureSource.GUMPART, 2226), 1)
g.addPageButton((129, 175), Texture(TextureSource.GUMPART, 2227), 2)
g.addPageButton((164, 175), Texture(TextureSource.GUMPART, 2228), 2)
g.addPageButton((227, 175), Texture(TextureSource.GUMPART, 2229), 3)
g.addPageButton((260, 175), Texture(TextureSource.GUMPART, 2230), 3)
g.addPageButton((297, 175), Texture(TextureSource.GUMPART, 2231), 4)
g.addPageButton((332, 175), Texture(TextureSource.GUMPART, 2232), 4)
# circles is a list of dictionaries, each dictionary holding a list of spells and infos about the circle:
# circle["spells"]: list of spells
# ["id"]: numerical id
# ["name"]: name
# each spell is a tuple
# (id, name, wops, descriptionHeader, description, gumpid)
# id: numerical id
# name: name
# wops: words of power
# descriptionHeader: header text of the spell description
# description: spell description
# gumpid: spell icon id
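    # Hypothetical example of that structure (illustration only; the real data
    # is supplied by the caller through args, and the ids, names and gump ids
    # below are made up):
    #
    #   circles = [
    #       {"id": 1, "name": "First Circle", "spells": [
    #           (1, "Clumsy", "Uus Jux", "Clumsy", "Reduces target dexterity.", 2240),
    #           (2, "Create Food", "In Mani Ylem", "Create Food", "Conjures food.", 2241),
    #       ]},
    #       # ... more circle dictionaries ...
    #   ]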
circles = args["circles"]
page = 0
spellPage = 5
spellCount = 0
# circle overviews
for circle in circles:
if circle["id"] % 2 == 1:
# left page
page += 1
g.addPage(page)
# flip back
if page > 1:
g.addPageButton((50, 8), Texture(TextureSource.GUMPART, 2235), page - 1)
# flip forward
g.addPageButton((321, 8), Texture(TextureSource.GUMPART, 2236), page + 1)
addTitleLabel(g, (100, 14, 40, 15), "INDEX")
addTitleLabel(g, (55, 34, 150, 15), circle["name"])
spellX = 55
else:
# right page
addTitleLabel(g, (267, 14, 40, 15), "INDEX")
addTitleLabel(g, (223, 34, 150, 15), circle["name"])
spellX = 223
spellY = 50
for spell in circle["spells"]:
l = addTextLabel(g, (spellX, spellY, 150, 15), spell[1])
l.page = spellPage # flip gump to this page if clicked
spellCount += 1
if spellCount % 2 == 0:
spellPage += 1
spellY += 15
spellPage = 5
spellCount = 0
x = 0
for circle in circles:
for spell in circle["spells"]:
if spellCount % 2 == 0:
# add flip forward to previous page
if spellCount > 1:
g.addPageButton((321, 8), Texture(TextureSource.GUMPART, 2236), spellPage + 1)
g.addPage(spellPage)
# flip back
g.addPageButton((50, 8), Texture(TextureSource.GUMPART, 2235), spellPage - 1)
spellPage += 1
x = 0
# circle header for left page
addTitleLabel(g, (x + 85, 14, 100, 15), circle["name"])
else:
# circle header for right page
addTitleLabel(g, (x + 65, 14, 100, 15), circle["name"])
# spell name
addTitleLabel(g, (x + 105, 38, 90, 35), spell[1])
# description title
addTitleLabel(g, (x + 55, 95, 150, 15), spell[3])
# description
addTextLabel(g, (x + 55, 110, 150, 55), spell[4])
# spell icon
btCast = g.addPythonButton((x + 55, 40), Texture(TextureSource.GUMPART, spell[5]), castSpell)
btCast.store["spellid"] = spell[0]
spellCount += 1
x += 168
args["item"].setSpellbookGump(g)
def addTitleLabel(g, geom, text):
l = g.addLabel(geom, text)
l.rgba = rgba("#001052")
return l
def addTextLabel(g, geom, text):
l = g.addLabel(geom, text)
l.rgba = rgba("#5a4a31")
return l
def castSpell(button):
client.castSpell(button.store["spellid"])
| gpl-3.0 | -1,639,845,218,630,260,000 | -6,915,495,157,847,460,000 | 32.357724 | 109 | 0.562028 | false |
fabioz/Pydev | plugins/org.python.pydev.core/pysrc/_pydevd_bundle/pydevd_custom_frames.py | 2 | 4396 | from _pydevd_bundle.pydevd_constants import get_current_thread_id, Null, ForkSafeLock
from pydevd_file_utils import get_abs_path_real_path_and_base_from_frame
from _pydev_imps._pydev_saved_modules import thread, threading
import sys
from _pydev_bundle import pydev_log
DEBUG = False
class CustomFramesContainer:
# Actual Values initialized later on.
custom_frames_lock = None # : :type custom_frames_lock: threading.Lock
custom_frames = None
_next_frame_id = None
_py_db_command_thread_event = None
def custom_frames_container_init(): # Note: no staticmethod on jython 2.1 (so, use free-function)
CustomFramesContainer.custom_frames_lock = ForkSafeLock()
# custom_frames can only be accessed if properly locked with custom_frames_lock!
# Key is a string identifying the frame (as well as the thread it belongs to).
# Value is a CustomFrame.
#
CustomFramesContainer.custom_frames = {}
# Only to be used in this module
CustomFramesContainer._next_frame_id = 0
# This is the event we must set to release an internal process events. It's later set by the actual debugger
# when we do create the debugger.
CustomFramesContainer._py_db_command_thread_event = Null()
# Initialize it the first time (it may be reinitialized later on when dealing with a fork).
custom_frames_container_init()
class CustomFrame:
def __init__(self, name, frame, thread_id):
# 0 = string with the representation of that frame
self.name = name
# 1 = the frame to show
self.frame = frame
# 2 = an integer identifying the last time the frame was changed.
self.mod_time = 0
# 3 = the thread id of the given frame
self.thread_id = thread_id
def add_custom_frame(frame, name, thread_id):
'''
It's possible to show paused frames by adding a custom frame through this API (it's
intended to be used for coroutines, but could potentially be used for generators too).
:param frame:
The topmost frame to be shown paused when a thread with thread.ident == thread_id is paused.
:param name:
The name to be shown for the custom thread in the UI.
:param thread_id:
The thread id to which this frame is related (must match thread.ident).
:return: str
Returns the custom thread id which will be used to show the given frame paused.
'''
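    # Illustrative usage sketch (not part of the original module); the frame,
    # name and thread id below are assumed to come from the caller:
    #
    #   custom_id = add_custom_frame(coro_frame, 'MyCoroutine', thread_id)
    #   ...
    #   update_custom_frame(custom_id, new_topmost_frame, thread_id)
    #   remove_custom_frame(custom_id)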
with CustomFramesContainer.custom_frames_lock:
curr_thread_id = get_current_thread_id(threading.currentThread())
next_id = CustomFramesContainer._next_frame_id = CustomFramesContainer._next_frame_id + 1
# Note: the frame id kept contains an id and thread information on the thread where the frame was added
# so that later on we can check if the frame is from the current thread by doing frame_id.endswith('|'+thread_id).
frame_custom_thread_id = '__frame__:%s|%s' % (next_id, curr_thread_id)
if DEBUG:
sys.stderr.write('add_custom_frame: %s (%s) %s %s\n' % (
frame_custom_thread_id, get_abs_path_real_path_and_base_from_frame(frame)[-1], frame.f_lineno, frame.f_code.co_name))
CustomFramesContainer.custom_frames[frame_custom_thread_id] = CustomFrame(name, frame, thread_id)
CustomFramesContainer._py_db_command_thread_event.set()
return frame_custom_thread_id
def update_custom_frame(frame_custom_thread_id, frame, thread_id, name=None):
with CustomFramesContainer.custom_frames_lock:
if DEBUG:
sys.stderr.write('update_custom_frame: %s\n' % frame_custom_thread_id)
try:
old = CustomFramesContainer.custom_frames[frame_custom_thread_id]
if name is not None:
old.name = name
old.mod_time += 1
old.thread_id = thread_id
except:
sys.stderr.write('Unable to get frame to replace: %s\n' % (frame_custom_thread_id,))
pydev_log.exception()
CustomFramesContainer._py_db_command_thread_event.set()
def remove_custom_frame(frame_custom_thread_id):
with CustomFramesContainer.custom_frames_lock:
if DEBUG:
sys.stderr.write('remove_custom_frame: %s\n' % frame_custom_thread_id)
CustomFramesContainer.custom_frames.pop(frame_custom_thread_id, None)
CustomFramesContainer._py_db_command_thread_event.set()
| epl-1.0 | 7,565,379,700,010,785,000 | -3,008,602,150,468,749,300 | 36.896552 | 133 | 0.677889 | false |
xiaoyexu/xCRM | setup.py | 1 | 19311 | # -*- coding: UTF-8 -*-
import os, django
os.environ['DJANGO_SETTINGS_MODULE'] = 'crm_site.settings'
django.setup()
from crm.models import *
from crm.common import *
# Demo initialize
# UserRoleType
userRoleTypes = [
['SALES_ROLE', u"销售"],
['DEV_ROLE', u"CRM开发"]
]
for userRoleType in userRoleTypes:
p = {}
p['key'] = userRoleType[0]
p['description'] = userRoleType[1]
UserRoleType.objects.update_or_create(**p)
# ViewType
viewTypes = [
['Search', u"Search View"],
['Result', u"Result View"],
['Detail', u"Detail View"]
]
for viewType in viewTypes:
p = {}
p['key'] = viewType[0]
p['description'] = viewType[1]
ViewType.objects.update_or_create(**p)
# SiteLanguage
siteLanguages = [
['en', u"English"],
['cn', u"中文"]
]
for siteLanguage in siteLanguages:
p = {}
p['key'] = siteLanguage[0]
p['description'] = siteLanguage[1]
SiteLanguage.objects.update_or_create(**p)
# SiteMenuItem
siteMenuItems = [
['SALES_ROLE', None, 'home', 'menu', 'home', 10, True],
['SALES_ROLE', None, 'comSearch', 'menu', 'commonOrder', 20, True]
]
for siteMenuItem in siteMenuItems:
p = {}
p['role'] = UserRoleType.objects.get(pk=siteMenuItem[0])
p['parentMenuId'] = siteMenuItem[1]
p['phraseId'] = siteMenuItem[2]
p['appId'] = siteMenuItem[3]
p['pageApp'] = siteMenuItem[4]
p['sortOrder'] = siteMenuItem[5]
p['valid'] = siteMenuItem[6]
SiteMenuItem.objects.update_or_create(**p)
# SiteMenuItem
siteAppTypes = [
['order', u"单据"],
['message', u"消息"],
['menu', u"菜单"],
['g_default', u"全局默认"],
['feedback', u"反馈"],
['calendar', u"日程"],
['bp', u"商业伙伴"]
]
for siteAppType in siteAppTypes:
p = {}
p['appId'] = siteAppType[0]
p['description'] = siteAppType[1]
SiteAppType.objects.update_or_create(**p)
userProfileTypes = [
['P_SALE_BASIC', 'Sales basic profile']
]
for userProfileType in userProfileTypes:
p = {}
p['key'] = userProfileType[0]
p['description'] = userProfileType[1]
UserProfileType.objects.update_or_create(**p)
authObjectTypes = [
['Order_SA01_Access', 'For order type SA01'],
['Order_AC01_Access', 'For order type AC01'],
['BP_OR_Access', 'For organization access'],
['BP_IN_Access', 'For individual account access'],
['BP_CO_Access', 'For corporation accounts access']
]
for authObjectType in authObjectTypes:
p = {}
p['key'] = authObjectType[0]
p['description'] = authObjectType[1]
AuthObjectType.objects.update_or_create(**p)
orderBaseTypes = [
['Order', 'Order'],
['Activity', 'Activity']
]
for orderBaseType in orderBaseTypes:
p = {}
p['key'] = orderBaseType[0]
p['description'] = orderBaseType[1]
OrderBaseType.objects.update_or_create(**p)
orderTypes = [
['SA01', 'Order', u'销售线索', 'attachment,changelog'],
['AC01', 'Activity', u'日程活动', '']
]
for orderType in orderTypes:
p = {}
p['key'] = orderType[0]
p['baseType'] = OrderBaseType.objects.get(pk=orderType[1])
p['description'] = orderType[2]
p['assignmentBlock'] = orderType[3]
OrderType.objects.update_or_create(**p)
orderBEDefs = [
['SA01', 'SaleOrderBE'],
['AC01', 'ActivityBE']
]
for orderBEDef in orderBEDefs:
p = {}
p['orderType'] = OrderType.objects.get(pk=orderBEDef[0])
p['businessEntity'] = orderBEDef[1]
OrderBEDef.objects.update_or_create(**p)
addressTypes = [
['ST', u'默认地址']
]
for addressType in addressTypes:
p = {}
p['key'] = addressType[0]
p['description'] = addressType[1]
AddressType.objects.update_or_create(**p)
bPTypes = [
['ZZ', None, u'本公司', None],
['OR', None, u'部门组织', None],
['IN', None, u'个人账户', None],
['CO', None, u'公司账户', None]
]
for bPType in bPTypes:
p = {}
p['key'] = bPType[0]
p['baseType'] = None
p['description'] = bPType[2]
p['assignmentBlock'] = None
BPType.objects.update_or_create(**p)
userLoginStatuses = [
['LOCK', u'已锁'],
['CLOSED', u'关闭'],
['ACTIVE', u'正常']
]
for userLoginStatus in userLoginStatuses:
p = {}
p['key'] = userLoginStatus[0]
p['description'] = userLoginStatus[1]
UserLoginStatus.objects.update_or_create(**p)
# tester user
firstName = 'tester'
lastName = 'tester'
bp = BP.objects.filter(firstName=firstName, lastName=lastName)
if bp:
bp = bp[0]
else:
bp = BP()
bp.type = BPType.objects.get(pk='IN')
bp.firstName = firstName
bp.lastName = lastName
bp.save()
userLogin = UserLogin.objects.filter(userbp=bp)
if userLogin:
userLogin = userLogin[0]
else:
userLogin = UserLogin()
user = User()
user.nickName = '%s %s' % (lastName, firstName)
user.realName = '%s %s' % (lastName, firstName)
user.save()
userLogin.user = user
userLogin.userbp = bp
userLogin.username = "%s.%s" % (firstName, lastName)
userLogin.password = '111111'
userLogin.passwordEncrypted = False
userLogin.status = UserLoginStatus.objects.get(pk='ACTIVE')
userLogin.save()
userRole = UserRole()
userRole.userlogin = userLogin
userRole.role = UserRoleType.objects.get(pk='SALES_ROLE')
userRole.save()
# Phrases
phrases = [
['nickName', 'g_default', 'cn', u'昵称'],
['nickName', 'g_default', 'en', u'Nickname'],
['oldPassword', 'g_default', 'cn', u'旧密码'],
['oldPassword', 'g_default', 'en', u'Old password'],
['enterOldPwd', 'g_default', 'cn', u'输入旧密码'],
['enterOldPwd', 'g_default', 'en', u'Enter old password'],
['newPassword', 'g_default', 'cn', u'新密码'],
['newPassword', 'g_default', 'en', u'New password'],
['enterNewPwd', 'g_default', 'cn', u'输入密码'],
['enterNewPwd', 'g_default', 'en', u'Enter new password'],
['reNewPassword', 'g_default', 'cn', u'确认新密码'],
['reNewPassword', 'g_default', 'en', u'Confirm new password'],
['eNewPwdAgain', 'g_default', 'cn', u'再次输入新密码'],
['eNewPwdAgain', 'g_default', 'en', u'Enter new password again'],
['calEventColor', 'g_default', 'cn', u'日程项目颜色'],
['calEventColor', 'g_default', 'en', u'Calendar item color'],
['public', 'order', 'cn', u'公开'],
['public', 'order', 'en', u'Public'],
['content', 'order', 'cn', u'内容'],
['content', 'order', 'en', u'Detail'],
['title', 'order', 'cn', u'标题'],
['title', 'order', 'en', u'Title'],
['orderChart', 'order', 'cn', u'概览'],
['orderChart', 'order', 'en', u'Overview'],
['orderList', 'order', 'cn', u'列表'],
['orderList', 'order', 'en', u'List'],
['newText', 'bp', 'cn', u'新增备注'],
['newText', 'bp', 'en', u'New Text'],
['allText', 'bp', 'cn', u'全部备注'],
['allText', 'bp', 'en', u'All Text'],
['customerType', 'order', 'cn', u'账户类型'],
['customerType', 'order', 'en', u'Type'],
['settleType', 'order', 'cn', u'结算类型'],
['settleType', 'order', 'en', u'Settlement'],
['orderSaved', 'g_default', 'cn', u'数据已保存'],
['orderSaved', 'g_default', 'en', u'Order saved'],
['field', 'g_default', 'cn', u'字段'],
['field', 'g_default', 'en', u'Field'],
['oldValue', 'g_default', 'cn', u'旧值'],
['oldValue', 'g_default', 'en', u'Old Value'],
['newValue', 'g_default', 'cn', u'新值'],
['newValue', 'g_default', 'en', u'New Value'],
['updatedBy', 'g_default', 'cn', u'更新者'],
['updatedBy', 'g_default', 'en', u'Updated By'],
['updatedAt', 'g_default', 'cn', u'更新时间'],
['updatedAt', 'g_default', 'en', u'Updated At'],
['UATP', 'order', 'cn', u'UATP票'],
['UATP', 'order', 'en', u'UATP Tickets'],
['NONUATP', 'order', 'cn', u'非UATP票'],
['NONUATP', 'order', 'en', u'Non-UATP Tickets'],
['calendar', 'calendar', 'cn', u'日程'],
['calendar', 'calendar', 'en', u'Calendar'],
['detail', 'calendar', 'cn', u'详情'],
['detail', 'calendar', 'en', u'Activity'],
['startDateTime', 'order', 'cn', u'开始时间'],
['startDateTime', 'order', 'en', u'Start At'],
['endDateTime', 'order', 'cn', u'结束时间'],
['endDateTime', 'order', 'en', u'End At'],
['visibility', 'order', 'cn', u'可见性'],
['visibility', 'order', 'en', u'Visible'],
['upload', 'g_default', 'cn', u'上传'],
['upload', 'g_default', 'en', u'Upload'],
['addNewFile', 'g_default', 'cn', u'新增文件'],
['addNewFile', 'g_default', 'en', u'Add new file'],
['caution', 'g_default', 'cn', u'注意'],
['caution', 'g_default', 'en', u'Caution'],
['delFilePrompt', 'order', 'cn', u'确认要删除该文件吗?'],
['delFilePrompt', 'order', 'en', u'Are you sure to delete the file?'],
['thisYear', 'g_default', 'cn', u'本年'],
['thisYear', 'g_default', 'en', u'This year'],
['thisSeason', 'g_default', 'cn', u'本季度'],
['thisSeason', 'g_default', 'en', u'This season'],
['thisMonth', 'g_default', 'cn', u'本月'],
['thisMonth', 'g_default', 'en', u'This month'],
['thisWeek', 'g_default', 'cn', u'本周'],
['thisWeek', 'g_default', 'en', u'This week'],
['cardNumber', 'g_default', 'cn', u'卡号'],
['cardNumber', 'g_default', 'en', u'Card Number'],
['sales', 'g_default', 'cn', u'销售'],
['sales', 'g_default', 'en', u'Salesman'],
['corporate', 'g_default', 'cn', u'企业'],
['corporate', 'g_default', 'en', u'Corporation'],
['detail', 'g_default', 'cn', u'详情'],
['detail', 'g_default', 'en', u'Detail'],
['attachment', 'g_default', 'cn', u'附件'],
['attachment', 'g_default', 'en', u'Attachment'],
['change', 'g_default', 'cn', u'修改历史'],
['change', 'g_default', 'en', u'Change log'],
['custDate', 'g_default', 'cn', u'自定义日期段'],
['custDate', 'g_default', 'en', u'Custimized Date Range'],
['salesAnalysis', 'g_default', 'cn', u'销售业务分析'],
['salesAnalysis', 'g_default', 'en', u'Analysis'],
['all', 'g_default', 'cn', u'全部'],
['all', 'g_default', 'en', u'All'],
['transactionTotal', 'g_default', 'cn', u'交易量'],
['transactionTotal', 'g_default', 'en', u'Total'],
['shanhangTotal', 'g_default', 'cn', u'山航交易量'],
['shanhangTotal', 'g_default', 'en', u'Shanghan Total'],
['err.e01', 'g_default', 'cn', u'用户名或密码错误'],
['err.e01', 'g_default', 'en', u'Wrong username or password'],
['err.e02', 'g_default', 'cn', u'登录失败过多,账户已锁,请联系管理员'],
['err.e02', 'g_default', 'en', u'Too many failures, please contact administrator'],
['copyright', 'g_default', 'cn', u'© 20XX-20XX 版权所有<br>'],
['copyright', 'g_default', 'en', u'© 20XX-20XX'],
['version', 'g_default', 'cn', u'版本'],
['version', 'g_default', 'en', u'Version'],
['customerList', 'g_default', 'cn', u'客户列表'],
['customerList', 'g_default', 'en', u'Customers'],
['customerDetail', 'g_default', 'cn', u'客户详情'],
['customerDetail', 'g_default', 'en', u'Customer Detail'],
['phone', 'g_default', 'cn', u'联系电话'],
['phone', 'g_default', 'en', u'Phone'],
['contactPerson', 'g_default', 'cn', u'联系人'],
['contactPerson', 'g_default', 'en', u'Contact'],
['legalPerson', 'g_default', 'cn', u'法人'],
['legalPerson', 'g_default', 'en', u'Legal Person'],
['actualPerson', 'g_default', 'cn', u'实际控制人'],
['actualPerson', 'g_default', 'en', u'Actual Person'],
['copStructure', 'g_default', 'cn', u'组织结构'],
['copStructure', 'g_default', 'en', u'Structure'],
['corpLiscense', 'g_default', 'cn', u'营业执照'],
['corpLiscense', 'g_default', 'en', u'Liscense'],
['showLiscense', 'g_default', 'cn', u'显示'],
['showLiscense', 'g_default', 'en', u'Show'],
['noLiscense', 'g_default', 'cn', u'未上传'],
['noLiscense', 'g_default', 'en', u'Not available'],
['reUpload', 'g_default', 'cn', u'重新上传'],
['reUpload', 'g_default', 'en', u'Reupload'],
['searchOrder', 'bp', 'cn', u'筛选条件'],
['searchOrder', 'bp', 'en', u'Search'],
['type', 'bp', 'cn', u'类型'],
['type', 'bp', 'en', u'Type'],
['name1', 'bp', 'cn', u'名称 1'],
['name1', 'bp', 'en', u'Name 1'],
['name2', 'bp', 'cn', u'名称 2'],
['name2', 'bp', 'en', u'Name 2'],
['commonBp', 'menu', 'cn', u'商业伙伴'],
['commonBp', 'menu', 'en', u'Business Partner'],
['createBp', 'g_default', 'cn', u'创建商业伙伴'],
['createBp', 'g_default', 'en', u'Create Business Partner'],
['createBpTxt', 'g_default', 'cn', u'选择商业伙伴类型'],
['createBpTxt', 'g_default', 'en', u'Select business partner type'],
['bpSaved', 'g_default', 'cn', u'商业伙伴已保存'],
['bpSaved', 'g_default', 'en', u'Business partner saved'],
['district', 'bp', 'cn', u'地区'],
['district', 'bp', 'en', u'District'],
['phone', 'bp', 'cn', u'联系电话'],
['phone', 'bp', 'en', u'Phone'],
['contact', 'bp', 'cn', u'联系人'],
['contact', 'bp', 'en', u'Contact'],
['legalPerson', 'bp', 'cn', u'法人'],
['legalPerson', 'bp', 'en', u'Legal Person'],
['actualPerson', 'bp', 'cn', u'实际控制人'],
['actualPerson', 'bp', 'en', u'Actual Person'],
['corpStructure', 'bp', 'cn', u'组织结构'],
['corpStructure', 'bp', 'en', u'Corporation type'],
['partnerNo', 'bp', 'cn', u'编号'],
['partnerNo', 'bp', 'en', u'Partner No'],
['id', 'bp', 'cn', u'编号'],
['id', 'bp', 'en', u'ID'],
['corpLiscense', 'bp', 'cn', u'营业执照'],
['corpLiscense', 'bp', 'en', u'Liscense'],
['noImage', 'g_default', 'cn', u'未上传图片'],
['noImage', 'g_default', 'en', u'No image'],
['noFile', 'g_default', 'cn', u'未上传文件'],
['noFile', 'g_default', 'en', u'No file'],
['err.noRole', 'g_default', 'cn', u"""您还未被指派角色,请联系管理员"""],
['err.noRole', 'g_default', 'en', u"""You don't have role assigned yet, please contact administrator"""],
['file1', 'order', 'cn', u'文件1'],
['file1', 'order', 'en', u'File 1'],
['imgFile1', 'order', 'cn', u'图片1'],
['imgFile1', 'order', 'en', u'Image 1'],
['file2', 'order', 'cn', u'文件2'],
['file2', 'order', 'en', u'File 2'],
['imgFile2', 'order', 'cn', u'图片2'],
['imgFile2', 'order', 'en', u'Image 2'],
['file1', 'bp', 'cn', u'文件1'],
['file1', 'bp', 'en', u'File 1'],
['imgFile1', 'bp', 'cn', u'图片1'],
['imgFile1', 'bp', 'en', u'Image 1'],
['file2', 'bp', 'cn', u'文件2'],
['file2', 'bp', 'en', u'File 2'],
['imgFile2', 'bp', 'cn', u'图片2'],
['imgFile2', 'bp', 'en', u'Image 2'],
['feedback', 'g_default', 'cn', u'我有意见'],
['feedback', 'g_default', 'en', u'Feedback'],
['feedback', 'menu', 'cn', u'我有意见'],
['feedback', 'menu', 'en', u'Feedback'],
['message', 'menu', 'cn', u'消息'],
['message', 'menu', 'en', u'Message'],
['sender', 'message', 'cn', u'发送者'],
['sender', 'message', 'en', u'Sender'],
['receiver', 'message', 'cn', u'接收者'],
['receiver', 'message', 'en', u'Receiver'],
['content', 'message', 'cn', u'内容'],
['content', 'message', 'en', u'Content'],
['sentAt', 'message', 'cn', u'时间'],
['sentAt', 'message', 'en', u'Sent At'],
['orderFollowUp', 'g_default', 'cn', u'相关单据'],
['orderFollowUp', 'g_default', 'en', u'Follow ups'],
['createFollowUp', 'g_default', 'cn', u'创建跟进'],
['createFollowUp', 'g_default', 'en', u'Follow up'],
['checkResult', 'order', 'cn', u'审核结果'],
['checkResult', 'order', 'en', u'Result'],
['err.inMaint', 'g_default', 'cn', u"""系统正在维护中,请稍后登录……"""],
['err.inMaint', 'g_default', 'en', u'Maintainence is ongoing, please wait...'],
['err.e03', 'g_default', 'cn', u"""该账号已被禁用"""],
['err.e03', 'g_default', 'en', u"""Account not valid"""],
['ajaxError', 'g_default', 'cn', u"""消息错误,请确保登录状态后重试"""],
['ajaxError', 'g_default', 'en', u"""Response error"""],
['title', 'feedback', 'cn', u"""标题"""],
['title', 'feedback', 'en', u"""Title"""],
['type', 'feedback', 'cn', u"""类型"""],
['type', 'feedback', 'en', u"""Type"""],
['text', 'feedback', 'cn', u"""详细意见"""],
['text', 'feedback', 'en', u"""Text"""],
['bug', 'feedback', 'cn', u"""Bug错误"""],
['bug', 'feedback', 'en', u"""Bug"""],
['suggestion', 'feedback', 'cn', u"""需求建议"""],
['suggestion', 'feedback', 'en', u"""New requirement"""],
['corpStruct', 'menu', 'cn', u"""公司结构"""],
['corpStruct', 'menu', 'en', u"""Sturcture"""],
['corpStruct', 'g_default', 'cn', u"""公司结构"""],
['corpStruct', 'g_default', 'en', u"""Sturcture"""],
['myWork', 'menu', 'cn', u'我的任务'],
['myWork', 'menu', 'en', u'My work'],
['checkOrder', 'order', 'cn', u'审核对象'],
['checkOrder', 'order', 'en', u'Check target'],
['reply', 'g_default', 'cn', u'回复'],
['reply', 'g_default', 'en', u'Reply'],
['text', 'message', 'cn', u'内容'],
['text', 'message', 'en', u'Content'],
['send', 'message', 'cn', u'发送'],
['send', 'message', 'en', u'Send'],
['sendMessage', 'message', 'cn', u'发送消息'],
['sendMessage', 'message', 'en', u'Post'],
['dataTableCommNoPro', 'g_default', 'cn', None, u"""
"sDom": "<'row'<'col-md-6'l><'col-md-6'f>r>t<'row'<'col-md-12'i><'col-md-12 center-block'p>>", "sPaginationType": "bootstrap", "oLanguage": { "sLengthMenu": "每页 _MENU_ 条记录", "oPaginate": { "sFirst": "首页", "sLast": "末页", "sNext": "下一页", "sPrevious": "上一页" }, "sEmptyTable": "无记录", "sInfo": "共 _TOTAL_ 条记录 (_START_ / _END_)", "sInfoEmpty": "无记录", "sSearch": "快速搜索 _INPUT_ ", "sZeroRecords": "无匹配记录", "sInfoFiltered": "从 _MAX_ 条记录中过滤","sProcessing": ""},
"""],
['dataTableCommNoPro', 'g_default', 'en', None, u"""
"sDom": "<'row'<'col-md-6'l><'col-md-6'f>r>t<'row'<'col-md-12'i><'col-md-12 center-block'p>>", "sPaginationType": "bootstrap", "oLanguage": { "sLengthMenu": "_MENU_ Per page", "oPaginate": { "sFirst": "First", "sLast": "Last", "sNext": "Next", "sPrevious": "Previous" }, "sEmptyTable": "No record", "sInfo": "Total _TOTAL_ record(s) (_START_ / _END_)", "sInfoEmpty": "No records", "sSearch": "Fast search _INPUT_ ", "sZeroRecords": "No record matches", "sInfoFiltered": "Filter from _MAX_ record(s)","sProcessing": ""},
"""
],
['dev', 'menu', 'cn', u'开发'],
['dev', 'menu', 'en', u'Develop'],
['add', 'g_default', 'cn', u'增加'],
['add', 'g_default', 'en', u'Add']
]
for phrase in phrases:
print phrase
p = {}
p['phraseId'] = phrase[0]
p['app'] = SiteAppType.objects.get(appId=phrase[1])
p['phraseLan'] = SiteLanguage.objects.get(key=phrase[2])
if phrase[3]:
p['content'] = phrase[3]
else:
p['content'] = ''
p['bigContent'] = phrase[4]
SitePhrase.objects.update_or_create(**p)
| gpl-3.0 | -8,143,524,253,391,488,000 | 7,368,724,334,174,296,000 | 37.755319 | 531 | 0.548394 | false |
cloudbau/glance | glance/openstack/common/context.py | 5 | 2664 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Simple class that stores security context information in the web request.
Projects should subclass this class if they wish to enhance the request
context or provide additional information in their specific WSGI pipeline.
"""
import itertools
from glance.openstack.common import uuidutils
def generate_request_id():
return 'req-%s' % uuidutils.generate_uuid()
class RequestContext(object):
"""
Stores information about the security context under which the user
accesses the system, as well as additional request information.
"""
def __init__(self, auth_token=None, user=None, tenant=None, is_admin=False,
read_only=False, show_deleted=False, request_id=None):
self.auth_token = auth_token
self.user = user
self.tenant = tenant
self.is_admin = is_admin
self.read_only = read_only
self.show_deleted = show_deleted
if not request_id:
request_id = generate_request_id()
self.request_id = request_id
def to_dict(self):
return {'user': self.user,
'tenant': self.tenant,
'is_admin': self.is_admin,
'read_only': self.read_only,
'show_deleted': self.show_deleted,
'auth_token': self.auth_token,
'request_id': self.request_id}
def get_admin_context(show_deleted="no"):
context = RequestContext(None,
tenant=None,
is_admin=True,
show_deleted=show_deleted)
return context
def get_context_from_function_and_args(function, args, kwargs):
"""Find an arg of type RequestContext and return it.
This is useful in a couple of decorators where we don't
know much about the function we're wrapping.
"""
for arg in itertools.chain(kwargs.values(), args):
if isinstance(arg, RequestContext):
return arg
return None
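# Illustrative usage sketch (not part of the original module); the token and
# names below are placeholders:
#
#   ctxt = RequestContext(auth_token='a-token', user='alice', tenant='demo')
#   ctxt.to_dict()                      # includes a generated request_id
#   admin_ctxt = get_admin_context(show_deleted="yes")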
| apache-2.0 | 7,582,253,964,048,442,000 | 7,501,070,197,439,065,000 | 31.487805 | 79 | 0.646021 | false |
chongtianfeiyu/kbengine | kbe/res/scripts/common/Lib/test/test_faulthandler.py | 60 | 21327 | from contextlib import contextmanager
import datetime
import faulthandler
import os
import re
import signal
import subprocess
import sys
from test import support, script_helper
from test.script_helper import assert_python_ok
import tempfile
import unittest
from textwrap import dedent
try:
import threading
HAVE_THREADS = True
except ImportError:
HAVE_THREADS = False
TIMEOUT = 0.5
def expected_traceback(lineno1, lineno2, header, min_count=1):
regex = header
regex += ' File "<string>", line %s in func\n' % lineno1
regex += ' File "<string>", line %s in <module>' % lineno2
if 1 < min_count:
return '^' + (regex + '\n') * (min_count - 1) + regex
else:
return '^' + regex + '$'
@contextmanager
def temporary_filename():
filename = tempfile.mktemp()
try:
yield filename
finally:
support.unlink(filename)
class FaultHandlerTests(unittest.TestCase):
def get_output(self, code, filename=None):
"""
Run the specified code in Python (in a new child process) and read the
output from the standard error or from a file (if filename is set).
Return the output lines as a list.
Strip the reference count from the standard error for Python debug
build, and replace "Current thread 0x00007f8d8fbd9700" by "Current
thread XXX".
"""
code = dedent(code).strip()
with support.SuppressCrashReport():
process = script_helper.spawn_python('-c', code)
stdout, stderr = process.communicate()
exitcode = process.wait()
output = support.strip_python_stderr(stdout)
output = output.decode('ascii', 'backslashreplace')
if filename:
self.assertEqual(output, '')
with open(filename, "rb") as fp:
output = fp.read()
output = output.decode('ascii', 'backslashreplace')
output = re.sub('Current thread 0x[0-9a-f]+',
'Current thread XXX',
output)
return output.splitlines(), exitcode
def check_fatal_error(self, code, line_number, name_regex,
filename=None, all_threads=True, other_regex=None):
"""
Check that the fault handler for fatal errors is enabled and check the
traceback from the child process output.
Raise an error if the output doesn't match the expected format.
"""
if all_threads:
header = 'Current thread XXX (most recent call first)'
else:
header = 'Stack (most recent call first)'
regex = """
^Fatal Python error: {name}
{header}:
File "<string>", line {lineno} in <module>
"""
regex = dedent(regex.format(
lineno=line_number,
name=name_regex,
header=re.escape(header))).strip()
if other_regex:
regex += '|' + other_regex
output, exitcode = self.get_output(code, filename)
output = '\n'.join(output)
self.assertRegex(output, regex)
self.assertNotEqual(exitcode, 0)
@unittest.skipIf(sys.platform.startswith('aix'),
"the first page of memory is a mapped read-only on AIX")
def test_read_null(self):
self.check_fatal_error("""
import faulthandler
faulthandler.enable()
faulthandler._read_null()
""",
3,
# Issue #12700: Read NULL raises SIGILL on Mac OS X Lion
'(?:Segmentation fault|Bus error|Illegal instruction)')
def test_sigsegv(self):
self.check_fatal_error("""
import faulthandler
faulthandler.enable()
faulthandler._sigsegv()
""",
3,
'Segmentation fault')
def test_sigabrt(self):
self.check_fatal_error("""
import faulthandler
faulthandler.enable()
faulthandler._sigabrt()
""",
3,
'Aborted')
@unittest.skipIf(sys.platform == 'win32',
"SIGFPE cannot be caught on Windows")
def test_sigfpe(self):
self.check_fatal_error("""
import faulthandler
faulthandler.enable()
faulthandler._sigfpe()
""",
3,
'Floating point exception')
@unittest.skipIf(not hasattr(faulthandler, '_sigbus'),
"need faulthandler._sigbus()")
def test_sigbus(self):
self.check_fatal_error("""
import faulthandler
faulthandler.enable()
faulthandler._sigbus()
""",
3,
'Bus error')
@unittest.skipIf(not hasattr(faulthandler, '_sigill'),
"need faulthandler._sigill()")
def test_sigill(self):
self.check_fatal_error("""
import faulthandler
faulthandler.enable()
faulthandler._sigill()
""",
3,
'Illegal instruction')
def test_fatal_error(self):
self.check_fatal_error("""
import faulthandler
faulthandler._fatal_error(b'xyz')
""",
2,
'xyz')
@unittest.skipIf(sys.platform.startswith('openbsd') and HAVE_THREADS,
"Issue #12868: sigaltstack() doesn't work on "
"OpenBSD if Python is compiled with pthread")
@unittest.skipIf(not hasattr(faulthandler, '_stack_overflow'),
'need faulthandler._stack_overflow()')
def test_stack_overflow(self):
self.check_fatal_error("""
import faulthandler
faulthandler.enable()
faulthandler._stack_overflow()
""",
3,
'(?:Segmentation fault|Bus error)',
other_regex='unable to raise a stack overflow')
def test_gil_released(self):
self.check_fatal_error("""
import faulthandler
faulthandler.enable()
faulthandler._read_null(True)
""",
3,
'(?:Segmentation fault|Bus error|Illegal instruction)')
def test_enable_file(self):
with temporary_filename() as filename:
self.check_fatal_error("""
import faulthandler
output = open({filename}, 'wb')
faulthandler.enable(output)
faulthandler._sigsegv()
""".format(filename=repr(filename)),
4,
'Segmentation fault',
filename=filename)
def test_enable_single_thread(self):
self.check_fatal_error("""
import faulthandler
faulthandler.enable(all_threads=False)
faulthandler._sigsegv()
""",
3,
'Segmentation fault',
all_threads=False)
def test_disable(self):
code = """
import faulthandler
faulthandler.enable()
faulthandler.disable()
faulthandler._sigsegv()
"""
not_expected = 'Fatal Python error'
stderr, exitcode = self.get_output(code)
stderr = '\n'.join(stderr)
self.assertTrue(not_expected not in stderr,
"%r is present in %r" % (not_expected, stderr))
self.assertNotEqual(exitcode, 0)
def test_is_enabled(self):
orig_stderr = sys.stderr
try:
# regrtest may replace sys.stderr by io.StringIO object, but
# faulthandler.enable() requires that sys.stderr has a fileno()
# method
sys.stderr = sys.__stderr__
was_enabled = faulthandler.is_enabled()
try:
faulthandler.enable()
self.assertTrue(faulthandler.is_enabled())
faulthandler.disable()
self.assertFalse(faulthandler.is_enabled())
finally:
if was_enabled:
faulthandler.enable()
else:
faulthandler.disable()
finally:
sys.stderr = orig_stderr
def test_disabled_by_default(self):
# By default, the module should be disabled
code = "import faulthandler; print(faulthandler.is_enabled())"
args = (sys.executable, '-E', '-c', code)
# don't use assert_python_ok() because it always enable faulthandler
output = subprocess.check_output(args)
self.assertEqual(output.rstrip(), b"False")
def test_sys_xoptions(self):
# Test python -X faulthandler
code = "import faulthandler; print(faulthandler.is_enabled())"
args = (sys.executable, "-E", "-X", "faulthandler", "-c", code)
# don't use assert_python_ok() because it always enable faulthandler
output = subprocess.check_output(args)
self.assertEqual(output.rstrip(), b"True")
def test_env_var(self):
# empty env var
code = "import faulthandler; print(faulthandler.is_enabled())"
args = (sys.executable, "-c", code)
env = os.environ.copy()
env['PYTHONFAULTHANDLER'] = ''
# don't use assert_python_ok() because it always enable faulthandler
output = subprocess.check_output(args, env=env)
self.assertEqual(output.rstrip(), b"False")
# non-empty env var
env = os.environ.copy()
env['PYTHONFAULTHANDLER'] = '1'
output = subprocess.check_output(args, env=env)
self.assertEqual(output.rstrip(), b"True")
def check_dump_traceback(self, filename):
"""
Explicitly call dump_traceback() function and check its output.
Raise an error if the output doesn't match the expected format.
"""
code = """
import faulthandler
def funcB():
if {has_filename}:
with open({filename}, "wb") as fp:
faulthandler.dump_traceback(fp, all_threads=False)
else:
faulthandler.dump_traceback(all_threads=False)
def funcA():
funcB()
funcA()
"""
code = code.format(
filename=repr(filename),
has_filename=bool(filename),
)
if filename:
lineno = 6
else:
lineno = 8
expected = [
'Stack (most recent call first):',
' File "<string>", line %s in funcB' % lineno,
' File "<string>", line 11 in funcA',
' File "<string>", line 13 in <module>'
]
trace, exitcode = self.get_output(code, filename)
self.assertEqual(trace, expected)
self.assertEqual(exitcode, 0)
def test_dump_traceback(self):
self.check_dump_traceback(None)
def test_dump_traceback_file(self):
with temporary_filename() as filename:
self.check_dump_traceback(filename)
def test_truncate(self):
maxlen = 500
func_name = 'x' * (maxlen + 50)
truncated = 'x' * maxlen + '...'
code = """
import faulthandler
def {func_name}():
faulthandler.dump_traceback(all_threads=False)
{func_name}()
"""
code = code.format(
func_name=func_name,
)
expected = [
'Stack (most recent call first):',
' File "<string>", line 4 in %s' % truncated,
' File "<string>", line 6 in <module>'
]
trace, exitcode = self.get_output(code)
self.assertEqual(trace, expected)
self.assertEqual(exitcode, 0)
@unittest.skipIf(not HAVE_THREADS, 'need threads')
def check_dump_traceback_threads(self, filename):
"""
Call explicitly dump_traceback(all_threads=True) and check the output.
Raise an error if the output doesn't match the expected format.
"""
code = """
import faulthandler
from threading import Thread, Event
import time
def dump():
if {filename}:
with open({filename}, "wb") as fp:
faulthandler.dump_traceback(fp, all_threads=True)
else:
faulthandler.dump_traceback(all_threads=True)
class Waiter(Thread):
# avoid blocking if the main thread raises an exception.
daemon = True
def __init__(self):
Thread.__init__(self)
self.running = Event()
self.stop = Event()
def run(self):
self.running.set()
self.stop.wait()
waiter = Waiter()
waiter.start()
waiter.running.wait()
dump()
waiter.stop.set()
waiter.join()
"""
code = code.format(filename=repr(filename))
output, exitcode = self.get_output(code, filename)
output = '\n'.join(output)
if filename:
lineno = 8
else:
lineno = 10
regex = """
^Thread 0x[0-9a-f]+ \(most recent call first\):
(?: File ".*threading.py", line [0-9]+ in [_a-z]+
){{1,3}} File "<string>", line 23 in run
File ".*threading.py", line [0-9]+ in _bootstrap_inner
File ".*threading.py", line [0-9]+ in _bootstrap
Current thread XXX \(most recent call first\):
File "<string>", line {lineno} in dump
File "<string>", line 28 in <module>$
"""
regex = dedent(regex.format(lineno=lineno)).strip()
self.assertRegex(output, regex)
self.assertEqual(exitcode, 0)
def test_dump_traceback_threads(self):
self.check_dump_traceback_threads(None)
def test_dump_traceback_threads_file(self):
with temporary_filename() as filename:
self.check_dump_traceback_threads(filename)
def _check_dump_traceback_later(self, repeat, cancel, filename, loops):
"""
Check how many times the traceback is written in timeout x 2.5 seconds,
or timeout x 3.5 seconds if cancel is True: 1, 2 or 3 times depending
on repeat and cancel options.
Raise an error if the output doesn't match the expect format.
"""
timeout_str = str(datetime.timedelta(seconds=TIMEOUT))
code = """
import faulthandler
import time
def func(timeout, repeat, cancel, file, loops):
for loop in range(loops):
faulthandler.dump_traceback_later(timeout, repeat=repeat, file=file)
if cancel:
faulthandler.cancel_dump_traceback_later()
time.sleep(timeout * 5)
faulthandler.cancel_dump_traceback_later()
timeout = {timeout}
repeat = {repeat}
cancel = {cancel}
loops = {loops}
if {has_filename}:
file = open({filename}, "wb")
else:
file = None
func(timeout, repeat, cancel, file, loops)
if file is not None:
file.close()
"""
code = code.format(
timeout=TIMEOUT,
repeat=repeat,
cancel=cancel,
loops=loops,
has_filename=bool(filename),
filename=repr(filename),
)
trace, exitcode = self.get_output(code, filename)
trace = '\n'.join(trace)
if not cancel:
count = loops
if repeat:
count *= 2
header = r'Timeout \(%s\)!\nThread 0x[0-9a-f]+ \(most recent call first\):\n' % timeout_str
regex = expected_traceback(9, 20, header, min_count=count)
self.assertRegex(trace, regex)
else:
self.assertEqual(trace, '')
self.assertEqual(exitcode, 0)
@unittest.skipIf(not hasattr(faulthandler, 'dump_traceback_later'),
'need faulthandler.dump_traceback_later()')
def check_dump_traceback_later(self, repeat=False, cancel=False,
file=False, twice=False):
if twice:
loops = 2
else:
loops = 1
if file:
with temporary_filename() as filename:
self._check_dump_traceback_later(repeat, cancel,
filename, loops)
else:
self._check_dump_traceback_later(repeat, cancel, None, loops)
def test_dump_traceback_later(self):
self.check_dump_traceback_later()
def test_dump_traceback_later_repeat(self):
self.check_dump_traceback_later(repeat=True)
def test_dump_traceback_later_cancel(self):
self.check_dump_traceback_later(cancel=True)
def test_dump_traceback_later_file(self):
self.check_dump_traceback_later(file=True)
def test_dump_traceback_later_twice(self):
self.check_dump_traceback_later(twice=True)
@unittest.skipIf(not hasattr(faulthandler, "register"),
"need faulthandler.register")
def check_register(self, filename=False, all_threads=False,
unregister=False, chain=False):
"""
Register a handler displaying the traceback on a user signal. Raise the
signal and check the written traceback.
If chain is True, check that the previous signal handler is called.
Raise an error if the output doesn't match the expected format.
"""
signum = signal.SIGUSR1
code = """
import faulthandler
import os
import signal
import sys
def func(signum):
os.kill(os.getpid(), signum)
def handler(signum, frame):
handler.called = True
handler.called = False
exitcode = 0
signum = {signum}
unregister = {unregister}
chain = {chain}
if {has_filename}:
file = open({filename}, "wb")
else:
file = None
if chain:
signal.signal(signum, handler)
faulthandler.register(signum, file=file,
all_threads={all_threads}, chain={chain})
if unregister:
faulthandler.unregister(signum)
func(signum)
if chain and not handler.called:
if file is not None:
output = file
else:
output = sys.stderr
print("Error: signal handler not called!", file=output)
exitcode = 1
if file is not None:
file.close()
sys.exit(exitcode)
"""
code = code.format(
filename=repr(filename),
has_filename=bool(filename),
all_threads=all_threads,
signum=signum,
unregister=unregister,
chain=chain,
)
trace, exitcode = self.get_output(code, filename)
trace = '\n'.join(trace)
if not unregister:
if all_threads:
regex = 'Current thread XXX \(most recent call first\):\n'
else:
regex = 'Stack \(most recent call first\):\n'
regex = expected_traceback(7, 28, regex)
self.assertRegex(trace, regex)
else:
self.assertEqual(trace, '')
if unregister:
self.assertNotEqual(exitcode, 0)
else:
self.assertEqual(exitcode, 0)
def test_register(self):
self.check_register()
def test_unregister(self):
self.check_register(unregister=True)
def test_register_file(self):
with temporary_filename() as filename:
self.check_register(filename=filename)
def test_register_threads(self):
self.check_register(all_threads=True)
def test_register_chain(self):
self.check_register(chain=True)
@contextmanager
def check_stderr_none(self):
stderr = sys.stderr
try:
sys.stderr = None
with self.assertRaises(RuntimeError) as cm:
yield
self.assertEqual(str(cm.exception), "sys.stderr is None")
finally:
sys.stderr = stderr
def test_stderr_None(self):
# Issue #21497: provide an helpful error if sys.stderr is None,
# instead of just an attribute error: "None has no attribute fileno".
with self.check_stderr_none():
faulthandler.enable()
with self.check_stderr_none():
faulthandler.dump_traceback()
if hasattr(faulthandler, 'dump_traceback_later'):
with self.check_stderr_none():
faulthandler.dump_traceback_later(1e-3)
if hasattr(faulthandler, "register"):
with self.check_stderr_none():
faulthandler.register(signal.SIGUSR1)
if __name__ == "__main__":
unittest.main()
| lgpl-3.0 | 3,785,981,053,783,511,600 | -8,604,546,582,379,427,000 | 33.232745 | 103 | 0.541051 | false |
carnotweat/cpupimp | libs/rsa/pem.py | 216 | 3372 | # -*- coding: utf-8 -*-
#
# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Functions that load and write PEM-encoded files.'''
import base64
from rsa._compat import b, is_bytes
def _markers(pem_marker):
'''
Returns the start and end PEM markers
'''
if is_bytes(pem_marker):
pem_marker = pem_marker.decode('utf-8')
return (b('-----BEGIN %s-----' % pem_marker),
b('-----END %s-----' % pem_marker))
def load_pem(contents, pem_marker):
'''Loads a PEM file.
@param contents: the contents of the file to interpret
@param pem_marker: the marker of the PEM content, such as 'RSA PRIVATE KEY'
when your file has '-----BEGIN RSA PRIVATE KEY-----' and
'-----END RSA PRIVATE KEY-----' markers.
@return the base64-decoded content between the start and end markers.
@raise ValueError: when the content is invalid, for example when the start
marker cannot be found.
'''
(pem_start, pem_end) = _markers(pem_marker)
pem_lines = []
in_pem_part = False
for line in contents.splitlines():
line = line.strip()
# Skip empty lines
if not line:
continue
# Handle start marker
if line == pem_start:
if in_pem_part:
raise ValueError('Seen start marker "%s" twice' % pem_start)
in_pem_part = True
continue
# Skip stuff before first marker
if not in_pem_part:
continue
# Handle end marker
if in_pem_part and line == pem_end:
in_pem_part = False
break
# Load fields
if b(':') in line:
continue
pem_lines.append(line)
# Do some sanity checks
if not pem_lines:
raise ValueError('No PEM start marker "%s" found' % pem_start)
if in_pem_part:
raise ValueError('No PEM end marker "%s" found' % pem_end)
# Base64-decode the contents
pem = b('').join(pem_lines)
return base64.decodestring(pem)
def save_pem(contents, pem_marker):
'''Saves a PEM file.
@param contents: the contents to encode in PEM format
@param pem_marker: the marker of the PEM content, such as 'RSA PRIVATE KEY'
when your file has '-----BEGIN RSA PRIVATE KEY-----' and
'-----END RSA PRIVATE KEY-----' markers.
    @return the base64-encoded content wrapped in the start and end markers.
'''
(pem_start, pem_end) = _markers(pem_marker)
b64 = base64.encodestring(contents).replace(b('\n'), b(''))
pem_lines = [pem_start]
for block_start in range(0, len(b64), 64):
block = b64[block_start:block_start + 64]
pem_lines.append(block)
pem_lines.append(pem_end)
pem_lines.append(b(''))
return b('\n').join(pem_lines)
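# Illustrative round-trip sketch (the variable `der_bytes` is hypothetical and
# stands for some DER-encoded payload; it is not part of this module):
#
#     pem_blob = save_pem(der_bytes, 'RSA PRIVATE KEY')
#     recovered = load_pem(pem_blob, 'RSA PRIVATE KEY')
#     assert recovered == der_bytes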
| gpl-3.0 | 5,094,525,191,061,797,000 | -4,327,474,177,577,911,000 | 27.091667 | 79 | 0.611688 | false |
alaski/nova | nova/api/openstack/compute/server_password.py | 7 | 2555 | # Copyright (c) 2012 Nebula, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The server password extension."""
from nova.api.metadata import password
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova.policies import server_password as sp_policies
ALIAS = 'os-server-password'
class ServerPasswordController(wsgi.Controller):
"""The Server Password API controller for the OpenStack API."""
def __init__(self):
self.compute_api = compute.API()
@extensions.expected_errors(404)
def index(self, req, server_id):
context = req.environ['nova.context']
context.can(sp_policies.BASE_POLICY_NAME)
instance = common.get_instance(self.compute_api, context, server_id)
passw = password.extract_password(instance)
return {'password': passw or ''}
@extensions.expected_errors(404)
@wsgi.response(204)
def clear(self, req, server_id):
"""Removes the encrypted server password from the metadata server
Note that this does not actually change the instance server
password.
"""
context = req.environ['nova.context']
context.can(sp_policies.BASE_POLICY_NAME)
instance = common.get_instance(self.compute_api, context, server_id)
meta = password.convert_password(context, None)
instance.system_metadata.update(meta)
instance.save()
class ServerPassword(extensions.V21APIExtensionBase):
"""Server password support."""
name = "ServerPassword"
alias = ALIAS
version = 1
def get_resources(self):
resources = [
extensions.ResourceExtension(
ALIAS, ServerPasswordController(),
collection_actions={'clear': 'DELETE'},
parent=dict(member_name='server', collection_name='servers'))]
return resources
def get_controller_extensions(self):
return []
| apache-2.0 | 7,128,872,929,431,660,000 | -5,034,951,421,587,418,000 | 32.618421 | 78 | 0.6818 | false |
naparuba/kunai | opsbro/misc/internalyaml/ruamel/_test/test_version.py | 2 | 2164 | # coding: utf-8
import pytest # NOQA
import ruamel.yaml
from roundtrip import dedent
def load(s, version=None):
return ruamel.yaml.round_trip_load(dedent(s), version)
class TestVersions:
def test_explicit_1_2(self):
l = load("""\
%YAML 1.2
---
- 12:34:56
- 012
- 012345678
- 0o12
- on
- off
- yes
- no
- true
""")
assert l[0] == '12:34:56'
assert l[1] == 12
assert l[2] == '012345678'
assert l[3] == 10
assert l[4] == 'on'
assert l[5] == 'off'
assert l[6] == 'yes'
assert l[7] == 'no'
assert l[8] is True
def test_explicit_1_1(self):
l = load("""\
%YAML 1.1
---
- 12:34:56
- 012
- 012345678
- 0o12
- on
- off
- yes
- no
- true
""")
assert l[0] == 45296
assert l[1] == 10
assert l[2] == '012345678'
assert l[3] == 10
assert l[4] is True
assert l[5] is False
assert l[6] is True
assert l[7] is False
assert l[8] is True
def test_implicit_1_2(self):
l = load("""\
- 12:34:56
- 012
- 012345678
- 0o12
- on
- off
- yes
- no
- true
""")
assert l[0] == '12:34:56'
assert l[1] == 12
assert l[2] == '012345678'
assert l[3] == 10
assert l[4] == 'on'
assert l[5] == 'off'
assert l[6] == 'yes'
assert l[7] == 'no'
assert l[8] is True
def test_load_version_1_1(self):
l = load("""\
- 12:34:56
- 012
- 012345678
- 0o12
- on
- off
- yes
- no
- true
""", version="1.1")
assert l[0] == 45296
assert l[1] == 10
assert l[2] == '012345678'
assert l[3] == 10
assert l[4] is True
assert l[5] is False
assert l[6] is True
assert l[7] is False
assert l[8] is True
| mit | -8,005,905,891,167,463,000 | -958,110,600,884,210,400 | 19.807692 | 58 | 0.404806 | false |
csdl/makahiki | makahiki/apps/widgets/notifications/tests.py | 2 | 5060 | """Notification testing."""
from django.test import TransactionTestCase
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from apps.managers.challenge_mgr import challenge_mgr
from apps.utils import test_utils
from apps.widgets.notifications import get_unread_notifications
from apps.widgets.notifications.models import UserNotification
class NotificationUnitTests(TransactionTestCase):
"""Notification Test."""
def testGetUnread(self):
"""Test that we can get the user's unread notifications."""
user = User.objects.create_user("test", "test@test.com")
for i in range(0, 3):
notification = UserNotification(recipient=user, contents="Test notification %i" % i)
notification.save()
notifications = get_unread_notifications(user)
self.assertEqual(notifications["alerts"].count(), 0,
"There should not be any alert notifications.")
unread = notifications["unread"]
self.assertEqual(unread.count(), 3, "There should be three unread notifications.")
alert = UserNotification(recipient=user, contents="Alert notification", display_alert=True)
alert.save()
notifications = get_unread_notifications(user)
self.assertEqual(notifications["alerts"][0], alert,
"Alert notification should have been returned.")
unread = notifications["unread"]
self.assertEqual(unread.count(), 4, "There should be four unread notifications.")
class NotificationFunctionalTests(TransactionTestCase):
"""View Test."""
def setUp(self):
self.user = test_utils.setup_user(username="user", password="test")
self.team = self.user.profile.team
challenge_mgr.register_page_widget("help", "help.faq")
challenge_mgr.register_page_widget("home", "home")
from apps.managers.cache_mgr import cache_mgr
cache_mgr.clear()
self.client.login(username="user", password="test")
def testShowNotifications(self):
"""
Test that we can show notifications to the user.
"""
for i in range(0, 3):
notification = UserNotification(recipient=self.user,
contents="Test notification %i" % i)
notification.save()
response = self.client.get(reverse("home_index"))
self.assertNotContains(response, "The following item(s) need your attention",
msg_prefix="Alert should not be shown"
)
for i in range(0, 3):
self.assertContains(response, "Test notification %i" % i,
msg_prefix="Notification %i is not shown" % i
)
def testAlertNotifications(self):
"""Test alert."""
alert = UserNotification(recipient=self.user, contents="Alert notification",
display_alert=True)
alert.save()
response = self.client.get(reverse("home_index"))
self.assertContains(response, "notification-dialog", msg_prefix="Alert should be shown")
response = self.client.get(reverse("help_index"))
self.assertNotContains(response, "notification-dialog",
msg_prefix="Dialog should not be displayed")
def testAjaxReadNotifications(self):
"""Test that notifications can be marked as read via AJAX."""
notification = UserNotification(recipient=self.user, contents="Test notification")
notification.save()
response = self.client.post(reverse("notifications_read", args=(notification.pk,)), {},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.failUnlessEqual(response.status_code, 200)
response = self.client.get(reverse("home_index"))
self.assertNotContains(response, "Test notification",
msg_prefix="Notification should be read")
def testReadNotifications(self):
"""Test that notifications can be marked as read without AJAX."""
notification = UserNotification(recipient=self.user, contents="Test notification")
notification.save()
response = self.client.post(reverse("notifications_read", args=(notification.pk,)), {})
self.assertRedirects(response, reverse("home_index"),
msg_prefix="Marking as read should redirect.")
response = self.client.get(reverse("home_index"))
self.assertNotContains(response, "Test notification",
msg_prefix="Notification should be read")
# Test with a referring page.
notification = UserNotification(recipient=self.user, contents="Test notification 2")
notification.save()
response = self.client.post(reverse("notifications_read", args=(notification.pk,)), {},
HTTP_REFERER=reverse("help_index"))
self.assertRedirects(response, reverse("help_index"),
msg_prefix="Marking as read should redirect.")
response = self.client.get(reverse("home_index"))
self.assertNotContains(response, "Test notification 2",
msg_prefix="Notification should be read")
| mit | 4,490,418,001,818,345,500 | -5,428,343,801,641,199,000 | 41.521008 | 99 | 0.663834 | false |
andrew-aladev/samba-talloc-debug | auth/credentials/tests/bind.py | 11 | 5515 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This is unit with tests for LDAP access checks
import optparse
import sys
import base64
import re
import os
import copy
import time
sys.path.insert(0, "bin/python")
import samba
samba.ensure_external_module("testtools", "testtools")
samba.ensure_external_module("subunit", "subunit/python")
import samba.getopt as options
from ldb import (
SCOPE_BASE, SCOPE_SUBTREE, LdbError, ERR_NO_SUCH_OBJECT)
from samba.dcerpc import security
from samba.auth import system_session
from samba import gensec
from samba.samdb import SamDB
from samba.credentials import Credentials
import samba.tests, unittest
from samba.tests import delete_force
from subunit.run import SubunitTestRunner
from samba.tests import TestCase, TestSkipped
parser = optparse.OptionParser("ldap [options] <host>")
sambaopts = options.SambaOptions(parser)
parser.add_option_group(sambaopts)
# use command line creds if available
credopts = options.CredentialsOptions(parser)
parser.add_option_group(credopts)
opts, args = parser.parse_args()
if len(args) < 1:
parser.print_usage()
sys.exit(1)
host = args[0]
lp = sambaopts.get_loadparm()
creds = credopts.get_credentials(lp)
creds.set_gensec_features(creds.get_gensec_features() | gensec.FEATURE_SEAL)
creds_machine = copy.deepcopy(creds)
creds_user1 = copy.deepcopy(creds)
creds_user2 = copy.deepcopy(creds)
creds_user3 = copy.deepcopy(creds)
class BindTests(samba.tests.TestCase):
info_dc = None
def setUp(self):
super(BindTests, self).setUp()
# fetch rootDSEs
if self.info_dc is None:
res = ldb.search(base="", expression="", scope=SCOPE_BASE, attrs=["*"])
self.assertEquals(len(res), 1)
BindTests.info_dc = res[0]
# cache some of RootDSE props
self.schema_dn = self.info_dc["schemaNamingContext"][0]
self.domain_dn = self.info_dc["defaultNamingContext"][0]
self.config_dn = self.info_dc["configurationNamingContext"][0]
self.computer_dn = "CN=centos53,CN=Computers,%s" % self.domain_dn
self.password = "P@ssw0rd"
self.username = "BindTestUser_" + time.strftime("%s", time.gmtime())
def tearDown(self):
super(BindTests, self).tearDown()
def test_computer_account_bind(self):
        # create a computer account for the test
delete_force(ldb, self.computer_dn)
ldb.add_ldif("""
dn: """ + self.computer_dn + """
cn: CENTOS53
displayName: CENTOS53$
name: CENTOS53
sAMAccountName: CENTOS53$
countryCode: 0
objectClass: computer
objectClass: organizationalPerson
objectClass: person
objectClass: top
objectClass: user
codePage: 0
userAccountControl: 4096
dNSHostName: centos53.alabala.test
operatingSystemVersion: 5.2 (3790)
operatingSystem: Windows Server 2003
""")
ldb.modify_ldif("""
dn: """ + self.computer_dn + """
changetype: modify
replace: unicodePwd
unicodePwd:: """ + base64.b64encode("\"P@ssw0rd\"".encode('utf-16-le')) + """
""")
# do a simple bind and search with the machine account
creds_machine.set_bind_dn(self.computer_dn)
creds_machine.set_password(self.password)
print "BindTest with: " + creds_machine.get_bind_dn()
ldb_machine = samba.tests.connect_samdb(host, credentials=creds_machine,
lp=lp, ldap_only=True)
res = ldb_machine.search(base="", expression="", scope=SCOPE_BASE, attrs=["*"])
def test_user_account_bind(self):
# create user
ldb.newuser(username=self.username, password=self.password)
ldb_res = ldb.search(base=self.domain_dn,
scope=SCOPE_SUBTREE,
expression="(samAccountName=%s)" % self.username)
self.assertEquals(len(ldb_res), 1)
user_dn = ldb_res[0]["dn"]
# do a simple bind and search with the user account in format user@realm
creds_user1.set_bind_dn(self.username + "@" + creds.get_realm())
creds_user1.set_password(self.password)
print "BindTest with: " + creds_user1.get_bind_dn()
ldb_user1 = samba.tests.connect_samdb(host, credentials=creds_user1,
lp=lp, ldap_only=True)
res = ldb_user1.search(base="", expression="", scope=SCOPE_BASE, attrs=["*"])
# do a simple bind and search with the user account in format domain\user
creds_user2.set_bind_dn(creds.get_domain() + "\\" + self.username)
creds_user2.set_password(self.password)
print "BindTest with: " + creds_user2.get_bind_dn()
ldb_user2 = samba.tests.connect_samdb(host, credentials=creds_user2,
lp=lp, ldap_only=True)
res = ldb_user2.search(base="", expression="", scope=SCOPE_BASE, attrs=["*"])
# do a simple bind and search with the user account DN
creds_user3.set_bind_dn(str(user_dn))
creds_user3.set_password(self.password)
print "BindTest with: " + creds_user3.get_bind_dn()
ldb_user3 = samba.tests.connect_samdb(host, credentials=creds_user3,
lp=lp, ldap_only=True)
res = ldb_user3.search(base="", expression="", scope=SCOPE_BASE, attrs=["*"])
ldb = samba.tests.connect_samdb(host, credentials=creds, lp=lp, ldap_only=True)
runner = SubunitTestRunner()
rc = 0
if not runner.run(unittest.makeSuite(BindTests)).wasSuccessful():
rc = 1
sys.exit(rc)
| gpl-3.0 | 3,226,981,166,894,832,000 | 5,303,554,798,348,648,000 | 34.811688 | 87 | 0.654941 | false |
Ouranosinc/Magpie | magpie/alembic/versions/2018-06-04_a395ef9d3fe6_reference_root_service.py | 1 | 2200 | """
reference root service.
Revision ID: a395ef9d3fe6
Revises: ae1a3c8c7860
Create Date: 2018-06-04 11:38:31.296950
"""
import sqlalchemy as sa
from alembic import op
from alembic.context import get_context # noqa: F401
from sqlalchemy.dialects.postgresql.base import PGDialect
from sqlalchemy.orm.session import sessionmaker
# revision identifiers, used by Alembic.
revision = "a395ef9d3fe6"
down_revision = "ae1a3c8c7860"
branch_labels = None
depends_on = None
Session = sessionmaker()
resources = sa.table(
"resources",
sa.column("root_service_id", sa.Integer),
sa.column("resource_id", sa.Integer),
sa.column("parent_id", sa.Integer)
)
def upgrade():
context = get_context()
session = Session(bind=op.get_bind())
    # the two following lines avoid an erroneous double "DELETE" call when deleting a group due to incorrect checks
# https://stackoverflow.com/questions/28824401
context.connection.engine.dialect.supports_sane_rowcount = False
context.connection.engine.dialect.supports_sane_multi_rowcount = False
if isinstance(context.connection.engine.dialect, PGDialect):
op.add_column("resources", sa.Column("root_service_id", sa.Integer(), nullable=True))
# add existing resource references to their root service, loop through reference tree chain
query = session.execute(sa.select([resources.c.resource_id, resources.c.parent_id]))
for resource_id, parent_id in query:
root_resource_id = resource_id
while parent_id is not None:
parent_resource = session.execute(
sa.select([resources.c.resource_id, resources.c.parent_id])
.where(resources.c.resource_id == parent_id)
).fetchone()
root_resource_id, parent_id = parent_resource
session.execute(
resources.update().where(resources.c.resource_id == resource_id).
values(root_service_id=root_resource_id)
)
session.commit()
def downgrade():
context = get_context()
if isinstance(context.connection.engine.dialect, PGDialect):
op.drop_column("resources", "root_service_id")
| apache-2.0 | 2,265,523,545,376,936,200 | -773,279,891,837,814,800 | 32.333333 | 107 | 0.677727 | false |
geniusgogo/rt-thread | bsp/nrf5x/nrf51822/rtconfig.py | 5 | 2499 | import os
# toolchains options
ARCH='arm'
CPU='cortex-m0'
CROSS_TOOL='keil'
if os.getenv('RTT_CC'):
CROSS_TOOL = os.getenv('RTT_CC')
# cross_tool provides the cross compiler
# EXEC_PATH is the path to the compiler executables, for example CodeSourcery, Keil MDK, IAR
if CROSS_TOOL == 'gcc':
PLATFORM = 'gcc'
EXEC_PATH = 'D:/SourceryGCC/bin'
elif CROSS_TOOL == 'keil':
PLATFORM = 'armcc'
EXEC_PATH = 'C:/Keil_v5'
elif CROSS_TOOL == 'iar':
print('================ERROR============================')
print('Not support iar yet!')
print('=================================================')
exit(0)
if os.getenv('RTT_EXEC_PATH'):
EXEC_PATH = os.getenv('RTT_EXEC_PATH')
BUILD = 'debug'
if PLATFORM == 'gcc':
# toolchains
PREFIX = 'arm-none-eabi-'
CC = PREFIX + 'gcc'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'elf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
DEVICE = ' -mcpu='+CPU + ' -mthumb -ffunction-sections -fdata-sections'
CFLAGS = DEVICE
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp'
LFLAGS = DEVICE + ' -Wl,--gc-sections,-Map=rtthread.map,-cref,-u,Reset_Handler -T board/linker_scripts/link.lds'
CPATH = ''
LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -O0 -gdwarf-2'
AFLAGS += ' -gdwarf-2'
else:
CFLAGS += ' -O2'
POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n'
elif PLATFORM == 'armcc':
# toolchains
CC = 'armcc'
AS = 'armasm'
AR = 'armar'
LINK = 'armlink'
TARGET_EXT = 'axf'
DEVICE = ' --device DARMSTM'
CFLAGS = DEVICE + ' --apcs=interwork'
AFLAGS = DEVICE
LFLAGS = DEVICE + ' --info sizes --info totals --info unused --info veneers --list rtthread.map --scatter "board\linker_scripts\link.sct"'
CFLAGS += ' --c99'
CFLAGS += ' -I' + EXEC_PATH + '/ARM/RV31/INC'
LFLAGS += ' --libpath ' + EXEC_PATH + '/ARM/RV31/LIB'
EXEC_PATH += '/arm/bin40/'
if BUILD == 'debug':
CFLAGS += ' -g -O0'
AFLAGS += ' -g'
else:
CFLAGS += ' -O2'
POST_ACTION = 'fromelf --bin $TARGET --output rtthread.bin \nfromelf -z $TARGET'
def dist_handle(BSP_ROOT, dist_dir):
import sys
cwd_path = os.getcwd()
sys.path.append(os.path.join(os.path.dirname(BSP_ROOT), 'tools'))
from sdk_dist import dist_do_building
dist_do_building(BSP_ROOT, dist_dir)
| apache-2.0 | 468,558,377,315,600,700 | -4,764,802,526,626,614,000 | 26.163043 | 142 | 0.559424 | false |
arokem/PyEMMA | pyemma/_ext/sklearn/base.py | 2 | 17484 | """
--------------------------------------------------------------------------------------------
Extracted from scikit-learn to ensure basic compatibility
without creating an explicit dependency.
For the original code see
http://scikit-learn.org/
and
https://github.com/scikit-learn
--------------------------------------------------------------------------------------------
Base classes for all estimators.
"""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# License: BSD 3 clause
import copy
import inspect
import warnings
import numpy as np
from scipy import sparse
from pyemma._ext import six
###############################################################################
def clone(estimator, safe=True):
"""Constructs a new estimator with the same parameters.
Clone does a deep copy of the model in an estimator
without actually copying attached data. It yields a new estimator
with the same parameters that has not been fit on any data.
Parameters
----------
estimator: estimator object, or list, tuple or set of objects
The estimator or group of estimators to be cloned
safe: boolean, optional
If safe is false, clone will fall back to a deepcopy on objects
that are not estimators.
"""
estimator_type = type(estimator)
# XXX: not handling dictionaries
if estimator_type in (list, tuple, set, frozenset):
return estimator_type([clone(e, safe=safe) for e in estimator])
elif not hasattr(estimator, 'get_params'):
if not safe:
return copy.deepcopy(estimator)
else:
raise TypeError("Cannot clone object '%s' (type %s): "
"it does not seem to be a scikit-learn estimator "
"as it does not implement a 'get_params' methods."
% (repr(estimator), type(estimator)))
klass = estimator.__class__
new_object_params = estimator.get_params(deep=False)
for name, param in six.iteritems(new_object_params):
new_object_params[name] = clone(param, safe=False)
new_object = klass(**new_object_params)
params_set = new_object.get_params(deep=False)
# quick sanity check of the parameters of the clone
for name in new_object_params:
param1 = new_object_params[name]
param2 = params_set[name]
if isinstance(param1, np.ndarray):
# For most ndarrays, we do not test for complete equality
if not isinstance(param2, type(param1)):
equality_test = False
elif (param1.ndim > 0
and param1.shape[0] > 0
and isinstance(param2, np.ndarray)
and param2.ndim > 0
and param2.shape[0] > 0):
equality_test = (
param1.shape == param2.shape
and param1.dtype == param2.dtype
# We have to use '.flat' for 2D arrays
and param1.flat[0] == param2.flat[0]
and param1.flat[-1] == param2.flat[-1]
)
else:
equality_test = np.all(param1 == param2)
elif sparse.issparse(param1):
# For sparse matrices equality doesn't work
if not sparse.issparse(param2):
equality_test = False
elif param1.size == 0 or param2.size == 0:
equality_test = (
param1.__class__ == param2.__class__
and param1.size == 0
and param2.size == 0
)
else:
equality_test = (
param1.__class__ == param2.__class__
and param1.data[0] == param2.data[0]
and param1.data[-1] == param2.data[-1]
and param1.nnz == param2.nnz
and param1.shape == param2.shape
)
else:
new_obj_val = new_object_params[name]
params_set_val = params_set[name]
# The following construct is required to check equality on special
            # singletons such as np.nan that are not equal to themselves:
equality_test = (new_obj_val == params_set_val or
new_obj_val is params_set_val)
if not equality_test:
raise RuntimeError('Cannot clone object %s, as the constructor '
'does not seem to set parameter %s' %
(estimator, name))
return new_object
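# Minimal usage sketch for clone() (SomeEstimator is a hypothetical
# scikit-learn-style estimator, shown only for illustration):
#
#     base = SomeEstimator(alpha=0.5)
#     fresh = clone(base)             # same constructor params, no fitted state
#     fresh.get_params()['alpha']     # -> 0.5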
###############################################################################
def _pprint(params, offset=0, printer=repr):
"""Pretty print the dictionary 'params'
Parameters
----------
params: dict
The dictionary to pretty print
offset: int
The offset in characters to add at the begin of each line.
printer:
The function to convert entries to strings, typically
the builtin str or repr
"""
# Do a multi-line justified repr:
options = np.get_printoptions()
np.set_printoptions(precision=5, threshold=64, edgeitems=2)
params_list = list()
this_line_length = offset
line_sep = ',\n' + (1 + offset // 2) * ' '
for i, (k, v) in enumerate(sorted(six.iteritems(params))):
if type(v) is float:
# use str for representing floating point numbers
# this way we get consistent representation across
# architectures and versions.
this_repr = '%s=%s' % (k, str(v))
else:
# use repr of the rest
this_repr = '%s=%s' % (k, printer(v))
if len(this_repr) > 500:
this_repr = this_repr[:300] + '...' + this_repr[-100:]
if i > 0:
if (this_line_length + len(this_repr) >= 75 or '\n' in this_repr):
params_list.append(line_sep)
this_line_length = len(line_sep)
else:
params_list.append(', ')
this_line_length += 2
params_list.append(this_repr)
this_line_length += len(this_repr)
np.set_printoptions(**options)
lines = ''.join(params_list)
# Strip trailing space to avoid nightmare in doctests
lines = '\n'.join(l.rstrip(' ') for l in lines.split('\n'))
return lines
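# Illustrative output of _pprint (example values are arbitrary):
#
#     _pprint({'alpha': 0.5, 'fit_intercept': True})
#     # -> "alpha=0.5, fit_intercept=True"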
###############################################################################
# NOTE: I have renamed this from BaseEstimator to Parametric in order to also use it for Models
class BaseEstimator(object):
"""Base class for all estimators in scikit-learn
Notes
-----
All estimators should specify all the parameters that can be set
at the class level in their ``__init__`` as explicit keyword
arguments (no ``*args`` or ``**kwargs``).
"""
@classmethod
def _get_param_names(cls):
"""Get parameter names for the estimator"""
# fetch the constructor or the original constructor before
# deprecation wrapping if any
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
if init is object.__init__:
# No explicit constructor to introspect
return []
# introspect the constructor arguments to find the model parameters
# to represent
args, varargs, kw, default = inspect.getargspec(init)
if varargs is not None:
raise RuntimeError("scikit-learn estimators should always "
"specify their parameters in the signature"
" of their __init__ (no varargs)."
" %s doesn't follow this convention."
% (cls, ))
# Remove 'self'
# XXX: This is going to fail if the init is a staticmethod, but
# who would do this?
args.pop(0)
args.sort()
return args
def get_params(self, deep=True):
"""Get parameters for this estimator.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
out = dict()
for key in self._get_param_names():
# We need deprecation warnings to always be on in order to
# catch deprecated param values.
# This is set in utils/__init__.py but it gets overwritten
# when running under python3 somehow.
warnings.simplefilter("always", DeprecationWarning)
try:
with warnings.catch_warnings(record=True) as w:
value = getattr(self, key, None)
if len(w) and w[0].category == DeprecationWarning:
# if the parameter is deprecated, don't show it
continue
finally:
warnings.filters.pop(0)
# XXX: should we rather test if instance of estimator?
if deep and hasattr(value, 'get_params'):
deep_items = value.get_params().items()
out.update((key + '__' + k, val) for k, val in deep_items)
out[key] = value
return out
def set_params(self, **params):
"""Set the parameters of this estimator.
The method works on simple estimators as well as on nested objects
(such as pipelines). The former have parameters of the form
``<component>__<parameter>`` so that it's possible to update each
component of a nested object.
Returns
-------
self
"""
if not params:
# Simple optimisation to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
for key, value in six.iteritems(params):
split = key.split('__', 1)
if len(split) > 1:
# nested objects case
name, sub_name = split
if name not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s' %
(name, self))
sub_object = valid_params[name]
sub_object.set_params(**{sub_name: value})
else:
# simple objects case
if key not in valid_params:
raise ValueError('Invalid parameter %s ' 'for estimator %s'
% (key, self.__class__.__name__))
setattr(self, key, value)
return self
def __repr__(self):
class_name = self.__class__.__name__
return '%s(%s)' % (class_name, _pprint(self.get_params(deep=False),
offset=len(class_name),),)
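# Usage sketch for get_params / set_params with a hypothetical subclass
# (names and defaults are chosen only for illustration):
#
#     class MyEstimator(BaseEstimator):
#         def __init__(self, gamma=1.0, kernel='rbf'):
#             self.gamma = gamma
#             self.kernel = kernel
#
#     est = MyEstimator()
#     est.get_params()            # -> {'gamma': 1.0, 'kernel': 'rbf'}
#     est.set_params(gamma=0.1)   # updates gamma and returns est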
# Currently unused
# ###############################################################################
# class ClassifierMixin(object):
# """Mixin class for all classifiers in scikit-learn."""
# _estimator_type = "classifier"
#
# def score(self, X, y, sample_weight=None):
# """Returns the mean accuracy on the given test data and labels.
# In multi-label classification, this is the subset accuracy
# which is a harsh metric since you require for each sample that
# each label set be correctly predicted.
# Parameters
# ----------
# X : array-like, shape = (n_samples, n_features)
# Test samples.
# y : array-like, shape = (n_samples) or (n_samples, n_outputs)
# True labels for X.
# sample_weight : array-like, shape = [n_samples], optional
# Sample weights.
# Returns
# -------
# score : float
# Mean accuracy of self.predict(X) wrt. y.
# """
# from .metrics import accuracy_score
# return accuracy_score(y, self.predict(X), sample_weight=sample_weight)
# Currently unused
# ###############################################################################
# class RegressorMixin(object):
# """Mixin class for all regression estimators in scikit-learn."""
# _estimator_type = "regressor"
#
# def score(self, X, y, sample_weight=None):
# """Returns the coefficient of determination R^2 of the prediction.
# The coefficient R^2 is defined as (1 - u/v), where u is the regression
# sum of squares ((y_true - y_pred) ** 2).sum() and v is the residual
# sum of squares ((y_true - y_true.mean()) ** 2).sum().
# Best possible score is 1.0, lower values are worse.
# Parameters
# ----------
# X : array-like, shape = (n_samples, n_features)
# Test samples.
# y : array-like, shape = (n_samples) or (n_samples, n_outputs)
# True values for X.
# sample_weight : array-like, shape = [n_samples], optional
# Sample weights.
# Returns
# -------
# score : float
# R^2 of self.predict(X) wrt. y.
# """
#
# from .metrics import r2_score
# return r2_score(y, self.predict(X), sample_weight=sample_weight)
###############################################################################
class ClusterMixin(object):
"""Mixin class for all cluster estimators in scikit-learn."""
_estimator_type = "clusterer"
def fit_predict(self, X, y=None):
"""Performs clustering on X and returns cluster labels.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Input data.
Returns
-------
y : ndarray, shape (n_samples,)
cluster labels
"""
# non-optimized default implementation; override when a better
# method is possible for a given clustering algorithm
self.fit(X)
return self.labels_
# class BiclusterMixin(object):
# """Mixin class for all bicluster estimators in scikit-learn"""
#
# @property
# def biclusters_(self):
# """Convenient way to get row and column indicators together.
# Returns the ``rows_`` and ``columns_`` members.
# """
# return self.rows_, self.columns_
#
# def get_indices(self, i):
# """Row and column indices of the i'th bicluster.
# Only works if ``rows_`` and ``columns_`` attributes exist.
# Returns
# -------
# row_ind : np.array, dtype=np.intp
# Indices of rows in the dataset that belong to the bicluster.
# col_ind : np.array, dtype=np.intp
# Indices of columns in the dataset that belong to the bicluster.
# """
# rows = self.rows_[i]
# columns = self.columns_[i]
# return np.nonzero(rows)[0], np.nonzero(columns)[0]
#
# def get_shape(self, i):
# """Shape of the i'th bicluster.
# Returns
# -------
# shape : (int, int)
# Number of rows and columns (resp.) in the bicluster.
# """
# indices = self.get_indices(i)
# return tuple(len(i) for i in indices)
#
# def get_submatrix(self, i, data):
# """Returns the submatrix corresponding to bicluster `i`.
# Works with sparse matrices. Only works if ``rows_`` and
# ``columns_`` attributes exist.
# """
# from .utils.validation import check_array
# data = check_array(data, accept_sparse='csr')
# row_ind, col_ind = self.get_indices(i)
# return data[row_ind[:, np.newaxis], col_ind]
###############################################################################
class TransformerMixin(object):
"""Mixin class for all transformers in scikit-learn."""
def fit_transform(self, X, y=None, **fit_params):
"""Fit to data, then transform it.
Fits transformer to X and y with optional parameters fit_params
and returns a transformed version of X.
Parameters
----------
X : numpy array of shape [n_samples, n_features]
Training set.
y : numpy array of shape [n_samples]
Target values.
Returns
-------
X_new : numpy array of shape [n_samples, n_features_new]
Transformed array.
"""
# non-optimized default implementation; override when a better
# method is possible for a given clustering algorithm
if y is None:
# fit method of arity 1 (unsupervised transformation)
return self.fit(X, **fit_params).transform(X)
else:
# fit method of arity 2 (supervised transformation)
return self.fit(X, y, **fit_params).transform(X)
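# Sketch of a transformer that inherits fit_transform from this mixin
# (MeanCenterer is hypothetical and assumes X is a numpy array):
#
#     class MeanCenterer(BaseEstimator, TransformerMixin):
#         def fit(self, X, y=None):
#             self.mean_ = X.mean(axis=0)
#             return self
#         def transform(self, X):
#             return X - self.mean_
#
#     Xc = MeanCenterer().fit_transform(X)   # same as fit(X).transform(X)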
###############################################################################
class MetaEstimatorMixin(object):
"""Mixin class for all meta estimators in scikit-learn."""
# this is just a tag for the moment
###############################################################################
def is_classifier(estimator):
"""Returns True if the given estimator is (probably) a classifier."""
return getattr(estimator, "_estimator_type", None) == "classifier"
def is_regressor(estimator):
"""Returns True if the given estimator is (probably) a regressor."""
return getattr(estimator, "_estimator_type", None) == "regressor" | bsd-2-clause | -9,178,697,797,865,444,000 | -7,016,371,693,821,512,000 | 38.920091 | 95 | 0.532716 | false |
OiNutter/microbit-scripts | runner/runner.py | 1 | 4487 | from microbit import *
import random
from math import ceil
MAX_COLS = 5
PAUSE = 500
def init():
global blocks, wait, player, old_player, jump_counter, start, counter, lives, jump_held, jump_amount, jump_maxed, PAUSE
blocks = []
wait = 0
player = [1, 4]
old_player = None
jump_counter = 0
start = counter = running_time()
lives = 3
jump_held = False
jump_amount = 0
jump_maxed = False
PAUSE = 500
def handle_obstacles():
global blocks, wait
    # Make sure there's been enough time between blocks
if wait == 0:
# Do we want to create block?
if random.choice([True, False]):
new_block = [MAX_COLS, random.choice([4, 4, 4, 3])]
blocks.append(new_block)
wait = 2
# Are we making this a double?
if new_block[1] != 3 and random.choice([True, False]):
blocks.append([MAX_COLS+1, new_block[1]])
wait += 2
else:
wait -= 1
# Draw the blocks
for i in range(0, len(blocks)):
if blocks[i][0] < MAX_COLS:
# Hide the previous block position
if blocks[i] != player:
display.set_pixel(blocks[i][0], blocks[i][1], 0)
# Move the block
blocks[i][0] -= 1
if blocks[i][0] >= 0 and blocks[i][0] < MAX_COLS:
display.set_pixel(blocks[i][0], blocks[i][1], 3)
print(blocks)
# Clear any blocks that have gone off screen
while len(blocks) > 0 and blocks[0][0] == -1:
blocks.pop(0)
def draw_player ():
global old_player, player
# If the player has moved turn off the old position
if old_player is not None:
display.set_pixel(old_player[0], old_player[1], 0)
old_player = None
# display the player
display.set_pixel(player[0], player[1], 9)
def jump():
global player, jump_counter, old_player
# Create a ref to the old position
old_player = player[:]
# Change the y position by the current jump amount
player[1] = 4 - (jump_amount)
jump_counter += 1
def check_collision():
global lives
# Is the player in the position of a block?
print (player)
print (blocks)
print (tuple(player) in [tuple(block) for block in blocks])
if tuple(player) in [tuple(block) for block in blocks]:
# If so remove a life
display.set_pixel(4-lives+1, 0, 0)
lives -= 1
def display_lives():
if lives > 0:
for i in range(4, 4 - lives, -1):
display.set_pixel(i, 0, 5)
display.scroll("RUNNER")
display.scroll('Press any button to play', wait=False, loop=True)
while True:
if button_a.is_pressed() or button_b.is_pressed():
display.clear()
while button_a.is_pressed() or button_b.is_pressed():
sleep(0.1)
break
init()
while True:
while True:
if button_a.is_pressed():
# Check the button has been released and they're not at the max jump height
if jump_held == False and jump_maxed == False:
jump_amount = min(2, jump_amount + 1)
jump_maxed = jump_amount == 2
jump_held = True
jump()
else:
jump_held = False
# Update everything every 500ms (this speeds up as the game goes on)
if running_time() - counter >= PAUSE:
if jump_counter == 0:
# If they've just finished jumping bring them back to the ground
if jump_amount > 0:
jump_amount = max(0, jump_amount - 1)
old_player = player[:]
player[1] = 4 - jump_amount
if player[1] == 4:
jump_maxed = False
else:
jump_counter -= 1
draw_player()
handle_obstacles()
check_collision()
display_lives()
counter = running_time()
running = running_time() - start
if running > 0 and (running / 1000) % 30 == 0:
PAUSE -= 50
if lives == 0:
break
else:
sleep(0.1)
display.scroll("SCORE: %ds" % (round(running_time() - start)/1000), wait=False, loop=True)
sleep(100)
while True:
if button_a.is_pressed() or button_b.is_pressed():
while button_a.is_pressed():
continue
init()
display.clear()
break
| mit | 5,183,721,205,409,360,000 | -3,974,891,877,524,646,000 | 26.03012 | 123 | 0.539782 | false |
ajyoon/brown | tests/test_interface/test_font_interface.py | 1 | 1421 | import unittest
import pytest
from PyQt5 import QtGui
from brown.core import brown
from brown.interface.font_interface import FontInterface
from brown.utils.units import Unit
class MockUnit(Unit):
CONVERSION_RATE = 0.5
class TestFontInterface(unittest.TestCase):
def setUp(self):
brown.setup()
brown._app_interface._remove_all_loaded_fonts()
def test_init(self):
test_font = FontInterface(None, 'Bravura', MockUnit(12), 1, False)
assert(test_font.family_name == 'Bravura')
assert(test_font.size == MockUnit(12))
assert(test_font.weight == 1)
assert(test_font.italic is False)
def test_init_qt_attribute_transfer(self):
test_font = FontInterface(None, 'Bravura', MockUnit(12), 1, False)
assert(isinstance(test_font.qt_object, QtGui.QFont))
assert(test_font.qt_object.bold() is False)
assert(test_font.qt_object.italic() is False)
assert(test_font.qt_object.pointSize() == 6)
assert(test_font.qt_object.weight() == 1)
@pytest.mark.skip
# Skip this test - seems to vary by OS or display settings?
# May not actually be a problem. Proper testing to see if this
# is an issue will likely require visual checks on different OS's.
def test_em_size(self):
test_font = FontInterface(None, 'Bravura', MockUnit(2000), 1, False)
assert(int(test_font.em_size) == 366)
| gpl-3.0 | -1,465,583,348,236,547,000 | 5,001,919,533,991,527,000 | 32.833333 | 76 | 0.670654 | false |
marcoantoniooliveira/labweb | tests/integration/partner/strategy_tests.py | 51 | 4245 | from django.test import TestCase
from decimal import Decimal as D
from oscar.apps.partner import strategy
from oscar.apps.catalogue import models
from oscar.test import factories
from oscar.apps.basket.models import Line
class TestDefaultStrategy(TestCase):
def setUp(self):
self.strategy = strategy.Default()
def test_no_stockrecords(self):
product = factories.create_product()
info = self.strategy.fetch_for_product(product)
self.assertFalse(info.availability.is_available_to_buy)
self.assertIsNone(info.price.incl_tax)
def test_one_stockrecord(self):
product = factories.create_product(price=D('1.99'), num_in_stock=4)
info = self.strategy.fetch_for_product(product)
self.assertTrue(info.availability.is_available_to_buy)
self.assertEqual(D('1.99'), info.price.excl_tax)
self.assertEqual(D('1.99'), info.price.incl_tax)
def test_product_which_doesnt_track_stock(self):
product_class = models.ProductClass.objects.create(
name="Digital", track_stock=False)
product = factories.create_product(
product_class=product_class,
price=D('1.99'), num_in_stock=None)
info = self.strategy.fetch_for_product(product)
self.assertTrue(info.availability.is_available_to_buy)
def test_line_method_is_same_as_product_one(self):
product = factories.create_product()
line = Line(product=product)
info = self.strategy.fetch_for_line(line)
self.assertFalse(info.availability.is_available_to_buy)
self.assertIsNone(info.price.incl_tax)
def test_free_product_is_available_to_buy(self):
product = factories.create_product(price=D('0'), num_in_stock=1)
info = self.strategy.fetch_for_product(product)
self.assertTrue(info.availability.is_available_to_buy)
self.assertTrue(info.price.exists)
class TestDefaultStrategyForParentProductWhoseVariantsHaveNoStockRecords(TestCase):
def setUp(self):
self.strategy = strategy.Default()
parent = factories.create_product(structure='parent')
for x in range(3):
factories.create_product(parent=parent)
self.info = self.strategy.fetch_for_parent(parent)
def test_specifies_product_is_unavailable(self):
self.assertFalse(self.info.availability.is_available_to_buy)
def test_specifies_correct_availability_code(self):
self.assertEqual('unavailable', self.info.availability.code)
def test_specifies_product_has_no_price(self):
self.assertFalse(self.info.price.exists)
class TestDefaultStrategyForParentProductWithInStockVariant(TestCase):
def setUp(self):
self.strategy = strategy.Default()
parent = factories.create_product(structure='parent')
factories.create_product(parent=parent, price=D('10.00'),
num_in_stock=3)
for x in range(2):
factories.create_product(parent=parent)
self.info = self.strategy.fetch_for_parent(parent)
def test_specifies_product_is_available(self):
self.assertTrue(self.info.availability.is_available_to_buy)
def test_specifies_correct_availability_code(self):
self.assertEqual('available', self.info.availability.code)
def test_specifies_product_has_correct_price(self):
self.assertEqual(D('10.00'), self.info.price.incl_tax)
class TestDefaultStrategyForParentProductWithOutOfStockVariant(TestCase):
def setUp(self):
self.strategy = strategy.Default()
parent = factories.create_product(structure='parent')
factories.create_product(
parent=parent, price=D('10.00'), num_in_stock=0)
for x in range(2):
factories.create_product(parent=parent)
self.info = self.strategy.fetch_for_parent(parent)
def test_specifies_product_is_unavailable(self):
self.assertFalse(self.info.availability.is_available_to_buy)
def test_specifies_correct_availability_code(self):
self.assertEqual('unavailable', self.info.availability.code)
def test_specifies_product_has_correct_price(self):
self.assertEqual(D('10.00'), self.info.price.incl_tax)
| bsd-3-clause | -4,031,084,170,419,238,400 | 1,635,877,518,904,915,000 | 37.944954 | 83 | 0.690931 | false |
biothings/biothings_explorer | biothings_explorer/utils/networkx.py | 1 | 4018 | # -*- coding: utf-8 -*-
"""A collection of util functions related to networkx
.. moduleauthor:: Jiwen Xin <kevinxin@scripps.edu>
"""
from collections import defaultdict
def load_res_to_networkx(_res, G, labels, id_mapping, output_id_types):
"""Load restructured API response into a networkx MultiDiGraph.
Parameters
* G: networkx MultiDiGraph
* _res: restructured API response
* labels: list of schema properties to extract from API response
* id_mapping: dict containing mapping between equivalent ids and original ids
* output_id_types: list of output identifiers
"""
# check if API response is empty
if not _res:
return G
    # input_id is the query id, parsed_api_output is the parsed API response for it
for input_id, parsed_api_output in _res.items():
if not parsed_api_output:
continue
        # prop is a schema property, prop_vals the list of values for that property
for prop, prop_vals in parsed_api_output.items():
if prop not in labels:
continue
for _val in prop_vals:
if not isinstance(_val, dict):
G.add_node(
str(_val),
identifier=prop,
type=parsed_api_output["@type"],
level=2,
)
G.add_edge(id_mapping[input_id], str(_val), info=None, label=prop)
else:
for i, j in _val.items():
if i in output_id_types and j:
output_type = _val.get("@type")
source = _val.get("$source")
if not isinstance(j, list):
j = [j]
j = [str(jj) for jj in j]
G.add_nodes_from(j, identifier=i, type=output_type, level=2)
for _j in j:
G.add_edge(
id_mapping[input_id],
_j,
info=_val,
label=prop,
source=source,
)
return G
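# Illustrative call (the shape of `_res` below is a simplified assumption of a
# restructured API response, not actual API output):
#
#     G = networkx.MultiDiGraph()
#     _res = {'CHEMBL744': {'@type': 'ChemicalSubstance',
#                           'targetedBy': [{'@type': 'Gene', 'entrez': '1017',
#                                           '$source': 'mychem.info'}]}}
#     G = load_res_to_networkx(_res, G, ['targetedBy'],
#                              {'CHEMBL744': 'CHEMBL744'}, ['entrez'])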
def add_equivalent_ids_to_nodes(G, IDResolver):
"""Add equivalent ids to each node.
Parameters
* G: Networkx Graph
    * IDResolver: Python class in BTE used to resolve equivalent IDs
TODO: This is weird, shouldn't include IDConverter in this.
"""
# check if G is empty
if not G:
return (G, {})
# get all nodes which are level 2 (output nodes)
output_ids = [x for x, y in G.nodes(data=True) if y and y["level"] == 2]
# check if there is no output nodes
if not output_ids:
return (G, {})
# group output ids based on identifier and type
idc_inputs = []
output_ids_dict = defaultdict(list)
for _id in output_ids:
type_identifier = G.nodes[_id]["type"] + "," + G.nodes[_id]["identifier"]
output_ids_dict[type_identifier].append(_id)
# construct inputs for IDConverter
for k, v in output_ids_dict.items():
input_cls, input_id = k.split(",")
idc_inputs.append((v, input_id, input_cls))
# find equivalent ids
equivalent_ids = IDResolver.resolve_ids(idc_inputs)
# populate nodes with equivalent ids
for m, n in equivalent_ids.items():
G.nodes[m.split(":", 1)[-1]]["equivalent_ids"] = n
return (G, equivalent_ids)
def merge_two_networkx_graphs(G1, G2):
"""Merge two networkx MultiDiGraphs.
:param: G1: networkx graph as the source graph
:param: G2: networkx graph added to G1
TODO: line G1.add_edges_from(G2.edges(data=True)) will introduce duplicate edges
"""
nodes_to_add = []
for k, v in G2.nodes(data=True):
if k not in G1:
nodes_to_add.append((k, v))
G1.add_nodes_from(nodes_to_add)
G1.add_edges_from(G2.edges(data=True))
return G1
| apache-2.0 | -1,627,107,898,338,952,700 | -2,776,103,707,518,444,000 | 35.198198 | 88 | 0.529119 | false |
gdgellatly/server-tools | auth_admin_passkey/model/res_config.py | 61 | 3206 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Admin Passkey module for OpenERP
# Copyright (C) 2013-2014 GRAP (http://www.grap.coop)
# @author Sylvain LE GAL (https://twitter.com/legalsylvain)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields
from openerp.osv.orm import TransientModel
from openerp.tools.safe_eval import safe_eval
class base_config_settings(TransientModel):
_inherit = 'base.config.settings'
# Getter / Setter Section
def get_default_auth_admin_passkey_send_to_admin(
self, cr, uid, ids, context=None):
icp = self.pool['ir.config_parameter']
return {
'auth_admin_passkey_send_to_admin': safe_eval(icp.get_param(
cr, uid, 'auth_admin_passkey.send_to_admin', 'True')),
}
def set_auth_admin_passkey_send_to_admin(self, cr, uid, ids, context=None):
config = self.browse(cr, uid, ids[0], context=context)
icp = self.pool['ir.config_parameter']
icp.set_param(
cr, uid, 'auth_admin_passkey.send_to_admin',
repr(config.auth_admin_passkey_send_to_admin))
def get_default_auth_admin_passkey_send_to_user(
self, cr, uid, ids, context=None):
icp = self.pool['ir.config_parameter']
return {
'auth_admin_passkey_send_to_user': safe_eval(icp.get_param(
cr, uid, 'auth_admin_passkey.send_to_user', 'True')),
}
def set_auth_admin_passkey_send_to_user(self, cr, uid, ids, context=None):
config = self.browse(cr, uid, ids[0], context=context)
icp = self.pool['ir.config_parameter']
icp.set_param(
cr, uid, 'auth_admin_passkey.send_to_user',
repr(config.auth_admin_passkey_send_to_user))
# Columns Section
_columns = {
'auth_admin_passkey_send_to_admin': fields.boolean(
'Send email to admin user.',
help="""When the administrator use his password to login in """
"""with a different account, OpenERP will send an email """
"""to the admin user.""",
),
'auth_admin_passkey_send_to_user': fields.boolean(
string='Send email to user.',
help="""When the administrator use his password to login in """
"""with a different account, OpenERP will send an email """
"""to the account user.""",
),
}
| agpl-3.0 | 6,956,959,542,403,843,000 | -3,968,485,508,005,432,300 | 41.184211 | 79 | 0.597941 | false |
blaggacao/OpenUpgrade | addons/board/__init__.py | 439 | 1144 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2012 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import board
import controllers
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -7,202,751,641,744,125,000 | -3,491,054,553,035,283,500 | 43 | 78 | 0.618881 | false |
gwparikh/cvgui | grouping_calibration.py | 2 | 9402 | #!/usr/bin/env python
import os, sys, subprocess
import argparse
import subprocess
import threading
import timeit
from multiprocessing import Queue, Lock
from configobj import ConfigObj
from numpy import loadtxt
from numpy.linalg import inv
import matplotlib.pyplot as plt
import moving
from cvguipy import trajstorage, cvgenetic, cvconfig
"""
Grouping Calibration By Genetic Algorithm.
This script uses a genetic algorithm to search for the best configuration.
It does not monitor RAM usage, so CPU thrashing may happen when the number of parents (selection size) is too large.
"""
# class for genetic algorithm
class GeneticCompare(object):
def __init__(self, motalist, motplist, IDlist, cfg_list, lock):
self.motalist = motalist
self.motplist = motplist
self.IDlist = IDlist
self.cfg_list = cfg_list
self.lock = lock
    # This is used to calculate the fitness of an individual in the genetic algorithm.
    # It is modified to create the sqlite and cfg files before running computeClearMOT.
    # NOTE: errors show up when loading the same ID twice
def computeMOT(self, i):
# create sqlite and cfg file with id i
cfg_name = config_files +str(i)+'.cfg'
sql_name = sqlite_files +str(i)+'.sqlite'
open(cfg_name,'w').close()
config = ConfigObj(cfg_name)
cfg_list.write_config(i ,config)
command = ['cp', 'tracking_only.sqlite', sql_name]
process = subprocess.Popen(command)
process.wait()
command = ['trajextract.py', args.inputVideo, '-o', args.homography, '-t', cfg_name, '-d', sql_name, '--gf']
# suppress output of grouping extraction
devnull = open(os.devnull, 'wb')
process = subprocess.Popen(command, stdout = devnull)
process.wait()
obj = trajstorage.CVsqlite(sql_name)
print "loading", i
obj.loadObjects()
motp, mota, mt, mme, fpt, gt = moving.computeClearMOT(cdb.annotations, obj.objects, args.matchDistance, firstFrame, lastFrame)
if motp is None:
motp = 0
self.lock.acquire()
self.IDlist.put(i)
self.motplist.put(motp)
self.motalist.put(mota)
obj.close()
if args.PrintMOTA:
print("ID: mota:{} motp:{}".format(mota, motp))
self.lock.release()
return mota
if __name__ == '__main__' :
parser = argparse.ArgumentParser(description="compare all sqlites that are created by cfg_combination.py to the Annotated version to find the ID of the best configuration")
parser.add_argument('inputVideo', help= "input video filename")
parser.add_argument('-r', '--configuration-file', dest='range_cfg', help= "the configuration-file contain the range of configuration")
parser.add_argument('-t', '--traffintel-config', dest='traffintelConfig', help= "the TrafficIntelligence file to use for running the first extraction.")
parser.add_argument('-m', '--mask-File', dest='maskFilename', help="Name of the mask-File for trajextract")
parser.add_argument('-d', '--database-file', dest ='databaseFile', help ="Name of the databaseFile.")
parser.add_argument('-o', '--homography-file', dest ='homography', help = "Name of the homography file.", required = True)
parser.add_argument('-md', '--matching-distance', dest='matchDistance', help = "matchDistance", default = 10, type = float)
parser.add_argument('-a', '--accuracy', dest = 'accuracy', help = "accuracy parameter for genetic algorithm", type = int)
parser.add_argument('-p', '--population', dest = 'population', help = "population parameter for genetic algorithm", required = True, type = int)
parser.add_argument('-np', '--num-of-parents', dest = 'num_of_parents', help = "Number of parents that are selected each generation", type = int)
parser.add_argument('-mota', '--print-MOTA', dest='PrintMOTA', action = 'store_true', help = "Print MOTA for each ID.")
args = parser.parse_args()
os.mkdir('cfg_files')
os.mkdir('sql_files')
sqlite_files = "sql_files/Sqlite_ID_"
config_files = "cfg_files/Cfg_ID_"
    # -------------- initialize annotated version if it does not exist -------- #
# inputVideo check
if not os.path.exists(args.inputVideo):
print("Input video {} does not exist! Exiting...".format(args.inputVideo))
sys.exit(1)
# configuration file check
if args.range_cfg is None:
config = ConfigObj('range.cfg')
else:
config = ConfigObj(args.range_cfg)
# get configuration and put them to a List
cfg_list = cvconfig.CVConfigList()
thread_cfgtolist = threading.Thread(target = cvconfig.config_to_list, args = (cfg_list, config))
thread_cfgtolist.start();
# check if dbfile name is entered
if args.databaseFile is None:
        print("Database file was not provided; running trajextract and cvplayer.")
if not os.path.exists(args.homography):
print("Homography file does not exist! Exiting...")
sys.exit(1)
else:
videofile=args.inputVideo
if 'avi' in videofile:
if args.maskFilename is not None:
command = ['trajextract.py',args.inputVideo,'-m', args.maskFilename,'-o', args.homography]
else:
command = ['trajextract.py',args.inputVideo,'-o', args.homography]
process = subprocess.Popen(command)
process.wait()
databaseFile = videofile.replace('avi','sqlite')
command = ['cvplayer.py',args.inputVideo,'-d',databaseFile,'-o',args.homography]
process = subprocess.Popen(command)
process.wait()
else:
print("Input video {} is not 'avi' type. Exiting...".format(args.inputVideo))
sys.exit(1)
else:
databaseFile = args.databaseFile
thread_cfgtolist.join()
# ------------------Done initialization for annotation-------------------- #
# create first tracking only database template.
print("creating the first tracking only database template.")
if args.maskFilename is not None:
command = map(str, ['trajextract.py',args.inputVideo, '-d', 'tracking_only.sqlite', '-t', args.traffintelConfig, '-o', args.homography, '-m', args.maskFilename, '--tf'])
else:
        command = map(str, ['trajextract.py',args.inputVideo, '-d', 'tracking_only.sqlite', '-t', args.traffintelConfig, '-o', args.homography, '--tf'])
process = subprocess.Popen(command)
process.wait()
# ----start using genetic algorithm to search for best configuration-------#
start = timeit.default_timer()
dbfile = databaseFile;
homography = loadtxt(args.homography)
cdb = trajstorage.CVsqlite(dbfile)
cdb.open()
cdb.getLatestAnnotation()
cdb.createBoundingBoxTable(cdb.latestannotations, inv(homography))
cdb.loadAnnotaion()
for a in cdb.annotations:
a.computeCentroidTrajectory(homography)
    print "Latest Annotations in "+dbfile+": ", cdb.latestannotations
cdb.frameNumbers = cdb.getFrameList()
firstFrame = cdb.frameNumbers[0]
lastFrame = cdb.frameNumbers[-1]
foundmota = Queue()
foundmotp = Queue()
IDs = Queue()
lock = Lock()
Comp = GeneticCompare(foundmota, foundmotp, IDs, cfg_list, lock)
if args.accuracy != None:
GeneticCal = cvgenetic.CVGenetic(args.population, cfg_list, Comp.computeMOT, args.accuracy)
else:
GeneticCal = cvgenetic.CVGenetic(args.population, cfg_list, Comp.computeMOT)
if args.num_of_parents != None:
GeneticCal.run_thread(args.num_of_parents)
else:
GeneticCal.run_thread()
# tranform queues to lists
foundmota = cvgenetic.Queue_to_list(foundmota)
foundmotp = cvgenetic.Queue_to_list(foundmotp)
IDs = cvgenetic.Queue_to_list(IDs)
for i in range(len(foundmotp)):
foundmotp[i] /= args.matchDistance
Best_mota = max(foundmota)
Best_ID = IDs[foundmota.index(Best_mota)]
print "Best multiple object tracking accuracy (MOTA)", Best_mota
print "ID:", Best_ID
stop = timeit.default_timer()
print str(stop-start) + "s"
total = []
for i in range(len(foundmota)):
total.append(foundmota[i]- 0.1 * foundmotp[i])
Best_total = max(total)
Best_total_ID = IDs[total.index(Best_total)]
# ------------------------------Done searching----------------------------#
# use matplotlib to plot a graph of all calculated IDs along with their mota
plt.figure(1)
plt.plot(foundmota ,IDs ,'bo')
plt.plot(foundmotp ,IDs ,'yo')
plt.plot(Best_mota, Best_ID, 'ro')
plt.axis([-1, 1, -1, cfg_list.get_total_combination()])
plt.xlabel('mota')
plt.ylabel('ID')
plt.title(b'Best MOTA: '+str(Best_mota) +'\nwith ID: '+str(Best_ID))
plotFile = os.path.splitext(dbfile)[0] + '_CalibrationResult_mota.png'
plt.savefig(plotFile)
plt.figure(2)
plt.plot(total, IDs, 'bo')
plt.plot(Best_total, Best_total_ID, 'ro')
plt.xlabel('mota + motp')
plt.ylabel('ID')
plt.title(b'Best total: '+str(Best_total) +'\nwith ID: '+str(Best_total_ID))
# save the plot
plotFile = os.path.splitext(dbfile)[0] + '_CalibrationResult_motp.png'
plt.savefig(plotFile)
plt.show()
cdb.close()
| mit | -260,440,000,553,215,100 | 3,241,722,185,035,996,700 | 42.12844 | 177 | 0.636141 | false |
davidvon/pipa-pay-server | site-packages/jinja2/bccache.py | 256 | 12289 | # -*- coding: utf-8 -*-
"""
jinja2.bccache
~~~~~~~~~~~~~~
This module implements the bytecode cache system Jinja is optionally
using. This is useful if you have very complex template situations and
    the compilation of all those templates slows down your application too
much.
Situations where this is useful are often forking web applications that
are initialized on the first request.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD.
"""
from os import path, listdir
import os
import stat
import sys
import errno
import marshal
import tempfile
import fnmatch
from hashlib import sha1
from jinja2.utils import open_if_exists
from jinja2._compat import BytesIO, pickle, PY2, text_type
# marshal works better on 3.x, one hack less required
if not PY2:
marshal_dump = marshal.dump
marshal_load = marshal.load
else:
def marshal_dump(code, f):
if isinstance(f, file):
marshal.dump(code, f)
else:
f.write(marshal.dumps(code))
def marshal_load(f):
if isinstance(f, file):
return marshal.load(f)
return marshal.loads(f.read())
bc_version = 2
# magic version used to only change with new jinja versions. With 2.6
# we change this to also take Python version changes into account. The
# reason for this is that Python tends to segfault if fed earlier bytecode
# versions because someone thought it would be a good idea to reuse opcodes
# or make Python incompatible with earlier versions.
bc_magic = 'j2'.encode('ascii') + \
pickle.dumps(bc_version, 2) + \
pickle.dumps((sys.version_info[0] << 24) | sys.version_info[1])
class Bucket(object):
"""Buckets are used to store the bytecode for one template. It's created
and initialized by the bytecode cache and passed to the loading functions.
The buckets get an internal checksum from the cache assigned and use this
to automatically reject outdated cache material. Individual bytecode
cache subclasses don't have to care about cache invalidation.
"""
def __init__(self, environment, key, checksum):
self.environment = environment
self.key = key
self.checksum = checksum
self.reset()
def reset(self):
"""Resets the bucket (unloads the bytecode)."""
self.code = None
def load_bytecode(self, f):
"""Loads bytecode from a file or file like object."""
# make sure the magic header is correct
magic = f.read(len(bc_magic))
if magic != bc_magic:
self.reset()
return
# the source code of the file changed, we need to reload
checksum = pickle.load(f)
if self.checksum != checksum:
self.reset()
return
self.code = marshal_load(f)
def write_bytecode(self, f):
"""Dump the bytecode into the file or file like object passed."""
if self.code is None:
raise TypeError('can\'t write empty bucket')
f.write(bc_magic)
pickle.dump(self.checksum, f, 2)
marshal_dump(self.code, f)
def bytecode_from_string(self, string):
"""Load bytecode from a string."""
self.load_bytecode(BytesIO(string))
def bytecode_to_string(self):
"""Return the bytecode as string."""
out = BytesIO()
self.write_bytecode(out)
return out.getvalue()
class BytecodeCache(object):
"""To implement your own bytecode cache you have to subclass this class
and override :meth:`load_bytecode` and :meth:`dump_bytecode`. Both of
these methods are passed a :class:`~jinja2.bccache.Bucket`.
A very basic bytecode cache that saves the bytecode on the file system::
from os import path
class MyCache(BytecodeCache):
def __init__(self, directory):
self.directory = directory
def load_bytecode(self, bucket):
filename = path.join(self.directory, bucket.key)
if path.exists(filename):
with open(filename, 'rb') as f:
bucket.load_bytecode(f)
def dump_bytecode(self, bucket):
filename = path.join(self.directory, bucket.key)
with open(filename, 'wb') as f:
bucket.write_bytecode(f)
A more advanced version of a filesystem based bytecode cache is part of
Jinja2.
"""
def load_bytecode(self, bucket):
"""Subclasses have to override this method to load bytecode into a
bucket. If they are not able to find code in the cache for the
bucket, it must not do anything.
"""
raise NotImplementedError()
def dump_bytecode(self, bucket):
"""Subclasses have to override this method to write the bytecode
from a bucket back to the cache. If it unable to do so it must not
fail silently but raise an exception.
"""
raise NotImplementedError()
def clear(self):
"""Clears the cache. This method is not used by Jinja2 but should be
implemented to allow applications to clear the bytecode cache used
by a particular environment.
"""
def get_cache_key(self, name, filename=None):
"""Returns the unique hash key for this template name."""
hash = sha1(name.encode('utf-8'))
if filename is not None:
filename = '|' + filename
if isinstance(filename, text_type):
filename = filename.encode('utf-8')
hash.update(filename)
return hash.hexdigest()
def get_source_checksum(self, source):
"""Returns a checksum for the source."""
return sha1(source.encode('utf-8')).hexdigest()
def get_bucket(self, environment, name, filename, source):
"""Return a cache bucket for the given template. All arguments are
mandatory but filename may be `None`.
"""
key = self.get_cache_key(name, filename)
checksum = self.get_source_checksum(source)
bucket = Bucket(environment, key, checksum)
self.load_bytecode(bucket)
return bucket
def set_bucket(self, bucket):
"""Put the bucket into the cache."""
self.dump_bytecode(bucket)
class FileSystemBytecodeCache(BytecodeCache):
"""A bytecode cache that stores bytecode on the filesystem. It accepts
two arguments: The directory where the cache items are stored and a
pattern string that is used to build the filename.
If no directory is specified a default cache directory is selected. On
Windows the user's temp directory is used, on UNIX systems a directory
is created for the user in the system temp directory.
The pattern can be used to have multiple separate caches operate on the
same directory. The default pattern is ``'__jinja2_%s.cache'``. ``%s``
is replaced with the cache key.
>>> bcc = FileSystemBytecodeCache('/tmp/jinja_cache', '%s.cache')
This bytecode cache supports clearing of the cache using the clear method.
"""
def __init__(self, directory=None, pattern='__jinja2_%s.cache'):
if directory is None:
directory = self._get_default_cache_dir()
self.directory = directory
self.pattern = pattern
def _get_default_cache_dir(self):
tmpdir = tempfile.gettempdir()
        # On Windows the temporary directory is already user-specific unless
        # explicitly forced otherwise, so we can just use it.
if os.name == 'nt':
return tmpdir
if not hasattr(os, 'getuid'):
raise RuntimeError('Cannot determine safe temp directory. You '
'need to explicitly provide one.')
dirname = '_jinja2-cache-%d' % os.getuid()
actual_dir = os.path.join(tmpdir, dirname)
try:
os.mkdir(actual_dir, stat.S_IRWXU) # 0o700
except OSError as e:
if e.errno != errno.EEXIST:
raise
actual_dir_stat = os.lstat(actual_dir)
if actual_dir_stat.st_uid != os.getuid() \
or not stat.S_ISDIR(actual_dir_stat.st_mode) \
or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU:
raise RuntimeError('Temporary directory \'%s\' has an incorrect '
'owner, permissions, or type.' % actual_dir)
return actual_dir
def _get_cache_filename(self, bucket):
return path.join(self.directory, self.pattern % bucket.key)
def load_bytecode(self, bucket):
f = open_if_exists(self._get_cache_filename(bucket), 'rb')
if f is not None:
try:
bucket.load_bytecode(f)
finally:
f.close()
def dump_bytecode(self, bucket):
f = open(self._get_cache_filename(bucket), 'wb')
try:
bucket.write_bytecode(f)
finally:
f.close()
def clear(self):
# imported lazily here because google app-engine doesn't support
# write access on the file system and the function does not exist
# normally.
from os import remove
files = fnmatch.filter(listdir(self.directory), self.pattern % '*')
for filename in files:
try:
remove(path.join(self.directory, filename))
except OSError:
pass
class MemcachedBytecodeCache(BytecodeCache):
"""This class implements a bytecode cache that uses a memcache cache for
storing the information. It does not enforce a specific memcache library
(tummy's memcache or cmemcache) but will accept any class that provides
the minimal interface required.
Libraries compatible with this class:
- `werkzeug <http://werkzeug.pocoo.org/>`_.contrib.cache
- `python-memcached <http://www.tummy.com/Community/software/python-memcached/>`_
- `cmemcache <http://gijsbert.org/cmemcache/>`_
(Unfortunately the django cache interface is not compatible because it
does not support storing binary data, only unicode. You can however pass
the underlying cache client to the bytecode cache which is available
as `django.core.cache.cache._client`.)
The minimal interface for the client passed to the constructor is this:
.. class:: MinimalClientInterface
.. method:: set(key, value[, timeout])
Stores the bytecode in the cache. `value` is a string and
`timeout` the timeout of the key. If timeout is not provided
a default timeout or no timeout should be assumed, if it's
provided it's an integer with the number of seconds the cache
item should exist.
.. method:: get(key)
Returns the value for the cache key. If the item does not
exist in the cache the return value must be `None`.
The other arguments to the constructor are the prefix for all keys that
is added before the actual cache key and the timeout for the bytecode in
the cache system. We recommend a high (or no) timeout.
This bytecode cache does not support clearing of used items in the cache.
The clear method is a no-operation function.
.. versionadded:: 2.7
Added support for ignoring memcache errors through the
`ignore_memcache_errors` parameter.
"""
def __init__(self, client, prefix='jinja2/bytecode/', timeout=None,
ignore_memcache_errors=True):
self.client = client
self.prefix = prefix
self.timeout = timeout
self.ignore_memcache_errors = ignore_memcache_errors
def load_bytecode(self, bucket):
try:
code = self.client.get(self.prefix + bucket.key)
except Exception:
if not self.ignore_memcache_errors:
raise
code = None
if code is not None:
bucket.bytecode_from_string(code)
def dump_bytecode(self, bucket):
args = (self.prefix + bucket.key, bucket.bytecode_to_string())
if self.timeout is not None:
args += (self.timeout,)
try:
self.client.set(*args)
except Exception:
if not self.ignore_memcache_errors:
raise
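
# Usage sketch: attaching a bytecode cache to an Environment. The 'templates'
# directory and the cache path are assumptions made for illustration only.
#
#   from jinja2 import Environment, FileSystemLoader
#   env = Environment(
#       loader=FileSystemLoader('templates'),
#       bytecode_cache=FileSystemBytecodeCache('/tmp/jinja_cache'))
#   env.get_template('index.html')  # compiled bytecode is reused on reload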
| apache-2.0 | -2,176,363,984,911,888,100 | -416,742,573,609,803,600 | 34.723837 | 87 | 0.630157 | false |
julianwang/cinder | cinder/openstack/common/fileutils.py | 78 | 4135 | # Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import errno
import logging
import os
import stat
import tempfile
from oslo_utils import excutils
LOG = logging.getLogger(__name__)
_FILE_CACHE = {}
DEFAULT_MODE = stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO
def ensure_tree(path, mode=DEFAULT_MODE):
"""Create a directory (and any ancestor directories required)
:param path: Directory to create
:param mode: Directory creation permissions
"""
try:
os.makedirs(path, mode)
except OSError as exc:
if exc.errno == errno.EEXIST:
if not os.path.isdir(path):
raise
else:
raise
def read_cached_file(filename, force_reload=False):
"""Read from a file if it has been modified.
:param force_reload: Whether to reload the file.
    :returns: A tuple with a boolean specifying whether the data is fresh,
              and the (possibly cached) file contents.
"""
global _FILE_CACHE
if force_reload:
delete_cached_file(filename)
reloaded = False
mtime = os.path.getmtime(filename)
cache_info = _FILE_CACHE.setdefault(filename, {})
if not cache_info or mtime > cache_info.get('mtime', 0):
LOG.debug("Reloading cached file %s" % filename)
with open(filename) as fap:
cache_info['data'] = fap.read()
cache_info['mtime'] = mtime
reloaded = True
return (reloaded, cache_info['data'])
def delete_cached_file(filename):
"""Delete cached file if present.
:param filename: filename to delete
"""
global _FILE_CACHE
if filename in _FILE_CACHE:
del _FILE_CACHE[filename]
def delete_if_exists(path, remove=os.unlink):
"""Delete a file, but ignore file not found error.
:param path: File to delete
:param remove: Optional function to remove passed path
"""
try:
remove(path)
except OSError as e:
if e.errno != errno.ENOENT:
raise
@contextlib.contextmanager
def remove_path_on_error(path, remove=delete_if_exists):
"""Protect code that wants to operate on PATH atomically.
Any exception will cause PATH to be removed.
:param path: File to work with
:param remove: Optional function to remove passed path
"""
try:
yield
except Exception:
with excutils.save_and_reraise_exception():
remove(path)
def file_open(*args, **kwargs):
"""Open file
see built-in open() documentation for more details
Note: The reason this is kept in a separate module is to easily
be able to provide a stub module that doesn't alter system
state at all (for unit tests)
"""
return open(*args, **kwargs)
def write_to_tempfile(content, path=None, suffix='', prefix='tmp'):
"""Create temporary file or use existing file.
This util is needed for creating temporary file with
specified content, suffix and prefix. If path is not None,
it will be used for writing content. If the path doesn't
exist it'll be created.
:param content: content for temporary file.
:param path: same as parameter 'dir' for mkstemp
:param suffix: same as parameter 'suffix' for mkstemp
:param prefix: same as parameter 'prefix' for mkstemp
For example: it can be used in database tests for creating
configuration files.
"""
if path:
ensure_tree(path)
(fd, path) = tempfile.mkstemp(suffix=suffix, dir=path, prefix=prefix)
try:
os.write(fd, content)
finally:
os.close(fd)
return path
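
if __name__ == '__main__':
    # Usage sketch: the content, suffix and prefix below are arbitrary example
    # values, not anything Cinder itself relies on. remove_path_on_error()
    # deletes the file only if the block raises; the explicit cleanup at the
    # end removes it in the success case.
    example_path = write_to_tempfile(b'[DEFAULT]\n', suffix='.conf',
                                     prefix='example-')
    with remove_path_on_error(example_path):
        print('wrote %s' % example_path)
    delete_if_exists(example_path)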
| apache-2.0 | -3,617,129,143,617,320,000 | 772,366,341,842,130,400 | 26.751678 | 78 | 0.662636 | false |
beni55/olympia | apps/amo/search.py | 18 | 10622 | import logging
from django.conf import settings as dj_settings
from django_statsd.clients import statsd
from elasticsearch import Elasticsearch
log = logging.getLogger('z.es')
DEFAULT_HOSTS = ['localhost:9200']
DEFAULT_TIMEOUT = 5
DEFAULT_INDEXES = ['default']
DEFAULT_DUMP_CURL = None
def get_es(hosts=None, timeout=None, **settings):
"""Create an ES object and return it."""
# Cheap way of de-None-ifying things
hosts = hosts or getattr(dj_settings, 'ES_HOSTS', DEFAULT_HOSTS)
timeout = (timeout if timeout is not None else
getattr(dj_settings, 'ES_TIMEOUT', DEFAULT_TIMEOUT))
return Elasticsearch(hosts, timeout=timeout, **settings)
class ES(object):
def __init__(self, type_, index):
self.type = type_
self.index = index
self.steps = []
self.start = 0
self.stop = None
self.as_list = self.as_dict = False
self._results_cache = None
def _clone(self, next_step=None):
new = self.__class__(self.type, self.index)
new.steps = list(self.steps)
if next_step:
new.steps.append(next_step)
new.start = self.start
new.stop = self.stop
return new
def values(self, *fields):
return self._clone(next_step=('values', fields))
def values_dict(self, *fields):
return self._clone(next_step=('values_dict', fields))
def order_by(self, *fields):
return self._clone(next_step=('order_by', fields))
def query(self, **kw):
return self._clone(next_step=('query', kw.items()))
def filter(self, **kw):
return self._clone(next_step=('filter', kw.items()))
def facet(self, **kw):
return self._clone(next_step=('facet', kw.items()))
def source(self, *fields):
return self._clone(next_step=('source', fields))
def extra(self, **kw):
new = self._clone()
actions = 'values values_dict order_by query filter facet'.split()
for key, vals in kw.items():
assert key in actions
if hasattr(vals, 'items'):
new.steps.append((key, vals.items()))
else:
new.steps.append((key, vals))
return new
def count(self):
if self._results_cache:
return self._results_cache.count
else:
return self[:0].raw()['hits']['total']
def __len__(self):
return len(self._do_search())
def __getitem__(self, k):
new = self._clone()
# TODO: validate numbers and ranges
if isinstance(k, slice):
new.start, new.stop = k.start or 0, k.stop
return new
else:
new.start, new.stop = k, k + 1
return list(new)[0]
def _build_query(self):
filters = []
queries = []
sort = []
fields = ['id']
source = []
facets = {}
as_list = as_dict = False
for action, value in self.steps:
if action == 'order_by':
for key in value:
if key.startswith('-'):
sort.append({key[1:]: 'desc'})
else:
sort.append(key)
elif action == 'values':
fields.extend(value)
as_list, as_dict = True, False
elif action == 'values_dict':
if not value:
fields = []
else:
fields.extend(value)
as_list, as_dict = False, True
elif action == 'query':
queries.extend(self._process_queries(value))
elif action == 'filter':
filters.extend(self._process_filters(value))
elif action == 'source':
source.extend(value)
elif action == 'facet':
facets.update(value)
else:
raise NotImplementedError(action)
if len(queries) > 1:
qs = {'bool': {'must': queries}}
elif queries:
qs = queries[0]
else:
qs = {"match_all": {}}
qs = {
"function_score": {
"query": qs,
"functions": [{"field_value_factor": {"field": "boost"}}]
}
}
if filters:
if len(filters) > 1:
filters = {"and": filters}
qs = {
"filtered": {
"query": qs,
"filter": filters
}
}
body = {"query": qs}
if sort:
body['sort'] = sort
if self.start:
body['from'] = self.start
if self.stop is not None:
body['size'] = self.stop - self.start
if facets:
body['facets'] = facets
if fields:
body['fields'] = fields
# As per version 1.0, ES has deprecated loading fields not stored from
# '_source', plus non leaf fields are not allowed in fields.
if source:
body['_source'] = source
self.fields, self.as_list, self.as_dict = fields, as_list, as_dict
return body
def _split(self, string):
if '__' in string:
return string.rsplit('__', 1)
else:
return string, None
def _process_filters(self, value):
rv = []
value = dict(value)
or_ = value.pop('or_', [])
for key, val in value.items():
key, field_action = self._split(key)
if field_action is None:
rv.append({'term': {key: val}})
if field_action == 'in':
rv.append({'in': {key: val}})
elif field_action in ('gt', 'gte', 'lt', 'lte'):
rv.append({'range': {key: {field_action: val}}})
elif field_action == 'range':
from_, to = val
rv.append({'range': {key: {'gte': from_, 'lte': to}}})
if or_:
rv.append({'or': self._process_filters(or_.items())})
return rv
def _process_queries(self, value):
rv = []
value = dict(value)
or_ = value.pop('or_', [])
for key, val in value.items():
key, field_action = self._split(key)
if field_action is None:
rv.append({'term': {key: val}})
elif field_action in ('text', 'match'):
rv.append({'match': {key: val}})
elif field_action in ('prefix', 'startswith'):
rv.append({'prefix': {key: val}})
elif field_action in ('gt', 'gte', 'lt', 'lte'):
rv.append({'range': {key: {field_action: val}}})
elif field_action == 'fuzzy':
rv.append({'fuzzy': {key: val}})
if or_:
rv.append({'bool': {'should': self._process_queries(or_.items())}})
return rv
def _do_search(self):
if not self._results_cache:
hits = self.raw()
if self.as_dict:
ResultClass = DictSearchResults
elif self.as_list:
ResultClass = ListSearchResults
else:
ResultClass = ObjectSearchResults
self._results_cache = ResultClass(self.type, hits, self.fields)
return self._results_cache
def raw(self):
build_body = self._build_query()
es = get_es()
try:
with statsd.timer('search.es.timer') as timer:
hits = es.search(
body=build_body,
index=self.index,
doc_type=self.type._meta.db_table
)
except Exception:
log.error(build_body)
raise
statsd.timing('search.es.took', hits['took'])
log.debug('[%s] [%s] %s' % (hits['took'], timer.ms, build_body))
return hits
def __iter__(self):
return iter(self._do_search())
def raw_facets(self):
return self._do_search().results.get('facets', {})
@property
def facets(self):
facets = {}
for key, val in self.raw_facets().items():
if val['_type'] == 'terms':
facets[key] = [v for v in val['terms']]
elif val['_type'] == 'range':
facets[key] = [v for v in val['ranges']]
return facets
class SearchResults(object):
def __init__(self, type, results, fields):
self.type = type
self.took = results['took']
self.count = results['hits']['total']
self.results = results
self.fields = fields
self.set_objects(results['hits']['hits'])
def set_objects(self, hits):
raise NotImplementedError()
def __iter__(self):
return iter(self.objects)
def __len__(self):
return len(self.objects)
class DictSearchResults(SearchResults):
def set_objects(self, hits):
objs = []
if self.fields:
# When fields are specified in `values_dict(...)` we return the
# fields. Each field is coerced to a list to match the
# Elasticsearch >= 1.0 style.
for h in hits:
hit = {}
fields = h['fields']
# If source is returned, it means that it has been asked, so
# take it.
if '_source' in h:
fields.update(h['_source'])
for field, value in fields.items():
if type(value) != list:
value = [value]
hit[field] = value
objs.append(hit)
self.objects = objs
else:
self.objects = [r['_source'] for r in hits]
return self.objects
class ListSearchResults(SearchResults):
def set_objects(self, hits):
key = 'fields' if self.fields else '_source'
# When fields are specified in `values(...)` we return the fields. Each
# field is coerced to a list to match the Elasticsearch >= 1.0 style.
objs = []
for hit in hits:
objs.append(tuple([v] if key == 'fields' and type(v) != list else v
for v in hit[key].values()))
self.objects = objs
class ObjectSearchResults(SearchResults):
def set_objects(self, hits):
self.ids = [int(r['_id']) for r in hits]
self.objects = self.type.objects.filter(id__in=self.ids)
def __iter__(self):
objs = dict((obj.id, obj) for obj in self.objects)
return (objs[id] for id in self.ids if id in objs)
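
# Query-building sketch. 'Addon' and the index name are hypothetical here and
# only illustrate the chainable API and the double-underscore field actions
# handled by _process_filters/_process_queries above.
#
#   qs = (ES(Addon, 'addons')
#         .filter(status=4, weekly_downloads__gte=1000)
#         .query(name__match='video player')
#         .order_by('-weekly_downloads')
#         .values_dict('name', 'weekly_downloads')[:10])
#   for hit in qs:
#       ...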
| bsd-3-clause | 4,121,856,612,253,812,000 | -8,752,467,821,246,583,000 | 30.241176 | 79 | 0.501318 | false |
sankhesh/VTK | Filters/Hybrid/Testing/Python/TestPCA.py | 20 | 8348 | #!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# This example shows how to visualise the variation in shape in a set of objects using
# vtkPCAAnalysisFilter.
#
# We make three ellipsoids by distorting and translating a sphere and then align them together
# using vtkProcrustesAlignmentFilter, and then pass the output to vtkPCAAnalysisFilter. We visualise
# the first and second modes - the major sources of variation that were in the training set.
sphere = vtk.vtkSphereSource()
sphere.SetPhiResolution(36)
sphere.SetThetaResolution(36)
sphere.Update()
# make two copies of the shape and distort them a little
transform1 = vtk.vtkTransform()
transform1.Translate(0.2,0.1,0.3)
transform1.Scale(1.3,1.1,0.8)
transform2 = vtk.vtkTransform()
transform2.Translate(0.3,0.7,0.1)
transform2.Scale(1.0,0.1,1.8)
transformer1 = vtk.vtkTransformPolyDataFilter()
transformer1.SetInputConnection(sphere.GetOutputPort())
transformer1.SetTransform(transform1)
transformer1.Update()
transformer2 = vtk.vtkTransformPolyDataFilter()
transformer2.SetInputConnection(sphere.GetOutputPort())
transformer2.SetTransform(transform2)
transformer2.Update()
#------------------------------------------------------------------
# map these three shapes into the first renderer
#------------------------------------------------------------------
map1a = vtk.vtkPolyDataMapper()
map1a.SetInputConnection(sphere.GetOutputPort())
Actor1a = vtk.vtkActor()
Actor1a.SetMapper(map1a)
Actor1a.GetProperty().SetDiffuseColor(1.0000,0.3882,0.2784)
map1b = vtk.vtkPolyDataMapper()
map1b.SetInputConnection(transformer1.GetOutputPort())
Actor1b = vtk.vtkActor()
Actor1b.SetMapper(map1b)
Actor1b.GetProperty().SetDiffuseColor(0.3882,1.0000,0.2784)
map1c = vtk.vtkPolyDataMapper()
map1c.SetInputConnection(transformer2.GetOutputPort())
Actor1c = vtk.vtkActor()
Actor1c.SetMapper(map1c)
Actor1c.GetProperty().SetDiffuseColor(0.3882,0.2784,1.0000)
#------------------------------------------------------------------
# align the shapes using Procrustes (using SetModeToRigidBody)
# and map the aligned shapes into the second renderer
#------------------------------------------------------------------
group = vtk.vtkMultiBlockDataGroupFilter()
group.AddInputConnection(sphere.GetOutputPort())
group.AddInputConnection(transformer1.GetOutputPort())
group.AddInputConnection(transformer2.GetOutputPort())
procrustes = vtk.vtkProcrustesAlignmentFilter()
procrustes.SetInputConnection(group.GetOutputPort())
procrustes.GetLandmarkTransform().SetModeToRigidBody()
procrustes.Update()
map2a = vtk.vtkPolyDataMapper()
map2a.SetInputData(procrustes.GetOutput().GetBlock(0))
Actor2a = vtk.vtkActor()
Actor2a.SetMapper(map2a)
Actor2a.GetProperty().SetDiffuseColor(1.0000,0.3882,0.2784)
map2b = vtk.vtkPolyDataMapper()
map2b.SetInputData(procrustes.GetOutput().GetBlock(1))
Actor2b = vtk.vtkActor()
Actor2b.SetMapper(map2b)
Actor2b.GetProperty().SetDiffuseColor(0.3882,1.0000,0.2784)
map2c = vtk.vtkPolyDataMapper()
map2c.SetInputData(procrustes.GetOutput().GetBlock(2))
Actor2c = vtk.vtkActor()
Actor2c.SetMapper(map2c)
Actor2c.GetProperty().SetDiffuseColor(0.3882,0.2784,1.0000)
#------------------------------------------------------------------
# pass the output of Procrustes to vtkPCAAnalysisFilter
#------------------------------------------------------------------
pca = vtk.vtkPCAAnalysisFilter()
pca.SetInputConnection(procrustes.GetOutputPort())
pca.Update()
# we need to call Update because GetParameterisedShape is not
# part of the normal SetInput/GetOutput pipeline
#------------------------------------------------------------------
# map the first mode into the third renderer:
# -3,0,3 standard deviations on the first mode
# illustrate the extremes around the average shape
#------------------------------------------------------------------
params = vtk.vtkFloatArray()
params.SetNumberOfComponents(1)
params.SetNumberOfTuples(1)
params.SetTuple1(0,0.0)
shapea = vtk.vtkPolyData()
shapea.DeepCopy(sphere.GetOutput())
pca.GetParameterisedShape(params,shapea)
normalsa = vtk.vtkPolyDataNormals()
normalsa.SetInputData(shapea)
map3a = vtk.vtkPolyDataMapper()
map3a.SetInputConnection(normalsa.GetOutputPort())
Actor3a = vtk.vtkActor()
Actor3a.SetMapper(map3a)
Actor3a.GetProperty().SetDiffuseColor(1,1,1)
params.SetTuple1(0,-3.0)
shapeb = vtk.vtkPolyData()
shapeb.DeepCopy(sphere.GetOutput())
pca.GetParameterisedShape(params,shapeb)
normalsb = vtk.vtkPolyDataNormals()
normalsb.SetInputData(shapeb)
map3b = vtk.vtkPolyDataMapper()
map3b.SetInputConnection(normalsb.GetOutputPort())
Actor3b = vtk.vtkActor()
Actor3b.SetMapper(map3b)
Actor3b.GetProperty().SetDiffuseColor(1,1,1)
params.SetTuple1(0,3.0)
shapec = vtk.vtkPolyData()
shapec.DeepCopy(sphere.GetOutput())
pca.GetParameterisedShape(params,shapec)
normalsc = vtk.vtkPolyDataNormals()
normalsc.SetInputData(shapec)
map3c = vtk.vtkPolyDataMapper()
map3c.SetInputConnection(normalsc.GetOutputPort())
Actor3c = vtk.vtkActor()
Actor3c.SetMapper(map3c)
Actor3c.GetProperty().SetDiffuseColor(1,1,1)
#------------------------------------------------------------------
# map the second mode into the fourth renderer:
#------------------------------------------------------------------
params4 = vtk.vtkFloatArray()
params4.SetNumberOfComponents(1)
params4.SetNumberOfTuples(2)
params4.SetTuple1(0,0.0)
params4.SetTuple1(1,-3.0)
shape4a = vtk.vtkPolyData()
shape4a.DeepCopy(sphere.GetOutput())
pca.GetParameterisedShape(params4,shape4a)
normals4a = vtk.vtkPolyDataNormals()
normals4a.SetInputData(shape4a)
map4a = vtk.vtkPolyDataMapper()
map4a.SetInputConnection(normals4a.GetOutputPort())
Actor4a = vtk.vtkActor()
Actor4a.SetMapper(map4a)
Actor4a.GetProperty().SetDiffuseColor(1,1,1)
params4.SetTuple1(1,0.0)
shape4b = vtk.vtkPolyData()
shape4b.DeepCopy(sphere.GetOutput())
pca.GetParameterisedShape(params4,shape4b)
normals4b = vtk.vtkPolyDataNormals()
normals4b.SetInputData(shape4b)
map4b = vtk.vtkPolyDataMapper()
map4b.SetInputConnection(normals4b.GetOutputPort())
Actor4b = vtk.vtkActor()
Actor4b.SetMapper(map4b)
Actor4b.GetProperty().SetDiffuseColor(1,1,1)
params4.SetTuple1(1,3.0)
shape4c = vtk.vtkPolyData()
shape4c.DeepCopy(sphere.GetOutput())
pca.GetParameterisedShape(params4,shape4c)
normals4c = vtk.vtkPolyDataNormals()
normals4c.SetInputData(shape4c)
map4c = vtk.vtkPolyDataMapper()
map4c.SetInputConnection(normals4c.GetOutputPort())
Actor4c = vtk.vtkActor()
Actor4c.SetMapper(map4c)
Actor4c.GetProperty().SetDiffuseColor(1,1,1)
#------------------------------------------------------------------
# Create the RenderWindow and its four Renderers
#------------------------------------------------------------------
ren1 = vtk.vtkRenderer()
ren2 = vtk.vtkRenderer()
ren3 = vtk.vtkRenderer()
ren4 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
renWin.AddRenderer(ren2)
renWin.AddRenderer(ren3)
renWin.AddRenderer(ren4)
renWin.SetSize(600,200)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Add the actors to the renderer
ren1.AddActor(Actor1a)
ren1.AddActor(Actor1b)
ren1.AddActor(Actor1c)
ren2.AddActor(Actor2a)
ren2.AddActor(Actor2b)
ren2.AddActor(Actor2c)
ren3.AddActor(Actor3a)
ren3.AddActor(Actor3b)
ren3.AddActor(Actor3c)
ren4.AddActor(Actor4a)
ren4.AddActor(Actor4b)
ren4.AddActor(Actor4c)
# set the properties of the renderers
ren1.SetBackground(1,1,1)
ren1.SetViewport(0.0,0.0,0.25,1.0)
ren1.ResetCamera()
ren1.GetActiveCamera().SetPosition(1,-1,0)
ren1.ResetCamera()
ren2.SetBackground(1,1,1)
ren2.SetViewport(0.25,0.0,0.5,1.0)
ren2.ResetCamera()
ren2.GetActiveCamera().SetPosition(1,-1,0)
ren2.ResetCamera()
ren3.SetBackground(1,1,1)
ren3.SetViewport(0.5,0.0,0.75,1.0)
ren3.ResetCamera()
ren3.GetActiveCamera().SetPosition(1,-1,0)
ren3.ResetCamera()
ren4.SetBackground(1,1,1)
ren4.SetViewport(0.75,0.0,1.0,1.0)
ren4.ResetCamera()
ren4.GetActiveCamera().SetPosition(1,-1,0)
ren4.ResetCamera()
# render the image
#
renWin.Render()
# output the image to file (used to generate the initial regression image)
#vtkWindowToImageFilter to_image
#to_image SetInput renWin
#vtkPNGWriter to_png
#to_png SetFileName "TestPCA.png"
#to_png SetInputConnection [to_image GetOutputPort]
#to_png Write
# prevent the tk window from showing up then start the event loop
# --- end of script --
| bsd-3-clause | -8,952,747,792,698,552,000 | -7,964,128,480,771,954,000 | 35.938053 | 100 | 0.727 | false |
svn2github/django | django/conf/locale/sr/formats.py | 655 | 1980 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. F Y.'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y. H:i'
YEAR_MONTH_FORMAT = 'F Y.'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'j.m.Y.'
SHORT_DATETIME_FORMAT = 'j.m.Y. H:i'
FIRST_DAY_OF_WEEK = 1
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%d.%m.%Y.', '%d.%m.%y.', # '25.10.2006.', '25.10.06.'
'%d. %m. %Y.', '%d. %m. %y.', # '25. 10. 2006.', '25. 10. 06.'
'%Y-%m-%d', # '2006-10-25'
# '%d. %b %y.', '%d. %B %y.', # '25. Oct 06.', '25. October 06.'
# '%d. %b \'%y.', '%d. %B \'%y.', # '25. Oct '06.', '25. October '06.'
# '%d. %b %Y.', '%d. %B %Y.', # '25. Oct 2006.', '25. October 2006.'
)
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M', # '14:30'
)
DATETIME_INPUT_FORMATS = (
'%d.%m.%Y. %H:%M:%S', # '25.10.2006. 14:30:59'
'%d.%m.%Y. %H:%M', # '25.10.2006. 14:30'
'%d.%m.%Y.', # '25.10.2006.'
'%d.%m.%y. %H:%M:%S', # '25.10.06. 14:30:59'
'%d.%m.%y. %H:%M', # '25.10.06. 14:30'
'%d.%m.%y.', # '25.10.06.'
'%d. %m. %Y. %H:%M:%S', # '25. 10. 2006. 14:30:59'
'%d. %m. %Y. %H:%M', # '25. 10. 2006. 14:30'
'%d. %m. %Y.', # '25. 10. 2006.'
'%d. %m. %y. %H:%M:%S', # '25. 10. 06. 14:30:59'
'%d. %m. %y. %H:%M', # '25. 10. 06. 14:30'
'%d. %m. %y.', # '25. 10. 06.'
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
| bsd-3-clause | 9,189,229,077,512,861,000 | -5,420,080,410,345,167,000 | 39.408163 | 77 | 0.45 | false |
IllusionRom-deprecated/android_platform_external_chromium_org | tools/perf/benchmarks/spaceport.py | 23 | 2422 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs spaceport.io's PerfMarks benchmark."""
import logging
import os
import sys
from telemetry import test
from telemetry.core import util
from telemetry.page import page_measurement
from telemetry.page import page_set
class _SpaceportMeasurement(page_measurement.PageMeasurement):
def CustomizeBrowserOptions(self, options):
options.AppendExtraBrowserArgs('--disable-gpu-vsync')
def MeasurePage(self, _, tab, results):
tab.WaitForJavaScriptExpression(
'!document.getElementById("start-performance-tests").disabled', 60)
tab.ExecuteJavaScript("""
window.__results = {};
window.console.log = function(str) {
if (!str) return;
var key_val = str.split(': ');
if (!key_val.length == 2) return;
__results[key_val[0]] = key_val[1];
};
document.getElementById('start-performance-tests').click();
""")
num_results = 0
num_tests_in_spaceport = 24
while num_results < num_tests_in_spaceport:
tab.WaitForJavaScriptExpression(
'Object.keys(window.__results).length > %d' % num_results, 180)
num_results = tab.EvaluateJavaScript(
'Object.keys(window.__results).length')
logging.info('Completed test %d of %d' %
(num_results, num_tests_in_spaceport))
result_dict = eval(tab.EvaluateJavaScript(
'JSON.stringify(window.__results)'))
for key in result_dict:
chart, trace = key.split('.', 1)
results.Add(trace, 'objects (bigger is better)', float(result_dict[key]),
chart_name=chart, data_type='unimportant')
results.Add('Score', 'objects (bigger is better)',
[float(x) for x in result_dict.values()])
class Spaceport(test.Test):
"""spaceport.io's PerfMarks benchmark."""
test = _SpaceportMeasurement
# crbug.com/166703: This test frequently times out on Windows.
enabled = sys.platform != 'darwin' and not sys.platform.startswith('win')
def CreatePageSet(self, options):
spaceport_dir = os.path.join(util.GetChromiumSrcDir(), 'chrome', 'test',
'data', 'third_party', 'spaceport')
return page_set.PageSet.FromDict(
{'pages': [{'url': 'file://index.html'}]},
spaceport_dir)
| bsd-3-clause | -857,680,268,471,962,100 | 150,231,253,019,693,980 | 34.617647 | 79 | 0.652766 | false |
ntt-sic/cinder | cinder/api/v2/router.py | 2 | 4033 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
WSGI middleware for OpenStack Volume API.
"""
from cinder.api import extensions
import cinder.api.openstack
from cinder.api.v2 import limits
from cinder.api.v2 import snapshot_metadata
from cinder.api.v2 import snapshots
from cinder.api.v2 import types
from cinder.api.v2 import volume_metadata
from cinder.api.v2 import volumes
from cinder.api import versions
from cinder.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class APIRouter(cinder.api.openstack.APIRouter):
"""Routes requests on the API to the appropriate controller and method."""
ExtensionManager = extensions.ExtensionManager
def _setup_routes(self, mapper, ext_mgr):
self.resources['versions'] = versions.create_resource()
mapper.connect("versions", "/",
controller=self.resources['versions'],
action='show')
mapper.redirect("", "/")
self.resources['volumes'] = volumes.create_resource(ext_mgr)
mapper.resource("volume", "volumes",
controller=self.resources['volumes'],
collection={'detail': 'GET'},
member={'action': 'POST'})
self.resources['types'] = types.create_resource()
mapper.resource("type", "types",
controller=self.resources['types'])
self.resources['snapshots'] = snapshots.create_resource(ext_mgr)
mapper.resource("snapshot", "snapshots",
controller=self.resources['snapshots'],
collection={'detail': 'GET'},
member={'action': 'POST'})
self.resources['limits'] = limits.create_resource()
mapper.resource("limit", "limits",
controller=self.resources['limits'])
self.resources['snapshot_metadata'] = \
snapshot_metadata.create_resource()
snapshot_metadata_controller = self.resources['snapshot_metadata']
mapper.resource("snapshot_metadata", "metadata",
controller=snapshot_metadata_controller,
parent_resource=dict(member_name='snapshot',
collection_name='snapshots'))
mapper.connect("metadata",
"/{project_id}/snapshots/{snapshot_id}/metadata",
controller=snapshot_metadata_controller,
action='update_all',
conditions={"method": ['PUT']})
self.resources['volume_metadata'] = \
volume_metadata.create_resource()
volume_metadata_controller = self.resources['volume_metadata']
mapper.resource("volume_metadata", "metadata",
controller=volume_metadata_controller,
parent_resource=dict(member_name='volume',
collection_name='volumes'))
mapper.connect("metadata",
"/{project_id}/volumes/{volume_id}/metadata",
controller=volume_metadata_controller,
action='update_all',
conditions={"method": ['PUT']})
| apache-2.0 | 6,046,381,956,070,604,000 | 1,635,728,267,227,787,300 | 39.737374 | 78 | 0.60972 | false |
sriprasanna/django-1.3.1 | tests/regressiontests/get_or_create_regress/tests.py | 88 | 2540 | from django.test import TestCase
from models import Author, Publisher
class GetOrCreateTests(TestCase):
def test_related(self):
p = Publisher.objects.create(name="Acme Publishing")
# Create a book through the publisher.
book, created = p.books.get_or_create(name="The Book of Ed & Fred")
self.assertTrue(created)
# The publisher should have one book.
self.assertEqual(p.books.count(), 1)
# Try get_or_create again, this time nothing should be created.
book, created = p.books.get_or_create(name="The Book of Ed & Fred")
self.assertFalse(created)
# And the publisher should still have one book.
self.assertEqual(p.books.count(), 1)
# Add an author to the book.
ed, created = book.authors.get_or_create(name="Ed")
self.assertTrue(created)
# The book should have one author.
self.assertEqual(book.authors.count(), 1)
# Try get_or_create again, this time nothing should be created.
ed, created = book.authors.get_or_create(name="Ed")
self.assertFalse(created)
# And the book should still have one author.
self.assertEqual(book.authors.count(), 1)
# Add a second author to the book.
fred, created = book.authors.get_or_create(name="Fred")
self.assertTrue(created)
# The book should have two authors now.
self.assertEqual(book.authors.count(), 2)
# Create an Author not tied to any books.
Author.objects.create(name="Ted")
# There should be three Authors in total. The book object should have two.
self.assertEqual(Author.objects.count(), 3)
self.assertEqual(book.authors.count(), 2)
# Try creating a book through an author.
_, created = ed.books.get_or_create(name="Ed's Recipes", publisher=p)
self.assertTrue(created)
# Now Ed has two Books, Fred just one.
self.assertEqual(ed.books.count(), 2)
self.assertEqual(fred.books.count(), 1)
# Use the publisher's primary key value instead of a model instance.
_, created = ed.books.get_or_create(name='The Great Book of Ed', publisher_id=p.id)
self.assertTrue(created)
# Try get_or_create again, this time nothing should be created.
_, created = ed.books.get_or_create(name='The Great Book of Ed', publisher_id=p.id)
self.assertFalse(created)
# The publisher should have three books.
self.assertEqual(p.books.count(), 3)
| bsd-3-clause | 7,249,650,782,695,270,000 | 4,871,913,870,109,406,000 | 38.6875 | 91 | 0.64252 | false |
kmoocdev/edx-platform | common/lib/xmodule/xmodule/tests/test_word_cloud.py | 28 | 1793 | # -*- coding: utf-8 -*-
"""Test for Word cloud Xmodule functional logic."""
from webob.multidict import MultiDict
from xmodule.word_cloud_module import WordCloudDescriptor
from . import LogicTest
class WordCloudModuleTest(LogicTest):
"""Logic tests for Word Cloud Xmodule."""
descriptor_class = WordCloudDescriptor
raw_field_data = {
'all_words': {'cat': 10, 'dog': 5, 'mom': 1, 'dad': 2},
'top_words': {'cat': 10, 'dog': 5, 'dad': 2},
'submitted': False
}
def test_bad_ajax_request(self):
"Make sure that answer for incorrect request is error json"
response = self.ajax_request('bad_dispatch', {})
self.assertDictEqual(response, {
'status': 'fail',
'error': 'Unknown Command!'
})
def test_good_ajax_request(self):
"Make shure that ajax request works correctly"
post_data = MultiDict(('student_words[]', word) for word in ['cat', 'cat', 'dog', 'sun'])
response = self.ajax_request('submit', post_data)
self.assertEqual(response['status'], 'success')
self.assertEqual(response['submitted'], True)
self.assertEqual(response['total_count'], 22)
self.assertDictEqual(
response['student_words'],
{'sun': 1, 'dog': 6, 'cat': 12}
)
self.assertListEqual(
response['top_words'],
[{'text': 'dad', 'size': 2, 'percent': 9.0},
{'text': 'sun', 'size': 1, 'percent': 5.0},
{'text': 'dog', 'size': 6, 'percent': 27.0},
{'text': 'mom', 'size': 1, 'percent': 5.0},
{'text': 'cat', 'size': 12, 'percent': 54.0}]
)
self.assertEqual(
100.0,
sum(i['percent'] for i in response['top_words']))
| agpl-3.0 | -226,261,857,996,854,720 | -5,406,016,593,810,160,000 | 36.354167 | 97 | 0.552705 | false |
stu314159/pyNFC | partition_suggestion.py | 1 | 5450 | """
partition_suggestion.py
purpose: Given Nx, Ny, Nz and a number of processes, suggest a partitioning strategy that would result in
more-or-less cube-shaped partitions
"""
import random
from collections import deque
def random_partition(factors,n_factors):
"""
factors = list of prime factors of a number
n_factors = three-tuple [#px, #py, #pz] indicating the number
of prime factors that should be chosen (at random) for each direction
returns [px,py,pz]
"""
l_factors = factors[:] # make a local copy
p_list = [1,1,1]
for d in range(3):
for i in range(n_factors[d]):
c = random.choice(l_factors)
l_factors.remove(c)
p_list[d]*=c
return p_list
class Partition:
def __init__(self,Nx,Ny,Nz,part):
self.Nx = Nx
self.Ny = Ny
self.Nz = Nz
self.px = part[0]
self.py = part[1]
self.pz = part[2]
self.set_score()
def set_score(self):
lx = self.Nx/float(self.px)
ly = self.Ny/float(self.py)
lz = self.Nz/float(self.pz)
# obviously, change this measure if it proves crappy.
# estimate surface to volume ratio of the typical partition
vol = lx*ly*lz
surf = 2.*lx*ly + 2.*lx*lz + 2.*lz*ly
self.score = surf/vol
#vol = lx*ly*lz
#surf = 2.*lx*ly + 2.*lx*lz + 2.*lz*ly
#interior = vol - surf
#self.score = (surf/interior)
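    # Worked example of the score (for intuition): a 100 x 100 x 100 domain
    # split 2 x 2 x 2 gives 50-cube blocks, so surf/vol = 6*50**2 / 50**3
    # = 0.12, while a 1 x 2 x 4 split of the same domain gives 100 x 50 x 25
    # blocks with surf/vol = 0.14 -- flatter blocks score higher (worse).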
def get_score(self):
return self.score
def get_partition(self):
return [self.px, self.py, self.pz]
def partitionfunc(n,k,l=1):
'''n is the integer to partition, k is the length of partitions, l is the min partition element size'''
if k < 1:
raise StopIteration
if k == 1:
if n >= l:
yield (n,)
raise StopIteration
for i in range(l,n+1):
for result in partitionfunc(n-i,k-1,i):
yield (i,)+result
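# Worked example (for intuition): partitionfunc(6, 3) yields the
# non-decreasing 3-part partitions of 6 -> (1, 1, 4), (1, 2, 3), (2, 2, 2).
# part_advisor() uses this to split the count of prime factors of num_procs
# into three groups, one per axis.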
def primes(n):
"""
return a list containing the prime factorization of positive integer n
n = positive integer
"""
primfac = []
d = 2
while d*d <= n:
while (n % d) == 0:
primfac.append(d) # supposing you want multiple factors repeated
n //= d
d += 1
if n > 1:
primfac.append(n)
return primfac
def factors(n):
"""
n = positive integer
returns a list of the factors of n in (more-or-less) standard form
"""
return filter(lambda i: n % i == 0, range(1, n + 1))
def part_advisor(Nx,Ny,Nz,num_procs, numTrials = 2000):
"""
Nx = number of points in the x-direction
Ny = number of points in the y-direction
Nz = number of points in the z-direction
num_procs = the number of partitions to create
returns a suggested px,py,pz
"""
p_facts = primes(num_procs)
p_facts.append(1)
p_facts.append(1) # to allow effectively 1-D partitioning if that is best....
bestScore = float("inf")
bestPartition = None
#numTrials = 4000 # not clear to me how big this should be...
for p in partitionfunc(len(p_facts),3):
#print p
"""
set up some partitions and keep track of the one with the best score.
"""
p_deque = deque(p);
for i in range(3):
p_deque.rotate(1) #shift the groupings
# take numTrials samples
for trial in range(numTrials):
r_part = random_partition(p_facts,p_deque)
sample_partition = Partition(Nx,Ny,Nz,r_part)
sample_score = sample_partition.get_score()
                if sample_score < bestScore:
                    bestScore = sample_score
                    bestPartition = Partition(Nx,Ny,Nz,r_part)
return bestPartition.get_partition()
"""
partitionfunc will let me generate groupings of the prime factors
"""
"""
if there are fewer than 3 prime factors, then there is no way to solve
the problem; an error should be returned and the user should be prompted
to provide a value for num_procs that has more prime factors.
"""
if len(p_facts)<3:
print 'Error! num_procs is prime and cannot be used for 3D partitioning'
raise RuntimeError
print p_facts
"""
concept: use the prime factors listed in p_facts
and put them into 3 groups such that, as close as possible,
Nx/g1, Ny/g2 and Nz/g3 are nearly equal.
To do this, for each grouping, I will compute the variance of the partition dimensions.
I will then select the grouping that has the lowest variance.
1. Enumerate all of the possible groupings of the prime factors.
2. Compute the partition dimension variance for each grouping
3. Pick the smallest one.
"""
if __name__=="__main__":
"""
write test code here...
"""
Nx = 150
Ny = 150
Nz = 1000
num_procs = 8
partition = part_advisor(Nx,Ny,Nz,num_procs)
bestPartition = Partition(Nx,Ny,Nz,partition)
print 'Best partition found has score = %g \n'%bestPartition.get_score()
print bestPartition.get_partition()
print 'Block sizes approximately %i x %i x %i'%(Nx/partition[0],Ny/partition[1],Nz/partition[2])
| mit | -7,052,538,094,187,769,000 | 970,659,899,947,123,800 | 25.715686 | 107 | 0.572661 | false |
Softmotions/edx-platform | lms/djangoapps/instructor/tests/test_api_email_localization.py | 64 | 3576 | # -*- coding: utf-8 -*-
"""
Unit tests for the localization of emails sent by instructor.api methods.
"""
from django.core import mail
from django.core.urlresolvers import reverse
from nose.plugins.attrib import attr
from courseware.tests.factories import InstructorFactory
from lang_pref import LANGUAGE_KEY
from student.models import CourseEnrollment
from student.tests.factories import UserFactory
from openedx.core.djangoapps.user_api.preferences.api import set_user_preference
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
@attr('shard_1')
class TestInstructorAPIEnrollmentEmailLocalization(SharedModuleStoreTestCase):
"""
Test whether the enroll, unenroll and beta role emails are sent in the
    proper language, i.e. the student's language.
"""
@classmethod
def setUpClass(cls):
super(TestInstructorAPIEnrollmentEmailLocalization, cls).setUpClass()
cls.course = CourseFactory.create()
def setUp(self):
super(TestInstructorAPIEnrollmentEmailLocalization, self).setUp()
# Platform language is English, instructor's language is Chinese,
# student's language is French, so the emails should all be sent in
# French.
self.instructor = InstructorFactory(course_key=self.course.id)
set_user_preference(self.instructor, LANGUAGE_KEY, 'zh-cn')
self.client.login(username=self.instructor.username, password='test')
self.student = UserFactory.create()
set_user_preference(self.student, LANGUAGE_KEY, 'fr')
def update_enrollement(self, action, student_email):
"""
Update the current student enrollment status.
"""
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
args = {'identifiers': student_email, 'email_students': 'true', 'action': action, 'reason': 'testing'}
response = self.client.post(url, args)
return response
def check_outbox_is_french(self):
"""
Check that the email outbox contains exactly one message for which both
the message subject and body contain a certain French string.
"""
return self.check_outbox(u"Vous avez été")
def check_outbox(self, expected_message):
"""
Check that the email outbox contains exactly one message for which both
the message subject and body contain a certain string.
"""
self.assertEqual(1, len(mail.outbox))
self.assertIn(expected_message, mail.outbox[0].subject)
self.assertIn(expected_message, mail.outbox[0].body)
def test_enroll(self):
self.update_enrollement("enroll", self.student.email)
self.check_outbox_is_french()
def test_unenroll(self):
CourseEnrollment.enroll(
self.student,
self.course.id
)
self.update_enrollement("unenroll", self.student.email)
self.check_outbox_is_french()
def test_set_beta_role(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
self.client.post(url, {'identifiers': self.student.email, 'action': 'add', 'email_students': 'true'})
self.check_outbox_is_french()
def test_enroll_unsubscribed_student(self):
# Student is unknown, so the platform language should be used
self.update_enrollement("enroll", "newuser@hotmail.com")
self.check_outbox("You have been")
| agpl-3.0 | 3,744,077,181,390,374,400 | -3,543,775,593,661,486,600 | 38.274725 | 112 | 0.691382 | false |
hubertokf/lupsEdgeServer | projects/lupsEdgeServer/EngineRules/setuptools-20.10.1/setuptools/lib2to3_ex.py | 907 | 1998 | """
Customized Mixin2to3 support:
- adds support for converting doctests
This module raises an ImportError on Python 2.
"""
from distutils.util import Mixin2to3 as _Mixin2to3
from distutils import log
from lib2to3.refactor import RefactoringTool, get_fixers_from_package
import setuptools
class DistutilsRefactoringTool(RefactoringTool):
def log_error(self, msg, *args, **kw):
log.error(msg, *args)
def log_message(self, msg, *args):
log.info(msg, *args)
def log_debug(self, msg, *args):
log.debug(msg, *args)
class Mixin2to3(_Mixin2to3):
def run_2to3(self, files, doctests = False):
# See of the distribution option has been set, otherwise check the
# setuptools default.
if self.distribution.use_2to3 is not True:
return
if not files:
return
log.info("Fixing "+" ".join(files))
self.__build_fixer_names()
self.__exclude_fixers()
if doctests:
if setuptools.run_2to3_on_doctests:
r = DistutilsRefactoringTool(self.fixer_names)
r.refactor(files, write=True, doctests_only=True)
else:
_Mixin2to3.run_2to3(self, files)
def __build_fixer_names(self):
if self.fixer_names: return
self.fixer_names = []
for p in setuptools.lib2to3_fixer_packages:
self.fixer_names.extend(get_fixers_from_package(p))
if self.distribution.use_2to3_fixers is not None:
for p in self.distribution.use_2to3_fixers:
self.fixer_names.extend(get_fixers_from_package(p))
def __exclude_fixers(self):
excluded_fixers = getattr(self, 'exclude_fixers', [])
if self.distribution.use_2to3_exclude_fixers is not None:
excluded_fixers.extend(self.distribution.use_2to3_exclude_fixers)
for fixer_name in excluded_fixers:
if fixer_name in self.fixer_names:
self.fixer_names.remove(fixer_name)
| mit | 4,799,978,503,090,113,000 | 8,400,670,697,184,610,000 | 33.448276 | 77 | 0.638639 | false |
domsooch/mapreduce-python | mapreduce/handlers.py | 4 | 68593 | #!/usr/bin/env python
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Defines executor tasks handlers for MapReduce implementation."""
# pylint: disable=protected-access
# pylint: disable=g-bad-name
import datetime
import logging
import math
import os
import random
import sys
import time
import traceback
from mapreduce.third_party import simplejson
from google.appengine.ext import ndb
from google.appengine import runtime
from google.appengine.api import datastore_errors
from google.appengine.api import logservice
from google.appengine.api import modules
from google.appengine.api import taskqueue
from google.appengine.ext import db
from mapreduce import base_handler
from mapreduce import context
from mapreduce import errors
from mapreduce import input_readers
from mapreduce import model
from mapreduce import operation
from mapreduce import output_writers
from mapreduce import parameters
from mapreduce import util
from mapreduce.api import map_job
from mapreduce.api.map_job import shard_life_cycle
from google.appengine.runtime import apiproxy_errors
# pylint: disable=g-import-not-at-top
try:
import cloudstorage
  # In the 25 runtime, the above code will be scrubbed to import the stub
  # version of cloudstorage. All occurrences of the following if condition in
  # the MR codebase are there to tell it apart.
  # TODO(user): Remove after 25 runtime MR is abandoned.
if hasattr(cloudstorage, "_STUB"):
cloudstorage = None
except ImportError:
cloudstorage = None # CloudStorage library not available
# A guide to logging.
# log.critical: messages user absolutely should see, e.g. failed job.
# log.error: exceptions during processing user data, or unexpected
# errors detected by mr framework.
# log.warning: errors mr framework knows how to handle.
# log.info: other expected events.
# Set of strings of various test-injected faults.
_TEST_INJECTED_FAULTS = set()
def _run_task_hook(hooks, method, task, queue_name):
"""Invokes hooks.method(task, queue_name).
Args:
hooks: A hooks.Hooks instance or None.
method: The name of the method to invoke on the hooks class e.g.
"enqueue_kickoff_task".
task: The taskqueue.Task to pass to the hook method.
queue_name: The name of the queue to pass to the hook method.
Returns:
True if the hooks.Hooks instance handled the method, False otherwise.
"""
if hooks is not None:
try:
getattr(hooks, method)(task, queue_name)
except NotImplementedError:
# Use the default task addition implementation.
return False
return True
return False
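
# Sketch of a hooks object that handles its own task enqueueing; only the
# method name comes from the docstring above, the body is an assumption:
#
#   class MyHooks(hooks.Hooks):
#
#     def enqueue_kickoff_task(self, task, queue_name):
#       task.add(queue_name=queue_name)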
class MapperWorkerCallbackHandler(base_handler.HugeTaskHandler):
"""Callback handler for mapreduce worker task."""
# These directives instruct self.__return() how to set state and enqueue task.
_TASK_DIRECTIVE = util._enum(
# Task is running as expected.
PROCEED_TASK="proceed_task",
      # Need to retry task. Lock was NOT acquired when the error occurred.
# Don't change payload or datastore.
RETRY_TASK="retry_task",
      # Need to retry task. Lock was acquired when the error occurred.
# Don't change payload or datastore.
RETRY_SLICE="retry_slice",
# Drop the task (due to duplicated task). Must log permanent drop.
DROP_TASK="drop_task",
# See handlers.MapperWorkerCallbackHandler._attempt_slice_recovery.
RECOVER_SLICE="recover_slice",
# Need to retry the shard.
RETRY_SHARD="retry_shard",
# Need to drop task and fail the shard. Log permanent failure.
FAIL_TASK="fail_task",
# Need to abort the shard.
ABORT_SHARD="abort_shard")
def __init__(self, *args):
"""Constructor."""
super(MapperWorkerCallbackHandler, self).__init__(*args)
self._time = time.time
def _drop_gracefully(self):
"""Drop worker task gracefully.
Set current shard_state to failed. Controller logic will take care of
other shards and the entire MR.
"""
shard_id = self.request.headers[util._MR_SHARD_ID_TASK_HEADER]
mr_id = self.request.headers[util._MR_ID_TASK_HEADER]
shard_state, mr_state = db.get([
model.ShardState.get_key_by_shard_id(shard_id),
model.MapreduceState.get_key_by_job_id(mr_id)])
if shard_state and shard_state.active:
shard_state.set_for_failure()
config = util.create_datastore_write_config(mr_state.mapreduce_spec)
shard_state.put(config=config)
def _try_acquire_lease(self, shard_state, tstate):
"""Validate datastore and the task payload are consistent.
If so, attempt to get a lease on this slice's execution.
See model.ShardState doc on slice_start_time.
Args:
shard_state: model.ShardState from datastore.
      tstate: model.TransientShardState from taskqueue payload.
Returns:
A _TASK_DIRECTIVE enum. PROCEED_TASK if lock is acquired.
RETRY_TASK if task should be retried, DROP_TASK if task should
      be dropped. Only old tasks (compared to datastore state)
      will be dropped. Future tasks are retried until they naturally
      become old so that the MR never gets stuck.
"""
# Controller will tally shard_states and properly handle the situation.
if not shard_state:
logging.warning("State not found for shard %s; Possible spurious task "
"execution. Dropping this task.",
tstate.shard_id)
return self._TASK_DIRECTIVE.DROP_TASK
if not shard_state.active:
logging.warning("Shard %s is not active. Possible spurious task "
"execution. Dropping this task.", tstate.shard_id)
logging.warning(str(shard_state))
return self._TASK_DIRECTIVE.DROP_TASK
# Validate shard retry count.
if shard_state.retries > tstate.retries:
logging.warning(
"Got shard %s from previous shard retry %s. Possible spurious "
"task execution. Dropping this task.",
tstate.shard_id,
tstate.retries)
logging.warning(str(shard_state))
return self._TASK_DIRECTIVE.DROP_TASK
elif shard_state.retries < tstate.retries:
# By the end of last slice, task enqueue succeeded but datastore commit
# failed. That transaction will be retried and adding the same task
# will pass.
logging.warning(
"ShardState for %s is behind slice. Waiting for it to catch up",
shard_state.shard_id)
return self._TASK_DIRECTIVE.RETRY_TASK
# Validate slice id.
# Taskqueue executes old successful tasks.
if shard_state.slice_id > tstate.slice_id:
logging.warning(
"Task %s-%s is behind ShardState %s. Dropping task.""",
tstate.shard_id, tstate.slice_id, shard_state.slice_id)
return self._TASK_DIRECTIVE.DROP_TASK
# By the end of last slice, task enqueue succeeded but datastore commit
# failed. That transaction will be retried and adding the same task
# will pass. User data is duplicated in this case.
elif shard_state.slice_id < tstate.slice_id:
logging.warning(
"Task %s-%s is ahead of ShardState %s. Waiting for it to catch up.",
tstate.shard_id, tstate.slice_id, shard_state.slice_id)
return self._TASK_DIRECTIVE.RETRY_TASK
# Check potential duplicated tasks for the same slice.
# See model.ShardState doc.
if shard_state.slice_start_time:
countdown = self._wait_time(shard_state,
parameters._LEASE_DURATION_SEC)
if countdown > 0:
logging.warning(
"Last retry of slice %s-%s may be still running."
"Will try again in %s seconds", tstate.shard_id, tstate.slice_id,
countdown)
# TODO(user): There might be a better way. Taskqueue's countdown
# only applies to add new tasks, not retry of tasks.
# Reduce contention.
time.sleep(countdown)
return self._TASK_DIRECTIVE.RETRY_TASK
# lease could have expired. Verify with logs API.
else:
if self._wait_time(shard_state,
parameters._MAX_LEASE_DURATION_SEC):
if not self._has_old_request_ended(shard_state):
logging.warning(
"Last retry of slice %s-%s is still in flight with request_id "
"%s. Will try again later.", tstate.shard_id, tstate.slice_id,
shard_state.slice_request_id)
return self._TASK_DIRECTIVE.RETRY_TASK
else:
logging.warning(
"Last retry of slice %s-%s has no log entry and has"
"timed out after %s seconds",
tstate.shard_id, tstate.slice_id,
parameters._MAX_LEASE_DURATION_SEC)
# Lease expired or slice_start_time not set.
config = util.create_datastore_write_config(tstate.mapreduce_spec)
@db.transactional(retries=5)
def _tx():
"""Use datastore to set slice_start_time to now.
      If this fails for any reason, an error is raised to retry the task (hence
      all the previous validation code). The task will eventually die naturally.
Raises:
Rollback: If the shard state is missing.
Returns:
A _TASK_DIRECTIVE enum.
"""
fresh_state = model.ShardState.get_by_shard_id(tstate.shard_id)
if not fresh_state:
logging.warning("ShardState missing.")
raise db.Rollback()
if (fresh_state.active and
fresh_state.slice_id == shard_state.slice_id and
fresh_state.slice_start_time == shard_state.slice_start_time):
shard_state.slice_start_time = datetime.datetime.now()
shard_state.slice_request_id = os.environ.get("REQUEST_LOG_ID")
shard_state.acquired_once = True
shard_state.put(config=config)
return self._TASK_DIRECTIVE.PROCEED_TASK
else:
logging.warning(
"Contention on slice %s-%s execution. Will retry again.",
tstate.shard_id, tstate.slice_id)
# One proposer should win. In case all lost, back off arbitrarily.
time.sleep(random.randrange(1, 5))
return self._TASK_DIRECTIVE.RETRY_TASK
return _tx()
def _has_old_request_ended(self, shard_state):
"""Whether previous slice retry has ended according to Logs API.
Args:
shard_state: shard state.
Returns:
True if the request of previous slice retry has ended. False if it has
not or unknown.
"""
assert shard_state.slice_start_time is not None
assert shard_state.slice_request_id is not None
request_ids = [shard_state.slice_request_id]
logs = list(logservice.fetch(
request_ids=request_ids,
# TODO(user): Remove after b/8173230 is fixed.
module_versions=[(os.environ["CURRENT_MODULE_ID"],
modules.get_current_version_name())]))
if not logs or not logs[0].finished:
return False
return True
def _wait_time(self, shard_state, secs, now=datetime.datetime.now):
"""Time to wait until slice_start_time is secs ago from now.
Args:
shard_state: shard state.
secs: duration in seconds.
now: a func that gets now.
Returns:
      0 if no wait. A positive int in seconds otherwise. Always rounded up.
"""
assert shard_state.slice_start_time is not None
delta = now() - shard_state.slice_start_time
duration = datetime.timedelta(seconds=secs)
if delta < duration:
return util.total_seconds(duration - delta)
else:
return 0
def _try_free_lease(self, shard_state, slice_retry=False):
"""Try to free lease.
A lightweight transaction to update shard_state and unset
slice_start_time to allow the next retry to happen without blocking.
We don't care if this fails or not because the lease will expire
anyway.
Under normal execution, _save_state_and_schedule_next is the exit point.
It updates/saves shard state and schedules the next slice or returns.
Other exit points are:
1. _are_states_consistent: at the beginning of handle, checks
if datastore states and the task are in sync.
If not, raise or return.
2. _attempt_slice_retry: may raise exception to taskqueue.
3. _save_state_and_schedule_next: may raise exception when taskqueue/db
unreachable.
This handler should try to free the lease on every exceptional exit point.
Args:
shard_state: model.ShardState.
slice_retry: whether to count this as a failed slice execution.
"""
@db.transactional
def _tx():
fresh_state = model.ShardState.get_by_shard_id(shard_state.shard_id)
if fresh_state and fresh_state.active:
# Free lease.
fresh_state.slice_start_time = None
fresh_state.slice_request_id = None
if slice_retry:
fresh_state.slice_retries += 1
fresh_state.put()
try:
_tx()
# pylint: disable=broad-except
except Exception, e:
logging.warning(e)
logging.warning(
"Release lock for shard %s failed. Wait for lease to expire.",
shard_state.shard_id)
def _maintain_LC(self, obj, slice_id, last_slice=False, begin_slice=True,
shard_ctx=None, slice_ctx=None):
"""Makes sure shard life cycle interface are respected.
Args:
obj: the obj that may have implemented _ShardLifeCycle.
slice_id: current slice_id
last_slice: whether this is the last slice.
begin_slice: whether this is the beginning or the end of a slice.
shard_ctx: shard ctx for dependency injection. If None, it will be read
from self.
slice_ctx: slice ctx for dependency injection. If None, it will be read
from self.
"""
if obj is None or not isinstance(obj, shard_life_cycle._ShardLifeCycle):
return
shard_context = shard_ctx or self.shard_context
slice_context = slice_ctx or self.slice_context
if begin_slice:
if slice_id == 0:
obj.begin_shard(shard_context)
obj.begin_slice(slice_context)
else:
obj.end_slice(slice_context)
if last_slice:
obj.end_shard(shard_context)
def handle(self):
"""Handle request.
This method has to be careful to pass the same ShardState instance to
    its subroutine calls if those calls mutate or read from ShardState.
Note especially that Context instance caches and updates the ShardState
instance.
Returns:
Set HTTP status code and always returns None.
"""
# Reconstruct basic states.
self._start_time = self._time()
shard_id = self.request.headers[util._MR_SHARD_ID_TASK_HEADER]
mr_id = self.request.headers[util._MR_ID_TASK_HEADER]
spec = model.MapreduceSpec._get_mapreduce_spec(mr_id)
shard_state, control = db.get([
model.ShardState.get_key_by_shard_id(shard_id),
model.MapreduceControl.get_key_by_job_id(mr_id),
])
# Set context before any IO code is called.
ctx = context.Context(spec, shard_state,
task_retry_count=self.task_retry_count())
context.Context._set(ctx)
# Unmarshall input reader, output writer, and other transient states.
tstate = model.TransientShardState.from_request(self.request)
# Try acquire a lease on the shard.
if shard_state:
is_this_a_retry = shard_state.acquired_once
task_directive = self._try_acquire_lease(shard_state, tstate)
if task_directive in (self._TASK_DIRECTIVE.RETRY_TASK,
self._TASK_DIRECTIVE.DROP_TASK):
return self.__return(shard_state, tstate, task_directive)
assert task_directive == self._TASK_DIRECTIVE.PROCEED_TASK
# Abort shard if received signal.
if control and control.command == model.MapreduceControl.ABORT:
task_directive = self._TASK_DIRECTIVE.ABORT_SHARD
return self.__return(shard_state, tstate, task_directive)
# Retry shard if user disabled slice retry.
if (is_this_a_retry and
parameters.config.TASK_MAX_DATA_PROCESSING_ATTEMPTS <= 1):
task_directive = self._TASK_DIRECTIVE.RETRY_SHARD
return self.__return(shard_state, tstate, task_directive)
# TODO(user): Find a better way to set these per thread configs.
# E.g. what if user change it?
util._set_ndb_cache_policy()
job_config = map_job.JobConfig._to_map_job_config(
spec,
os.environ.get("HTTP_X_APPENGINE_QUEUENAME"))
job_context = map_job.JobContext(job_config)
self.shard_context = map_job.ShardContext(job_context, shard_state)
self.slice_context = map_job.SliceContext(self.shard_context,
shard_state,
tstate)
try:
slice_id = tstate.slice_id
self._maintain_LC(tstate.handler, slice_id)
self._maintain_LC(tstate.input_reader, slice_id)
self._maintain_LC(tstate.output_writer, slice_id)
if is_this_a_retry:
task_directive = self._attempt_slice_recovery(shard_state, tstate)
if task_directive != self._TASK_DIRECTIVE.PROCEED_TASK:
return self.__return(shard_state, tstate, task_directive)
last_slice = self._process_inputs(
tstate.input_reader, shard_state, tstate, ctx)
self._maintain_LC(tstate.handler, slice_id, last_slice, False)
self._maintain_LC(tstate.input_reader, slice_id, last_slice, False)
self._maintain_LC(tstate.output_writer, slice_id, last_slice, False)
ctx.flush()
if last_slice:
# Since there was no exception raised, we can finalize output writer
# safely. Otherwise writer might be stuck in some bad state.
if (tstate.output_writer and
isinstance(tstate.output_writer, output_writers.OutputWriter)):
# It's possible that finalization is successful but
# saving state failed. In this case this shard will retry upon
# finalization error.
# TODO(user): make finalize method idempotent!
tstate.output_writer.finalize(ctx, shard_state)
shard_state.set_for_success()
# pylint: disable=broad-except
except Exception, e:
logging.warning("Shard %s got error.", shard_state.shard_id)
logging.error(traceback.format_exc())
# Fail fast.
if type(e) is errors.FailJobError:
logging.error("Got FailJobError.")
task_directive = self._TASK_DIRECTIVE.FAIL_TASK
else:
task_directive = self._TASK_DIRECTIVE.RETRY_SLICE
self.__return(shard_state, tstate, task_directive)
def __return(self, shard_state, tstate, task_directive):
"""Handler should always call this as the last statement."""
task_directive = self._set_state(shard_state, tstate, task_directive)
self._save_state_and_schedule_next(shard_state, tstate, task_directive)
def _process_inputs(self,
input_reader,
shard_state,
tstate,
ctx):
"""Read inputs, process them, and write out outputs.
This is the core logic of MapReduce. It reads inputs from input reader,
invokes user specified mapper function, and writes output with
output writer. It also updates shard_state accordingly.
e.g. if shard processing is done, set shard_state.active to False.
If errors.FailJobError is caught, it will fail this MR job.
All other exceptions will be logged and raised to taskqueue for retry
until the number of retries exceeds a limit.
Args:
input_reader: input reader.
shard_state: shard state.
tstate: transient shard state.
ctx: mapreduce context.
Returns:
Whether this shard has finished processing all its input split.
"""
processing_limit = self._processing_limit(tstate.mapreduce_spec)
if processing_limit == 0:
return
finished_shard = True
# Input reader may not be an iterator. It is only a container.
iterator = iter(input_reader)
while True:
try:
entity = iterator.next()
except StopIteration:
break
# Reading input got exception. If we assume
      # 1. The input reader has done enough retries.
# 2. The input reader can still serialize correctly after this exception.
# 3. The input reader, upon resume, will try to re-read this failed
# record.
# 4. This exception doesn't imply the input reader is permanently stuck.
# we can serialize current slice immediately to avoid duplicated
# outputs.
# TODO(user): Validate these assumptions on all readers. MR should
# also have a way to detect fake forward progress.
if isinstance(entity, db.Model):
shard_state.last_work_item = repr(entity.key())
elif isinstance(entity, ndb.Model):
shard_state.last_work_item = repr(entity.key)
else:
shard_state.last_work_item = repr(entity)[:100]
processing_limit -= 1
if not self._process_datum(
entity, input_reader, ctx, tstate):
finished_shard = False
break
elif processing_limit == 0:
finished_shard = False
break
# Flush context and its pools.
self.slice_context.incr(
context.COUNTER_MAPPER_WALLTIME_MS,
int((self._time() - self._start_time)*1000))
return finished_shard
def _process_datum(self, data, input_reader, ctx, transient_shard_state):
"""Process a single data piece.
Call mapper handler on the data.
Args:
data: a datum to process.
input_reader: input reader.
ctx: mapreduce context
transient_shard_state: transient shard state.
Returns:
True if scan should be continued, False if scan should be stopped.
"""
if data is not input_readers.ALLOW_CHECKPOINT:
self.slice_context.incr(context.COUNTER_MAPPER_CALLS)
handler = transient_shard_state.handler
if isinstance(handler, map_job.Mapper):
handler(self.slice_context, data)
else:
if input_reader.expand_parameters:
result = handler(*data)
else:
result = handler(data)
if util.is_generator(result):
for output in result:
if isinstance(output, operation.Operation):
output(ctx)
else:
output_writer = transient_shard_state.output_writer
if not output_writer:
logging.warning(
"Handler yielded %s, but no output writer is set.", output)
else:
output_writer.write(output)
if self._time() - self._start_time >= parameters.config._SLICE_DURATION_SEC:
return False
return True
def _set_state(self, shard_state, tstate, task_directive):
"""Set shard_state and tstate based on task_directive.
Args:
shard_state: model.ShardState for current shard.
tstate: model.TransientShardState for current shard.
task_directive: self._TASK_DIRECTIVE for current shard.
Returns:
A _TASK_DIRECTIVE enum.
PROCEED_TASK if task should proceed normally.
RETRY_SHARD if shard should be retried.
RETRY_SLICE if slice should be retried.
      FAIL_TASK if shard should fail.
RECOVER_SLICE if slice should be recovered.
ABORT_SHARD if shard should be aborted.
RETRY_TASK if task should be retried.
DROP_TASK if task should be dropped.
"""
if task_directive in (self._TASK_DIRECTIVE.RETRY_TASK,
self._TASK_DIRECTIVE.DROP_TASK):
return task_directive
if task_directive == self._TASK_DIRECTIVE.ABORT_SHARD:
shard_state.set_for_abort()
return task_directive
if task_directive == self._TASK_DIRECTIVE.PROCEED_TASK:
shard_state.advance_for_next_slice()
tstate.advance_for_next_slice()
return task_directive
if task_directive == self._TASK_DIRECTIVE.RECOVER_SLICE:
tstate.advance_for_next_slice(recovery_slice=True)
shard_state.advance_for_next_slice(recovery_slice=True)
return task_directive
if task_directive == self._TASK_DIRECTIVE.RETRY_SLICE:
task_directive = self._attempt_slice_retry(shard_state, tstate)
if task_directive == self._TASK_DIRECTIVE.RETRY_SHARD:
task_directive = self._attempt_shard_retry(shard_state, tstate)
if task_directive == self._TASK_DIRECTIVE.FAIL_TASK:
shard_state.set_for_failure()
return task_directive
def _save_state_and_schedule_next(self, shard_state, tstate, task_directive):
"""Save state and schedule task.
Save shard state to datastore.
Schedule next slice if needed.
Set HTTP response code.
No modification to any shard_state or tstate.
Args:
shard_state: model.ShardState for current shard.
tstate: model.TransientShardState for current shard.
task_directive: enum _TASK_DIRECTIVE.
Returns:
The task to retry if applicable.
"""
spec = tstate.mapreduce_spec
if task_directive == self._TASK_DIRECTIVE.DROP_TASK:
return
if task_directive in (self._TASK_DIRECTIVE.RETRY_SLICE,
self._TASK_DIRECTIVE.RETRY_TASK):
# Set HTTP code to 500.
return self.retry_task()
elif task_directive == self._TASK_DIRECTIVE.ABORT_SHARD:
logging.info("Aborting shard %d of job '%s'",
shard_state.shard_number, shard_state.mapreduce_id)
task = None
elif task_directive == self._TASK_DIRECTIVE.FAIL_TASK:
logging.critical("Shard %s failed permanently.", shard_state.shard_id)
task = None
elif task_directive == self._TASK_DIRECTIVE.RETRY_SHARD:
logging.warning("Shard %s is going to be attempted for the %s time.",
shard_state.shard_id,
shard_state.retries + 1)
task = self._state_to_task(tstate, shard_state)
elif task_directive == self._TASK_DIRECTIVE.RECOVER_SLICE:
logging.warning("Shard %s slice %s is being recovered.",
shard_state.shard_id,
shard_state.slice_id)
task = self._state_to_task(tstate, shard_state)
else:
assert task_directive == self._TASK_DIRECTIVE.PROCEED_TASK
countdown = self._get_countdown_for_next_slice(spec)
task = self._state_to_task(tstate, shard_state, countdown=countdown)
# Prepare parameters for db transaction and taskqueue.
queue_name = os.environ.get("HTTP_X_APPENGINE_QUEUENAME",
# For test only.
# TODO(user): Remove this.
"default")
config = util.create_datastore_write_config(spec)
@db.transactional(retries=5)
def _tx():
"""The Transaction helper."""
fresh_shard_state = model.ShardState.get_by_shard_id(tstate.shard_id)
if not fresh_shard_state:
raise db.Rollback()
if (not fresh_shard_state.active or
"worker_active_state_collision" in _TEST_INJECTED_FAULTS):
logging.warning("Shard %s is not active. Possible spurious task "
"execution. Dropping this task.", tstate.shard_id)
logging.warning("Datastore's %s", str(fresh_shard_state))
logging.warning("Slice's %s", str(shard_state))
return
fresh_shard_state.copy_from(shard_state)
fresh_shard_state.put(config=config)
# Add task in the same datastore transaction.
# This way we guarantee taskqueue is never behind datastore states.
# Old tasks will be dropped.
# Future task won't run until datastore states catches up.
if fresh_shard_state.active:
# Not adding task transactionally.
# transactional enqueue requires tasks with no name.
self._add_task(task, spec, queue_name)
try:
_tx()
except (datastore_errors.Error,
taskqueue.Error,
runtime.DeadlineExceededError,
apiproxy_errors.Error), e:
logging.warning(
"Can't transactionally continue shard. "
"Will retry slice %s %s for the %s time.",
tstate.shard_id,
tstate.slice_id,
self.task_retry_count() + 1)
self._try_free_lease(shard_state)
raise e
def _attempt_slice_recovery(self, shard_state, tstate):
"""Recover a slice.
This is run when a slice had been previously attempted and output
may have been written. If an output writer requires slice recovery,
we run those logic to remove output duplicates. Otherwise we just retry
the slice.
If recovery is needed, then the entire slice will be dedicated
to recovery logic. No data processing will take place. Thus we call
the slice "recovery slice". This is needed for correctness:
An output writer instance can be out of sync from its physical
    medium only when the slice dies after acquiring the shard lock but before
committing shard state to db. The worst failure case is when
shard state failed to commit after the NAMED task for the next slice was
added. Thus, recovery slice has a special logic to increment current
slice_id n to n+2. If the task for n+1 had been added, it will be dropped
because it is behind shard state.
Args:
shard_state: an instance of Model.ShardState.
tstate: an instance of Model.TransientShardState.
Returns:
_TASK_DIRECTIVE.PROCEED_TASK to continue with this retry.
_TASK_DIRECTIVE.RECOVER_SLICE to recover this slice.
The next slice will start at the same input as
this slice but output to a new instance of output writer.
Combining outputs from all writer instances is up to implementation.
"""
mapper_spec = tstate.mapreduce_spec.mapper
if not (tstate.output_writer and
tstate.output_writer._supports_slice_recovery(mapper_spec)):
return self._TASK_DIRECTIVE.PROCEED_TASK
tstate.output_writer = tstate.output_writer._recover(
tstate.mapreduce_spec, shard_state.shard_number,
shard_state.retries + 1)
return self._TASK_DIRECTIVE.RECOVER_SLICE
def _attempt_shard_retry(self, shard_state, tstate):
"""Whether to retry shard.
This method may modify shard_state and tstate to prepare for retry or fail.
Args:
shard_state: model.ShardState for current shard.
tstate: model.TransientShardState for current shard.
Returns:
A _TASK_DIRECTIVE enum. RETRY_SHARD if shard should be retried.
FAIL_TASK otherwise.
"""
shard_attempts = shard_state.retries + 1
if shard_attempts >= parameters.config.SHARD_MAX_ATTEMPTS:
logging.warning(
"Shard attempt %s exceeded %s max attempts.",
shard_attempts, parameters.config.SHARD_MAX_ATTEMPTS)
return self._TASK_DIRECTIVE.FAIL_TASK
if tstate.output_writer and (
not tstate.output_writer._supports_shard_retry(tstate)):
logging.warning("Output writer %s does not support shard retry.",
tstate.output_writer.__class__.__name__)
return self._TASK_DIRECTIVE.FAIL_TASK
shard_state.reset_for_retry()
logging.warning("Shard %s attempt %s failed with up to %s attempts.",
shard_state.shard_id,
shard_state.retries,
parameters.config.SHARD_MAX_ATTEMPTS)
output_writer = None
if tstate.output_writer:
output_writer = tstate.output_writer.create(
tstate.mapreduce_spec, shard_state.shard_number, shard_attempts + 1)
tstate.reset_for_retry(output_writer)
return self._TASK_DIRECTIVE.RETRY_SHARD
def _attempt_slice_retry(self, shard_state, tstate):
"""Attempt to retry this slice.
This method may modify shard_state and tstate to prepare for retry or fail.
Args:
shard_state: model.ShardState for current shard.
tstate: model.TransientShardState for current shard.
Returns:
A _TASK_DIRECTIVE enum. RETRY_SLICE if slice should be retried.
RETRY_SHARD if shard retry should be attempted.
"""
if (shard_state.slice_retries + 1 <
parameters.config.TASK_MAX_DATA_PROCESSING_ATTEMPTS):
logging.warning(
"Slice %s %s failed for the %s of up to %s attempts "
"(%s of %s taskqueue execution attempts). "
"Will retry now.",
tstate.shard_id,
tstate.slice_id,
shard_state.slice_retries + 1,
parameters.config.TASK_MAX_DATA_PROCESSING_ATTEMPTS,
self.task_retry_count() + 1,
parameters.config.TASK_MAX_ATTEMPTS)
# Clear info related to current exception. Otherwise, the real
# callstack that includes a frame for this method will show up
# in log.
sys.exc_clear()
self._try_free_lease(shard_state, slice_retry=True)
return self._TASK_DIRECTIVE.RETRY_SLICE
if parameters.config.TASK_MAX_DATA_PROCESSING_ATTEMPTS > 0:
logging.warning("Slice attempt %s exceeded %s max attempts.",
self.task_retry_count() + 1,
parameters.config.TASK_MAX_DATA_PROCESSING_ATTEMPTS)
return self._TASK_DIRECTIVE.RETRY_SHARD
@staticmethod
def get_task_name(shard_id, slice_id, retry=0):
"""Compute single worker task name.
Args:
shard_id: shard id.
slice_id: slice id.
retry: current shard retry count.
Returns:
task name which should be used to process specified shard/slice.
"""
# Prefix the task name with something unique to this framework's
# namespace so we don't conflict with user tasks on the queue.
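    # Illustrative result: get_task_name("15828943006283-3", 5, retry=1)
    # returns "appengine-mrshard-15828943006283-3-5-retry-1".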
return "appengine-mrshard-%s-%s-retry-%s" % (
shard_id, slice_id, retry)
def _get_countdown_for_next_slice(self, spec):
"""Get countdown for next slice's task.
When user sets processing rate, we set countdown to delay task execution.
Args:
spec: model.MapreduceSpec
Returns:
countdown in int.
"""
countdown = 0
if self._processing_limit(spec) != -1:
countdown = max(
int(parameters.config._SLICE_DURATION_SEC -
(self._time() - self._start_time)), 0)
return countdown
@classmethod
def _state_to_task(cls,
tstate,
shard_state,
eta=None,
countdown=None):
"""Generate task for slice according to current states.
Args:
tstate: An instance of TransientShardState.
shard_state: An instance of ShardState.
eta: Absolute time when the MR should execute. May not be specified
if 'countdown' is also supplied. This may be timezone-aware or
timezone-naive.
countdown: Time in seconds into the future that this MR should execute.
Defaults to zero.
Returns:
A model.HugeTask instance for the slice specified by current states.
"""
base_path = tstate.base_path
task_name = MapperWorkerCallbackHandler.get_task_name(
tstate.shard_id,
tstate.slice_id,
tstate.retries)
headers = util._get_task_headers(tstate.mapreduce_spec.mapreduce_id)
headers[util._MR_SHARD_ID_TASK_HEADER] = tstate.shard_id
worker_task = model.HugeTask(
url=base_path + "/worker_callback/" + tstate.shard_id,
params=tstate.to_dict(),
name=task_name,
eta=eta,
countdown=countdown,
parent=shard_state,
headers=headers)
return worker_task
@classmethod
def _add_task(cls,
worker_task,
mapreduce_spec,
queue_name):
"""Schedule slice scanning by adding it to the task queue.
Args:
worker_task: a model.HugeTask task for slice. This is NOT a taskqueue
task.
mapreduce_spec: an instance of model.MapreduceSpec.
queue_name: Optional queue to run on; uses the current queue of
execution or the default queue if unspecified.
"""
if not _run_task_hook(mapreduce_spec.get_hooks(),
"enqueue_worker_task",
worker_task,
queue_name):
try:
# Not adding transactionally because worker_task has name.
# Named task is not allowed for transactional add.
worker_task.add(queue_name)
except (taskqueue.TombstonedTaskError,
taskqueue.TaskAlreadyExistsError), e:
logging.warning("Task %r already exists. %s: %s",
worker_task.name,
e.__class__,
e)
def _processing_limit(self, spec):
"""Get the limit on the number of map calls allowed by this slice.
Args:
spec: a Mapreduce spec.
Returns:
The limit as a positive int if specified by user. -1 otherwise.
"""
processing_rate = float(spec.mapper.params.get("processing_rate", 0))
slice_processing_limit = -1
if processing_rate > 0:
slice_processing_limit = int(math.ceil(
parameters.config._SLICE_DURATION_SEC*processing_rate/
int(spec.mapper.shard_count)))
return slice_processing_limit
# Deprecated. Only used by old test cases.
# TODO(user): clean up tests.
@classmethod
def _schedule_slice(cls,
shard_state,
tstate,
queue_name=None,
eta=None,
countdown=None):
"""Schedule slice scanning by adding it to the task queue.
Args:
shard_state: An instance of ShardState.
tstate: An instance of TransientShardState.
queue_name: Optional queue to run on; uses the current queue of
execution or the default queue if unspecified.
eta: Absolute time when the MR should execute. May not be specified
if 'countdown' is also supplied. This may be timezone-aware or
timezone-naive.
countdown: Time in seconds into the future that this MR should execute.
Defaults to zero.
"""
queue_name = queue_name or os.environ.get("HTTP_X_APPENGINE_QUEUENAME",
"default")
task = cls._state_to_task(tstate, shard_state, eta, countdown)
cls._add_task(task, tstate.mapreduce_spec, queue_name)
class ControllerCallbackHandler(base_handler.HugeTaskHandler):
"""Supervises mapreduce execution.
Is also responsible for gathering execution status from shards together.
This task is "continuously" running by adding itself again to taskqueue if
and only if mapreduce is still active. A mapreduce is active if it has
actively running shards.
"""
def __init__(self, *args):
"""Constructor."""
super(ControllerCallbackHandler, self).__init__(*args)
self._time = time.time
def _drop_gracefully(self):
"""Gracefully drop controller task.
This method is called when decoding controller task payload failed.
Upon this we mark ShardState and MapreduceState as failed so all
tasks can stop.
    Writing to datastore is forced (ignoring read-only mode) because we
    badly want the tasks to stop, and if force_writes was False,
    the job would never have been started.
"""
mr_id = self.request.headers[util._MR_ID_TASK_HEADER]
state = model.MapreduceState.get_by_job_id(mr_id)
if not state or not state.active:
return
state.active = False
state.result_status = model.MapreduceState.RESULT_FAILED
config = util.create_datastore_write_config(state.mapreduce_spec)
puts = []
for ss in model.ShardState.find_all_by_mapreduce_state(state):
if ss.active:
ss.set_for_failure()
puts.append(ss)
# Avoid having too many shard states in memory.
if len(puts) > model.ShardState._MAX_STATES_IN_MEMORY:
db.put(puts, config=config)
puts = []
db.put(puts, config=config)
# Put mr_state only after all shard_states are put.
db.put(state, config=config)
def handle(self):
"""Handle request."""
spec = model.MapreduceSpec.from_json_str(
self.request.get("mapreduce_spec"))
state, control = db.get([
model.MapreduceState.get_key_by_job_id(spec.mapreduce_id),
model.MapreduceControl.get_key_by_job_id(spec.mapreduce_id),
])
if not state:
logging.warning("State not found for MR '%s'; dropping controller task.",
spec.mapreduce_id)
return
if not state.active:
logging.warning(
"MR %r is not active. Looks like spurious controller task execution.",
spec.mapreduce_id)
self._clean_up_mr(spec)
return
shard_states = model.ShardState.find_all_by_mapreduce_state(state)
self._update_state_from_shard_states(state, shard_states, control)
if state.active:
ControllerCallbackHandler.reschedule(
state, spec, self.serial_id() + 1)
def _update_state_from_shard_states(self, state, shard_states, control):
"""Update mr state by examing shard states.
Args:
state: current mapreduce state as MapreduceState.
shard_states: an iterator over shard states.
control: model.MapreduceControl entity.
"""
# Initialize vars.
state.active_shards, state.aborted_shards, state.failed_shards = 0, 0, 0
total_shards = 0
processed_counts = []
state.counters_map.clear()
# Tally across shard states once.
for s in shard_states:
total_shards += 1
if s.active:
state.active_shards += 1
if s.result_status == model.ShardState.RESULT_ABORTED:
state.aborted_shards += 1
elif s.result_status == model.ShardState.RESULT_FAILED:
state.failed_shards += 1
# Update stats in mapreduce state by aggregating stats from shard states.
state.counters_map.add_map(s.counters_map)
processed_counts.append(s.counters_map.get(context.COUNTER_MAPPER_CALLS))
state.set_processed_counts(processed_counts)
state.last_poll_time = datetime.datetime.utcfromtimestamp(self._time())
spec = state.mapreduce_spec
if total_shards != spec.mapper.shard_count:
logging.error("Found %d shard states. Expect %d. "
"Issuing abort command to job '%s'",
total_shards, spec.mapper.shard_count,
spec.mapreduce_id)
# We issue abort command to allow shards to stop themselves.
model.MapreduceControl.abort(spec.mapreduce_id)
# If any shard is active then the mr is active.
# This way, controller won't prematurely stop before all the shards have.
state.active = bool(state.active_shards)
if not control and (state.failed_shards or state.aborted_shards):
# Issue abort command if there are failed shards.
model.MapreduceControl.abort(spec.mapreduce_id)
if not state.active:
# Set final result status derived from shard states.
if state.failed_shards or not total_shards:
state.result_status = model.MapreduceState.RESULT_FAILED
      # It's important that failed shards are checked before aborted shards
# because failed shards will trigger other shards to abort.
elif state.aborted_shards:
state.result_status = model.MapreduceState.RESULT_ABORTED
else:
state.result_status = model.MapreduceState.RESULT_SUCCESS
self._finalize_outputs(spec, state)
self._finalize_job(spec, state)
else:
@db.transactional(retries=5)
def _put_state():
"""The helper for storing the state."""
fresh_state = model.MapreduceState.get_by_job_id(spec.mapreduce_id)
# We don't check anything other than active because we are only
# updating stats. It's OK if they are briefly inconsistent.
if not fresh_state.active:
logging.warning(
"Job %s is not active. Looks like spurious task execution. "
"Dropping controller task.", spec.mapreduce_id)
return
config = util.create_datastore_write_config(spec)
state.put(config=config)
_put_state()
def serial_id(self):
"""Get serial unique identifier of this task from request.
Returns:
serial identifier as int.
"""
return int(self.request.get("serial_id"))
@classmethod
def _finalize_outputs(cls, mapreduce_spec, mapreduce_state):
"""Finalize outputs.
Args:
mapreduce_spec: an instance of MapreduceSpec.
mapreduce_state: an instance of MapreduceState.
"""
# Only finalize the output writers if the job is successful.
if (mapreduce_spec.mapper.output_writer_class() and
mapreduce_state.result_status == model.MapreduceState.RESULT_SUCCESS):
mapreduce_spec.mapper.output_writer_class().finalize_job(mapreduce_state)
@classmethod
def _finalize_job(cls, mapreduce_spec, mapreduce_state):
"""Finalize job execution.
Invokes done callback and save mapreduce state in a transaction,
and schedule necessary clean ups. This method is idempotent.
Args:
mapreduce_spec: an instance of MapreduceSpec
mapreduce_state: an instance of MapreduceState
"""
config = util.create_datastore_write_config(mapreduce_spec)
queue_name = util.get_queue_name(mapreduce_spec.params.get(
model.MapreduceSpec.PARAM_DONE_CALLBACK_QUEUE))
done_callback = mapreduce_spec.params.get(
model.MapreduceSpec.PARAM_DONE_CALLBACK)
done_task = None
if done_callback:
done_task = taskqueue.Task(
url=done_callback,
headers=util._get_task_headers(mapreduce_spec.mapreduce_id,
util.CALLBACK_MR_ID_TASK_HEADER),
method=mapreduce_spec.params.get("done_callback_method", "POST"))
@db.transactional(retries=5)
def _put_state():
"""Helper to store state."""
fresh_state = model.MapreduceState.get_by_job_id(
mapreduce_spec.mapreduce_id)
if not fresh_state.active:
logging.warning(
"Job %s is not active. Looks like spurious task execution. "
"Dropping task.", mapreduce_spec.mapreduce_id)
return
mapreduce_state.put(config=config)
# Enqueue done_callback if needed.
if done_task and not _run_task_hook(
mapreduce_spec.get_hooks(),
"enqueue_done_task",
done_task,
queue_name):
done_task.add(queue_name, transactional=True)
_put_state()
logging.info("Final result for job '%s' is '%s'",
mapreduce_spec.mapreduce_id, mapreduce_state.result_status)
cls._clean_up_mr(mapreduce_spec)
@classmethod
def _clean_up_mr(cls, mapreduce_spec):
FinalizeJobHandler.schedule(mapreduce_spec)
@staticmethod
def get_task_name(mapreduce_spec, serial_id):
"""Compute single controller task name.
Args:
mapreduce_spec: specification of the mapreduce.
serial_id: id of the invocation as int.
Returns:
task name which should be used to process specified shard/slice.
"""
# Prefix the task name with something unique to this framework's
# namespace so we don't conflict with user tasks on the queue.
return "appengine-mrcontrol-%s-%s" % (
mapreduce_spec.mapreduce_id, serial_id)
@staticmethod
def controller_parameters(mapreduce_spec, serial_id):
"""Fill in controller task parameters.
Returned parameters map is to be used as task payload, and it contains
all the data, required by controller to perform its function.
Args:
mapreduce_spec: specification of the mapreduce.
serial_id: id of the invocation as int.
Returns:
string->string map of parameters to be used as task payload.
"""
return {"mapreduce_spec": mapreduce_spec.to_json_str(),
"serial_id": str(serial_id)}
@classmethod
def reschedule(cls,
mapreduce_state,
mapreduce_spec,
serial_id,
queue_name=None):
"""Schedule new update status callback task.
Args:
mapreduce_state: mapreduce state as model.MapreduceState
mapreduce_spec: mapreduce specification as MapreduceSpec.
serial_id: id of the invocation as int.
queue_name: The queue to schedule this task on. Will use the current
queue of execution if not supplied.
"""
task_name = ControllerCallbackHandler.get_task_name(
mapreduce_spec, serial_id)
task_params = ControllerCallbackHandler.controller_parameters(
mapreduce_spec, serial_id)
if not queue_name:
queue_name = os.environ.get("HTTP_X_APPENGINE_QUEUENAME", "default")
controller_callback_task = model.HugeTask(
url=(mapreduce_spec.params["base_path"] + "/controller_callback/" +
mapreduce_spec.mapreduce_id),
name=task_name, params=task_params,
countdown=parameters.config._CONTROLLER_PERIOD_SEC,
parent=mapreduce_state,
headers=util._get_task_headers(mapreduce_spec.mapreduce_id))
if not _run_task_hook(mapreduce_spec.get_hooks(),
"enqueue_controller_task",
controller_callback_task,
queue_name):
try:
controller_callback_task.add(queue_name)
except (taskqueue.TombstonedTaskError,
taskqueue.TaskAlreadyExistsError), e:
logging.warning("Task %r with params %r already exists. %s: %s",
task_name, task_params, e.__class__, e)
class KickOffJobHandler(base_handler.TaskQueueHandler):
"""Taskqueue handler which kicks off a mapreduce processing.
This handler is idempotent.
Precondition:
The Model.MapreduceState entity for this mr is already created and
saved to datastore by StartJobHandler._start_map.
Request Parameters:
mapreduce_id: in string.
"""
# Datastore key used to save json serialized input readers.
_SERIALIZED_INPUT_READERS_KEY = "input_readers_for_mr_%s"
def handle(self):
"""Handles kick off request."""
# Get and verify mr state.
mr_id = self.request.get("mapreduce_id")
# Log the mr_id since this is started in an unnamed task
logging.info("Processing kickoff for job %s", mr_id)
state = model.MapreduceState.get_by_job_id(mr_id)
if not self._check_mr_state(state, mr_id):
return
# Create input readers.
readers, serialized_readers_entity = self._get_input_readers(state)
if readers is None:
# We don't have any data. Finish map.
logging.warning("Found no mapper input data to process.")
state.active = False
state.result_status = model.MapreduceState.RESULT_SUCCESS
ControllerCallbackHandler._finalize_job(
state.mapreduce_spec, state)
return False
# Create output writers.
self._setup_output_writer(state)
# Save states and make sure we use the saved input readers for
# subsequent operations.
result = self._save_states(state, serialized_readers_entity)
if result is None:
readers, _ = self._get_input_readers(state)
elif not result:
return
queue_name = self.request.headers.get("X-AppEngine-QueueName")
KickOffJobHandler._schedule_shards(state.mapreduce_spec, readers,
queue_name,
state.mapreduce_spec.params["base_path"],
state)
ControllerCallbackHandler.reschedule(
state, state.mapreduce_spec, serial_id=0, queue_name=queue_name)
def _drop_gracefully(self):
"""See parent."""
mr_id = self.request.get("mapreduce_id")
logging.error("Failed to kick off job %s", mr_id)
state = model.MapreduceState.get_by_job_id(mr_id)
if not self._check_mr_state(state, mr_id):
return
# Issue abort command just in case there are running tasks.
config = util.create_datastore_write_config(state.mapreduce_spec)
model.MapreduceControl.abort(mr_id, config=config)
# Finalize job and invoke callback.
state.active = False
state.result_status = model.MapreduceState.RESULT_FAILED
ControllerCallbackHandler._finalize_job(state.mapreduce_spec, state)
def _get_input_readers(self, state):
"""Get input readers.
Args:
state: a MapreduceState model.
Returns:
A tuple: (a list of input readers, a model._HugeTaskPayload entity).
The payload entity contains the json serialized input readers.
      (None, None) when input reader splitting returned no data to process.
"""
serialized_input_readers_key = (self._SERIALIZED_INPUT_READERS_KEY %
state.key().id_or_name())
serialized_input_readers = model._HugeTaskPayload.get_by_key_name(
serialized_input_readers_key, parent=state)
# Initialize input readers.
input_reader_class = state.mapreduce_spec.mapper.input_reader_class()
split_param = state.mapreduce_spec.mapper
if issubclass(input_reader_class, map_job.InputReader):
split_param = map_job.JobConfig._to_map_job_config(
state.mapreduce_spec,
os.environ.get("HTTP_X_APPENGINE_QUEUENAME"))
if serialized_input_readers is None:
readers = input_reader_class.split_input(split_param)
else:
readers = [input_reader_class.from_json_str(json) for json in
simplejson.loads(serialized_input_readers.payload)]
if not readers:
return None, None
# Update state and spec with actual shard count.
state.mapreduce_spec.mapper.shard_count = len(readers)
state.active_shards = len(readers)
# Prepare to save serialized input readers.
if serialized_input_readers is None:
# Use mr_state as parent so it can be easily cleaned up later.
serialized_input_readers = model._HugeTaskPayload(
key_name=serialized_input_readers_key, parent=state)
readers_json_str = [i.to_json_str() for i in readers]
serialized_input_readers.payload = simplejson.dumps(readers_json_str)
return readers, serialized_input_readers
def _setup_output_writer(self, state):
if not state.writer_state:
output_writer_class = state.mapreduce_spec.mapper.output_writer_class()
if output_writer_class:
output_writer_class.init_job(state)
@db.transactional
def _save_states(self, state, serialized_readers_entity):
"""Run transaction to save state.
Args:
state: a model.MapreduceState entity.
serialized_readers_entity: a model._HugeTaskPayload entity containing
json serialized input readers.
Returns:
False if a fatal error is encountered and this task should be dropped
immediately. True if transaction is successful. None if a previous
attempt of this same transaction has already succeeded.
"""
mr_id = state.key().id_or_name()
fresh_state = model.MapreduceState.get_by_job_id(mr_id)
if not self._check_mr_state(fresh_state, mr_id):
return False
if fresh_state.active_shards != 0:
logging.warning(
"Mapreduce %s already has active shards. Looks like spurious task "
"execution.", mr_id)
return None
config = util.create_datastore_write_config(state.mapreduce_spec)
db.put([state, serialized_readers_entity], config=config)
return True
@classmethod
def _schedule_shards(cls,
spec,
readers,
queue_name,
base_path,
mr_state):
"""Prepares shard states and schedules their execution.
    Even though this method does not schedule shard tasks and save shard states
    transactionally, it's safe for taskqueue to retry this logic because
the initial shard_state for each shard is the same from any retry.
This is an important yet reasonable assumption on model.ShardState.
Args:
spec: mapreduce specification as MapreduceSpec.
readers: list of InputReaders describing shard splits.
queue_name: The queue to run this job on.
base_path: The base url path of mapreduce callbacks.
mr_state: The MapReduceState of current job.
"""
# Create shard states.
shard_states = []
for shard_number, input_reader in enumerate(readers):
shard_state = model.ShardState.create_new(spec.mapreduce_id, shard_number)
shard_state.shard_description = str(input_reader)
shard_states.append(shard_state)
# Retrieves already existing shard states.
existing_shard_states = db.get(shard.key() for shard in shard_states)
existing_shard_keys = set(shard.key() for shard in existing_shard_states
if shard is not None)
# Save non existent shard states.
# Note: we could do this transactionally if necessary.
db.put((shard for shard in shard_states
if shard.key() not in existing_shard_keys),
config=util.create_datastore_write_config(spec))
# Create output writers.
writer_class = spec.mapper.output_writer_class()
writers = [None] * len(readers)
if writer_class:
for shard_number, shard_state in enumerate(shard_states):
writers[shard_number] = writer_class.create(
mr_state.mapreduce_spec,
shard_state.shard_number, shard_state.retries + 1,
mr_state.writer_state)
# Schedule ALL shard tasks.
# Since each task is named, _add_task will fall back gracefully if a
# task already exists.
for shard_number, (input_reader, output_writer) in enumerate(
zip(readers, writers)):
shard_id = model.ShardState.shard_id_from_number(
spec.mapreduce_id, shard_number)
task = MapperWorkerCallbackHandler._state_to_task(
model.TransientShardState(
base_path, spec, shard_id, 0, input_reader, input_reader,
output_writer=output_writer,
handler=spec.mapper.handler),
shard_states[shard_number])
MapperWorkerCallbackHandler._add_task(task,
spec,
queue_name)
@classmethod
def _check_mr_state(cls, state, mr_id):
"""Check MapreduceState.
Args:
state: an MapreduceState instance.
mr_id: mapreduce id.
Returns:
True if state is valid. False if not and this task should be dropped.
"""
if state is None:
logging.warning(
"Mapreduce State for job %s is missing. Dropping Task.",
mr_id)
return False
if not state.active:
logging.warning(
"Mapreduce %s is not active. Looks like spurious task "
"execution. Dropping Task.", mr_id)
return False
return True
class StartJobHandler(base_handler.PostJsonHandler):
"""Command handler starts a mapreduce job.
  This handler allows a user to start a mapreduce via a web form. Its
  _start_map method can also be used independently to start a mapreduce.
"""
def handle(self):
"""Handles start request."""
# Mapper spec as form arguments.
mapreduce_name = self._get_required_param("name")
mapper_input_reader_spec = self._get_required_param("mapper_input_reader")
mapper_handler_spec = self._get_required_param("mapper_handler")
mapper_output_writer_spec = self.request.get("mapper_output_writer")
mapper_params = self._get_params(
"mapper_params_validator", "mapper_params.")
params = self._get_params(
"params_validator", "params.")
# Default values.
mr_params = map_job.JobConfig._get_default_mr_params()
mr_params.update(params)
if "queue_name" in mapper_params:
mr_params["queue_name"] = mapper_params["queue_name"]
# Set some mapper param defaults if not present.
mapper_params["processing_rate"] = int(mapper_params.get(
"processing_rate") or parameters.config.PROCESSING_RATE_PER_SEC)
# Validate the Mapper spec, handler, and input reader.
mapper_spec = model.MapperSpec(
mapper_handler_spec,
mapper_input_reader_spec,
mapper_params,
int(mapper_params.get("shard_count", parameters.config.SHARD_COUNT)),
output_writer_spec=mapper_output_writer_spec)
mapreduce_id = self._start_map(
mapreduce_name,
mapper_spec,
mr_params,
queue_name=mr_params["queue_name"],
_app=mapper_params.get("_app"))
self.json_response["mapreduce_id"] = mapreduce_id
def _get_params(self, validator_parameter, name_prefix):
"""Retrieves additional user-supplied params for the job and validates them.
Args:
validator_parameter: name of the request parameter which supplies
validator for this parameter set.
name_prefix: common prefix for all parameter names in the request.
Raises:
Any exception raised by the 'params_validator' request parameter if
the params fail to validate.
Returns:
The user parameters.
"""
params_validator = self.request.get(validator_parameter)
user_params = {}
for key in self.request.arguments():
if key.startswith(name_prefix):
values = self.request.get_all(key)
adjusted_key = key[len(name_prefix):]
if len(values) == 1:
user_params[adjusted_key] = values[0]
else:
user_params[adjusted_key] = values
if params_validator:
resolved_validator = util.for_name(params_validator)
resolved_validator(user_params)
return user_params
def _get_required_param(self, param_name):
"""Get a required request parameter.
Args:
param_name: name of request parameter to fetch.
Returns:
parameter value
Raises:
errors.NotEnoughArgumentsError: if parameter is not specified.
"""
value = self.request.get(param_name)
if not value:
raise errors.NotEnoughArgumentsError(param_name + " not specified")
return value
@classmethod
def _start_map(cls,
name,
mapper_spec,
mapreduce_params,
queue_name,
eta=None,
countdown=None,
hooks_class_name=None,
_app=None,
in_xg_transaction=False):
# pylint: disable=g-doc-args
# pylint: disable=g-doc-return-or-yield
"""See control.start_map.
Requirements for this method:
1. The request that invokes this method can either be regular or
from taskqueue. So taskqueue specific headers can not be used.
2. Each invocation transactionally starts an isolated mapreduce job with
a unique id. MapreduceState should be immediately available after
returning. See control.start_map's doc on transactional.
3. Method should be lightweight.
"""
# Validate input reader.
mapper_input_reader_class = mapper_spec.input_reader_class()
mapper_input_reader_class.validate(mapper_spec)
# Validate output writer.
mapper_output_writer_class = mapper_spec.output_writer_class()
if mapper_output_writer_class:
mapper_output_writer_class.validate(mapper_spec)
# Create a new id and mr spec.
mapreduce_id = model.MapreduceState.new_mapreduce_id()
mapreduce_spec = model.MapreduceSpec(
name,
mapreduce_id,
mapper_spec.to_json(),
mapreduce_params,
hooks_class_name)
# Validate mapper handler.
ctx = context.Context(mapreduce_spec, None)
context.Context._set(ctx)
try:
# pylint: disable=pointless-statement
mapper_spec.handler
finally:
context.Context._set(None)
# Save states and enqueue task.
if in_xg_transaction:
propagation = db.MANDATORY
else:
propagation = db.INDEPENDENT
@db.transactional(propagation=propagation)
def _txn():
cls._create_and_save_state(mapreduce_spec, _app)
cls._add_kickoff_task(mapreduce_params["base_path"], mapreduce_spec, eta,
countdown, queue_name)
_txn()
return mapreduce_id
@classmethod
def _create_and_save_state(cls, mapreduce_spec, _app):
"""Save mapreduce state to datastore.
Save state to datastore so that UI can see it immediately.
Args:
mapreduce_spec: model.MapreduceSpec,
_app: app id if specified. None otherwise.
Returns:
The saved Mapreduce state.
"""
state = model.MapreduceState.create_new(mapreduce_spec.mapreduce_id)
state.mapreduce_spec = mapreduce_spec
state.active = True
state.active_shards = 0
if _app:
state.app_id = _app
config = util.create_datastore_write_config(mapreduce_spec)
state.put(config=config)
return state
@classmethod
def _add_kickoff_task(cls,
base_path,
mapreduce_spec,
eta,
countdown,
queue_name):
"""Enqueues a new kickoff task."""
params = {"mapreduce_id": mapreduce_spec.mapreduce_id}
# Task is not named so that it can be added within a transaction.
kickoff_task = taskqueue.Task(
url=base_path + "/kickoffjob_callback/" + mapreduce_spec.mapreduce_id,
headers=util._get_task_headers(mapreduce_spec.mapreduce_id),
params=params,
eta=eta,
countdown=countdown)
hooks = mapreduce_spec.get_hooks()
if hooks is not None:
try:
hooks.enqueue_kickoff_task(kickoff_task, queue_name)
return
except NotImplementedError:
pass
kickoff_task.add(queue_name, transactional=True)
class FinalizeJobHandler(base_handler.TaskQueueHandler):
"""Finalize map job by deleting all temporary entities."""
def handle(self):
mapreduce_id = self.request.get("mapreduce_id")
mapreduce_state = model.MapreduceState.get_by_job_id(mapreduce_id)
if mapreduce_state:
config = (
util.create_datastore_write_config(mapreduce_state.mapreduce_spec))
keys = [model.MapreduceControl.get_key_by_job_id(mapreduce_id)]
for ss in model.ShardState.find_all_by_mapreduce_state(mapreduce_state):
keys.extend(list(
model._HugeTaskPayload.all().ancestor(ss).run(keys_only=True)))
keys.extend(list(model._HugeTaskPayload.all().ancestor(
mapreduce_state).run(keys_only=True)))
db.delete(keys, config=config)
@classmethod
def schedule(cls, mapreduce_spec):
"""Schedule finalize task.
Args:
mapreduce_spec: mapreduce specification as MapreduceSpec.
"""
task_name = mapreduce_spec.mapreduce_id + "-finalize"
finalize_task = taskqueue.Task(
name=task_name,
url=(mapreduce_spec.params["base_path"] + "/finalizejob_callback/" +
mapreduce_spec.mapreduce_id),
params={"mapreduce_id": mapreduce_spec.mapreduce_id},
headers=util._get_task_headers(mapreduce_spec.mapreduce_id))
queue_name = util.get_queue_name(None)
if not _run_task_hook(mapreduce_spec.get_hooks(),
"enqueue_controller_task",
finalize_task,
queue_name):
try:
finalize_task.add(queue_name)
except (taskqueue.TombstonedTaskError,
taskqueue.TaskAlreadyExistsError), e:
logging.warning("Task %r already exists. %s: %s",
task_name, e.__class__, e)
class CleanUpJobHandler(base_handler.PostJsonHandler):
"""Command to kick off tasks to clean up a job's data."""
def handle(self):
mapreduce_id = self.request.get("mapreduce_id")
mapreduce_state = model.MapreduceState.get_by_job_id(mapreduce_id)
if mapreduce_state:
shard_keys = model.ShardState.calculate_keys_by_mapreduce_state(
mapreduce_state)
db.delete(shard_keys)
db.delete(mapreduce_state)
self.json_response["status"] = ("Job %s successfully cleaned up." %
mapreduce_id)
class AbortJobHandler(base_handler.PostJsonHandler):
"""Command to abort a running job."""
def handle(self):
model.MapreduceControl.abort(self.request.get("mapreduce_id"))
self.json_response["status"] = "Abort signal sent."
| apache-2.0 | 7,339,639,703,896,222,000 | 5,160,125,190,490,043,000 | 36.238328 | 80 | 0.655563 | false |
josephcslater/scipy | scipy/ndimage/tests/test_regression.py | 123 | 1429 | from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_array_almost_equal, run_module_suite
import scipy.ndimage as ndimage
def test_byte_order_median():
"""Regression test for #413: median_filter does not handle bytes orders."""
a = np.arange(9, dtype='<f4').reshape(3, 3)
ref = ndimage.filters.median_filter(a,(3, 3))
b = np.arange(9, dtype='>f4').reshape(3, 3)
t = ndimage.filters.median_filter(b, (3, 3))
assert_array_almost_equal(ref, t)
def test_zoom_output_shape():
"""Ticket #643"""
x = np.arange(12).reshape((3,4))
ndimage.zoom(x, 2, output=np.zeros((6,8)))
def test_ticket_742():
def SE(img, thresh=.7, size=4):
mask = img > thresh
rank = len(mask.shape)
la, co = ndimage.label(mask,
ndimage.generate_binary_structure(rank, rank))
slices = ndimage.find_objects(la)
if np.dtype(np.intp) != np.dtype('i'):
shape = (3,1240,1240)
a = np.random.rand(np.product(shape)).reshape(shape)
# shouldn't crash
SE(a)
def test_gh_issue_3025():
"""Github issue #3025 - improper merging of labels"""
d = np.zeros((60,320))
d[:,:257] = 1
d[:,260:] = 1
d[36,257] = 1
d[35,258] = 1
d[35,259] = 1
assert ndimage.label(d, np.ones((3,3)))[1] == 1
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause | 6,644,027,788,288,142,000 | 1,146,211,468,484,125,200 | 27.58 | 79 | 0.589223 | false |
Shengliang/mbed | workspace_tools/export/uvision5.py | 3 | 4120 | """
mbed SDK
Copyright (c) 2016 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from os.path import basename, join, dirname
from project_generator_definitions.definitions import ProGenDef
from workspace_tools.export.exporters import Exporter
from workspace_tools.targets import TARGET_MAP, TARGET_NAMES
# If you wish to add a new target, add it to project_generator_definitions, and then
# define progen_target name in the target class (`` self.progen_target = 'my_target_name' ``)
# There are 2 default mbed templates (predefined settings) uvision.uvproj and uvproj_microlib.uvproj.tmpl
class Uvision5(Exporter):
"""
Exporter class for uvision5. This class uses project generator.
"""
    # These 2 are currently for exporters backward compatibility
NAME = 'uVision5'
TOOLCHAIN = 'ARM'
# PROGEN_ACTIVE contains information for exporter scripts that this is using progen
PROGEN_ACTIVE = True
# backward compatibility with our scripts
TARGETS = []
for target in TARGET_NAMES:
try:
if (ProGenDef('uvision5').is_supported(str(TARGET_MAP[target])) or
ProGenDef('uvision5').is_supported(TARGET_MAP[target].progen['target'])):
TARGETS.append(target)
except AttributeError:
# target is not supported yet
continue
def get_toolchain(self):
return TARGET_MAP[self.target].default_toolchain
def generate(self):
""" Generates the project files """
project_data = self.progen_get_project_data()
tool_specific = {}
        # Extend the tool-specific settings with the uvision-specific settings the target requires
try:
if TARGET_MAP[self.target].progen['uvision5']['template']:
tool_specific['uvision5'] = TARGET_MAP[self.target].progen['uvision5']
except KeyError:
            # use the default template provided by the mbed projects
tool_specific['uvision5'] = {
'template': [join(dirname(__file__), 'uvision.uvproj.tmpl')],
}
project_data['tool_specific'] = {}
project_data['tool_specific'].update(tool_specific)
# get flags from toolchain and apply
project_data['tool_specific']['uvision5']['misc'] = {}
project_data['tool_specific']['uvision5']['misc']['asm_flags'] = list(set(self.toolchain.flags['common'] + self.toolchain.flags['asm']))
project_data['tool_specific']['uvision5']['misc']['c_flags'] = list(set(self.toolchain.flags['common'] + self.toolchain.flags['c']))
# not compatible with c99 flag set in the template
project_data['tool_specific']['uvision5']['misc']['c_flags'].remove("--c99")
        project_data['tool_specific']['uvision5']['misc']['cxx_flags'] = list(set(self.toolchain.flags['common'] + self.toolchain.flags['cxx']))
project_data['tool_specific']['uvision5']['misc']['ld_flags'] = self.toolchain.flags['ld']
i = 0
for macro in project_data['common']['macros']:
            # armasm does not like floating-point numbers in macros, so convert the timestamp to an int
if macro.startswith('MBED_BUILD_TIMESTAMP'):
timestamp = macro[len('MBED_BUILD_TIMESTAMP='):]
project_data['common']['macros'][i] = 'MBED_BUILD_TIMESTAMP=' + str(int(float(timestamp)))
# armasm does not even accept MACRO=string
if macro.startswith('MBED_USERNAME'):
project_data['common']['macros'].pop(i)
i += 1
project_data['common']['macros'].append('__ASSERT_MSG')
self.progen_gen_file('uvision5', project_data)
| apache-2.0 | -5,387,702,522,343,281,000 | 6,598,039,044,870,390,000 | 45.292135 | 144 | 0.655097 | false |
provaleks/o8 | addons/base_report_designer/wizard/__init__.py | 421 | 1081 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base_report_designer_modify
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 618,022,326,345,688,800 | -5,397,866,121,304,557,000 | 44.041667 | 78 | 0.618871 | false |
VladimirFilonov/django-yandex-kassa | setup.py | 1 | 1336 | # -*- coding: utf-8 -*-
import os
from distutils.core import setup
from setuptools import find_packages
__author__ = 'Alexei Kuzmin'
__version__ = "0.9.13"
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django-yandex-kassa',
version=__version__,
packages=find_packages(),
include_package_data=True,
url='https://github.com/DrMartiner/django-yandex-kassa',
license='MIT',
author=__author__,
author_email='DrMartiner@GMail.Com',
keywords=['django', 'yandex', 'money', 'kassa', 'payment',
'pay', 'payment', 'ecommerce', 'shop', 'cart'],
description='Integrating django project with yandex-kassa',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
install_requires=[
'Django>1.6',
'ecdsa==0.13',
'pycrypto==2.6.1',
'django-annoying',
'lxml',
'six'
],
)
| mit | -3,382,197,379,814,493,700 | 816,915,292,895,706,800 | 29.363636 | 78 | 0.586078 | false |
rchrd2/markdown-keyval | key_val.py | 1 | 2188 | """
KeyValue Extension for Python-Markdown
=============================================
A simple example:
Apple: Pomaceous fruit of plants of the genus Malus in.
Orange: The fruit of an evergreen tree of the genus Citrus.
@todo get this to print out in a table
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from markdown import Extension
from markdown.blockprocessors import BlockProcessor
from markdown.util import etree
import logging
import re
logger = logging.getLogger(__name__)
class KeyValProcessor(BlockProcessor):
""" Process Key Value Pairs. """
# Regex for key value
RE = re.compile(r'^(?P<key>[^\s]+):\s+(?P<value>.+)(\n|$)')
def test(self, parent, block):
return bool(self.RE.search(block))
def run(self, parent, blocks):
block = blocks.pop(0)
m = self.RE.search(block)
if m:
before = block[:m.start()] # All lines before header
after = block[m.end():] # All lines after header
#if before:
# # As the header was not the first line of the block and the
# # lines before the header must be parsed first,
# # recursively parse this lines as a block.
# self.parser.parseBlocks(parent, [before])
# Create header using named groups from RE
h = etree.SubElement(parent, 'div')
#template = "<dl><dt>%s</dt><dd>%s</dd></dl>"
template = "<b>%s</b>: %s"
#template = "<tr><td>%s</td><td>%s</td></tr>"
h.text = template % (m.group('key').strip(), m.group('value').strip())
if after:
# Insert remaining lines as first block for future parsing.
blocks.insert(0, after)
else:
# This should never happen, but just in case...
logger.warn("We've got a problem header: %r" % block)
class KeyValExtension(Extension):
""" Add keyvalues to Markdown. """
def extendMarkdown(self, md, md_globals):
md.parser.blockprocessors.add('keyval', KeyValProcessor(md.parser), '>ulist')
def makeExtension(configs={}):
return KeyValExtension(configs=configs)
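# A small usage sketch (assumes the ``markdown`` package is importable; the
# input line mirrors the example in the module docstring):
#
#   import markdown
#   html = markdown.markdown(
#       "Apple: Pomaceous fruit of plants of the genus Malus in.",
#       extensions=[makeExtension()])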
| mit | -2,444,165,289,048,585,000 | -8,140,360,940,107,445,000 | 33.1875 | 85 | 0.590494 | false |
kvesteri/flask-storage | flask_storage/amazon.py | 1 | 9418 | from functools import wraps
import mimetypes
from boto.s3.connection import S3Connection, SubdomainCallingFormat
from boto.exception import S3ResponseError, S3CreateError
from boto.s3.key import Key
from flask import current_app
from .base import (
FileNotFoundError,
Storage,
StorageException,
StorageFile,
reraise
)
class S3BotoStorage(Storage):
def __init__(
self,
folder_name=None,
access_key=None,
secret_key=None,
bucket_acl=None,
acl=None,
headers=None,
gzip=None,
gzip_content_types=None,
querystring_auth=None,
querystring_expire=None,
reduced_redundancy=None,
custom_domain=None,
secure_urls=None,
location=None,
file_name_charset=None,
preload_metadata=None,
calling_format=None,
file_overwrite=None,
auto_create_bucket=None):
self.access_key = access_key or \
current_app.config.get('AWS_ACCESS_KEY_ID', None)
self.secret_key = secret_key or \
current_app.config.get('AWS_SECRET_ACCESS_KEY', None)
self.calling_format = calling_format or \
current_app.config.get(
'AWS_S3_CALLING_FORMAT',
SubdomainCallingFormat()
)
self.auto_create_bucket = auto_create_bucket or \
current_app.config.get('AWS_AUTO_CREATE_BUCKET', False)
self.bucket_name = folder_name or \
current_app.config.get('AWS_STORAGE_BUCKET_NAME', None)
self.acl = acl or \
current_app.config.get('AWS_DEFAULT_ACL', 'public-read')
self.bucket_acl = bucket_acl or \
current_app.config.get('AWS_BUCKET_ACL', self.acl)
self.file_overwrite = file_overwrite or \
current_app.config.get('AWS_S3_FILE_OVERWRITE', False)
self.headers = headers or \
current_app.config.get('AWS_HEADERS', {})
self.preload_metadata = preload_metadata or \
current_app.config.get('AWS_PRELOAD_METADATA', False)
self.gzip = gzip or \
current_app.config.get('AWS_IS_GZIPPED', False)
self.gzip_content_types = gzip_content_types or \
current_app.config.get(
'GZIP_CONTENT_TYPES', (
'text/css',
'application/javascript',
'application/x-javascript',
)
)
self.querystring_auth = querystring_auth or \
current_app.config.get('AWS_QUERYSTRING_AUTH', True)
self.querystring_expire = querystring_expire or \
current_app.config.get('AWS_QUERYSTRING_EXPIRE', 3600)
self.reduced_redundancy = reduced_redundancy or \
current_app.config.get('AWS_REDUCED_REDUNDANCY', False)
self.custom_domain = custom_domain or \
current_app.config.get('AWS_S3_CUSTOM_DOMAIN', None)
self.secure_urls = secure_urls or \
current_app.config.get('AWS_S3_SECURE_URLS', True)
self.location = location or current_app.config.get('AWS_LOCATION', '')
self.location = self.location.lstrip('/')
self.file_name_charset = file_name_charset or \
current_app.config.get('AWS_S3_FILE_NAME_CHARSET', 'utf-8')
self._connection = None
self._entries = {}
@property
def connection(self):
if self._connection is None:
self._connection = S3Connection(
self.access_key, self.secret_key,
calling_format=self.calling_format
)
return self._connection
@property
def folder_name(self):
return self.bucket_name
@property
def bucket(self):
"""
Get the current bucket. If there is no current bucket object
create it.
"""
if not hasattr(self, '_bucket'):
self._bucket = self._get_or_create_bucket(self.bucket_name)
return self._bucket
def list_folders(self):
return [bucket.name for bucket in self.connection.get_all_buckets()]
@property
def folder(self):
return self.bucket
def list_files(self):
return [
self.file_class(self, key.name) for key in self.bucket.list()
]
def create_folder(self, name=None):
if not name:
name = self.folder_name
try:
bucket = self.connection.create_bucket(name)
bucket.set_acl(self.bucket_acl)
except S3CreateError, e:
reraise(e)
return bucket
def _get_or_create_bucket(self, name):
"""Retrieves a bucket if it exists, otherwise creates it."""
try:
return self.connection.get_bucket(
name,
validate=self.auto_create_bucket
)
except S3ResponseError:
if self.auto_create_bucket:
bucket = self.connection.create_bucket(name)
bucket.set_acl(self.bucket_acl)
return bucket
raise RuntimeError(
"Bucket specified by "
"S3_BUCKET_NAME does not exist. "
"Buckets can be automatically created by setting "
"AWS_AUTO_CREATE_BUCKET=True")
def _save(self, name, content):
cleaned_name = self._clean_name(name)
name = self._normalize_name(cleaned_name)
headers = self.headers.copy()
name = cleaned_name
content_type = mimetypes.guess_type(name)[0] or Key.DefaultContentType
encoded_name = self._encode_name(name)
key = self.bucket.new_key(encoded_name)
if self.preload_metadata:
self._entries[encoded_name] = key
key.set_metadata('Content-Type', content_type)
if isinstance(content, basestring):
key.set_contents_from_string(
content,
headers=headers,
policy=self.acl,
reduced_redundancy=self.reduced_redundancy
)
else:
content.name = cleaned_name
key.set_contents_from_file(
content,
headers=headers,
policy=self.acl,
reduced_redundancy=self.reduced_redundancy
)
return self.open(encoded_name)
def _open(self, name, mode='r'):
return self.file_class(self, name=name, mode=mode)
def delete_folder(self, name=None):
if name is None:
name = self.folder_name
self.bucket.delete()
def delete(self, name):
name = self._encode_name(self._normalize_name(self._clean_name(name)))
if self.bucket.lookup(name) is None:
raise FileNotFoundError(name, 404)
self.bucket.delete_key(name)
def exists(self, name):
name = self._normalize_name(self._clean_name(name))
return bool(self.bucket.lookup(self._encode_name(name)))
def url(self, name):
name = self._normalize_name(self._clean_name(name))
if self.custom_domain:
return "%s://%s/%s" % ('https' if self.secure_urls else 'http',
self.custom_domain, name)
return self.connection.generate_url(
self.querystring_expire,
method='GET',
bucket=self.bucket.name,
key=self._encode_name(name),
query_auth=self.querystring_auth,
force_http=not self.secure_urls
)
@property
def file_class(self):
return S3BotoStorageFile
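# A rough usage sketch inside a Flask application context. The bucket and key
# names are illustrative, the AWS_* settings referenced in __init__ are assumed
# to be present in app.config, and it is assumed the Storage base class exposes
# save()/open() wrappers around the _save()/_open() hooks defined above:
#
#   storage = S3BotoStorage(folder_name='my-bucket')
#   if not storage.exists('greeting.txt'):
#       storage.save('greeting.txt', 'hello world')
#   print(storage.url('greeting.txt'))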
# Decorator used by S3BotoStorageFile: lazily opens the underlying boto key
# (in the file's mode) the first time a wrapped accessor is called.
def require_opening(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
if not self._is_open:
self._key.open(self._mode)
self._is_open = True
return func(self, *args, **kwargs)
return wrapper
class S3BotoStorageFile(StorageFile):
def __init__(self, storage, name=None, prefix='', mode='r'):
if mode == 'rb':
mode = 'r' # rb is not supported
self._storage = storage
self.prefix = prefix
self._key = Key(storage.bucket)
self._mode = mode
if name is not None:
self.name = name
self._pos = 0
self._is_open = False
@property
def content_type(self):
return getattr(
            self._key,
'content_type',
mimetypes.guess_type(self.name)[0] or Key.DefaultContentType
)
@property
def file(self):
return self._key
@property
@require_opening
def size(self):
return self._key.size
@property
@require_opening
def last_modified(self):
return self._key.last_modified
@property
def url(self):
return self._storage.url(self._key.name)
@StorageFile.name.setter
def name(self, value):
if self._name:
raise StorageException(
"You can't rename files this way. Use rename method instead."
)
self._name = self.prefix + self._storage._clean_name(value)
self._key.name = self._name
@require_opening
def read(self, size=0):
return self.file.read(size)
def seek(self, *args, **kw):
raise NotImplementedError
def write(self, *args, **kw):
raise NotImplementedError
| mit | 4,209,925,262,069,832,000 | -8,850,223,084,250,028,000 | 31.143345 | 78 | 0.56976 | false |
kmonsoor/pyglet | contrib/wydget/wydget/widgets/menu.py | 29 | 2966 | import sys
import xml.sax.saxutils
from pyglet.gl import *
from pyglet.window import mouse, key
from wydget import event, layouts, loadxml
from wydget.widgets.frame import Frame
from wydget.widgets.label import Label
class MenuItem(Label):
name = 'menu-item'
@event.default('menu-item')
def on_element_enter(item, x, y):
item._save_bgcolor = item.bgcolor
item.bgcolor = (.9, .9, 1, 1)
return event.EVENT_HANDLED
@event.default('menu-item')
def on_element_leave(item, x, y):
item.bgcolor = item._save_bgcolor
return event.EVENT_HANDLED
@event.default('menu-item')
def on_click(widget, *args):
menu = widget.parent
menu.hide()
return event.EVENT_HANDLED
class PopupMenu(Frame):
'''A menu that should appear under the mouse when activated.
The static method `isActivatingClick(buttons, modifiers)` may be used
to determine whether the menu should be shown.
'''
name = 'popup-menu'
is_focusable = True
def __init__(self, parent, items, **kw):
super(PopupMenu, self).__init__(parent, border="black",
is_visible=False, **kw)
for n, (label, id) in enumerate(items):
MenuItem(self, text=label, id=id, width='100%',
bgcolor=((.95, .95, .95, 1), (1, 1, 1, 1))[n%2])
self.layout = layouts.Vertical(self)
def expose(self, mouse):
w = self.getGUI().window
w, h = w.width, w.height
self.center = map(int, mouse)
if self.x < 0: self.x = 0
if self.y < 0: self.y = 0
if self.x + self.width > w: self.x = w - self.width
if self.y + self.height > h: self.y = h - self.height
self.setVisible(True)
self.gainFocus()
def hide(self):
self.setVisible(False)
@classmethod
def fromXML(cls, element, parent):
'''Create the object from the XML element and attach it to the parent.
If scrollable then put all children loaded into a container frame.
'''
kw = loadxml.parseAttributes(element)
items = []
for child in element.getchildren():
text = xml.sax.saxutils.unescape(child.text)
items.append((text, child.attrib['id']))
return cls(parent, items, **kw)
@staticmethod
def isActivatingClick(button, modifiers):
'''Determine whether the mouse button / modifiers combine to be a
popup menu activation click or not.
On all platforms a RMB click is allowed.
On OS X a control-LMB is allowed.
'''
if sys.platform == 'darwin':
if button & mouse.LEFT and modifiers & key.MOD_CTRL:
return True
return button & mouse.RIGHT
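# A hedged usage sketch: the widget id, attribute name and handler below are
# hypothetical; only isActivatingClick() and expose() come from this module.
#
#   @event.default('my-widget', 'on_mouse_press')
#   def show_context_menu(widget, x, y, button, modifiers):
#       if PopupMenu.isActivatingClick(button, modifiers):
#           widget.context_menu.expose((x, y))
#           return event.EVENT_HANDLED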
@event.default('popup-menu', 'on_gain_focus')
def on_menu_gain_focus(menu, method):
# catch focus
return event.EVENT_HANDLED
@event.default('popup-menu', 'on_lose_focus')
def on_menu_lose_focus(menu, method):
menu.hide()
return event.EVENT_HANDLED
| bsd-3-clause | -8,278,768,784,858,725,000 | -2,979,720,305,594,934,000 | 28.959596 | 78 | 0.623736 | false |
tomsilver/nupic | tests/unit/nupic/support/custom_configuration_test.py | 4 | 42107 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from copy import copy
import os
import shutil
from StringIO import StringIO
import sys
import tempfile
import unittest2 as unittest
import uuid
from mock import Mock, patch
from pkg_resources import resource_filename
from xml.parsers.expat import ExpatError
# ParseError not present in xml module for python2.6
try:
from xml.etree.ElementTree import ParseError
except ImportError:
from xml.parsers.expat import ExpatError as ParseError
import nupic
import nupic.support.configuration_custom as configuration
import configuration_test
class ConfigurationCustomTest(unittest.TestCase):
def setUp(self):
if "NTA_DYNAMIC_CONF_DIR" in os.environ:
# Remove it to make sure our in-proc tests won't accidentally
# mess with actual files
oldNtaDynamicConfDir = os.environ["NTA_DYNAMIC_CONF_DIR"]
del os.environ["NTA_DYNAMIC_CONF_DIR"]
self.addCleanup(os.environ.update,
dict(NTA_DYNAMIC_CONF_DIR=oldNtaDynamicConfDir))
self.files = dict()
tmpDir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, tmpDir)
with open(os.path.join(tmpDir, 'nupic-default.xml-unittest'), 'w') as fp:
with open(resource_filename(__name__, 'conf/nupic-default.xml')) as inp:
fp.write(inp.read())
self.files['nupic-default.xml'] = fp.name
with open(os.path.join(tmpDir, 'nupic-site.xml-unittest'), 'w') as fp:
with open(resource_filename(__name__, 'conf/nupic-site.xml')) as inp:
fp.write(inp.read())
self.files['nupic-site.xml'] = fp.name
with open(os.path.join(tmpDir, 'nupic-custom.xml'), 'w') as fp:
with open(resource_filename(__name__, 'conf/nupic-custom.xml')) as inp:
fp.write(inp.read())
self.files['nupic-custom.xml'] = fp.name
self.customParam = 'nupic.custom.hello'
self.customValue = 'world'
configuration.Configuration.clear()
####################################################################
# Custom Configuration Tests
# Todo: Share tests between two configuration_test files
####################################################################
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testCustomFileCreated(self, findConfigFile, environ):
environ.__getitem__.side_effect = dict(
NTA_DYNAMIC_CONF_DIR=os.path.dirname(self.files['nupic-custom.xml'])).get
environ.get.return_value = None
findConfigFile.side_effect = self.files.get
configuration.Configuration.clear()
configuration.Configuration.setCustomProperty('param', 'val')
self.assertTrue(os.path.exists(self.files['nupic-custom.xml']))
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testGet(self, findConfigFile, environ):
environ.get.return_value = None
findConfigFile.side_effect = self.files.get
configuration.Configuration.clear()
with open(self.files['nupic-custom.xml'], 'w') as fp:
fp.write('\n'.join((
'<?xml version="1.0"?>',
'<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>',
'<configuration>',
' <property>',
' <name>'+self.customParam+'</name>',
' <value>'+self.customValue+'</value>',
' </property>',
'</configuration>')))
self.assertEqual(
configuration.Configuration.get(self.customParam),
self.customValue)
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testSetCustomProperty(self, findConfigFile, environ):
environ.__getitem__.side_effect = dict(
NTA_DYNAMIC_CONF_DIR=os.path.dirname(self.files['nupic-custom.xml'])).get
environ.get.return_value = None
configuration.Configuration.clear()
findConfigFile.side_effect = self.files.get
with open(self.files['nupic-custom.xml'], 'w') as fp:
fp.write('\n'.join((
'<?xml version="1.0"?>',
'<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>',
'<configuration>',
' <property>',
' <name>' + self.customParam + '</name>',
' <value>' + self.customValue + '</value>',
' </property>',
'</configuration>')))
configuration.Configuration.setCustomProperty('PersistProp', 'PersistVal')
self.assertEqual(
configuration.Configuration.get('PersistProp'),'PersistVal')
configuration.Configuration.clear()
self.assertEqual(
configuration.Configuration.get('PersistProp'),'PersistVal')
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testSetCustomProperties(self, findConfigFile, environ):
environ.__getitem__.side_effect = dict(
NTA_DYNAMIC_CONF_DIR=os.path.dirname(self.files['nupic-custom.xml'])).get
environ.get.return_value = None
findConfigFile.side_effect = self.files.get
with open(self.files['nupic-custom.xml'], 'w') as fp:
fp.write('\n'.join((
'<?xml version="1.0"?>',
'<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>',
'<configuration>',
' <property>',
' <name>' + self.customParam + '</name>',
' <value>' + self.customValue + '</value>',
' </property>',
'</configuration>')))
configuration.Configuration.clear()
originalProps = copy(configuration.Configuration.dict())
configuration.Configuration.setCustomProperties(
{'PersistProp' : 'PersistVal', 'apple' : 'pear'})
expectedProps = {'PersistProp' : 'PersistVal', 'apple' : 'pear'}
expectedProps.update(originalProps)
self.assertEqual(configuration.Configuration.dict(), expectedProps)
configuration.Configuration.clear()
self.assertEqual(configuration.Configuration.dict(), expectedProps)
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testDictWithTemp(self, findConfigFile, environ):
environ.get.return_value = None
findConfigFile.side_effect = self.files.get
configuration.Configuration.clear()
with open(self.files['nupic-custom.xml'], 'w') as fp:
fp.write('\n'.join((
'<?xml version="1.0"?>',
'<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>',
'<configuration>',
' <property>',
' <name>param</name>',
' <value>value</value>',
' </property>',
' <property>',
' <name>param2</name>',
' <value>value2</value>',
' </property>',
'</configuration>')))
customDict = configuration.Configuration.dict()
self.assertTrue('param' in customDict)
self.assertTrue('param2' in customDict)
self.assertEqual(customDict['param'], 'value')
self.assertEqual(customDict['param2'], 'value2')
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testCustomConfigOverrides(self, findConfigFile, environ):
environ.get.return_value = None
findConfigFile.side_effect = self.files.get
configuration.Configuration.clear()
paramNames = configuration.Configuration.dict().keys()
customValue = 'NewValue'
with open(self.files['nupic-custom.xml'], 'w') as fp:
fp.write('\n'.join((
'<?xml version="1.0"?>',
'<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>',
'<configuration>',
' <property>',
' <name>'+paramNames[0]+'</name>',
' <value>'+customValue+'</value>',
' </property>',
'</configuration>')))
configuration.Configuration.clear()
self.assertEqual(configuration.Configuration.get(paramNames[0]), \
customValue)
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testCustomConfigDict(self, findConfigFile, environ):
environ.get.return_value = None
findConfigFile.side_effect = self.files.get
configuration.Configuration.clear()
with open(self.files['nupic-custom.xml'], 'w') as fp:
fp.write('\n'.join((
'<?xml version="1.0"?>',
'<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>',
'<configuration>',
' <property>',
' <name>CustomParam</name>',
' <value>CustomValue</value>',
' </property>',
'</configuration>')))
configuration.Configuration.clear()
self.assertEqual(configuration.Configuration.get('CustomParam'), \
'CustomValue')
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testClearInvalidFile(self, findConfigFile, environ):
environ.get.return_value = None
findConfigFile.side_effect = self.files.get
configuration.Configuration.clear()
with open(self.files['nupic-custom.xml'], 'w') as fp:
fp.write('\n'.join((
'<?xml version="1.0"?>',
'<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>',
'<foo/>')))
configuration.Configuration.clear()
with patch('sys.stderr', new_callable=StringIO):
self.assertRaises(RuntimeError, configuration.Configuration.get, 'foo')
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testSetInvalidFile(self, findConfigFile, environ):
environ.__getitem__.side_effect = dict(
NTA_DYNAMIC_CONF_DIR=os.path.dirname(self.files['nupic-custom.xml'])).get
configuration.Configuration.clear()
with open(self.files['nupic-custom.xml'], 'w') as fp:
fp.write('\n'.join((
'<?xml version="1.0"?>',
'<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>',
'<foo/>')))
with patch('sys.stderr', new_callable=StringIO):
with self.assertRaises(RuntimeError) as cm:
configuration.Configuration.setCustomProperty('foo', 'value')
self.assertIn("Expected top-level element to be 'configuration'",
cm.exception.args[0])
with open(self.files['nupic-custom.xml'], 'w') as fp:
fp.write('\n'.join(('')))
with patch('sys.stderr', new_callable=StringIO):
with self.assertRaises(RuntimeError) as cm:
configuration.Configuration.setCustomProperty('foo', 'value')
self.assertIn("File contents of custom configuration is corrupt.",
cm.exception.args[0])
# NTA_CONF_PATH is not being mocked out in this test, so we have to mock out
# findConfigFile to return the right path to the config file.
findConfigFile.return_value = self.files['nupic-custom.xml']
configuration.Configuration.resetCustomConfig()
configuration.Configuration.setCustomProperty('foo', 'value')
self.assertEqual(configuration.Configuration.getCustomDict(), {'foo': 'value'})
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testGetCustomDict(self, findConfigFile, environ):
environ.__getitem__.side_effect = dict(
NTA_DYNAMIC_CONF_DIR=os.path.dirname(self.files['nupic-custom.xml'])).get
environ.get.return_value = None
findConfigFile.side_effect = self.files.get
configuration.Configuration.clear()
with open(self.files['nupic-custom.xml'], 'w') as fp:
fp.write('\n'.join((
'<?xml version="1.0"?>',
'<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>',
'<configuration>',
' <property>',
' <name>CustomParam</name>',
' <value>CustomValue</value>',
' </property>',
'</configuration>')))
self.assertEqual(configuration.Configuration.getCustomDict(),
dict(CustomParam='CustomValue'))
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testGetCustomDictNoFile(self, findConfigFile, environ):
environ.__getitem__.side_effect = dict(
NTA_DYNAMIC_CONF_DIR=os.path.dirname(self.files['nupic-custom.xml'])).get
environ.get.return_value = None
findConfigFile.side_effect = self.files.get
configuration.Configuration.resetCustomConfig()
self.assertEqual(configuration.Configuration.getCustomDict(), dict())
del self.files['nupic-custom.xml']
###############################################
# Replicated Tests From configuration_test.py
###############################################
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testGetStringMissingRaisesKeyError(self, findConfigFileMock, environMock):
findConfigFileMock.side_effect = self.files.get
environMock.get.return_value = None
configuration.Configuration.clear()
with self.assertRaises(KeyError):
configuration.Configuration.getString(uuid.uuid1().hex)
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testGetString(self, findConfigFileMock, environMock):
environMock.get.return_value = None
findConfigFileMock.side_effect = self.files.get
configuration.Configuration.clear()
configuration.Configuration.set('foo', 'bar')
result = configuration.Configuration.getString('foo')
self.assertEqual(result, 'bar')
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testGetBoolMissingRaisesKeyError(self, findConfigFileMock, environMock):
findConfigFileMock.side_effect = self.files.get
environMock.get.return_value = None
configuration.Configuration.clear()
with self.assertRaises(KeyError):
configuration.Configuration.getBool(uuid.uuid1().hex)
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testGetBoolOutOfRangeRaisesValueError(self, findConfigFileMock,
environMock):
environMock.get.return_value = None
findConfigFileMock.side_effect = self.files.get
configuration.Configuration.clear()
configuration.Configuration.set('foobool2', '2')
with self.assertRaises(ValueError):
configuration.Configuration.getBool('foobool2')
configuration.Configuration.set('fooboolneg1', '-1')
with self.assertRaises(ValueError):
configuration.Configuration.getBool('fooboolneg1')
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testGetBool(self, findConfigFileMock, environMock):
environMock.get.return_value = None
findConfigFileMock.side_effect = self.files.get
configuration.Configuration.clear()
configuration.Configuration.set('foobool0', '0')
result = configuration.Configuration.getBool('foobool0')
self.assertEqual(result, False)
configuration.Configuration.set('foobool1', '1')
result = configuration.Configuration.getBool('foobool1')
self.assertEqual(result, True)
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testGetIntMissingRaisesKeyError(self, findConfigFileMock, environMock):
findConfigFileMock.side_effect = self.files.get
environMock.get.return_value = None
configuration.Configuration.clear()
with self.assertRaises(KeyError):
configuration.Configuration.getInt(uuid.uuid1().hex)
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testGetInt(self, findConfigFileMock, environMock):
environMock.get.return_value = None
findConfigFileMock.side_effect = self.files.get
configuration.Configuration.clear()
configuration.Configuration.set('fooint', '-127')
result = configuration.Configuration.getInt('fooint')
self.assertEqual(result, -127)
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testGetFloatMissingRaisesKeyError(self, findConfigFileMock, environMock):
findConfigFileMock.side_effect = self.files.get
environMock.get.return_value = None
configuration.Configuration.clear()
with self.assertRaises(KeyError):
configuration.Configuration.getFloat(uuid.uuid1().hex)
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testGetFloat(self, findConfigFileMock, environMock):
environMock.get.return_value = None
findConfigFileMock.side_effect = self.files.get
configuration.Configuration.clear()
configuration.Configuration.set('foofloat', '-127.65')
result = configuration.Configuration.getFloat('foofloat')
self.assertEqual(result, -127.65)
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testGetMissingReturnsNone(self, findConfigFile, environ):
findConfigFile.side_effect = self.files.get
environ.get.return_value = None
configuration.Configuration.clear()
result = configuration.Configuration.get(str(uuid.uuid4()))
self.assertTrue(result is None)
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testSetAndGet(self, findConfigFile, environ):
environ.get.return_value = None
findConfigFile.side_effect = self.files.get
configuration.Configuration.clear()
configuration.Configuration.set('foo', 'bar')
result = configuration.Configuration.get('foo')
self.assertTrue(result == 'bar')
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testDict(self, findConfigFile, environ):
environ.get.return_value = None
findConfigFile.side_effect = self.files.get
configuration.Configuration.clear()
configuration.Configuration.set('foo', 'bar')
configuration.Configuration.set('apple', 'banana')
result = configuration.Configuration.dict()
self.assertTrue(isinstance(result, dict))
self.assertTrue('foo' in result)
self.assertTrue(result['foo'] == 'bar')
self.assertTrue('apple' in result)
self.assertTrue(result['apple'] == 'banana')
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testDictReadsFilesFirstTime(self, findConfigFile,
environ): # pylint: disable=W0613
environ.get.return_value = None
findConfigFile.side_effect = self.files.get
configuration.Configuration.clear()
result = configuration.Configuration.dict()
self.assertTrue(isinstance(result, dict))
self.assertTrue(len(result) == 1, result)
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testDictReplacesKeysFromEnvironment(self, findConfigFile, environ):
environ.get.return_value = None
findConfigFile.side_effect = self.files.get
configuration.Configuration.clear()
key = str(uuid.uuid4())
env = {'NTA_CONF_PROP_' + key: 'foo'}
environ.keys.side_effect = env.keys
environ.__getitem__.side_effect = env.__getitem__
result = configuration.Configuration.dict()
self.assertTrue(key in result)
self.assertTrue(result[key] == 'foo')
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testClear(self, findConfigFile, environ):
environ.get.return_value = None
findConfigFile.side_effect = self.files.get
configuration.Configuration.clear()
configuration.Configuration.set('foo', 'bar')
configuration.Configuration.set('apple', 'banana')
self.assertTrue(configuration.Configuration.get('foo') == 'bar')
self.assertTrue(configuration.Configuration.get('apple') == 'banana')
configuration.Configuration.clear()
self.assertTrue(configuration.Configuration.get('foo') is None)
self.assertTrue(configuration.Configuration.get('apple') is None)
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testGetFromEnvironment(self, findConfigFile, environ):
findConfigFile.side_effect = self.files.get
configuration.Configuration.clear()
key = str(uuid.uuid4())
environ.get.side_effect = {'NTA_CONF_PROP_' + key: 'foo'}.get
self.assertTrue(configuration.Configuration.get(key) == 'foo')
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testReadConfigFileFromPath(self, findConfigFile, environ):
environ.get.return_value = None
findConfigFile.side_effect = self.files.get
configuration.Configuration.clear()
prefix, _, filename = self.files['nupic-default.xml'].rpartition(os.sep)
configuration.Configuration.readConfigFile(filename, prefix)
self.assertTrue(configuration.Configuration.get('dummy') == 'dummy value')
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testReadConfigFileUnexpectedElementAtRoot(self, findConfigFile, environ):
environ.get.return_value = None
findConfigFile.side_effect = self.files.get
configuration.Configuration.clear()
with open(self.files['nupic-default.xml'], 'w') as fp:
fp.write('\n'.join((
'<?xml version="1.0"?>',
'<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>',
'<foo/>')))
with patch('sys.stderr', new_callable=StringIO):
self.assertRaises(RuntimeError, configuration.Configuration.get, 'foo')
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testReadConfigFileMissingDocumentRoot(self, findConfigFile, environ):
environ.get.return_value = None
findConfigFile.side_effect = self.files.get
configuration.Configuration.clear()
with open(self.files['nupic-default.xml'], 'w') as fp:
fp.write('\n'.join((
'<?xml version="1.0"?>',
'<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>')))
with patch('sys.stderr', new_callable=StringIO):
self.assertRaises((ExpatError, ParseError), configuration.Configuration.get, 'foo')
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testReadConfigFileMissingNonPropertyConfigurationChildren(
self, findConfigFile, environ):
environ.get.return_value = None
findConfigFile.side_effect = self.files.get
configuration.Configuration.clear()
with open(self.files['nupic-default.xml'], 'w') as fp:
fp.write('\n'.join((
'<?xml version="1.0"?>',
'<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>',
'<configuration>',
' <foo>bar<baz/></foo>',
'</configuration>')))
self.assertEqual(configuration.Configuration.dict(), \
dict(dummy='dummy value'))
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testReadConfigFileEmptyValue(self, findConfigFile, environ):
environ.get.return_value = None
findConfigFile.side_effect = self.files.get
configuration.Configuration.clear()
with open(self.files['nupic-default.xml'], 'w') as fp:
fp.write('\n'.join((
'<?xml version="1.0"?>',
'<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>',
'<configuration>',
' <property>',
' <name>foo</name>',
' </property>',
'</configuration>')))
with patch('sys.stderr', new_callable=StringIO):
self.assertRaises(Exception, configuration.Configuration.get, 'foo')
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testReadConfigFileEmptyNameAndValue(self, findConfigFile, environ):
environ.get.return_value = None
findConfigFile.side_effect = self.files.get
configuration.Configuration.clear()
with open(self.files['nupic-default.xml'], 'w') as fp:
fp.write('\n'.join((
'<?xml version="1.0"?>',
'<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>',
'<configuration>',
' <property>',
' <name></name>',
' <value></value>',
' </property>',
'</configuration>')))
with patch('sys.stderr', new_callable=StringIO):
self.assertRaises(RuntimeError, configuration.Configuration.get, 'foo')
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testReadConfigFileMissingEnvVars(self, findConfigFile, environ):
environ.get.return_value = None
findConfigFile.side_effect = self.files.get
configuration.Configuration.clear()
with open(self.files['nupic-default.xml'], 'w') as fp:
fp.write('\n'.join((
'<?xml version="1.0"?>',
'<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>',
'<configuration>',
' <property>',
' <name>foo</name>',
' <value>${env.foo}</value>',
' </property>',
'</configuration>')))
with patch('sys.stderr', new_callable=StringIO):
self.assertRaises(RuntimeError, configuration.Configuration.get, 'foo')
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testReadConfigFileMalformedEnvReference(self, findConfigFile,
environ): # pylint: disable=W0613
environ.get.return_value = None
findConfigFile.side_effect = self.files.get
configuration.Configuration.clear()
with open(self.files['nupic-default.xml'], 'w') as fp:
fp.write('\n'.join((
'<?xml version="1.0"?>',
'<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>',
'<configuration>',
' <property>',
' <name>foo</name>',
' <value>${env.foo</value>',
' </property>',
'</configuration>')))
with patch('sys.stderr', new_callable=StringIO):
self.assertRaises(RuntimeError, configuration.Configuration.get, 'foo')
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testReadConfigFileEnvironmentOverride(self, findConfigFile, environ):
environ.get.return_value = None
findConfigFile.side_effect = self.files.get
configuration.Configuration.clear()
with open(self.files['nupic-default.xml'], 'w') as fp:
fp.write('\n'.join((
'<?xml version="1.0"?>',
'<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>',
'<configuration>',
' <property>',
' <name>foo</name>',
' <value>${env.NTA_CONF_PROP_foo}</value>',
' </property>',
'</configuration>')))
env = {'NTA_CONF_PROP_foo': 'bar'}
environ.__getitem__.side_effect = env.__getitem__
environ.get.side_effect = env.get
environ.__contains__.side_effect = env.__contains__
result = configuration.Configuration.get('foo')
self.assertEqual(result, 'bar')
@patch.object(configuration.Configuration, 'getConfigPaths',
spec=configuration.Configuration.getConfigPaths)
def testFindConfigFile(self, getConfigPaths):
prefix, _, filename = self.files['nupic-default.xml'].rpartition(os.sep)
def replacePaths(**_):
return [prefix]
getConfigPaths.side_effect = replacePaths
configuration.Configuration.clear()
result = configuration.Configuration.findConfigFile(filename)
self.assertTrue(result == self.files['nupic-default.xml'])
getConfigPaths.assert_called_with()
@patch.object(configuration.Configuration, 'getConfigPaths',
spec=configuration.Configuration.getConfigPaths)
def testFindConfigFileReturnsNoneForMissingFile(self, getConfigPaths):
prefix, _, _ = self.files['nupic-default.xml'].rpartition(os.sep)
def replacePaths(**_):
return [prefix]
getConfigPaths.side_effect = replacePaths
configuration.Configuration.clear()
result = configuration.Configuration.findConfigFile(str(uuid.uuid4()))
self.assertTrue(result is None)
getConfigPaths.assert_called_with()
@patch.object(configuration.Configuration, '_configPaths',
spec=configuration.Configuration._configPaths)
@patch.object(configuration.os, 'environ', spec=dict)
def testGetConfigPaths(
self, environ, configPaths): # pylint: disable=W0613
result = configuration.Configuration.getConfigPaths()
self.assertEqual(result, configPaths)
@unittest.skip('NUP-2081')
@patch.object(configuration.Configuration, '_configPaths',
spec=configuration.Configuration._configPaths)
@patch.object(configuration.os, 'environ', spec=dict)
def testGetConfigPathsForNone(
self, environ, configPaths): # pylint: disable=W0613
configuration.Configuration._configPaths = None # pylint: disable=W0212
result = configuration.Configuration.getConfigPaths()
self.assertTrue(isinstance(result, list))
self.assertEqual(result, [os.path.join(os.environ['NUPIC'],
'config', 'default')])
@patch.object(configuration.Configuration, '_configPaths',
spec=configuration.Configuration._configPaths)
@patch.object(configuration.os, 'environ', spec=dict)
def testGetConfigPathsForNoneWithNTA_CONF_PATHInEnv(
self, environ, configPaths): # pylint: disable=W0613
configuration.Configuration._configPaths = None # pylint: disable=W0212
env = {'NTA_CONF_PATH': ''}
environ.__getitem__.side_effect = env.__getitem__
environ.get.side_effect = env.get
environ.__contains__.side_effect = env.__contains__
result = configuration.Configuration.getConfigPaths()
self.assertTrue(isinstance(result, list))
self.assertEqual(len(result), 1)
self.assertEqual(result[0], env['NTA_CONF_PATH'])
def testSetConfigPathsForNoneWithNTA_CONF_PATHInEnv(self):
paths = [Mock()]
configuration.Configuration.setConfigPaths(paths)
self.assertEqual(
paths,
configuration.Configuration._configPaths) # pylint: disable=W0212
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testEmptyGetCustomDict(self, findConfigFile, environMock):
findConfigFile.side_effect = self.files.get
environMock.__getitem__.side_effect = dict(
NTA_DYNAMIC_CONF_DIR=os.path.dirname(self.files['nupic-custom.xml'])).get
configuration.Configuration.resetCustomConfig()
self.assertEqual(configuration.Configuration.getCustomDict(), dict())
@patch.object(configuration.os, 'environ', spec=dict)
@patch.object(configuration.Configuration, 'findConfigFile',
spec=configuration.Configuration.findConfigFile)
def testConfiguration(self, findConfigFile, environ):
configuration.Configuration.clear()
findConfigFile.side_effect = self.files.get
with open(self.files['nupic-default.xml'], 'w') as fp:
with open(resource_filename(__name__, 'conf/testFile1.xml')) as inp:
fp.write(inp.read())
with open(self.files['nupic-site.xml'], 'w') as fp:
with open(resource_filename(__name__, 'conf/testFile2.xml')) as inp:
fp.write(inp.read())
env = {'USER': 'foo', 'HOME': 'bar'}
environ.__getitem__.side_effect = env.__getitem__
environ.get.side_effect = env.get
environ.__contains__.side_effect = env.__contains__
environ.keys.side_effect = env.keys
# Test the resulting configuration
self.assertEqual(configuration.Configuration.get('database.host'),
'TestHost')
self.assertEqual(configuration.Configuration.get('database.password'),
'pass')
self.assertEqual(configuration.Configuration.get('database.emptypassword'),
'')
self.assertEqual(configuration.Configuration.get('database.missingfield'),
None)
self.assertEqual(configuration.Configuration.get('database.user'), 'root')
expectedValue = 'foo'
actualValue = configuration.Configuration.get(
'var.environment.standalone.user')
self.assertTrue(actualValue == expectedValue,
"expected %r, but got %r" % (expectedValue, actualValue))
expectedValue = "The user " + os.environ['USER'] + " rocks!"
actualValue = configuration.Configuration.get(
'var.environment.user.in.the.middle')
self.assertTrue(actualValue == expectedValue,
"expected %r, but got %r" % (expectedValue, actualValue))
expectedValue = ("User " + os.environ['USER'] + " and home " +
os.environ['HOME'] + " in the middle")
actualValue = configuration.Configuration.get(
'var.environment.user.and.home.in.the.middle')
self.assertTrue(actualValue == expectedValue,
"expected %r, but got %r" % (expectedValue, actualValue))
env['NTA_CONF_PROP_database_host'] = 'FooBar'
self.assertEqual(configuration.Configuration.get('database.host'), 'FooBar')
allProps = configuration.Configuration.dict()
self.assertTrue(allProps['database.host'] == 'FooBar')
del env['NTA_CONF_PROP_database_host']
environ.__getitem__.side_effect = env.__getitem__
environ.get.side_effect = env.get
environ.__contains__.side_effect = env.__contains__
# Change a property
configuration.Configuration.set('database.host', 'matrix')
self.assertEqual(configuration.Configuration.get('database.host'), 'matrix')
@patch.object(configuration.os, 'environ', spec=dict)
def testConfiguration2(self, environ):
configuration.Configuration.clear()
tmpDir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, tmpDir)
with open(os.path.join(tmpDir, 'nupic-default.xml'), 'w') as fp:
with open(resource_filename(__name__, 'conf/testFile1.xml')) as inp:
fp.write(inp.read())
with open(os.path.join(tmpDir, 'nupic-site.xml'), 'w') as fp:
with open(resource_filename(__name__, 'conf/testFile2.xml')) as inp:
fp.write(inp.read())
env = {
'USER': 'foo',
'HOME': 'bar',
'NTA_CONF_PATH': tmpDir
}
environ.__getitem__.side_effect = env.__getitem__
environ.get.side_effect = env.get
environ.__contains__.side_effect = env.__contains__
environ.keys.side_effect = env.keys
# Test the resulting configuration
self.assertEqual(configuration.Configuration.get('database.host'),
'TestHost')
self.assertEqual(configuration.Configuration.get('database.password'),
'pass')
self.assertEqual(
configuration.Configuration.get('database.emptypassword'), '')
self.assertEqual(configuration.Configuration.get('database.missingfield'),
None)
self.assertEqual(configuration.Configuration.get('database.user'), 'root')
expectedValue = 'foo'
actualValue = configuration.Configuration.get(
'var.environment.standalone.user')
self.assertEqual(actualValue, expectedValue,
"expected %r, but got %r" % (expectedValue, actualValue))
expectedValue = "The user " + os.environ['USER'] + " rocks!"
actualValue = configuration.Configuration.get(
'var.environment.user.in.the.middle')
self.assertEqual(actualValue, expectedValue,
"expected %r, but got %r" % (expectedValue, actualValue))
expectedValue = ("User " + os.environ['USER'] + " and home " +
os.environ['HOME'] + " in the middle")
actualValue = configuration.Configuration.get(
'var.environment.user.and.home.in.the.middle')
self.assertEqual(actualValue, expectedValue,
"expected %r, but got %r" % (expectedValue, actualValue))
env['NTA_CONF_PROP_database_host'] = 'FooBar'
self.assertEqual(configuration.Configuration.get('database.host'),
'FooBar')
allProps = configuration.Configuration.dict()
self.assertEqual(allProps['database.host'], 'FooBar')
del env['NTA_CONF_PROP_database_host']
# Change a property
configuration.Configuration.set('database.host', 'matrix')
self.assertEqual(configuration.Configuration.get('database.host'),
'matrix')
configuration.Configuration.clear()
tmpDir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, tmpDir)
with open(os.path.join(tmpDir, 'nupic-default.xml'), 'w') as fp:
with open(resource_filename(__name__, 'conf/testFile1.xml')) as inp:
fp.write(inp.read())
with open(os.path.join(tmpDir, 'nupic-site.xml'), 'w') as fp:
with open(resource_filename(__name__, 'conf/testFile2.xml')) as inp:
fp.write(inp.read())
tmpDir2 = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, tmpDir2)
with open(os.path.join(tmpDir2, 'nupic-site.xml'), 'w') as fp:
with open(resource_filename(__name__, 'conf/testFile3.xml')) as inp:
fp.write(inp.read())
env['NTA_CONF_PATH'] = os.pathsep.join([tmpDir, tmpDir2])
# Test the resulting configuration
self.assertEqual(configuration.Configuration.get('database.host'),
'TestHost')
self.assertEqual(configuration.Configuration.get('database.password'),
'pass')
self.assertEqual(
configuration.Configuration.get('database.emptypassword'), '')
self.assertEqual(configuration.Configuration.get('database.missingfield'),
None)
self.assertEqual(configuration.Configuration.get('database.user'),
'root')
# Change a property
configuration.Configuration.set('database.host', 'matrix')
self.assertEqual(configuration.Configuration.get('database.host'),
'matrix')
if __name__ == '__main__':
unittest.main(argv=[sys.argv[0], "--verbose"] + sys.argv[1:])
| gpl-3.0 | -8,924,739,527,963,121,000 | -5,262,250,437,263,602,000 | 40.484729 | 89 | 0.672525 | false |
jinnykoo/wuyisj.com | src/oscar/apps/dashboard/catalogue/forms.py | 5 | 16311 | from django import forms
from django.core import exceptions
from django.forms.models import inlineformset_factory
from django.utils.translation import ugettext_lazy as _
from treebeard.forms import movenodeform_factory
from oscar.core.loading import get_class, get_model
from oscar.core.utils import slugify
from oscar.forms.widgets import ImageInput
Product = get_model('catalogue', 'Product')
ProductClass = get_model('catalogue', 'ProductClass')
ProductAttribute = get_model('catalogue', 'ProductAttribute')
Category = get_model('catalogue', 'Category')
StockRecord = get_model('partner', 'StockRecord')
ProductCategory = get_model('catalogue', 'ProductCategory')
ProductImage = get_model('catalogue', 'ProductImage')
ProductRecommendation = get_model('catalogue', 'ProductRecommendation')
ProductSelect = get_class('dashboard.catalogue.widgets', 'ProductSelect')
CategoryForm = movenodeform_factory(
Category,
fields=['name', 'description', 'image'])
class ProductClassSelectForm(forms.Form):
"""
    Form which is used before creating a product to select its product class
"""
product_class = forms.ModelChoiceField(
label=_("Create a new product of type"),
empty_label=_("-- Choose type --"),
queryset=ProductClass.objects.all())
def __init__(self, *args, **kwargs):
"""
If there's only one product class, pre-select it
"""
super(ProductClassSelectForm, self).__init__(*args, **kwargs)
qs = self.fields['product_class'].queryset
if not kwargs.get('initial') and len(qs) == 1:
self.fields['product_class'].initial = qs[0]
class ProductSearchForm(forms.Form):
upc = forms.CharField(max_length=16, required=False, label=_('UPC'))
title = forms.CharField(
max_length=255, required=False, label=_('Product title'))
def clean(self):
cleaned_data = super(ProductSearchForm, self).clean()
cleaned_data['upc'] = cleaned_data['upc'].strip()
cleaned_data['title'] = cleaned_data['title'].strip()
return cleaned_data
class StockRecordForm(forms.ModelForm):
def __init__(self, product_class, user, *args, **kwargs):
        # The user kwarg is not used by the stock StockRecordForm. We pass it
# anyway in case one wishes to customise the partner queryset
self.user = user
super(StockRecordForm, self).__init__(*args, **kwargs)
# If not tracking stock, we hide the fields
if not product_class.track_stock:
del self.fields['num_in_stock']
del self.fields['low_stock_threshold']
else:
self.fields['price_excl_tax'].required = True
self.fields['num_in_stock'].required = True
class Meta:
model = StockRecord
fields = [
'partner', 'partner_sku',
'price_currency', 'price_excl_tax', 'price_retail', 'cost_price',
'num_in_stock', 'low_stock_threshold',
]
BaseStockRecordFormSet = inlineformset_factory(
Product, StockRecord, form=StockRecordForm, extra=1)
class StockRecordFormSet(BaseStockRecordFormSet):
def __init__(self, product_class, user, *args, **kwargs):
self.user = user
self.require_user_stockrecord = not user.is_staff
self.product_class = product_class
super(StockRecordFormSet, self).__init__(*args, **kwargs)
self.set_initial_data()
def set_initial_data(self):
"""
If user has only one partner associated, set the first
stock record's partner to it. Can't pre-select for staff users as
they're allowed to save a product without a stock record.
This is intentionally done after calling __init__ as passing initial
data to __init__ creates a form for each list item. So depending on
whether we can pre-select the partner or not, we'd end up with 1 or 2
forms for an unbound form.
"""
if self.require_user_stockrecord:
try:
user_partner = self.user.partners.get()
except (exceptions.ObjectDoesNotExist,
exceptions.MultipleObjectsReturned):
pass
else:
partner_field = self.forms[0].fields.get('partner', None)
if partner_field and partner_field.initial is None:
partner_field.initial = user_partner
def _construct_form(self, i, **kwargs):
kwargs['product_class'] = self.product_class
kwargs['user'] = self.user
return super(StockRecordFormSet, self)._construct_form(
i, **kwargs)
def clean(self):
"""
If the user isn't a staff user, this validation ensures that at least
        one stock record's partner is associated with one of the user's partners.
"""
if any(self.errors):
return
if self.require_user_stockrecord:
stockrecord_partners = set([form.cleaned_data.get('partner', None)
for form in self.forms])
user_partners = set(self.user.partners.all())
if not user_partners & stockrecord_partners:
raise exceptions.ValidationError(
_("At least one stock record must be set to a partner that"
" you're associated with."))
def _attr_text_field(attribute):
return forms.CharField(label=attribute.name,
required=attribute.required)
def _attr_textarea_field(attribute):
return forms.CharField(label=attribute.name,
widget=forms.Textarea(),
required=attribute.required)
def _attr_integer_field(attribute):
return forms.IntegerField(label=attribute.name,
required=attribute.required)
def _attr_boolean_field(attribute):
return forms.BooleanField(label=attribute.name,
required=attribute.required)
def _attr_float_field(attribute):
return forms.FloatField(label=attribute.name,
required=attribute.required)
def _attr_date_field(attribute):
return forms.DateField(label=attribute.name,
required=attribute.required,
widget=forms.widgets.DateInput)
def _attr_option_field(attribute):
return forms.ModelChoiceField(
label=attribute.name,
required=attribute.required,
queryset=attribute.option_group.options.all())
def _attr_multi_option_field(attribute):
return forms.ModelMultipleChoiceField(
label=attribute.name,
required=attribute.required,
queryset=attribute.option_group.options.all())
def _attr_entity_field(attribute):
    # Product entities aren't supported out-of-the-box in the ProductForm.
# There is no ModelChoiceField for generic foreign keys, and there's no
# good default behaviour anyway; offering a choice of *all* model instances
# is hardly useful.
return None
def _attr_numeric_field(attribute):
return forms.FloatField(label=attribute.name,
required=attribute.required)
def _attr_file_field(attribute):
return forms.FileField(
label=attribute.name, required=attribute.required)
def _attr_image_field(attribute):
return forms.ImageField(
label=attribute.name, required=attribute.required)
class ProductForm(forms.ModelForm):
FIELD_FACTORIES = {
"text": _attr_text_field,
"richtext": _attr_textarea_field,
"integer": _attr_integer_field,
"boolean": _attr_boolean_field,
"float": _attr_float_field,
"date": _attr_date_field,
"option": _attr_option_field,
"multi_option": _attr_multi_option_field,
"entity": _attr_entity_field,
"numeric": _attr_numeric_field,
"file": _attr_file_field,
"image": _attr_image_field,
}
class Meta:
model = Product
fields = [
'title', 'upc', 'description', 'is_discountable', 'structure']
widgets = {
'structure': forms.HiddenInput()
}
def __init__(self, product_class, data=None, parent=None, *args, **kwargs):
self.set_initial(product_class, parent, kwargs)
super(ProductForm, self).__init__(data, *args, **kwargs)
if parent:
self.instance.parent = parent
# We need to set the correct product structures explicitly to pass
# attribute validation and child product validation. Note that
# those changes are not persisted.
self.instance.structure = Product.CHILD
self.instance.parent.structure = Product.PARENT
self.delete_non_child_fields()
else:
# Only set product class for non-child products
self.instance.product_class = product_class
self.add_attribute_fields(product_class, self.instance.is_parent)
if 'title' in self.fields:
self.fields['title'].widget = forms.TextInput(
attrs={'autocomplete': 'off'})
def set_initial(self, product_class, parent, kwargs):
"""
Set initial data for the form. Sets the correct product structure
and fetches initial values for the dynamically constructed attribute
fields.
"""
if 'initial' not in kwargs:
kwargs['initial'] = {}
self.set_initial_attribute_values(product_class, kwargs)
if parent:
kwargs['initial']['structure'] = Product.CHILD
def set_initial_attribute_values(self, product_class, kwargs):
"""
Update the kwargs['initial'] value to have the initial values based on
the product instance's attributes
"""
instance = kwargs.get('instance')
if instance is None:
return
for attribute in product_class.attributes.all():
try:
value = instance.attribute_values.get(
attribute=attribute).value
except exceptions.ObjectDoesNotExist:
pass
else:
kwargs['initial']['attr_%s' % attribute.code] = value
def add_attribute_fields(self, product_class, is_parent=False):
"""
For each attribute specified by the product class, this method
dynamically adds form fields to the product form.
"""
for attribute in product_class.attributes.all():
field = self.get_attribute_field(attribute)
if field:
self.fields['attr_%s' % attribute.code] = field
# Attributes are not required for a parent product
if is_parent:
self.fields['attr_%s' % attribute.code].required = False
def get_attribute_field(self, attribute):
"""
Gets the correct form field for a given attribute type.
"""
return self.FIELD_FACTORIES[attribute.type](attribute)
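    # Illustrative note (not part of the original Oscar code): with the
    # FIELD_FACTORIES mapping above, an attribute of type "integer" is built
    # by _attr_integer_field and becomes a forms.IntegerField, an "option"
    # attribute becomes a forms.ModelChoiceField over its option group, and
    # an "entity" attribute yields None and is simply skipped.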
def delete_non_child_fields(self):
"""
Deletes any fields not needed for child products. Override this if
you want to e.g. keep the description field.
"""
for field_name in ['description', 'is_discountable']:
if field_name in self.fields:
del self.fields[field_name]
def _post_clean(self):
"""
Set attributes before ModelForm calls the product's clean method
(which it does in _post_clean), which in turn validates attributes.
"""
product_class = self.instance.get_product_class()
for attribute in product_class.attributes.all():
field_name = 'attr_%s' % attribute.code
# An empty text field won't show up in cleaned_data.
if field_name in self.cleaned_data:
value = self.cleaned_data[field_name]
setattr(self.instance.attr, attribute.code, value)
super(ProductForm, self)._post_clean()
class StockAlertSearchForm(forms.Form):
status = forms.CharField(label=_('Status'))
class ProductCategoryForm(forms.ModelForm):
class Meta:
model = ProductCategory
fields = ('category', )
BaseProductCategoryFormSet = inlineformset_factory(
Product, ProductCategory, form=ProductCategoryForm, extra=1,
can_delete=True)
class ProductCategoryFormSet(BaseProductCategoryFormSet):
def __init__(self, product_class, user, *args, **kwargs):
# This function just exists to drop the extra arguments
super(ProductCategoryFormSet, self).__init__(*args, **kwargs)
def clean(self):
if not self.instance.is_child and self.get_num_categories() == 0:
raise forms.ValidationError(
_("Stand-alone and parent products "
"must have at least one category"))
if self.instance.is_child and self.get_num_categories() > 0:
raise forms.ValidationError(
_("A child product should not have categories"))
def get_num_categories(self):
num_categories = 0
for i in range(0, self.total_form_count()):
form = self.forms[i]
if (hasattr(form, 'cleaned_data')
and form.cleaned_data.get('category', None)
and not form.cleaned_data.get('DELETE', False)):
num_categories += 1
return num_categories
class ProductImageForm(forms.ModelForm):
class Meta:
model = ProductImage
fields = ['product', 'original', 'caption']
# use ImageInput widget to create HTML displaying the
# actual uploaded image and providing the upload dialog
# when clicking on the actual image.
widgets = {
'original': ImageInput(),
}
def save(self, *args, **kwargs):
# We infer the display order of the image based on the order of the
# image fields within the formset.
kwargs['commit'] = False
obj = super(ProductImageForm, self).save(*args, **kwargs)
obj.display_order = self.get_display_order()
obj.save()
return obj
def get_display_order(self):
return self.prefix.split('-').pop()
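    # Rough sketch of the inference above (the formset prefix "images-2" is a
    # hypothetical example): a form whose prefix is "images-2" returns "2"
    # from get_display_order(), so its image is saved with display_order 2.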
BaseProductImageFormSet = inlineformset_factory(
Product, ProductImage, form=ProductImageForm, extra=2)
class ProductImageFormSet(BaseProductImageFormSet):
def __init__(self, product_class, user, *args, **kwargs):
super(ProductImageFormSet, self).__init__(*args, **kwargs)
class ProductRecommendationForm(forms.ModelForm):
class Meta:
model = ProductRecommendation
fields = ['primary', 'recommendation', 'ranking']
widgets = {
'recommendation': ProductSelect,
}
BaseProductRecommendationFormSet = inlineformset_factory(
Product, ProductRecommendation, form=ProductRecommendationForm,
extra=5, fk_name="primary")
class ProductRecommendationFormSet(BaseProductRecommendationFormSet):
def __init__(self, product_class, user, *args, **kwargs):
super(ProductRecommendationFormSet, self).__init__(*args, **kwargs)
class ProductClassForm(forms.ModelForm):
class Meta:
model = ProductClass
fields = ['name', 'requires_shipping', 'track_stock', 'options']
class ProductAttributesForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(ProductAttributesForm, self).__init__(*args, **kwargs)
        # We allow submission of the form with blank codes so that we can
        # generate them (see clean_code below).
self.fields["code"].required = False
self.fields["option_group"].help_text = _("Select an option group")
def clean_code(self):
code = self.cleaned_data.get("code")
title = self.cleaned_data.get("name")
if not code and title:
code = slugify(title)
return code
class Meta:
model = ProductAttribute
fields = ["name", "code", "type", "option_group", "required"]
ProductAttributesFormSet = inlineformset_factory(ProductClass,
ProductAttribute,
form=ProductAttributesForm,
extra=3)
| bsd-3-clause | 4,089,315,025,103,004,000 | 8,320,386,301,342,351,000 | 34.535948 | 79 | 0.620808 | false |
aninternetof/bremen | bremenenv/lib/python3.5/site-packages/pip/_vendor/__init__.py | 329 | 4670 | """
pip._vendor is for vendoring dependencies of pip to prevent needing pip to
depend on something external.
Files inside of pip._vendor should be considered immutable and should only be
updated to versions from upstream.
"""
from __future__ import absolute_import
import glob
import os.path
import sys
# Downstream redistributors which have debundled our dependencies should also
# patch this value to be true. This will trigger the additional patching
# to cause things like "six" to be available as pip.
DEBUNDLED = False
# By default, look in this directory for a bunch of .whl files which we will
# add to the beginning of sys.path before attempting to import anything. This
# is done to support downstream re-distributors like Debian and Fedora who
# wish to create their own Wheels for our dependencies to aid in debundling.
WHEEL_DIR = os.path.abspath(os.path.dirname(__file__))
# Define a small helper function to alias our vendored modules to the real ones
# if the vendored ones do not exist. The idea for this was taken from
# https://github.com/kennethreitz/requests/pull/2567.
def vendored(modulename):
vendored_name = "{0}.{1}".format(__name__, modulename)
try:
__import__(vendored_name, globals(), locals(), level=0)
except ImportError:
try:
__import__(modulename, globals(), locals(), level=0)
except ImportError:
# We can just silently allow import failures to pass here. If we
# got to this point it means that ``import pip._vendor.whatever``
# failed and so did ``import whatever``. Since we're importing this
# upfront in an attempt to alias imports, not erroring here will
# just mean we get a regular import error whenever pip *actually*
# tries to import one of these modules to use it, which actually
# gives us a better error message than we would have otherwise
# gotten.
pass
else:
sys.modules[vendored_name] = sys.modules[modulename]
base, head = vendored_name.rsplit(".", 1)
setattr(sys.modules[base], head, sys.modules[modulename])
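# Illustrative note (the module name below is just an example): vendored("six")
# first tries ``import pip._vendor.six``; if that fails but a plain
# ``import six`` succeeds, the real module is registered under the
# "pip._vendor.six" name and attached as an attribute of this package, so a
# later ``from pip._vendor import six`` keeps working.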
# If we're operating in a debundled setup, then we want to go ahead and trigger
# the aliasing of our vendored libraries as well as looking for wheels to add
# to our sys.path. This will cause all of this code to be a no-op typically
# however downstream redistributors can enable it in a consistent way across
# all platforms.
if DEBUNDLED:
# Actually look inside of WHEEL_DIR to find .whl files and add them to the
# front of our sys.path.
sys.path[:] = glob.glob(os.path.join(WHEEL_DIR, "*.whl")) + sys.path
# Actually alias all of our vendored dependencies.
vendored("cachecontrol")
vendored("colorama")
vendored("distlib")
vendored("distro")
vendored("html5lib")
vendored("lockfile")
vendored("six")
vendored("six.moves")
vendored("six.moves.urllib")
vendored("packaging")
vendored("packaging.version")
vendored("packaging.specifiers")
vendored("pkg_resources")
vendored("progress")
vendored("retrying")
vendored("requests")
vendored("requests.packages")
vendored("requests.packages.urllib3")
vendored("requests.packages.urllib3._collections")
vendored("requests.packages.urllib3.connection")
vendored("requests.packages.urllib3.connectionpool")
vendored("requests.packages.urllib3.contrib")
vendored("requests.packages.urllib3.contrib.ntlmpool")
vendored("requests.packages.urllib3.contrib.pyopenssl")
vendored("requests.packages.urllib3.exceptions")
vendored("requests.packages.urllib3.fields")
vendored("requests.packages.urllib3.filepost")
vendored("requests.packages.urllib3.packages")
vendored("requests.packages.urllib3.packages.ordered_dict")
vendored("requests.packages.urllib3.packages.six")
vendored("requests.packages.urllib3.packages.ssl_match_hostname")
vendored("requests.packages.urllib3.packages.ssl_match_hostname."
"_implementation")
vendored("requests.packages.urllib3.poolmanager")
vendored("requests.packages.urllib3.request")
vendored("requests.packages.urllib3.response")
vendored("requests.packages.urllib3.util")
vendored("requests.packages.urllib3.util.connection")
vendored("requests.packages.urllib3.util.request")
vendored("requests.packages.urllib3.util.response")
vendored("requests.packages.urllib3.util.retry")
vendored("requests.packages.urllib3.util.ssl_")
vendored("requests.packages.urllib3.util.timeout")
vendored("requests.packages.urllib3.util.url")
| mit | -6,844,445,799,450,293,000 | 2,486,728,851,620,531,700 | 42.64486 | 79 | 0.714133 | false |
atodorov/dnf | dnf/yum/history.py | 9 | 63071 | # Copyright (C) 2009, 2012-2013 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# James Antill <james@fedoraproject.org>
from __future__ import absolute_import
from __future__ import unicode_literals
from dnf.i18n import _, ucd
import time
import os, os.path
import glob
from .sqlutils import sqlite, executeSQL, sql_esc_glob
from . import misc as misc
import dnf.arch
import dnf.exceptions
import dnf.rpm.miscutils
import dnf.i18n
import functools
# Cut over for when we should just give up and load everything.
# The main problem here is not so much SQLite dying (although that happens
# at large values: http://sqlite.org/limits.html#max_variable_number) but that
# SQLite goes really slow when it gets medium sized values (much slower
# than just loading everything and filtering it in python).
PATTERNS_MAX = 8
# We have another value here because name is indexed and sqlite is _much_
# faster even at large numbers of patterns.
PATTERNS_INDEXED_MAX = 128
def _setupHistorySearchSQL(patterns=None, ignore_case=False):
"""Setup need_full and patterns for _yieldSQLDataList, also see if
we can get away with just using searchNames(). """
if patterns is None:
patterns = []
fields = ['name', 'sql_nameArch', 'sql_nameVerRelArch',
'sql_nameVer', 'sql_nameVerRel',
'sql_envra', 'sql_nevra']
need_full = False
for pat in patterns:
if misc.re_full_search_needed(pat):
need_full = True
break
pat_max = PATTERNS_MAX
if not need_full:
fields = ['name']
pat_max = PATTERNS_INDEXED_MAX
if len(patterns) > pat_max:
patterns = []
if ignore_case:
patterns = sql_esc_glob(patterns)
else:
tmp = []
need_glob = False
for pat in patterns:
if misc.re_glob(pat):
tmp.append((pat, 'glob'))
need_glob = True
else:
tmp.append((pat, '='))
if not need_full and not need_glob and patterns:
return (need_full, patterns, fields, True)
patterns = tmp
return (need_full, patterns, fields, False)
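# A hedged illustration of the helper above (example pattern only): for a
# simple name pattern such as ["yum"], nothing needs a full NEVRA parse or a
# glob, so the function returns (False, ["yum"], ['name'], True) and the
# caller can fall back to a plain indexed name lookup; patterns that need
# globbing come back as (pattern, 'glob') tuples with names=False instead.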
class _YumHistPackageYumDB(object):
""" Class to pretend to be yumdb_info for history packages. """
def __init__(self, pkg):
self._pkg = pkg
_valid_yumdb_keys = set(["command_line",
"from_repo", "from_repo_revision",
"from_repo_timestamp",
"installed_by", "changed_by",
"reason", "releasever"])
def __getattr__(self, attr):
""" Load yumdb attributes from the history sqlite. """
pkg = self._pkg
if attr.startswith('_'):
raise AttributeError("%s has no yum attribute %s" % (pkg, attr))
if attr not in self._valid_yumdb_keys:
raise AttributeError("%s has no yum attribute %s" % (pkg, attr))
val = pkg._history._load_yumdb_key(pkg, attr)
if False and val is None:
raise AttributeError("%s has no yum attribute %s" % (pkg, attr))
if val is None:
return None
val = str(val) or ""
setattr(self, attr, val)
return val
def __contains__(self, attr):
# This is faster than __iter__ and it makes things fail in a much more
# obvious way in weird FS corruption cases like: BZ 593436
x = self.get(attr)
return x is not None
def get(self, attr, default=None):
"""retrieve an add'l data obj"""
try:
res = getattr(self, attr)
except AttributeError:
return default
return res
@functools.total_ordering
class YumHistoryPackage(object):
def __init__(self, name, arch, epoch, version, release, checksum=None,
history=None):
self.name = name
self.version = version
self.release = release
self.epoch = epoch
self.arch = arch
self.pkgtup = (self.name, self.arch,
self.epoch, self.version, self.release)
if checksum is None:
self._checksums = [] # (type, checksum, id(0,1)
else:
chk = checksum.split(':')
self._checksums = [(chk[0], chk[1], 1)] # (type, checksum, id(0,1))
self.repoid = "<history>"
self._history = history
self.yumdb_info = _YumHistPackageYumDB(self)
_valid_rpmdb_keys = set(["buildtime", "buildhost",
"license", "packager",
"size", "sourcerpm", "url", "vendor",
# ?
"committer", "committime"])
def __le__(self, other):
"""Test whether the *self* is less than or equal to the *other*."""
ret = self.verCMP(other)
if ret != 0:
            return ret < 0 # less or greater
if self.arch != other.arch:
return self.arch < other.arch # less or greater
try:
self_repoid, other_repoid = self.repoid, other.repoid
except AttributeError:
return True # equal
if self_repoid == other_repoid:
return True # equal
# We want 'installed' to appear over 'abcd' and 'xyz', so boost that
if self_repoid == 'installed':
return False # greater
if other_repoid == 'installed':
return True # less
        return self_repoid < other_repoid # less or greater
@staticmethod
def __comparePoEVR(po1, po2):
"""
Compare two Package or PackageEVR objects.
"""
(e1, v1, r1) = (po1.epoch, po1.version, po1.release)
(e2, v2, r2) = (po2.epoch, po2.version, po2.release)
return dnf.rpm.miscutils.compareEVR((e1, v1, r1), (e2, v2, r2))
def __eq__(self, other):
""" Compare packages for yes/no equality, includes everything in the
UI package comparison. """
if not other:
return False
if not hasattr(other, 'pkgtup') or self.pkgtup != other.pkgtup:
return False
if hasattr(self, 'repoid') and hasattr(other, 'repoid'):
if self.repoid != other.repoid:
return False
return True
def __getattr__(self, attr):
""" Load rpmdb attributes from the history sqlite. """
if attr.startswith('_'):
raise AttributeError("%s has no attribute %s" % (self, attr))
if attr not in self._valid_rpmdb_keys:
raise AttributeError("%s has no attribute %s" % (self, attr))
val = self._history._load_rpmdb_key(self, attr)
if False and val is None:
raise AttributeError("%s has no attribute %s" % (self, attr))
if val is None:
return None
val = str(val) or ""
setattr(self, attr, val)
return val
def __ne__(self, other):
return not (self == other)
def __repr__(self):
return ("<%s : %s (%s)>" %
(self.__class__.__name__, str(self), hex(id(self))))
def __str__(self):
return self.ui_envra
@property
def envra(self):
return ('%s:%s-%s-%s.%s' %
(self.epoch, self.name, self.version, self.release, self.arch))
@property
def nevra(self):
return ('%s-%s:%s-%s.%s' %
(self.name, self.epoch, self.version, self.release, self.arch))
@property
def nvra(self):
return ('%s-%s-%s.%s' %
(self.name, self.version, self.release, self.arch))
def returnIdSum(self):
for (csumtype, csum, csumid) in self._checksums:
if csumid:
return (csumtype, csum)
@property
def ui_envra(self):
if self.epoch == '0':
return self.nvra
else:
return self.envra
def _ui_from_repo(self):
""" This reports the repo the package is from, we integrate YUMDB info.
for RPM packages so a package from "fedora" that is installed has a
ui_from_repo of "@fedora". Note that, esp. with the --releasever
option, "fedora" or "rawhide" isn't authoritative.
So we also check against the current releasever and if it is
different we also print the YUMDB releasever. This means that
            installing from F12 fedora, while later running F13, would report
            as "@fedora/12". """
if 'from_repo' in self.yumdb_info:
self._history.releasever
end = ''
if (self._history.releasever is not None and
'releasever' in self.yumdb_info and
self.yumdb_info.releasever != self._history.releasever):
end = '/' + self.yumdb_info.releasever
return '@' + self.yumdb_info.from_repo + end
return self.repoid
ui_from_repo = property(fget=lambda self: self._ui_from_repo())
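    # Sketch of the intent (repo names and versions here are made-up
    # examples): a package installed from the "fedora" repo normally shows as
    # "@fedora"; if the recorded yumdb releasever (say "12") differs from the
    # releasever this history object was created with (say "13"), it shows as
    # "@fedora/12"; packages without yumdb data fall back to self.repoid.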
@property
def ui_nevra(self):
if self.epoch == '0':
return self.nvra
else:
return self.nevra
def verCMP(self, other):
""" Compare package to another one, only rpm-version ordering. """
if not other:
return 1
if self.name != other.name:
return -1 if self.name < other.name else +1
return self.__comparePoEVR(self, other)
class YumHistoryPackageState(YumHistoryPackage):
def __init__(self, name,arch, epoch,version,release, state, checksum=None,
history=None):
YumHistoryPackage.__init__(self, name,arch, epoch,version,release,
checksum, history)
self.done = None
self.state = state
class YumHistoryRpmdbProblem(object):
""" Class representing an rpmdb problem that existed at the time of the
transaction. """
def __init__(self, history, rpid, problem, text):
self._history = history
self.rpid = rpid
self.problem = problem
self.text = text
self._loaded_P = None
def __lt__(self, other):
if other is None:
return False
if self.problem == other.problem:
return self.rpid < other.rpid
return self.problem > other.problem
def _getProbPkgs(self):
if self._loaded_P is None:
self._loaded_P = sorted(self._history._old_prob_pkgs(self.rpid))
return self._loaded_P
packages = property(fget=lambda self: self._getProbPkgs())
class YumHistoryTransaction(object):
""" Holder for a history transaction. """
def __init__(self, history, row):
self._history = history
self.tid = row[0]
self.beg_timestamp = row[1]
self.beg_rpmdbversion = row[2]
self.end_timestamp = row[3]
self.end_rpmdbversion = row[4]
self.loginuid = row[5]
self.return_code = row[6]
self._loaded_TW = None
self._loaded_TD = None
self._loaded_TS = None
self._loaded_PROB = None
self._have_loaded_CMD = False # cmdline can validly be None
self._loaded_CMD = None
self._loaded_ER = None
self._loaded_OT = None
self.altered_lt_rpmdb = None
self.altered_gt_rpmdb = None
def __lt__(self, other):
if other is None:
return False
if self.beg_timestamp == other.beg_timestamp:
if self.end_timestamp == other.end_timestamp:
return self.tid > other.tid
else:
return self.end_timestamp < other.end_timestamp
else:
return self.beg_timestamp > other.beg_timestamp
def _getTransWith(self):
if self._loaded_TW is None:
self._loaded_TW = sorted(self._history._old_with_pkgs(self.tid))
return self._loaded_TW
def _getTransData(self):
if self._loaded_TD is None:
self._loaded_TD = sorted(self._history._old_data_pkgs(self.tid))
return self._loaded_TD
def _getTransSkip(self):
if self._loaded_TS is None:
self._loaded_TS = sorted(self._history._old_skip_pkgs(self.tid))
return self._loaded_TS
trans_with = property(fget=lambda self: self._getTransWith())
trans_data = property(fget=lambda self: self._getTransData())
trans_skip = property(fget=lambda self: self._getTransSkip())
def _getProblems(self):
if self._loaded_PROB is None:
self._loaded_PROB = sorted(self._history._old_problems(self.tid))
return self._loaded_PROB
rpmdb_problems = property(fget=lambda self: self._getProblems())
def _getCmdline(self):
if not self._have_loaded_CMD:
self._have_loaded_CMD = True
self._loaded_CMD = self._history._old_cmdline(self.tid)
return self._loaded_CMD
cmdline = property(fget=lambda self: self._getCmdline())
def _getErrors(self):
if self._loaded_ER is None:
self._loaded_ER = self._history._load_errors(self.tid)
return self._loaded_ER
def _getOutput(self):
if self._loaded_OT is None:
self._loaded_OT = self._history._load_output(self.tid)
return self._loaded_OT
errors = property(fget=lambda self: self._getErrors())
output = property(fget=lambda self: self._getOutput())
class YumMergedHistoryTransaction(YumHistoryTransaction):
def __init__(self, obj):
self._merged_tids = set([obj.tid])
self._merged_objs = [obj]
self.beg_timestamp = obj.beg_timestamp
self.beg_rpmdbversion = obj.beg_rpmdbversion
self.end_timestamp = obj.end_timestamp
self.end_rpmdbversion = obj.end_rpmdbversion
self._loaded_TW = None
self._loaded_TD = None
# Hack, this is difficult ... not sure if we want to list everything
# that was skipped. Just those things which were skipped and then not
# updated later ... or nothing. Nothing is much easier.
self._loaded_TS = []
self._loaded_PROB = None
self._have_loaded_CMD = False # cmdline can validly be None
self._loaded_CMD = None
self._loaded_ER = None
self._loaded_OT = None
self.altered_lt_rpmdb = None
self.altered_gt_rpmdb = None
def _getAllTids(self):
return sorted(self._merged_tids)
tid = property(fget=lambda self: self._getAllTids())
def _getLoginUIDs(self):
ret = set((tid.loginuid for tid in self._merged_objs))
if len(ret) == 1:
return list(ret)[0]
return sorted(ret)
loginuid = property(fget=lambda self: self._getLoginUIDs())
def _getReturnCodes(self):
ret_codes = set((tid.return_code for tid in self._merged_objs))
if len(ret_codes) == 1 and 0 in ret_codes:
return 0
if 0 in ret_codes:
ret_codes.remove(0)
return sorted(ret_codes)
return_code = property(fget=lambda self: self._getReturnCodes())
def _getTransWith(self):
ret = []
filt = set()
for obj in self._merged_objs:
for pkg in obj.trans_with:
if pkg.pkgtup in filt:
continue
filt.add(pkg.pkgtup)
ret.append(pkg)
return sorted(ret)
# This is the real tricky bit, we want to "merge" so that:
# pkgA-1 => pkgA-2
# pkgA-2 => pkgA-3
# pkgB-1 => pkgB-2
# pkgB-2 => pkgB-1
# ...becomes:
# pkgA-1 => pkgA-3
# pkgB-1 => pkgB-1 (reinstall)
# ...note that we just give up if "impossible" things happen, Eg.
# pkgA-1 => pkgA-2
# pkgA-4 => pkgA-5
@staticmethod
def _p2sk(pkg, state=None):
""" Take a pkg and return the key for it's state lookup. """
if state is None:
state = pkg.state
        # Arch is needed so multilib works, ditto basearch() -- (so .i586
# => .i686 moves are seen)
return (pkg.name, dnf.arch.basearch(pkg.arch), state)
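    # For illustration only: a package with name="foo" and arch="i686" in
    # state "Update" would map to the key ("foo", "i386", "Update"), assuming
    # dnf.arch.basearch("i686") == "i386"; that is what lets an .i586 ->
    # .i686 move be merged as a single update of the same package.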
@staticmethod
def _list2dict(pkgs):
pkgtup2pkg = {}
pkgstate2pkg = {}
for pkg in pkgs:
key = YumMergedHistoryTransaction._p2sk(pkg)
pkgtup2pkg[pkg.pkgtup] = pkg
pkgstate2pkg[key] = pkg
return pkgtup2pkg, pkgstate2pkg
@staticmethod
def _conv_pkg_state(pkg, state):
npkg = YumHistoryPackageState(pkg.name, pkg.arch,
pkg.epoch,pkg.version,pkg.release, state,
history=pkg._history)
npkg._checksums = pkg._checksums
npkg.done = pkg.done
if npkg.state in dnf.history.INSTALLING_STATES:
npkg.state_installed = True
if npkg.state in dnf.history.REMOVING_STATES:
npkg.state_installed = False
return npkg
@staticmethod
def _get_pkg(sk, pkgstate2pkg):
if type(sk) != type((0,1)):
sk = YumMergedHistoryTransaction._p2sk(sk)
if sk not in pkgstate2pkg:
return None
return pkgstate2pkg[sk]
def _move_pkg(self, sk, nstate, pkgtup2pkg, pkgstate2pkg):
xpkg = self._get_pkg(sk, pkgstate2pkg)
if xpkg is None:
return
del pkgstate2pkg[self._p2sk(xpkg)]
xpkg = self._conv_pkg_state(xpkg, nstate)
pkgtup2pkg[xpkg.pkgtup] = xpkg
pkgstate2pkg[self._p2sk(xpkg)] = xpkg
def _getTransData(self):
def _get_pkg_f(sk):
return self._get_pkg(sk, fpkgstate2pkg)
def _get_pkg_n(sk):
return self._get_pkg(sk, npkgstate2pkg)
def _move_pkg_f(sk, nstate):
self._move_pkg(sk, nstate, fpkgtup2pkg, fpkgstate2pkg)
def _move_pkg_n(sk, nstate):
self._move_pkg(sk, nstate, npkgtup2pkg, npkgstate2pkg)
def _del1_n(pkg):
del npkgtup2pkg[pkg.pkgtup]
key = self._p2sk(pkg)
if key in npkgstate2pkg: # For broken rpmdbv's and installonly
del npkgstate2pkg[key]
def _del1_f(pkg):
del fpkgtup2pkg[pkg.pkgtup]
key = self._p2sk(pkg)
if key in fpkgstate2pkg: # For broken rpmdbv's and installonly
del fpkgstate2pkg[key]
def _del2(fpkg, npkg):
assert fpkg.pkgtup == npkg.pkgtup
_del1_f(fpkg)
_del1_n(npkg)
fpkgtup2pkg = {}
fpkgstate2pkg = {}
# We need to go from oldest to newest here, so we can see what happened
# in the correct chronological order.
for obj in self._merged_objs:
npkgtup2pkg, npkgstate2pkg = self._list2dict(obj.trans_data)
# Handle Erase => Install, as update/reinstall/downgrade
for key in list(fpkgstate2pkg.keys()):
(name, arch, state) = key
if state not in ('Obsoleted', 'Erase'):
continue
fpkg = fpkgstate2pkg[key]
for xstate in ('Install', 'True-Install', 'Dep-Install',
'Obsoleting'):
npkg = _get_pkg_n(self._p2sk(fpkg, xstate))
if npkg is not None:
break
else:
continue
if False: pass
elif fpkg > npkg:
_move_pkg_f(fpkg, 'Downgraded')
if xstate != 'Obsoleting':
_move_pkg_n(npkg, 'Downgrade')
elif fpkg < npkg:
_move_pkg_f(fpkg, 'Updated')
if xstate != 'Obsoleting':
_move_pkg_n(npkg, 'Update')
else:
_del1_f(fpkg)
if xstate != 'Obsoleting':
_move_pkg_n(npkg, 'Reinstall')
sametups = set(npkgtup2pkg.keys()).intersection(fpkgtup2pkg.keys())
for pkgtup in sametups:
if pkgtup not in fpkgtup2pkg or pkgtup not in npkgtup2pkg:
continue
fpkg = fpkgtup2pkg[pkgtup]
npkg = npkgtup2pkg[pkgtup]
if False: pass
elif fpkg.state == 'Reinstall':
if npkg.state in ('Reinstall', 'Erase', 'Obsoleted',
'Downgraded', 'Updated'):
_del1_f(fpkg)
elif fpkg.state in ('Obsoleted', 'Erase'):
# Should be covered by above loop which deals with
                # all good state changes.
good_states = ('Install', 'True-Install', 'Dep-Install',
'Obsoleting')
assert npkg.state not in good_states
elif fpkg.state in ('Install', 'True-Install', 'Dep-Install'):
if False: pass
elif npkg.state in ('Erase', 'Obsoleted'):
_del2(fpkg, npkg)
elif npkg.state == 'Updated':
_del2(fpkg, npkg)
# Move '*Install' state along to newer pkg. (not for
# obsoletes).
_move_pkg_n(self._p2sk(fpkg, 'Update'), fpkg.state)
elif npkg.state == 'Downgraded':
_del2(fpkg, npkg)
# Move '*Install' state along to newer pkg. (not for
# obsoletes).
_move_pkg_n(self._p2sk(fpkg, 'Downgrade'), fpkg.state)
elif fpkg.state in ('Downgrade', 'Update', 'Obsoleting'):
if False: pass
elif npkg.state == 'Reinstall':
_del1_n(npkg)
elif npkg.state in ('Erase', 'Obsoleted'):
_del2(fpkg, npkg)
# Move 'Erase'/'Obsoleted' state to orig. pkg.
_move_pkg_f(self._p2sk(fpkg, 'Updated'), npkg.state)
_move_pkg_f(self._p2sk(fpkg, 'Downgraded'), npkg.state)
elif npkg.state in ('Downgraded', 'Updated'):
xfpkg = _get_pkg_f(self._p2sk(fpkg, 'Updated'))
if xfpkg is None:
xfpkg = _get_pkg_f(self._p2sk(fpkg, 'Downgraded'))
if xfpkg is None:
if fpkg.state != 'Obsoleting':
continue
# Was an Install*/Reinstall with Obsoletes
xfpkg = fpkg
xnpkg = _get_pkg_n(self._p2sk(npkg, 'Update'))
if xnpkg is None:
xnpkg = _get_pkg_n(self._p2sk(npkg, 'Downgrade'))
if xnpkg is None:
xnpkg = _get_pkg_n(self._p2sk(npkg, 'Obsoleting'))
if xnpkg is None:
continue
# Now we have 4 pkgs, f1, f2, n1, n2, and 3 pkgtups
# f2.pkgtup == n1.pkgtup. So we need to find out if
# f1 => n2 is an Update or a Downgrade.
_del2(fpkg, npkg)
if xfpkg == xnpkg:
nfstate = 'Reinstall'
if 'Obsoleting' in (fpkg.state, xnpkg.state):
nfstate = 'Obsoleting'
if xfpkg != fpkg:
_move_pkg_f(xfpkg, nfstate)
_del1_n(xnpkg)
elif xfpkg < xnpkg:
# Update...
nfstate = 'Updated'
nnstate = 'Update'
if 'Obsoleting' in (fpkg.state, xnpkg.state):
nnstate = 'Obsoleting'
if xfpkg != fpkg:
_move_pkg_f(xfpkg, nfstate)
_move_pkg_n(xnpkg, nnstate)
else:
# Downgrade...
nfstate = 'Downgraded'
nnstate = 'Downgrade'
if 'Obsoleting' in (fpkg.state, xnpkg.state):
nnstate = 'Obsoleting'
if xfpkg != fpkg:
_move_pkg_f(xfpkg, nfstate)
_move_pkg_n(xnpkg, nnstate)
for x in npkgtup2pkg:
fpkgtup2pkg[x] = npkgtup2pkg[x]
for x in npkgstate2pkg:
fpkgstate2pkg[x] = npkgstate2pkg[x]
return sorted(fpkgtup2pkg.values())
def _getProblems(self):
probs = set()
for tid in self._merged_objs:
for prob in tid.rpmdb_problems:
probs.add(prob)
return sorted(probs)
def _getCmdline(self):
cmdlines = []
for tid in self._merged_objs:
if not tid.cmdline:
continue
if cmdlines and cmdlines[-1] == tid.cmdline:
continue
cmdlines.append(tid.cmdline)
if not cmdlines:
return None
return cmdlines
def _getErrors(self):
ret = []
for obj in self._merged_objs:
ret.extend(obj.errors)
return ret
def _getOutput(self):
ret = []
for obj in self._merged_objs:
ret.extend(obj.output)
return ret
def merge(self, obj):
if obj.tid in self._merged_tids:
return # Already done, signal an error?
self._merged_tids.add(obj.tid)
self._merged_objs.append(obj)
# Oldest first...
self._merged_objs.sort(reverse=True)
if self.beg_timestamp > obj.beg_timestamp:
self.beg_timestamp = obj.beg_timestamp
self.beg_rpmdbversion = obj.beg_rpmdbversion
if self.end_timestamp < obj.end_timestamp:
self.end_timestamp = obj.end_timestamp
self.end_rpmdbversion = obj.end_rpmdbversion
class YumHistory(object):
""" API for accessing the history sqlite data. """
def __init__(self, db_path, yumdb, root='/', releasever=None):
self._conn = None
self.conf = misc.GenericHolder()
if not os.path.normpath(db_path).startswith(root):
self.conf.db_path = os.path.normpath(root + '/' + db_path)
else:
self.conf.db_path = os.path.normpath('/' + db_path)
self.conf.writable = False
self.conf.readable = True
self.yumdb = yumdb
self.releasever = releasever
if not os.path.exists(self.conf.db_path):
try:
os.makedirs(self.conf.db_path)
except (IOError, OSError) as e:
error = dnf.i18n.ucd(e)
msg = _("Unable to initialize DNF DB history: %s") % error
raise dnf.exceptions.Error(msg)
else:
self.conf.writable = True
else:
if os.access(self.conf.db_path, os.W_OK):
self.conf.writable = True
DBs = glob.glob('%s/history-*-*-*.sqlite' % self.conf.db_path)
self._db_file = None
for d in reversed(sorted(DBs)):
fname = os.path.basename(d)
fname = fname[len("history-"):-len(".sqlite")]
pieces = fname.split('-', 4)
if len(pieces) != 3:
continue
try:
for piece in pieces:
int(piece)
except ValueError:
continue
self._db_date = '%s-%s-%s' % (pieces[0], pieces[1], pieces[2])
self._db_file = d
break
if self._db_file is None:
self._create_db_file()
# make an addon path for where we're going to stick
# random additional history info - probably from plugins and what-not
self.conf.addon_path = self.conf.db_path + '/' + self._db_date
if not os.path.exists(self.conf.addon_path):
try:
os.makedirs(self.conf.addon_path)
except (IOError, OSError) as e:
# some sort of useful thing here? A warning?
return
else:
if os.access(self.conf.addon_path, os.W_OK):
self.conf.writable = True
def __del__(self):
self.close()
def _get_cursor(self):
if self._conn is None:
if not self.conf.readable:
return None
try:
self._conn = sqlite.connect(self._db_file)
except (sqlite.OperationalError, sqlite.DatabaseError):
self.conf.readable = False
return None
# Note that this is required due to changing the history DB in the
# callback for removed txmbrs ... which happens inside the chroot,
# as against all our other access which is outside the chroot. So
# we need sqlite to not open the journal.
# In theory this sucks, as history could be shared. In reality
# it's deep yum stuff and there should only be one yum.
executeSQL(self._conn.cursor(), "PRAGMA locking_mode = EXCLUSIVE")
return self._conn.cursor()
def _commit(self):
return self._conn.commit()
def _rollback(self):
return self._conn.rollback()
def close(self):
if self._conn is not None:
self._conn.close()
self._conn = None
def _pkgtup2pid(self, pkgtup, checksum=None, create=True):
cur = self._get_cursor()
executeSQL(cur, """SELECT pkgtupid, checksum FROM pkgtups
WHERE name=? AND arch=? AND
epoch=? AND version=? AND release=?""", pkgtup)
for sql_pkgtupid, sql_checksum in cur:
if checksum is None and sql_checksum is None:
return sql_pkgtupid
if checksum is None:
continue
if sql_checksum is None:
continue
if checksum == sql_checksum:
return sql_pkgtupid
if not create:
return None
pkgtup = map(ucd, pkgtup)
(n,a,e,v,r) = pkgtup
if checksum is not None:
res = executeSQL(cur,
"""INSERT INTO pkgtups
(name, arch, epoch, version, release, checksum)
VALUES (?, ?, ?, ?, ?, ?)""", (n,a,e,v,r,
checksum))
else:
res = executeSQL(cur,
"""INSERT INTO pkgtups
(name, arch, epoch, version, release)
VALUES (?, ?, ?, ?, ?)""", (n,a,e,v,r))
return cur.lastrowid
def _apkg2pid(self, po, create=True):
csum = po.returnIdSum()
if csum is not None:
csum = "%s:%s" % (str(csum[0]), str(csum[1]))
return self._pkgtup2pid(po.pkgtup, csum, create)
def _ipkg2pid(self, po, create=True):
csum = None
yumdb = self.yumdb.get_package(po)
if 'checksum_type' in yumdb and 'checksum_data' in yumdb:
csum = "%s:%s" % (yumdb.checksum_type, yumdb.checksum_data)
return self._pkgtup2pid(po.pkgtup, csum, create)
def _hpkg2pid(self, po, create=False):
return self._apkg2pid(po, create)
def pkg2pid(self, po, create=True):
if isinstance(po, YumHistoryPackage):
return self._hpkg2pid(po, create)
if po.from_system:
return self._ipkg2pid(po, create)
return self._apkg2pid(po, create)
def trans_with_pid(self, pid):
cur = self._get_cursor()
if cur is None:
return None
res = executeSQL(cur,
"""INSERT INTO trans_with_pkgs
(tid, pkgtupid)
VALUES (?, ?)""", (self._tid, pid))
return cur.lastrowid
def trans_skip_pid(self, pid):
cur = self._get_cursor()
if cur is None or not self._update_db_file_2():
return None
res = executeSQL(cur,
"""INSERT INTO trans_skip_pkgs
(tid, pkgtupid)
VALUES (?, ?)""", (self._tid, pid))
return cur.lastrowid
def trans_data_pid_beg(self, pid, state):
assert state is not None
if not hasattr(self, '_tid') or state is None:
return # Not configured to run
cur = self._get_cursor()
if cur is None:
return # Should never happen, due to above
res = executeSQL(cur,
"""INSERT INTO trans_data_pkgs
(tid, pkgtupid, state)
VALUES (?, ?, ?)""", (self._tid, pid, state))
return cur.lastrowid
def trans_data_pid_end(self, pid, state):
# State can be none here, Eg. TS_FAILED from rpmtrans
if not hasattr(self, '_tid') or state is None:
return # Not configured to run
cur = self._get_cursor()
if cur is None:
return # Should never happen, due to above
res = executeSQL(cur,
"""UPDATE trans_data_pkgs SET done = ?
WHERE tid = ? AND pkgtupid = ? AND state = ?
""", ('TRUE', self._tid, pid, state))
self._commit()
def _trans_rpmdb_problem(self, problem):
if not hasattr(self, '_tid'):
return # Not configured to run
cur = self._get_cursor()
if cur is None or not self._update_db_file_2():
return None
# str(problem) doesn't work if problem contains unicode(),
uproblem = ucd(problem)
res = executeSQL(cur,
"""INSERT INTO trans_rpmdb_problems
(tid, problem, msg)
VALUES (?, ?, ?)""", (self._tid,
problem.problem,
uproblem))
rpid = cur.lastrowid
if not rpid:
return rpid
pkgs = {}
pkg = problem.pkg
pkgs[pkg.pkgtup] = pkg
if problem.problem == 'conflicts':
for pkg in problem.conflicts:
pkgs[pkg.pkgtup] = pkg
if problem.problem == 'duplicates':
pkgs[problem.duplicate.pkgtup] = problem.duplicate
for pkg in pkgs.values():
pid = self.pkg2pid(pkg)
if pkg.pkgtup == problem.pkg.pkgtup:
main = 'TRUE'
else:
main = 'FALSE'
res = executeSQL(cur,
"""INSERT INTO trans_prob_pkgs
(rpid, pkgtupid, main)
VALUES (?, ?, ?)""", (rpid, pid, main))
return rpid
def _trans_cmdline(self, cmdline):
if not hasattr(self, '_tid'):
return # Not configured to run
cur = self._get_cursor()
if cur is None or not self._update_db_file_2():
return None
res = executeSQL(cur,
"""INSERT INTO trans_cmdline
(tid, cmdline)
VALUES (?, ?)""", (self._tid, ucd(cmdline)))
return cur.lastrowid
def beg(self, rpmdb_version, using_pkgs, tsis, skip_packages=[],
rpmdb_problems=[], cmdline=None):
cur = self._get_cursor()
if cur is None:
return
res = executeSQL(cur,
"""INSERT INTO trans_beg
(timestamp, rpmdb_version, loginuid)
VALUES (?, ?, ?)""", (int(time.time()),
str(rpmdb_version),
misc.getloginuid()))
self._tid = cur.lastrowid
for pkg in using_pkgs:
pid = self._ipkg2pid(pkg)
self.trans_with_pid(pid)
for tsi in tsis:
for (pkg, state) in tsi.history_iterator():
pid = self.pkg2pid(pkg)
self.trans_data_pid_beg(pid, state)
for pkg in skip_packages:
pid = self.pkg2pid(pkg)
self.trans_skip_pid(pid)
for problem in rpmdb_problems:
self._trans_rpmdb_problem(problem)
if cmdline:
self._trans_cmdline(cmdline)
self._commit()
def _log_errors(self, errors):
cur = self._get_cursor()
if cur is None:
return
for error in errors:
error = ucd(error)
executeSQL(cur,
"""INSERT INTO trans_error
(tid, msg) VALUES (?, ?)""", (self._tid, error))
self._commit()
def log_scriptlet_output(self, msg):
if msg is None or not hasattr(self, '_tid'):
return # Not configured to run
cur = self._get_cursor()
if cur is None:
return # Should never happen, due to above
for error in msg.splitlines():
error = ucd(error)
executeSQL(cur,
"""INSERT INTO trans_script_stdout
(tid, line) VALUES (?, ?)""", (self._tid, error))
self._commit()
def _load_errors(self, tid):
cur = self._get_cursor()
executeSQL(cur,
"""SELECT msg FROM trans_error
WHERE tid = ?
ORDER BY mid ASC""", (tid,))
ret = []
for row in cur:
ret.append(row[0])
return ret
def _load_output(self, tid):
cur = self._get_cursor()
executeSQL(cur,
"""SELECT line FROM trans_script_stdout
WHERE tid = ?
ORDER BY lid ASC""", (tid,))
ret = []
for row in cur:
ret.append(row[0])
return ret
def end(self, rpmdb_version, return_code, errors=None):
assert return_code or not errors
if not hasattr(self, '_tid'):
return # Failed at beg() time
cur = self._get_cursor()
if cur is None:
return # Should never happen, due to above
res = executeSQL(cur,
"""INSERT INTO trans_end
(tid, timestamp, rpmdb_version, return_code)
VALUES (?, ?, ?, ?)""", (self._tid,int(time.time()),
str(rpmdb_version),
return_code))
self._commit()
if not return_code:
            # Simple hack: if the transaction finished, mark everything as
            # done. Note that this
# catches the erase cases (as we still don't get pkgtups for them),
# Eg. Updated elements.
executeSQL(cur,
"""UPDATE trans_data_pkgs SET done = ?
WHERE tid = ?""", ('TRUE', self._tid,))
self._commit()
if errors is not None:
self._log_errors(errors)
del self._tid
def write_addon_data(self, dataname, data):
"""append data to an arbitrary-named file in the history
addon_path/transaction id location,
returns True if write succeeded, False if not"""
if not hasattr(self, '_tid'):
# maybe we should raise an exception or a warning here?
return False
if not dataname:
return False
if not data:
return False
# make sure the tid dir exists
tid_dir = self.conf.addon_path + '/' + str(self._tid)
if self.conf.writable and not os.path.exists(tid_dir):
try:
os.makedirs(tid_dir, mode=0o700)
except (IOError, OSError) as e:
# emit a warning/raise an exception?
return False
# cleanup dataname
safename = dataname.replace('/', '_')
data_fn = tid_dir + '/' + safename
try:
            # open file for writing
fo = open(data_fn, 'wb+')
# write data
fo.write(data.encode('utf-8'))
# flush data
fo.flush()
fo.close()
except (IOError, OSError) as e:
return False
# return
return True
def return_addon_data(self, tid, item=None):
hist_and_tid = self.conf.addon_path + '/' + str(tid) + '/'
addon_info = glob.glob(hist_and_tid + '*')
addon_names = [ i.replace(hist_and_tid, '') for i in addon_info ]
if not item:
return addon_names
if item not in addon_names:
# XXX history needs SOME kind of exception, or warning, I think?
return None
fo = open(hist_and_tid + item, 'r')
data = fo.read()
fo.close()
return data
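    # A minimal usage sketch for the addon-data pair above (the "my-plugin"
    # name and payload are hypothetical; write_addon_data only works while a
    # transaction is open, i.e. while self._tid exists):
    #
    #   history.write_addon_data('my-plugin', 'some text\n')
    #   history.return_addon_data(tid)                # -> ['my-plugin', ...]
    #   history.return_addon_data(tid, 'my-plugin')   # -> 'some text\n'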
def _old_with_pkgs(self, tid):
cur = self._get_cursor()
executeSQL(cur,
"""SELECT name, arch, epoch, version, release, checksum
FROM trans_with_pkgs JOIN pkgtups USING(pkgtupid)
WHERE tid = ?
ORDER BY name ASC, epoch ASC""", (tid,))
ret = []
for row in cur:
obj = YumHistoryPackage(row[0],row[1],row[2],row[3],row[4], row[5],
history=self)
ret.append(obj)
return ret
def _old_data_pkgs(self, tid, sort=True):
cur = self._get_cursor()
sql = """SELECT name, arch, epoch, version, release,
checksum, done, state
FROM trans_data_pkgs JOIN pkgtups USING(pkgtupid)
WHERE tid = ?"""
if sort:
sql = " ".join((sql, "ORDER BY name ASC, epoch ASC, state DESC"))
executeSQL(cur, sql, (tid,))
ret = []
for row in cur:
obj = YumHistoryPackageState(row[0],row[1],row[2],row[3],row[4],
row[7], row[5], history=self)
obj.done = row[6] == 'TRUE'
obj.state_installed = None
if obj.state in dnf.history.INSTALLING_STATES:
obj.state_installed = True
if obj.state in dnf.history.REMOVING_STATES:
obj.state_installed = False
ret.append(obj)
return ret
def _old_skip_pkgs(self, tid):
cur = self._get_cursor()
if cur is None or not self._update_db_file_2():
return []
executeSQL(cur,
"""SELECT name, arch, epoch, version, release, checksum
FROM trans_skip_pkgs JOIN pkgtups USING(pkgtupid)
WHERE tid = ?
ORDER BY name ASC, epoch ASC""", (tid,))
ret = []
for row in cur:
obj = YumHistoryPackage(row[0],row[1],row[2],row[3],row[4], row[5],
history=self)
ret.append(obj)
return ret
def _old_prob_pkgs(self, rpid):
cur = self._get_cursor()
if cur is None or not self._update_db_file_2():
return []
executeSQL(cur,
"""SELECT name, arch, epoch, version, release, checksum, main
FROM trans_prob_pkgs JOIN pkgtups USING(pkgtupid)
WHERE rpid = ?
ORDER BY name ASC, epoch ASC""", (rpid,))
ret = []
for row in cur:
obj = YumHistoryPackage(row[0],row[1],row[2],row[3],row[4], row[5],
history=self)
obj.main = row[6] == 'TRUE'
ret.append(obj)
return ret
def _old_problems(self, tid):
cur = self._get_cursor()
if cur is None or not self._update_db_file_2():
return []
executeSQL(cur,
"""SELECT rpid, problem, msg
FROM trans_rpmdb_problems
WHERE tid = ?
ORDER BY problem ASC, rpid ASC""", (tid,))
ret = []
for row in cur:
obj = YumHistoryRpmdbProblem(self, row[0], row[1], row[2])
ret.append(obj)
return ret
def _old_cmdline(self, tid):
cur = self._get_cursor()
if cur is None or not self._update_db_file_2():
return None
executeSQL(cur,
"""SELECT cmdline
FROM trans_cmdline
WHERE tid = ?""", (tid,))
ret = []
for row in cur:
return row[0]
return None
def old(self, tids=[], limit=None, complete_transactions_only=False):
""" Return a list of the last transactions, note that this includes
partial transactions (ones without an end transaction). """
cur = self._get_cursor()
if cur is None:
return []
sql = """SELECT tid,
trans_beg.timestamp AS beg_ts,
trans_beg.rpmdb_version AS beg_rv,
trans_end.timestamp AS end_ts,
trans_end.rpmdb_version AS end_rv,
loginuid, return_code
FROM trans_beg JOIN trans_end USING(tid)"""
# NOTE: sqlite doesn't do OUTER JOINs ... *sigh*. So we have to do it
        # ourselves.
if not complete_transactions_only:
sql = """SELECT tid,
trans_beg.timestamp AS beg_ts,
trans_beg.rpmdb_version AS beg_rv,
NULL, NULL,
loginuid, NULL
FROM trans_beg"""
params = None
if tids and len(tids) <= PATTERNS_INDEXED_MAX:
params = tids = list(set(tids))
sql += " WHERE tid IN (%s)" % ", ".join(['?'] * len(tids))
# This relies on the fact that the PRIMARY KEY in sqlite will always
# increase with each transaction. In theory we can use:
# ORDER BY beg_ts DESC ... except sometimes people do installs with a
# system clock that is very broken, and using that screws them forever.
sql += " ORDER BY tid DESC"
if limit is not None:
sql += " LIMIT " + str(limit)
executeSQL(cur, sql, params)
ret = []
tid2obj = {}
for row in cur:
if tids and len(tids) > PATTERNS_INDEXED_MAX:
if row[0] not in tids:
continue
obj = YumHistoryTransaction(self, row)
tid2obj[row[0]] = obj
ret.append(obj)
sql = """SELECT tid,
trans_end.timestamp AS end_ts,
trans_end.rpmdb_version AS end_rv,
return_code
FROM trans_end"""
params = list(tid2obj.keys())
if len(params) > PATTERNS_INDEXED_MAX:
executeSQL(cur, sql)
else:
sql += " WHERE tid IN (%s)" % ", ".join(['?'] * len(params))
executeSQL(cur, sql, params)
for row in cur:
if row[0] not in tid2obj:
continue
tid2obj[row[0]].end_timestamp = row[1]
tid2obj[row[0]].end_rpmdbversion = row[2]
tid2obj[row[0]].return_code = row[3]
# Go through backwards, and see if the rpmdb versions match
las = None
for obj in reversed(ret):
cur_rv = obj.beg_rpmdbversion
las_rv = None
if las is not None:
las_rv = las.end_rpmdbversion
if las_rv is None or cur_rv is None or (las.tid + 1) != obj.tid:
pass
elif las_rv != cur_rv:
obj.altered_lt_rpmdb = True
las.altered_gt_rpmdb = True
else:
obj.altered_lt_rpmdb = False
las.altered_gt_rpmdb = False
las = obj
return ret
def last(self, complete_transactions_only=True):
""" This is the last full transaction. So any incomplete transactions
do not count, by default. """
ret = self.old([], 1, complete_transactions_only)
if not ret:
return None
assert len(ret) == 1
return ret[0]
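    # Hedged usage sketch (assumes `history` is an already-constructed
    # YumHistory instance):
    #
    #   last = history.last()           # newest complete transaction, or None
    #   recent = history.old(limit=20)  # up to 20 most recent transactions,
    #                                   # incomplete ones included by default
    #   for trans in recent:
    #       print(trans.tid, trans.beg_timestamp, trans.return_code)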
def _load_anydb_key(self, pkg, db, attr):
cur = self._get_cursor()
if cur is None or not self._update_db_file_3():
return None
pid = self.pkg2pid(pkg, create=False)
if pid is None:
return None
sql = """SELECT %(db)sdb_val FROM pkg_%(db)sdb
WHERE pkgtupid=? and %(db)sdb_key=? """ % {'db' : db}
executeSQL(cur, sql, (pid, attr))
for row in cur:
return row[0]
return None
def _load_rpmdb_key(self, pkg, attr):
return self._load_anydb_key(pkg, "rpm", attr)
def _load_yumdb_key(self, pkg, attr):
return self._load_anydb_key(pkg, "yum", attr)
def _save_anydb_key(self, pkg, db, attr, val):
cur = self._get_cursor()
if cur is None or not self._update_db_file_3():
return None
pid = self.pkg2pid(pkg, create=False)
if pid is None:
return None
sql = """INSERT INTO pkg_%(db)sdb (pkgtupid, %(db)sdb_key, %(db)sdb_val)
VALUES (?, ?, ?)""" % {'db' : db}
executeSQL(cur, sql, (pid, attr, ucd(val)))
return cur.lastrowid
def _save_rpmdb(self, ipkg):
""" Save all the data for rpmdb for this installed pkg, assumes
there is no data currently. """
for attr in YumHistoryPackage._valid_rpmdb_keys:
val = getattr(ipkg, attr, None)
if val is None:
continue
if not self._save_anydb_key(ipkg, "rpm", attr, val):
return False
return True
def _save_yumdb(self, ipkg):
""" Save all the data for yumdb for this installed pkg, assumes
there is no data currently. """
yumdb_info = self.yumdb.get_package(ipkg)
for attr in _YumHistPackageYumDB._valid_yumdb_keys:
val = yumdb_info.get(attr)
if val is None:
continue
if not self._save_anydb_key(ipkg, "yum", attr, val):
return False
return True
def _wipe_anydb(self, pkg, db):
""" Delete all the data for rpmdb/yumdb for this installed pkg. """
cur = self._get_cursor()
if cur is None or not self._update_db_file_3():
return False
pid = self.pkg2pid(pkg, create=False)
if pid is None:
return False
sql = """DELETE FROM pkg_%(db)sdb WHERE pkgtupid=?""" % {'db' : db}
executeSQL(cur, sql, (pid,))
return True
def sync_alldb(self, ipkg):
""" Sync. all the data for rpmdb/yumdb for this installed pkg. """
if not self._wipe_anydb(ipkg, "rpm"):
return False
if not (self._wipe_anydb(ipkg, "yum") and
self._save_rpmdb(ipkg) and
self._save_yumdb(ipkg)):
self._rollback()
return False
self._commit()
return True
def _pkg_stats(self):
""" Some stats about packages in the DB. """
ret = {'nevrac' : 0,
'nevra' : 0,
'nevr' : 0,
'na' : 0,
'rpmdb' : 0,
'yumdb' : 0,
}
cur = self._get_cursor()
if cur is None or not self._update_db_file_3():
return False
data = (('nevrac', "COUNT(*)", "pkgtups"),
('na', "COUNT(DISTINCT(name || arch))", "pkgtups"),
('nevra',"COUNT(DISTINCT(name||version||epoch||release||arch))",
"pkgtups"),
('nevr', "COUNT(DISTINCT(name||version||epoch||release))",
"pkgtups"),
('rpmdb', "COUNT(DISTINCT(pkgtupid))", "pkg_rpmdb"),
('yumdb', "COUNT(DISTINCT(pkgtupid))", "pkg_yumdb"))
for key, bsql, esql in data:
executeSQL(cur, "SELECT %s FROM %s" % (bsql, esql))
for row in cur:
ret[key] = row[0]
return ret
def _yieldSQLDataList(self, patterns, fields, ignore_case):
"""Yields all the package data for the given params. """
cur = self._get_cursor()
qsql = _FULL_PARSE_QUERY_BEG
pat_sqls = []
pat_data = []
for (pattern, rest) in patterns:
for field in fields:
if ignore_case:
pat_sqls.append("%s LIKE ?%s" % (field, rest))
else:
pat_sqls.append("%s %s ?" % (field, rest))
pat_data.append(pattern)
assert pat_sqls
qsql += " OR ".join(pat_sqls)
executeSQL(cur, qsql, pat_data)
for x in cur:
yield x
def search(self, patterns, ignore_case=True):
""" Search for history transactions which contain specified
            packages a la "yum list". Returns transaction ids. """
# Search packages ... kind of sucks that it's search not list, pkglist?
cur = self._get_cursor()
if cur is None:
return set()
data = _setupHistorySearchSQL(patterns, ignore_case)
(need_full, npatterns, fields, names) = data
ret = []
pkgtupids = set()
if npatterns:
for row in self._yieldSQLDataList(npatterns, fields, ignore_case):
pkgtupids.add(row[0])
else:
# Too many patterns, *sigh*
pat_max = PATTERNS_MAX
if not need_full:
pat_max = PATTERNS_INDEXED_MAX
for npatterns in misc.seq_max_split(patterns, pat_max):
data = _setupHistorySearchSQL(npatterns, ignore_case)
(need_full, nps, fields, names) = data
assert nps
for row in self._yieldSQLDataList(nps, fields, ignore_case):
pkgtupids.add(row[0])
sql = """SELECT tid FROM trans_data_pkgs WHERE pkgtupid IN """
sql += "(%s)" % ",".join(['?'] * len(pkgtupids))
params = list(pkgtupids)
tids = set()
if len(params) > PATTERNS_INDEXED_MAX:
executeSQL(cur, """SELECT tid FROM trans_data_pkgs""")
for row in cur:
if row[0] in params:
tids.add(row[0])
return tids
if not params:
return tids
executeSQL(cur, sql, params)
for row in cur:
tids.add(row[0])
return tids
_update_ops_3 = ['''\
\
CREATE TABLE pkg_rpmdb (
pkgtupid INTEGER NOT NULL REFERENCES pkgtups,
rpmdb_key TEXT NOT NULL,
rpmdb_val TEXT NOT NULL);
''', '''\
CREATE INDEX i_pkgkey_rpmdb ON pkg_rpmdb (pkgtupid, rpmdb_key);
''', '''\
CREATE TABLE pkg_yumdb (
pkgtupid INTEGER NOT NULL REFERENCES pkgtups,
yumdb_key TEXT NOT NULL,
yumdb_val TEXT NOT NULL);
''', '''\
CREATE INDEX i_pkgkey_yumdb ON pkg_yumdb (pkgtupid, yumdb_key);
''']
# pylint: disable-msg=E0203
def _update_db_file_3(self):
""" Update to version 3 of history, rpmdb/yumdb data. """
if not self._update_db_file_2():
return False
if hasattr(self, '_cached_updated_3'):
return self._cached_updated_3
cur = self._get_cursor()
if cur is None:
return False
executeSQL(cur, "PRAGMA table_info(pkg_yumdb)")
# If we get anything, we're fine. There might be a better way of
# saying "anything" but this works.
for ob in cur:
break
else:
for op in self._update_ops_3:
cur.execute(op)
self._commit()
self._cached_updated_3 = True
return True
_update_ops_2 = ['''\
\
CREATE TABLE trans_skip_pkgs (
tid INTEGER NOT NULL REFERENCES trans_beg,
pkgtupid INTEGER NOT NULL REFERENCES pkgtups);
''', '''\
\
CREATE TABLE trans_cmdline (
tid INTEGER NOT NULL REFERENCES trans_beg,
cmdline TEXT NOT NULL);
''', '''\
\
CREATE TABLE trans_rpmdb_problems (
rpid INTEGER PRIMARY KEY,
tid INTEGER NOT NULL REFERENCES trans_beg,
problem TEXT NOT NULL, msg TEXT NOT NULL);
''', '''\
\
CREATE TABLE trans_prob_pkgs (
rpid INTEGER NOT NULL REFERENCES trans_rpmdb_problems,
pkgtupid INTEGER NOT NULL REFERENCES pkgtups,
main BOOL NOT NULL DEFAULT FALSE);
''', '''\
\
CREATE VIEW vtrans_data_pkgs AS
SELECT tid,name,epoch,version,release,arch,pkgtupid,
state,done,
name || '-' || epoch || ':' ||
version || '-' || release || '.' || arch AS nevra
FROM trans_data_pkgs JOIN pkgtups USING(pkgtupid)
ORDER BY name;
''', '''\
\
CREATE VIEW vtrans_with_pkgs AS
SELECT tid,name,epoch,version,release,arch,pkgtupid,
name || '-' || epoch || ':' ||
version || '-' || release || '.' || arch AS nevra
FROM trans_with_pkgs JOIN pkgtups USING(pkgtupid)
ORDER BY name;
''', '''\
\
CREATE VIEW vtrans_skip_pkgs AS
SELECT tid,name,epoch,version,release,arch,pkgtupid,
name || '-' || epoch || ':' ||
version || '-' || release || '.' || arch AS nevra
FROM trans_skip_pkgs JOIN pkgtups USING(pkgtupid)
ORDER BY name;
''', # NOTE: Old versions of sqlite don't like the normal way to do the next
# view. So we do it with the select. It's for debugging only, so
# no big deal.
'''\
\
CREATE VIEW vtrans_prob_pkgs2 AS
SELECT tid,rpid,name,epoch,version,release,arch,pkgtups.pkgtupid,
main,problem,msg,
name || '-' || epoch || ':' ||
version || '-' || release || '.' || arch AS nevra
FROM (SELECT * FROM trans_prob_pkgs,trans_rpmdb_problems WHERE
trans_prob_pkgs.rpid=trans_rpmdb_problems.rpid)
JOIN pkgtups USING(pkgtupid)
ORDER BY name;
''']
def _update_db_file_2(self):
""" Update to version 2 of history, includes trans_skip_pkgs. """
if not self.conf.writable:
return False
if hasattr(self, '_cached_updated_2'):
return self._cached_updated_2
cur = self._get_cursor()
if cur is None:
return False
executeSQL(cur, "PRAGMA table_info(trans_skip_pkgs)")
# If we get anything, we're fine. There might be a better way of
# saying "anything" but this works.
for ob in cur:
break
else:
for op in self._update_ops_2:
cur.execute(op)
self._commit()
self._cached_updated_2 = True
return True
# pylint: enable-msg=E0203
def _create_db_file(self):
""" Create a new history DB file, populating tables etc. """
self._db_date = time.strftime('%Y-%m-%d')
_db_file = '%s/%s-%s.%s' % (self.conf.db_path,
'history',
self._db_date,
'sqlite')
if self._db_file == _db_file:
os.rename(_db_file, _db_file + '.old')
# Just in case ... move the journal file too.
if os.path.exists(_db_file + '-journal'):
os.rename(_db_file + '-journal', _db_file + '-journal.old')
self._db_file = _db_file
if self.conf.writable and not os.path.exists(self._db_file):
# make them default to 0600 - sysadmin can change it later
# if they want
fo = os.open(self._db_file, os.O_CREAT, 0o600)
os.close(fo)
cur = self._get_cursor()
if cur is None:
raise IOError(_("Can not create history database at '%s'.") % \
self._db_file)
ops = ['''\
CREATE TABLE trans_beg (
tid INTEGER PRIMARY KEY,
timestamp INTEGER NOT NULL, rpmdb_version TEXT NOT NULL,
loginuid INTEGER);
''', '''\
CREATE TABLE trans_end (
tid INTEGER PRIMARY KEY REFERENCES trans_beg,
timestamp INTEGER NOT NULL, rpmdb_version TEXT NOT NULL,
return_code INTEGER NOT NULL);
''', '''\
\
CREATE TABLE trans_with_pkgs (
tid INTEGER NOT NULL REFERENCES trans_beg,
pkgtupid INTEGER NOT NULL REFERENCES pkgtups);
''', '''\
\
CREATE TABLE trans_error (
mid INTEGER PRIMARY KEY,
tid INTEGER NOT NULL REFERENCES trans_beg,
msg TEXT NOT NULL);
''', '''\
CREATE TABLE trans_script_stdout (
lid INTEGER PRIMARY KEY,
tid INTEGER NOT NULL REFERENCES trans_beg,
line TEXT NOT NULL);
''', '''\
\
CREATE TABLE trans_data_pkgs (
tid INTEGER NOT NULL REFERENCES trans_beg,
pkgtupid INTEGER NOT NULL REFERENCES pkgtups,
done BOOL NOT NULL DEFAULT FALSE, state TEXT NOT NULL);
''', '''\
\
CREATE TABLE pkgtups (
pkgtupid INTEGER PRIMARY KEY, name TEXT NOT NULL, arch TEXT NOT NULL,
epoch TEXT NOT NULL, version TEXT NOT NULL, release TEXT NOT NULL,
checksum TEXT);
''', '''\
CREATE INDEX i_pkgtup_naevr ON pkgtups (name, arch, epoch, version, release);
''']
for op in ops:
cur.execute(op)
for op in self._update_ops_2:
cur.execute(op)
for op in self._update_ops_3:
cur.execute(op)
self._commit()
_FULL_PARSE_QUERY_BEG = """
SELECT pkgtupid,name,epoch,version,release,arch,
name || "." || arch AS sql_nameArch,
name || "-" || version || "-" || release || "." || arch AS sql_nameVerRelArch,
name || "-" || version AS sql_nameVer,
name || "-" || version || "-" || release AS sql_nameVerRel,
epoch || ":" || name || "-" || version || "-" || release || "." || arch AS sql_envra,
name || "-" || epoch || ":" || version || "-" || release || "." || arch AS sql_nevra
FROM pkgtups
WHERE
"""
| gpl-2.0 | 4,121,912,397,852,109,000 | 5,768,948,904,373,041,000 | 35.310305 | 87 | 0.51775 | false |
hip-odoo/odoo | addons/bus/models/bus_presence.py | 38 | 2676 | # -*- coding: utf-8 -*-
import datetime
import time
from odoo import api, fields, models
from odoo import tools
from odoo.addons.bus.models.bus import TIMEOUT
from odoo.tools.misc import DEFAULT_SERVER_DATETIME_FORMAT
DISCONNECTION_TIMER = TIMEOUT + 5
AWAY_TIMER = 1800 # 30 minutes
class BusPresence(models.Model):
""" User Presence
        Its status is 'online', 'away' or 'offline'. This model should be a one2one, but is not
        attached to res_users to avoid database concurrency errors. Since the 'update' method is executed
        at each poll, if the user has multiple open tabs, concurrency errors can happen, but they are 'muted-logged'.
"""
_name = 'bus.presence'
_description = 'User Presence'
_log_access = False
_sql_constraints = [('bus_user_presence_unique', 'unique(user_id)', 'A user can only have one IM status.')]
user_id = fields.Many2one('res.users', 'Users', required=True, index=True, ondelete='cascade')
last_poll = fields.Datetime('Last Poll', default=lambda self: fields.Datetime.now())
last_presence = fields.Datetime('Last Presence', default=lambda self: fields.Datetime.now())
status = fields.Selection([('online', 'Online'), ('away', 'Away'), ('offline', 'Offline')], 'IM Status', default='offline')
@api.model
def update(self, inactivity_period):
""" Updates the last_poll and last_presence of the current user
:param inactivity_period: duration in milliseconds
"""
presence = self.search([('user_id', '=', self._uid)], limit=1)
# compute last_presence timestamp
last_presence = datetime.datetime.now() - datetime.timedelta(milliseconds=inactivity_period)
values = {
'last_poll': time.strftime(DEFAULT_SERVER_DATETIME_FORMAT),
}
        # update the presence or create a new one
if not presence: # create a new presence for the user
values['user_id'] = self._uid
values['last_presence'] = last_presence.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
self.create(values)
else: # update the last_presence if necessary, and write values
if datetime.datetime.strptime(presence.last_presence, DEFAULT_SERVER_DATETIME_FORMAT) < last_presence:
values['last_presence'] = last_presence.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
            # Hide transaction serialization errors, which can be ignored since the presence update is not essential
with tools.mute_logger('odoo.sql_db'):
presence.write(values)
# avoid TransactionRollbackError
self.env.cr.commit() # TODO : check if still necessary
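    # Illustrative sketch (assumption, not part of the original module): the
    # bus controller answering a browser poll would refresh presence roughly as
    #   request.env['bus.presence'].update(inactivity_period=30000)
    # i.e. the client reports 30s of inactivity and last_presence is backdated
    # by that amount before last_poll is set to "now".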
| agpl-3.0 | 3,973,484,486,767,233,000 | -1,669,446,394,834,572,500 | 47.654545 | 127 | 0.668535 | false |
stackforge/watcher | watcher/notifications/audit.py | 2 | 12071 | # -*- encoding: utf-8 -*-
# Copyright (c) 2016 b<>com
#
# Authors: Vincent FRANCOISE <vincent.francoise@b-com.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from watcher.common import exception
from watcher.notifications import base as notificationbase
from watcher.notifications import exception as exception_notifications
from watcher.notifications import goal as goal_notifications
from watcher.notifications import strategy as strategy_notifications
from watcher.objects import base
from watcher.objects import fields as wfields
CONF = cfg.CONF
@base.WatcherObjectRegistry.register_notification
class TerseAuditPayload(notificationbase.NotificationPayloadBase):
SCHEMA = {
'uuid': ('audit', 'uuid'),
'name': ('audit', 'name'),
'audit_type': ('audit', 'audit_type'),
'state': ('audit', 'state'),
'parameters': ('audit', 'parameters'),
'interval': ('audit', 'interval'),
'scope': ('audit', 'scope'),
'auto_trigger': ('audit', 'auto_trigger'),
'next_run_time': ('audit', 'next_run_time'),
'created_at': ('audit', 'created_at'),
'updated_at': ('audit', 'updated_at'),
'deleted_at': ('audit', 'deleted_at'),
}
# Version 1.0: Initial version
# Version 1.1: Added 'auto_trigger' boolean field,
# Added 'next_run_time' DateTime field,
# 'interval' type has been changed from Integer to String
# Version 1.2: Added 'name' string field
VERSION = '1.2'
fields = {
'uuid': wfields.UUIDField(),
'name': wfields.StringField(),
'audit_type': wfields.StringField(),
'state': wfields.StringField(),
'parameters': wfields.FlexibleDictField(nullable=True),
'interval': wfields.StringField(nullable=True),
'scope': wfields.FlexibleListOfDictField(nullable=True),
'goal_uuid': wfields.UUIDField(),
'strategy_uuid': wfields.UUIDField(nullable=True),
'auto_trigger': wfields.BooleanField(),
'next_run_time': wfields.DateTimeField(nullable=True),
'created_at': wfields.DateTimeField(nullable=True),
'updated_at': wfields.DateTimeField(nullable=True),
'deleted_at': wfields.DateTimeField(nullable=True),
}
def __init__(self, audit, goal_uuid, strategy_uuid=None, **kwargs):
super(TerseAuditPayload, self).__init__(
goal_uuid=goal_uuid, strategy_uuid=strategy_uuid, **kwargs)
self.populate_schema(audit=audit)
@base.WatcherObjectRegistry.register_notification
class AuditPayload(TerseAuditPayload):
SCHEMA = {
'uuid': ('audit', 'uuid'),
'name': ('audit', 'name'),
'audit_type': ('audit', 'audit_type'),
'state': ('audit', 'state'),
'parameters': ('audit', 'parameters'),
'interval': ('audit', 'interval'),
'scope': ('audit', 'scope'),
'auto_trigger': ('audit', 'auto_trigger'),
'next_run_time': ('audit', 'next_run_time'),
'created_at': ('audit', 'created_at'),
'updated_at': ('audit', 'updated_at'),
'deleted_at': ('audit', 'deleted_at'),
}
# Version 1.0: Initial version
# Version 1.1: Added 'auto_trigger' field,
# Added 'next_run_time' field
# Version 1.2: Added 'name' string field
VERSION = '1.2'
fields = {
'goal': wfields.ObjectField('GoalPayload'),
'strategy': wfields.ObjectField('StrategyPayload', nullable=True),
}
def __init__(self, audit, goal, strategy=None, **kwargs):
if not kwargs.get('goal_uuid'):
kwargs['goal_uuid'] = goal.uuid
if strategy and not kwargs.get('strategy_uuid'):
kwargs['strategy_uuid'] = strategy.uuid
super(AuditPayload, self).__init__(
audit=audit, goal=goal,
strategy=strategy, **kwargs)
@base.WatcherObjectRegistry.register_notification
class AuditStateUpdatePayload(notificationbase.NotificationPayloadBase):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'old_state': wfields.StringField(nullable=True),
'state': wfields.StringField(nullable=True),
}
@base.WatcherObjectRegistry.register_notification
class AuditCreatePayload(AuditPayload):
# Version 1.0: Initial version
# Version 1.1: Added 'auto_trigger' field,
# Added 'next_run_time' field
VERSION = '1.1'
fields = {}
def __init__(self, audit, goal, strategy):
super(AuditCreatePayload, self).__init__(
audit=audit,
goal=goal,
goal_uuid=goal.uuid,
strategy=strategy)
@base.WatcherObjectRegistry.register_notification
class AuditUpdatePayload(AuditPayload):
# Version 1.0: Initial version
# Version 1.1: Added 'auto_trigger' field,
# Added 'next_run_time' field
VERSION = '1.1'
fields = {
'state_update': wfields.ObjectField('AuditStateUpdatePayload'),
}
def __init__(self, audit, state_update, goal, strategy):
super(AuditUpdatePayload, self).__init__(
audit=audit,
state_update=state_update,
goal=goal,
goal_uuid=goal.uuid,
strategy=strategy)
@base.WatcherObjectRegistry.register_notification
class AuditActionPayload(AuditPayload):
# Version 1.0: Initial version
# Version 1.1: Added 'auto_trigger' field,
# Added 'next_run_time' field
VERSION = '1.1'
fields = {
'fault': wfields.ObjectField('ExceptionPayload', nullable=True),
}
def __init__(self, audit, goal, strategy, **kwargs):
super(AuditActionPayload, self).__init__(
audit=audit,
goal=goal,
goal_uuid=goal.uuid,
strategy=strategy,
**kwargs)
@base.WatcherObjectRegistry.register_notification
class AuditDeletePayload(AuditPayload):
# Version 1.0: Initial version
# Version 1.1: Added 'auto_trigger' field,
# Added 'next_run_time' field
VERSION = '1.1'
fields = {}
def __init__(self, audit, goal, strategy):
super(AuditDeletePayload, self).__init__(
audit=audit,
goal=goal,
goal_uuid=goal.uuid,
strategy=strategy)
@notificationbase.notification_sample('audit-strategy-error.json')
@notificationbase.notification_sample('audit-strategy-end.json')
@notificationbase.notification_sample('audit-strategy-start.json')
@base.WatcherObjectRegistry.register_notification
class AuditActionNotification(notificationbase.NotificationBase):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'payload': wfields.ObjectField('AuditActionPayload')
}
@notificationbase.notification_sample('audit-create.json')
@base.WatcherObjectRegistry.register_notification
class AuditCreateNotification(notificationbase.NotificationBase):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'payload': wfields.ObjectField('AuditCreatePayload')
}
@notificationbase.notification_sample('audit-update.json')
@base.WatcherObjectRegistry.register_notification
class AuditUpdateNotification(notificationbase.NotificationBase):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'payload': wfields.ObjectField('AuditUpdatePayload')
}
@notificationbase.notification_sample('audit-delete.json')
@base.WatcherObjectRegistry.register_notification
class AuditDeleteNotification(notificationbase.NotificationBase):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'payload': wfields.ObjectField('AuditDeletePayload')
}
def _get_common_payload(audit):
goal = None
strategy = None
try:
goal = audit.goal
if audit.strategy_id:
strategy = audit.strategy
except NotImplementedError:
raise exception.EagerlyLoadedAuditRequired(audit=audit.uuid)
goal_payload = goal_notifications.GoalPayload(goal=goal)
strategy_payload = None
if strategy:
strategy_payload = strategy_notifications.StrategyPayload(
strategy=strategy)
return goal_payload, strategy_payload
def send_create(context, audit, service='infra-optim', host=None):
"""Emit an audit.create notification."""
goal_payload, strategy_payload = _get_common_payload(audit)
versioned_payload = AuditCreatePayload(
audit=audit,
goal=goal_payload,
strategy=strategy_payload,
)
notification = AuditCreateNotification(
priority=wfields.NotificationPriority.INFO,
event_type=notificationbase.EventType(
object='audit',
action=wfields.NotificationAction.CREATE),
publisher=notificationbase.NotificationPublisher(
host=host or CONF.host,
binary=service),
payload=versioned_payload)
notification.emit(context)
def send_update(context, audit, service='infra-optim',
host=None, old_state=None):
"""Emit an audit.update notification."""
goal_payload, strategy_payload = _get_common_payload(audit)
state_update = AuditStateUpdatePayload(
old_state=old_state,
state=audit.state if old_state else None)
versioned_payload = AuditUpdatePayload(
audit=audit,
state_update=state_update,
goal=goal_payload,
strategy=strategy_payload,
)
notification = AuditUpdateNotification(
priority=wfields.NotificationPriority.INFO,
event_type=notificationbase.EventType(
object='audit',
action=wfields.NotificationAction.UPDATE),
publisher=notificationbase.NotificationPublisher(
host=host or CONF.host,
binary=service),
payload=versioned_payload)
notification.emit(context)
def send_delete(context, audit, service='infra-optim', host=None):
goal_payload, strategy_payload = _get_common_payload(audit)
versioned_payload = AuditDeletePayload(
audit=audit,
goal=goal_payload,
strategy=strategy_payload,
)
notification = AuditDeleteNotification(
priority=wfields.NotificationPriority.INFO,
event_type=notificationbase.EventType(
object='audit',
action=wfields.NotificationAction.DELETE),
publisher=notificationbase.NotificationPublisher(
host=host or CONF.host,
binary=service),
payload=versioned_payload)
notification.emit(context)
def send_action_notification(context, audit, action, phase=None,
priority=wfields.NotificationPriority.INFO,
service='infra-optim', host=None):
"""Emit an audit action notification."""
goal_payload, strategy_payload = _get_common_payload(audit)
fault = None
if phase == wfields.NotificationPhase.ERROR:
fault = exception_notifications.ExceptionPayload.from_exception()
versioned_payload = AuditActionPayload(
audit=audit,
goal=goal_payload,
strategy=strategy_payload,
fault=fault,
)
notification = AuditActionNotification(
priority=priority,
event_type=notificationbase.EventType(
object='audit',
action=action,
phase=phase),
publisher=notificationbase.NotificationPublisher(
host=host or CONF.host,
binary=service),
payload=versioned_payload)
notification.emit(context)
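# Illustrative sketch (assumption, not part of the original module; the exact
# action constant passed by callers is not shown here): audit handling code
# would typically bracket strategy execution with these notifications, e.g.
#   send_action_notification(context, audit, action=<strategy action>,
#                            phase=wfields.NotificationPhase.START)
#   ... run the strategy ...
#   send_action_notification(context, audit, action=<strategy action>,
#                            phase=wfields.NotificationPhase.END)
# matching the audit-strategy-start/end/error notification samples registered
# above.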
| apache-2.0 | 4,541,049,777,306,908,700 | -683,604,285,688,785,300 | 31.536388 | 74 | 0.649739 | false |
rallylee/gem5 | src/cpu/minor/MinorCPU.py | 12 | 11679 | # Copyright (c) 2012-2014 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
# Nathan Binkert
# Andrew Bardsley
from m5.defines import buildEnv
from m5.params import *
from m5.proxy import *
from m5.SimObject import SimObject
from BaseCPU import BaseCPU
from DummyChecker import DummyChecker
from BranchPredictor import *
from TimingExpr import TimingExpr
from FuncUnit import OpClass
class MinorOpClass(SimObject):
"""Boxing of OpClass to get around build problems and provide a hook for
future additions to OpClass checks"""
type = 'MinorOpClass'
cxx_header = "cpu/minor/func_unit.hh"
opClass = Param.OpClass("op class to match")
class MinorOpClassSet(SimObject):
"""A set of matchable op classes"""
type = 'MinorOpClassSet'
cxx_header = "cpu/minor/func_unit.hh"
opClasses = VectorParam.MinorOpClass([], "op classes to be matched."
" An empty list means any class")
class MinorFUTiming(SimObject):
type = 'MinorFUTiming'
cxx_header = "cpu/minor/func_unit.hh"
mask = Param.UInt64(0, "mask for testing ExtMachInst")
match = Param.UInt64(0, "match value for testing ExtMachInst:"
" (ext_mach_inst & mask) == match")
suppress = Param.Bool(False, "if true, this inst. is not executed by"
" this FU")
extraCommitLat = Param.Cycles(0, "extra cycles to stall commit for"
" this inst.")
extraCommitLatExpr = Param.TimingExpr(NULL, "extra cycles as a"
" run-time evaluated expression")
extraAssumedLat = Param.Cycles(0, "extra cycles to add to scoreboard"
" retire time for this insts dest registers once it leaves the"
" functional unit. For mem refs, if this is 0, the result's time"
" is marked as unpredictable and no forwarding can take place.")
srcRegsRelativeLats = VectorParam.Cycles("the maximum number of cycles"
" after inst. issue that each src reg can be available for this"
" inst. to issue")
opClasses = Param.MinorOpClassSet(MinorOpClassSet(),
"op classes to be considered for this decode. An empty set means any"
" class")
description = Param.String('', "description string of the decoding/inst."
" class")
def minorMakeOpClassSet(op_classes):
"""Make a MinorOpClassSet from a list of OpClass enum value strings"""
def boxOpClass(op_class):
return MinorOpClass(opClass=op_class)
return MinorOpClassSet(opClasses=map(boxOpClass, op_classes))
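# For example (illustrative only), a unit limited to integer ALU and multiply
# operations could be described as:
#   minorMakeOpClassSet(['IntAlu', 'IntMult'])
# each OpClass name is boxed in a MinorOpClass and collected into a single
# MinorOpClassSet, matching how the default FUs below are declared.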
class MinorFU(SimObject):
type = 'MinorFU'
cxx_header = "cpu/minor/func_unit.hh"
opClasses = Param.MinorOpClassSet(MinorOpClassSet(), "type of operations"
" allowed on this functional unit")
opLat = Param.Cycles(1, "latency in cycles")
issueLat = Param.Cycles(1, "cycles until another instruction can be"
" issued")
timings = VectorParam.MinorFUTiming([], "extra decoding rules")
cantForwardFromFUIndices = VectorParam.Unsigned([],
"list of FU indices from which this FU can't receive and early"
" (forwarded) result")
class MinorFUPool(SimObject):
type = 'MinorFUPool'
cxx_header = "cpu/minor/func_unit.hh"
funcUnits = VectorParam.MinorFU("functional units")
class MinorDefaultIntFU(MinorFU):
opClasses = minorMakeOpClassSet(['IntAlu'])
timings = [MinorFUTiming(description="Int",
srcRegsRelativeLats=[2])]
opLat = 3
class MinorDefaultIntMulFU(MinorFU):
opClasses = minorMakeOpClassSet(['IntMult'])
timings = [MinorFUTiming(description='Mul',
srcRegsRelativeLats=[0])]
opLat = 3
class MinorDefaultIntDivFU(MinorFU):
opClasses = minorMakeOpClassSet(['IntDiv'])
issueLat = 9
opLat = 9
class MinorDefaultFloatSimdFU(MinorFU):
opClasses = minorMakeOpClassSet([
'FloatAdd', 'FloatCmp', 'FloatCvt', 'FloatMisc', 'FloatMult',
'FloatMultAcc', 'FloatDiv', 'FloatSqrt',
'SimdAdd', 'SimdAddAcc', 'SimdAlu', 'SimdCmp', 'SimdCvt',
'SimdMisc', 'SimdMult', 'SimdMultAcc', 'SimdShift', 'SimdShiftAcc',
'SimdSqrt', 'SimdFloatAdd', 'SimdFloatAlu', 'SimdFloatCmp',
'SimdFloatCvt', 'SimdFloatDiv', 'SimdFloatMisc', 'SimdFloatMult',
'SimdFloatMultAcc', 'SimdFloatSqrt'])
timings = [MinorFUTiming(description='FloatSimd',
srcRegsRelativeLats=[2])]
opLat = 6
class MinorDefaultMemFU(MinorFU):
opClasses = minorMakeOpClassSet(['MemRead', 'MemWrite', 'FloatMemRead',
'FloatMemWrite'])
timings = [MinorFUTiming(description='Mem',
srcRegsRelativeLats=[1], extraAssumedLat=2)]
opLat = 1
class MinorDefaultMiscFU(MinorFU):
opClasses = minorMakeOpClassSet(['IprAccess', 'InstPrefetch'])
opLat = 1
class MinorDefaultFUPool(MinorFUPool):
funcUnits = [MinorDefaultIntFU(), MinorDefaultIntFU(),
MinorDefaultIntMulFU(), MinorDefaultIntDivFU(),
MinorDefaultFloatSimdFU(), MinorDefaultMemFU(),
MinorDefaultMiscFU()]
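# A customised pool is assembled the same way; e.g. (sketch, not one of gem5's
# shipped configurations) a core with a second memory unit might use:
#   class MyFUPool(MinorFUPool):
#       funcUnits = [MinorDefaultIntFU(), MinorDefaultIntFU(),
#                    MinorDefaultIntMulFU(), MinorDefaultIntDivFU(),
#                    MinorDefaultFloatSimdFU(), MinorDefaultMemFU(),
#                    MinorDefaultMemFU(), MinorDefaultMiscFU()]
# and pass it to the CPU through the executeFuncUnits parameter below.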
class ThreadPolicy(Enum): vals = ['SingleThreaded', 'RoundRobin', 'Random']
class MinorCPU(BaseCPU):
type = 'MinorCPU'
cxx_header = "cpu/minor/cpu.hh"
@classmethod
def memory_mode(cls):
return 'timing'
@classmethod
def require_caches(cls):
return True
@classmethod
def support_take_over(cls):
return True
threadPolicy = Param.ThreadPolicy('RoundRobin',
"Thread scheduling policy")
fetch1FetchLimit = Param.Unsigned(1,
"Number of line fetches allowable in flight at once")
fetch1LineSnapWidth = Param.Unsigned(0,
"Fetch1 'line' fetch snap size in bytes"
" (0 means use system cache line size)")
fetch1LineWidth = Param.Unsigned(0,
"Fetch1 maximum fetch size in bytes (0 means use system cache"
" line size)")
fetch1ToFetch2ForwardDelay = Param.Cycles(1,
"Forward cycle delay from Fetch1 to Fetch2 (1 means next cycle)")
fetch1ToFetch2BackwardDelay = Param.Cycles(1,
"Backward cycle delay from Fetch2 to Fetch1 for branch prediction"
" signalling (0 means in the same cycle, 1 mean the next cycle)")
fetch2InputBufferSize = Param.Unsigned(2,
"Size of input buffer to Fetch2 in cycles-worth of insts.")
fetch2ToDecodeForwardDelay = Param.Cycles(1,
"Forward cycle delay from Fetch2 to Decode (1 means next cycle)")
fetch2CycleInput = Param.Bool(True,
"Allow Fetch2 to cross input lines to generate full output each"
" cycle")
decodeInputBufferSize = Param.Unsigned(3,
"Size of input buffer to Decode in cycles-worth of insts.")
decodeToExecuteForwardDelay = Param.Cycles(1,
"Forward cycle delay from Decode to Execute (1 means next cycle)")
decodeInputWidth = Param.Unsigned(2,
"Width (in instructions) of input to Decode (and implicitly"
" Decode's own width)")
decodeCycleInput = Param.Bool(True,
"Allow Decode to pack instructions from more than one input cycle"
" to fill its output each cycle")
executeInputWidth = Param.Unsigned(2,
"Width (in instructions) of input to Execute")
executeCycleInput = Param.Bool(True,
"Allow Execute to use instructions from more than one input cycle"
" each cycle")
executeIssueLimit = Param.Unsigned(2,
"Number of issuable instructions in Execute each cycle")
executeMemoryIssueLimit = Param.Unsigned(1,
"Number of issuable memory instructions in Execute each cycle")
executeCommitLimit = Param.Unsigned(2,
"Number of committable instructions in Execute each cycle")
executeMemoryCommitLimit = Param.Unsigned(1,
"Number of committable memory references in Execute each cycle")
executeInputBufferSize = Param.Unsigned(7,
"Size of input buffer to Execute in cycles-worth of insts.")
executeMemoryWidth = Param.Unsigned(0,
"Width (and snap) in bytes of the data memory interface. (0 mean use"
" the system cacheLineSize)")
executeMaxAccessesInMemory = Param.Unsigned(2,
"Maximum number of concurrent accesses allowed to the memory system"
" from the dcache port")
executeLSQMaxStoreBufferStoresPerCycle = Param.Unsigned(2,
"Maximum number of stores that the store buffer can issue per cycle")
executeLSQRequestsQueueSize = Param.Unsigned(1,
"Size of LSQ requests queue (address translation queue)")
executeLSQTransfersQueueSize = Param.Unsigned(2,
"Size of LSQ transfers queue (memory transaction queue)")
executeLSQStoreBufferSize = Param.Unsigned(5,
"Size of LSQ store buffer")
executeBranchDelay = Param.Cycles(1,
"Delay from Execute deciding to branch and Fetch1 reacting"
" (1 means next cycle)")
executeFuncUnits = Param.MinorFUPool(MinorDefaultFUPool(),
"FUlines for this processor")
executeSetTraceTimeOnCommit = Param.Bool(True,
"Set inst. trace times to be commit times")
executeSetTraceTimeOnIssue = Param.Bool(False,
"Set inst. trace times to be issue times")
executeAllowEarlyMemoryIssue = Param.Bool(True,
"Allow mem refs to be issued to the LSQ before reaching the head of"
" the in flight insts queue")
enableIdling = Param.Bool(True,
"Enable cycle skipping when the processor is idle\n");
branchPred = Param.BranchPredictor(TournamentBP(
numThreads = Parent.numThreads), "Branch Predictor")
def addCheckerCpu(self):
print "Checker not yet supported by MinorCPU"
exit(1)
| bsd-3-clause | -884,015,717,347,795,500 | 8,726,916,667,008,998,000 | 40.860215 | 78 | 0.706482 | false |
SylvainCecchetto/plugin.video.catchuptvandmore | plugin.video.catchuptvandmore/resources/lib/channels/jp/ntvnews24.py | 1 | 2396 | # -*- coding: utf-8 -*-
"""
Catch-up TV & More
Copyright (C) 2018 SylvainCecchetto
This file is part of Catch-up TV & More.
Catch-up TV & More is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
Catch-up TV & More is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with Catch-up TV & More; if not, write to the Free Software Foundation,
Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
# The unicode_literals import only has
# an effect on Python 2.
# It makes string literals as unicode like in Python 3
from __future__ import unicode_literals
from codequick import Route, Resolver, Listitem, utils, Script
from resources.lib import web_utils
from resources.lib import resolver_proxy
from resources.lib.menu_utils import item_post_treatment
import re
import urlquick
# TO DO
# Add Videos, Replays ?
URL_ROOT = 'http://www.news24.jp'
URL_LIVE = URL_ROOT + '/livestream/'
@Resolver.register
def get_live_url(plugin, item_id, **kwargs):
resp = urlquick.get(URL_LIVE,
headers={'User-Agent': web_utils.get_random_ua()},
max_age=-1)
data_account = ''
data_player = ''
data_video_id = ''
if len(re.compile(r'data-account="(.*?)"').findall(resp.text)) > 0:
data_account = re.compile(r'data-account="(.*?)"').findall(
resp.text)[0]
data_player = re.compile(r'data-player="(.*?)"').findall(resp.text)[0]
data_video_id = re.compile(r'data-video-id="(.*?)"').findall(
resp.text)[0]
else:
data_account = re.compile(r'accountId\: "(.*?)"').findall(resp.text)[0]
data_player = re.compile(r'player\: "(.*?)"').findall(resp.text)[0]
data_video_id = re.compile(r'videoId\: "(.*?)"').findall(resp.text)[0]
return resolver_proxy.get_brightcove_video_json(plugin, data_account,
data_player, data_video_id)
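# Illustrative note (assumption about the page markup): the live page embeds a
# Brightcove player tag along the lines of
#   <video data-account="1234..." data-player="default" data-video-id="5678...">
# (or the accountId/player/videoId variants matched above), and the extracted
# IDs are handed to resolver_proxy.get_brightcove_video_json() to resolve the
# stream URL.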
| gpl-2.0 | -70,064,428,640,963,420 | 2,467,373,516,547,241,500 | 35.30303 | 79 | 0.645659 | false |
mrjaydee82/SinLessKernel-4.4.4 | toolchains/a8483/share/gdb/python/gdb/__init__.py | 110 | 3408 | # Copyright (C) 2010-2013 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import traceback
import os
import sys
import _gdb
if sys.version_info[0] > 2:
# Python 3 moved "reload"
from imp import reload
from _gdb import *
class _GdbFile (object):
# These two are needed in Python 3
encoding = "UTF-8"
errors = "strict"
def close(self):
# Do nothing.
return None
def isatty(self):
return False
def writelines(self, iterable):
for line in iterable:
self.write(line)
def flush(self):
flush()
class GdbOutputFile (_GdbFile):
def write(self, s):
write(s, stream=STDOUT)
sys.stdout = GdbOutputFile()
class GdbOutputErrorFile (_GdbFile):
def write(self, s):
write(s, stream=STDERR)
sys.stderr = GdbOutputErrorFile()
# Default prompt hook does nothing.
prompt_hook = None
# Ensure that sys.argv is set to something.
# We do not use PySys_SetArgvEx because it did not appear until 2.6.6.
sys.argv = ['']
# Initial pretty printers.
pretty_printers = []
# Initial type printers.
type_printers = []
# Convenience variable to GDB's python directory
PYTHONDIR = os.path.dirname(os.path.dirname(__file__))
# Auto-load all functions/commands.
# Packages to auto-load.
packages = [
'function',
'command'
]
# pkgutil.iter_modules is not available prior to Python 2.6. Instead,
# manually iterate the list, collating the Python files in each module
# path. Construct the module name, and import.
def auto_load_packages():
for package in packages:
location = os.path.join(os.path.dirname(__file__), package)
if os.path.exists(location):
py_files = filter(lambda x: x.endswith('.py')
and x != '__init__.py',
os.listdir(location))
for py_file in py_files:
# Construct from foo.py, gdb.module.foo
modname = "%s.%s.%s" % ( __name__, package, py_file[:-3] )
try:
if modname in sys.modules:
# reload modules with duplicate names
reload(__import__(modname))
else:
__import__(modname)
except:
sys.stderr.write (traceback.format_exc() + "\n")
auto_load_packages()
def GdbSetPythonDirectory(dir):
"""Update sys.path, reload gdb and auto-load packages."""
global PYTHONDIR
try:
sys.path.remove(PYTHONDIR)
except ValueError:
pass
sys.path.insert(0, dir)
PYTHONDIR = dir
# note that reload overwrites the gdb module without deleting existing
# attributes
reload(__import__(__name__))
auto_load_packages()
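# Illustrative note (not part of the original module): GDB itself calls this
# hook when its data directory changes, roughly equivalent to running
#   gdb.GdbSetPythonDirectory('/usr/share/gdb/python')
# which re-points sys.path at the new location, reloads the gdb package and
# re-runs auto_load_packages().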
| gpl-2.0 | -1,413,807,384,686,501,600 | -1,164,905,207,695,189,200 | 26.483871 | 74 | 0.626467 | false |
xuru/restler | lib/usr/sqlalchemy/dialects/mssql/pyodbc.py | 2 | 9228 | # mssql/pyodbc.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
Support for MS-SQL via pyodbc.
pyodbc is available at:
http://pypi.python.org/pypi/pyodbc/
Connecting
^^^^^^^^^^
Examples of pyodbc connection string URLs:
* ``mssql+pyodbc://mydsn`` - connects using the specified DSN named ``mydsn``.
The connection string that is created will appear like::
dsn=mydsn;Trusted_Connection=Yes
* ``mssql+pyodbc://user:pass@mydsn`` - connects using the DSN named
``mydsn`` passing in the ``UID`` and ``PWD`` information. The
connection string that is created will appear like::
dsn=mydsn;UID=user;PWD=pass
* ``mssql+pyodbc://user:pass@mydsn/?LANGUAGE=us_english`` - connects
using the DSN named ``mydsn`` passing in the ``UID`` and ``PWD``
information, plus the additional connection configuration option
``LANGUAGE``. The connection string that is created will appear
like::
dsn=mydsn;UID=user;PWD=pass;LANGUAGE=us_english
* ``mssql+pyodbc://user:pass@host/db`` - connects using a connection
that would appear like::
DRIVER={SQL Server};Server=host;Database=db;UID=user;PWD=pass
* ``mssql+pyodbc://user:pass@host:123/db`` - connects using a connection
string which includes the port
information using the comma syntax. This will create the following
connection string::
DRIVER={SQL Server};Server=host,123;Database=db;UID=user;PWD=pass
* ``mssql+pyodbc://user:pass@host/db?port=123`` - connects using a connection
string that includes the port
information as a separate ``port`` keyword. This will create the
following connection string::
DRIVER={SQL Server};Server=host;Database=db;UID=user;PWD=pass;port=123
* ``mssql+pyodbc://user:pass@host/db?driver=MyDriver`` - connects using a connection
string that includes a custom
ODBC driver name. This will create the following connection string::
DRIVER={MyDriver};Server=host;Database=db;UID=user;PWD=pass
If you require a connection string that is outside the options
presented above, use the ``odbc_connect`` keyword to pass in a
urlencoded connection string. What gets passed in will be urldecoded
and passed directly.
For example::
mssql+pyodbc:///?odbc_connect=dsn%3Dmydsn%3BDatabase%3Ddb
would create the following connection string::
dsn=mydsn;Database=db
Encoding your connection string can be easily accomplished through
the python shell. For example::
>>> import urllib
>>> urllib.quote_plus('dsn=mydsn;Database=db')
'dsn%3Dmydsn%3BDatabase%3Ddb'
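The quoted value then goes after ``odbc_connect=`` in the URL; a minimal
sketch (the DSN and database names are placeholders)::

    from sqlalchemy import create_engine
    engine = create_engine(
        "mssql+pyodbc:///?odbc_connect=" +
        urllib.quote_plus('dsn=mydsn;Database=db'))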
Unicode Binds
^^^^^^^^^^^^^
The current state of PyODBC on a unix backend with FreeTDS and/or
EasySoft is poor regarding unicode; different OS platforms and versions of UnixODBC
versus IODBC versus FreeTDS/EasySoft versus PyODBC itself dramatically
alter how strings are received. The PyODBC dialect attempts to use all the information
it knows to determine whether or not a Python unicode literal can be
passed directly to the PyODBC driver or not; while SQLAlchemy can encode
these to bytestrings first, some users have reported that PyODBC mis-handles
bytestrings for certain encodings and requires a Python unicode object,
while the author has observed widespread cases where a Python unicode
is completely misinterpreted by PyODBC, particularly when dealing with
the information schema tables used in table reflection, and the value
must first be encoded to a bytestring.
It is for this reason that whether or not unicode literals for bound
parameters be sent to PyODBC can be controlled using the
``supports_unicode_binds`` parameter to ``create_engine()``. When
left at its default of ``None``, the PyODBC dialect will use its
best guess as to whether or not the driver deals with unicode literals
well. When ``False``, unicode literals will be encoded first, and when
``True`` unicode literals will be passed straight through. This is an interim
flag that hopefully should not be needed when the unicode situation stabilizes
for unix + PyODBC.
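A minimal sketch of forcing the behaviour either way (the flag simply
overrides the dialect's guess; the DSN and credentials are placeholders)::

    engine = create_engine("mssql+pyodbc://user:pass@mydsn",
                           supports_unicode_binds=True)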
.. versionadded:: 0.7.7
``supports_unicode_binds`` parameter to ``create_engine()``\ .
"""
from sqlalchemy.dialects.mssql.base import MSExecutionContext, MSDialect
from sqlalchemy.connectors.pyodbc import PyODBCConnector
from sqlalchemy import types as sqltypes, util
import decimal
class _MSNumeric_pyodbc(sqltypes.Numeric):
"""Turns Decimals with adjusted() < 0 or > 7 into strings.
This is the only method that is proven to work with Pyodbc+MSSQL
without crashing (floats can be used but seem to cause sporadic
crashes).
"""
def bind_processor(self, dialect):
super_process = super(_MSNumeric_pyodbc, self).\
bind_processor(dialect)
if not dialect._need_decimal_fix:
return super_process
def process(value):
if self.asdecimal and \
isinstance(value, decimal.Decimal):
adjusted = value.adjusted()
if adjusted < 0:
return self._small_dec_to_string(value)
elif adjusted > 7:
return self._large_dec_to_string(value)
if super_process:
return super_process(value)
else:
return value
return process
# these routines needed for older versions of pyodbc.
# as of 2.1.8 this logic is integrated.
def _small_dec_to_string(self, value):
return "%s0.%s%s" % (
(value < 0 and '-' or ''),
'0' * (abs(value.adjusted()) - 1),
"".join([str(nint) for nint in value.as_tuple()[1]]))
def _large_dec_to_string(self, value):
_int = value.as_tuple()[1]
if 'E' in str(value):
result = "%s%s%s" % (
(value < 0 and '-' or ''),
"".join([str(s) for s in _int]),
"0" * (value.adjusted() - (len(_int)-1)))
else:
if (len(_int) - 1) > value.adjusted():
result = "%s%s.%s" % (
(value < 0 and '-' or ''),
"".join(
[str(s) for s in _int][0:value.adjusted() + 1]),
"".join(
[str(s) for s in _int][value.adjusted() + 1:]))
else:
result = "%s%s" % (
(value < 0 and '-' or ''),
"".join(
[str(s) for s in _int][0:value.adjusted() + 1]))
return result
class MSExecutionContext_pyodbc(MSExecutionContext):
_embedded_scope_identity = False
def pre_exec(self):
"""where appropriate, issue "select scope_identity()" in the same
statement.
Background on why "scope_identity()" is preferable to "@@identity":
http://msdn.microsoft.com/en-us/library/ms190315.aspx
Background on why we attempt to embed "scope_identity()" into the same
statement as the INSERT:
http://code.google.com/p/pyodbc/wiki/FAQs#How_do_I_retrieve_autogenerated/identity_values?
"""
super(MSExecutionContext_pyodbc, self).pre_exec()
# don't embed the scope_identity select into an
# "INSERT .. DEFAULT VALUES"
if self._select_lastrowid and \
self.dialect.use_scope_identity and \
len(self.parameters[0]):
self._embedded_scope_identity = True
self.statement += "; select scope_identity()"
def post_exec(self):
if self._embedded_scope_identity:
# Fetch the last inserted id from the manipulated statement
# We may have to skip over a number of result sets with
# no data (due to triggers, etc.)
while True:
try:
# fetchall() ensures the cursor is consumed
# without closing it (FreeTDS particularly)
row = self.cursor.fetchall()[0]
break
except self.dialect.dbapi.Error, e:
# no way around this - nextset() consumes the previous set
# so we need to just keep flipping
self.cursor.nextset()
self._lastrowid = int(row[0])
else:
super(MSExecutionContext_pyodbc, self).post_exec()
class MSDialect_pyodbc(PyODBCConnector, MSDialect):
execution_ctx_cls = MSExecutionContext_pyodbc
pyodbc_driver_name = 'SQL Server'
colspecs = util.update_copy(
MSDialect.colspecs,
{
sqltypes.Numeric:_MSNumeric_pyodbc
}
)
def __init__(self, description_encoding='latin-1', **params):
super(MSDialect_pyodbc, self).__init__(**params)
self.description_encoding = description_encoding
self.use_scope_identity = self.use_scope_identity and \
self.dbapi and \
hasattr(self.dbapi.Cursor, 'nextset')
self._need_decimal_fix = self.dbapi and \
self._dbapi_version() < (2, 1, 8)
dialect = MSDialect_pyodbc
| mit | 8,152,324,756,238,605,000 | 8,814,931,339,875,877,000 | 35.330709 | 98 | 0.638383 | false |
seem-sky/kbengine | kbe/src/lib/python/Lib/test/test_generators.py | 72 | 50910 | import gc
import sys
import unittest
import weakref
from test import support
class FinalizationTest(unittest.TestCase):
def test_frame_resurrect(self):
# A generator frame can be resurrected by a generator's finalization.
def gen():
nonlocal frame
try:
yield
finally:
frame = sys._getframe()
g = gen()
wr = weakref.ref(g)
next(g)
del g
support.gc_collect()
self.assertIs(wr(), None)
self.assertTrue(frame)
del frame
support.gc_collect()
def test_refcycle(self):
# A generator caught in a refcycle gets finalized anyway.
old_garbage = gc.garbage[:]
finalized = False
def gen():
nonlocal finalized
try:
g = yield
yield 1
finally:
finalized = True
g = gen()
next(g)
g.send(g)
self.assertGreater(sys.getrefcount(g), 2)
self.assertFalse(finalized)
del g
support.gc_collect()
self.assertTrue(finalized)
self.assertEqual(gc.garbage, old_garbage)
tutorial_tests = """
Let's try a simple generator:
>>> def f():
... yield 1
... yield 2
>>> for i in f():
... print(i)
1
2
>>> g = f()
>>> next(g)
1
>>> next(g)
2
"Falling off the end" stops the generator:
>>> next(g)
Traceback (most recent call last):
File "<stdin>", line 1, in ?
File "<stdin>", line 2, in g
StopIteration
"return" also stops the generator:
>>> def f():
... yield 1
... return
... yield 2 # never reached
...
>>> g = f()
>>> next(g)
1
>>> next(g)
Traceback (most recent call last):
File "<stdin>", line 1, in ?
File "<stdin>", line 3, in f
StopIteration
>>> next(g) # once stopped, can't be resumed
Traceback (most recent call last):
File "<stdin>", line 1, in ?
StopIteration
"raise StopIteration" stops the generator too:
>>> def f():
... yield 1
... raise StopIteration
... yield 2 # never reached
...
>>> g = f()
>>> next(g)
1
>>> next(g)
Traceback (most recent call last):
File "<stdin>", line 1, in ?
StopIteration
>>> next(g)
Traceback (most recent call last):
File "<stdin>", line 1, in ?
StopIteration
However, they are not exactly equivalent:
>>> def g1():
... try:
... return
... except:
... yield 1
...
>>> list(g1())
[]
>>> def g2():
... try:
... raise StopIteration
... except:
... yield 42
>>> print(list(g2()))
[42]
This may be surprising at first:
>>> def g3():
... try:
... return
... finally:
... yield 1
...
>>> list(g3())
[1]
Let's create an alternate range() function implemented as a generator:
>>> def yrange(n):
... for i in range(n):
... yield i
...
>>> list(yrange(5))
[0, 1, 2, 3, 4]
Generators always return to the most recent caller:
>>> def creator():
... r = yrange(5)
... print("creator", next(r))
... return r
...
>>> def caller():
... r = creator()
... for i in r:
... print("caller", i)
...
>>> caller()
creator 0
caller 1
caller 2
caller 3
caller 4
Generators can call other generators:
>>> def zrange(n):
... for i in yrange(n):
... yield i
...
>>> list(zrange(5))
[0, 1, 2, 3, 4]
"""
# The examples from PEP 255.
pep_tests = """
Specification: Yield
Restriction: A generator cannot be resumed while it is actively
running:
>>> def g():
... i = next(me)
... yield i
>>> me = g()
>>> next(me)
Traceback (most recent call last):
...
File "<string>", line 2, in g
ValueError: generator already executing
Specification: Return
Note that return isn't always equivalent to raising StopIteration: the
difference lies in how enclosing try/except constructs are treated.
For example,
>>> def f1():
... try:
... return
... except:
... yield 1
>>> print(list(f1()))
[]
because, as in any function, return simply exits, but
>>> def f2():
... try:
... raise StopIteration
... except:
... yield 42
>>> print(list(f2()))
[42]
because StopIteration is captured by a bare "except", as is any
exception.
Specification: Generators and Exception Propagation
>>> def f():
... return 1//0
>>> def g():
... yield f() # the zero division exception propagates
... yield 42 # and we'll never get here
>>> k = g()
>>> next(k)
Traceback (most recent call last):
File "<stdin>", line 1, in ?
File "<stdin>", line 2, in g
File "<stdin>", line 2, in f
ZeroDivisionError: integer division or modulo by zero
>>> next(k) # and the generator cannot be resumed
Traceback (most recent call last):
File "<stdin>", line 1, in ?
StopIteration
>>>
Specification: Try/Except/Finally
>>> def f():
... try:
... yield 1
... try:
... yield 2
... 1//0
... yield 3 # never get here
... except ZeroDivisionError:
... yield 4
... yield 5
... raise
... except:
... yield 6
... yield 7 # the "raise" above stops this
... except:
... yield 8
... yield 9
... try:
... x = 12
... finally:
... yield 10
... yield 11
>>> print(list(f()))
[1, 2, 4, 5, 8, 9, 10, 11]
>>>
Guido's binary tree example.
>>> # A binary tree class.
>>> class Tree:
...
... def __init__(self, label, left=None, right=None):
... self.label = label
... self.left = left
... self.right = right
...
... def __repr__(self, level=0, indent=" "):
... s = level*indent + repr(self.label)
... if self.left:
... s = s + "\\n" + self.left.__repr__(level+1, indent)
... if self.right:
... s = s + "\\n" + self.right.__repr__(level+1, indent)
... return s
...
... def __iter__(self):
... return inorder(self)
>>> # Create a Tree from a list.
>>> def tree(list):
... n = len(list)
... if n == 0:
... return []
... i = n // 2
... return Tree(list[i], tree(list[:i]), tree(list[i+1:]))
>>> # Show it off: create a tree.
>>> t = tree("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
>>> # A recursive generator that generates Tree labels in in-order.
>>> def inorder(t):
... if t:
... for x in inorder(t.left):
... yield x
... yield t.label
... for x in inorder(t.right):
... yield x
>>> # Show it off: create a tree.
>>> t = tree("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
>>> # Print the nodes of the tree in in-order.
>>> for x in t:
... print(' '+x, end='')
A B C D E F G H I J K L M N O P Q R S T U V W X Y Z
>>> # A non-recursive generator.
>>> def inorder(node):
... stack = []
... while node:
... while node.left:
... stack.append(node)
... node = node.left
... yield node.label
... while not node.right:
... try:
... node = stack.pop()
... except IndexError:
... return
... yield node.label
... node = node.right
>>> # Exercise the non-recursive generator.
>>> for x in t:
... print(' '+x, end='')
A B C D E F G H I J K L M N O P Q R S T U V W X Y Z
"""
# Examples from Iterator-List and Python-Dev and c.l.py.
email_tests = """
The difference between yielding None and returning it.
>>> def g():
... for i in range(3):
... yield None
... yield None
... return
>>> list(g())
[None, None, None, None]
Ensure that explicitly raising StopIteration acts like any other exception
in try/except, not like a return.
>>> def g():
... yield 1
... try:
... raise StopIteration
... except:
... yield 2
... yield 3
>>> list(g())
[1, 2, 3]
Next one was posted to c.l.py.
>>> def gcomb(x, k):
... "Generate all combinations of k elements from list x."
...
... if k > len(x):
... return
... if k == 0:
... yield []
... else:
... first, rest = x[0], x[1:]
... # A combination does or doesn't contain first.
... # If it does, the remainder is a k-1 comb of rest.
... for c in gcomb(rest, k-1):
... c.insert(0, first)
... yield c
... # If it doesn't contain first, it's a k comb of rest.
... for c in gcomb(rest, k):
... yield c
>>> seq = list(range(1, 5))
>>> for k in range(len(seq) + 2):
... print("%d-combs of %s:" % (k, seq))
... for c in gcomb(seq, k):
... print(" ", c)
0-combs of [1, 2, 3, 4]:
[]
1-combs of [1, 2, 3, 4]:
[1]
[2]
[3]
[4]
2-combs of [1, 2, 3, 4]:
[1, 2]
[1, 3]
[1, 4]
[2, 3]
[2, 4]
[3, 4]
3-combs of [1, 2, 3, 4]:
[1, 2, 3]
[1, 2, 4]
[1, 3, 4]
[2, 3, 4]
4-combs of [1, 2, 3, 4]:
[1, 2, 3, 4]
5-combs of [1, 2, 3, 4]:
From the Iterators list, about the types of these things.
>>> def g():
... yield 1
...
>>> type(g)
<class 'function'>
>>> i = g()
>>> type(i)
<class 'generator'>
>>> [s for s in dir(i) if not s.startswith('_')]
['close', 'gi_code', 'gi_frame', 'gi_running', 'send', 'throw']
>>> from test.support import HAVE_DOCSTRINGS
>>> print(i.__next__.__doc__ if HAVE_DOCSTRINGS else 'Implement next(self).')
Implement next(self).
>>> iter(i) is i
True
>>> import types
>>> isinstance(i, types.GeneratorType)
True
And more, added later.
>>> i.gi_running
0
>>> type(i.gi_frame)
<class 'frame'>
>>> i.gi_running = 42
Traceback (most recent call last):
...
AttributeError: readonly attribute
>>> def g():
... yield me.gi_running
>>> me = g()
>>> me.gi_running
0
>>> next(me)
1
>>> me.gi_running
0
A clever union-find implementation from c.l.py, due to David Eppstein.
Sent: Friday, June 29, 2001 12:16 PM
To: python-list@python.org
Subject: Re: PEP 255: Simple Generators
>>> class disjointSet:
... def __init__(self, name):
... self.name = name
... self.parent = None
... self.generator = self.generate()
...
... def generate(self):
... while not self.parent:
... yield self
... for x in self.parent.generator:
... yield x
...
... def find(self):
... return next(self.generator)
...
... def union(self, parent):
... if self.parent:
... raise ValueError("Sorry, I'm not a root!")
... self.parent = parent
...
... def __str__(self):
... return self.name
>>> names = "ABCDEFGHIJKLM"
>>> sets = [disjointSet(name) for name in names]
>>> roots = sets[:]
>>> import random
>>> gen = random.Random(42)
>>> while 1:
... for s in sets:
... print(" %s->%s" % (s, s.find()), end='')
... print()
... if len(roots) > 1:
... s1 = gen.choice(roots)
... roots.remove(s1)
... s2 = gen.choice(roots)
... s1.union(s2)
... print("merged", s1, "into", s2)
... else:
... break
A->A B->B C->C D->D E->E F->F G->G H->H I->I J->J K->K L->L M->M
merged K into B
A->A B->B C->C D->D E->E F->F G->G H->H I->I J->J K->B L->L M->M
merged A into F
A->F B->B C->C D->D E->E F->F G->G H->H I->I J->J K->B L->L M->M
merged E into F
A->F B->B C->C D->D E->F F->F G->G H->H I->I J->J K->B L->L M->M
merged D into C
A->F B->B C->C D->C E->F F->F G->G H->H I->I J->J K->B L->L M->M
merged M into C
A->F B->B C->C D->C E->F F->F G->G H->H I->I J->J K->B L->L M->C
merged J into B
A->F B->B C->C D->C E->F F->F G->G H->H I->I J->B K->B L->L M->C
merged B into C
A->F B->C C->C D->C E->F F->F G->G H->H I->I J->C K->C L->L M->C
merged F into G
A->G B->C C->C D->C E->G F->G G->G H->H I->I J->C K->C L->L M->C
merged L into C
A->G B->C C->C D->C E->G F->G G->G H->H I->I J->C K->C L->C M->C
merged G into I
A->I B->C C->C D->C E->I F->I G->I H->H I->I J->C K->C L->C M->C
merged I into H
A->H B->C C->C D->C E->H F->H G->H H->H I->H J->C K->C L->C M->C
merged C into H
A->H B->H C->H D->H E->H F->H G->H H->H I->H J->H K->H L->H M->H
"""
# Emacs turd '
# Fun tests (for sufficiently warped notions of "fun").
fun_tests = """
Build up to a recursive Sieve of Eratosthenes generator.
>>> def firstn(g, n):
... return [next(g) for i in range(n)]
>>> def intsfrom(i):
... while 1:
... yield i
... i += 1
>>> firstn(intsfrom(5), 7)
[5, 6, 7, 8, 9, 10, 11]
>>> def exclude_multiples(n, ints):
... for i in ints:
... if i % n:
... yield i
>>> firstn(exclude_multiples(3, intsfrom(1)), 6)
[1, 2, 4, 5, 7, 8]
>>> def sieve(ints):
... prime = next(ints)
... yield prime
... not_divisible_by_prime = exclude_multiples(prime, ints)
... for p in sieve(not_divisible_by_prime):
... yield p
>>> primes = sieve(intsfrom(2))
>>> firstn(primes, 20)
[2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71]
Another famous problem: generate all integers of the form
2**i * 3**j * 5**k
in increasing order, where i,j,k >= 0. Trickier than it may look at first!
Try writing it without generators, and correctly, and without generating
3 internal results for each result output.
>>> def times(n, g):
... for i in g:
... yield n * i
>>> firstn(times(10, intsfrom(1)), 10)
[10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
>>> def merge(g, h):
... ng = next(g)
... nh = next(h)
... while 1:
... if ng < nh:
... yield ng
... ng = next(g)
... elif ng > nh:
... yield nh
... nh = next(h)
... else:
... yield ng
... ng = next(g)
... nh = next(h)
The following works, but is doing a whale of a lot of redundant work --
it's not clear how to get the internal uses of m235 to share a single
generator. Note that me_times2 (etc) each need to see every element in the
result sequence. So this is an example where lazy lists are more natural
(you can look at the head of a lazy list any number of times).
>>> def m235():
... yield 1
... me_times2 = times(2, m235())
... me_times3 = times(3, m235())
... me_times5 = times(5, m235())
... for i in merge(merge(me_times2,
... me_times3),
... me_times5):
... yield i
Don't print "too many" of these -- the implementation above is extremely
inefficient: each call of m235() leads to 3 recursive calls, and in
turn each of those 3 more, and so on, and so on, until we've descended
enough levels to satisfy the print stmts. Very odd: when I printed 5
lines of results below, this managed to screw up Win98's malloc in "the
usual" way, i.e. the heap grew over 4Mb so Win98 started fragmenting
address space, and it *looked* like a very slow leak.
>>> result = m235()
>>> for i in range(3):
... print(firstn(result, 15))
[1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24]
[25, 27, 30, 32, 36, 40, 45, 48, 50, 54, 60, 64, 72, 75, 80]
[81, 90, 96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162, 180, 192]
Heh. Here's one way to get a shared list, complete with an excruciating
namespace renaming trick. The *pretty* part is that the times() and merge()
functions can be reused as-is, because they only assume their stream
arguments are iterable -- a LazyList is the same as a generator to times().
>>> class LazyList:
... def __init__(self, g):
... self.sofar = []
... self.fetch = g.__next__
...
... def __getitem__(self, i):
... sofar, fetch = self.sofar, self.fetch
... while i >= len(sofar):
... sofar.append(fetch())
... return sofar[i]
>>> def m235():
... yield 1
... # Gack: m235 below actually refers to a LazyList.
... me_times2 = times(2, m235)
... me_times3 = times(3, m235)
... me_times5 = times(5, m235)
... for i in merge(merge(me_times2,
... me_times3),
... me_times5):
... yield i
Print as many of these as you like -- *this* implementation is memory-
efficient.
>>> m235 = LazyList(m235())
>>> for i in range(5):
... print([m235[j] for j in range(15*i, 15*(i+1))])
[1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24]
[25, 27, 30, 32, 36, 40, 45, 48, 50, 54, 60, 64, 72, 75, 80]
[81, 90, 96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162, 180, 192]
[200, 216, 225, 240, 243, 250, 256, 270, 288, 300, 320, 324, 360, 375, 384]
[400, 405, 432, 450, 480, 486, 500, 512, 540, 576, 600, 625, 640, 648, 675]
Ye olde Fibonacci generator, LazyList style.
>>> def fibgen(a, b):
...
... def sum(g, h):
... while 1:
... yield next(g) + next(h)
...
... def tail(g):
... next(g) # throw first away
... for x in g:
... yield x
...
... yield a
... yield b
... for s in sum(iter(fib),
... tail(iter(fib))):
... yield s
>>> fib = LazyList(fibgen(1, 2))
>>> firstn(iter(fib), 17)
[1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597, 2584]
Running after your tail with itertools.tee (new in version 2.4)
The algorithms "m235" (Hamming) and Fibonacci presented above are both
examples of a whole family of FP (functional programming) algorithms
where a function produces and returns a list while the production algorithm
suppose the list as already produced by recursively calling itself.
For these algorithms to work, they must:
- produce at least a first element without presupposing the existence of
the rest of the list
- produce their elements in a lazy manner
To work efficiently, the beginning of the list must not be recomputed over
and over again. This is ensured in most FP languages as a built-in feature.
In python, we have to explicitly maintain a list of already computed results
and abandon genuine recursivity.
This is what had been attempted above with the LazyList class. One problem
with that class is that it keeps a list of all of the generated results and
therefore continually grows. This partially defeats the goal of the generator
concept, viz. produce the results only as needed instead of producing them
all and thereby wasting memory.
Thanks to itertools.tee, it is now clear "how to get the internal uses of
m235 to share a single generator".
>>> from itertools import tee
>>> def m235():
... def _m235():
... yield 1
... for n in merge(times(2, m2),
... merge(times(3, m3),
... times(5, m5))):
... yield n
... m1 = _m235()
... m2, m3, m5, mRes = tee(m1, 4)
... return mRes
>>> it = m235()
>>> for i in range(5):
... print(firstn(it, 15))
[1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24]
[25, 27, 30, 32, 36, 40, 45, 48, 50, 54, 60, 64, 72, 75, 80]
[81, 90, 96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162, 180, 192]
[200, 216, 225, 240, 243, 250, 256, 270, 288, 300, 320, 324, 360, 375, 384]
[400, 405, 432, 450, 480, 486, 500, 512, 540, 576, 600, 625, 640, 648, 675]
The "tee" function does just what we want. It internally keeps a generated
result for as long as it has not been "consumed" from all of the duplicated
iterators, whereupon it is deleted. You can therefore print the Hamming
sequence for hours with little or no increase in memory usage.
The beauty of it is that recursive running-after-their-tail FP algorithms
are quite straightforwardly expressed with this Python idiom.
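A quick illustration of that sharing, with a plain list iterator: each copy
produced by tee sees the full sequence independently, while tee only buffers
items not yet consumed by every copy.
>>> from itertools import tee
>>> a, b = tee(iter([1, 2, 3]))
>>> next(a)
1
>>> list(b)
[1, 2, 3]
>>> list(a)
[2, 3]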
Ye olde Fibonacci generator, tee style.
>>> def fib():
...
... def _isum(g, h):
... while 1:
... yield next(g) + next(h)
...
... def _fib():
... yield 1
... yield 2
... next(fibTail) # throw first away
... for res in _isum(fibHead, fibTail):
... yield res
...
... realfib = _fib()
... fibHead, fibTail, fibRes = tee(realfib, 3)
... return fibRes
>>> firstn(fib(), 17)
[1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597, 2584]
"""
# syntax_tests mostly provokes SyntaxErrors. Also fiddling with #if 0
# hackery.
syntax_tests = """
These are fine:
>>> def f():
... yield 1
... return
>>> def f():
... try:
... yield 1
... finally:
... pass
>>> def f():
... try:
... try:
... 1//0
... except ZeroDivisionError:
... yield 666
... except:
... pass
... finally:
... pass
>>> def f():
... try:
... try:
... yield 12
... 1//0
... except ZeroDivisionError:
... yield 666
... except:
... try:
... x = 12
... finally:
... yield 12
... except:
... return
>>> list(f())
[12, 666]
>>> def f():
... yield
>>> type(f())
<class 'generator'>
>>> def f():
... if 0:
... yield
>>> type(f())
<class 'generator'>
>>> def f():
... if 0:
... yield 1
>>> type(f())
<class 'generator'>
>>> def f():
... if "":
... yield None
>>> type(f())
<class 'generator'>
>>> def f():
... return
... try:
... if x==4:
... pass
... elif 0:
... try:
... 1//0
... except SyntaxError:
... pass
... else:
... if 0:
... while 12:
... x += 1
... yield 2 # don't blink
... f(a, b, c, d, e)
... else:
... pass
... except:
... x = 1
... return
>>> type(f())
<class 'generator'>
>>> def f():
... if 0:
... def g():
... yield 1
...
>>> type(f())
<class 'NoneType'>
>>> def f():
... if 0:
... class C:
... def __init__(self):
... yield 1
... def f(self):
... yield 2
>>> type(f())
<class 'NoneType'>
>>> def f():
... if 0:
... return
... if 0:
... yield 2
>>> type(f())
<class 'generator'>
This one caused a crash (see SF bug 567538):
>>> def f():
... for i in range(3):
... try:
... continue
... finally:
... yield i
...
>>> g = f()
>>> print(next(g))
0
>>> print(next(g))
1
>>> print(next(g))
2
>>> print(next(g))
Traceback (most recent call last):
StopIteration
Test the gi_code attribute
>>> def f():
... yield 5
...
>>> g = f()
>>> g.gi_code is f.__code__
True
>>> next(g)
5
>>> next(g)
Traceback (most recent call last):
StopIteration
>>> g.gi_code is f.__code__
True
Test the __name__ attribute and the repr()
>>> def f():
... yield 5
...
>>> g = f()
>>> g.__name__
'f'
>>> repr(g) # doctest: +ELLIPSIS
'<generator object f at ...>'
Lambdas shouldn't have their usual return behavior.
>>> x = lambda: (yield 1)
>>> list(x())
[1]
>>> x = lambda: ((yield 1), (yield 2))
>>> list(x())
[1, 2]
"""
# conjoin is a simple backtracking generator, named in honor of Icon's
# "conjunction" control structure. Pass a list of no-argument functions
# that return iterable objects. Easiest to explain by example: assume the
# function list [x, y, z] is passed. Then conjoin acts like:
#
# def g():
# values = [None] * 3
# for values[0] in x():
# for values[1] in y():
# for values[2] in z():
# yield values
#
# So some 3-lists of values *may* be generated, each time we successfully
# get into the innermost loop. If an iterator fails (is exhausted) before
# then, it "backtracks" to get the next value from the nearest enclosing
# iterator (the one "to the left"), and starts all over again at the next
# slot (pumps a fresh iterator). Of course this is most useful when the
# iterators have side-effects, so that which values *can* be generated at
# each slot depend on the values iterated at previous slots.
def simple_conjoin(gs):
values = [None] * len(gs)
def gen(i):
if i >= len(gs):
yield values
else:
for values[i] in gs[i]():
for x in gen(i+1):
yield x
for x in gen(0):
yield x
# That works fine, but recursing a level and checking i against len(gs) for
# each item produced is inefficient. By doing manual loop unrolling across
# generator boundaries, it's possible to eliminate most of that overhead.
# This isn't worth the bother *in general* for generators, but conjoin() is
# a core building block for some CPU-intensive generator applications.
def conjoin(gs):
n = len(gs)
values = [None] * n
    # Do one loop nest at a time recursively, until the # of loop nests
# remaining is divisible by 3.
def gen(i):
if i >= n:
yield values
elif (n-i) % 3:
ip1 = i+1
for values[i] in gs[i]():
for x in gen(ip1):
yield x
else:
for x in _gen3(i):
yield x
# Do three loop nests at a time, recursing only if at least three more
# remain. Don't call directly: this is an internal optimization for
# gen's use.
def _gen3(i):
assert i < n and (n-i) % 3 == 0
ip1, ip2, ip3 = i+1, i+2, i+3
g, g1, g2 = gs[i : ip3]
if ip3 >= n:
# These are the last three, so we can yield values directly.
for values[i] in g():
for values[ip1] in g1():
for values[ip2] in g2():
yield values
else:
# At least 6 loop nests remain; peel off 3 and recurse for the
# rest.
for values[i] in g():
for values[ip1] in g1():
for values[ip2] in g2():
for x in _gen3(ip3):
yield x
for x in gen(0):
yield x
# And one more approach: For backtracking apps like the Knight's Tour
# solver below, the number of backtracking levels can be enormous (one
# level per square, for the Knight's Tour, so that e.g. a 100x100 board
# needs 10,000 levels). In such cases Python is likely to run out of
# stack space due to recursion. So here's a recursion-free version of
# conjoin too.
# NOTE WELL: This allows large problems to be solved with only trivial
# demands on stack space. Without explicitly resumable generators, this is
# much harder to achieve. OTOH, this is much slower (up to a factor of 2)
# than the fancy unrolled recursive conjoin.
def flat_conjoin(gs): # rename to conjoin to run tests with this instead
n = len(gs)
values = [None] * n
iters = [None] * n
_StopIteration = StopIteration # make local because caught a *lot*
i = 0
while 1:
# Descend.
try:
while i < n:
it = iters[i] = gs[i]().__next__
values[i] = it()
i += 1
except _StopIteration:
pass
else:
assert i == n
yield values
# Backtrack until an older iterator can be resumed.
i -= 1
while i >= 0:
try:
values[i] = iters[i]()
# Success! Start fresh at next level.
i += 1
break
except _StopIteration:
# Continue backtracking.
i -= 1
else:
assert i < 0
break
# A conjoin-based N-Queens solver.
class Queens:
def __init__(self, n):
self.n = n
rangen = range(n)
# Assign a unique int to each column and diagonal.
# columns: n of those, range(n).
# NW-SE diagonals: 2n-1 of these, i-j unique and invariant along
# each, smallest i-j is 0-(n-1) = 1-n, so add n-1 to shift to 0-
# based.
# NE-SW diagonals: 2n-1 of these, i+j unique and invariant along
# each, smallest i+j is 0, largest is 2n-2.
# For each square, compute a bit vector of the columns and
# diagonals it covers, and for each row compute a function that
        # generates the possibilities for the columns in that row.
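        # Worked example for n=4, square (i=1, j=2): column bit 1<<2,
        # NW-SE bit 1<<(4 + (1-2) + 3) = 1<<6, NE-SW bit 1<<(4 + 7 + (1+2)) = 1<<14,
        # so that square's "uses" mask is (1<<2) | (1<<6) | (1<<14).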
self.rowgenerators = []
for i in rangen:
rowuses = [(1 << j) | # column ordinal
(1 << (n + i-j + n-1)) | # NW-SE ordinal
(1 << (n + 2*n-1 + i+j)) # NE-SW ordinal
for j in rangen]
def rowgen(rowuses=rowuses):
for j in rangen:
uses = rowuses[j]
if uses & self.used == 0:
self.used |= uses
yield j
self.used &= ~uses
self.rowgenerators.append(rowgen)
# Generate solutions.
def solve(self):
self.used = 0
for row2col in conjoin(self.rowgenerators):
yield row2col
def printsolution(self, row2col):
n = self.n
assert n == len(row2col)
sep = "+" + "-+" * n
print(sep)
for i in range(n):
squares = [" " for j in range(n)]
squares[row2col[i]] = "Q"
print("|" + "|".join(squares) + "|")
print(sep)
# A conjoin-based Knight's Tour solver. This is pretty sophisticated
# (e.g., when used with flat_conjoin above, and passing hard=1 to the
# constructor, a 200x200 Knight's Tour was found quickly -- note that we're
# creating 10s of thousands of generators then!), and is lengthy.
class Knights:
def __init__(self, m, n, hard=0):
self.m, self.n = m, n
# solve() will set up succs[i] to be a list of square #i's
# successors.
succs = self.succs = []
# Remove i0 from each of its successor's successor lists, i.e.
# successors can't go back to i0 again. Return 0 if we can
# detect this makes a solution impossible, else return 1.
def remove_from_successors(i0, len=len):
# If we remove all exits from a free square, we're dead:
# even if we move to it next, we can't leave it again.
# If we create a square with one exit, we must visit it next;
# else somebody else will have to visit it, and since there's
            # only one adjacent square, there won't be a way to leave it again.
            # Finally, if we create more than one free square with a
# single exit, we can only move to one of them next, leaving
# the other one a dead end.
ne0 = ne1 = 0
for i in succs[i0]:
s = succs[i]
s.remove(i0)
e = len(s)
if e == 0:
ne0 += 1
elif e == 1:
ne1 += 1
return ne0 == 0 and ne1 < 2
# Put i0 back in each of its successor's successor lists.
def add_to_successors(i0):
for i in succs[i0]:
succs[i].append(i0)
# Generate the first move.
def first():
if m < 1 or n < 1:
return
# Since we're looking for a cycle, it doesn't matter where we
# start. Starting in a corner makes the 2nd move easy.
corner = self.coords2index(0, 0)
remove_from_successors(corner)
self.lastij = corner
yield corner
add_to_successors(corner)
# Generate the second moves.
def second():
corner = self.coords2index(0, 0)
assert self.lastij == corner # i.e., we started in the corner
if m < 3 or n < 3:
return
assert len(succs[corner]) == 2
assert self.coords2index(1, 2) in succs[corner]
assert self.coords2index(2, 1) in succs[corner]
# Only two choices. Whichever we pick, the other must be the
# square picked on move m*n, as it's the only way to get back
# to (0, 0). Save its index in self.final so that moves before
# the last know it must be kept free.
for i, j in (1, 2), (2, 1):
this = self.coords2index(i, j)
final = self.coords2index(3-i, 3-j)
self.final = final
remove_from_successors(this)
succs[final].append(corner)
self.lastij = this
yield this
succs[final].remove(corner)
add_to_successors(this)
# Generate moves 3 thru m*n-1.
def advance(len=len):
# If some successor has only one exit, must take it.
# Else favor successors with fewer exits.
candidates = []
for i in succs[self.lastij]:
e = len(succs[i])
assert e > 0, "else remove_from_successors() pruning flawed"
if e == 1:
candidates = [(e, i)]
break
candidates.append((e, i))
else:
candidates.sort()
for e, i in candidates:
if i != self.final:
if remove_from_successors(i):
self.lastij = i
yield i
add_to_successors(i)
# Generate moves 3 thru m*n-1. Alternative version using a
# stronger (but more expensive) heuristic to order successors.
# Since the # of backtracking levels is m*n, a poor move early on
# can take eons to undo. Smallest square board for which this
# matters a lot is 52x52.
def advance_hard(vmid=(m-1)/2.0, hmid=(n-1)/2.0, len=len):
# If some successor has only one exit, must take it.
# Else favor successors with fewer exits.
# Break ties via max distance from board centerpoint (favor
# corners and edges whenever possible).
candidates = []
for i in succs[self.lastij]:
e = len(succs[i])
assert e > 0, "else remove_from_successors() pruning flawed"
if e == 1:
candidates = [(e, 0, i)]
break
i1, j1 = self.index2coords(i)
d = (i1 - vmid)**2 + (j1 - hmid)**2
candidates.append((e, -d, i))
else:
candidates.sort()
for e, d, i in candidates:
if i != self.final:
if remove_from_successors(i):
self.lastij = i
yield i
add_to_successors(i)
# Generate the last move.
def last():
assert self.final in succs[self.lastij]
yield self.final
if m*n < 4:
self.squaregenerators = [first]
else:
self.squaregenerators = [first, second] + \
[hard and advance_hard or advance] * (m*n - 3) + \
[last]
def coords2index(self, i, j):
assert 0 <= i < self.m
assert 0 <= j < self.n
return i * self.n + j
def index2coords(self, index):
assert 0 <= index < self.m * self.n
return divmod(index, self.n)
def _init_board(self):
succs = self.succs
del succs[:]
m, n = self.m, self.n
c2i = self.coords2index
offsets = [( 1, 2), ( 2, 1), ( 2, -1), ( 1, -2),
(-1, -2), (-2, -1), (-2, 1), (-1, 2)]
rangen = range(n)
for i in range(m):
for j in rangen:
s = [c2i(i+io, j+jo) for io, jo in offsets
if 0 <= i+io < m and
0 <= j+jo < n]
succs.append(s)
# Generate solutions.
def solve(self):
self._init_board()
for x in conjoin(self.squaregenerators):
yield x
def printsolution(self, x):
m, n = self.m, self.n
assert len(x) == m*n
w = len(str(m*n))
format = "%" + str(w) + "d"
squares = [[None] * n for i in range(m)]
k = 1
for i in x:
i1, j1 = self.index2coords(i)
squares[i1][j1] = format % k
k += 1
sep = "+" + ("-" * w + "+") * n
print(sep)
for i in range(m):
row = squares[i]
print("|" + "|".join(row) + "|")
print(sep)
conjoin_tests = """
Generate the 3-bit binary numbers in order. This illustrates dumbest-
possible use of conjoin, just to generate the full cross-product.
>>> for c in conjoin([lambda: iter((0, 1))] * 3):
... print(c)
[0, 0, 0]
[0, 0, 1]
[0, 1, 0]
[0, 1, 1]
[1, 0, 0]
[1, 0, 1]
[1, 1, 0]
[1, 1, 1]
For efficiency in typical backtracking apps, conjoin() yields the same list
object each time. So if you want to save away a full account of its
generated sequence, you need to copy its results.
>>> def gencopy(iterator):
... for x in iterator:
... yield x[:]
>>> for n in range(10):
... all = list(gencopy(conjoin([lambda: iter((0, 1))] * n)))
... print(n, len(all), all[0] == [0] * n, all[-1] == [1] * n)
0 1 True True
1 2 True True
2 4 True True
3 8 True True
4 16 True True
5 32 True True
6 64 True True
7 128 True True
8 256 True True
9 512 True True
And run an 8-queens solver.
>>> q = Queens(8)
>>> LIMIT = 2
>>> count = 0
>>> for row2col in q.solve():
... count += 1
... if count <= LIMIT:
... print("Solution", count)
... q.printsolution(row2col)
Solution 1
+-+-+-+-+-+-+-+-+
|Q| | | | | | | |
+-+-+-+-+-+-+-+-+
| | | | |Q| | | |
+-+-+-+-+-+-+-+-+
| | | | | | | |Q|
+-+-+-+-+-+-+-+-+
| | | | | |Q| | |
+-+-+-+-+-+-+-+-+
| | |Q| | | | | |
+-+-+-+-+-+-+-+-+
| | | | | | |Q| |
+-+-+-+-+-+-+-+-+
| |Q| | | | | | |
+-+-+-+-+-+-+-+-+
| | | |Q| | | | |
+-+-+-+-+-+-+-+-+
Solution 2
+-+-+-+-+-+-+-+-+
|Q| | | | | | | |
+-+-+-+-+-+-+-+-+
| | | | | |Q| | |
+-+-+-+-+-+-+-+-+
| | | | | | | |Q|
+-+-+-+-+-+-+-+-+
| | |Q| | | | | |
+-+-+-+-+-+-+-+-+
| | | | | | |Q| |
+-+-+-+-+-+-+-+-+
| | | |Q| | | | |
+-+-+-+-+-+-+-+-+
| |Q| | | | | | |
+-+-+-+-+-+-+-+-+
| | | | |Q| | | |
+-+-+-+-+-+-+-+-+
>>> print(count, "solutions in all.")
92 solutions in all.
And run a Knight's Tour on a 10x10 board. Note that there are about
20,000 solutions even on a 6x6 board, so don't dare run this to exhaustion.
>>> k = Knights(10, 10)
>>> LIMIT = 2
>>> count = 0
>>> for x in k.solve():
... count += 1
... if count <= LIMIT:
... print("Solution", count)
... k.printsolution(x)
... else:
... break
Solution 1
+---+---+---+---+---+---+---+---+---+---+
| 1| 58| 27| 34| 3| 40| 29| 10| 5| 8|
+---+---+---+---+---+---+---+---+---+---+
| 26| 35| 2| 57| 28| 33| 4| 7| 30| 11|
+---+---+---+---+---+---+---+---+---+---+
| 59|100| 73| 36| 41| 56| 39| 32| 9| 6|
+---+---+---+---+---+---+---+---+---+---+
| 74| 25| 60| 55| 72| 37| 42| 49| 12| 31|
+---+---+---+---+---+---+---+---+---+---+
| 61| 86| 99| 76| 63| 52| 47| 38| 43| 50|
+---+---+---+---+---+---+---+---+---+---+
| 24| 75| 62| 85| 54| 71| 64| 51| 48| 13|
+---+---+---+---+---+---+---+---+---+---+
| 87| 98| 91| 80| 77| 84| 53| 46| 65| 44|
+---+---+---+---+---+---+---+---+---+---+
| 90| 23| 88| 95| 70| 79| 68| 83| 14| 17|
+---+---+---+---+---+---+---+---+---+---+
| 97| 92| 21| 78| 81| 94| 19| 16| 45| 66|
+---+---+---+---+---+---+---+---+---+---+
| 22| 89| 96| 93| 20| 69| 82| 67| 18| 15|
+---+---+---+---+---+---+---+---+---+---+
Solution 2
+---+---+---+---+---+---+---+---+---+---+
| 1| 58| 27| 34| 3| 40| 29| 10| 5| 8|
+---+---+---+---+---+---+---+---+---+---+
| 26| 35| 2| 57| 28| 33| 4| 7| 30| 11|
+---+---+---+---+---+---+---+---+---+---+
| 59|100| 73| 36| 41| 56| 39| 32| 9| 6|
+---+---+---+---+---+---+---+---+---+---+
| 74| 25| 60| 55| 72| 37| 42| 49| 12| 31|
+---+---+---+---+---+---+---+---+---+---+
| 61| 86| 99| 76| 63| 52| 47| 38| 43| 50|
+---+---+---+---+---+---+---+---+---+---+
| 24| 75| 62| 85| 54| 71| 64| 51| 48| 13|
+---+---+---+---+---+---+---+---+---+---+
| 87| 98| 89| 80| 77| 84| 53| 46| 65| 44|
+---+---+---+---+---+---+---+---+---+---+
| 90| 23| 92| 95| 70| 79| 68| 83| 14| 17|
+---+---+---+---+---+---+---+---+---+---+
| 97| 88| 21| 78| 81| 94| 19| 16| 45| 66|
+---+---+---+---+---+---+---+---+---+---+
| 22| 91| 96| 93| 20| 69| 82| 67| 18| 15|
+---+---+---+---+---+---+---+---+---+---+
"""
weakref_tests = """\
Generators are weakly referencable:
>>> import weakref
>>> def gen():
... yield 'foo!'
...
>>> wr = weakref.ref(gen)
>>> wr() is gen
True
>>> p = weakref.proxy(gen)
Generator-iterators are weakly referencable as well:
>>> gi = gen()
>>> wr = weakref.ref(gi)
>>> wr() is gi
True
>>> p = weakref.proxy(gi)
>>> list(p)
['foo!']
"""
coroutine_tests = """\
Sending a value into a started generator:
>>> def f():
... print((yield 1))
... yield 2
>>> g = f()
>>> next(g)
1
>>> g.send(42)
42
2
Sending a value into a new generator produces a TypeError:
>>> f().send("foo")
Traceback (most recent call last):
...
TypeError: can't send non-None value to a just-started generator
Yield by itself yields None:
>>> def f(): yield
>>> list(f())
[None]
An obscene abuse of a yield expression within a generator expression:
>>> list((yield 21) for i in range(4))
[21, None, 21, None, 21, None, 21, None]
And a more sane, but still weird usage:
>>> def f(): list(i for i in [(yield 26)])
>>> type(f())
<class 'generator'>
A yield expression with augmented assignment.
>>> def coroutine(seq):
... count = 0
... while count < 200:
... count += yield
... seq.append(count)
>>> seq = []
>>> c = coroutine(seq)
>>> next(c)
>>> print(seq)
[]
>>> c.send(10)
>>> print(seq)
[10]
>>> c.send(10)
>>> print(seq)
[10, 20]
>>> c.send(10)
>>> print(seq)
[10, 20, 30]
Check some syntax errors for yield expressions:
>>> f=lambda: (yield 1),(yield 2)
Traceback (most recent call last):
...
SyntaxError: 'yield' outside function
>>> def f(): x = yield = y
Traceback (most recent call last):
...
SyntaxError: assignment to yield expression not possible
>>> def f(): (yield bar) = y
Traceback (most recent call last):
...
SyntaxError: can't assign to yield expression
>>> def f(): (yield bar) += y
Traceback (most recent call last):
...
SyntaxError: can't assign to yield expression
Now check some throw() conditions:
>>> def f():
... while True:
... try:
... print((yield))
... except ValueError as v:
... print("caught ValueError (%s)" % (v))
>>> import sys
>>> g = f()
>>> next(g)
>>> g.throw(ValueError) # type only
caught ValueError ()
>>> g.throw(ValueError("xyz")) # value only
caught ValueError (xyz)
>>> g.throw(ValueError, ValueError(1)) # value+matching type
caught ValueError (1)
>>> g.throw(ValueError, TypeError(1)) # mismatched type, rewrapped
caught ValueError (1)
>>> g.throw(ValueError, ValueError(1), None) # explicit None traceback
caught ValueError (1)
>>> g.throw(ValueError(1), "foo") # bad args
Traceback (most recent call last):
...
TypeError: instance exception may not have a separate value
>>> g.throw(ValueError, "foo", 23) # bad args
Traceback (most recent call last):
...
TypeError: throw() third argument must be a traceback object
>>> g.throw("abc")
Traceback (most recent call last):
...
TypeError: exceptions must be classes or instances deriving from BaseException, not str
>>> g.throw(0)
Traceback (most recent call last):
...
TypeError: exceptions must be classes or instances deriving from BaseException, not int
>>> g.throw(list)
Traceback (most recent call last):
...
TypeError: exceptions must be classes or instances deriving from BaseException, not type
>>> def throw(g,exc):
... try:
... raise exc
... except:
... g.throw(*sys.exc_info())
>>> throw(g,ValueError) # do it with traceback included
caught ValueError ()
>>> g.send(1)
1
>>> throw(g,TypeError) # terminate the generator
Traceback (most recent call last):
...
TypeError
>>> print(g.gi_frame)
None
>>> g.send(2)
Traceback (most recent call last):
...
StopIteration
>>> g.throw(ValueError,6) # throw on closed generator
Traceback (most recent call last):
...
ValueError: 6
>>> f().throw(ValueError,7) # throw on just-opened generator
Traceback (most recent call last):
...
ValueError: 7
Plain "raise" inside a generator should preserve the traceback (#13188).
The traceback should have 3 levels:
- g.throw()
- f()
- 1/0
>>> def f():
... try:
... yield
... except:
... raise
>>> g = f()
>>> try:
... 1/0
... except ZeroDivisionError as v:
... try:
... g.throw(v)
... except Exception as w:
... tb = w.__traceback__
>>> levels = 0
>>> while tb:
... levels += 1
... tb = tb.tb_next
>>> levels
3
Now let's try closing a generator:
>>> def f():
... try: yield
... except GeneratorExit:
... print("exiting")
>>> g = f()
>>> next(g)
>>> g.close()
exiting
>>> g.close() # should be no-op now
>>> f().close() # close on just-opened generator should be fine
>>> def f(): yield # an even simpler generator
>>> f().close() # close before opening
>>> g = f()
>>> next(g)
>>> g.close() # close normally
And finalization:
>>> def f():
... try: yield
... finally:
... print("exiting")
>>> g = f()
>>> next(g)
>>> del g
exiting
GeneratorExit is not caught by except Exception:
>>> def f():
... try: yield
... except Exception:
... print('except')
... finally:
... print('finally')
>>> g = f()
>>> next(g)
>>> del g
finally
Now let's try some ill-behaved generators:
>>> def f():
... try: yield
... except GeneratorExit:
... yield "foo!"
>>> g = f()
>>> next(g)
>>> g.close()
Traceback (most recent call last):
...
RuntimeError: generator ignored GeneratorExit
>>> g.close()
Our ill-behaved code should be invoked during GC:
>>> import sys, io
>>> old, sys.stderr = sys.stderr, io.StringIO()
>>> g = f()
>>> next(g)
>>> del g
>>> "RuntimeError: generator ignored GeneratorExit" in sys.stderr.getvalue()
True
>>> sys.stderr = old
And errors thrown during closing should propagate:
>>> def f():
... try: yield
... except GeneratorExit:
... raise TypeError("fie!")
>>> g = f()
>>> next(g)
>>> g.close()
Traceback (most recent call last):
...
TypeError: fie!
Ensure that various yield expression constructs make their
enclosing function a generator:
>>> def f(): x += yield
>>> type(f())
<class 'generator'>
>>> def f(): x = yield
>>> type(f())
<class 'generator'>
>>> def f(): lambda x=(yield): 1
>>> type(f())
<class 'generator'>
>>> def f(): x=(i for i in (yield) if (yield))
>>> type(f())
<class 'generator'>
>>> def f(d): d[(yield "a")] = d[(yield "b")] = 27
>>> data = [1,2]
>>> g = f(data)
>>> type(g)
<class 'generator'>
>>> g.send(None)
'a'
>>> data
[1, 2]
>>> g.send(0)
'b'
>>> data
[27, 2]
>>> try: g.send(1)
... except StopIteration: pass
>>> data
[27, 27]
"""
refleaks_tests = """
Prior to adding cycle-GC support to itertools.tee, this code would leak
references. We add it to the standard suite so the routine refleak-tests
would trigger if it starts being uncleanable again.
>>> import itertools
>>> def leak():
... class gen:
... def __iter__(self):
... return self
... def __next__(self):
... return self.item
... g = gen()
... head, tail = itertools.tee(g)
... g.item = head
... return head
>>> it = leak()
Make sure to also test the involvement of the tee-internal teedataobject,
which stores returned items.
>>> item = next(it)
This test leaked at one point due to generator finalization/destruction.
It was copied from Lib/test/leakers/test_generator_cycle.py before the file
was removed.
>>> def leak():
... def gen():
... while True:
... yield g
... g = gen()
>>> leak()
This test isn't really generator related, but rather exception-in-cleanup
related. The coroutine tests (above) just happen to cause an exception in
the generator's __del__ (tp_del) method. We can also test for this
explicitly, without generators. We do have to redirect stderr to avoid
printing warnings and to doublecheck that we actually tested what we wanted
to test.
>>> import sys, io
>>> old = sys.stderr
>>> try:
... sys.stderr = io.StringIO()
... class Leaker:
... def __del__(self):
... def invoke(message):
... raise RuntimeError(message)
... invoke("test")
...
... l = Leaker()
... del l
... err = sys.stderr.getvalue().strip()
... "Exception ignored in" in err
... "RuntimeError: test" in err
... "Traceback" in err
... "in invoke" in err
... finally:
... sys.stderr = old
True
True
True
True
These refleak tests should perhaps be in a testfile of their own,
test_generators just happened to be the test that drew these out.
"""
__test__ = {"tut": tutorial_tests,
"pep": pep_tests,
"email": email_tests,
"fun": fun_tests,
"syntax": syntax_tests,
"conjoin": conjoin_tests,
"weakref": weakref_tests,
"coroutine": coroutine_tests,
"refleaks": refleaks_tests,
}
# Magic test name that regrtest.py invokes *after* importing this module.
# This worms around a bootstrap problem.
# Note that doctest and regrtest both look in sys.argv for a "-v" argument,
# so this works as expected in both ways of running regrtest.
def test_main(verbose=None):
from test import support, test_generators
support.run_unittest(__name__)
support.run_doctest(test_generators, verbose)
# This part isn't needed for regrtest, but for running the test directly.
if __name__ == "__main__":
test_main(1)
| lgpl-3.0 | 3,463,397,655,559,226,400 | 7,520,725,678,027,957,000 | 25.242268 | 88 | 0.511412 | false |
genedna/containerops | component/python/analysis/memory_profiler/memory_profiler.py | 5 | 42427 | """Profile the memory usage of a Python program"""
# .. we'll use this to pass it to the child script ..
_CLEAN_GLOBALS = globals().copy()
__version__ = '0.47'
_CMD_USAGE = "python -m memory_profiler script_file.py"
import time
import sys
import os
import pdb
import warnings
import linecache
import inspect
import subprocess
import logging
import traceback
from signal import SIGKILL
import json
import yaml
# TODO: provide alternative when multiprocessing is not available
try:
from multiprocessing import Process, Pipe
except ImportError:
from multiprocessing.dummy import Process, Pipe
try:
from IPython.core.magic import Magics, line_cell_magic, magics_class
except ImportError:
# ipython_version < '0.13'
Magics = object
line_cell_magic = lambda func: func
magics_class = lambda cls: cls
PY2 = sys.version_info[0] == 2
_TWO_20 = float(2 ** 20)
if PY2:
import __builtin__ as builtins
else:
import builtins
def unicode(x, *args):
return str(x)
# .. get available packages ..
try:
import psutil
has_psutil = True
except ImportError:
has_psutil = False
try:
import tracemalloc
has_tracemalloc = True
except ImportError:
has_tracemalloc = False
class MemitResult(object):
"""memit magic run details.
Object based on IPython's TimeitResult
"""
def __init__(self, mem_usage, baseline, repeat, timeout, interval,
include_children):
self.mem_usage = mem_usage
self.baseline = baseline
self.repeat = repeat
self.timeout = timeout
self.interval = interval
self.include_children = include_children
def __str__(self):
max_mem = max(self.mem_usage)
inc = max_mem - self.baseline
return 'peak memory: %.02f MiB, increment: %.02f MiB' % (max_mem, inc)
def _repr_pretty_(self, p, cycle):
msg = str(self)
p.text(u'<MemitResult : ' + msg + u'>')
def _get_child_memory(process, meminfo_attr=None):
"""
Returns a generator that yields memory for all child processes.
"""
if not has_psutil:
raise NotImplementedError((
"The psutil module is required to monitor the "
"memory usage of child processes."
))
# Convert a pid to a process
if isinstance(process, int):
if process == -1: process = os.getpid()
process = psutil.Process(process)
if not meminfo_attr:
# Use the psutil 2.0 attr if the older version isn't passed in.
meminfo_attr = 'memory_info' if hasattr(process, 'memory_info') else 'get_memory_info'
    # Select the psutil function to get the children, similar to how we selected
    # the memory_info attr (a change from catching the AttributeError).
children_attr = 'children' if hasattr(process, 'children') else 'get_children'
# Loop over the child processes and yield their memory
try:
for child in getattr(process, children_attr)(recursive=True):
yield getattr(child, meminfo_attr)()[0] / _TWO_20
except psutil.NoSuchProcess:
# https://github.com/fabianp/memory_profiler/issues/71
yield 0.0
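# For example, sum(_get_child_memory(os.getpid())) gives the combined resident
# memory of the current process' children, in MiB (psutil must be installed).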
def _get_memory(pid, backend, timestamps=False, include_children=False, filename=None):
# .. low function to get memory consumption ..
if pid == -1:
pid = os.getpid()
def tracemalloc_tool():
        # .. cross-platform but requires Python 3.4 or higher ..
stat = next(filter(lambda item: str(item).startswith(filename),
tracemalloc.take_snapshot().statistics('filename')))
mem = stat.size / _TWO_20
if timestamps:
return mem, time.time()
else:
return mem
def ps_util_tool():
        # .. cross-platform but requires psutil ..
process = psutil.Process(pid)
try:
            # avoid using get_memory_info since it does not exist
            # in psutil > 2.0, and accessing it will cause an exception.
meminfo_attr = 'memory_info' if hasattr(process, 'memory_info') \
else 'get_memory_info'
mem = getattr(process, meminfo_attr)()[0] / _TWO_20
if include_children:
mem += sum(_get_child_memory(process, meminfo_attr))
if timestamps:
return mem, time.time()
else:
return mem
except psutil.AccessDenied:
pass
# continue and try to get this from ps
def posix_tool():
# .. scary stuff ..
if include_children:
raise NotImplementedError((
"The psutil module is required to monitor the "
"memory usage of child processes."
))
warnings.warn("psutil module not found. memory_profiler will be slow")
# ..
# .. memory usage in MiB ..
# .. this should work on both Mac and Linux ..
# .. subprocess.check_output appeared in 2.7, using Popen ..
# .. for backwards compatibility ..
out = subprocess.Popen(['ps', 'v', '-p', str(pid)],
stdout=subprocess.PIPE
).communicate()[0].split(b'\n')
try:
vsz_index = out[0].split().index(b'RSS')
mem = float(out[1].split()[vsz_index]) / 1024
if timestamps:
return mem, time.time()
else:
return mem
except:
if timestamps:
return -1, time.time()
else:
return -1
if backend == 'tracemalloc' and \
(filename is None or filename == '<unknown>'):
raise RuntimeError(
'There is no access to source file of the profiled function'
)
tools = {'tracemalloc': tracemalloc_tool,
'psutil': ps_util_tool,
'posix': posix_tool}
return tools[backend]()
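# For example, _get_memory(os.getpid(), 'psutil') returns this process' RSS in
# MiB, while _get_memory(-1, 'psutil', timestamps=True) returns a
# (MiB, time.time()) tuple for the current process.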
class MemTimer(Process):
"""
Fetch memory consumption from over a time interval
"""
def __init__(self, monitor_pid, interval, pipe, backend, max_usage=False,
*args, **kw):
self.monitor_pid = monitor_pid
self.interval = interval
self.pipe = pipe
self.cont = True
self.backend = backend
self.max_usage = max_usage
self.n_measurements = 1
self.timestamps = kw.pop("timestamps", False)
self.include_children = kw.pop("include_children", False)
# get baseline memory usage
self.mem_usage = [
_get_memory(self.monitor_pid, self.backend, timestamps=self.timestamps,
include_children=self.include_children)]
super(MemTimer, self).__init__(*args, **kw)
def run(self):
self.pipe.send(0) # we're ready
stop = False
while True:
cur_mem = _get_memory(
self.monitor_pid, self.backend, timestamps=self.timestamps,
include_children=self.include_children,)
if not self.max_usage:
self.mem_usage.append(cur_mem)
else:
self.mem_usage[0] = max(cur_mem, self.mem_usage[0])
self.n_measurements += 1
if stop:
break
stop = self.pipe.poll(self.interval)
# do one more iteration
self.pipe.send(self.mem_usage)
self.pipe.send(self.n_measurements)
def memory_usage(proc=-1, interval=.1, timeout=None, timestamps=False,
include_children=False, multiprocess=False, max_usage=False,
retval=False, stream=None, backend=None):
"""
Return the memory usage of a process or piece of code
Parameters
----------
proc : {int, string, tuple, subprocess.Popen}, optional
The process to monitor. Can be given by an integer/string
representing a PID, by a Popen object or by a tuple
representing a Python function. The tuple contains three
values (f, args, kw) and specifies to run the function
f(*args, **kw).
Set to -1 (default) for current process.
interval : float, optional
Interval at which measurements are collected.
timeout : float, optional
Maximum amount of time (in seconds) to wait before returning.
max_usage : bool, optional
Only return the maximum memory usage (default False)
retval : bool, optional
For profiling python functions. Save the return value of the profiled
function. Return value of memory_usage becomes a tuple:
(mem_usage, retval)
timestamps : bool, optional
if True, timestamps of memory usage measurement are collected as well.
include_children : bool, optional
if True, sum the memory of all forked processes as well
multiprocess : bool, optional
if True, track the memory usage of all forked processes.
stream : File
if stream is a File opened with write access, then results are written
to this file instead of stored in memory and returned at the end of
the subprocess. Useful for long-running processes.
Implies timestamps=True.
Returns
-------
mem_usage : list of floating-point values
        memory usage, in MiB. Its length is always < timeout / interval
        if max_usage is given, returns the maximum memory usage and the
        number of measurements taken
ret : return value of the profiled function
Only returned if retval is set to True
"""
backend = choose_backend(backend)
if stream is not None:
timestamps = True
if not max_usage:
ret = []
else:
ret = -1
if timeout is not None:
max_iter = int(timeout / interval)
elif isinstance(proc, int):
# external process and no timeout
max_iter = 1
else:
# for a Python function wait until it finishes
max_iter = float('inf')
if callable(proc):
proc = (proc, (), {})
if isinstance(proc, (list, tuple)):
if len(proc) == 1:
f, args, kw = (proc[0], (), {})
elif len(proc) == 2:
f, args, kw = (proc[0], proc[1], {})
elif len(proc) == 3:
f, args, kw = (proc[0], proc[1], proc[2])
else:
raise ValueError
while True:
child_conn, parent_conn = Pipe() # this will store MemTimer's results
p = MemTimer(os.getpid(), interval, child_conn, backend,
timestamps=timestamps,
max_usage=max_usage,
include_children=include_children)
p.start()
parent_conn.recv() # wait until we start getting memory
        # When there is an exception in "proc", the (spawned) monitoring processes don't get killed,
        # so the whole run hangs indefinitely. Here, we ensure that those processes get killed!
try:
returned = f(*args, **kw)
parent_conn.send(0) # finish timing
ret = parent_conn.recv()
n_measurements = parent_conn.recv()
if retval:
ret = ret, returned
except Exception:
if has_psutil:
parent = psutil.Process(os.getpid())
for child in parent.children(recursive=True):
os.kill(child.pid, SIGKILL)
p.join(0)
raise
p.join(5 * interval)
if n_measurements > 4 or interval < 1e-6:
break
interval /= 10.
elif isinstance(proc, subprocess.Popen):
# external process, launched from Python
line_count = 0
while True:
if not max_usage:
mem_usage = _get_memory(
proc.pid, backend, timestamps=timestamps,
include_children=include_children)
if stream is not None:
stream.write("MEM {0:.6f} {1:.4f}\n".format(*mem_usage))
# Write children to the stream file
if multiprocess:
for idx, chldmem in enumerate(_get_child_memory(proc.pid)):
stream.write("CHLD {0} {1:.6f} {2:.4f}\n".format(idx, chldmem, time.time()))
else:
# Create a nested list with the child memory
if multiprocess:
mem_usage = [mem_usage]
for chldmem in _get_child_memory(proc.pid):
mem_usage.append(chldmem)
# Append the memory usage to the return value
ret.append(mem_usage)
else:
ret = max(ret,
_get_memory(
proc.pid, backend, include_children=include_children))
time.sleep(interval)
line_count += 1
# flush every 50 lines. Make 'tail -f' usable on profile file
if line_count > 50:
line_count = 0
if stream is not None:
stream.flush()
if timeout is not None:
max_iter -= 1
if max_iter == 0:
break
if proc.poll() is not None:
break
else:
# external process
if max_iter == -1:
max_iter = 1
counter = 0
while counter < max_iter:
counter += 1
if not max_usage:
mem_usage = _get_memory(
proc, backend, timestamps=timestamps,
include_children=include_children)
if stream is not None:
stream.write("MEM {0:.6f} {1:.4f}\n".format(*mem_usage))
# Write children to the stream file
if multiprocess:
for idx, chldmem in enumerate(_get_child_memory(proc.pid)):
stream.write("CHLD {0} {1:.6f} {2:.4f}\n".format(idx, chldmem, time.time()))
else:
# Create a nested list with the child memory
if multiprocess:
mem_usage = [mem_usage]
for chldmem in _get_child_memory(proc.pid):
mem_usage.append(chldmem)
# Append the memory usage to the return value
ret.append(mem_usage)
else:
ret = max([ret,
_get_memory(proc, backend, include_children=include_children)
])
time.sleep(interval)
# Flush every 50 lines.
if counter % 50 == 0 and stream is not None:
stream.flush()
if stream:
return None
return ret
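# A minimal usage sketch for memory_usage(); the worker function and sizes are
# hypothetical, and this helper is never called by the module itself.
def _memory_usage_example():
    def build(n):
        return [0] * n

    # Sample memory roughly every 0.1 s while build() runs ...
    samples = memory_usage((build, (10 ** 6,), {}), interval=0.1)
    # ... or keep only the peak reading.
    peak = memory_usage((build, (10 ** 6,), {}), max_usage=True)
    return samples, peak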
# ..
# .. utility functions for line-by-line ..
def _find_script(script_name):
""" Find the script.
If the input is not a file, then $PATH will be searched.
"""
if os.path.isfile(script_name):
return script_name
path = os.getenv('PATH', os.defpath).split(os.pathsep)
for folder in path:
if not folder:
continue
fn = os.path.join(folder, script_name)
if os.path.isfile(fn):
return fn
sys.stderr.write('Could not find script {0}\n'.format(script_name))
raise SystemExit(1)
class _TimeStamperCM(object):
"""Time-stamping context manager."""
def __init__(self, timestamps, filename, backend):
self.timestamps = timestamps
self.filename = filename
self.backend = backend
def __enter__(self):
self.timestamps.append(
_get_memory(os.getpid(), self.backend, timestamps=True, filename=self.filename))
def __exit__(self, *args):
self.timestamps.append(
_get_memory(os.getpid(), self.backend, timestamps=True, filename=self.filename))
class TimeStamper:
""" A profiler that just records start and end execution times for
any decorated function.
"""
def __init__(self, backend):
self.functions = {}
self.backend = backend
def __call__(self, func=None, precision=None):
if func is not None:
if not callable(func):
raise ValueError("Value must be callable")
self.add_function(func)
f = self.wrap_function(func)
f.__module__ = func.__module__
f.__name__ = func.__name__
f.__doc__ = func.__doc__
f.__dict__.update(getattr(func, '__dict__', {}))
return f
else:
def inner_partial(f):
return self.__call__(f, precision=precision)
return inner_partial
def timestamp(self, name="<block>"):
"""Returns a context manager for timestamping a block of code."""
# Make a fake function
func = lambda x: x
func.__module__ = ""
func.__name__ = name
self.add_function(func)
timestamps = []
self.functions[func].append(timestamps)
# A new object is required each time, since there can be several
# nested context managers.
try:
filename = inspect.getsourcefile(func)
except TypeError:
filename = '<unknown>'
return _TimeStamperCM(timestamps, filename, self.backend)
def add_function(self, func):
if func not in self.functions:
self.functions[func] = []
def wrap_function(self, func):
""" Wrap a function to timestamp it.
"""
def f(*args, **kwds):
# Start time
try:
filename = inspect.getsourcefile(func)
except TypeError:
filename = '<unknown>'
timestamps = [
_get_memory(os.getpid(), self.backend, timestamps=True, filename=filename)]
self.functions[func].append(timestamps)
try:
return func(*args, **kwds)
finally:
# end time
timestamps.append(_get_memory(os.getpid(), self.backend, timestamps=True,
filename=filename))
return f
def show_results(self, stream=None):
if stream is None:
stream = sys.stdout
for func, timestamps in self.functions.items():
function_name = "%s.%s" % (func.__module__, func.__name__)
for ts in timestamps:
stream.write("FUNC %s %.4f %.4f %.4f %.4f\n" % (
(function_name,) + ts[0] + ts[1]))
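# A minimal TimeStamper sketch (illustrative only; the decorated function is
# hypothetical):
#
#     stamper = TimeStamper('psutil')
#
#     @stamper
#     def work():
#         return sum(range(10 ** 6))
#
#     work()
#     with stamper.timestamp("post-processing"):
#         pass
#     stamper.show_results()  # FUNC <name> <start MiB> <t0> <end MiB> <t1> lines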
class CodeMap(dict):
def __init__(self, include_children, backend):
self.include_children = include_children
self._toplevel = []
self.backend = backend
def add(self, code, toplevel_code=None):
if code in self:
return
if toplevel_code is None:
filename = code.co_filename
if filename.endswith((".pyc", ".pyo")):
filename = filename[:-1]
if not os.path.exists(filename):
print('ERROR: Could not find file ' + filename)
if filename.startswith(("ipython-input", "<ipython-input")):
print(
"NOTE: %mprun can only be used on functions defined in"
" physical files, and not in the IPython environment.")
return
toplevel_code = code
(sub_lines, start_line) = inspect.getsourcelines(code)
linenos = range(start_line,
start_line + len(sub_lines))
self._toplevel.append((filename, code, linenos))
self[code] = {}
else:
self[code] = self[toplevel_code]
for subcode in filter(inspect.iscode, code.co_consts):
self.add(subcode, toplevel_code=toplevel_code)
def trace(self, code, lineno):
memory = _get_memory(-1, self.backend, include_children=self.include_children,
filename=code.co_filename)
# if there is already a measurement for that line get the max
previous_memory = self[code].get(lineno, 0)
self[code][lineno] = max(memory, previous_memory)
def items(self):
"""Iterate on the toplevel code blocks."""
for (filename, code, linenos) in self._toplevel:
measures = self[code]
if not measures:
continue # skip if no measurement
line_iterator = ((line, measures.get(line)) for line in linenos)
yield (filename, line_iterator)
class LineProfiler(object):
""" A profiler that records the amount of memory for each line """
def __init__(self, **kw):
include_children = kw.get('include_children', False)
backend = kw.get('backend', 'psutil')
self.code_map = CodeMap(
include_children=include_children, backend=backend)
self.enable_count = 0
self.max_mem = kw.get('max_mem', None)
self.prevlines = []
self.backend = choose_backend(kw.get('backend', None))
def __call__(self, func=None, precision=1):
if func is not None:
self.add_function(func)
f = self.wrap_function(func)
f.__module__ = func.__module__
f.__name__ = func.__name__
f.__doc__ = func.__doc__
f.__dict__.update(getattr(func, '__dict__', {}))
return f
else:
def inner_partial(f):
return self.__call__(f, precision=precision)
return inner_partial
def add_function(self, func):
""" Record line profiling information for the given Python function.
"""
try:
# func_code does not exist in Python3
code = func.__code__
except AttributeError:
warnings.warn("Could not extract a code object for the object %r"
% func)
else:
self.code_map.add(code)
def wrap_function(self, func):
""" Wrap a function to profile it.
"""
def f(*args, **kwds):
self.enable_by_count()
try:
return func(*args, **kwds)
finally:
self.disable_by_count()
return f
def runctx(self, cmd, globals, locals):
""" Profile a single executable statement in the given namespaces.
"""
self.enable_by_count()
try:
exec(cmd, globals, locals)
finally:
self.disable_by_count()
return self
def enable_by_count(self):
""" Enable the profiler if it hasn't been enabled before.
"""
if self.enable_count == 0:
self.enable()
self.enable_count += 1
def disable_by_count(self):
""" Disable the profiler if the number of disable requests matches the
number of enable requests.
"""
if self.enable_count > 0:
self.enable_count -= 1
if self.enable_count == 0:
self.disable()
def trace_memory_usage(self, frame, event, arg):
"""Callback for sys.settrace"""
if frame.f_code in self.code_map:
if event == 'call':
# "call" event just saves the lineno but not the memory
self.prevlines.append(frame.f_lineno)
elif event == 'line':
self.code_map.trace(frame.f_code, self.prevlines[-1])
self.prevlines[-1] = frame.f_lineno
elif event == 'return':
self.code_map.trace(frame.f_code, self.prevlines.pop())
if self._original_trace_function is not None:
self._original_trace_function(frame, event, arg)
return self.trace_memory_usage
def trace_max_mem(self, frame, event, arg):
# run into PDB as soon as memory is higher than MAX_MEM
if event in ('line', 'return') and frame.f_code in self.code_map:
c = _get_memory(-1, self.backend, filename=frame.f_code.co_filename)
if c >= self.max_mem:
t = ('Current memory {0:.2f} MiB exceeded the '
'maximum of {1:.2f} MiB\n'.format(c, self.max_mem))
sys.stdout.write(t)
sys.stdout.write('Stepping into the debugger \n')
frame.f_lineno -= 2
p = pdb.Pdb()
p.quitting = False
p.stopframe = frame
p.returnframe = None
p.stoplineno = frame.f_lineno - 3
p.botframe = None
return p.trace_dispatch
if self._original_trace_function is not None:
(self._original_trace_function)(frame, event, arg)
return self.trace_max_mem
def __enter__(self):
self.enable_by_count()
def __exit__(self, exc_type, exc_val, exc_tb):
self.disable_by_count()
def enable(self):
self._original_trace_function = sys.gettrace()
if self.max_mem is not None:
sys.settrace(self.trace_max_mem)
else:
sys.settrace(self.trace_memory_usage)
def disable(self):
sys.settrace(self._original_trace_function)
def json_results(prof, precision=1):
ret = []
for (filename, lines) in prof.code_map.items():
st = {'filename': filename, 'lines': []}
all_lines = linecache.getlines(filename)
mem_old = None
float_format = u'{0}.{1}f'.format(precision + 4, precision)
template_mem = u'{0:' + float_format + '} MiB'
for (lineno, mem) in lines:
if mem:
inc = (mem - mem_old) if mem_old else 0
mem_old = mem
mem = template_mem.format(mem)
inc = template_mem.format(inc)
else:
mem = u''
inc = u''
st['lines'].append({
'Line #': lineno,
'Mem usage': mem,
'Increment': inc,
'Line Contents': all_lines[lineno - 1]
})
ret.append(st)
return ret
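# A minimal LineProfiler sketch (illustrative only; assumes psutil is
# available, and the profiled function is hypothetical -- this helper is never
# called by the module itself).
def _line_profiler_example():
    prof = LineProfiler(backend='psutil')

    @prof
    def work():
        data = [0] * (10 ** 6)
        return len(data)

    work()
    # One entry per profiled source file, each with per-line
    # "Mem usage" / "Increment" strings.
    return json_results(prof, precision=1)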
def _func_exec(stmt, ns):
# helper for magic_memit, just a function proxy for the exec
# statement
exec(stmt, ns)
@magics_class
class MemoryProfilerMagics(Magics):
# A lprun-style %mprun magic for IPython.
@line_cell_magic
def mprun(self, parameter_s='', cell=None):
""" Execute a statement under the line-by-line memory profiler from the
memory_profiler module.
Usage, in line mode:
%mprun -f func1 -f func2 <statement>
Usage, in cell mode:
%%mprun -f func1 -f func2 [statement]
code...
code...
In cell mode, the additional code lines are appended to the (possibly
empty) statement in the first line. Cell mode allows you to easily
profile multiline blocks without having to put them in a separate
function.
The given statement (which doesn't require quote marks) is run via the
LineProfiler. Profiling is enabled for the functions specified by the -f
options. The statistics will be shown side-by-side with the code through
the pager once the statement has completed.
Options:
-f <function>: LineProfiler only profiles functions and methods it is told
to profile. This option tells the profiler about these functions. Multiple
-f options may be used. The argument may be any expression that gives
a Python function or method object. However, one must be careful to avoid
spaces that may confuse the option parser. Additionally, functions defined
in the interpreter at the In[] prompt or via %run currently cannot be
displayed. Write these functions out to a separate file and import them.
One or more -f options are required to get any useful results.
-T <filename>: dump the text-formatted statistics with the code
side-by-side out to a text file.
-r: return the LineProfiler object after it has completed profiling.
        -c: If present, add the memory usage of any child processes to the report.
"""
from io import StringIO
from memory_profiler import show_results, LineProfiler
# Local imports to avoid hard dependency.
from distutils.version import LooseVersion
import IPython
ipython_version = LooseVersion(IPython.__version__)
if ipython_version < '0.11':
from IPython.genutils import page
from IPython.ipstruct import Struct
from IPython.ipapi import UsageError
else:
from IPython.core.page import page
from IPython.utils.ipstruct import Struct
from IPython.core.error import UsageError
# Escape quote markers.
opts_def = Struct(T=[''], f=[])
parameter_s = parameter_s.replace('"', r'\"').replace("'", r"\'")
opts, arg_str = self.parse_options(parameter_s, 'rf:T:c',
list_all=True)
opts.merge(opts_def)
global_ns = self.shell.user_global_ns
local_ns = self.shell.user_ns
if cell is not None:
arg_str += '\n' + cell
# Get the requested functions.
funcs = []
for name in opts.f:
try:
funcs.append(eval(name, global_ns, local_ns))
except Exception as e:
raise UsageError('Could not find function %r.\n%s: %s' % (name,
e.__class__.__name__,
e))
include_children = 'c' in opts
profile = LineProfiler(include_children=include_children)
for func in funcs:
profile(func)
# Add the profiler to the builtins for @profile.
if 'profile' in builtins.__dict__:
had_profile = True
old_profile = builtins.__dict__['profile']
else:
had_profile = False
old_profile = None
builtins.__dict__['profile'] = profile
try:
profile.runctx(arg_str, global_ns, local_ns)
message = ''
except SystemExit:
message = "*** SystemExit exception caught in code being profiled."
except KeyboardInterrupt:
message = ("*** KeyboardInterrupt exception caught in code being "
"profiled.")
finally:
if had_profile:
builtins.__dict__['profile'] = old_profile
# Trap text output.
stdout_trap = StringIO()
show_results(profile, stdout_trap)
output = stdout_trap.getvalue()
output = output.rstrip()
if ipython_version < '0.11':
page(output, screen_lines=self.shell.rc.screen_length)
else:
page(output)
print(message, )
text_file = opts.T[0]
if text_file:
with open(text_file, 'w') as pfile:
pfile.write(output)
print('\n*** Profile printout saved to text file %s. %s' % (
text_file,
message))
return_value = None
if 'r' in opts:
return_value = profile
return return_value
# a timeit-style %memit magic for IPython
@line_cell_magic
def memit(self, line='', cell=None):
"""Measure memory usage of a Python statement
Usage, in line mode:
%memit [-r<R>t<T>i<I>] statement
Usage, in cell mode:
%%memit [-r<R>t<T>i<I>] setup_code
code...
code...
This function can be used both as a line and cell magic:
- In line mode you can measure a single-line statement (though multiple
          ones can be chained using semicolons).
- In cell mode, the statement in the first line is used as setup code
(executed but not measured) and the body of the cell is measured.
The cell body has access to any variables created in the setup code.
Options:
-r<R>: repeat the loop iteration <R> times and take the best result.
Default: 1
-t<T>: timeout after <T> seconds. Default: None
-i<I>: Get time information at an interval of I times per second.
            Defaults to 0.1 so that there are ten measurements per second.
        -c: If present, add the memory usage of any child processes to the report.
-o: If present, return a object containing memit run details
-q: If present, be quiet and do not output a result.
Examples
--------
::
In [1]: %memit range(10000)
peak memory: 21.42 MiB, increment: 0.41 MiB
In [2]: %memit range(1000000)
peak memory: 52.10 MiB, increment: 31.08 MiB
In [3]: %%memit l=range(1000000)
...: len(l)
...:
peak memory: 52.14 MiB, increment: 0.08 MiB
"""
from memory_profiler import memory_usage, _func_exec
opts, stmt = self.parse_options(line, 'r:t:i:coq', posix=False,
strict=False)
if cell is None:
setup = 'pass'
else:
setup = stmt
stmt = cell
repeat = int(getattr(opts, 'r', 1))
if repeat < 1:
            repeat = 1
timeout = int(getattr(opts, 't', 0))
if timeout <= 0:
timeout = None
interval = float(getattr(opts, 'i', 0.1))
include_children = 'c' in opts
return_result = 'o' in opts
quiet = 'q' in opts
        # I've noticed we get less noisy measurements if we run
# a garbage collection first
import gc
gc.collect()
_func_exec(setup, self.shell.user_ns)
mem_usage = []
counter = 0
baseline = memory_usage()[0]
while counter < repeat:
counter += 1
tmp = memory_usage((_func_exec, (stmt, self.shell.user_ns)),
timeout=timeout, interval=interval,
max_usage=True,
include_children=include_children)
mem_usage.append(tmp[0])
result = MemitResult(mem_usage, baseline, repeat, timeout, interval,
include_children)
if not quiet:
if mem_usage:
print(result)
else:
print('ERROR: could not read memory usage, try with a '
'lower interval or more iterations')
if return_result:
return result
@classmethod
def register_magics(cls, ip):
from distutils.version import LooseVersion
import IPython
ipython_version = LooseVersion(IPython.__version__)
if ipython_version < '0.13':
try:
_register_magic = ip.define_magic
except AttributeError: # ipython 0.10
_register_magic = ip.expose_magic
_register_magic('mprun', cls.mprun.__func__)
_register_magic('memit', cls.memit.__func__)
else:
ip.register_magics(cls)
# commenting out due to failures with some versions of IPython
# see https://github.com/fabianp/memory_profiler/issues/106
# # Ensuring old interface of magics expose for IPython 0.10
# magic_mprun = MemoryProfilerMagics().mprun.__func__
# magic_memit = MemoryProfilerMagics().memit.__func__
def load_ipython_extension(ip):
"""This is called to load the module as an IPython extension."""
MemoryProfilerMagics.register_magics(ip)
def profile(func=None, stream=None, precision=1, backend='psutil'):
"""
Decorator that will run the function and print a line-by-line profile
"""
backend = choose_backend(backend)
if backend == 'tracemalloc' and has_tracemalloc:
if not tracemalloc.is_tracing():
tracemalloc.start()
if func is not None:
def wrapper(*args, **kwargs):
prof = LineProfiler(backend=backend)
val = prof(func)(*args, **kwargs)
show_results(prof, stream=stream, precision=precision)
return val
return wrapper
else:
def inner_wrapper(f):
return profile(f, stream=stream, precision=precision,
backend=backend)
return inner_wrapper
def choose_backend(new_backend=None):
"""
    Try to set up the backend chosen by the user; if that fails, fall back
    to one of the allowable backends.
"""
_backend = 'no_backend'
all_backends = [
('psutil', has_psutil),
('posix', os.name == 'posix'),
('tracemalloc', has_tracemalloc),
('no_backend', True)
]
backends_indices = dict((b[0], i) for i, b in enumerate(all_backends))
if new_backend is not None:
all_backends.insert(0, all_backends.pop(backends_indices[new_backend]))
for n_backend, is_available in all_backends:
if is_available:
_backend = n_backend
break
if _backend == 'no_backend':
raise NotImplementedError(
'Tracemalloc or psutil module is required for non-unix '
'platforms')
if _backend != new_backend and new_backend is not None:
warnings.warn('{0} can not be used, {1} used instead'.format(
new_backend, _backend))
return _backend
# Insert into the built-ins to have profile
# globally defined (global variables are not enough
# for all cases, e.g. a script that imports another
# script where @profile is used)
if PY2:
def exec_with_profiler(filename, profiler, backend):
builtins.__dict__['profile'] = profiler
ns = dict(_CLEAN_GLOBALS, profile=profiler)
choose_backend(backend)
execfile(filename, ns, ns)
else:
def exec_with_profiler(filename, profiler, backend):
_backend = choose_backend(backend)
if _backend == 'tracemalloc' and has_tracemalloc:
tracemalloc.start()
builtins.__dict__['profile'] = profiler
# shadow the profile decorator defined above
ns = dict(_CLEAN_GLOBALS, profile=profiler)
try:
with open(filename) as f:
exec(compile(f.read(), filename, 'exec'), ns, ns)
finally:
if has_tracemalloc and tracemalloc.is_tracing():
tracemalloc.stop()
class LogFile(object):
"""File-like object to log text using the `logging` module and the log
report can be customised."""
def __init__(self, name=None, reportIncrementFlag=False):
"""
        :param name: name of the logger module
        :param reportIncrementFlag: set to True to report only the steps
            with memory increments
        :type name: string
        :type reportIncrementFlag: bool
"""
self.logger = logging.getLogger(name)
self.reportIncrementFlag = reportIncrementFlag
def write(self, msg, level=logging.INFO):
if self.reportIncrementFlag:
if "MiB" in msg and float(msg.split("MiB")[1].strip()) > 0:
self.logger.log(level, msg)
            elif "Filename:" in msg or "Line Contents" in msg:
self.logger.log(level, msg)
else:
self.logger.log(level, msg)
def flush(self):
for handler in self.logger.handlers:
handler.flush()
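# Illustrative sketch (not part of the original module): route the profiler's
# report into the ``logging`` machinery instead of stdout.  The logger name and
# log file below are hypothetical; the function is only defined, never called.
def _example_log_profiling_output():
    import sys
    logging.basicConfig(filename='memory_profile.log', level=logging.DEBUG)
    sys.stdout = LogFile('memory_profile_log', reportIncrementFlag=True)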
if __name__ == '__main__':
from optparse import OptionParser
parser = OptionParser(usage=_CMD_USAGE, version=__version__)
parser.disable_interspersed_args()
parser.add_option(
'--pdb-mmem', dest='max_mem', metavar='MAXMEM',
type='float', action='store',
help='step into the debugger when memory exceeds MAXMEM')
parser.add_option(
'--precision', dest='precision', type='int',
action='store', default=3,
help='precision of memory output in number of significant digits')
parser.add_option(
'--yaml', dest='yaml', type='str',
action='store', default='False',
        help='output results as YAML instead of JSON')
parser.add_option('-o', dest='out_filename', type='str',
action='store', default=None,
help='path to a file where results will be written')
parser.add_option('--timestamp', dest='timestamp', default=False,
action='store_true',
help='''print timestamp instead of memory measurement for
decorated functions''')
parser.add_option('--backend', dest='backend', type='choice',
action='store',
choices=['tracemalloc', 'psutil', 'posix'],
default='psutil',
                      help='backend to use for getting memory info '
'(one of the {tracemalloc, psutil, posix})')
if not sys.argv[1:]:
parser.print_help()
sys.exit(2)
(options, args) = parser.parse_args()
sys.argv[:] = args # Remove every memory_profiler arguments
script_filename = _find_script(args[0])
_backend = choose_backend(options.backend)
if options.timestamp:
prof = TimeStamper(_backend)
else:
prof = LineProfiler(max_mem=options.max_mem, backend=_backend)
try:
exec_with_profiler(script_filename, prof, options.backend)
finally:
if options.out_filename is not None:
out_file = open(options.out_filename, "a")
else:
out_file = sys.stdout
if options.timestamp:
prof.show_results(stream=out_file)
else:
ret = json_results(prof, precision=options.precision)
if options.yaml == 'True':
out = bytes(yaml.safe_dump(ret), 'utf-8')
print('[COUT] CO_YAML_CONTENT {}'.format(str(out)[1:]))
else:
print('[COUT] CO_JSON_CONTENT {}'.format(json.dumps(ret)))
| apache-2.0 | 5,405,167,597,833,779,000 | 2,853,564,395,128,358,000 | 33.353846 | 114 | 0.556603 | false |
sdague/home-assistant | homeassistant/components/alarm_control_panel/device_condition.py | 9 | 5397 | """Provide the device automations for Alarm control panel."""
from typing import Dict, List
import voluptuous as vol
from homeassistant.components.alarm_control_panel.const import (
SUPPORT_ALARM_ARM_AWAY,
SUPPORT_ALARM_ARM_CUSTOM_BYPASS,
SUPPORT_ALARM_ARM_HOME,
SUPPORT_ALARM_ARM_NIGHT,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_SUPPORTED_FEATURES,
CONF_CONDITION,
CONF_DEVICE_ID,
CONF_DOMAIN,
CONF_ENTITY_ID,
CONF_TYPE,
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_CUSTOM_BYPASS,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_DISARMED,
STATE_ALARM_TRIGGERED,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers import condition, config_validation as cv, entity_registry
from homeassistant.helpers.config_validation import DEVICE_CONDITION_BASE_SCHEMA
from homeassistant.helpers.typing import ConfigType, TemplateVarsType
from . import DOMAIN
from .const import (
CONDITION_ARMED_AWAY,
CONDITION_ARMED_CUSTOM_BYPASS,
CONDITION_ARMED_HOME,
CONDITION_ARMED_NIGHT,
CONDITION_DISARMED,
CONDITION_TRIGGERED,
)
CONDITION_TYPES = {
CONDITION_TRIGGERED,
CONDITION_DISARMED,
CONDITION_ARMED_HOME,
CONDITION_ARMED_AWAY,
CONDITION_ARMED_NIGHT,
CONDITION_ARMED_CUSTOM_BYPASS,
}
CONDITION_SCHEMA = DEVICE_CONDITION_BASE_SCHEMA.extend(
{
vol.Required(CONF_ENTITY_ID): cv.entity_id,
vol.Required(CONF_TYPE): vol.In(CONDITION_TYPES),
}
)
async def async_get_conditions(
hass: HomeAssistant, device_id: str
) -> List[Dict[str, str]]:
"""List device conditions for Alarm control panel devices."""
registry = await entity_registry.async_get_registry(hass)
conditions = []
# Get all the integrations entities for this device
for entry in entity_registry.async_entries_for_device(registry, device_id):
if entry.domain != DOMAIN:
continue
state = hass.states.get(entry.entity_id)
# We need a state or else we can't populate the different armed conditions
if state is None:
continue
supported_features = state.attributes[ATTR_SUPPORTED_FEATURES]
# Add conditions for each entity that belongs to this integration
conditions += [
{
CONF_CONDITION: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: CONDITION_DISARMED,
},
{
CONF_CONDITION: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: CONDITION_TRIGGERED,
},
]
if supported_features & SUPPORT_ALARM_ARM_HOME:
conditions.append(
{
CONF_CONDITION: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: CONDITION_ARMED_HOME,
}
)
if supported_features & SUPPORT_ALARM_ARM_AWAY:
conditions.append(
{
CONF_CONDITION: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: CONDITION_ARMED_AWAY,
}
)
if supported_features & SUPPORT_ALARM_ARM_NIGHT:
conditions.append(
{
CONF_CONDITION: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: CONDITION_ARMED_NIGHT,
}
)
if supported_features & SUPPORT_ALARM_ARM_CUSTOM_BYPASS:
conditions.append(
{
CONF_CONDITION: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: CONDITION_ARMED_CUSTOM_BYPASS,
}
)
return conditions
def async_condition_from_config(
config: ConfigType, config_validation: bool
) -> condition.ConditionCheckerType:
"""Create a function to test a device condition."""
if config_validation:
config = CONDITION_SCHEMA(config)
if config[CONF_TYPE] == CONDITION_TRIGGERED:
state = STATE_ALARM_TRIGGERED
elif config[CONF_TYPE] == CONDITION_DISARMED:
state = STATE_ALARM_DISARMED
elif config[CONF_TYPE] == CONDITION_ARMED_HOME:
state = STATE_ALARM_ARMED_HOME
elif config[CONF_TYPE] == CONDITION_ARMED_AWAY:
state = STATE_ALARM_ARMED_AWAY
elif config[CONF_TYPE] == CONDITION_ARMED_NIGHT:
state = STATE_ALARM_ARMED_NIGHT
elif config[CONF_TYPE] == CONDITION_ARMED_CUSTOM_BYPASS:
state = STATE_ALARM_ARMED_CUSTOM_BYPASS
def test_is_state(hass: HomeAssistant, variables: TemplateVarsType) -> bool:
"""Test if an entity is a certain state."""
return condition.state(hass, config[ATTR_ENTITY_ID], state)
return test_is_state
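# Illustrative example (not part of the original module): a device-condition
# config of the shape produced by async_get_conditions and accepted by
# CONDITION_SCHEMA / async_condition_from_config.  The device id and entity id
# are hypothetical.
_EXAMPLE_CONDITION_CONFIG = {
    CONF_CONDITION: "device",
    CONF_DEVICE_ID: "0123456789abcdef0123456789abcdef",
    CONF_ENTITY_ID: "alarm_control_panel.home_alarm",
    CONF_DOMAIN: DOMAIN,
    CONF_TYPE: CONDITION_ARMED_HOME,
}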
| apache-2.0 | 4,092,792,255,655,854,600 | -2,898,105,796,486,169,000 | 32.110429 | 85 | 0.597739 | false |
shenqicang/openmc | tests/test_cmfd_jfnk/test_cmfd_jfnk.py | 8 | 2112 | #!/usr/bin/env python
import os
from subprocess import Popen, STDOUT, PIPE, call
import filecmp
import glob
from optparse import OptionParser
parser = OptionParser()
parser.add_option('--mpi_exec', dest='mpi_exec', default='')
parser.add_option('--mpi_np', dest='mpi_np', default='3')
parser.add_option('--exe', dest='exe')
(opts, args) = parser.parse_args()
cwd = os.getcwd()
def test_run():
if opts.mpi_exec != '':
proc = Popen([opts.mpi_exec, '-np', opts.mpi_np, opts.exe, cwd],
stderr=STDOUT, stdout=PIPE)
else:
proc = Popen([opts.exe, cwd], stderr=STDOUT, stdout=PIPE)
print(proc.communicate()[0])
returncode = proc.returncode
assert returncode == 0, 'OpenMC did not exit successfully.'
def test_created_statepoint():
statepoint = glob.glob(os.path.join(cwd, 'statepoint.20.*'))
assert len(statepoint) == 1, 'Either multiple or no statepoint files exist.'
assert statepoint[0].endswith('binary') or statepoint[0].endswith('h5'),\
'Statepoint file is not a binary or hdf5 file.'
def test_output_exists():
assert os.path.exists(os.path.join(cwd, 'tallies.out')), 'Tally output file does not exist.'
def test_results():
statepoint = glob.glob(os.path.join(cwd, 'statepoint.20.*'))
call(['python', 'results.py', statepoint[0]])
compare = filecmp.cmp('results_test.dat', 'results_true.dat')
if not compare:
os.rename('results_test.dat', 'results_error.dat')
assert compare, 'Results do not agree.'
def teardown():
output = glob.glob(os.path.join(cwd, 'statepoint.20.*'))
output.append(os.path.join(cwd, 'tallies.out'))
output.append(os.path.join(cwd, 'results_test.dat'))
for f in output:
if os.path.exists(f):
os.remove(f)
if __name__ == '__main__':
# test for openmc executable
if opts.exe is None:
raise Exception('Must specify OpenMC executable from command line with --exe.')
# run tests
try:
test_run()
test_created_statepoint()
test_output_exists()
test_results()
finally:
teardown()
| mit | 4,365,554,192,924,498,000 | 2,011,871,935,182,349,800 | 30.058824 | 96 | 0.641572 | false |
askhl/ase | ase/visualize/vtk/volume.py | 14 | 2387 |
import numpy as np
from vtk import vtkContourFilter, vtkDepthSortPolyData
from ase.visualize.vtk.grid import vtkVolumeGrid
from ase.visualize.vtk.module import vtkPolyDataModule
from ase.visualize.vtk.pipeline import vtkSurfaceSmootherPipeline, \
vtkDepthSortPipeline
# -------------------------------------------------------------------
class vtkIsoSurfaceModule(vtkVolumeGrid, vtkPolyDataModule):
def __init__(self, data, cell, vtk_renderer, contours=1, depthsort=True):
#TODO don't require vtk_renderer... implement vtkScene
#TODO contour values from list or autocontours if int
# Make sure data argument is a valid array
if not isinstance(data, np.ndarray):
data = np.array(data)
vtkVolumeGrid.__init__(self, data.shape, cell)
self.vtk_iso = vtkContourFilter() #vtkMarchingContourFilter?
self.vtk_iso.SetInput(self.get_structured_points()) #TODO non-orthogonal
self.vtk_iso.SetValue(0, 0.25)
self.vtk_iso.SetValue(1, -0.25)
        self.smoothpipe = vtkSurfaceSmootherPipeline(self, self.vtk_iso)
#TODO use vtkDepthSortPipeline - but vtkPolyDataModule isn't a pipeline
self.depthsort = depthsort
if self.depthsort:
            # The depth sort object is set up to generate scalars representing
# the sort depth. A camera is assigned for the sorting. The camera
# defines the sort vector (position and focal point).
self.vtk_ds = vtkDepthSortPolyData()
self.vtk_ds.SetCamera(vtk_renderer.GetActiveCamera())
self.vtk_ds.SetInputConnection(self.vtk_iso.GetOutputPort())
self.vtk_ds.SetDirectionToBackToFront()
#vtk_ds.SetVector(1, 1, 1)
#vtk_ds.SortScalarsOn()
#vtk_ds.Update()
vtk_renderer.ResetCamera()
vtkPolyDataModule.__init__(self, self.vtk_ds)
else:
vtkPolyDataModule.__init__(self, self.vtk_iso)
#TODO add color function
"""
vtk_dmap = vtk.vtkPolyDataMapper()
vtk_dmap.ScalarVisibilityOn()
vtk_dmap.SetScalarRange(0, vtk_sda_x.GetMaxNorm()) #TODO GetMinNorm non-existing!
vtk_dmap.SetScalarModeToUsePointFieldData()
vtk_dmap.SelectColorArray("x")
#vtk_dmap.ColorByArrayComponent("x", 0)
"""
| gpl-2.0 | -4,702,511,491,122,021,000 | 2,852,938,042,869,221,400 | 37.5 | 89 | 0.631755 | false |
keflavich/pyspeckit-obsolete | pyspeckit/spectrum/models/ammonia.py | 1 | 28836 | """
========================================
Ammonia inversion transition TKIN fitter
========================================
Ammonia inversion transition TKIN fitter translated from Erik Rosolowsky's
http://svn.ok.ubc.ca/svn/signals/nh3fit/
.. moduleauthor:: Adam Ginsburg <adam.g.ginsburg@gmail.com>
Module API
^^^^^^^^^^
"""
import numpy as np
from pyspeckit.mpfit import mpfit
from pyspeckit.spectrum.parinfo import ParinfoList,Parinfo
import fitter
import matplotlib.cbook as mpcb
import copy
import model
line_names = ['oneone','twotwo','threethree','fourfour']
freq_dict = {
'oneone': 23.694506e9,
'twotwo': 23.722633335e9,
'threethree': 23.8701296e9,
'fourfour': 24.1394169e9,
}
aval_dict = {
'oneone': 1.712e-7, #64*!pi**4/(3*h*c**3)*nu11**3*mu0**2*(1/2.)
'twotwo': 2.291e-7, #64*!pi**4/(3*h*c**3)*nu22**3*mu0**2*(2/3.)
'threethree': 2.625e-7, #64*!pi**4/(3*h*c**3)*nu33**3*mu0**2*(3/4.)
'fourfour': 3.167e-7, #64*!pi**4/(3*h*c**3)*nu44**3*mu0**2*(4/5.)
}
ortho_dict = {
'oneone': False,
'twotwo': False,
'threethree': True,
'fourfour': False,
}
n_ortho = np.arange(0,28,3) # 0..3..27
n_para = np.array([x for x in range(28) if x % 3 != 0])
voff_lines_dict = {
'oneone': [19.8513, 19.3159, 7.88669, 7.46967, 7.35132, 0.460409, 0.322042,
-0.0751680, -0.213003, 0.311034, 0.192266, -0.132382, -0.250923, -7.23349,
-7.37280, -7.81526, -19.4117, -19.5500],
'twotwo':[26.5263, 26.0111, 25.9505, 16.3917, 16.3793, 15.8642, 0.562503,
0.528408, 0.523745, 0.0132820, -0.00379100, -0.0132820, -0.501831,
-0.531340, -0.589080, -15.8547, -16.3698, -16.3822, -25.9505, -26.0111,
-26.5263],
'threethree':[29.195098, 29.044147, 28.941877, 28.911408, 21.234827,
21.214619, 21.136387, 21.087456, 1.005122, 0.806082, 0.778062,
0.628569, 0.016754, -0.005589, -0.013401, -0.639734, -0.744554,
-1.031924, -21.125222, -21.203441, -21.223649, -21.076291, -28.908067,
-28.938523, -29.040794, -29.191744],
'fourfour':[ 0. , -30.49783692, 30.49783692, 0., 24.25907811,
-24.25907811, 0. ]
}
tau_wts_dict = {
'oneone': [0.0740740, 0.148148, 0.0925930, 0.166667, 0.0185190, 0.0370370,
0.0185190, 0.0185190, 0.0925930, 0.0333330, 0.300000, 0.466667,
0.0333330, 0.0925930, 0.0185190, 0.166667, 0.0740740, 0.148148],
'twotwo': [0.00418600, 0.0376740, 0.0209300, 0.0372090, 0.0260470,
0.00186000, 0.0209300, 0.0116280, 0.0106310, 0.267442, 0.499668,
0.146512, 0.0116280, 0.0106310, 0.0209300, 0.00186000, 0.0260470,
0.0372090, 0.0209300, 0.0376740, 0.00418600],
'threethree': [0.012263, 0.008409, 0.003434, 0.005494, 0.006652, 0.008852,
0.004967, 0.011589, 0.019228, 0.010387, 0.010820, 0.009482, 0.293302,
0.459109, 0.177372, 0.009482, 0.010820, 0.019228, 0.004967, 0.008852,
0.006652, 0.011589, 0.005494, 0.003434, 0.008409, 0.012263],
'fourfour': [0.2431, 0.0162, 0.0162, 0.3008, 0.0163, 0.0163, 0.3911]}
def ammonia(xarr, tkin=20, tex=None, ntot=1e14, width=1,
xoff_v=0.0, fortho=0.0, tau=None, fillingfraction=None, return_tau=False,
thin=False, verbose=False, return_components=False, debug=False ):
"""
Generate a model Ammonia spectrum based on input temperatures, column, and
gaussian parameters
ntot can be specified as a column density (e.g., 10^15) or a log-column-density (e.g., 15)
tex can be specified or can be assumed LTE if unspecified, if tex>tkin, or if "thin"
is specified
"thin" uses a different parametetrization and requires only the optical depth, width, offset,
and tkin to be specified. In the 'thin' approximation, tex is not used in computation of
the partition function - LTE is implicitly assumed
If tau is specified, ntot is NOT fit but is set to a fixed value
fillingfraction is an arbitrary scaling factor to apply to the model
    fortho is the ortho/(ortho+para) fraction.  The default (fortho=0) assumes all para.
xoff_v is the velocity offset in km/s
tau refers to the optical depth of the 1-1 line. The optical depths of the
other lines are fixed relative to tau_oneone
(not implemented) if tau is specified, ntot is ignored
"""
# Convert X-units to frequency in GHz
xarr = xarr.as_unit('GHz')
if tex is not None:
if tex > tkin: # cannot have Tex > Tkin
tex = tkin
elif thin: # tex is not used in this case
tex = tkin
else:
tex = tkin
if thin:
ntot = 1e15
elif 5 < ntot < 25:
# allow ntot to be specified as a logarithm. This is
# safe because ntot < 1e10 gives a spectrum of all zeros, and the
# plausible range of columns is not outside the specified range
ntot = 10**ntot
elif (25 < ntot < 1e5) or (ntot < 5):
# these are totally invalid for log/non-log
return 0
# fillingfraction is an arbitrary scaling for the data
# The model will be (normal model) * fillingfraction
if fillingfraction is None:
fillingfraction = 1.0
ckms = 2.99792458e5
ccms = ckms*1e5
g1 = 1
g2 = 1
h = 6.6260693e-27
kb = 1.3806505e-16
mu0 = 1.476e-18 # Dipole Moment in cgs (1.476 Debeye)
# Generate Partition Functions
nlevs = 51
jv=np.arange(nlevs)
ortho = jv % 3 == 0
    para = ~ortho
Jpara = jv[para]
Jortho = jv[ortho]
Brot = 298117.06e6
Crot = 186726.36e6
runspec = np.zeros(len(xarr))
tau_dict = {}
para_count = 0
ortho_count = 1 # ignore 0-0
if tau is not None and thin:
"""
Use optical depth in the 1-1 line as a free parameter
The optical depths of the other lines are then set by the kinetic temperature
Tex is still a free parameter in the final spectrum calculation at the bottom
(technically, I think this process assumes LTE; Tex should come into play in
these equations, not just the final one)
"""
dT0 = 41.5 # Energy diff between (2,2) and (1,1) in K
trot = tkin/(1+tkin/dT0*np.log(1+0.6*np.exp(-15.7/tkin)))
tau_dict['oneone'] = tau
tau_dict['twotwo'] = tau*(23.722/23.694)**2*4/3.*5/3.*np.exp(-41.5/trot)
tau_dict['threethree'] = tau*(23.8701279/23.694)**2*3/2.*14./3.*np.exp(-101.1/trot)
tau_dict['fourfour'] = tau*(24.1394169/23.694)**2*8/5.*9/3.*np.exp(-177.34/trot)
else:
"""
Column density is the free parameter. It is used in conjunction with
the full partition function to compute the optical depth in each band
Given the complexity of these equations, it would be worth my while to
comment each step carefully.
"""
Zpara = (2*Jpara+1)*np.exp(-h*(Brot*Jpara*(Jpara+1)+
(Crot-Brot)*Jpara**2)/(kb*tkin))
Zortho = 2*(2*Jortho+1)*np.exp(-h*(Brot*Jortho*(Jortho+1)+
(Crot-Brot)*Jortho**2)/(kb*tkin))
for linename in line_names:
if ortho_dict[linename]:
orthoparafrac = fortho
Z = Zortho
count = ortho_count
ortho_count += 1
else:
orthoparafrac = 1.0-fortho
Z = Zpara
count = para_count # need to treat partition function separately
para_count += 1
tau_dict[linename] = (ntot * orthoparafrac * Z[count]/(Z.sum()) / ( 1
+ np.exp(-h*freq_dict[linename]/(kb*tkin) )) * ccms**2 /
(8*np.pi*freq_dict[linename]**2) * aval_dict[linename]*
(1-np.exp(-h*freq_dict[linename]/(kb*tex))) /
(width/ckms*freq_dict[linename]*np.sqrt(2*np.pi)) )
# allow tau(11) to be specified instead of ntot
# in the thin case, this is not needed: ntot plays no role
# this process allows you to specify tau without using the approximate equations specified
# above. It should remove ntot from the calculations anyway...
if tau is not None and not thin:
tau11_temp = tau_dict['oneone']
# re-scale all optical depths so that tau is as specified, but the relative taus
        # are set by the kinetic temperature and partition functions
for linename,t in tau_dict.iteritems():
tau_dict[linename] = t * tau/tau11_temp
components =[]
for linename in line_names:
voff_lines = np.array(voff_lines_dict[linename])
tau_wts = np.array(tau_wts_dict[linename])
lines = (1-voff_lines/ckms)*freq_dict[linename]/1e9
tau_wts = tau_wts / (tau_wts).sum()
nuwidth = np.abs(width/ckms*lines)
nuoff = xoff_v/ckms*lines
# tau array
tauprof = np.zeros(len(xarr))
for kk,no in enumerate(nuoff):
tauprof += (tau_dict[linename] * tau_wts[kk] *
np.exp(-(xarr+no-lines[kk])**2 / (2.0*nuwidth[kk]**2)) *
fillingfraction)
components.append( tauprof )
T0 = (h*xarr*1e9/kb) # "temperature" of wavelength
if tau is not None and thin:
#runspec = tauprof+runspec
# is there ever a case where you want to ignore the optical depth function? I think no
runspec = (T0/(np.exp(T0/tex)-1)-T0/(np.exp(T0/2.73)-1))*(1-np.exp(-tauprof))+runspec
else:
runspec = (T0/(np.exp(T0/tex)-1)-T0/(np.exp(T0/2.73)-1))*(1-np.exp(-tauprof))+runspec
if runspec.min() < 0:
raise ValueError("Model dropped below zero. That is not possible normally. Here are the input values: "+
("tex: %f " % tex) +
("tkin: %f " % tkin) +
("ntot: %f " % ntot) +
("width: %f " % width) +
("xoff_v: %f " % xoff_v) +
("fortho: %f " % fortho)
)
if verbose or debug:
print "tkin: %g tex: %g ntot: %g width: %g xoff_v: %g fortho: %g fillingfraction: %g" % (tkin,tex,ntot,width,xoff_v,fortho,fillingfraction)
if return_components:
return (T0/(np.exp(T0/tex)-1)-T0/(np.exp(T0/2.73)-1))*(1-np.exp(-1*np.array(components)))
if return_tau:
return tau_dict
return runspec
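def _example_ammonia_model(xarr):
    # Illustrative sketch (not part of the original module).  ``xarr`` is
    # assumed to be a pyspeckit SpectroscopicAxis covering the 23.694 GHz (1-1)
    # line; the model calls xarr.as_unit('GHz'), so a bare numpy array will not
    # work.  All parameter values are hypothetical; ntot is given here as a
    # log10 column density, which the model accepts (see the docstring above).
    return ammonia(xarr, tkin=20.0, tex=6.0, ntot=14.5, width=0.5,
                   xoff_v=0.0, fortho=0.5)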
class ammonia_model(model.SpectralModel):
def __init__(self,npeaks=1,npars=6,multisingle='multi',**kwargs):
self.npeaks = npeaks
self.npars = npars
self._default_parnames = ['tkin','tex','ntot','width','xoff_v','fortho']
self.parnames = copy.copy(self._default_parnames)
# all fitters must have declared modelfuncs, which should take the fitted pars...
self.modelfunc = ammonia
self.n_modelfunc = self.n_ammonia
# for fitting ammonia simultaneously with a flat background
self.onepeakammonia = fitter.vheightmodel(ammonia)
#self.onepeakammoniafit = self._fourparfitter(self.onepeakammonia)
if multisingle in ('multi','single'):
self.multisingle = multisingle
else:
raise Exception("multisingle must be multi or single")
self.default_parinfo = None
self.default_parinfo, kwargs = self._make_parinfo(**kwargs)
# enforce ammonia-specific parameter limits
for par in self.default_parinfo:
if 'tex' in par.parname.lower():
par.limited = (True,par.limited[1])
par.limits = (max(par.limits[0],2.73), par.limits[1])
if 'tkin' in par.parname.lower():
par.limited = (True,par.limited[1])
par.limits = (max(par.limits[0],2.73), par.limits[1])
if 'width' in par.parname.lower():
par.limited = (True,par.limited[1])
par.limits = (max(par.limits[0],0), par.limits[1])
if 'fortho' in par.parname.lower():
par.limited = (True,True)
if par.limits[1] != 0:
par.limits = (max(par.limits[0],0), min(par.limits[1],1))
else:
par.limits = (max(par.limits[0],0), 1)
if 'ntot' in par.parname.lower():
par.limited = (True,par.limited[1])
par.limits = (max(par.limits[0],0), par.limits[1])
self.parinfo = copy.copy(self.default_parinfo)
self.modelfunc_kwargs = kwargs
# lower case? self.modelfunc_kwargs.update({'parnames':self.parinfo.parnames})
def __call__(self,*args,**kwargs):
#if 'use_lmfit' in kwargs: kwargs.pop('use_lmfit')
use_lmfit = kwargs.pop('use_lmfit') if 'use_lmfit' in kwargs else self.use_lmfit
if use_lmfit:
return self.lmfitter(*args,**kwargs)
if self.multisingle == 'single':
return self.onepeakammoniafit(*args,**kwargs)
elif self.multisingle == 'multi':
return self.multinh3fit(*args,**kwargs)
def n_ammonia(self, pars=None, parnames=None, **kwargs):
"""
Returns a function that sums over N ammonia line profiles, where N is the length of
tkin,tex,ntot,width,xoff_v,fortho *OR* N = len(pars) / 6
The background "height" is assumed to be zero (you must "baseline" your
spectrum before fitting)
*pars* [ list ]
a list with len(pars) = (6-nfixed)n, assuming
tkin,tex,ntot,width,xoff_v,fortho repeated
*parnames* [ list ]
len(parnames) must = len(pars). parnames determine how the ammonia
function parses the arguments
"""
if hasattr(pars,'values'):
# important to treat as Dictionary, since lmfit params & parinfo both have .items
parnames,parvals = zip(*pars.items())
parnames = [p.lower() for p in parnames]
parvals = [p.value for p in parvals]
elif parnames is None:
parvals = pars
parnames = self.parnames
else:
parvals = pars
if len(pars) != len(parnames):
# this should only be needed when other codes are changing the number of peaks
# during a copy, as opposed to letting them be set by a __call__
# (n_modelfuncs = n_ammonia can be called directly)
# n_modelfuncs doesn't care how many peaks there are
if len(pars) % len(parnames) == 0:
parnames = [p for ii in range(len(pars)/len(parnames)) for p in parnames]
npars = len(parvals) / self.npeaks
else:
raise ValueError("Wrong array lengths passed to n_ammonia!")
else:
npars = len(parvals) / self.npeaks
self._components = []
def L(x):
v = np.zeros(len(x))
for jj in xrange(self.npeaks):
modelkwargs = kwargs.copy()
for ii in xrange(npars):
name = parnames[ii+jj*npars].strip('0123456789').lower()
modelkwargs.update({name:parvals[ii+jj*npars]})
v += ammonia(x,**modelkwargs)
return v
return L
def components(self, xarr, pars, hyperfine=False):
"""
Ammonia components don't follow the default, since in Galactic astronomy the hyperfine components should be well-separated.
If you want to see the individual components overlaid, you'll need to pass hyperfine to the plot_fit call
"""
comps=[]
for ii in xrange(self.npeaks):
if hyperfine:
modelkwargs = dict(zip(self.parnames[ii*self.npars:(ii+1)*self.npars],pars[ii*self.npars:(ii+1)*self.npars]))
comps.append( ammonia(xarr,return_components=True,**modelkwargs) )
else:
modelkwargs = dict(zip(self.parnames[ii*self.npars:(ii+1)*self.npars],pars[ii*self.npars:(ii+1)*self.npars]))
comps.append( [ammonia(xarr,return_components=False,**modelkwargs)] )
modelcomponents = np.concatenate(comps)
return modelcomponents
def multinh3fit(self, xax, data, npeaks=1, err=None,
params=(20,20,14,1.0,0.0,0.5),
parnames=None,
fixed=(False,False,False,False,False,False),
limitedmin=(True,True,True,True,False,True),
limitedmax=(False,False,False,False,False,True), minpars=(2.73,2.73,0,0,0,0),
parinfo=None,
maxpars=(0,0,0,0,0,1), quiet=True, shh=True, veryverbose=False, **kwargs):
"""
Fit multiple nh3 profiles (multiple can be 1)
Inputs:
xax - x axis
data - y axis
npeaks - How many nh3 profiles to fit? Default 1 (this could supersede onedgaussfit)
err - error corresponding to data
These parameters need to have length = 6*npeaks. If npeaks > 1 and length = 6, they will
be replicated npeaks times, otherwise they will be reset to defaults:
params - Fit parameters: [tkin, tex, ntot (or tau), width, offset, ortho fraction] * npeaks
If len(params) % 6 == 0, npeaks will be set to len(params) / 6
fixed - Is parameter fixed?
limitedmin/minpars - set lower limits on each parameter (default: width>0, Tex and Tkin > Tcmb)
limitedmax/maxpars - set upper limits on each parameter
parnames - default parameter names, important for setting kwargs in model ['tkin','tex','ntot','width','xoff_v','fortho']
quiet - should MPFIT output each iteration?
shh - output final parameters?
Returns:
Fit parameters
Model
Fit errors
chi2
"""
if parinfo is None:
self.npars = len(params) / npeaks
if len(params) != npeaks and (len(params) / self.npars) > npeaks:
npeaks = len(params) / self.npars
self.npeaks = npeaks
if isinstance(params,np.ndarray): params=params.tolist()
# this is actually a hack, even though it's decently elegant
# somehow, parnames was being changed WITHOUT being passed as a variable
# this doesn't make sense - at all - but it happened.
# (it is possible for self.parnames to have npars*npeaks elements where
# npeaks > 1 coming into this function even though only 6 pars are specified;
# _default_parnames is the workaround)
if parnames is None: parnames = copy.copy(self._default_parnames)
partype_dict = dict(zip(['params','parnames','fixed','limitedmin','limitedmax','minpars','maxpars'],
[params,parnames,fixed,limitedmin,limitedmax,minpars,maxpars]))
# make sure all various things are the right length; if they're not, fix them using the defaults
for partype,parlist in partype_dict.iteritems():
if len(parlist) != self.npars*self.npeaks:
# if you leave the defaults, or enter something that can be multiplied by npars to get to the
# right number of gaussians, it will just replicate
if len(parlist) == self.npars:
partype_dict[partype] *= npeaks
elif len(parlist) > self.npars:
# DANGER: THIS SHOULD NOT HAPPEN!
print "WARNING! Input parameters were longer than allowed for variable ",parlist
partype_dict[partype] = partype_dict[partype][:self.npars]
elif parlist==params: # this instance shouldn't really be possible
partype_dict[partype] = [20,20,1e10,1.0,0.0,0.5] * npeaks
elif parlist==fixed:
partype_dict[partype] = [False] * len(params)
elif parlist==limitedmax: # only fortho, fillingfraction have upper limits
partype_dict[partype] = (np.array(parnames) == 'fortho') + (np.array(parnames) == 'fillingfraction')
elif parlist==limitedmin: # no physical values can be negative except velocity
partype_dict[partype] = (np.array(parnames) != 'xoff_v')
elif parlist==minpars: # all have minima of zero except kinetic temperature, which can't be below CMB. Excitation temperature technically can be, but not in this model
partype_dict[partype] = ((np.array(parnames) == 'tkin') + (np.array(parnames) == 'tex')) * 2.73
elif parlist==maxpars: # fractions have upper limits of 1.0
partype_dict[partype] = ((np.array(parnames) == 'fortho') + (np.array(parnames) == 'fillingfraction')).astype('float')
elif parlist==parnames: # assumes the right number of parnames (essential)
partype_dict[partype] = list(parnames) * self.npeaks
if len(parnames) != len(partype_dict['params']):
raise ValueError("Wrong array lengths AFTER fixing them")
# used in components. Is this just a hack?
self.parnames = partype_dict['parnames']
parinfo = [ {'n':ii, 'value':partype_dict['params'][ii],
'limits':[partype_dict['minpars'][ii],partype_dict['maxpars'][ii]],
'limited':[partype_dict['limitedmin'][ii],partype_dict['limitedmax'][ii]], 'fixed':partype_dict['fixed'][ii],
'parname':partype_dict['parnames'][ii]+str(ii/self.npars),
'mpmaxstep':float(partype_dict['parnames'][ii] in ('tex','tkin')), # must force small steps in temperature (True = 1.0)
'error': 0}
for ii in xrange(len(partype_dict['params'])) ]
# hack: remove 'fixed' pars
parinfo_with_fixed = parinfo
parinfo = [p for p in parinfo_with_fixed if not p['fixed']]
fixed_kwargs = dict((p['parname'].strip("0123456789").lower(),p['value']) for p in parinfo_with_fixed if p['fixed'])
# don't do this - it breaks the NEXT call because npars != len(parnames) self.parnames = [p['parname'] for p in parinfo]
# this is OK - not a permanent change
parnames = [p['parname'] for p in parinfo]
# not OK self.npars = len(parinfo)/self.npeaks
parinfo = ParinfoList([Parinfo(p) for p in parinfo], preserve_order=True)
#import pdb; pdb.set_trace()
else:
self.parinfo = ParinfoList([Parinfo(p) for p in parinfo], preserve_order=True)
parinfo_with_fixed = None
fixed_kwargs = {}
fitfun_kwargs = dict(kwargs.items()+fixed_kwargs.items())
npars = len(parinfo)/self.npeaks
# (fortho0 is not fortho)
# this doesn't work if parinfo_with_fixed is not None:
# this doesn't work for p in parinfo_with_fixed:
# this doesn't work # users can change the defaults while holding them fixed
# this doesn't work if p['fixed']:
# this doesn't work kwargs.update({p['parname']:p['value']})
def mpfitfun(x,y,err):
if err is None:
def f(p,fjac=None): return [0,(y-self.n_ammonia(pars=p, parnames=parinfo.parnames, **fitfun_kwargs)(x))]
else:
def f(p,fjac=None): return [0,(y-self.n_ammonia(pars=p, parnames=parinfo.parnames, **fitfun_kwargs)(x))/err]
return f
if veryverbose:
print "GUESSES: "
print "\n".join(["%s: %s" % (p['parname'],p['value']) for p in parinfo])
mp = mpfit(mpfitfun(xax,data,err),parinfo=parinfo,quiet=quiet)
mpp = mp.params
if mp.perror is not None: mpperr = mp.perror
else: mpperr = mpp*0
chi2 = mp.fnorm
if mp.status == 0:
raise Exception(mp.errmsg)
for i,p in enumerate(mpp):
parinfo[i]['value'] = p
parinfo[i]['error'] = mpperr[i]
if not shh:
print "Fit status: ",mp.status
print "Fit message: ",mp.errmsg
print "Final fit values: "
for i,p in enumerate(mpp):
print parinfo[i]['parname'],p," +/- ",mpperr[i]
print "Chi2: ",mp.fnorm," Reduced Chi2: ",mp.fnorm/len(data)," DOF:",len(data)-len(mpp)
if any(['tex' in s for s in parnames]) and any(['tkin' in s for s in parnames]):
texnum = (i for i,s in enumerate(parnames) if 'tex' in s)
tkinnum = (i for i,s in enumerate(parnames) if 'tkin' in s)
for txn,tkn in zip(texnum,tkinnum):
if mpp[txn] > mpp[tkn]: mpp[txn] = mpp[tkn] # force Tex>Tkin to Tex=Tkin (already done in n_ammonia)
self.mp = mp
if parinfo_with_fixed is not None:
            # set self.parinfo preserving the 'fixed' parameters
# ORDER MATTERS!
for p in parinfo:
parinfo_with_fixed[p['n']] = p
self.parinfo = ParinfoList([Parinfo(p) for p in parinfo_with_fixed], preserve_order=True)
else:
self.parinfo = parinfo
self.parinfo = ParinfoList([Parinfo(p) for p in parinfo], preserve_order=True)
# I don't THINK these are necessary?
#self.parinfo = parinfo
#self.parinfo = ParinfoList([Parinfo(p) for p in self.parinfo])
# need to restore the fixed parameters....
# though the above commented out section indicates that I've done and undone this dozens of times now
# (a test has been added to test_nh3.py)
# this was NEVER included or tested because it breaks the order
#for par in parinfo_with_fixed:
# if par.parname not in self.parinfo.keys():
# self.parinfo.append(par)
self.mpp = self.parinfo.values
self.mpperr = self.parinfo.errors
self.mppnames = self.parinfo.names
self.model = self.n_ammonia(pars=self.mpp, parnames=self.mppnames, **kwargs)(xax)
#if self.model.sum() == 0:
# print "DON'T FORGET TO REMOVE THIS ERROR!"
# raise ValueError("Model is zeros.")
indiv_parinfo = [self.parinfo[jj*self.npars:(jj+1)*self.npars] for jj in xrange(len(self.parinfo)/self.npars)]
modelkwargs = [
dict([(p['parname'].strip("0123456789").lower(),p['value']) for p in pi])
for pi in indiv_parinfo]
self.tau_list = [ammonia(xax,return_tau=True,**mk) for mk in modelkwargs]
return self.mpp,self.model,self.mpperr,chi2
def moments(self, Xax, data, negamp=None, veryverbose=False, **kwargs):
"""
Returns a very simple and likely incorrect guess
"""
# TKIN, TEX, ntot, width, center, ortho fraction
return [20,10, 1e15, 1.0, 0.0, 1.0]
def annotations(self):
from decimal import Decimal # for formatting
tex_key = {'tkin':'T_K','tex':'T_{ex}','ntot':'N','fortho':'F_o','width':'\\sigma','xoff_v':'v','fillingfraction':'FF','tau':'\\tau_{1-1}'}
# small hack below: don't quantize if error > value. We want to see the values.
label_list = []
for pinfo in self.parinfo:
parname = tex_key[pinfo['parname'].strip("0123456789").lower()]
parnum = int(pinfo['parname'][-1])
if pinfo['fixed']:
formatted_value = "%s" % pinfo['value']
pm = ""
formatted_error=""
else:
formatted_value = Decimal("%g" % pinfo['value']).quantize(Decimal("%0.2g" % (min(pinfo['error'],pinfo['value']))))
pm = "$\\pm$"
formatted_error = Decimal("%g" % pinfo['error']).quantize(Decimal("%0.2g" % pinfo['error']))
label = "$%s(%i)$=%8s %s %8s" % (parname, parnum, formatted_value, pm, formatted_error)
label_list.append(label)
labels = tuple(mpcb.flatten(label_list))
return labels
class ammonia_model_vtau(ammonia_model):
def __init__(self,**kwargs):
super(ammonia_model_vtau,self).__init__()
self.parnames = ['tkin','tex','tau','width','xoff_v','fortho']
def moments(self, Xax, data, negamp=None, veryverbose=False, **kwargs):
"""
Returns a very simple and likely incorrect guess
"""
# TKIN, TEX, ntot, width, center, ortho fraction
return [20,10, 1, 1.0, 0.0, 1.0]
def __call__(self,*args,**kwargs):
if self.multisingle == 'single':
return self.onepeakammoniafit(*args,**kwargs)
elif self.multisingle == 'multi':
return self.multinh3fit(*args,**kwargs)
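def _example_two_component_guesses():
    # Illustrative sketch (not part of the original module): multinh3fit and
    # n_ammonia expect 6 parameters per velocity component, in the order
    # [tkin, tex, ntot, width, xoff_v, fortho] for ammonia_model (the third
    # parameter is tau instead of ntot for ammonia_model_vtau).  All values
    # below are hypothetical.
    return [20.0, 7.0, 14.7, 0.5, 0.0, 0.5,   # component 1
            25.0, 8.0, 14.2, 0.8, 5.0, 0.5]   # component 2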
| mit | 7,138,208,289,486,199,000 | -3,180,228,449,338,753,500 | 44.554502 | 188 | 0.575808 | false |
Yelp/pootle | tests/import_export/import.py | 1 | 1745 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import os
import pytest
from django.core.files.uploadedfile import SimpleUploadedFile
from import_export.utils import import_file
from import_export.exceptions import UnsupportedFiletypeError
from pootle_store.models import NEW, PARSED, Store
TEST_PO_DIR = "tests/data/po/tutorial/en"
IMPORT_SUCCESS = "headers_correct.po"
IMPORT_UNSUPP_FILE = "tutorial.ts"
def _import_file(file_name, file_dir=TEST_PO_DIR,
content_type="text/x-gettext-translation"):
with open(os.path.join(file_dir, file_name), "r") as f:
import_file(SimpleUploadedFile(file_name,
f.read(),
content_type))
@pytest.mark.django_db
def test_import_success(en_tutorial_po_no_file):
assert en_tutorial_po_no_file.state == NEW
_import_file(IMPORT_SUCCESS)
store = Store.objects.get(pk=en_tutorial_po_no_file.pk)
assert store.state == PARSED
@pytest.mark.django_db
def test_import_failure(file_import_failure, en_tutorial_po):
filename, exception = file_import_failure
with pytest.raises(exception):
_import_file(filename)
@pytest.mark.django_db
def test_import_unsupported(en_tutorial_ts, ts_directory):
with pytest.raises(UnsupportedFiletypeError):
_import_file(IMPORT_UNSUPP_FILE,
file_dir=os.path.join(ts_directory, "tutorial/en"),
content_type="text/vnd.trolltech.linguist")
| gpl-3.0 | -8,215,698,197,154,138,000 | -239,409,379,407,125,200 | 31.924528 | 77 | 0.684241 | false |
jakobj/nest-simulator | pynest/nest/tests/test_urbanczik_synapse.py | 14 | 9912 | # -*- coding: utf-8 -*-
#
# test_urbanczik_synapse.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Test functionality of the Urbanczik synapse
"""
import unittest
import nest
import numpy as np
HAVE_GSL = nest.ll_api.sli_func("statusdict/have_gsl ::")
@nest.ll_api.check_stack
@unittest.skipIf(not HAVE_GSL, 'GSL is not available')
class UrbanczikSynapseTestCase(unittest.TestCase):
"""Test Urbanczik synapse"""
def test_ConnectNeuronsWithUrbanczikSynapse(self):
"""Ensures that the restriction to supported neuron models works."""
nest.set_verbosity('M_WARNING')
# Multi-compartment models
mc_models = [
"iaf_cond_alpha_mc",
"pp_cond_exp_mc_urbanczik",
]
# Supported models
supported_models = [
"pp_cond_exp_mc_urbanczik",
]
        # Compute the models that are not supported
not_supported_models = [n for n in nest.Models(mtype='nodes')
if n not in supported_models]
# Connect supported models with Urbanczik synapse
for nm in supported_models:
nest.ResetKernel()
r_type = 0
if nm in mc_models:
syns = nest.GetDefaults(nm)["receptor_types"]
r_type = syns["soma_exc"]
n = nest.Create(nm, 2)
nest.Connect(n, n, {"rule": "all_to_all"},
{"synapse_model": "urbanczik_synapse", "receptor_type": r_type})
# Ensure that connecting not supported models fails
for nm in not_supported_models:
nest.ResetKernel()
r_type = 0
if nm in mc_models:
syns = nest.GetDefaults(nm)["receptor_types"]
r_type = syns["soma_exc"]
n = nest.Create(nm, 2)
# try to connect with urbanczik synapse
with self.assertRaises(nest.kernel.NESTError):
nest.Connect(n, n, {"rule": "all_to_all"},
{"synapse_model": "urbanczik_synapse", "receptor_type": r_type})
def test_SynapseDepressionFacilitation(self):
"""Ensure that depression and facilitation work correctly"""
nest.set_verbosity('M_WARNING')
nest.ResetKernel()
resolution = 0.1
nest.SetKernelStatus({'resolution': resolution})
'''
neuron parameters
'''
nrn_model = 'pp_cond_exp_mc_urbanczik'
nrn_params = {
't_ref': 3.0, # refractory period
'g_sp': 600.0, # somato-dendritic coupling conductance
'soma': {
'V_m': -70.0, # initial value of V_m
'C_m': 300.0, # capacitance of membrane
'E_L': -70.0, # resting potential
'g_L': 30.0, # somatic leak conductance
'E_ex': 0.0, # resting potential for exc input
'E_in': -75.0, # resting potential for inh input
'tau_syn_ex': 3.0, # time constant of exc conductance
'tau_syn_in': 3.0, # time constant of inh conductance
},
'dendritic': {
'V_m': -70.0, # initial value of V_m
'C_m': 300.0, # capacitance of membrane
'E_L': -70.0, # resting potential
'g_L': 30.0, # dendritic leak conductance
'tau_syn_ex': 3.0, # time constant of exc input current
'tau_syn_in': 3.0, # time constant of inh input current
},
# parameters of rate function
'phi_max': 0.15, # max rate
'rate_slope': 0.5, # called 'k' in the paper
'beta': 1.0 / 3.0,
'theta': -55.0,
}
'''
synapse params
'''
syns = nest.GetDefaults(nrn_model)['receptor_types']
init_w = 100.0
syn_params = {
'synapse_model': 'urbanczik_synapse_wr',
'receptor_type': syns['dendritic_exc'],
'tau_Delta': 100.0, # time constant of low pass filtering of the weight change
'eta': 0.75, # learning rate
'weight': init_w,
'Wmax': 4.5*nrn_params['dendritic']['C_m'],
'delay': resolution,
}
'''
neuron and devices
'''
nest.SetDefaults(nrn_model, nrn_params)
nrn = nest.Create(nrn_model)
# spike generator is connected to a parrot neuron which is connected to the mc neuron
prrt_nrn = nest.Create('parrot_neuron')
# excitatory input to the dendrite
pre_syn_spike_times = np.array([1.0, 98.0])
sg_prox = nest.Create('spike_generator', params={
'spike_times': pre_syn_spike_times})
# excitatory input to the soma
spike_times_soma_inp = np.arange(10.0, 50.0, resolution)
spike_weights_soma = 10.0*np.ones_like(spike_times_soma_inp)
sg_soma_exc = nest.Create('spike_generator',
params={'spike_times': spike_times_soma_inp, 'spike_weights': spike_weights_soma})
# for recording all parameters of the Urbanczik neuron
rqs = nest.GetDefaults(nrn_model)['recordables']
mm = nest.Create('multimeter', params={
'record_from': rqs, 'interval': 0.1})
        # for recording the synaptic weights of the Urbanczik synapses
wr = nest.Create('weight_recorder')
# for recording the spiking of the soma
sr_soma = nest.Create('spike_recorder')
'''
create connections
'''
nest.Connect(sg_prox, prrt_nrn, syn_spec={'delay': resolution})
nest.CopyModel('urbanczik_synapse', 'urbanczik_synapse_wr',
{'weight_recorder': wr[0]})
nest.Connect(prrt_nrn, nrn, syn_spec=syn_params)
nest.Connect(sg_soma_exc, nrn,
syn_spec={'receptor_type': syns['soma_exc'], 'weight': 10.0*resolution, 'delay': resolution})
nest.Connect(mm, nrn, syn_spec={'delay': resolution})
nest.Connect(nrn, sr_soma, syn_spec={'delay': resolution})
'''
simulation
'''
nest.Simulate(100.0)
'''
read out devices
'''
# multimeter
rec = nest.GetStatus(mm)[0]['events']
t = rec['times']
V_w = rec['V_m.p']
# compute dendritic prediction of somatic membrane potential
g_D = nrn_params['g_sp']
g_L = nrn_params['soma']['g_L']
E_L = nrn_params['soma']['E_L']
V_w_star = (g_L*E_L + g_D*V_w) / (g_L + g_D)
# weight recorder
data = nest.GetStatus(wr)
senders = data[0]['events']['senders']
targets = data[0]['events']['targets']
weights = data[0]['events']['weights']
times = data[0]['events']['times']
# spike recorder
data = nest.GetStatus(sr_soma)[0]['events']
spike_times_soma = data['times']
# compute predicted rate
phi_max = nrn_params['phi_max']
k = nrn_params['rate_slope']
beta = nrn_params['beta']
theta = nrn_params['theta']
rate = (phi_max / (1.0 + k*np.exp(beta*(theta - V_w_star))))
# compute h(V_w_star)
h = (15.0*beta / (1.0 + np.exp(-beta*(theta - V_w_star)) / k))
# compute alpha response kernel
tau_s = nrn_params['dendritic']['tau_syn_ex']
g_L_prox = nrn_params['dendritic']['g_L']
C_m_prox = nrn_params['dendritic']['C_m']
tau_L = C_m_prox / g_L_prox
E_L_prox = nrn_params['dendritic']['E_L']
t0 = 1.2
alpha_response = (np.heaviside(t - t0, 0.5)*tau_s*(np.exp(-(t - t0) / tau_L) - np.exp(-(t - t0) / tau_s)) /
(g_L_prox*(tau_L - tau_s)))
# compute PI(t)
if len(spike_times_soma) > 0:
t = np.around(t, 4)
spike_times_soma = np.around(spike_times_soma + 0.2, 4)
idx = np.nonzero(np.in1d(t, spike_times_soma))[0]
rate[idx] -= 1.0 / resolution
w_change_raw = -15.0*C_m_prox*rate*h*alpha_response
# compute low pass filtered version of PI
tau_Delta = syn_params['tau_Delta']
eta = syn_params['eta']
w_change_low_pass = eta * np.exp(-t / tau_Delta)*np.cumsum(
np.exp(t / tau_Delta)*w_change_raw)*resolution / tau_Delta
integrated_w_change = np.cumsum(w_change_low_pass)*resolution
syn_weight_comp = init_w + integrated_w_change
'''
comparison between Nest and python implementation
'''
# extract the weight computed in python at the times of the presynaptic spikes
idx = np.nonzero(np.in1d(np.around(t, 4), np.around(pre_syn_spike_times + resolution, 4)))[0]
syn_w_comp_at_spike_times = syn_weight_comp[idx]
        relative_error = (
            (weights[-1] - syn_w_comp_at_spike_times[-1]) / (weights[-1] - init_w))
        self.assertTrue(abs(relative_error) < 0.001)
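def _example_dendritic_prediction(V_w, g_L=30.0, g_D=600.0, E_L=-70.0):
    # Illustrative sketch (not part of the original test): the dendritic
    # prediction of the somatic potential used above,
    # V_w_star = (g_L*E_L + g_D*V_w) / (g_L + g_D).  The default conductances
    # and resting potential mirror nrn_params but are hypothetical here.
    return (g_L * E_L + g_D * V_w) / (g_L + g_D)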
def suite():
suite = unittest.makeSuite(UrbanczikSynapseTestCase, 'test')
return suite
def run():
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite())
if __name__ == "__main__":
run()
| gpl-2.0 | -291,150,620,444,384,260 | 714,421,226,850,319,400 | 34.783394 | 116 | 0.551049 | false |
skirsdeda/django | tests/test_discovery_sample/doctests.py | 471 | 1245 | """
Doctest example from the official Python documentation.
https://docs.python.org/3/library/doctest.html
"""
def factorial(n):
"""Return the factorial of n, an exact integer >= 0.
>>> [factorial(n) for n in range(6)]
[1, 1, 2, 6, 24, 120]
>>> factorial(30) # doctest: +ELLIPSIS
265252859812191058636308480000000...
>>> factorial(-1)
Traceback (most recent call last):
...
ValueError: n must be >= 0
Factorials of floats are OK, but the float must be an exact integer:
>>> factorial(30.1)
Traceback (most recent call last):
...
ValueError: n must be exact integer
>>> factorial(30.0) # doctest: +ELLIPSIS
265252859812191058636308480000000...
It must also not be ridiculously large:
>>> factorial(1e100)
Traceback (most recent call last):
...
OverflowError: n too large
"""
import math
if not n >= 0:
raise ValueError("n must be >= 0")
if math.floor(n) != n:
raise ValueError("n must be exact integer")
if n + 1 == n: # catch a value like 1e300
raise OverflowError("n too large")
result = 1
factor = 2
while factor <= n:
result *= factor
factor += 1
return result
| bsd-3-clause | 1,393,371,769,841,483,500 | 4,669,490,233,542,060,000 | 26.065217 | 72 | 0.604016 | false |
CopeX/odoo | addons/website_twitter/models/twitter_config.py | 377 | 2095 | import logging
from openerp.osv import fields, osv
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
class twitter_config_settings(osv.osv_memory):
_inherit = 'website.config.settings'
_columns = {
'twitter_api_key': fields.related(
'website_id', 'twitter_api_key', type="char",
string='Twitter API Key',
help="Twitter API key you can get it from https://apps.twitter.com/app/new"),
'twitter_api_secret': fields.related(
'website_id', 'twitter_api_secret', type="char",
string='Twitter API secret',
help="Twitter API secret you can get it from https://apps.twitter.com/app/new"),
'twitter_tutorial': fields.dummy(
type="boolean", string="Show me how to obtain the Twitter API Key and Secret"),
'twitter_screen_name': fields.related(
'website_id', 'twitter_screen_name',
type="char", string='Get favorites from this screen name',
help="Screen Name of the Twitter Account from which you want to load favorites."
"It does not have to match the API Key/Secret."),
}
def _check_twitter_authorization(self, cr, uid, config_id, context=None):
website_obj = self.pool['website']
website_config = self.browse(cr, uid, config_id, context=context)
try:
website_obj.fetch_favorite_tweets(cr, uid, [website_config.website_id.id], context=context)
except Exception:
_logger.warning('Failed to verify twitter API authorization', exc_info=True)
raise osv.except_osv(_('Twitter authorization error!'), _('Please double-check your Twitter API Key and Secret'))
def create(self, cr, uid, vals, context=None):
res_id = super(twitter_config_settings, self).create(cr, uid, vals, context=context)
if vals.get('twitter_api_key') and vals.get('twitter_api_secret'):
self._check_twitter_authorization(cr, uid, res_id, context=context)
return res_id | agpl-3.0 | -3,405,962,722,175,598,600 | -6,844,041,889,664,679,000 | 48.904762 | 125 | 0.624344 | false |
TriOptima/tri.form | tests/test_compat.py | 1 | 3336 | import pytest
from tri_struct import merged
from tri_form import Field, Form
from tri_form.compat import render_to_string, format_html, field_defaults_factory, render_template, Template
from .compat import RequestFactory, SafeText
def test_render_to_string():
assert render_to_string(
template_name='tri_form/non_editable.html',
request=RequestFactory().get('/'),
context=dict(
field=dict(
id=SafeText('<a b c><d><e>'),
rendered_value=SafeText('<a b c><d><e>'),
),
)
).strip() == '<span id="<a b c><d><e>"><a b c><d><e></span>'
def test_format_html():
assert format_html('<{a}>{b}{c}', a='a', b=format_html('<b>'), c='<c>') == '<a><b><c>'
def test_format_html2():
assert render_template(RequestFactory().get('/'), Template('{{foo}}'), dict(foo=format_html('<a href="foo">foo</a>'))) == '<a href="foo">foo</a>'
def test_format_html3():
assert render_template(RequestFactory().get('/'), Template('{{foo}}'), dict(foo=format_html('{}', format_html('<a href="foo">foo</a>')))) == '<a href="foo">foo</a>'
def test_format_html4():
actual = render_template(
RequestFactory().get('/'),
Template('{{foo}}'),
dict(
foo=Form(fields=[Field(name='foo')]),
)
)
print(actual)
assert '<input type="text" value="" name="foo" id="id_foo"' in actual
def test_format_html5():
actual = Form(fields=[Field(name='foo')], request=RequestFactory().get('/')).render()
print(actual)
assert type(actual) == SafeText
def test_format_html6():
form = Form(fields=[Field(name='foo')], request=RequestFactory().get('/'))
actual = form.fields_by_name.foo.render()
print(actual)
assert type(actual) == SafeText
def test_render_template():
actual = render_template(RequestFactory().get('/'), Template('{{foo}}'), dict(foo=1))
print(actual)
assert type(actual) == SafeText
@pytest.mark.django
def test_field_defaults_factory():
from django.db import models
base = dict(parse_empty_string_as_none=True, required=True, display_name=None)
assert field_defaults_factory(models.CharField(null=False, blank=False)) == merged(base, dict(parse_empty_string_as_none=False))
assert field_defaults_factory(models.CharField(null=False, blank=True)) == merged(base, dict(parse_empty_string_as_none=False, required=False))
assert field_defaults_factory(models.CharField(null=True, blank=False)) == merged(base, dict(required=False))
assert field_defaults_factory(models.CharField(null=True, blank=True)) == merged(base, dict(required=False))
@pytest.mark.django
def test_field_defaults_factory_boolean():
from django.db import models
django_null_default = not models.BooleanField().null
base = dict(parse_empty_string_as_none=django_null_default, display_name=None)
assert field_defaults_factory(models.BooleanField(null=False, blank=False)) == merged(base, dict(parse_empty_string_as_none=False))
assert field_defaults_factory(models.BooleanField(null=False, blank=True)) == merged(base, dict(parse_empty_string_as_none=False))
assert field_defaults_factory(models.BooleanField(null=True, blank=False)) == base
assert field_defaults_factory(models.BooleanField(null=True, blank=True)) == base
| bsd-3-clause | 6,926,693,378,631,930,000 | 4,643,094,064,103,053,000 | 36.483146 | 168 | 0.658273 | false |
ceb8/astroquery | astroquery/imcce/tests/test_miriade.py | 2 | 4481 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import os
import astropy.units as u
from ...utils.testing_tools import MockResponse
from .. import Miriade, MiriadeClass
# files in data/ for different query types
def data_path(filename):
data_dir = os.path.join(os.path.dirname(__file__), 'data')
return os.path.join(data_dir, filename)
# monkeypatch replacement request function
def nonremote_request(self, request_type, url, **kwargs):
filename = '3552_coordtype{}.dat'.format(
kwargs['params']['-tcoor'])
with open(data_path(filename), 'rb') as f:
response = MockResponse(content=f.read(), url=url)
return response
# use a pytest fixture to create a dummy 'requests.get' function
# that mocks (monkeypatches) the actual 'requests.get' function:
@pytest.fixture
def patch_request(request):
try:
mp = request.getfixturevalue("monkeypatch")
except AttributeError: # pytest < 3
mp = request.getfuncargvalue("monkeypatch")
mp.setattr(MiriadeClass, '_request',
nonremote_request)
return mp
# --------------------------------- actual test functions
def test_spherical_coordinates(patch_request):
eph = Miriade.get_ephemerides('3552', coordtype=1)
cols = ('target', 'epoch', 'RA', 'DEC', 'delta', 'V', 'alpha', 'elong',
'RAcosD_rate', 'DEC_rate', 'delta_rate')
units = (None, u.d, u.deg, u.deg, u.au, u.mag, u.deg, u.deg,
u.arcsec / u.minute, u.arcsec / u.minute, u.km / u.s)
for i in range(len(cols)):
assert cols[i] in eph.columns
assert eph[cols[i]].unit == units[i]
def test_rectangular_coordinates(patch_request):
eph = Miriade.get_ephemerides('3552', coordtype=2)
cols = ('target', 'epoch', 'x', 'y', 'z',
'vx', 'vy', 'vz', 'delta', 'V',
'alpha', 'elong', 'rv', 'heldist',
'x_h', 'y_h', 'z_h',
'vx_h', 'vy_h', 'vz_h')
units = (None, u.d, u.au, u.au, u.au, u.au/u.day, u.au/u.day,
u.au/u.day, u.au, u.mag, u.deg, u.deg, u.km/u.s,
u.au, u.au, u.au, u.au, u.au/u.day, u.au/u.day, u.au/u.day)
for i in range(len(cols)):
assert cols[i] in eph.columns
assert eph[cols[i]].unit == units[i]
def test_local_coordinates(patch_request):
eph = Miriade.get_ephemerides('3552', coordtype=3)
cols = ('target', 'epoch', 'AZ', 'EL', 'V', 'delta', 'alpha', 'elong')
units = (None, u.day, u.deg, u.deg, u.mag, u.au, u.deg, u.deg)
for i in range(len(cols)):
assert cols[i] in eph.columns
assert eph[cols[i]].unit == units[i]
def test_hourangle_coordinates(patch_request):
eph = Miriade.get_ephemerides('3552', coordtype=4)
cols = ('target', 'epoch', 'hourangle',
'DEC', 'V', 'delta', 'alpha', 'elong')
units = (None, u.d, u.deg, u.deg, u.mag, u.au, u.deg, u.deg)
for i in range(len(cols)):
assert cols[i] in eph.columns
assert eph[cols[i]].unit == units[i]
def test_observation_coordinates(patch_request):
eph = Miriade.get_ephemerides('3552', coordtype=5)
cols = ('target', 'epoch', 'siderealtime', 'RAJ2000', 'DECJ2000',
'hourangle', 'DEC', 'AZ', 'EL', 'refraction',
'V', 'delta', 'heldist', 'alpha', 'elong', 'posunc',
'RAcosD_rate', 'DEC_rate', 'delta_rate')
units = (None, u.d, u.h, u.deg, u.deg, u.deg, u.deg, u.deg, u.deg,
u.arcsec, u.mag, u.au, u.au, u.deg, u.deg, u.arcsec,
u.arcsec / u.minute, u.arcsec / u.minute, u.km / u.s)
for i in range(len(cols)):
assert cols[i] in eph.columns
assert eph[cols[i]].unit == units[i]
def test_aoobservation_coordinates(patch_request):
eph = Miriade.get_ephemerides('3552', coordtype=6)
cols = ('target', 'epoch', 'siderealtime', 'RAJ2000', 'DECJ2000',
'refraction', 'V', 'delta', 'heldist', 'alpha',
'elong', 'posunc', 'RAcosD_rate', 'DEC_rate', 'delta_rate')
units = (None, u.d, u.h, u.deg, u.deg, u.arcsec, u.mag,
u.au, u.au, u.deg, u.deg, u.arcsec, u.arcsec / u.minute,
u.arcsec / u.minute, u.km / u.s)
for i in range(len(cols)):
assert cols[i] in eph.columns
assert eph[cols[i]].unit == units[i]
def test_get_raw_response(patch_request):
raw_eph = Miriade.get_ephemerides(
'3552', coordtype=1, get_raw_response=True)
assert "<?xml version='1.0' encoding='UTF-8'?>" in raw_eph
| bsd-3-clause | 3,279,243,539,212,787,000 | 5,133,159,267,179,020,000 | 35.729508 | 75 | 0.587369 | false |
TiddlySpace/tiddlywebplugins.jsondispatcher | setup.py | 1 | 1099 | AUTHOR = 'Chris Dent'
AUTHOR_EMAIL = 'cdent@peermore.com'
MAINTAINER = 'Ben Paddock'
MAINTAINER_EMAIL = 'pads@thisispads.me.uk'
NAME = 'tiddlywebplugins.jsondispatcher'
DESCRIPTION = 'A TiddlyWeb plugin to allow the dispatching of tiddlers to non-Python handlers by serialising tiddler data to JSON'
VERSION = '0.1.4'
import os
from setuptools import setup, find_packages
setup(
namespace_packages = ['tiddlywebplugins'],
name = NAME,
version = VERSION,
description = DESCRIPTION,
long_description = file(os.path.join(os.path.dirname(__file__), 'README')).read(),
author = AUTHOR,
author_email = AUTHOR_EMAIL,
maintainer = MAINTAINER,
maintainer_email = MAINTAINER_EMAIL,
url = 'http://pypi.python.org/pypi/%s' % NAME,
packages = find_packages(exclude=['test']),
platforms = 'Posix; MacOS X; Windows',
install_requires = [
'tiddlyweb',
'tiddlywebplugins.dispatcher',
'tiddlywebplugins.utils',
'beanstalkc'
],
zip_safe = False,
)
| bsd-3-clause | -2,432,216,874,146,367,500 | -589,757,327,608,765,200 | 33.34375 | 130 | 0.630573 | false |
yannrouillard/weboob | modules/parolesmania/test.py | 4 | 1700 | # -*- coding: utf-8 -*-
# Copyright(C) 2013 Julien Veyssier
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.tools.test import BackendTest
from weboob.capabilities.base import NotLoaded
class ParolesmaniaTest(BackendTest):
BACKEND = 'parolesmania'
def test_search_song_n_get(self):
l_lyrics = list(self.backend.iter_lyrics('song', 'chien'))
for songlyrics in l_lyrics:
assert songlyrics.id
assert songlyrics.title
assert songlyrics.artist
assert songlyrics.content is NotLoaded
full_lyr = self.backend.get_lyrics(songlyrics.id)
assert full_lyr.id
assert full_lyr.title
assert full_lyr.artist
assert full_lyr.content is not NotLoaded
def test_search_artist(self):
l_lyrics = list(self.backend.iter_lyrics('artist', 'boris'))
for songlyrics in l_lyrics:
assert songlyrics.id
assert songlyrics.title
assert songlyrics.artist
assert songlyrics.content is NotLoaded
| agpl-3.0 | -8,757,459,900,866,138,000 | -9,167,656,209,541,011,000 | 35.956522 | 77 | 0.687059 | false |
digimarc/django | django/http/__init__.py | 98 | 1186 | from django.http.cookie import SimpleCookie, parse_cookie
from django.http.request import (HttpRequest, QueryDict,
RawPostDataException, UnreadablePostError, build_request_repr)
from django.http.response import (
HttpResponse, StreamingHttpResponse, FileResponse,
HttpResponseRedirect, HttpResponsePermanentRedirect,
HttpResponseNotModified, HttpResponseBadRequest, HttpResponseForbidden,
HttpResponseNotFound, HttpResponseNotAllowed, HttpResponseGone,
HttpResponseServerError, Http404, BadHeaderError, JsonResponse,
)
from django.http.utils import fix_location_header, conditional_content_removal
__all__ = [
'SimpleCookie', 'parse_cookie', 'HttpRequest', 'QueryDict',
'RawPostDataException', 'UnreadablePostError', 'build_request_repr',
'HttpResponse', 'StreamingHttpResponse', 'HttpResponseRedirect',
'HttpResponsePermanentRedirect', 'HttpResponseNotModified',
'HttpResponseBadRequest', 'HttpResponseForbidden', 'HttpResponseNotFound',
'HttpResponseNotAllowed', 'HttpResponseGone', 'HttpResponseServerError',
'Http404', 'BadHeaderError', 'fix_location_header', 'JsonResponse',
'FileResponse', 'conditional_content_removal',
]
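# Usage sketch (illustrative only, not part of the original module): these
# names are re-exported here so that view code can import the request/response
# primitives from a single place, e.g.
#
#     from django.http import JsonResponse
#
#     def ping(request):
#         return JsonResponse({'ok': True})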
| bsd-3-clause | -1,532,551,249,307,980,000 | -8,423,163,711,806,679,000 | 52.909091 | 78 | 0.790051 | false |
brodyberg/autorest | AutoRest/Generators/Python/Python.Tests/Expected/AcceptanceTests/BodyComplex/autorestcomplextestservice/models/sawshark.py | 4 | 1804 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .shark import Shark
class Sawshark(Shark):
"""Sawshark
:param species:
:type species: str
:param length:
:type length: float
:param siblings:
:type siblings: list of :class:`Fish
<fixtures.acceptancetestsbodycomplex.models.Fish>`
:param fishtype: Polymorphic Discriminator
:type fishtype: str
:param age:
:type age: int
:param birthday:
:type birthday: datetime
:param picture:
:type picture: bytearray
"""
_validation = {
'length': {'required': True},
'fishtype': {'required': True},
'birthday': {'required': True},
}
_attribute_map = {
'species': {'key': 'species', 'type': 'str'},
'length': {'key': 'length', 'type': 'float'},
'siblings': {'key': 'siblings', 'type': '[Fish]'},
'fishtype': {'key': 'fishtype', 'type': 'str'},
'age': {'key': 'age', 'type': 'int'},
'birthday': {'key': 'birthday', 'type': 'iso-8601'},
'picture': {'key': 'picture', 'type': 'bytearray'},
}
def __init__(self, length, birthday, species=None, siblings=None, age=None, picture=None):
super(Sawshark, self).__init__(species=species, length=length, siblings=siblings, age=age, birthday=birthday)
self.picture = picture
self.fishtype = 'sawshark'
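# A minimal instantiation sketch (not part of the generated client); it only
# illustrates which arguments the _validation table above marks as required.
def _example_sawshark():  # pragma: no cover
    import datetime
    # 'length' and 'birthday' are required; the remaining fields are optional
    # and the concrete values used here are made up.
    return Sawshark(length=2.4, birthday=datetime.datetime(2015, 2, 13),
                    species='sawshark', age=5)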
| mit | 4,939,018,736,220,144,000 | -803,289,656,077,437,200 | 32.407407 | 117 | 0.559313 | false |
bouncestorage/swift | swift/obj/ssync_sender.py | 6 | 16006 | # Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import urllib
from itertools import ifilter
from swift.common import bufferedhttp
from swift.common import exceptions
from swift.common import http
class Sender(object):
"""
Sends SSYNC requests to the object server.
These requests are eventually handled by
:py:mod:`.ssync_receiver` and full documentation about the
process is there.
"""
def __init__(self, daemon, node, job, suffixes, remote_check_objs=None):
self.daemon = daemon
self.df_mgr = self.daemon._diskfile_mgr
self.node = node
self.job = job
self.suffixes = suffixes
self.connection = None
self.response = None
self.response_buffer = ''
self.response_chunk_left = 0
# available_map has an entry for each object in given suffixes that
# is available to be sync'd; each entry is a hash => timestamp
self.available_map = {}
        # When remote_check_objs is given, ssync_sender only checks whether
        # those objects exist on the remote node.
self.remote_check_objs = remote_check_objs
# send_list has an entry for each object that the receiver wants to
# be sync'ed; each entry is an object hash
self.send_list = []
self.failures = 0
def __call__(self):
"""
Perform ssync with remote node.
:returns: a 2-tuple, in the form (success, can_delete_objs) where
success is a boolean and can_delete_objs is the map of
objects that are in sync with the receiver. Each entry in
can_delete_objs maps a hash => timestamp
"""
if not self.suffixes:
return True, {}
try:
# Double try blocks in case our main error handler fails.
try:
# The general theme for these functions is that they should
# raise exceptions.MessageTimeout for client timeouts and
# exceptions.ReplicationException for common issues that will
# abort the replication attempt and log a simple error. All
# other exceptions will be logged with a full stack trace.
self.connect()
self.missing_check()
if self.remote_check_objs is None:
self.updates()
can_delete_obj = self.available_map
else:
# when we are initialized with remote_check_objs we don't
# *send* any requested updates; instead we only collect
# what's already in sync and safe for deletion
in_sync_hashes = (set(self.available_map.keys()) -
set(self.send_list))
can_delete_obj = dict((hash_, self.available_map[hash_])
for hash_ in in_sync_hashes)
self.disconnect()
if not self.failures:
return True, can_delete_obj
else:
return False, {}
except (exceptions.MessageTimeout,
exceptions.ReplicationException) as err:
self.daemon.logger.error(
'%s:%s/%s/%s %s', self.node.get('replication_ip'),
self.node.get('replication_port'), self.node.get('device'),
self.job.get('partition'), err)
except Exception:
# We don't want any exceptions to escape our code and possibly
# mess up the original replicator code that called us since it
# was originally written to shell out to rsync which would do
# no such thing.
self.daemon.logger.exception(
'%s:%s/%s/%s EXCEPTION in replication.Sender',
self.node.get('replication_ip'),
self.node.get('replication_port'),
self.node.get('device'), self.job.get('partition'))
except Exception:
# We don't want any exceptions to escape our code and possibly
# mess up the original replicator code that called us since it
# was originally written to shell out to rsync which would do
# no such thing.
# This particular exception handler does the minimal amount as it
# would only get called if the above except Exception handler
# failed (bad node or job data).
self.daemon.logger.exception('EXCEPTION in replication.Sender')
return False, {}
def connect(self):
"""
Establishes a connection and starts an SSYNC request
with the object server.
"""
with exceptions.MessageTimeout(
self.daemon.conn_timeout, 'connect send'):
self.connection = bufferedhttp.BufferedHTTPConnection(
'%s:%s' % (self.node['replication_ip'],
self.node['replication_port']))
self.connection.putrequest('SSYNC', '/%s/%s' % (
self.node['device'], self.job['partition']))
self.connection.putheader('Transfer-Encoding', 'chunked')
self.connection.putheader('X-Backend-Storage-Policy-Index',
int(self.job['policy']))
# a sync job must use the node's index for the frag_index of the
# rebuilt fragments instead of the frag_index from the job which
# will be rebuilding them
self.connection.putheader(
'X-Backend-Ssync-Frag-Index', self.node.get(
'index', self.job.get('frag_index', '')))
# a revert job to a handoff will not have a node index
self.connection.putheader('X-Backend-Ssync-Node-Index',
self.node.get('index', ''))
self.connection.endheaders()
with exceptions.MessageTimeout(
self.daemon.node_timeout, 'connect receive'):
self.response = self.connection.getresponse()
if self.response.status != http.HTTP_OK:
self.response.read()
raise exceptions.ReplicationException(
'Expected status %s; got %s' %
(http.HTTP_OK, self.response.status))
def readline(self):
"""
Reads a line from the SSYNC response body.
httplib has no readline and will block on read(x) until x is
read, so we have to do the work ourselves. A bit of this is
taken from Python's httplib itself.
"""
data = self.response_buffer
self.response_buffer = ''
while '\n' not in data and len(data) < self.daemon.network_chunk_size:
if self.response_chunk_left == -1: # EOF-already indicator
break
if self.response_chunk_left == 0:
line = self.response.fp.readline()
i = line.find(';')
if i >= 0:
line = line[:i] # strip chunk-extensions
try:
self.response_chunk_left = int(line.strip(), 16)
except ValueError:
# close the connection as protocol synchronisation is
# probably lost
self.response.close()
raise exceptions.ReplicationException('Early disconnect')
if self.response_chunk_left == 0:
self.response_chunk_left = -1
break
chunk = self.response.fp.read(min(
self.response_chunk_left,
self.daemon.network_chunk_size - len(data)))
if not chunk:
# close the connection as protocol synchronisation is
# probably lost
self.response.close()
raise exceptions.ReplicationException('Early disconnect')
self.response_chunk_left -= len(chunk)
if self.response_chunk_left == 0:
self.response.fp.read(2) # discard the trailing \r\n
data += chunk
if '\n' in data:
data, self.response_buffer = data.split('\n', 1)
data += '\n'
return data
def missing_check(self):
"""
        Handles the sender-side of the MISSING_CHECK step of an
        SSYNC request.
Full documentation of this can be found at
:py:meth:`.Receiver.missing_check`.
"""
# First, send our list.
with exceptions.MessageTimeout(
self.daemon.node_timeout, 'missing_check start'):
msg = ':MISSING_CHECK: START\r\n'
self.connection.send('%x\r\n%s\r\n' % (len(msg), msg))
hash_gen = self.df_mgr.yield_hashes(
self.job['device'], self.job['partition'],
self.job['policy'], self.suffixes,
frag_index=self.job.get('frag_index'))
if self.remote_check_objs is not None:
hash_gen = ifilter(
lambda path_objhash_timestamp:
path_objhash_timestamp[1] in
self.remote_check_objs, hash_gen)
for path, object_hash, timestamp in hash_gen:
self.available_map[object_hash] = timestamp
with exceptions.MessageTimeout(
self.daemon.node_timeout,
'missing_check send line'):
msg = '%s %s\r\n' % (
urllib.quote(object_hash),
urllib.quote(timestamp))
self.connection.send('%x\r\n%s\r\n' % (len(msg), msg))
with exceptions.MessageTimeout(
self.daemon.node_timeout, 'missing_check end'):
msg = ':MISSING_CHECK: END\r\n'
self.connection.send('%x\r\n%s\r\n' % (len(msg), msg))
# Now, retrieve the list of what they want.
while True:
with exceptions.MessageTimeout(
self.daemon.http_timeout, 'missing_check start wait'):
line = self.readline()
if not line:
raise exceptions.ReplicationException('Early disconnect')
line = line.strip()
if line == ':MISSING_CHECK: START':
break
elif line:
raise exceptions.ReplicationException(
'Unexpected response: %r' % line[:1024])
while True:
with exceptions.MessageTimeout(
self.daemon.http_timeout, 'missing_check line wait'):
line = self.readline()
if not line:
raise exceptions.ReplicationException('Early disconnect')
line = line.strip()
if line == ':MISSING_CHECK: END':
break
parts = line.split()
if parts:
self.send_list.append(parts[0])
def updates(self):
"""
Handles the sender-side of the UPDATES step of an SSYNC
request.
Full documentation of this can be found at
:py:meth:`.Receiver.updates`.
"""
# First, send all our subrequests based on the send_list.
with exceptions.MessageTimeout(
self.daemon.node_timeout, 'updates start'):
msg = ':UPDATES: START\r\n'
self.connection.send('%x\r\n%s\r\n' % (len(msg), msg))
for object_hash in self.send_list:
try:
df = self.df_mgr.get_diskfile_from_hash(
self.job['device'], self.job['partition'], object_hash,
self.job['policy'], frag_index=self.job.get('frag_index'))
except exceptions.DiskFileNotExist:
continue
url_path = urllib.quote(
'/%s/%s/%s' % (df.account, df.container, df.obj))
try:
df.open()
# EC reconstructor may have passed a callback to build
# an alternative diskfile...
df = self.job.get('sync_diskfile_builder', lambda *args: df)(
self.job, self.node, df.get_metadata())
except exceptions.DiskFileDeleted as err:
self.send_delete(url_path, err.timestamp)
except exceptions.DiskFileError:
pass
else:
self.send_put(url_path, df)
with exceptions.MessageTimeout(
self.daemon.node_timeout, 'updates end'):
msg = ':UPDATES: END\r\n'
self.connection.send('%x\r\n%s\r\n' % (len(msg), msg))
# Now, read their response for any issues.
while True:
with exceptions.MessageTimeout(
self.daemon.http_timeout, 'updates start wait'):
line = self.readline()
if not line:
raise exceptions.ReplicationException('Early disconnect')
line = line.strip()
if line == ':UPDATES: START':
break
elif line:
raise exceptions.ReplicationException(
'Unexpected response: %r' % line[:1024])
while True:
with exceptions.MessageTimeout(
self.daemon.http_timeout, 'updates line wait'):
line = self.readline()
if not line:
raise exceptions.ReplicationException('Early disconnect')
line = line.strip()
if line == ':UPDATES: END':
break
elif line:
raise exceptions.ReplicationException(
'Unexpected response: %r' % line[:1024])
def send_delete(self, url_path, timestamp):
"""
Sends a DELETE subrequest with the given information.
"""
msg = ['DELETE ' + url_path, 'X-Timestamp: ' + timestamp.internal]
msg = '\r\n'.join(msg) + '\r\n\r\n'
with exceptions.MessageTimeout(
self.daemon.node_timeout, 'send_delete'):
self.connection.send('%x\r\n%s\r\n' % (len(msg), msg))
def send_put(self, url_path, df):
"""
Sends a PUT subrequest for the url_path using the source df
(DiskFile) and content_length.
"""
msg = ['PUT ' + url_path, 'Content-Length: ' + str(df.content_length)]
# Sorted to make it easier to test.
for key, value in sorted(df.get_metadata().items()):
if key not in ('name', 'Content-Length'):
msg.append('%s: %s' % (key, value))
msg = '\r\n'.join(msg) + '\r\n\r\n'
with exceptions.MessageTimeout(self.daemon.node_timeout, 'send_put'):
self.connection.send('%x\r\n%s\r\n' % (len(msg), msg))
for chunk in df.reader():
with exceptions.MessageTimeout(
self.daemon.node_timeout, 'send_put chunk'):
self.connection.send('%x\r\n%s\r\n' % (len(chunk), chunk))
def disconnect(self):
"""
Closes down the connection to the object server once done
with the SSYNC request.
"""
try:
with exceptions.MessageTimeout(
self.daemon.node_timeout, 'disconnect'):
self.connection.send('0\r\n\r\n')
except (Exception, exceptions.Timeout):
pass # We're okay with the above failing.
self.connection.close()
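# Illustration only (not used by the Sender above): every protocol line the
# sender writes is framed as an HTTP/1.1 chunk -- the payload length in hex,
# CRLF, the payload, CRLF -- which is what the repeated
# "'%x\r\n%s\r\n' % (len(msg), msg)" expressions construct.
def _frame_chunk(msg):  # pragma: no cover
    """Return `msg` wrapped in HTTP chunked transfer-encoding framing."""
    return '%x\r\n%s\r\n' % (len(msg), msg)
# Example: _frame_chunk(':MISSING_CHECK: START\r\n') returns
# '17\r\n:MISSING_CHECK: START\r\n\r\n' (0x17 == 23 payload bytes).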
| apache-2.0 | -4,739,404,034,470,820,000 | -3,987,104,821,990,623,700 | 43.461111 | 79 | 0.547982 | false |
maurerpe/FreeCAD | src/Mod/Ship/shipAreasCurve/__init__.py | 60 | 1809 | #***************************************************************************
#* *
#* Copyright (c) 2011, 2016 *
#* Jose Luis Cercos Pita <jlcercos@gmail.com> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
import TaskPanel
def load():
""" Loads the tool """
TaskPanel.createTask()
| lgpl-2.1 | -6,251,038,228,516,936,000 | -480,892,241,442,482,300 | 60.37931 | 76 | 0.385296 | false |
cctaylor/googleads-python-lib | examples/dfp/v201408/custom_field_service/create_custom_fields.py | 4 | 2145 | #!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example creates custom fields.
To determine which custom fields exist, run get_all_custom_fields.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
Tags: CustomFieldService.createCustomFields
"""
__author__ = ('Nicholas Chen',
'Joseph DiLallo')
import uuid
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
# Initialize appropriate service.
custom_field_service = client.GetService(
'CustomFieldService', version='v201408')
# Create custom field objects.
custom_fields = [
{
'name': 'Customer comments #%s' % uuid.uuid4(),
'entityType': 'LINE_ITEM',
'dataType': 'STRING',
'visibility': 'FULL'
}, {
'name': 'Internal approval status #%s' % uuid.uuid4(),
'entityType': 'LINE_ITEM',
'dataType': 'DROP_DOWN',
'visibility': 'FULL'
}
]
# Add custom fields.
custom_fields = custom_field_service.createCustomFields(custom_fields)
# Display results.
for custom_field in custom_fields:
print ('Custom field with ID \'%s\' and name \'%s\' was created.'
% (custom_field['id'], custom_field['name']))
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client)
| apache-2.0 | 5,263,476,237,174,868,000 | -5,772,091,983,416,200,000 | 30.086957 | 77 | 0.684848 | false |
bdero/edx-platform | cms/djangoapps/contentstore/features/course-export.py | 11 | 2452 | # pylint: disable=C0111
# pylint: disable=W0621
# pylint: disable=W0613
from lettuce import world, step
from component_settings_editor_helpers import enter_xml_in_advanced_problem
from nose.tools import assert_true, assert_equal
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from contentstore.utils import reverse_usage_url
@step('I go to the export page$')
def i_go_to_the_export_page(step):
world.click_tools()
link_css = 'li.nav-course-tools-export a'
world.css_click(link_css)
@step('I export the course$')
def i_export_the_course(step):
step.given('I go to the export page')
world.css_click('a.action-export')
@step('I edit and enter bad XML$')
def i_enter_bad_xml(step):
enter_xml_in_advanced_problem(step,
"""<problem><h1>Smallest Canvas</h1>
<p>You want to make the smallest canvas you can.</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice">
<choice correct="false"><verbatim><canvas id="myCanvas" width = 10 height = 100> </canvas></verbatim></choice>
<choice correct="true"><code><canvas id="myCanvas" width = 10 height = 10> </canvas></code></choice>
</choicegroup>
</multiplechoiceresponse>
</problem>"""
)
@step('I edit and enter an ampersand$')
def i_enter_an_ampersand(step):
enter_xml_in_advanced_problem(step, "<problem>&</problem>")
@step('I get an error dialog$')
def get_an_error_dialog(step):
assert_true(world.is_css_present("div.prompt.error"))
@step('I can click to go to the unit with the error$')
def i_click_on_error_dialog(step):
world.click_link_by_text('Correct failed component')
assert_true(world.css_html("span.inline-error").startswith("Problem i4x://MITx/999/problem"))
course_key = SlashSeparatedCourseKey("MITx", "999", "Robot_Super_Course")
# we don't know the actual ID of the vertical. So just check that we did go to a
# vertical page in the course (there should only be one).
vertical_usage_key = course_key.make_usage_key("vertical", None)
vertical_url = reverse_usage_url('container_handler', vertical_usage_key)
# Remove the trailing "/None" from the URL - we don't know the course ID, so we just want to
# check that we visited a vertical URL.
if vertical_url.endswith("/None"):
vertical_url = vertical_url[:-5]
assert_equal(1, world.browser.url.count(vertical_url))
| agpl-3.0 | 5,369,503,969,909,592,000 | -1,526,953,048,484,495,400 | 37.920635 | 124 | 0.680669 | false |
camptocamp/ngo-addons-backport | addons/l10n_es/__init__.py | 100 | 1273 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2007 ACYSOS S.L. (http://acysos.com) All Rights Reserved.
# Pedro Tarrafeta (pedro@acysos.com)
# Copyright (c) 2008-2010 Zikzakmedia S.L. (http://zikzakmedia.com) All Rights Reserved.
# Jordi Esteve <jesteve@zikzakmedia.com>
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 7,294,771,143,902,993,000 | 3,675,230,359,691,472,400 | 44.464286 | 91 | 0.600943 | false |
vincentchoqueuse/parametrix | parametrix/bayesian_linear_model/signal_models.py | 1 | 1666 | from parametrix.core.signal_models import BaseSignalModel
import numpy as np
import scipy.linalg as lg
from scipy.stats import multivariate_normal
class M_Bayesian_L(BaseSignalModel):
""" Bayesian Linear Signal Model (Eq 10.26)[KAY93]_
.. math::
\\textbf{y}=\\textbf{H}\\textbf{x}+\\textbf{b}
where *x* is a p * 1 random vector with prior PDF N(m_x,C_x) and b is a random vector with PDF N(0,C_w)
.. [KAY93] Kay, Steven M. "Fundamentals of statistical signal processing: estimation theory." (1993).
"""
name="M_Bayesian_L"
def __init__(self,H,m_x,C_x,C_w,x=None,name="M_Bayesian_L"):
self.H=H
self.m_x=m_x
self.C_x=C_x
self.C_w=C_w
self.N=H.shape[0]
self.L=self.H.shape[1]
self.name=name
if isinstance(x, np.ndarray):
self.x=x
else:
self.x=m_x
def compute_sigma2s(self):
return np.trace(self.C_x)/self.C_x.shape[0]
@property
def sigma2(self):
return np.trace(self.C_w)/self.C_w.shape[0]
def __setattr__(self, attribute, value):
if attribute =="SNR":
sigma2=self.get_sigma2s()/(10**(value/10))
self.C_w=(sigma2/self.sigma2)*self.C_w
else:
super().__setattr__(attribute, value)
@property
def S(self):
self.x=multivariate_normal.rvs(mean=self.m_x,cov=self.C_x)
H=np.matrix(self.H)
x=np.matrix(self.x).T
return H*x
def rvs(self):
b=np.matrix(multivariate_normal.rvs(cov=self.C_w)).T
return self.S+b
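# Minimal usage sketch (illustration only, not part of the original API): the
# dimensions and covariances below are invented; it simply shows how the model
# y = H x + b from the docstring is instantiated and sampled.
def _example_bayesian_linear_model():  # pragma: no cover
	N = 4
	H = np.eye(N)
	model = M_Bayesian_L(H, m_x=np.zeros(N), C_x=np.eye(N), C_w=0.1 * np.eye(N))
	return model.rvs()  # one simulated N x 1 observation vector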
| bsd-3-clause | 2,467,208,576,941,098,500 | -4,262,574,495,870,573,000 | 27.237288 | 111 | 0.553421 | false |
vipul-sharma20/oh-mainline | vendor/packages/celery/celery/worker/buckets.py | 18 | 11077 | # -*- coding: utf-8 -*-
"""
celery.worker.buckets
~~~~~~~~~~~~~~~~~~~~~
This module implements the rate limiting of tasks,
by having a token bucket queue for each task type.
When a task is allowed to be processed it's moved
over to the ``ready_queue``.
The :mod:`celery.worker.mediator` is then responsible
for moving tasks from the ``ready_queue`` to the worker pool.
:copyright: (c) 2009 - 2011 by Ask Solem.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from __future__ import with_statement
import threading
from collections import deque
from time import time, sleep
from Queue import Queue, Empty
from ..datastructures import TokenBucket
from ..utils import timeutils
from ..utils.compat import zip_longest, chain_from_iterable
class RateLimitExceeded(Exception):
"""The token buckets rate limit has been exceeded."""
class TaskBucket(object):
"""This is a collection of token buckets, each task type having
its own token bucket. If the task type doesn't have a rate limit,
it will have a plain :class:`~Queue.Queue` object instead of a
:class:`TokenBucketQueue`.
The :meth:`put` operation forwards the task to its appropriate bucket,
while the :meth:`get` operation iterates over the buckets and retrieves
the first available item.
Say we have three types of tasks in the registry: `celery.ping`,
`feed.refresh` and `video.compress`, the TaskBucket will consist
of the following items::
{"celery.ping": TokenBucketQueue(fill_rate=300),
"feed.refresh": Queue(),
"video.compress": TokenBucketQueue(fill_rate=2)}
The get operation will iterate over these until one of the buckets
is able to return an item. The underlying datastructure is a `dict`,
so the order is ignored here.
:param task_registry: The task registry used to get the task
type class for a given task name.
"""
def __init__(self, task_registry):
self.task_registry = task_registry
self.buckets = {}
self.init_with_registry()
self.immediate = deque()
self.mutex = threading.Lock()
self.not_empty = threading.Condition(self.mutex)
def put(self, request):
"""Put a :class:`~celery.worker.job.TaskRequest` into
        the appropriate bucket."""
with self.mutex:
if request.task_name not in self.buckets:
self.add_bucket_for_type(request.task_name)
self.buckets[request.task_name].put_nowait(request)
self.not_empty.notify()
put_nowait = put
def _get_immediate(self):
try:
return self.immediate.popleft()
except IndexError:
raise Empty()
def _get(self):
# If the first bucket is always returning items, we would never
# get to fetch items from the other buckets. So we always iterate over
# all the buckets and put any ready items into a queue called
# "immediate". This queue is always checked for cached items first.
try:
return 0, self._get_immediate()
except Empty:
pass
remaining_times = []
for bucket in self.buckets.values():
remaining = bucket.expected_time()
if not remaining:
try:
# Just put any ready items into the immediate queue.
self.immediate.append(bucket.get_nowait())
except Empty:
pass
except RateLimitExceeded:
remaining_times.append(bucket.expected_time())
else:
remaining_times.append(remaining)
# Try the immediate queue again.
try:
return 0, self._get_immediate()
except Empty:
if not remaining_times:
# No items in any of the buckets.
raise
# There's items, but have to wait before we can retrieve them,
# return the shortest remaining time.
return min(remaining_times), None
    def get(self, block=True, timeout=None):
        """Retrieve the task from the first available bucket.
Available as in, there is an item in the queue and you can
consume tokens from it.
"""
time_start = time()
did_timeout = lambda: timeout and time() - time_start > timeout
with self.not_empty:
while True:
try:
remaining_time, item = self._get()
except Empty:
if not block or did_timeout():
raise
self.not_empty.wait(timeout)
continue
if remaining_time:
if not block or did_timeout():
raise Empty()
sleep(min(remaining_time, timeout or 1))
else:
return item
def get_nowait(self):
return self.get(block=False)
def init_with_registry(self):
"""Initialize with buckets for all the task types in the registry."""
for task in self.task_registry.keys():
self.add_bucket_for_type(task)
def refresh(self):
"""Refresh rate limits for all task types in the registry."""
for task in self.task_registry.keys():
self.update_bucket_for_type(task)
def get_bucket_for_type(self, task_name):
"""Get the bucket for a particular task type."""
if task_name not in self.buckets:
return self.add_bucket_for_type(task_name)
return self.buckets[task_name]
def _get_queue_for_type(self, task_name):
bucket = self.buckets[task_name]
if isinstance(bucket, TokenBucketQueue):
return bucket.queue
return bucket
def update_bucket_for_type(self, task_name):
task_type = self.task_registry[task_name]
rate_limit = getattr(task_type, "rate_limit", None)
rate_limit = timeutils.rate(rate_limit)
task_queue = FastQueue()
if task_name in self.buckets:
task_queue = self._get_queue_for_type(task_name)
else:
task_queue = FastQueue()
if rate_limit:
task_queue = TokenBucketQueue(rate_limit, queue=task_queue)
self.buckets[task_name] = task_queue
return task_queue
def add_bucket_for_type(self, task_name):
"""Add a bucket for a task type.
        Will read the task's rate limit and create a :class:`TokenBucketQueue`
        if it has one. If the task doesn't have a rate limit, a
        :class:`FastQueue` will be used instead.
"""
if task_name not in self.buckets:
return self.update_bucket_for_type(task_name)
def qsize(self):
"""Get the total size of all the queues."""
return sum(bucket.qsize() for bucket in self.buckets.values())
def empty(self):
"""Returns :const:`True` if all of the buckets are empty."""
return all(bucket.empty() for bucket in self.buckets.values())
def clear(self):
"""Delete the data in all of the buckets."""
for bucket in self.buckets.values():
bucket.clear()
@property
def items(self):
"""Flattens the data in all of the buckets into a single list."""
# for queues with contents [(1, 2), (3, 4), (5, 6), (7, 8)]
# zips and flattens to [1, 3, 5, 7, 2, 4, 6, 8]
return filter(None, chain_from_iterable(zip_longest(*[bucket.items
for bucket in self.buckets.values()])))
class FastQueue(Queue):
""":class:`Queue.Queue` supporting the interface of
:class:`TokenBucketQueue`."""
def clear(self):
return self.queue.clear()
def expected_time(self, tokens=1):
return 0
def wait(self, block=True):
return self.get(block=block)
@property
def items(self):
return self.queue
class TokenBucketQueue(object):
"""Queue with rate limited get operations.
This uses the token bucket algorithm to rate limit the queue on get
operations.
:param fill_rate: The rate in tokens/second that the bucket will
be refilled.
:keyword capacity: Maximum number of tokens in the bucket.
Default is 1.
"""
RateLimitExceeded = RateLimitExceeded
def __init__(self, fill_rate, queue=None, capacity=1):
self._bucket = TokenBucket(fill_rate, capacity)
self.queue = queue
if not self.queue:
self.queue = Queue()
def put(self, item, block=True):
"""Put an item onto the queue."""
self.queue.put(item, block=block)
def put_nowait(self, item):
"""Put an item into the queue without blocking.
:raises Queue.Full: If a free slot is not immediately available.
"""
return self.put(item, block=False)
def get(self, block=True):
"""Remove and return an item from the queue.
:raises RateLimitExceeded: If a token could not be consumed from the
token bucket (consuming from the queue
too fast).
:raises Queue.Empty: If an item is not immediately available.
"""
get = block and self.queue.get or self.queue.get_nowait
if not block and not self.items:
raise Empty()
if not self._bucket.can_consume(1):
raise RateLimitExceeded()
return get()
def get_nowait(self):
"""Remove and return an item from the queue without blocking.
:raises RateLimitExceeded: If a token could not be consumed from the
token bucket (consuming from the queue
too fast).
:raises Queue.Empty: If an item is not immediately available.
"""
return self.get(block=False)
def qsize(self):
"""Returns the size of the queue."""
return self.queue.qsize()
def empty(self):
"""Returns :const:`True` if the queue is empty."""
return self.queue.empty()
def clear(self):
"""Delete all data in the queue."""
return self.items.clear()
def wait(self, block=False):
"""Wait until a token can be retrieved from the bucket and return
the next item."""
get = self.get
expected_time = self.expected_time
while 1:
remaining = expected_time()
if not remaining:
return get(block=block)
sleep(remaining)
def expected_time(self, tokens=1):
"""Returns the expected time in seconds of when a new token should be
available."""
if not self.items:
return 0
return self._bucket.expected_time(tokens)
@property
def items(self):
"""Underlying data. Do not modify."""
return self.queue.queue
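# Usage sketch (illustrative only; the rate limit and items are made up):
# a TokenBucketQueue delays get()/wait() so items are released at roughly
# `fill_rate` per second, which is how TaskBucket throttles task types that
# declare a rate_limit.
def _example_token_bucket_queue():  # pragma: no cover
    bucket_queue = TokenBucketQueue(fill_rate=2)  # ~2 items per second
    bucket_queue.put('task-1')
    bucket_queue.put('task-2')
    first = bucket_queue.wait()   # returns immediately while a token is free
    second = bucket_queue.wait()  # sleeps until the bucket has refilled
    return first, second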
| agpl-3.0 | -7,292,107,619,219,128,000 | -4,543,761,833,413,395,000 | 31.967262 | 78 | 0.591857 | false |
benfinke/ns_python | build/lib/nssrc/com/citrix/netscaler/nitro/resource/config/lb/lbmonbindings_binding.py | 3 | 3899 | #
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class lbmonbindings_binding(base_resource):
""" Binding class showing the resources that can be bound to lbmonbindings_binding.
"""
def __init__(self) :
self._monitorname = ""
self.lbmonbindings_servicegroup_binding = []
self.lbmonbindings_service_binding = []
@property
def monitorname(self) :
ur"""The name of the monitor.<br/>Minimum length = 1.
"""
try :
return self._monitorname
except Exception as e:
raise e
@monitorname.setter
def monitorname(self, monitorname) :
ur"""The name of the monitor.<br/>Minimum length = 1
"""
try :
self._monitorname = monitorname
except Exception as e:
raise e
@property
def lbmonbindings_service_bindings(self) :
ur"""service that can be bound to lbmonbindings.
"""
try :
return self._lbmonbindings_service_binding
except Exception as e:
raise e
@property
def lbmonbindings_servicegroup_bindings(self) :
ur"""servicegroup that can be bound to lbmonbindings.
"""
try :
return self._lbmonbindings_servicegroup_binding
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(lbmonbindings_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.lbmonbindings_binding
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
if self.monitorname is not None :
return str(self.monitorname)
return None
except Exception as e :
raise e
@classmethod
def get(self, service, monitorname) :
ur""" Use this API to fetch lbmonbindings_binding resource.
"""
try :
if type(monitorname) is not list :
obj = lbmonbindings_binding()
obj.monitorname = monitorname
response = obj.get_resource(service)
			else :
				if monitorname and len(monitorname) > 0 :
					obj = [lbmonbindings_binding() for _ in range(len(monitorname))]
					response = [None] * len(monitorname)
					for i in range(len(monitorname)) :
						obj[i].monitorname = monitorname[i]
						response[i] = obj[i].get_resource(service)
			return response
except Exception as e:
raise e
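# Usage sketch (illustrative only): `service` must be an authenticated
# nitro_service session and 'my-http-monitor' is a made-up monitor name.
def _example_fetch_bindings(service):  # pragma: no cover
	return lbmonbindings_binding.get(service, 'my-http-monitor')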
class lbmonbindings_binding_response(base_response) :
def __init__(self, length=1) :
self.lbmonbindings_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.lbmonbindings_binding = [lbmonbindings_binding() for _ in range(length)]
| apache-2.0 | -3,999,676,108,951,094,000 | -5,172,060,989,213,303,000 | 30.443548 | 123 | 0.714286 | false |
espadrine/opera | chromium/src/tools/grit/grit/tool/postprocess_unittest.py | 61 | 1851 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit test that checks postprocessing of files.
Tests postprocessing by having the postprocessor
modify the grd data tree, changing the message name attributes.
'''
import os
import re
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
import unittest
import grit.tool.postprocess_interface
from grit.tool import rc2grd
class PostProcessingUnittest(unittest.TestCase):
def testPostProcessing(self):
rctext = '''STRINGTABLE
BEGIN
DUMMY_STRING_1 "String 1"
// Some random description
DUMMY_STRING_2 "This text was added during preprocessing"
END
'''
tool = rc2grd.Rc2Grd()
class DummyOpts(object):
verbose = False
extra_verbose = False
tool.o = DummyOpts()
tool.post_process = 'grit.tool.postprocess_unittest.DummyPostProcessor'
result = tool.Process(rctext, '.\resource.rc')
self.failUnless(
result.children[2].children[2].children[0].attrs['name'] == 'SMART_STRING_1')
self.failUnless(
result.children[2].children[2].children[1].attrs['name'] == 'SMART_STRING_2')
class DummyPostProcessor(grit.tool.postprocess_interface.PostProcessor):
'''
Post processing replaces all message name attributes containing "DUMMY" to
"SMART".
'''
def Process(self, rctext, rcpath, grdnode):
smarter = re.compile(r'(DUMMY)(.*)')
messages = grdnode.children[2].children[2]
for node in messages.children:
name_attr = node.attrs['name']
m = smarter.search(name_attr)
if m:
node.attrs['name'] = 'SMART' + m.group(2)
return grdnode
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | -2,467,759,613,336,688,600 | -1,408,105,574,501,049,000 | 28.380952 | 83 | 0.681253 | false |
milwaukeegdi/milwaukeegdi.github.io | intro/node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/MSVSSettings.py | 1361 | 45045 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
r"""Code to validate and convert settings of the Microsoft build tools.
This file contains code to validate and convert settings of the Microsoft
build tools. The functions ConvertToMSBuildSettings(), ValidateMSVSSettings(),
and ValidateMSBuildSettings() are the entry points.
This file was created by comparing the projects created by Visual Studio 2008
and Visual Studio 2010 for all available settings through the user interface.
The MSBuild schemas were also considered. They are typically found in the
MSBuild install directory, e.g. c:\Program Files (x86)\MSBuild
"""
import sys
import re
# Dictionaries of settings validators. The key is the tool name, the value is
# a dictionary mapping setting names to validation functions.
_msvs_validators = {}
_msbuild_validators = {}
# A dictionary of settings converters. The key is the tool name, the value is
# a dictionary mapping setting names to conversion functions.
_msvs_to_msbuild_converters = {}
# Tool name mapping from MSVS to MSBuild.
_msbuild_name_of_tool = {}
class _Tool(object):
"""Represents a tool used by MSVS or MSBuild.
Attributes:
msvs_name: The name of the tool in MSVS.
msbuild_name: The name of the tool in MSBuild.
"""
def __init__(self, msvs_name, msbuild_name):
self.msvs_name = msvs_name
self.msbuild_name = msbuild_name
def _AddTool(tool):
"""Adds a tool to the four dictionaries used to process settings.
This only defines the tool. Each setting also needs to be added.
Args:
tool: The _Tool object to be added.
"""
_msvs_validators[tool.msvs_name] = {}
_msbuild_validators[tool.msbuild_name] = {}
_msvs_to_msbuild_converters[tool.msvs_name] = {}
_msbuild_name_of_tool[tool.msvs_name] = tool.msbuild_name
def _GetMSBuildToolSettings(msbuild_settings, tool):
"""Returns an MSBuild tool dictionary. Creates it if needed."""
return msbuild_settings.setdefault(tool.msbuild_name, {})
class _Type(object):
"""Type of settings (Base class)."""
def ValidateMSVS(self, value):
"""Verifies that the value is legal for MSVS.
Args:
value: the value to check for this type.
Raises:
ValueError if value is not valid for MSVS.
"""
def ValidateMSBuild(self, value):
"""Verifies that the value is legal for MSBuild.
Args:
value: the value to check for this type.
Raises:
ValueError if value is not valid for MSBuild.
"""
def ConvertToMSBuild(self, value):
"""Returns the MSBuild equivalent of the MSVS value given.
Args:
value: the MSVS value to convert.
Returns:
the MSBuild equivalent.
Raises:
ValueError if value is not valid.
"""
return value
class _String(_Type):
"""A setting that's just a string."""
def ValidateMSVS(self, value):
if not isinstance(value, basestring):
raise ValueError('expected string; got %r' % value)
def ValidateMSBuild(self, value):
if not isinstance(value, basestring):
raise ValueError('expected string; got %r' % value)
def ConvertToMSBuild(self, value):
# Convert the macros
return ConvertVCMacrosToMSBuild(value)
class _StringList(_Type):
  """A setting that's a list of strings."""
def ValidateMSVS(self, value):
if not isinstance(value, basestring) and not isinstance(value, list):
raise ValueError('expected string list; got %r' % value)
def ValidateMSBuild(self, value):
if not isinstance(value, basestring) and not isinstance(value, list):
raise ValueError('expected string list; got %r' % value)
def ConvertToMSBuild(self, value):
# Convert the macros
if isinstance(value, list):
return [ConvertVCMacrosToMSBuild(i) for i in value]
else:
return ConvertVCMacrosToMSBuild(value)
class _Boolean(_Type):
"""Boolean settings, can have the values 'false' or 'true'."""
def _Validate(self, value):
if value != 'true' and value != 'false':
raise ValueError('expected bool; got %r' % value)
def ValidateMSVS(self, value):
self._Validate(value)
def ValidateMSBuild(self, value):
self._Validate(value)
def ConvertToMSBuild(self, value):
self._Validate(value)
return value
class _Integer(_Type):
"""Integer settings."""
def __init__(self, msbuild_base=10):
_Type.__init__(self)
self._msbuild_base = msbuild_base
def ValidateMSVS(self, value):
# Try to convert, this will raise ValueError if invalid.
self.ConvertToMSBuild(value)
def ValidateMSBuild(self, value):
# Try to convert, this will raise ValueError if invalid.
int(value, self._msbuild_base)
def ConvertToMSBuild(self, value):
msbuild_format = (self._msbuild_base == 10) and '%d' or '0x%04x'
return msbuild_format % int(value)
class _Enumeration(_Type):
"""Type of settings that is an enumeration.
In MSVS, the values are indexes like '0', '1', and '2'.
MSBuild uses text labels that are more representative, like 'Win32'.
Constructor args:
label_list: an array of MSBuild labels that correspond to the MSVS index.
In the rare cases where MSVS has skipped an index value, None is
used in the array to indicate the unused spot.
new: an array of labels that are new to MSBuild.
"""
def __init__(self, label_list, new=None):
_Type.__init__(self)
self._label_list = label_list
self._msbuild_values = set(value for value in label_list
if value is not None)
if new is not None:
self._msbuild_values.update(new)
def ValidateMSVS(self, value):
# Try to convert. It will raise an exception if not valid.
self.ConvertToMSBuild(value)
def ValidateMSBuild(self, value):
if value not in self._msbuild_values:
raise ValueError('unrecognized enumerated value %s' % value)
def ConvertToMSBuild(self, value):
index = int(value)
if index < 0 or index >= len(self._label_list):
raise ValueError('index value (%d) not in expected range [0, %d)' %
(index, len(self._label_list)))
label = self._label_list[index]
if label is None:
raise ValueError('converted value for %s not specified.' % value)
return label
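# For example (purely illustrative), the 'Optimization' setting defined later
# in this file maps the MSVS index '2' to the MSBuild label 'MaxSpeed':
#   _Enumeration(['Disabled', 'MinSpace', 'MaxSpeed', 'Full']).ConvertToMSBuild('2')
#   returns 'MaxSpeed'.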
# Instantiate the various generic types.
_boolean = _Boolean()
_integer = _Integer()
# For now, we don't do any special validation on these types:
_string = _String()
_file_name = _String()
_folder_name = _String()
_file_list = _StringList()
_folder_list = _StringList()
_string_list = _StringList()
# Some boolean settings went from numerical values to boolean. The
# mapping is 0: default, 1: false, 2: true.
_newly_boolean = _Enumeration(['', 'false', 'true'])
def _Same(tool, name, setting_type):
"""Defines a setting that has the same name in MSVS and MSBuild.
Args:
tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
name: the name of the setting.
setting_type: the type of this setting.
"""
_Renamed(tool, name, name, setting_type)
def _Renamed(tool, msvs_name, msbuild_name, setting_type):
"""Defines a setting for which the name has changed.
Args:
tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
msvs_name: the name of the MSVS setting.
msbuild_name: the name of the MSBuild setting.
setting_type: the type of this setting.
"""
def _Translate(value, msbuild_settings):
msbuild_tool_settings = _GetMSBuildToolSettings(msbuild_settings, tool)
msbuild_tool_settings[msbuild_name] = setting_type.ConvertToMSBuild(value)
_msvs_validators[tool.msvs_name][msvs_name] = setting_type.ValidateMSVS
_msbuild_validators[tool.msbuild_name][msbuild_name] = (
setting_type.ValidateMSBuild)
_msvs_to_msbuild_converters[tool.msvs_name][msvs_name] = _Translate
def _Moved(tool, settings_name, msbuild_tool_name, setting_type):
_MovedAndRenamed(tool, settings_name, msbuild_tool_name, settings_name,
setting_type)
def _MovedAndRenamed(tool, msvs_settings_name, msbuild_tool_name,
msbuild_settings_name, setting_type):
"""Defines a setting that may have moved to a new section.
Args:
tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
msvs_settings_name: the MSVS name of the setting.
msbuild_tool_name: the name of the MSBuild tool to place the setting under.
msbuild_settings_name: the MSBuild name of the setting.
setting_type: the type of this setting.
"""
def _Translate(value, msbuild_settings):
tool_settings = msbuild_settings.setdefault(msbuild_tool_name, {})
tool_settings[msbuild_settings_name] = setting_type.ConvertToMSBuild(value)
_msvs_validators[tool.msvs_name][msvs_settings_name] = (
setting_type.ValidateMSVS)
validator = setting_type.ValidateMSBuild
_msbuild_validators[msbuild_tool_name][msbuild_settings_name] = validator
_msvs_to_msbuild_converters[tool.msvs_name][msvs_settings_name] = _Translate
def _MSVSOnly(tool, name, setting_type):
"""Defines a setting that is only found in MSVS.
Args:
tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
name: the name of the setting.
setting_type: the type of this setting.
"""
def _Translate(unused_value, unused_msbuild_settings):
# Since this is for MSVS only settings, no translation will happen.
pass
_msvs_validators[tool.msvs_name][name] = setting_type.ValidateMSVS
_msvs_to_msbuild_converters[tool.msvs_name][name] = _Translate
def _MSBuildOnly(tool, name, setting_type):
"""Defines a setting that is only found in MSBuild.
Args:
tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
name: the name of the setting.
setting_type: the type of this setting.
"""
def _Translate(value, msbuild_settings):
# Let msbuild-only properties get translated as-is from msvs_settings.
tool_settings = msbuild_settings.setdefault(tool.msbuild_name, {})
tool_settings[name] = value
_msbuild_validators[tool.msbuild_name][name] = setting_type.ValidateMSBuild
_msvs_to_msbuild_converters[tool.msvs_name][name] = _Translate
def _ConvertedToAdditionalOption(tool, msvs_name, flag):
"""Defines a setting that's handled via a command line option in MSBuild.
Args:
tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
msvs_name: the name of the MSVS setting that if 'true' becomes a flag
flag: the flag to insert at the end of the AdditionalOptions
"""
def _Translate(value, msbuild_settings):
if value == 'true':
tool_settings = _GetMSBuildToolSettings(msbuild_settings, tool)
if 'AdditionalOptions' in tool_settings:
new_flags = '%s %s' % (tool_settings['AdditionalOptions'], flag)
else:
new_flags = flag
tool_settings['AdditionalOptions'] = new_flags
_msvs_validators[tool.msvs_name][msvs_name] = _boolean.ValidateMSVS
_msvs_to_msbuild_converters[tool.msvs_name][msvs_name] = _Translate
def _CustomGeneratePreprocessedFile(tool, msvs_name):
def _Translate(value, msbuild_settings):
tool_settings = _GetMSBuildToolSettings(msbuild_settings, tool)
if value == '0':
tool_settings['PreprocessToFile'] = 'false'
tool_settings['PreprocessSuppressLineNumbers'] = 'false'
elif value == '1': # /P
tool_settings['PreprocessToFile'] = 'true'
tool_settings['PreprocessSuppressLineNumbers'] = 'false'
elif value == '2': # /EP /P
tool_settings['PreprocessToFile'] = 'true'
tool_settings['PreprocessSuppressLineNumbers'] = 'true'
else:
raise ValueError('value must be one of [0, 1, 2]; got %s' % value)
# Create a bogus validator that looks for '0', '1', or '2'
msvs_validator = _Enumeration(['a', 'b', 'c']).ValidateMSVS
_msvs_validators[tool.msvs_name][msvs_name] = msvs_validator
msbuild_validator = _boolean.ValidateMSBuild
msbuild_tool_validators = _msbuild_validators[tool.msbuild_name]
msbuild_tool_validators['PreprocessToFile'] = msbuild_validator
msbuild_tool_validators['PreprocessSuppressLineNumbers'] = msbuild_validator
_msvs_to_msbuild_converters[tool.msvs_name][msvs_name] = _Translate
fix_vc_macro_slashes_regex_list = ('IntDir', 'OutDir')
fix_vc_macro_slashes_regex = re.compile(
r'(\$\((?:%s)\))(?:[\\/]+)' % "|".join(fix_vc_macro_slashes_regex_list)
)
# Regular expression to detect keys that were generated by exclusion lists
_EXCLUDED_SUFFIX_RE = re.compile('^(.*)_excluded$')
def _ValidateExclusionSetting(setting, settings, error_msg, stderr=sys.stderr):
"""Verify that 'setting' is valid if it is generated from an exclusion list.
If the setting appears to be generated from an exclusion list, the root name
is checked.
Args:
setting: A string that is the setting name to validate
settings: A dictionary where the keys are valid settings
error_msg: The message to emit in the event of error
stderr: The stream receiving the error messages.
"""
# This may be unrecognized because it's an exclusion list. If the
# setting name has the _excluded suffix, then check the root name.
unrecognized = True
m = re.match(_EXCLUDED_SUFFIX_RE, setting)
if m:
root_setting = m.group(1)
unrecognized = root_setting not in settings
if unrecognized:
# We don't know this setting. Give a warning.
print >> stderr, error_msg
def FixVCMacroSlashes(s):
"""Replace macros which have excessive following slashes.
These macros are known to have a built-in trailing slash. Furthermore, many
scripts hiccup on processing paths with extra slashes in the middle.
This list is probably not exhaustive. Add as needed.
"""
if '$' in s:
s = fix_vc_macro_slashes_regex.sub(r'\1', s)
return s
def ConvertVCMacrosToMSBuild(s):
  """Convert the MSVS macros found in the string to the MSBuild equivalent.
This list is probably not exhaustive. Add as needed.
"""
if '$' in s:
replace_map = {
'$(ConfigurationName)': '$(Configuration)',
'$(InputDir)': '%(RelativeDir)',
'$(InputExt)': '%(Extension)',
'$(InputFileName)': '%(Filename)%(Extension)',
'$(InputName)': '%(Filename)',
'$(InputPath)': '%(Identity)',
'$(ParentName)': '$(ProjectFileName)',
'$(PlatformName)': '$(Platform)',
'$(SafeInputName)': '%(Filename)',
}
for old, new in replace_map.iteritems():
s = s.replace(old, new)
s = FixVCMacroSlashes(s)
return s
def ConvertToMSBuildSettings(msvs_settings, stderr=sys.stderr):
"""Converts MSVS settings (VS2008 and earlier) to MSBuild settings (VS2010+).
Args:
msvs_settings: A dictionary. The key is the tool name. The values are
themselves dictionaries of settings and their values.
stderr: The stream receiving the error messages.
Returns:
A dictionary of MSBuild settings. The key is either the MSBuild tool name
or the empty string (for the global settings). The values are themselves
dictionaries of settings and their values.
"""
msbuild_settings = {}
for msvs_tool_name, msvs_tool_settings in msvs_settings.iteritems():
if msvs_tool_name in _msvs_to_msbuild_converters:
msvs_tool = _msvs_to_msbuild_converters[msvs_tool_name]
for msvs_setting, msvs_value in msvs_tool_settings.iteritems():
if msvs_setting in msvs_tool:
# Invoke the translation function.
try:
msvs_tool[msvs_setting](msvs_value, msbuild_settings)
except ValueError, e:
print >> stderr, ('Warning: while converting %s/%s to MSBuild, '
'%s' % (msvs_tool_name, msvs_setting, e))
else:
_ValidateExclusionSetting(msvs_setting,
msvs_tool,
('Warning: unrecognized setting %s/%s '
'while converting to MSBuild.' %
(msvs_tool_name, msvs_setting)),
stderr)
else:
print >> stderr, ('Warning: unrecognized tool %s while converting to '
'MSBuild.' % msvs_tool_name)
return msbuild_settings
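# Minimal illustration (not part of the original module) of the conversion
# performed above; the tool and setting names come from the tables defined
# later in this file, while the input values themselves are made up.
def _example_convert_to_msbuild():  # pragma: no cover
  msvs = {
      'VCCLCompilerTool': {
          'Optimization': '2',  # enumeration index, becomes 'MaxSpeed'
          'AdditionalIncludeDirectories': 'include;$(InputDir)',
      },
  }
  return ConvertToMSBuildSettings(msvs)
  # -> {'ClCompile': {'Optimization': 'MaxSpeed',
  #                   'AdditionalIncludeDirectories': 'include;%(RelativeDir)'}}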
def ValidateMSVSSettings(settings, stderr=sys.stderr):
"""Validates that the names of the settings are valid for MSVS.
Args:
settings: A dictionary. The key is the tool name. The values are
themselves dictionaries of settings and their values.
stderr: The stream receiving the error messages.
"""
_ValidateSettings(_msvs_validators, settings, stderr)
def ValidateMSBuildSettings(settings, stderr=sys.stderr):
"""Validates that the names of the settings are valid for MSBuild.
Args:
settings: A dictionary. The key is the tool name. The values are
themselves dictionaries of settings and their values.
stderr: The stream receiving the error messages.
"""
_ValidateSettings(_msbuild_validators, settings, stderr)
def _ValidateSettings(validators, settings, stderr):
"""Validates that the settings are valid for MSBuild or MSVS.
We currently only validate the names of the settings, not their values.
Args:
validators: A dictionary of tools and their validators.
settings: A dictionary. The key is the tool name. The values are
themselves dictionaries of settings and their values.
stderr: The stream receiving the error messages.
"""
for tool_name in settings:
if tool_name in validators:
tool_validators = validators[tool_name]
for setting, value in settings[tool_name].iteritems():
if setting in tool_validators:
try:
tool_validators[setting](value)
except ValueError, e:
print >> stderr, ('Warning: for %s/%s, %s' %
(tool_name, setting, e))
else:
_ValidateExclusionSetting(setting,
tool_validators,
('Warning: unrecognized setting %s/%s' %
(tool_name, setting)),
stderr)
else:
print >> stderr, ('Warning: unrecognized tool %s' % tool_name)
# MSVS and MBuild names of the tools.
_compile = _Tool('VCCLCompilerTool', 'ClCompile')
_link = _Tool('VCLinkerTool', 'Link')
_midl = _Tool('VCMIDLTool', 'Midl')
_rc = _Tool('VCResourceCompilerTool', 'ResourceCompile')
_lib = _Tool('VCLibrarianTool', 'Lib')
_manifest = _Tool('VCManifestTool', 'Manifest')
_masm = _Tool('MASM', 'MASM')
_AddTool(_compile)
_AddTool(_link)
_AddTool(_midl)
_AddTool(_rc)
_AddTool(_lib)
_AddTool(_manifest)
_AddTool(_masm)
# Add sections only found in the MSBuild settings.
_msbuild_validators[''] = {}
_msbuild_validators['ProjectReference'] = {}
_msbuild_validators['ManifestResourceCompile'] = {}
# Descriptions of the compiler options, i.e. VCCLCompilerTool in MSVS and
# ClCompile in MSBuild.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\cl.xml" for
# the schema of the MSBuild ClCompile settings.
# Options that have the same name in MSVS and MSBuild
_Same(_compile, 'AdditionalIncludeDirectories', _folder_list) # /I
_Same(_compile, 'AdditionalOptions', _string_list)
_Same(_compile, 'AdditionalUsingDirectories', _folder_list) # /AI
_Same(_compile, 'AssemblerListingLocation', _file_name) # /Fa
_Same(_compile, 'BrowseInformationFile', _file_name)
_Same(_compile, 'BufferSecurityCheck', _boolean) # /GS
_Same(_compile, 'DisableLanguageExtensions', _boolean) # /Za
_Same(_compile, 'DisableSpecificWarnings', _string_list) # /wd
_Same(_compile, 'EnableFiberSafeOptimizations', _boolean) # /GT
_Same(_compile, 'EnablePREfast', _boolean) # /analyze Visible='false'
_Same(_compile, 'ExpandAttributedSource', _boolean) # /Fx
_Same(_compile, 'FloatingPointExceptions', _boolean) # /fp:except
_Same(_compile, 'ForceConformanceInForLoopScope', _boolean) # /Zc:forScope
_Same(_compile, 'ForcedIncludeFiles', _file_list) # /FI
_Same(_compile, 'ForcedUsingFiles', _file_list) # /FU
_Same(_compile, 'GenerateXMLDocumentationFiles', _boolean) # /doc
_Same(_compile, 'IgnoreStandardIncludePath', _boolean) # /X
_Same(_compile, 'MinimalRebuild', _boolean) # /Gm
_Same(_compile, 'OmitDefaultLibName', _boolean) # /Zl
_Same(_compile, 'OmitFramePointers', _boolean) # /Oy
_Same(_compile, 'PreprocessorDefinitions', _string_list) # /D
_Same(_compile, 'ProgramDataBaseFileName', _file_name) # /Fd
_Same(_compile, 'RuntimeTypeInfo', _boolean) # /GR
_Same(_compile, 'ShowIncludes', _boolean) # /showIncludes
_Same(_compile, 'SmallerTypeCheck', _boolean) # /RTCc
_Same(_compile, 'StringPooling', _boolean) # /GF
_Same(_compile, 'SuppressStartupBanner', _boolean) # /nologo
_Same(_compile, 'TreatWChar_tAsBuiltInType', _boolean) # /Zc:wchar_t
_Same(_compile, 'UndefineAllPreprocessorDefinitions', _boolean) # /u
_Same(_compile, 'UndefinePreprocessorDefinitions', _string_list) # /U
_Same(_compile, 'UseFullPaths', _boolean) # /FC
_Same(_compile, 'WholeProgramOptimization', _boolean) # /GL
_Same(_compile, 'XMLDocumentationFileName', _file_name)
_Same(_compile, 'AssemblerOutput',
_Enumeration(['NoListing',
'AssemblyCode', # /FA
'All', # /FAcs
'AssemblyAndMachineCode', # /FAc
'AssemblyAndSourceCode'])) # /FAs
_Same(_compile, 'BasicRuntimeChecks',
_Enumeration(['Default',
'StackFrameRuntimeCheck', # /RTCs
'UninitializedLocalUsageCheck', # /RTCu
'EnableFastChecks'])) # /RTC1
_Same(_compile, 'BrowseInformation',
_Enumeration(['false',
'true', # /FR
'true'])) # /Fr
_Same(_compile, 'CallingConvention',
_Enumeration(['Cdecl', # /Gd
'FastCall', # /Gr
'StdCall', # /Gz
'VectorCall'])) # /Gv
_Same(_compile, 'CompileAs',
_Enumeration(['Default',
'CompileAsC', # /TC
'CompileAsCpp'])) # /TP
_Same(_compile, 'DebugInformationFormat',
_Enumeration(['', # Disabled
'OldStyle', # /Z7
None,
'ProgramDatabase', # /Zi
'EditAndContinue'])) # /ZI
_Same(_compile, 'EnableEnhancedInstructionSet',
_Enumeration(['NotSet',
'StreamingSIMDExtensions', # /arch:SSE
'StreamingSIMDExtensions2', # /arch:SSE2
'AdvancedVectorExtensions', # /arch:AVX (vs2012+)
'NoExtensions', # /arch:IA32 (vs2012+)
# This one only exists in the new msbuild format.
'AdvancedVectorExtensions2', # /arch:AVX2 (vs2013r2+)
]))
_Same(_compile, 'ErrorReporting',
_Enumeration(['None', # /errorReport:none
'Prompt', # /errorReport:prompt
'Queue'], # /errorReport:queue
new=['Send'])) # /errorReport:send"
_Same(_compile, 'ExceptionHandling',
_Enumeration(['false',
'Sync', # /EHsc
'Async'], # /EHa
new=['SyncCThrow'])) # /EHs
_Same(_compile, 'FavorSizeOrSpeed',
_Enumeration(['Neither',
'Speed', # /Ot
'Size'])) # /Os
_Same(_compile, 'FloatingPointModel',
_Enumeration(['Precise', # /fp:precise
'Strict', # /fp:strict
'Fast'])) # /fp:fast
_Same(_compile, 'InlineFunctionExpansion',
_Enumeration(['Default',
'OnlyExplicitInline', # /Ob1
'AnySuitable'], # /Ob2
new=['Disabled'])) # /Ob0
_Same(_compile, 'Optimization',
_Enumeration(['Disabled', # /Od
'MinSpace', # /O1
'MaxSpeed', # /O2
'Full'])) # /Ox
_Same(_compile, 'RuntimeLibrary',
_Enumeration(['MultiThreaded', # /MT
'MultiThreadedDebug', # /MTd
'MultiThreadedDLL', # /MD
'MultiThreadedDebugDLL'])) # /MDd
_Same(_compile, 'StructMemberAlignment',
_Enumeration(['Default',
'1Byte', # /Zp1
'2Bytes', # /Zp2
'4Bytes', # /Zp4
'8Bytes', # /Zp8
'16Bytes'])) # /Zp16
_Same(_compile, 'WarningLevel',
_Enumeration(['TurnOffAllWarnings', # /W0
'Level1', # /W1
'Level2', # /W2
'Level3', # /W3
'Level4'], # /W4
new=['EnableAllWarnings'])) # /Wall
# Options found in MSVS that have been renamed in MSBuild.
_Renamed(_compile, 'EnableFunctionLevelLinking', 'FunctionLevelLinking',
_boolean) # /Gy
_Renamed(_compile, 'EnableIntrinsicFunctions', 'IntrinsicFunctions',
_boolean) # /Oi
_Renamed(_compile, 'KeepComments', 'PreprocessKeepComments', _boolean) # /C
_Renamed(_compile, 'ObjectFile', 'ObjectFileName', _file_name) # /Fo
_Renamed(_compile, 'OpenMP', 'OpenMPSupport', _boolean) # /openmp
_Renamed(_compile, 'PrecompiledHeaderThrough', 'PrecompiledHeaderFile',
_file_name) # Used with /Yc and /Yu
_Renamed(_compile, 'PrecompiledHeaderFile', 'PrecompiledHeaderOutputFile',
_file_name) # /Fp
_Renamed(_compile, 'UsePrecompiledHeader', 'PrecompiledHeader',
_Enumeration(['NotUsing', # VS recognized '' for this value too.
'Create', # /Yc
'Use'])) # /Yu
_Renamed(_compile, 'WarnAsError', 'TreatWarningAsError', _boolean) # /WX
_ConvertedToAdditionalOption(_compile, 'DefaultCharIsUnsigned', '/J')
# MSVS options not found in MSBuild.
_MSVSOnly(_compile, 'Detect64BitPortabilityProblems', _boolean)
_MSVSOnly(_compile, 'UseUnicodeResponseFiles', _boolean)
# MSBuild options not found in MSVS.
_MSBuildOnly(_compile, 'BuildingInIDE', _boolean)
_MSBuildOnly(_compile, 'CompileAsManaged',
_Enumeration([], new=['false',
'true'])) # /clr
_MSBuildOnly(_compile, 'CreateHotpatchableImage', _boolean) # /hotpatch
_MSBuildOnly(_compile, 'MultiProcessorCompilation', _boolean) # /MP
_MSBuildOnly(_compile, 'PreprocessOutputPath', _string) # /Fi
_MSBuildOnly(_compile, 'ProcessorNumber', _integer) # the number of processors
_MSBuildOnly(_compile, 'TrackerLogDirectory', _folder_name)
_MSBuildOnly(_compile, 'TreatSpecificWarningsAsErrors', _string_list) # /we
_MSBuildOnly(_compile, 'UseUnicodeForAssemblerListing', _boolean) # /FAu
# Defines a setting that needs very customized processing
_CustomGeneratePreprocessedFile(_compile, 'GeneratePreprocessedFile')
# Directives for converting MSVS VCLinkerTool to MSBuild Link.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\link.xml" for
# the schema of the MSBuild Link settings.
# Options that have the same name in MSVS and MSBuild
_Same(_link, 'AdditionalDependencies', _file_list)
_Same(_link, 'AdditionalLibraryDirectories', _folder_list) # /LIBPATH
# /MANIFESTDEPENDENCY:
_Same(_link, 'AdditionalManifestDependencies', _file_list)
_Same(_link, 'AdditionalOptions', _string_list)
_Same(_link, 'AddModuleNamesToAssembly', _file_list) # /ASSEMBLYMODULE
_Same(_link, 'AllowIsolation', _boolean) # /ALLOWISOLATION
_Same(_link, 'AssemblyLinkResource', _file_list) # /ASSEMBLYLINKRESOURCE
_Same(_link, 'BaseAddress', _string) # /BASE
_Same(_link, 'CLRUnmanagedCodeCheck', _boolean) # /CLRUNMANAGEDCODECHECK
_Same(_link, 'DelayLoadDLLs', _file_list) # /DELAYLOAD
_Same(_link, 'DelaySign', _boolean) # /DELAYSIGN
_Same(_link, 'EmbedManagedResourceFile', _file_list) # /ASSEMBLYRESOURCE
_Same(_link, 'EnableUAC', _boolean) # /MANIFESTUAC
_Same(_link, 'EntryPointSymbol', _string) # /ENTRY
_Same(_link, 'ForceSymbolReferences', _file_list) # /INCLUDE
_Same(_link, 'FunctionOrder', _file_name) # /ORDER
_Same(_link, 'GenerateDebugInformation', _boolean) # /DEBUG
_Same(_link, 'GenerateMapFile', _boolean) # /MAP
_Same(_link, 'HeapCommitSize', _string)
_Same(_link, 'HeapReserveSize', _string) # /HEAP
_Same(_link, 'IgnoreAllDefaultLibraries', _boolean) # /NODEFAULTLIB
_Same(_link, 'IgnoreEmbeddedIDL', _boolean) # /IGNOREIDL
_Same(_link, 'ImportLibrary', _file_name) # /IMPLIB
_Same(_link, 'KeyContainer', _file_name) # /KEYCONTAINER
_Same(_link, 'KeyFile', _file_name) # /KEYFILE
_Same(_link, 'ManifestFile', _file_name) # /ManifestFile
_Same(_link, 'MapExports', _boolean) # /MAPINFO:EXPORTS
_Same(_link, 'MapFileName', _file_name)
_Same(_link, 'MergedIDLBaseFileName', _file_name) # /IDLOUT
_Same(_link, 'MergeSections', _string) # /MERGE
_Same(_link, 'MidlCommandFile', _file_name) # /MIDL
_Same(_link, 'ModuleDefinitionFile', _file_name) # /DEF
_Same(_link, 'OutputFile', _file_name) # /OUT
_Same(_link, 'PerUserRedirection', _boolean)
_Same(_link, 'Profile', _boolean) # /PROFILE
_Same(_link, 'ProfileGuidedDatabase', _file_name) # /PGD
_Same(_link, 'ProgramDatabaseFile', _file_name) # /PDB
_Same(_link, 'RegisterOutput', _boolean)
_Same(_link, 'SetChecksum', _boolean) # /RELEASE
_Same(_link, 'StackCommitSize', _string)
_Same(_link, 'StackReserveSize', _string) # /STACK
_Same(_link, 'StripPrivateSymbols', _file_name) # /PDBSTRIPPED
_Same(_link, 'SupportUnloadOfDelayLoadedDLL', _boolean) # /DELAY:UNLOAD
_Same(_link, 'SuppressStartupBanner', _boolean) # /NOLOGO
_Same(_link, 'SwapRunFromCD', _boolean) # /SWAPRUN:CD
_Same(_link, 'TurnOffAssemblyGeneration', _boolean) # /NOASSEMBLY
_Same(_link, 'TypeLibraryFile', _file_name) # /TLBOUT
_Same(_link, 'TypeLibraryResourceID', _integer) # /TLBID
_Same(_link, 'UACUIAccess', _boolean) # /uiAccess='true'
_Same(_link, 'Version', _string) # /VERSION
_Same(_link, 'EnableCOMDATFolding', _newly_boolean) # /OPT:ICF
_Same(_link, 'FixedBaseAddress', _newly_boolean) # /FIXED
_Same(_link, 'LargeAddressAware', _newly_boolean) # /LARGEADDRESSAWARE
_Same(_link, 'OptimizeReferences', _newly_boolean) # /OPT:REF
_Same(_link, 'RandomizedBaseAddress', _newly_boolean) # /DYNAMICBASE
_Same(_link, 'TerminalServerAware', _newly_boolean) # /TSAWARE
_subsystem_enumeration = _Enumeration(
['NotSet',
'Console', # /SUBSYSTEM:CONSOLE
'Windows', # /SUBSYSTEM:WINDOWS
'Native', # /SUBSYSTEM:NATIVE
'EFI Application', # /SUBSYSTEM:EFI_APPLICATION
'EFI Boot Service Driver', # /SUBSYSTEM:EFI_BOOT_SERVICE_DRIVER
'EFI ROM', # /SUBSYSTEM:EFI_ROM
'EFI Runtime', # /SUBSYSTEM:EFI_RUNTIME_DRIVER
'WindowsCE'], # /SUBSYSTEM:WINDOWSCE
new=['POSIX']) # /SUBSYSTEM:POSIX
_target_machine_enumeration = _Enumeration(
['NotSet',
'MachineX86', # /MACHINE:X86
None,
'MachineARM', # /MACHINE:ARM
'MachineEBC', # /MACHINE:EBC
'MachineIA64', # /MACHINE:IA64
None,
'MachineMIPS', # /MACHINE:MIPS
'MachineMIPS16', # /MACHINE:MIPS16
'MachineMIPSFPU', # /MACHINE:MIPSFPU
'MachineMIPSFPU16', # /MACHINE:MIPSFPU16
None,
None,
None,
'MachineSH4', # /MACHINE:SH4
None,
'MachineTHUMB', # /MACHINE:THUMB
'MachineX64']) # /MACHINE:X64
_Same(_link, 'AssemblyDebug',
_Enumeration(['',
'true', # /ASSEMBLYDEBUG
'false'])) # /ASSEMBLYDEBUG:DISABLE
_Same(_link, 'CLRImageType',
_Enumeration(['Default',
'ForceIJWImage', # /CLRIMAGETYPE:IJW
'ForcePureILImage', # /Switch="CLRIMAGETYPE:PURE
'ForceSafeILImage'])) # /Switch="CLRIMAGETYPE:SAFE
_Same(_link, 'CLRThreadAttribute',
_Enumeration(['DefaultThreadingAttribute', # /CLRTHREADATTRIBUTE:NONE
'MTAThreadingAttribute', # /CLRTHREADATTRIBUTE:MTA
'STAThreadingAttribute'])) # /CLRTHREADATTRIBUTE:STA
_Same(_link, 'DataExecutionPrevention',
_Enumeration(['',
'false', # /NXCOMPAT:NO
'true'])) # /NXCOMPAT
_Same(_link, 'Driver',
_Enumeration(['NotSet',
'Driver', # /Driver
'UpOnly', # /DRIVER:UPONLY
'WDM'])) # /DRIVER:WDM
_Same(_link, 'LinkTimeCodeGeneration',
_Enumeration(['Default',
'UseLinkTimeCodeGeneration', # /LTCG
'PGInstrument', # /LTCG:PGInstrument
'PGOptimization', # /LTCG:PGOptimize
'PGUpdate'])) # /LTCG:PGUpdate
_Same(_link, 'ShowProgress',
_Enumeration(['NotSet',
'LinkVerbose', # /VERBOSE
'LinkVerboseLib'], # /VERBOSE:Lib
new=['LinkVerboseICF', # /VERBOSE:ICF
'LinkVerboseREF', # /VERBOSE:REF
'LinkVerboseSAFESEH', # /VERBOSE:SAFESEH
'LinkVerboseCLR'])) # /VERBOSE:CLR
_Same(_link, 'SubSystem', _subsystem_enumeration)
_Same(_link, 'TargetMachine', _target_machine_enumeration)
_Same(_link, 'UACExecutionLevel',
_Enumeration(['AsInvoker', # /level='asInvoker'
'HighestAvailable', # /level='highestAvailable'
'RequireAdministrator'])) # /level='requireAdministrator'
_Same(_link, 'MinimumRequiredVersion', _string)
_Same(_link, 'TreatLinkerWarningAsErrors', _boolean) # /WX
# Options found in MSVS that have been renamed in MSBuild.
_Renamed(_link, 'ErrorReporting', 'LinkErrorReporting',
_Enumeration(['NoErrorReport', # /ERRORREPORT:NONE
'PromptImmediately', # /ERRORREPORT:PROMPT
'QueueForNextLogin'], # /ERRORREPORT:QUEUE
new=['SendErrorReport'])) # /ERRORREPORT:SEND
_Renamed(_link, 'IgnoreDefaultLibraryNames', 'IgnoreSpecificDefaultLibraries',
_file_list) # /NODEFAULTLIB
_Renamed(_link, 'ResourceOnlyDLL', 'NoEntryPoint', _boolean) # /NOENTRY
_Renamed(_link, 'SwapRunFromNet', 'SwapRunFromNET', _boolean) # /SWAPRUN:NET
_Moved(_link, 'GenerateManifest', '', _boolean)
_Moved(_link, 'IgnoreImportLibrary', '', _boolean)
_Moved(_link, 'LinkIncremental', '', _newly_boolean)
_Moved(_link, 'LinkLibraryDependencies', 'ProjectReference', _boolean)
_Moved(_link, 'UseLibraryDependencyInputs', 'ProjectReference', _boolean)
# MSVS options not found in MSBuild.
_MSVSOnly(_link, 'OptimizeForWindows98', _newly_boolean)
_MSVSOnly(_link, 'UseUnicodeResponseFiles', _boolean)
# MSBuild options not found in MSVS.
_MSBuildOnly(_link, 'BuildingInIDE', _boolean)
_MSBuildOnly(_link, 'ImageHasSafeExceptionHandlers', _boolean) # /SAFESEH
_MSBuildOnly(_link, 'LinkDLL', _boolean) # /DLL Visible='false'
_MSBuildOnly(_link, 'LinkStatus', _boolean) # /LTCG:STATUS
_MSBuildOnly(_link, 'PreventDllBinding', _boolean) # /ALLOWBIND
_MSBuildOnly(_link, 'SupportNobindOfDelayLoadedDLL', _boolean) # /DELAY:NOBIND
_MSBuildOnly(_link, 'TrackerLogDirectory', _folder_name)
_MSBuildOnly(_link, 'MSDOSStubFileName', _file_name) # /STUB Visible='false'
_MSBuildOnly(_link, 'SectionAlignment', _integer) # /ALIGN
_MSBuildOnly(_link, 'SpecifySectionAttributes', _string) # /SECTION
_MSBuildOnly(_link, 'ForceFileOutput',
_Enumeration([], new=['Enabled', # /FORCE
# /FORCE:MULTIPLE
'MultiplyDefinedSymbolOnly',
'UndefinedSymbolOnly'])) # /FORCE:UNRESOLVED
_MSBuildOnly(_link, 'CreateHotPatchableImage',
_Enumeration([], new=['Enabled', # /FUNCTIONPADMIN
'X86Image', # /FUNCTIONPADMIN:5
'X64Image', # /FUNCTIONPADMIN:6
'ItaniumImage'])) # /FUNCTIONPADMIN:16
_MSBuildOnly(_link, 'CLRSupportLastError',
_Enumeration([], new=['Enabled', # /CLRSupportLastError
'Disabled', # /CLRSupportLastError:NO
# /CLRSupportLastError:SYSTEMDLL
'SystemDlls']))
# Directives for converting VCResourceCompilerTool to ResourceCompile.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\rc.xml" for
# the schema of the MSBuild ResourceCompile settings.
_Same(_rc, 'AdditionalOptions', _string_list)
_Same(_rc, 'AdditionalIncludeDirectories', _folder_list) # /I
_Same(_rc, 'Culture', _Integer(msbuild_base=16))
_Same(_rc, 'IgnoreStandardIncludePath', _boolean) # /X
_Same(_rc, 'PreprocessorDefinitions', _string_list) # /D
_Same(_rc, 'ResourceOutputFileName', _string) # /fo
_Same(_rc, 'ShowProgress', _boolean) # /v
# There is no UI in VisualStudio 2008 to set the following properties.
# However they are found in CL and other tools. Include them here for
# completeness, as they are very likely to have the same usage pattern.
_Same(_rc, 'SuppressStartupBanner', _boolean) # /nologo
_Same(_rc, 'UndefinePreprocessorDefinitions', _string_list) # /u
# MSBuild options not found in MSVS.
_MSBuildOnly(_rc, 'NullTerminateStrings', _boolean) # /n
_MSBuildOnly(_rc, 'TrackerLogDirectory', _folder_name)
# Directives for converting VCMIDLTool to Midl.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\midl.xml" for
# the schema of the MSBuild Midl settings.
_Same(_midl, 'AdditionalIncludeDirectories', _folder_list) # /I
_Same(_midl, 'AdditionalOptions', _string_list)
_Same(_midl, 'CPreprocessOptions', _string) # /cpp_opt
_Same(_midl, 'ErrorCheckAllocations', _boolean) # /error allocation
_Same(_midl, 'ErrorCheckBounds', _boolean) # /error bounds_check
_Same(_midl, 'ErrorCheckEnumRange', _boolean) # /error enum
_Same(_midl, 'ErrorCheckRefPointers', _boolean) # /error ref
_Same(_midl, 'ErrorCheckStubData', _boolean) # /error stub_data
_Same(_midl, 'GenerateStublessProxies', _boolean) # /Oicf
_Same(_midl, 'GenerateTypeLibrary', _boolean)
_Same(_midl, 'HeaderFileName', _file_name) # /h
_Same(_midl, 'IgnoreStandardIncludePath', _boolean) # /no_def_idir
_Same(_midl, 'InterfaceIdentifierFileName', _file_name) # /iid
_Same(_midl, 'MkTypLibCompatible', _boolean) # /mktyplib203
_Same(_midl, 'OutputDirectory', _string) # /out
_Same(_midl, 'PreprocessorDefinitions', _string_list) # /D
_Same(_midl, 'ProxyFileName', _file_name) # /proxy
_Same(_midl, 'RedirectOutputAndErrors', _file_name) # /o
_Same(_midl, 'SuppressStartupBanner', _boolean) # /nologo
_Same(_midl, 'TypeLibraryName', _file_name) # /tlb
_Same(_midl, 'UndefinePreprocessorDefinitions', _string_list) # /U
_Same(_midl, 'WarnAsError', _boolean) # /WX
_Same(_midl, 'DefaultCharType',
_Enumeration(['Unsigned', # /char unsigned
'Signed', # /char signed
'Ascii'])) # /char ascii7
_Same(_midl, 'TargetEnvironment',
_Enumeration(['NotSet',
'Win32', # /env win32
'Itanium', # /env ia64
'X64'])) # /env x64
_Same(_midl, 'EnableErrorChecks',
_Enumeration(['EnableCustom',
'None', # /error none
'All'])) # /error all
_Same(_midl, 'StructMemberAlignment',
_Enumeration(['NotSet',
'1', # Zp1
'2', # Zp2
'4', # Zp4
'8'])) # Zp8
_Same(_midl, 'WarningLevel',
_Enumeration(['0', # /W0
'1', # /W1
'2', # /W2
'3', # /W3
'4'])) # /W4
_Renamed(_midl, 'DLLDataFileName', 'DllDataFileName', _file_name) # /dlldata
_Renamed(_midl, 'ValidateParameters', 'ValidateAllParameters',
_boolean) # /robust
# MSBuild options not found in MSVS.
_MSBuildOnly(_midl, 'ApplicationConfigurationMode', _boolean) # /app_config
_MSBuildOnly(_midl, 'ClientStubFile', _file_name) # /cstub
_MSBuildOnly(_midl, 'GenerateClientFiles',
_Enumeration([], new=['Stub', # /client stub
'None'])) # /client none
_MSBuildOnly(_midl, 'GenerateServerFiles',
_Enumeration([], new=['Stub', # /client stub
'None'])) # /client none
_MSBuildOnly(_midl, 'LocaleID', _integer) # /lcid DECIMAL
_MSBuildOnly(_midl, 'ServerStubFile', _file_name) # /sstub
_MSBuildOnly(_midl, 'SuppressCompilerWarnings', _boolean) # /no_warn
_MSBuildOnly(_midl, 'TrackerLogDirectory', _folder_name)
_MSBuildOnly(_midl, 'TypeLibFormat',
_Enumeration([], new=['NewFormat', # /newtlb
'OldFormat'])) # /oldtlb
# Directives for converting VCLibrarianTool to Lib.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\lib.xml" for
# the schema of the MSBuild Lib settings.
_Same(_lib, 'AdditionalDependencies', _file_list)
_Same(_lib, 'AdditionalLibraryDirectories', _folder_list) # /LIBPATH
_Same(_lib, 'AdditionalOptions', _string_list)
_Same(_lib, 'ExportNamedFunctions', _string_list) # /EXPORT
_Same(_lib, 'ForceSymbolReferences', _string) # /INCLUDE
_Same(_lib, 'IgnoreAllDefaultLibraries', _boolean) # /NODEFAULTLIB
_Same(_lib, 'IgnoreSpecificDefaultLibraries', _file_list) # /NODEFAULTLIB
_Same(_lib, 'ModuleDefinitionFile', _file_name) # /DEF
_Same(_lib, 'OutputFile', _file_name) # /OUT
_Same(_lib, 'SuppressStartupBanner', _boolean) # /NOLOGO
_Same(_lib, 'UseUnicodeResponseFiles', _boolean)
_Same(_lib, 'LinkTimeCodeGeneration', _boolean) # /LTCG
_Same(_lib, 'TargetMachine', _target_machine_enumeration)
# TODO(jeanluc) _link defines the same value that gets moved to
# ProjectReference. We may want to validate that they are consistent.
_Moved(_lib, 'LinkLibraryDependencies', 'ProjectReference', _boolean)
_MSBuildOnly(_lib, 'DisplayLibrary', _string) # /LIST Visible='false'
_MSBuildOnly(_lib, 'ErrorReporting',
_Enumeration([], new=['PromptImmediately', # /ERRORREPORT:PROMPT
'QueueForNextLogin', # /ERRORREPORT:QUEUE
'SendErrorReport', # /ERRORREPORT:SEND
'NoErrorReport'])) # /ERRORREPORT:NONE
_MSBuildOnly(_lib, 'MinimumRequiredVersion', _string)
_MSBuildOnly(_lib, 'Name', _file_name) # /NAME
_MSBuildOnly(_lib, 'RemoveObjects', _file_list) # /REMOVE
_MSBuildOnly(_lib, 'SubSystem', _subsystem_enumeration)
_MSBuildOnly(_lib, 'TrackerLogDirectory', _folder_name)
_MSBuildOnly(_lib, 'TreatLibWarningAsErrors', _boolean) # /WX
_MSBuildOnly(_lib, 'Verbose', _boolean)
# Directives for converting VCManifestTool to Mt.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\mt.xml" for
# the schema of the MSBuild Lib settings.
# Options that have the same name in MSVS and MSBuild
_Same(_manifest, 'AdditionalManifestFiles', _file_list) # /manifest
_Same(_manifest, 'AdditionalOptions', _string_list)
_Same(_manifest, 'AssemblyIdentity', _string) # /identity:
_Same(_manifest, 'ComponentFileName', _file_name) # /dll
_Same(_manifest, 'GenerateCatalogFiles', _boolean) # /makecdfs
_Same(_manifest, 'InputResourceManifests', _string) # /inputresource
_Same(_manifest, 'OutputManifestFile', _file_name) # /out
_Same(_manifest, 'RegistrarScriptFile', _file_name) # /rgs
_Same(_manifest, 'ReplacementsFile', _file_name) # /replacements
_Same(_manifest, 'SuppressStartupBanner', _boolean) # /nologo
_Same(_manifest, 'TypeLibraryFile', _file_name) # /tlb:
_Same(_manifest, 'UpdateFileHashes', _boolean) # /hashupdate
_Same(_manifest, 'UpdateFileHashesSearchPath', _file_name)
_Same(_manifest, 'VerboseOutput', _boolean) # /verbose
# Options that have moved location.
_MovedAndRenamed(_manifest, 'ManifestResourceFile',
'ManifestResourceCompile',
'ResourceOutputFileName',
_file_name)
_Moved(_manifest, 'EmbedManifest', '', _boolean)
# MSVS options not found in MSBuild.
_MSVSOnly(_manifest, 'DependencyInformationFile', _file_name)
_MSVSOnly(_manifest, 'UseFAT32Workaround', _boolean)
_MSVSOnly(_manifest, 'UseUnicodeResponseFiles', _boolean)
# MSBuild options not found in MSVS.
_MSBuildOnly(_manifest, 'EnableDPIAwareness', _boolean)
_MSBuildOnly(_manifest, 'GenerateCategoryTags', _boolean) # /category
_MSBuildOnly(_manifest, 'ManifestFromManagedAssembly',
_file_name) # /managedassemblyname
_MSBuildOnly(_manifest, 'OutputResourceManifests', _string) # /outputresource
_MSBuildOnly(_manifest, 'SuppressDependencyElement', _boolean) # /nodependency
_MSBuildOnly(_manifest, 'TrackerLogDirectory', _folder_name)
# Directives for MASM.
# See "$(VCTargetsPath)\BuildCustomizations\masm.xml" for the schema of the
# MSBuild MASM settings.
# Options that have the same name in MSVS and MSBuild.
_Same(_masm, 'UseSafeExceptionHandlers', _boolean) # /safeseh
| mit | -5,818,770,755,195,642,000 | -1,102,749,538,477,993,300 | 40.099453 | 80 | 0.64924 | false |
Glf9832/AjkSpider | AjkSpider/spiders/AjkZsSpider.py | 1 | 2002 | # -*- coding: utf-8 -*-
import scrapy
from AjkSpider.items import AjkZsspiderItem
class AjkzsspiderSpider(scrapy.Spider):
name = "AjkZsSpider"
allowed_domains = ["zs.fang.anjuke.com"]
start_urls = ['http://zs.fang.anjuke.com/']
def parse(self, response):
subSelector = response.xpath('//div[@class="item-mod"]')
# items = []
for sub in subSelector:
item = AjkZsspiderItem()
if sub.xpath('./div[@class="infos"]/div[@class="lp-name"]/h3/a/text()') == []:
item['loupanname'] = [u'无']
else:
item['loupanname'] = sub.xpath('./div[@class="infos"]/div[@class="lp-name"]/h3/a/text()').extract()
if sub.xpath('./div[@class="infos"]/div[@class="lp-name"]/h3/a/@href') == []:
item['url'] = [u'无']
else:
item['url'] = sub.xpath('./div[@class="infos"]/div[@class="lp-name"]/h3/a/@href').extract()
if sub.xpath('./div[@class="infos"]/p[@class="address"]/a/text()') == []:
item['address'] = [u'无']
else:
item['address'] = sub.xpath('./div[@class="infos"]/p[@class="address"]/a/text()').extract()
if sub.xpath('./div[@class="infos"]/div[@class="data-brief"]/a/text()') == []:
item['brief'] = [u'暂无动态']
else:
item['brief'] = sub.xpath('./div[@class="infos"]/div[@class="data-brief"]/a/text()').extract()
if sub.xpath('./div[@class="favor-pos"]/p[@class="price"]/span/text()') == []:
item['price'] = [u'0']
else:
item['price'] = sub.xpath('./div[@class="favor-pos"]/p[@class="price"]/span/text()').extract()
# items.append(item)
yield item
nextpage = response.xpath('//div[@class="list-page"]/div[@class="pagination"]/a[@class="next-page next-link"]/@href')[0].extract()
yield scrapy.Request(url=nextpage,callback=self.parse)
| gpl-3.0 | -3,502,293,404,289,637,000 | -9,184,260,195,879,288,000 | 44.181818 | 138 | 0.515091 | false |
NightKev/fireplace | fireplace/managers.py | 2 | 7529 | from hearthstone.enums import GameTag
from . import enums
class Manager(object):
def __init__(self, obj):
self.obj = obj
self.observers = []
def __getitem__(self, tag):
if self.map.get(tag):
return getattr(self.obj, self.map[tag], 0)
raise KeyError
def __setitem__(self, tag, value):
setattr(self.obj, self.map[tag], value)
def __iter__(self):
for k in self.map:
if self.map[k]:
yield k
def get(self, k, default=None):
return self[k] if k in self.map else default
def items(self):
for k, v in self.map.items():
if v is not None:
yield k, self[k]
def register(self, observer):
self.observers.append(observer)
def update(self, tags):
for k, v in tags.items():
if self.map.get(k) is not None:
self[k] = v
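# Editor's note (illustrative, not part of the original source): a Manager maps
# GameTag keys onto attributes of the wrapped entity through the `map` dicts
# defined below, so tags can be read and written generically. A hedged sketch,
# assuming `some_card` is any entity wrapped by CardManager:
#
#   manager = CardManager(some_card)
#   zone = manager[GameTag.ZONE]          # reads some_card.zone
#   manager.update({GameTag.DAMAGE: 2})   # writes some_card.damage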
class GameManager(Manager):
map = {
GameTag.CARDTYPE: "type",
GameTag.NEXT_STEP: "next_step",
GameTag.NUM_MINIONS_KILLED_THIS_TURN: "minions_killed_this_turn",
GameTag.PROPOSED_ATTACKER: "proposed_attacker",
GameTag.PROPOSED_DEFENDER: "proposed_defender",
GameTag.STATE: "state",
GameTag.STEP: "step",
GameTag.TURN: "turn",
GameTag.ZONE: "zone",
}
def __init__(self, obj):
super().__init__(obj)
self.counter = 1
obj.entity_id = self.counter
def action_start(self, type, source, index, target):
for observer in self.observers:
observer.action_start(type, source, index, target)
def action_end(self, type, source):
for observer in self.observers:
observer.action_end(type, source)
def new_entity(self, entity):
self.counter += 1
entity.entity_id = self.counter
for observer in self.observers:
observer.new_entity(entity)
def start_game(self):
for observer in self.observers:
observer.start_game()
def step(self, step, next_step=None):
for observer in self.observers:
observer.game_step(step, next_step)
self.obj.step = step
if next_step is not None:
self.obj.next_step = next_step
def turn(self, player):
for observer in self.observers:
observer.turn(player)
class BaseObserver:
def action_start(self, type, source, index, target):
pass
def action_end(self, type, source):
pass
def game_step(self, step, next_step):
pass
def new_entity(self, entity):
pass
	def start_game(self):
pass
def turn(self, player):
pass
class PlayerManager(Manager):
map = {
GameTag.CANT_DRAW: "cant_draw",
GameTag.CARDTYPE: "type",
GameTag.COMBO_ACTIVE: "combo",
GameTag.CONTROLLER: "controller",
GameTag.CURRENT_PLAYER: "current_player",
GameTag.CURRENT_SPELLPOWER: "spellpower",
GameTag.EMBRACE_THE_SHADOW: "healing_as_damage",
GameTag.FATIGUE: "fatigue_counter",
GameTag.FIRST_PLAYER: "first_player",
GameTag.HEALING_DOUBLE: "healing_double",
GameTag.HERO_ENTITY: "hero",
GameTag.LAST_CARD_PLAYED: "last_card_played",
GameTag.MAXHANDSIZE: "max_hand_size",
GameTag.MAXRESOURCES: "max_resources",
GameTag.NUM_CARDS_DRAWN_THIS_TURN: "cards_drawn_this_turn",
GameTag.NUM_CARDS_PLAYED_THIS_TURN: "cards_played_this_turn",
GameTag.NUM_MINIONS_PLAYED_THIS_TURN: "minions_played_this_turn",
GameTag.NUM_MINIONS_PLAYER_KILLED_THIS_TURN: "minions_killed_this_turn",
GameTag.NUM_TIMES_HERO_POWER_USED_THIS_GAME: "times_hero_power_used_this_game",
GameTag.OVERLOAD_LOCKED: "overload_locked",
GameTag.OVERLOAD_OWED: "overloaded",
GameTag.PLAYSTATE: "playstate",
GameTag.RESOURCES: "max_mana",
GameTag.RESOURCES_USED: "used_mana",
GameTag.SPELLPOWER_DOUBLE: "spellpower_double",
GameTag.STARTHANDSIZE: "start_hand_size",
GameTag.HERO_POWER_DOUBLE: "hero_power_double",
GameTag.TEMP_RESOURCES: "temp_mana",
GameTag.TIMEOUT: "timeout",
GameTag.TURN_START: "turn_start",
enums.CANT_OVERLOAD: "cant_overload",
}
CARD_ATTRIBUTE_MAP = {
GameTag.ADJACENT_BUFF: "adjacent_buff",
GameTag.ARMOR: "armor",
GameTag.ATK: "atk",
GameTag.ATTACKING: "attacking",
GameTag.ATTACHED: "owner",
GameTag.AURA: "aura",
GameTag.BATTLECRY: "has_battlecry",
GameTag.CANNOT_ATTACK_HEROES: "cannot_attack_heroes",
GameTag.CANT_ATTACK: "cant_attack",
GameTag.CANT_BE_ATTACKED: "cant_be_attacked",
GameTag.CANT_BE_DAMAGED: "cant_be_damaged",
GameTag.CANT_BE_TARGETED_BY_ABILITIES: "cant_be_targeted_by_abilities",
GameTag.CANT_BE_TARGETED_BY_HERO_POWERS: "cant_be_targeted_by_hero_powers",
GameTag.CANT_BE_TARGETED_BY_OPPONENTS: "cant_be_targeted_by_opponents",
GameTag.CANT_PLAY: "cant_play",
GameTag.CARD_ID: "id",
GameTag.CARD_TARGET: "target",
GameTag.CARDNAME: "name",
GameTag.CARDRACE: "race",
GameTag.CARDTYPE: "type",
GameTag.CHARGE: "charge",
GameTag.CLASS: "card_class",
GameTag.COMBO: "has_combo",
GameTag.CONTROLLER: "controller",
GameTag.COST: "cost",
GameTag.CREATOR: "creator",
GameTag.DAMAGE: "damage",
GameTag.DEATHRATTLE: "has_deathrattle",
GameTag.DEFENDING: "defending",
GameTag.DIVINE_SHIELD: "divine_shield",
GameTag.DURABILITY: "max_durability",
GameTag.EMBRACE_THE_SHADOW: "healing_as_damage",
GameTag.ENRAGED: "enrage",
GameTag.EXHAUSTED: "exhausted",
GameTag.EXTRA_DEATHRATTLES: "extra_deathrattles",
GameTag.FORGETFUL: "forgetful",
GameTag.FROZEN: "frozen",
GameTag.HEALING_DOUBLE: "healing_double",
GameTag.HEALTH: "max_health",
GameTag.HEALTH_MINIMUM: "min_health",
GameTag.HEAVILY_ARMORED: "heavily_armored",
GameTag.HEROPOWER_ADDITIONAL_ACTIVATIONS: "additional_activations",
GameTag.HEROPOWER_DAMAGE: "heropower_damage",
GameTag.INCOMING_DAMAGE_MULTIPLIER: "incoming_damage_multiplier",
GameTag.ImmuneToSpellpower: "immune_to_spellpower",
GameTag.IMMUNE_WHILE_ATTACKING: "immune_while_attacking",
GameTag.INSPIRE: "has_inspire",
GameTag.NUM_ATTACKS_THIS_TURN: "num_attacks",
GameTag.NUM_TURNS_IN_PLAY: "turns_in_play",
GameTag.TAG_ONE_TURN_EFFECT: "one_turn_effect",
GameTag.OVERLOAD: "overload",
GameTag.PARENT_CARD: "parent_card",
GameTag.POISONOUS: "poisonous",
GameTag.POWERED_UP: "powered_up",
GameTag.RARITY: "rarity",
GameTag.RECEIVES_DOUBLE_SPELLDAMAGE_BONUS: "receives_double_spelldamage_bonus",
GameTag.SECRET: "secret",
GameTag.SHADOWFORM: "shadowform",
GameTag.SHOULDEXITCOMBAT: "should_exit_combat",
GameTag.SILENCED: "silenced",
GameTag.SPELLPOWER: "spellpower",
GameTag.SPELLPOWER_DOUBLE: "spellpower_double",
GameTag.SPELLS_COST_HEALTH: "spells_cost_health",
GameTag.STEALTH: "stealthed",
GameTag.TAG_AI_MUST_PLAY: "autocast",
GameTag.HERO_POWER_DOUBLE: "hero_power_double",
GameTag.TAUNT: "taunt",
GameTag.WINDFURY: "windfury",
GameTag.ZONE: "zone",
GameTag.ZONE_POSITION: "zone_position",
enums.ALWAYS_WINS_BRAWLS: "always_wins_brawls",
enums.EXTRA_BATTLECRIES: "extra_battlecries",
enums.KILLED_THIS_TURN: "killed_this_turn",
GameTag.AFFECTED_BY_SPELL_POWER: None,
GameTag.ARTISTNAME: None,
GameTag.AttackVisualType: None,
GameTag.CARD_SET: None,
GameTag.CARDTEXT_INHAND: None,
GameTag.CardTextInPlay: None,
GameTag.Collectible: None,
GameTag.DevState: None,
GameTag.ELITE: None,
GameTag.ENCHANTMENT_IDLE_VISUAL: None,
GameTag.ENCHANTMENT_BIRTH_VISUAL: None,
GameTag.EVIL_GLOW: None,
GameTag.FACTION: None,
GameTag.FLAVORTEXT: None,
GameTag.FREEZE: None,
GameTag.HealTarget: None,
GameTag.HIDE_COST: None,
GameTag.HOW_TO_EARN: None,
GameTag.HOW_TO_EARN_GOLDEN: None,
GameTag.InvisibleDeathrattle: None,
GameTag.MORPH: None,
GameTag.SILENCE: None,
GameTag.SUMMONED: None,
GameTag.SPARE_PART: None,
GameTag.SHOWN_HERO_POWER: None,
GameTag.TARGETING_ARROW_TEXT: None,
GameTag.TOPDECK: None,
GameTag.TAG_AI_MUST_PLAY: None,
GameTag.TRIGGER_VISUAL: None,
}
class CardManager(Manager):
map = CARD_ATTRIBUTE_MAP
| agpl-3.0 | 4,870,430,410,606,891,000 | -6,812,337,924,634,071,000 | 28.996016 | 81 | 0.727587 | false |
MKuranowski/WarsawGTFS | static/downloader.py | 1 | 11293 | from dataclasses import dataclass
from datetime import date, datetime, timedelta
from operator import itemgetter
from typing import Dict, List, Optional, Set, Tuple
from pytz import timezone
import libarchive.public
import logging
import ftplib
import json
import re
import os
from .const import DIR_DOWNLOAD, DIR_CONVERTED, FTP_ADDR
from .util import ensure_dir_exists
"""
Module containing code responsible for synchronising feeds with the FTP server.
Calculates which feeds need to be converted, gets and decompresses required files.
Information about specific feeds is passed around with the FileInfo objects.
Main functionality is exposed in the sync_files() and append_modtimes() procedures.
"""
_logger = logging.getLogger("WarsawGTFS.downloader")
@dataclass
class FileInfo:
"""Info about a source file"""
__slots__ = ("path", "version", "modtime", "start", "end", "is_converted")
path: str
version: str
modtime: str
start: date
end: date
is_converted: bool
def read_modtimes() -> Dict[str, str]:
"""Reads {DIR_CONVERTED}/modtimes.json to determine modtimes of currently converted files.
"""
modtimes_file = os.path.join(DIR_CONVERTED, "modtimes.json")
# File doesn't exist: no known files
if not os.path.isfile(modtimes_file):
return {}
with open(str(modtimes_file), mode="r") as f:
return json.load(f)
def write_modtimes(x: Dict[str, str]):
"""Writes new content to the {DIR_CONVERTED}/modtimes.json"""
with open(os.path.join(DIR_CONVERTED, "modtimes.json"), "w") as f:
json.dump(x, f, indent=2)
def append_modtimes(new: FileInfo):
"""Once a converted feed has been written to {DIR_CONVERTED}, call this function.
    Adds info about the just-converted feed to {DIR_CONVERTED}/modtimes.json.
"""
c = read_modtimes()
c[new.version] = new.modtime
write_modtimes(c)
def which_versions_ok(required_feeds: List[FileInfo],
current_modtimes: Dict[str, str]) -> Set[str]:
"""Returns a set of versions which don't need to bee re-converted."""
ok_versions: Set[str] = set()
for i in required_feeds:
curr_modtime = current_modtimes.get(i.version, "")
if curr_modtime == i.modtime:
ok_versions.add(i.version)
return ok_versions
def remove_unused_converted(versions_ok: Set[str], curr_modtimes: Dict[str, str]):
"""Removes outdated and unnecessary feeds from {DIR_CONVERTED},
updates {DIR_CONVERTED}/modtimes.json
"""
_logger.info("removing unnecessary files from DIR_CONVERTED")
# Calculate which files to remove
all_files = set(os.listdir(DIR_CONVERTED))
expected_files = {"modtimes.json"}
expected_files.update(i + ".zip" for i in versions_ok)
unexpected_files = all_files.difference(expected_files)
    # Remove unexpected files
for f in unexpected_files:
os.remove(os.path.join(DIR_CONVERTED, f))
# Remove versions from curr_modtimes
new_modtimes = {k: v for k, v in curr_modtimes.items() if k in versions_ok}
write_modtimes(new_modtimes)
def list_files(ftp: ftplib.FTP, max_files: int = 5,
start_date: Optional[date] = None) -> List[FileInfo]:
"""Lists all files required to create a valid feed.
At most {max_files} will be returned (defaults to 5).
    Required files are evaluated starting from start_date, which defaults to 'today' in Warsaw.
"""
_logger.info("calculating required files")
files = ftp.mlsd()
fn_match = re.compile(r"^RA\d{6}\.7z")
# Ignore non-schedule files & sort files by date
files = sorted(
filter(lambda i: fn_match.fullmatch(str(i[0])), files),
key=itemgetter(0)
)
# User hasn't specified when the feed should be valid: start from 'today' (in Warsaw)
if start_date is None:
start_date = datetime.now(timezone("Europe/Warsaw")).date()
active_files: List[FileInfo] = []
# Check which files should be converted
for idx, (file_name, file_meta) in enumerate(files):
file_start = datetime.strptime(file_name, "RA%y%m%d.7z").date()
# Get last day when file is active (next file - 1 day)
try:
file_end = datetime.strptime(str(files[idx + 1][0]), "RA%y%m%d.7z").date()
file_end -= timedelta(days=1)
except IndexError:
file_end = date.max
# We don't need anything for previous dates
if file_end < start_date:
continue
active_files.append(FileInfo(
path=file_name, version=file_name[:-3], modtime=file_meta["modify"],
start=file_start, end=file_end, is_converted=False
))
# Limit files to max_files
active_files = active_files[:max_files]
# Last file shouldn't have an end_date
active_files[-1].end = date.max
return active_files
def list_single_file(ftp: ftplib.FTP, for_day: Optional[date] = None) -> FileInfo:
"""Returns FileInfo about file valid in the given day (or today in Warsaw)"""
# Variables from loop
file_name = ""
file_meta = {}
file_start = date.min
# List files from FTP
files = ftp.mlsd()
fn_match = re.compile(r"^RA\d{6}\.7z")
# Ignore non-schedule files & sort files by date
files = sorted(
filter(lambda i: fn_match.fullmatch(str(i[0])), files),
key=itemgetter(0)
)
# Ensure for_day is not None
if for_day is None:
for_day = datetime.now(timezone("Europe/Warsaw")).date()
# Iterate over files and to get one valid for `for_day`
max_idx = len(files) - 1
for idx, (file_name, file_meta) in enumerate(files):
file_start = datetime.strptime(file_name, "RA%y%m%d.7z").date()
# If user requested file before the very first file - raise an error
if idx == 0 and for_day < file_start:
raise FileNotFoundError(f"No files for day {for_day.strftime('%Y-%m-%d')}")
# Last file always matches
if idx != max_idx:
next_file_start = datetime.strptime(str(files[idx + 1][0]), "RA%y%m%d.7z").date()
# Next file starts past for_day, so current file matches - break out of the loop
if next_file_start > for_day:
break
# guard against no matches
if not file_name:
raise FileNotFoundError(f"Error matching files for day {for_day.strftime('%Y-%m-%d')}")
    # file_name, file_start and file_meta now contain info about the matched file
return FileInfo(
path=file_name, version=file_name[:-3], modtime=file_meta["modify"],
start=file_start, end=date.max, is_converted=False
)
def get_and_decompress(ftp: ftplib.FTP, i: FileInfo):
"""Requests given file from the FTP server, and decompresses the included TXT file.
Provided FileInfo object will be modified as such:
- finfo.path points to the decompressed txt file
- finfo.is_converted is False
"""
# Download the 7z file into DIR_DOWNLOAD
archive_local_path = os.path.join(DIR_DOWNLOAD, i.path)
txt_local_path = os.path.join(DIR_DOWNLOAD, (i.version + ".txt"))
_logger.debug(f"Downloading file for version {i.version}")
with open(archive_local_path, mode="wb") as f:
ftp.retrbinary("RETR " + str(i.path), f.write)
# Open the 7z file and decompress the txt file
_logger.debug(f"Decompressing file for version {i.version}")
with libarchive.public.file_reader(str(archive_local_path)) as arch:
# Iterate over each file inside the archive
for arch_file in arch:
name = arch_file.pathname.upper()
# Assert the file inside the archive is the TXT file we're looking for
if name != f"{i.version}.TXT":
continue
# Open the target txt file
with open(txt_local_path, mode="wb") as f:
# Decompress the TXT file block by block and save it to the reader
for block in arch_file.get_blocks():
f.write(block)
# only one TXT file should be inside the archive
break
else:
raise FileNotFoundError(f"no schedule file for ver {i.version!r} found inside "
f"archive {i.path!r}")
# Modify given FileInfo
i.path = txt_local_path
i.is_converted = False
# Remove downloaded archive
os.remove(archive_local_path)
def mark_as_converted(i: FileInfo):
"""Modifies given FileInfo object to mark it as already-converted.
    finfo.is_converted will be True, and finfo.path will point to the GTFS .zip.
"""
i.path = os.path.join(DIR_CONVERTED, (i.version + ".zip"))
i.is_converted = True
def sync_files(max_files: int = 5, start_date: Optional[date] = None, reparse_all: bool = False) \
-> Tuple[List[FileInfo], bool]:
"""Manages required source feeds.
1. Lists required source feeds
2. Determines which feeds were already converted.
3. Removes non-required and outdated feeds
    4. Downloads new/changed source feeds and extracts them.
Returns 2 values:
1. List of *all* required feeds
2. Whether any new files were downloaded
    Please call append_modtimes for each successfully converted file.
"""
# Ensure DIR_DOWNLOAD and DIR_CONVERTED exist
ensure_dir_exists(DIR_DOWNLOAD, clear=True)
ensure_dir_exists(DIR_CONVERTED, clear=False)
with ftplib.FTP(FTP_ADDR) as ftp:
ftp.login()
# List source feeds
required_feeds = list_files(ftp, max_files, start_date)
        # Determine which feeds were already converted
if not reparse_all:
current_modtimes = read_modtimes()
converted_versions = which_versions_ok(required_feeds, current_modtimes)
else:
current_modtimes = {}
converted_versions = set()
# Check if new files will be downloaded
new_files = sum(1 for i in required_feeds if i.version not in converted_versions)
# Clean-up {DIR_CONVERTED}
if new_files > 0:
_logger.info(f"new files to download&convert: {new_files}")
remove_unused_converted(converted_versions, current_modtimes)
else:
_logger.info("no new files")
# Download txt files or mark feeds as converted
for i in required_feeds:
if i.version in converted_versions:
mark_as_converted(i)
else:
get_and_decompress(ftp, i)
return required_feeds, (new_files > 0)
def sync_single_file(valid_day: Optional[date] = None) -> FileInfo:
"""Manages required feed to create schedules for given day.
Downloads and decompresses detected required file.
Returns the FileInfo object containing data bout the required feed.
Call append_modtimes after successfuly converting this feed.
"""
# Ensure DIR_DOWNLOAD and DIR_CONVERTED exist
ensure_dir_exists(DIR_DOWNLOAD, clear=True)
with ftplib.FTP(FTP_ADDR) as ftp:
ftp.login()
# Check which file to download
finfo = list_single_file(ftp, valid_day)
# Download said file
_logger.info(f"Downloading feed for {finfo.version}")
get_and_decompress(ftp, finfo)
return finfo
| mit | -827,972,517,933,055,700 | 2,077,003,163,513,820,200 | 33.117825 | 98 | 0.644736 | false |
nextgis/nextgisweb | nextgisweb/vector_layer/test/test_source_error.py | 1 | 6886 | # -*- coding: utf-8 -*-
from __future__ import division, absolute_import, print_function, unicode_literals
from uuid import uuid4
import six
import pytest
from osgeo import ogr
from nextgisweb.models import DBSession
from nextgisweb.auth import User
from nextgisweb.compat import Path
from nextgisweb.core.exception import ValidationError
from nextgisweb.vector_layer import VectorLayer
from nextgisweb.spatial_ref_sys import SRS
path = Path(__file__).parent / 'data' / 'errors'
# List of creation test cases: file name, creation options, and final checks.
CREATE_TEST_PARAMS = (
(
'geom-collection.geojson',
dict(),
dict(exception=ValidationError),
),
(
'geom-collection.geojson',
dict(fix_errors='SAFE'),
dict(feature_count=2),
),
(
'incomplete-linestring.geojson',
dict(fix_errors='LOSSY'),
dict(exception=ValidationError),
),
(
'incomplete-linestring.geojson',
dict(skip_errors=True),
dict(feature_count=1)
),
(
'incomplete-polygon.geojson',
dict(fix_errors='LOSSY'),
dict(exception=ValidationError),
),
(
'mixed-feature-geom.geojson',
dict(geometry_type='POINT', skip_other_geometry_types=True),
dict(geometry_type='MULTIPOINT', feature_count=2),
),
(
# The second MULTIPOINT geometry must be converted to a SINGLE geometry.
# The first POINT should be taken in LOSSY mode.
'mixed-feature-geom.geojson',
dict(
geometry_type='POINT', skip_other_geometry_types=True,
fix_errors='LOSSY', is_multi=False),
dict(geometry_type='POINT', feature_count=2),
),
(
# The layer has only one LINESTRING geometry and it's valid.
'mixed-feature-geom.geojson',
dict(geometry_type='LINESTRING', skip_other_geometry_types=True),
dict(geometry_type='LINESTRING', feature_count=1),
),
(
'non-multi-geom.geojson',
dict(),
dict(geometry_type='MULTIPOINT', feature_count=2),
),
(
'null-geom.geojson',
dict(skip_other_geometry_types=True),
dict(geometry_type='POINT', feature_count=1),
),
(
# Geometries with topology errors are accepted.
'self-intersection.geojson',
dict(fix_errors=None),
dict(feature_count=1),
),
# (
# 'self-intersection.geojson',
# dict(fix_errors='LOSSY'),
# dict(geometry_type='POLYGON', feature_count=1),
# ),
(
'single-geom-collection.geojson',
dict(geometry_type='POINT', fix_errors='SAFE'),
dict(geometry_type='POINT', feature_count=1),
),
(
'single-geom-collection.geojson',
dict(geometry_type='POINT', skip_other_geometry_types=True),
dict(geometry_type='POINT', feature_count=0),
),
(
'single-geom-collection.geojson',
        dict(geometry_type='POINT', fix_errors='LOSSY'),
dict(geometry_type='POINT', feature_count=1),
),
(
'single-geom-collection.geojson',
dict(geometry_type='LINESTRING', fix_errors='SAFE'),
dict(exception=ValidationError),
),
(
'single-geom-collection.geojson',
dict(geometry_type='LINESTRING', fix_errors='LOSSY'),
dict(geometry_type='LINESTRING', feature_count=1),
),
(
        # It's not possible to choose geometry type here.
'empty.geojson',
dict(),
dict(exception=ValidationError),
),
(
# An empty layer with MULTIPOINTZ must be created.
'empty.geojson',
dict(geometry_type='POINT', is_multi=True, has_z=True),
dict(geometry_type='MULTIPOINTZ', feature_count=0),
),
(
# The unclosed ring must be reported as an error.
'unclosed-ring.geojson',
dict(),
dict(exception=ValidationError),
),
(
        # The unclosed ring must be closed in SAFE mode, QGIS does it silently.
'unclosed-ring.geojson',
dict(fix_errors='SAFE'),
dict(feature_count=1),
),
(
# Just check loading of POINTZ layers.
'pointz.geojson',
dict(geometry_type='POINT'),
dict(geometry_type='POINTZ', feature_count=1),
),
(
# Explicit setting of geometry type.
'pointz.geojson',
dict(geometry_type='POINT', is_multi=False, has_z=True),
dict(geometry_type='POINTZ', feature_count=1),
),
(
# Z coordinate should be stripped here.
'pointz.geojson',
dict(geometry_type='POINT', has_z=False, fix_errors='LOSSY'),
dict(geometry_type='POINT', feature_count=1),
),
)
@pytest.mark.parametrize('filename, options, checks', CREATE_TEST_PARAMS)
def test_create(filename, options, checks, ngw_resource_group, ngw_txn):
obj = VectorLayer(
parent_id=ngw_resource_group, display_name='vector_layer',
owner_user=User.by_keyname('administrator'),
srs=SRS.filter_by(id=3857).one(),
tbl_uuid=six.text_type(uuid4().hex)
).persist()
src = str(path / filename)
ds = ogr.Open(src)
layer = ds.GetLayer(0)
geom_cast_params = dict(
geometry_type=options.get('geometry_type'),
is_multi=options.get('is_multi'),
has_z=options.get('has_z'))
def setup_and_load():
setup_kwargs = dict()
load_kwargs = dict()
if 'skip_other_geometry_types' in options:
setup_kwargs['skip_other_geometry_types'] = options['skip_other_geometry_types']
load_kwargs['skip_other_geometry_types'] = options['skip_other_geometry_types']
if 'fix_errors' in options:
load_kwargs['fix_errors'] = options['fix_errors']
if 'skip_errors' in options:
load_kwargs['skip_errors'] = options['skip_errors']
obj.setup_from_ogr(layer, geom_cast_params=geom_cast_params, **setup_kwargs)
obj.load_from_ogr(layer, **load_kwargs)
if 'exception' in checks:
with pytest.raises(checks['exception']):
setup_and_load()
DBSession.expunge(obj)
else:
setup_and_load()
DBSession.flush()
if 'geometry_type' in checks:
exp_geometry_type = checks['geometry_type']
assert obj.geometry_type == exp_geometry_type, \
"Expected geometry type was {} but actually got {}".format(
exp_geometry_type, obj.geometry_type)
if 'feature_count' in checks:
exp_feature_count = checks['feature_count']
query = obj.feature_query()
feature_count = query().total_count
assert feature_count == exp_feature_count, \
"Expected feature count was {} but got {}".format(
exp_feature_count, feature_count)
| gpl-3.0 | 1,569,897,126,478,394,000 | 669,306,744,465,341,600 | 29.334802 | 92 | 0.596573 | false |
glogiotatidis/compose | compose/container.py | 39 | 5882 | from __future__ import unicode_literals
from __future__ import absolute_import
import six
from functools import reduce
from .const import LABEL_CONTAINER_NUMBER, LABEL_SERVICE
class Container(object):
"""
Represents a Docker container, constructed from the output of
GET /containers/:id:/json.
"""
def __init__(self, client, dictionary, has_been_inspected=False):
self.client = client
self.dictionary = dictionary
self.has_been_inspected = has_been_inspected
@classmethod
def from_ps(cls, client, dictionary, **kwargs):
"""
Construct a container object from the output of GET /containers/json.
"""
new_dictionary = {
'Id': dictionary['Id'],
'Image': dictionary['Image'],
'Name': '/' + get_container_name(dictionary),
}
return cls(client, new_dictionary, **kwargs)
@classmethod
def from_id(cls, client, id):
return cls(client, client.inspect_container(id))
@classmethod
def create(cls, client, **options):
response = client.create_container(**options)
return cls.from_id(client, response['Id'])
@property
def id(self):
return self.dictionary['Id']
@property
def image(self):
return self.dictionary['Image']
@property
def image_config(self):
return self.client.inspect_image(self.image)
@property
def short_id(self):
return self.id[:10]
@property
def name(self):
return self.dictionary['Name'][1:]
@property
def name_without_project(self):
return '{0}_{1}'.format(self.labels.get(LABEL_SERVICE), self.number)
@property
def number(self):
number = self.labels.get(LABEL_CONTAINER_NUMBER)
if not number:
raise ValueError("Container {0} does not have a {1} label".format(
self.short_id, LABEL_CONTAINER_NUMBER))
return int(number)
@property
def ports(self):
self.inspect_if_not_inspected()
return self.get('NetworkSettings.Ports') or {}
@property
def human_readable_ports(self):
def format_port(private, public):
if not public:
return private
return '{HostIp}:{HostPort}->{private}'.format(
private=private, **public[0])
return ', '.join(format_port(*item)
for item in sorted(six.iteritems(self.ports)))
@property
def labels(self):
return self.get('Config.Labels') or {}
@property
def log_config(self):
return self.get('HostConfig.LogConfig') or None
@property
def human_readable_state(self):
if self.is_running:
return 'Ghost' if self.get('State.Ghost') else 'Up'
else:
return 'Exit %s' % self.get('State.ExitCode')
@property
def human_readable_command(self):
entrypoint = self.get('Config.Entrypoint') or []
cmd = self.get('Config.Cmd') or []
return ' '.join(entrypoint + cmd)
@property
def environment(self):
return dict(var.split("=", 1) for var in self.get('Config.Env') or [])
@property
def is_running(self):
return self.get('State.Running')
def get(self, key):
"""Return a value from the container or None if the value is not set.
:param key: a string using dotted notation for nested dictionary
lookups
"""
self.inspect_if_not_inspected()
def get_value(dictionary, key):
return (dictionary or {}).get(key)
return reduce(get_value, key.split('.'), self.dictionary)
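    # Illustrative example (editor's addition): dotted keys walk the nested
    # `docker inspect` dictionary, returning None for any missing segment,
    # e.g. container.get('State.Running') or container.get('NetworkSettings.Ports').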
def get_local_port(self, port, protocol='tcp'):
port = self.ports.get("%s/%s" % (port, protocol))
return "{HostIp}:{HostPort}".format(**port[0]) if port else None
def start(self, **options):
return self.client.start(self.id, **options)
def stop(self, **options):
return self.client.stop(self.id, **options)
def kill(self, **options):
return self.client.kill(self.id, **options)
def restart(self, **options):
return self.client.restart(self.id, **options)
def remove(self, **options):
return self.client.remove_container(self.id, **options)
def inspect_if_not_inspected(self):
if not self.has_been_inspected:
self.inspect()
def wait(self):
return self.client.wait(self.id)
def logs(self, *args, **kwargs):
return self.client.logs(self.id, *args, **kwargs)
def inspect(self):
self.dictionary = self.client.inspect_container(self.id)
self.has_been_inspected = True
return self.dictionary
# TODO: only used by tests, move to test module
def links(self):
links = []
for container in self.client.containers():
for name in container['Names']:
bits = name.split('/')
if len(bits) > 2 and bits[1] == self.name:
links.append(bits[2])
return links
def attach(self, *args, **kwargs):
return self.client.attach(self.id, *args, **kwargs)
def attach_socket(self, **kwargs):
return self.client.attach_socket(self.id, **kwargs)
def __repr__(self):
return '<Container: %s (%s)>' % (self.name, self.id[:6])
def __eq__(self, other):
if type(self) != type(other):
return False
return self.id == other.id
def __hash__(self):
return self.id.__hash__()
def get_container_name(container):
if not container.get('Name') and not container.get('Names'):
return None
# inspect
if 'Name' in container:
return container['Name']
# ps
shortest_name = min(container['Names'], key=lambda n: len(n.split('/')))
return shortest_name.split('/')[-1]
| apache-2.0 | 171,782,883,806,892,220 | 298,942,856,356,213,300 | 28.263682 | 78 | 0.591295 | false |
dsapandora/die_hard | diehard.py | 1 | 1608 | #!/usr/bin/env python
import roslib; roslib.load_manifest('DroneInteractiveEnviroment')
import rospy
import json
import requests
from std_msgs.msg import String
from geometry_msgs.msg import Twist
from ardrone_autonomy.msg import Navdata
class DieHard(object):
def __init__(self):
self.twist = Twist()
rospy.init_node('DIEHARD')
self.pub = rospy.Publisher('/cmd_vel', Twist, latch=True)
rospy.Subscriber('/ardrone/navdata', Navdata, self.update_value)
self.twist.linear.x = 0; # our forward speed
self.twist.linear.y = 0;
self.twist.linear.z = 0; # we can't use these!
self.twist.angular.x = 0; self.twist.angular.y = 0; # or these!
self.twist.angular.z = 0;
def update_value(self, msg):
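        # Editor's note: this callback packs the drone's velocity readings into
        # a query to a project-specific "am_gonna_die" web service and steers
        # the drone with the returned distance/latitude/longitude fields; the
        # endpoint address and JSON schema are assumptions read off the code below.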
self.value = str(msg.vx) +" "+str(msg.vy)+" "+str(msg.vz)
response = requests.get('http://10.100.100.104:5000/am_gonna_die?longitude=%s&latitude=%s&elevation=%s' % (msg.vx, msg.vy, msg.vz))
rospy.loginfo(self.value)
value = json.loads(response.text)
rospy.loginfo(value)
rospy.loginfo("response:"+ str(value['distance'])+" "+str(value['latitude'])+" "+str(value['longitude']))
self.twist.linear.x = float(value['latitude']*float(value['distance']))
self.twist.linear.y = float(value['longitude']*float(value['distance']))
def run(self):
r = rospy.Rate(10)
while not rospy.is_shutdown():
self.pub.publish(self.twist)
r.sleep()
if __name__=="__main__":
DieHard().run()
| gpl-3.0 | 8,380,405,933,862,906,000 | 1,488,618,507,667,072,500 | 35.545455 | 139 | 0.603856 | false |
AndrewPeelMV/Blender2.78c | 2.78/scripts/addons/rigify/rigs/pitchipoy/limbs/super_arm.py | 2 | 47129 | import bpy, re
from ..super_widgets import create_hand_widget
from .ui import create_script
from .limb_utils import *
from mathutils import Vector
from ....utils import copy_bone, flip_bone, put_bone, create_cube_widget
from ....utils import strip_org, make_deformer_name, create_widget
from ....utils import create_circle_widget, create_sphere_widget
from ....utils import MetarigError, make_mechanism_name, org
from ....utils import create_limb_widget, connected_children_names
from rna_prop_ui import rna_idprop_ui_prop_get
from ..super_widgets import create_ikarrow_widget
from math import trunc
extra_script = """
controls = [%s]
ctrl = '%s'
if is_selected( controls ):
layout.prop( pose_bones[ ctrl ], '["%s"]')
if '%s' in pose_bones[ctrl].keys():
layout.prop( pose_bones[ ctrl ], '["%s"]', slider = True )
"""
class Rig:
def __init__(self, obj, bone_name, params):
""" Initialize arm rig and key rig properties """
self.obj = obj
self.params = params
self.org_bones = list(
[bone_name] + connected_children_names(obj, bone_name)
)[:3] # The basic limb is the first 3 bones
self.segments = params.segments
self.bbones = params.bbones
self.limb_type = params.limb_type
self.rot_axis = params.rotation_axis
# Assign values to tweak/FK layers props if opted by user
if params.tweak_extra_layers:
self.tweak_layers = list(params.tweak_layers)
else:
self.tweak_layers = None
if params.fk_extra_layers:
self.fk_layers = list(params.fk_layers)
else:
self.fk_layers = None
def create_parent(self):
org_bones = self.org_bones
bpy.ops.object.mode_set(mode ='EDIT')
eb = self.obj.data.edit_bones
name = get_bone_name( strip_org( org_bones[0] ), 'mch', 'parent' )
mch = copy_bone( self.obj, org_bones[0], name )
orient_bone( self, eb[mch], 'y' )
eb[ mch ].length = eb[ org_bones[0] ].length / 4
eb[ mch ].parent = eb[ org_bones[0] ].parent
eb[ mch ].roll = 0.0
# Constraints
make_constraint( self, mch, {
'constraint' : 'COPY_ROTATION',
'subtarget' : 'root'
})
make_constraint( self, mch, {
'constraint' : 'COPY_SCALE',
'subtarget' : 'root'
})
# Limb Follow Driver
pb = self.obj.pose.bones
name = 'FK_limb_follow'
pb[ mch ][ name ] = 0.0
prop = rna_idprop_ui_prop_get( pb[ mch ], name, create = True )
prop["min"] = 0.0
prop["max"] = 1.0
prop["soft_min"] = 0.0
prop["soft_max"] = 1.0
prop["description"] = name
drv = pb[ mch ].constraints[ 0 ].driver_add("influence").driver
drv.type = 'AVERAGE'
var = drv.variables.new()
var.name = name
var.type = "SINGLE_PROP"
var.targets[0].id = self.obj
var.targets[0].data_path = pb[ mch ].path_from_id() + \
'[' + '"' + name + '"' + ']'
return mch
def create_tweak(self):
org_bones = self.org_bones
bpy.ops.object.mode_set(mode ='EDIT')
eb = self.obj.data.edit_bones
tweaks = {}
tweaks['ctrl'] = []
tweaks['mch' ] = []
# Create and parent mch and ctrl tweaks
for i,org in enumerate(org_bones):
#if (self.limb_type == 'paw'):
# idx_stop = len(org_bones)
#else:
# idx_stop = len(org_bones) - 1
if i < len(org_bones) - 1:
# if i < idx_stop:
# Create segments if specified
for j in range( self.segments ):
# MCH
name = get_bone_name( strip_org(org), 'mch', 'tweak' )
mch = copy_bone( self.obj, org, name )
# CTRL
name = get_bone_name( strip_org(org), 'ctrl', 'tweak' )
ctrl = copy_bone( self.obj, org, name )
eb[ mch ].length /= self.segments
eb[ ctrl ].length /= self.segments
# If we have more than one segments, place the head of the
# 2nd and onwards at the correct position
if j > 0:
put_bone(self.obj, mch, eb[ tweaks['mch' ][-1] ].tail)
put_bone(self.obj, ctrl, eb[ tweaks['ctrl'][-1] ].tail)
tweaks['ctrl'] += [ ctrl ]
tweaks['mch' ] += [ mch ]
# Parenting the tweak ctrls to mchs
eb[ mch ].parent = eb[ org ]
eb[ ctrl ].parent = eb[ mch ]
else: # Last limb bone - is not subdivided
name = get_bone_name( strip_org(org), 'mch', 'tweak' )
mch = copy_bone( self.obj, org_bones[i-1], name )
eb[ mch ].length = eb[org].length / 4
put_bone(
self.obj,
mch,
eb[org_bones[i-1]].tail
)
ctrl = get_bone_name( strip_org(org), 'ctrl', 'tweak' )
ctrl = copy_bone( self.obj, org, ctrl )
eb[ ctrl ].length = eb[org].length / 2
tweaks['mch'] += [ mch ]
tweaks['ctrl'] += [ ctrl ]
# Parenting the tweak ctrls to mchs
eb[ mch ].parent = eb[ org ]
eb[ ctrl ].parent = eb[ mch ]
# Scale to reduce widget size and maintain conventions!
for mch, ctrl in zip( tweaks['mch'], tweaks['ctrl'] ):
eb[ mch ].length /= 4
eb[ ctrl ].length /= 2
# Constraints
for i,b in enumerate( tweaks['mch'] ):
first = 0
middle = trunc( len( tweaks['mch'] ) / 2 )
last = len( tweaks['mch'] ) - 1
if i == first or i == middle:
make_constraint( self, b, {
'constraint' : 'COPY_SCALE',
'subtarget' : 'root'
})
elif i != last:
targets = []
dt_target_idx = middle
factor = 0
if i < middle:
targets = [first,middle]
else:
targets = [middle,last]
factor = self.segments
dt_target_idx = last
# Use copy transforms constraints to position each bone
# exactly in the location respective to its index (between
# the two edges)
make_constraint( self, b, {
'constraint' : 'COPY_TRANSFORMS',
'subtarget' : tweaks['ctrl'][targets[0]]
})
make_constraint( self, b, {
'constraint' : 'COPY_TRANSFORMS',
'subtarget' : tweaks['ctrl'][targets[1]],
'influence' : (i - factor) / self.segments
})
make_constraint( self, b, {
'constraint' : 'DAMPED_TRACK',
'subtarget' : tweaks['ctrl'][ dt_target_idx ],
})
# Ctrl bones Locks and Widgets
pb = self.obj.pose.bones
for t in tweaks['ctrl']:
pb[t].lock_rotation = True, False, True
pb[t].lock_scale = False, True, False
create_sphere_widget(self.obj, t, bone_transform_name=None)
if self.tweak_layers:
pb[t].bone.layers = self.tweak_layers
return tweaks
def create_def(self, tweaks):
org_bones = self.org_bones
bpy.ops.object.mode_set(mode ='EDIT')
eb = self.obj.data.edit_bones
def_bones = []
for i,org in enumerate(org_bones):
if i < len(org_bones) - 1:
# Create segments if specified
for j in range( self.segments ):
name = get_bone_name( strip_org(org), 'def' )
def_name = copy_bone( self.obj, org, name )
eb[ def_name ].length /= self.segments
# If we have more than one segment, place the 2nd and
# onwards on the tail of the previous bone
if j > 0:
put_bone(self.obj, def_name, eb[ def_bones[-1] ].tail)
def_bones += [ def_name ]
else:
name = get_bone_name( strip_org(org), 'def' )
def_name = copy_bone( self.obj, org, name )
def_bones.append( def_name )
# Parent deform bones
for i,b in enumerate( def_bones ):
if i > 0: # For all bones but the first (which has no parent)
eb[b].parent = eb[ def_bones[i-1] ] # to previous
eb[b].use_connect = True
# Constraint def to tweaks
for d,t in zip(def_bones, tweaks):
tidx = tweaks.index(t)
make_constraint( self, d, {
'constraint' : 'COPY_TRANSFORMS',
'subtarget' : t
})
if tidx != len(tweaks) - 1:
make_constraint( self, d, {
'constraint' : 'DAMPED_TRACK',
'subtarget' : tweaks[ tidx + 1 ],
})
make_constraint( self, d, {
'constraint' : 'STRETCH_TO',
'subtarget' : tweaks[ tidx + 1 ],
})
# Create bbone segments
for bone in def_bones[:-1]:
self.obj.data.bones[bone].bbone_segments = self.bbones
self.obj.data.bones[ def_bones[0] ].bbone_in = 0.0
self.obj.data.bones[ def_bones[-2] ].bbone_out = 0.0
self.obj.data.bones[ def_bones[-1] ].bbone_in = 0.0
self.obj.data.bones[ def_bones[-1] ].bbone_out = 0.0
# Rubber hose drivers
pb = self.obj.pose.bones
for i,t in enumerate( tweaks[1:-1] ):
# Create custom property on tweak bone to control rubber hose
name = 'rubber_tweak'
if i == trunc( len( tweaks[1:-1] ) / 2 ):
pb[t][name] = 0.0
else:
pb[t][name] = 1.0
prop = rna_idprop_ui_prop_get( pb[t], name, create=True )
prop["min"] = 0.0
prop["max"] = 2.0
prop["soft_min"] = 0.0
prop["soft_max"] = 1.0
prop["description"] = name
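# Drive each deform bone's bbone_in/bbone_out from the 'rubber_tweak'
# property of the neighbouring tweak controls set up above.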
for j,d in enumerate(def_bones[:-1]):
drvs = {}
if j != 0:
tidx = j
drvs[tidx] = self.obj.data.bones[d].driver_add("bbone_in").driver
if j != len( def_bones[:-1] ) - 1:
tidx = j + 1
drvs[tidx] = self.obj.data.bones[d].driver_add("bbone_out").driver
for d in drvs:
drv = drvs[d]
name = 'rubber_tweak'
drv.type = 'AVERAGE'
var = drv.variables.new()
var.name = name
var.type = "SINGLE_PROP"
var.targets[0].id = self.obj
var.targets[0].data_path = pb[tweaks[d]].path_from_id() + \
'[' + '"' + name + '"' + ']'
return def_bones
def create_ik(self, parent):
org_bones = self.org_bones
bpy.ops.object.mode_set(mode ='EDIT')
eb = self.obj.data.edit_bones
ctrl = get_bone_name( org_bones[0], 'ctrl', 'ik' )
mch_ik = get_bone_name( org_bones[0], 'mch', 'ik' )
mch_target = get_bone_name( org_bones[0], 'mch', 'ik_target' )
for o, ik in zip( org_bones, [ ctrl, mch_ik, mch_target ] ):
bone = copy_bone( self.obj, o, ik )
if org_bones.index(o) == len( org_bones ) - 1:
eb[ bone ].length /= 4
# Create MCH Stretch
mch_str = copy_bone(
self.obj,
org_bones[0],
get_bone_name( org_bones[0], 'mch', 'ik_stretch' )
)
eb[ mch_str ].tail = eb[ org_bones[-1] ].head
# Parenting
eb[ ctrl ].parent = eb[ parent ]
eb[ mch_str ].parent = eb[ parent ]
eb[ mch_ik ].parent = eb[ ctrl ]
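# Two-bone IK on the MCH chain, solved towards the ik_target MCH bone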
make_constraint( self, mch_ik, {
'constraint' : 'IK',
'subtarget' : mch_target,
'chain_count' : 2,
})
pb = self.obj.pose.bones
pb[ mch_ik ].ik_stretch = 0.1
pb[ ctrl ].ik_stretch = 0.1
# IK constraint Rotation locks
for axis in ['x','y','z']:
if axis != self.rot_axis:
setattr( pb[ mch_ik ], 'lock_ik_' + axis, True )
# Locks and Widget
pb[ ctrl ].lock_rotation = True, False, True
create_ikarrow_widget( self.obj, ctrl, bone_transform_name=None )
return { 'ctrl' : { 'limb' : ctrl },
'mch_ik' : mch_ik,
'mch_target' : mch_target,
'mch_str' : mch_str
}
def create_fk(self, parent):
org_bones = self.org_bones.copy()
bpy.ops.object.mode_set(mode ='EDIT')
eb = self.obj.data.edit_bones
ctrls = []
for o in org_bones:
bone = copy_bone( self.obj, o, get_bone_name( o, 'ctrl', 'fk' ) )
ctrls.append( bone )
# MCH
mch = copy_bone(
self.obj, org_bones[-1], get_bone_name( o, 'mch', 'fk' )
)
eb[ mch ].length /= 4
# Parenting
eb[ ctrls[0] ].parent = eb[ parent ]
eb[ ctrls[1] ].parent = eb[ ctrls[0] ]
eb[ ctrls[1] ].use_connect = True
eb[ ctrls[2] ].parent = eb[ mch ]
eb[ mch ].parent = eb[ ctrls[1] ]
eb[ mch ].use_connect = True
# Constrain MCH's scale to root
make_constraint( self, mch, {
'constraint' : 'COPY_SCALE',
'subtarget' : 'root'
})
# Locks and widgets
pb = self.obj.pose.bones
pb[ ctrls[2] ].lock_location = True, True, True
create_limb_widget( self.obj, ctrls[0] )
create_limb_widget( self.obj, ctrls[1] )
create_circle_widget(self.obj, ctrls[2], radius=0.4, head_tail=0.0)
for c in ctrls:
if self.fk_layers:
pb[c].bone.layers = self.fk_layers
return { 'ctrl' : ctrls, 'mch' : mch }
def org_parenting_and_switch(self, org, ik, fk, parent):
bpy.ops.object.mode_set(mode ='EDIT')
eb = self.obj.data.edit_bones
# re-parent ORGs in a connected chain
for i,o in enumerate(org):
if i > 0:
eb[o].parent = eb[ org[i-1] ]
if i <= len(org)-1:
eb[o].use_connect = True
bpy.ops.object.mode_set(mode ='OBJECT')
pb = self.obj.pose.bones
pb_parent = pb[ parent ]
# Create ik/fk switch property
pb_parent['IK/FK'] = 0.0
prop = rna_idprop_ui_prop_get( pb_parent, 'IK/FK', create=True )
prop["min"] = 0.0
prop["max"] = 1.0
prop["soft_min"] = 0.0
prop["soft_max"] = 1.0
prop["description"] = 'IK/FK Switch'
# Constrain org to IK and FK bones
iks = [ ik['ctrl']['limb'] ]
iks += [ ik[k] for k in [ 'mch_ik', 'mch_target'] ]
for o, i, f in zip( org, iks, fk ):
make_constraint( self, o, {
'constraint' : 'COPY_TRANSFORMS',
'subtarget' : i
})
make_constraint( self, o, {
'constraint' : 'COPY_TRANSFORMS',
'subtarget' : f
})
# Add driver to relevant constraint
drv = pb[o].constraints[-1].driver_add("influence").driver
drv.type = 'AVERAGE'
var = drv.variables.new()
var.name = prop.name
var.type = "SINGLE_PROP"
var.targets[0].id = self.obj
var.targets[0].data_path = \
pb_parent.path_from_id() + '['+ '"' + prop.name + '"' + ']'
def create_arm( self, bones):
org_bones = self.org_bones
bpy.ops.object.mode_set(mode='EDIT')
eb = self.obj.data.edit_bones
ctrl = get_bone_name( org_bones[2], 'ctrl', 'ik' )
# Create IK arm control
ctrl = copy_bone( self.obj, org_bones[2], ctrl )
# clear parent (so that rigify will parent to root)
eb[ ctrl ].parent = None
eb[ ctrl ].use_connect = False
# Parent
eb[ bones['ik']['mch_target'] ].parent = eb[ ctrl ]
eb[ bones['ik']['mch_target'] ].use_connect = False
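# The following MCH bones (socket/root/parent) implement the IK control's
# root/parent switching; the matching properties are added in create_drivers()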
# MCH for ik control
ctrl_socket = copy_bone(self.obj, org_bones[2], get_bone_name( org_bones[2], 'mch', 'ik_socket'))
eb[ctrl_socket].tail = eb[ctrl_socket].head + 0.8*(eb[ctrl_socket].tail-eb[ctrl_socket].head)
eb[ctrl_socket].parent = None
eb[ctrl].parent = eb[ctrl_socket]
ctrl_root = copy_bone(self.obj, org_bones[2], get_bone_name( org_bones[2], 'mch', 'ik_root'))
eb[ctrl_root].tail = eb[ctrl_root].head + 0.7*(eb[ctrl_root].tail-eb[ctrl_root].head)
eb[ctrl_root].use_connect = False
eb[ctrl_root].parent = eb['root']
if eb[org_bones[0]].parent:
arm_parent = eb[org_bones[0]].parent
ctrl_parent = copy_bone(self.obj, org_bones[2], get_bone_name( org_bones[2], 'mch', 'ik_parent'))
eb[ctrl_parent].tail = eb[ctrl_parent].head + 0.6*(eb[ctrl_parent].tail-eb[ctrl_parent].head)
eb[ctrl_parent].use_connect = False
eb[ctrl_parent].parent = eb[org_bones[0]].parent
else:
arm_parent = None
# Set up constraints
# Constrain ik ctrl to root / parent
make_constraint( self, ctrl_socket, {
'constraint' : 'COPY_TRANSFORMS',
'subtarget' : ctrl_root,
})
if arm_parent:
make_constraint( self, ctrl_socket, {
'constraint' : 'COPY_TRANSFORMS',
'subtarget' : ctrl_parent,
})
# Constrain mch target bone to the ik control and mch stretch
make_constraint( self, bones['ik']['mch_target'], {
'constraint' : 'COPY_LOCATION',
'subtarget' : bones['ik']['mch_str'],
'head_tail' : 1.0
})
# Constrain mch ik stretch bone to the ik control
make_constraint( self, bones['ik']['mch_str'], {
'constraint' : 'DAMPED_TRACK',
'subtarget' : ctrl,
})
make_constraint( self, bones['ik']['mch_str'], {
'constraint' : 'STRETCH_TO',
'subtarget' : ctrl,
})
make_constraint( self, bones['ik']['mch_str'], {
'constraint' : 'LIMIT_SCALE',
'use_min_y' : True,
'use_max_y' : True,
'max_y' : 1.05,
'owner_space' : 'LOCAL'
})
# Create ik/fk switch property
pb = self.obj.pose.bones
pb_parent = pb[ bones['parent'] ]
# Modify rotation mode for ik and tweak controls
pb[bones['ik']['ctrl']['limb']].rotation_mode = 'ZXY'
for b in bones['tweak']['ctrl']:
pb[b].rotation_mode = 'ZXY'
pb_parent['IK_Strertch'] = 1.0
prop = rna_idprop_ui_prop_get( pb_parent, 'IK_Strertch', create=True )
prop["min"] = 0.0
prop["max"] = 1.0
prop["soft_min"] = 0.0
prop["soft_max"] = 1.0
prop["description"] = 'IK Stretch'
# Add driver to limit scale constraint influence
b = bones['ik']['mch_str']
drv = pb[b].constraints[-1].driver_add("influence").driver
drv.type = 'SUM'
var = drv.variables.new()
var.name = prop.name
var.type = "SINGLE_PROP"
var.targets[0].id = self.obj
var.targets[0].data_path = \
pb_parent.path_from_id() + '['+ '"' + prop.name + '"' + ']'
drv_modifier = self.obj.animation_data.drivers[-1].modifiers[0]
drv_modifier.mode = 'POLYNOMIAL'
drv_modifier.poly_order = 1
drv_modifier.coefficients[0] = 1.0
drv_modifier.coefficients[1] = -1.0
# Create hand widget
create_hand_widget(self.obj, ctrl, bone_transform_name=None)
bones['ik']['ctrl']['terminal'] = [ ctrl ]
if arm_parent:
bones['ik']['mch_hand'] = [ctrl_socket, ctrl_root, ctrl_parent]
else:
bones['ik']['mch_hand'] = [ctrl_socket, ctrl_root]
return bones
def create_drivers(self, bones):
bpy.ops.object.mode_set(mode ='OBJECT')
pb = self.obj.pose.bones
ctrl = pb[bones['ik']['mch_hand'][0]]
props = [ "IK_follow", "root/parent" ]
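# 'IK_follow' mutes/unmutes the socket constraints, while 'root/parent'
# blends the IK socket between the root and the limb's parent bone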
for prop in props:
if prop == 'IK_follow':
ctrl[prop]=True
rna_prop = rna_idprop_ui_prop_get( ctrl, prop, create=True )
rna_prop["min"] = False
rna_prop["max"] = True
rna_prop["description"] = prop
drv = ctrl.constraints[ 0 ].driver_add("mute").driver
drv.type = 'AVERAGE'
var = drv.variables.new()
var.name = prop
var.type = "SINGLE_PROP"
var.targets[0].id = self.obj
var.targets[0].data_path = \
ctrl.path_from_id() + '['+ '"' + prop + '"' + ']'
drv_modifier = self.obj.animation_data.drivers[-1].modifiers[0]
drv_modifier.mode = 'POLYNOMIAL'
drv_modifier.poly_order = 1
drv_modifier.coefficients[0] = 1.0
drv_modifier.coefficients[1] = -1.0
if len(ctrl.constraints) > 1:
drv = ctrl.constraints[ 1 ].driver_add("mute").driver
drv.type = 'AVERAGE'
var = drv.variables.new()
var.name = prop
var.type = "SINGLE_PROP"
var.targets[0].id = self.obj
var.targets[0].data_path = \
ctrl.path_from_id() + '['+ '"' + prop + '"' + ']'
drv_modifier = self.obj.animation_data.drivers[-1].modifiers[0]
drv_modifier.mode = 'POLYNOMIAL'
drv_modifier.poly_order = 1
drv_modifier.coefficients[0] = 1.0
drv_modifier.coefficients[1] = -1.0
elif len(ctrl.constraints) > 1:
ctrl[prop]=0.0
rna_prop = rna_idprop_ui_prop_get( ctrl, prop, create=True )
rna_prop["min"] = 0.0
rna_prop["max"] = 1.0
rna_prop["soft_min"] = 0.0
rna_prop["soft_max"] = 1.0
rna_prop["description"] = prop
# drv = ctrl.constraints[ 0 ].driver_add("influence").driver
# drv.type = 'AVERAGE'
#
# var = drv.variables.new()
# var.name = prop
# var.type = "SINGLE_PROP"
# var.targets[0].id = self.obj
# var.targets[0].data_path = \
# ctrl.path_from_id() + '['+ '"' + prop + '"' + ']'
#
# drv_modifier = self.obj.animation_data.drivers[-1].modifiers[0]
#
# drv_modifier.mode = 'POLYNOMIAL'
# drv_modifier.poly_order = 1
# drv_modifier.coefficients[0] = 1.0
# drv_modifier.coefficients[1] = -1.0
drv = ctrl.constraints[ 1 ].driver_add("influence").driver
drv.type = 'AVERAGE'
var = drv.variables.new()
var.name = prop
var.type = "SINGLE_PROP"
var.targets[0].id = self.obj
var.targets[0].data_path = \
ctrl.path_from_id() + '['+ '"' + prop + '"' + ']'
def generate(self):
bpy.ops.object.mode_set(mode ='EDIT')
eb = self.obj.data.edit_bones
# Clear parents for org bones
for bone in self.org_bones[1:]:
eb[bone].use_connect = False
eb[bone].parent = None
bones = {}
# Create mch limb parent
bones['parent'] = self.create_parent()
bones['tweak'] = self.create_tweak()
bones['def'] = self.create_def( bones['tweak']['ctrl'] )
bones['ik'] = self.create_ik( bones['parent'] )
bones['fk'] = self.create_fk( bones['parent'] )
self.org_parenting_and_switch(
self.org_bones, bones['ik'], bones['fk']['ctrl'], bones['parent']
)
bones = self.create_arm( bones )
self.create_drivers( bones )
controls = [ bones['ik']['ctrl']['limb'], bones['ik']['ctrl']['terminal'][0] ]
# Create UI
controls_string = ", ".join(["'" + x + "'" for x in controls])
script = create_script( bones, 'arm' )
script += extra_script % (controls_string, bones['ik']['mch_hand'][0], 'IK_follow', 'root/parent', 'root/parent')
return [ script ]
def add_parameters(params):
""" Add the parameters of this rig type to the
RigifyParameters PropertyGroup
"""
items = [
('x', 'X', ''),
('y', 'Y', ''),
('z', 'Z', '')
]
params.rotation_axis = bpy.props.EnumProperty(
items = items,
name = "Rotation Axis",
default = 'x'
)
params.segments = bpy.props.IntProperty(
name = 'limb segments',
default = 2,
min = 1,
description = 'Number of segments'
)
params.bbones = bpy.props.IntProperty(
name = 'bbone segments',
default = 10,
min = 1,
description = 'Number of segments'
)
# Setting up extra layers for the FK and tweak
params.tweak_extra_layers = bpy.props.BoolProperty(
name = "tweak_extra_layers",
default = True,
description = ""
)
params.tweak_layers = bpy.props.BoolVectorProperty(
size = 32,
description = "Layers for the tweak controls to be on",
default = tuple( [ i == 1 for i in range(0, 32) ] )
)
# Setting up extra layers for the FK and tweak
params.fk_extra_layers = bpy.props.BoolProperty(
name = "fk_extra_layers",
default = True,
description = ""
)
params.fk_layers = bpy.props.BoolVectorProperty(
size = 32,
description = "Layers for the FK controls to be on",
default = tuple( [ i == 1 for i in range(0, 32) ] )
)
def parameters_ui(layout, params):
""" Create the ui for the rig parameters."""
# r = layout.row()
# r.prop(params, "limb_type")
r = layout.row()
r.prop(params, "rotation_axis")
r = layout.row()
r.prop(params, "segments")
r = layout.row()
r.prop(params, "bbones")
for layer in [ 'fk', 'tweak' ]:
r = layout.row()
r.prop(params, layer + "_extra_layers")
r.active = getattr(params, layer + "_extra_layers")
col = r.column(align=True)
row = col.row(align=True)
for i in range(8):
row.prop(params, layer + "_layers", index=i, toggle=True, text="")
row = col.row(align=True)
for i in range(16,24):
row.prop(params, layer + "_layers", index=i, toggle=True, text="")
col = r.column(align=True)
row = col.row(align=True)
for i in range(8,16):
row.prop(params, layer + "_layers", index=i, toggle=True, text="")
row = col.row(align=True)
for i in range(24,32):
row.prop(params, layer + "_layers", index=i, toggle=True, text="")
def create_sample(obj):
# generated by rigify.utils.write_metarig
bpy.ops.object.mode_set(mode='EDIT')
arm = obj.data
bones = {}
for _ in range(28):
arm.rigify_layers.add()
arm.rigify_layers[5].name = 'Fingers'
arm.rigify_layers[5].row = 5
arm.rigify_layers[6].name = 'Fingers (Tweak)'
arm.rigify_layers[6].row = 6
arm.rigify_layers[7].name = 'Arm.L (IK)'
arm.rigify_layers[7].row = 7
arm.rigify_layers[8].name = 'Arm.L (FK)'
arm.rigify_layers[8].row = 8
arm.rigify_layers[9].name = 'Arm.L (Tweak)'
arm.rigify_layers[9].row = 9
bone = arm.edit_bones.new('upper_arm.L')
bone.head[:] = 0.1953, 0.0267, 1.5846
bone.tail[:] = 0.4424, 0.0885, 1.4491
bone.roll = 2.0724
bone.use_connect = False
bones['upper_arm.L'] = bone.name
bone = arm.edit_bones.new('forearm.L')
bone.head[:] = 0.4424, 0.0885, 1.4491
bone.tail[:] = 0.6594, 0.0492, 1.3061
bone.roll = 2.1535
bone.use_connect = True
bone.parent = arm.edit_bones[bones['upper_arm.L']]
bones['forearm.L'] = bone.name
bone = arm.edit_bones.new('hand.L')
bone.head[:] = 0.6594, 0.0492, 1.3061
bone.tail[:] = 0.7234, 0.0412, 1.2585
bone.roll = 2.2103
bone.use_connect = True
bone.parent = arm.edit_bones[bones['forearm.L']]
bones['hand.L'] = bone.name
bone = arm.edit_bones.new('palm.01.L')
bone.head[:] = 0.6921, 0.0224, 1.2882
bone.tail[:] = 0.7464, 0.0051, 1.2482
bone.roll = -2.4928
bone.use_connect = False
bone.parent = arm.edit_bones[bones['hand.L']]
bones['palm.01.L'] = bone.name
bone = arm.edit_bones.new('palm.02.L')
bone.head[:] = 0.6970, 0.0389, 1.2877
bone.tail[:] = 0.7518, 0.0277, 1.2487
bone.roll = -2.5274
bone.use_connect = False
bone.parent = arm.edit_bones[bones['hand.L']]
bones['palm.02.L'] = bone.name
bone = arm.edit_bones.new('palm.03.L')
bone.head[:] = 0.6963, 0.0545, 1.2874
bone.tail[:] = 0.7540, 0.0521, 1.2482
bone.roll = -2.5843
bone.use_connect = False
bone.parent = arm.edit_bones[bones['hand.L']]
bones['palm.03.L'] = bone.name
bone = arm.edit_bones.new('palm.04.L')
bone.head[:] = 0.6929, 0.0696, 1.2871
bone.tail[:] = 0.7528, 0.0763, 1.2428
bone.roll = -2.5155
bone.use_connect = False
bone.parent = arm.edit_bones[bones['hand.L']]
bones['palm.04.L'] = bone.name
bone = arm.edit_bones.new('f_index.01.L')
bone.head[:] = 0.7464, 0.0051, 1.2482
bone.tail[:] = 0.7718, 0.0013, 1.2112
bone.roll = -2.0315
bone.use_connect = False
bone.parent = arm.edit_bones[bones['palm.01.L']]
bones['f_index.01.L'] = bone.name
bone = arm.edit_bones.new('thumb.01.L')
bone.head[:] = 0.6705, 0.0214, 1.2738
bone.tail[:] = 0.6857, 0.0015, 1.2404
bone.roll = -0.1587
bone.use_connect = False
bone.parent = arm.edit_bones[bones['palm.01.L']]
bones['thumb.01.L'] = bone.name
bone = arm.edit_bones.new('f_middle.01.L')
bone.head[:] = 0.7518, 0.0277, 1.2487
bone.tail[:] = 0.7762, 0.0234, 1.2058
bone.roll = -2.0067
bone.use_connect = False
bone.parent = arm.edit_bones[bones['palm.02.L']]
bones['f_middle.01.L'] = bone.name
bone = arm.edit_bones.new('f_ring.01.L')
bone.head[:] = 0.7540, 0.0521, 1.2482
bone.tail[:] = 0.7715, 0.0499, 1.2070
bone.roll = -2.0082
bone.use_connect = False
bone.parent = arm.edit_bones[bones['palm.03.L']]
bones['f_ring.01.L'] = bone.name
bone = arm.edit_bones.new('f_pinky.01.L')
bone.head[:] = 0.7528, 0.0763, 1.2428
bone.tail[:] = 0.7589, 0.0765, 1.2156
bone.roll = -1.9749
bone.use_connect = False
bone.parent = arm.edit_bones[bones['palm.04.L']]
bones['f_pinky.01.L'] = bone.name
bone = arm.edit_bones.new('f_index.02.L')
bone.head[:] = 0.7718, 0.0013, 1.2112
bone.tail[:] = 0.7840, -0.0003, 1.1858
bone.roll = -1.8799
bone.use_connect = True
bone.parent = arm.edit_bones[bones['f_index.01.L']]
bones['f_index.02.L'] = bone.name
bone = arm.edit_bones.new('thumb.02.L')
bone.head[:] = 0.6857, 0.0015, 1.2404
bone.tail[:] = 0.7056, -0.0057, 1.2145
bone.roll = -0.4798
bone.use_connect = True
bone.parent = arm.edit_bones[bones['thumb.01.L']]
bones['thumb.02.L'] = bone.name
bone = arm.edit_bones.new('f_middle.02.L')
bone.head[:] = 0.7762, 0.0234, 1.2058
bone.tail[:] = 0.7851, 0.0218, 1.1749
bone.roll = -1.8283
bone.use_connect = True
bone.parent = arm.edit_bones[bones['f_middle.01.L']]
bones['f_middle.02.L'] = bone.name
bone = arm.edit_bones.new('f_ring.02.L')
bone.head[:] = 0.7715, 0.0499, 1.2070
bone.tail[:] = 0.7794, 0.0494, 1.1762
bone.roll = -1.8946
bone.use_connect = True
bone.parent = arm.edit_bones[bones['f_ring.01.L']]
bones['f_ring.02.L'] = bone.name
bone = arm.edit_bones.new('f_pinky.02.L')
bone.head[:] = 0.7589, 0.0765, 1.2156
bone.tail[:] = 0.7618, 0.0770, 1.1932
bone.roll = -1.9059
bone.use_connect = True
bone.parent = arm.edit_bones[bones['f_pinky.01.L']]
bones['f_pinky.02.L'] = bone.name
bone = arm.edit_bones.new('f_index.03.L')
bone.head[:] = 0.7840, -0.0003, 1.1858
bone.tail[:] = 0.7892, 0.0006, 1.1636
bone.roll = -1.6760
bone.use_connect = True
bone.parent = arm.edit_bones[bones['f_index.02.L']]
bones['f_index.03.L'] = bone.name
bone = arm.edit_bones.new('thumb.03.L')
bone.head[:] = 0.7056, -0.0057, 1.2145
bone.tail[:] = 0.7194, -0.0098, 1.1995
bone.roll = -0.5826
bone.use_connect = True
bone.parent = arm.edit_bones[bones['thumb.02.L']]
bones['thumb.03.L'] = bone.name
bone = arm.edit_bones.new('f_middle.03.L')
bone.head[:] = 0.7851, 0.0218, 1.1749
bone.tail[:] = 0.7888, 0.0216, 1.1525
bone.roll = -1.7483
bone.use_connect = True
bone.parent = arm.edit_bones[bones['f_middle.02.L']]
bones['f_middle.03.L'] = bone.name
bone = arm.edit_bones.new('f_ring.03.L')
bone.head[:] = 0.7794, 0.0494, 1.1762
bone.tail[:] = 0.7781, 0.0498, 1.1577
bone.roll = -1.6582
bone.use_connect = True
bone.parent = arm.edit_bones[bones['f_ring.02.L']]
bones['f_ring.03.L'] = bone.name
bone = arm.edit_bones.new('f_pinky.03.L')
bone.head[:] = 0.7618, 0.0770, 1.1932
bone.tail[:] = 0.7611, 0.0772, 1.1782
bone.roll = -1.7639
bone.use_connect = True
bone.parent = arm.edit_bones[bones['f_pinky.02.L']]
bones['f_pinky.03.L'] = bone.name
bpy.ops.object.mode_set(mode='OBJECT')
pbone = obj.pose.bones[bones['upper_arm.L']]
pbone.rigify_type = 'pitchipoy.limbs.super_limb'
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
try:
pbone.rigify_parameters.separate_ik_layers = True
except AttributeError:
pass
try:
pbone.rigify_parameters.ik_layers = [False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
except AttributeError:
pass
try:
pbone.rigify_parameters.separate_hose_layers = True
except AttributeError:
pass
try:
pbone.rigify_parameters.hose_layers = [False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
except AttributeError:
pass
try:
pbone.rigify_parameters.tweak_layers = [False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
except AttributeError:
pass
try:
pbone.rigify_parameters.fk_layers = [False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
except AttributeError:
pass
pbone = obj.pose.bones[bones['forearm.L']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone = obj.pose.bones[bones['hand.L']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone = obj.pose.bones[bones['palm.01.L']]
pbone.rigify_type = 'palm'
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'YXZ'
pbone = obj.pose.bones[bones['palm.02.L']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'YXZ'
pbone = obj.pose.bones[bones['palm.03.L']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'YXZ'
pbone = obj.pose.bones[bones['palm.04.L']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'YXZ'
pbone = obj.pose.bones[bones['f_index.01.L']]
pbone.rigify_type = 'pitchipoy.simple_tentacle'
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
try:
pbone.rigify_parameters.separate_extra_layers = True
except AttributeError:
pass
try:
pbone.rigify_parameters.extra_layers = [False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
except AttributeError:
pass
try:
pbone.rigify_parameters.tweak_extra_layers = False
except AttributeError:
pass
pbone = obj.pose.bones[bones['thumb.01.L']]
pbone.rigify_type = 'pitchipoy.simple_tentacle'
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
try:
pbone.rigify_parameters.extra_layers = [False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
except AttributeError:
pass
try:
pbone.rigify_parameters.separate_extra_layers = True
except AttributeError:
pass
try:
pbone.rigify_parameters.tweak_extra_layers = False
except AttributeError:
pass
pbone = obj.pose.bones[bones['f_middle.01.L']]
pbone.rigify_type = 'pitchipoy.simple_tentacle'
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
try:
pbone.rigify_parameters.separate_extra_layers = True
except AttributeError:
pass
try:
pbone.rigify_parameters.extra_layers = [False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
except AttributeError:
pass
try:
pbone.rigify_parameters.tweak_extra_layers = False
except AttributeError:
pass
pbone = obj.pose.bones[bones['f_ring.01.L']]
pbone.rigify_type = 'pitchipoy.simple_tentacle'
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
try:
pbone.rigify_parameters.separate_extra_layers = True
except AttributeError:
pass
try:
pbone.rigify_parameters.extra_layers = [False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
except AttributeError:
pass
try:
pbone.rigify_parameters.tweak_extra_layers = False
except AttributeError:
pass
pbone = obj.pose.bones[bones['f_pinky.01.L']]
pbone.rigify_type = 'pitchipoy.simple_tentacle'
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
try:
pbone.rigify_parameters.separate_extra_layers = True
except AttributeError:
pass
try:
pbone.rigify_parameters.extra_layers = [False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
except AttributeError:
pass
try:
pbone.rigify_parameters.tweak_extra_layers = False
except AttributeError:
pass
pbone = obj.pose.bones[bones['f_index.02.L']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone = obj.pose.bones[bones['thumb.02.L']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone = obj.pose.bones[bones['f_middle.02.L']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone = obj.pose.bones[bones['f_ring.02.L']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone = obj.pose.bones[bones['f_pinky.02.L']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone = obj.pose.bones[bones['f_index.03.L']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone = obj.pose.bones[bones['thumb.03.L']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone = obj.pose.bones[bones['f_middle.03.L']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone = obj.pose.bones[bones['f_ring.03.L']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone = obj.pose.bones[bones['f_pinky.03.L']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
bpy.ops.object.mode_set(mode='EDIT')
for bone in arm.edit_bones:
bone.select = False
bone.select_head = False
bone.select_tail = False
for b in bones:
bone = arm.edit_bones[bones[b]]
bone.select = True
bone.select_head = True
bone.select_tail = True
arm.edit_bones.active = bone
for eb in arm.edit_bones:
if ('arm' in eb.name) or ('hand' in eb.name):
eb.layers = (False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False)
else:
eb.layers = (False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False)
arm.layers = (False, False, False, False, False, True, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False)
if __name__ == "__main__":
create_sample(bpy.context.active_object) | gpl-2.0 | 452,583,428,731,699,500 | -1,318,732,907,338,977,800 | 35.820313 | 270 | 0.538267 | false |
unnikrishnankgs/va | venv/lib/python3.5/site-packages/werkzeug/script.py | 84 | 11666 | # -*- coding: utf-8 -*-
r'''
werkzeug.script
~~~~~~~~~~~~~~~
.. admonition:: Deprecated Functionality
``werkzeug.script`` is deprecated without replacement functionality.
Python's command line support improved greatly with :mod:`argparse`
and a bunch of alternative modules.
Most of the time you have recurring tasks while writing an application
such as starting up an interactive python interpreter with some prefilled
imports, starting the development server, initializing the database or
something similar.
For that purpose werkzeug provides the `werkzeug.script` module which
helps you writing such scripts.
Basic Usage
-----------
The following snippet is roughly the same in every werkzeug script::
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from werkzeug import script
# actions go here
if __name__ == '__main__':
script.run()
Starting this script now does nothing because no actions are defined.
An action is a function in the same module starting with ``"action_"``
which takes a number of arguments where every argument has a default. The
type of the default value specifies the type of the argument.
Arguments can then be passed by position or using ``--name=value`` from
the shell.
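For example, a hypothetical action with typed defaults could look like
this (the names are purely illustrative)::

    def action_initdb(user=('u', 'admin'), verbose=False):
        "Initialize the database."
        ...

Here ``--user`` (or ``-u``) expects a string argument while ``--verbose``
acts as a boolean flag.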
Because a runserver and shell command is pretty common there are two
factory functions that create such commands::
def make_app():
from yourapplication import YourApplication
return YourApplication(...)
action_runserver = script.make_runserver(make_app, use_reloader=True)
action_shell = script.make_shell(lambda: {'app': make_app()})
Using The Scripts
-----------------
The script from above can be used like this from the shell now:
.. sourcecode:: text
$ ./manage.py --help
$ ./manage.py runserver localhost 8080 --debugger --no-reloader
$ ./manage.py runserver -p 4000
$ ./manage.py shell
As you can see it's possible to pass parameters as positional arguments
or as named parameters, pretty much like Python function calls.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
'''
from __future__ import print_function
import sys
import inspect
import getopt
from warnings import warn
from os.path import basename
from werkzeug._compat import iteritems
argument_types = {
bool: 'boolean',
str: 'string',
int: 'integer',
float: 'float'
}
converters = {
'boolean': lambda x: x.lower() in ('1', 'true', 'yes', 'on'),
'string': str,
'integer': int,
'float': float
}
def _deprecated():
warn(DeprecationWarning('werkzeug.script is deprecated and '
'will be removed soon'), stacklevel=2)
def run(namespace=None, action_prefix='action_', args=None):
"""Run the script. Participating actions are looked up in the caller's
namespace if no namespace is given, otherwise in the dict provided.
Only items that start with action_prefix are processed as actions. If
you want to use all items in the namespace provided as actions set
action_prefix to an empty string.
:param namespace: An optional dict where the functions are looked up in.
By default the local namespace of the caller is used.
:param action_prefix: The prefix for the functions. Everything else
is ignored.
:param args: the arguments for the function. If not specified
:data:`sys.argv` without the first argument is used.
"""
_deprecated()
if namespace is None:
namespace = sys._getframe(1).f_locals
actions = find_actions(namespace, action_prefix)
if args is None:
args = sys.argv[1:]
if not args or args[0] in ('-h', '--help'):
return print_usage(actions)
elif args[0] not in actions:
fail('Unknown action \'%s\'' % args[0])
arguments = {}
types = {}
key_to_arg = {}
long_options = []
formatstring = ''
func, doc, arg_def = actions[args.pop(0)]
for idx, (arg, shortcut, default, option_type) in enumerate(arg_def):
real_arg = arg.replace('-', '_')
if shortcut:
formatstring += shortcut
if not isinstance(default, bool):
formatstring += ':'
key_to_arg['-' + shortcut] = real_arg
long_options.append(isinstance(default, bool) and arg or arg + '=')
key_to_arg['--' + arg] = real_arg
key_to_arg[idx] = real_arg
types[real_arg] = option_type
arguments[real_arg] = default
try:
optlist, posargs = getopt.gnu_getopt(args, formatstring, long_options)
except getopt.GetoptError as e:
fail(str(e))
specified_arguments = set()
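# Positional arguments are consumed first, in declaration order; named
# options from getopt are then applied on top of them.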
for key, value in enumerate(posargs):
try:
arg = key_to_arg[key]
except IndexError:
fail('Too many parameters')
specified_arguments.add(arg)
try:
arguments[arg] = converters[types[arg]](value)
except ValueError:
fail('Invalid value for argument %s (%s): %s' % (key, arg, value))
for key, value in optlist:
arg = key_to_arg[key]
if arg in specified_arguments:
fail('Argument \'%s\' is specified twice' % arg)
if types[arg] == 'boolean':
if arg.startswith('no_'):
value = 'no'
else:
value = 'yes'
try:
arguments[arg] = converters[types[arg]](value)
except ValueError:
fail('Invalid value for \'%s\': %s' % (key, value))
newargs = {}
for k, v in iteritems(arguments):
newargs[k.startswith('no_') and k[3:] or k] = v
arguments = newargs
return func(**arguments)
def fail(message, code=-1):
"""Fail with an error."""
_deprecated()
print('Error: %s' % message, file=sys.stderr)
sys.exit(code)
def find_actions(namespace, action_prefix):
"""Find all the actions in the namespace."""
_deprecated()
actions = {}
for key, value in iteritems(namespace):
if key.startswith(action_prefix):
actions[key[len(action_prefix):]] = analyse_action(value)
return actions
def print_usage(actions):
"""Print the usage information. (Help screen)"""
_deprecated()
actions = sorted(iteritems(actions))
print('usage: %s <action> [<options>]' % basename(sys.argv[0]))
print(' %s --help' % basename(sys.argv[0]))
print()
print('actions:')
for name, (func, doc, arguments) in actions:
print(' %s:' % name)
for line in doc.splitlines():
print(' %s' % line)
if arguments:
print()
for arg, shortcut, default, argtype in arguments:
if isinstance(default, bool):
print(' %s' % (
(shortcut and '-%s, ' % shortcut or '') + '--' + arg
))
else:
print(' %-30s%-10s%s' % (
(shortcut and '-%s, ' % shortcut or '') + '--' + arg,
argtype, default
))
print()
def analyse_action(func):
"""Analyse a function."""
_deprecated()
description = inspect.getdoc(func) or 'undocumented action'
arguments = []
args, varargs, kwargs, defaults = inspect.getargspec(func)
if varargs or kwargs:
raise TypeError('variable length arguments for action not allowed.')
if len(args) != len(defaults or ()):
raise TypeError('not all arguments have proper definitions')
for idx, (arg, definition) in enumerate(zip(args, defaults or ())):
if arg.startswith('_'):
raise TypeError('arguments may not start with an underscore')
if not isinstance(definition, tuple):
shortcut = None
default = definition
else:
shortcut, default = definition
argument_type = argument_types[type(default)]
if isinstance(default, bool) and default is True:
arg = 'no-' + arg
arguments.append((arg.replace('_', '-'), shortcut,
default, argument_type))
return func, description, arguments
def make_shell(init_func=None, banner=None, use_ipython=True):
"""Returns an action callback that spawns a new interactive
python shell.
:param init_func: an optional initialization function that is
called before the shell is started. The return
value of this function is the initial namespace.
:param banner: the banner that is displayed before the shell. If
not specified a generic banner is used instead.
:param use_ipython: if set to `True` ipython is used if available.
"""
_deprecated()
if banner is None:
banner = 'Interactive Werkzeug Shell'
if init_func is None:
init_func = dict
def action(ipython=use_ipython):
"""Start a new interactive python session."""
namespace = init_func()
if ipython:
try:
try:
from IPython.frontend.terminal.embed import InteractiveShellEmbed
sh = InteractiveShellEmbed.instance(banner1=banner)
except ImportError:
from IPython.Shell import IPShellEmbed
sh = IPShellEmbed(banner=banner)
except ImportError:
pass
else:
sh(local_ns=namespace)
return
from code import interact
interact(banner, local=namespace)
return action
def make_runserver(app_factory, hostname='localhost', port=5000,
use_reloader=False, use_debugger=False, use_evalex=True,
threaded=False, processes=1, static_files=None,
extra_files=None, ssl_context=None):
"""Returns an action callback that spawns a new development server.
.. versionadded:: 0.5
`static_files` and `extra_files` was added.
..versionadded:: 0.6.1
`ssl_context` was added.
:param app_factory: a function that returns a new WSGI application.
:param hostname: the default hostname the server should listen on.
:param port: the default port of the server.
:param use_reloader: the default setting for the reloader.
:param use_evalex: the default setting for the evalex flag of the debugger.
:param threaded: the default threading setting.
:param processes: the default number of processes to start.
:param static_files: optional dict of static files.
:param extra_files: optional list of extra files to track for reloading.
:param ssl_context: optional SSL context for running server in HTTPS mode.
"""
_deprecated()
def action(hostname=('h', hostname), port=('p', port),
reloader=use_reloader, debugger=use_debugger,
evalex=use_evalex, threaded=threaded, processes=processes):
"""Start a new development server."""
from werkzeug.serving import run_simple
app = app_factory()
run_simple(hostname, port, app,
use_reloader=reloader, use_debugger=debugger,
use_evalex=evalex, extra_files=extra_files,
reloader_interval=1, threaded=threaded, processes=processes,
static_files=static_files, ssl_context=ssl_context)
return action
| bsd-2-clause | 935,945,355,081,529,900 | 8,497,285,699,854,698,000 | 34.138554 | 85 | 0.605177 | false |
r-icarus/openstack_microserver | openstack_dashboard/openstack/common/rpc/zmq_receiver.py | 19 | 1254 | #!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import eventlet
eventlet.monkey_patch()
import contextlib
import sys
from oslo.config import cfg
from openstack_dashboard.openstack.common import log as logging
from openstack_dashboard.openstack.common import rpc
from openstack_dashboard.openstack.common.rpc import impl_zmq
CONF = cfg.CONF
CONF.register_opts(rpc.rpc_opts)
CONF.register_opts(impl_zmq.zmq_opts)
def main():
CONF(sys.argv[1:], project='oslo')
logging.setup("oslo")
with contextlib.closing(impl_zmq.ZmqProxy(CONF)) as reactor:
reactor.consume_in_thread()
reactor.wait()
| apache-2.0 | 824,544,620,093,256,100 | 685,061,952,059,657,100 | 29.585366 | 78 | 0.739234 | false |
bmanojlovic/ansible | test/units/module_utils/gcp/test_utils.py | 15 | 1408 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) 2016, Tom Melendez <tom@supertom.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
from ansible.compat.tests import mock, unittest
from ansible.module_utils.gcp import (check_min_pkg_version)
def build_distribution(version):
obj = mock.MagicMock()
obj.version = '0.5.0'
return obj
class GCPUtilsTestCase(unittest.TestCase):
@mock.patch("pkg_resources.get_distribution", side_effect=build_distribution)
def test_check_minimum_pkg_version(self, mockobj):
self.assertTrue(check_min_pkg_version('foobar', '0.4.0'))
self.assertTrue(check_min_pkg_version('foobar', '0.5.0'))
self.assertFalse(check_min_pkg_version('foobar', '0.6.0'))
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | -6,874,884,643,160,711,000 | 7,232,778,632,613,827,000 | 33.341463 | 81 | 0.716619 | false |
vFense/vFenseAgent-nix | agent/deps/rpm/Python-2.7.5/lib/python2.7/test/test_md5.py | 194 | 1790 | # Testing md5 module
import warnings
warnings.filterwarnings("ignore", "the md5 module is deprecated.*",
DeprecationWarning)
import unittest
from md5 import md5
from test import test_support
def hexstr(s):
import string
h = string.hexdigits
r = ''
for c in s:
i = ord(c)
r = r + h[(i >> 4) & 0xF] + h[i & 0xF]
return r
class MD5_Test(unittest.TestCase):
def md5test(self, s, expected):
self.assertEqual(hexstr(md5(s).digest()), expected)
self.assertEqual(md5(s).hexdigest(), expected)
def test_basics(self):
eq = self.md5test
eq('', 'd41d8cd98f00b204e9800998ecf8427e')
eq('a', '0cc175b9c0f1b6a831c399e269772661')
eq('abc', '900150983cd24fb0d6963f7d28e17f72')
eq('message digest', 'f96b697d7cb7938d525a2f31aaf161d0')
eq('abcdefghijklmnopqrstuvwxyz', 'c3fcd3d76192e4007dfb496cca67e13b')
eq('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789',
'd174ab98d277d9f5a5611c2c9f419d9f')
eq('12345678901234567890123456789012345678901234567890123456789012345678901234567890',
'57edf4a22be3c955ac49da2e2107b67a')
def test_hexdigest(self):
# hexdigest is new with Python 2.0
m = md5('testing the hexdigest method')
h = m.hexdigest()
self.assertEqual(hexstr(m.digest()), h)
def test_large_update(self):
aas = 'a' * 64
bees = 'b' * 64
cees = 'c' * 64
m1 = md5()
m1.update(aas)
m1.update(bees)
m1.update(cees)
m2 = md5()
m2.update(aas + bees + cees)
self.assertEqual(m1.digest(), m2.digest())
def test_main():
test_support.run_unittest(MD5_Test)
if __name__ == '__main__':
test_main()
| lgpl-3.0 | -5,703,561,037,972,893,000 | 9,169,529,102,981,538,000 | 28.344262 | 94 | 0.622905 | false |
xxxVxxx/troposphere | troposphere/openstack/heat.py | 29 | 1115 | # Copyright (c) 2012-2013, Mark Peek <mark@peek.org>
# Copyright (c) 2014, Andy Botting <andy.botting@theguardian.com>
# All rights reserved.
#
# See LICENSE file for full license.
from troposphere import AWSObject
from troposphere.validators import integer
# Due to the strange nature of the OpenStack compatibility layer, some values
# that should be integers fail to validate and need to be represented as
# strings. For this reason, we duplicate the AWS::AutoScaling::AutoScalingGroup
# and change these types.
class AWSAutoScalingGroup(AWSObject):
resource_type = "AWS::AutoScaling::AutoScalingGroup"
props = {
'AvailabilityZones': (list, True),
'Cooldown': (integer, False),
'DesiredCapacity': (basestring, False),
'HealthCheckGracePeriod': (integer, False),
'HealthCheckType': (basestring, False),
'LaunchConfigurationName': (basestring, True),
'LoadBalancerNames': (list, False),
'MaxSize': (basestring, True),
'MinSize': (basestring, True),
'Tags': (list, False),
'VPCZoneIdentifier': (list, False),
}
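# A minimal, hypothetical usage sketch (names are illustrative only):
# group = AWSAutoScalingGroup("MyGroup",
#                             AvailabilityZones=["nova"],
#                             LaunchConfigurationName="my-launch-config",
#                             MaxSize="3", MinSize="1")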
| bsd-2-clause | 130,424,036,826,120,050 | 4,403,618,227,290,268,000 | 36.166667 | 79 | 0.687892 | false |
gauthierm/bedrock | tests/functional/firefox/os/test_tv.py | 2 | 1615 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import pytest
from pages.firefox.os.tv import TVPage
@pytest.mark.sanity
@pytest.mark.nondestructive
def test_next_previous_buttons(base_url, selenium):
page = TVPage(base_url, selenium).open()
assert not page.is_previous_enabled
screens = page.screens
thumbnails = page.thumbnails
for i in range(len(screens) - 1):
assert screens[i].is_displayed
assert thumbnails[i].is_selected
page.show_next_screen()
assert not page.is_next_enabled
for i in range(len(screens) - 1, 0, -1):
assert screens[i].is_displayed
assert thumbnails[i].is_selected
page.show_previous_screen()
assert not page.is_previous_enabled
assert screens[0].is_displayed
assert thumbnails[0].is_selected
@pytest.mark.sanity
@pytest.mark.nondestructive
def test_click_thumbnails(base_url, selenium):
page = TVPage(base_url, selenium).open()
screens = page.screens
thumbnails = page.thumbnails
assert screens[0].is_displayed
assert thumbnails[0].is_selected
for i in range(1, len(thumbnails)):
thumbnails[i].click()
assert screens[i].is_displayed
assert thumbnails[i].is_selected
for i in range(len(thumbnails) - 2, -1, -1):
thumbnails[i].click()
assert screens[i].is_displayed
assert thumbnails[i].is_selected
assert screens[0].is_displayed
assert thumbnails[0].is_selected
| mpl-2.0 | 7,078,370,226,776,976,000 | -8,864,791,202,133,220,000 | 32.645833 | 69 | 0.686687 | false |
mitya57/debian-buildbot | buildbot/test/fake/pbmanager.py | 2 | 1843 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from twisted.application import service
from twisted.internet import defer
class FakePBManager(service.MultiService):
def __init__(self):
service.MultiService.__init__(self)
self.setName("fake-pbmanager")
self._registrations = []
self._unregistrations = []
def register(self, portstr, username, password, pfactory):
if (portstr, username) not in self._registrations:
reg = FakeRegistration(self, portstr, username)
self._registrations.append((portstr, username, password))
return reg
else:
raise KeyError("username '%s' is already registered on port %s"
% (username, portstr))
def _unregister(self, portstr, username):
self._unregistrations.append((portstr, username))
return defer.succeed(None)
class FakeRegistration(object):
def __init__(self, pbmanager, portstr, username):
self._portstr = portstr
self._username = username
self._pbmanager = pbmanager
def unregister(self):
self._pbmanager._unregister(self._portstr, self._username)
| gpl-2.0 | -4,160,422,053,857,638,400 | -7,703,755,140,495,749,000 | 35.86 | 79 | 0.688551 | false |
paulscottrobson/fred | __old/Processor/process.py | 2 | 2081 | #
# Process 1801.def
#
import re
def process(txt,opc):
txt = txt.replace("@X","{0:x}".format(opc & 15))
txt = txt.replace("@R","{0}".format(opc & 15))
txt = txt.replace("@F","{0:x}".format((opc & 3)+1))
return txt
src = open("1801.def","r").readlines() # read file
src = [x if x.find("//") < 0 else x[:x.find("//")] for x in src] # delete comments
src = [x.replace("\t"," ").strip() for x in src] # spaces and tabs
src = [x for x in src if x != ""] # remove blank lines
open("_1801_include.h","w").write("\n".join([x[1:] for x in src if x[0] == ':']))
mnemonics = [ None ] * 256 # mnemonics
codes = [ None ] * 256 # executable codes.
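# Each remaining definition line is expected to look like
#   '<hex opcode or XX-YY range> "<mnemonic>" <C code>'
# (this format is inferred from the regex used below).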
for l in [x for x in src if x[0] != ':']: # for each line.
m = re.match('^([0-9A-F\\-]+)\\s*\\"(.*)\\"\s*(.*)$',l) # check it
assert m is not None,"Error "+l
rg = m.group(1)
if len(rg) == 2: # get from/to range
rg = rg + "-"+rg
for opcode in range(int(rg[:2],16),int(rg[-2:],16)+1): # for each opcode
assert mnemonics[opcode] is None
mnemonics[opcode] = process(m.group(2),opcode-int(rg[:2],16)).lower()
codes[opcode] = process(m.group(3),opcode-int(rg[:2],16))
for i in range(0,256): # fill in mnemonics
if mnemonics[i] is None:
mnemonics[i] = "byte {0:02x}".format(i)
open("_1801_disasm.h","w").write(",".join(['"'+m+'"' for m in mnemonics])) # write out disassembly table
h = open("_1801_opcodes.h","w")
for i in range(0,256):
if codes[i] is not None:
h.write("case 0x{0:02x}: // *** {1} ***\n".format(i,mnemonics[i]))
h.write(" {0};break;\n".format(codes[i]))
h = open("_1801_ports.h","w")
for i in range(0,8):
h.write("#ifndef INPORT{0}\n".format(i))
h.write("#define INPORT{0}() (0)\n".format(i))
h.write("#endif\n")
h.write("#ifndef OUTPORT{0}\n".format(i))
h.write("#define OUTPORT{0}(v) {{}}\n".format(i))
h.write("#endif\n")
for i in range(1,5):
h.write("#ifndef EFLAG{0}\n".format(i))
h.write("#define EFLAG{0}() (0)\n".format(i))
h.write("#endif\n")
h.close() | mit | -2,687,992,910,930,175,500 | -6,541,237,630,336,527,000 | 36.178571 | 107 | 0.546852 | false |
nuncjo/odoo | addons/l10n_mx/__openerp__.py | 379 | 2559 | # -*- encoding: utf-8 -*-
###########################################################################
# Module Writen to OpenERP, Open Source Management Solution
# All Rights Reserved
###############Credits######################################################
# Coded by: Alejandro Negrin anegrin@vauxoo.com,
# Planned by: Alejandro Negrin, Humberto Arocha, Moises Lopez
# Financed by: Vauxoo.
# Audited by: Humberto Arocha (hbto@vauxoo.com) y Moises Lopez (moylop260@vauxoo.com)
#############################################################################
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
{
"name" : "Mexico - Accounting",
"version" : "2.0",
"author" : "Vauxoo",
"category" : "Localization/Account Charts",
"description": """
Minimal accounting configuration for Mexico.
============================================
This chart of accounts is a minimal proposal that lets you use the accounting
features of OpenERP out of the box.
It does not pretend to be the complete localization for Mexico; it is just the
minimal data required to start from zero with the Mexican localization.
This module and its content are updated frequently by the openerp-mexico team.
With this module you will have:
- Minimal chart of accounts tested in production environments.
- Minimal chart of taxes, to comply with SAT_ requirements.
.. _SAT: http://www.sat.gob.mx/
""",
"depends" : ["account",
"base_vat",
"account_chart",
],
"demo_xml" : [],
"data" : ["data/account_tax_code.xml",
"data/account_chart.xml",
"data/account_tax.xml",
"data/l10n_chart_mx_wizard.xml"],
"active": False,
"installable": True,
"certificate": False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -7,093,103,969,312,639,000 | -8,756,917,620,335,053,000 | 39.619048 | 88 | 0.575225 | false |
caphrim007/ansible | lib/ansible/modules/storage/netapp/na_ontap_net_port.py | 6 | 8182 | #!/usr/bin/python
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
module: na_ontap_net_port
short_description: Manage NetApp ONTAP network ports.
extends_documentation_fragment:
- netapp.na_ontap
version_added: '2.6'
author: NetApp Ansible Team (ng-ansibleteam@netapp.com)
description:
- Modify an ONTAP network port.
options:
state:
description:
- Whether the specified net port should exist or not.
choices: ['present']
default: present
node:
description:
- Specifies the name of node.
required: true
port:
description:
- Specifies the name of port.
required: true
mtu:
description:
- Specifies the maximum transmission unit (MTU) reported by the port.
autonegotiate_admin:
description:
- Enables or disables Ethernet auto-negotiation of speed,
duplex and flow control.
duplex_admin:
description:
- Specifies the user preferred duplex setting of the port.
- Valid values auto, half, full
speed_admin:
description:
- Specifies the user preferred speed setting of the port.
flowcontrol_admin:
description:
- Specifies the user preferred flow control setting of the port.
ipspace:
description:
- Specifies the port's associated IPspace name.
- The 'Cluster' ipspace is reserved for cluster ports.
"""
EXAMPLES = """
- name: Modify Net Port
  na_ontap_net_port:
    state: present
    username: "{{ netapp_username }}"
    password: "{{ netapp_password }}"
    hostname: "{{ netapp_hostname }}"
    node: "{{ vsim_server_name }}"
    port: e0d
    autonegotiate_admin: true
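
# A second, purely illustrative example (the values below are placeholders,
# not recommendations); it exercises some of the other documented options:
- name: Set MTU and flow control on a port
  na_ontap_net_port:
    state: present
    username: "{{ netapp_username }}"
    password: "{{ netapp_password }}"
    hostname: "{{ netapp_hostname }}"
    node: "{{ vsim_server_name }}"
    port: e0d
    mtu: 9000
    duplex_admin: full
    flowcontrol_admin: none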
"""
RETURN = """
"""
from ansible.module_utils.basic import AnsibleModule
import ansible.module_utils.netapp as netapp_utils
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppOntapNetPort(object):
"""
Modify a Net port
"""
def __init__(self):
"""
Initialize the Ontap Net Port Class
"""
self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
self.argument_spec.update(dict(
state=dict(required=False, choices=['present'], default='present'),
node=dict(required=True, type="str"),
port=dict(required=True, type="str"),
mtu=dict(required=False, type="str", default=None),
autonegotiate_admin=dict(required=False, type="str", default=None),
duplex_admin=dict(required=False, type="str", default=None),
speed_admin=dict(required=False, type="str", default=None),
flowcontrol_admin=dict(required=False, type="str", default=None),
ipspace=dict(required=False, type="str", default=None),
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
supports_check_mode=True
)
p = self.module.params
# set up state variables
self.state = p['state']
self.node = p['node']
self.port = p['port']
        # The following options are optional, but at least one of them needs to be set
self.mtu = p['mtu']
self.autonegotiate_admin = p["autonegotiate_admin"]
self.duplex_admin = p["duplex_admin"]
self.speed_admin = p["speed_admin"]
self.flowcontrol_admin = p["flowcontrol_admin"]
if HAS_NETAPP_LIB is False:
self.module.fail_json(
msg="the python NetApp-Lib module is required")
else:
self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
return
def get_net_port(self):
"""
Return details about the net port
:return: Details about the net port. None if not found.
:rtype: dict
"""
net_port_info = netapp_utils.zapi.NaElement('net-port-get-iter')
net_port_attributes = netapp_utils.zapi.NaElement('net-port-info')
net_port_attributes.add_new_child('node', self.node)
net_port_attributes.add_new_child('port', self.port)
query = netapp_utils.zapi.NaElement('query')
query.add_child_elem(net_port_attributes)
net_port_info.add_child_elem(query)
result = self.server.invoke_successfully(net_port_info, True)
return_value = None
if result.get_child_by_name('num-records') and \
int(result.get_child_content('num-records')) >= 1:
net_port_attributes = result.get_child_by_name('attributes-list').\
get_child_by_name('net-port-info')
return_value = {
'node': net_port_attributes.get_child_content('node'),
'port': net_port_attributes.get_child_content('port'),
'mtu': net_port_attributes.get_child_content('mtu'),
'autonegotiate_admin': net_port_attributes.get_child_content(
'is-administrative-auto-negotiate'),
'duplex_admin': net_port_attributes.get_child_content(
'administrative-duplex'),
'speed_admin': net_port_attributes.get_child_content(
'administrative-speed'),
'flowcontrol_admin': net_port_attributes.get_child_content(
'administrative-flowcontrol'),
}
return return_value
def modify_net_port(self):
"""
Modify a port
"""
port_obj = netapp_utils.zapi.NaElement('net-port-modify')
port_obj.add_new_child("node", self.node)
port_obj.add_new_child("port", self.port)
# The following options are optional.
# We will only call them if they are not set to None
if self.mtu:
port_obj.add_new_child("mtu", self.mtu)
if self.autonegotiate_admin:
port_obj.add_new_child(
"is-administrative-auto-negotiate", self.autonegotiate_admin)
if self.duplex_admin:
port_obj.add_new_child("administrative-duplex", self.duplex_admin)
if self.speed_admin:
port_obj.add_new_child("administrative-speed", self.speed_admin)
if self.flowcontrol_admin:
port_obj.add_new_child(
"administrative-flowcontrol", self.flowcontrol_admin)
self.server.invoke_successfully(port_obj, True)
def apply(self):
"""
Run Module based on play book
"""
changed = False
net_port_exists = False
results = netapp_utils.get_cserver(self.server)
cserver = netapp_utils.setup_na_ontap_zapi(
module=self.module, vserver=results)
netapp_utils.ems_log_event("na_ontap_net_port", cserver)
net_port_details = self.get_net_port()
if net_port_details:
net_port_exists = True
if self.state == 'present':
if (self.mtu and self.mtu != net_port_details['mtu']) or \
(self.autonegotiate_admin and
self.autonegotiate_admin != net_port_details['autonegotiate_admin']) or \
(self.duplex_admin and
self.duplex_admin != net_port_details['duplex_admin']) or \
(self.speed_admin and
self.speed_admin != net_port_details['speed_admin']) or \
(self.flowcontrol_admin and
self.flowcontrol_admin != net_port_details['flowcontrol_admin']):
changed = True
if changed:
if self.module.check_mode:
pass
else:
if self.state == 'present':
if net_port_exists:
self.modify_net_port()
self.module.exit_json(changed=changed)
def main():
"""
Create the NetApp Ontap Net Port Object and modify it
"""
obj = NetAppOntapNetPort()
obj.apply()
if __name__ == '__main__':
main()
| gpl-3.0 | 6,982,902,041,680,588,000 | -5,088,016,962,355,900,000 | 34.11588 | 93 | 0.59472 | false |
duboviy/pybenchmark | pybenchmark/gprofiler.py | 1 | 3896 | """
Module for visualizing Python code profiles using the Chrome developer tools.
Example usage:
>>> profiler = Profiler()
>>> profiler.start()
>>> my_expensive_code()
>>> profiler.stop()
>>> with open('my.cpuprofile', 'w') as f:
... f.write(profiler.output())
In a gevented environment, context switches can make things confusing. Data
collection can be limited to a single greenlet by passing
>>> profiler = GProfiler(target_greenlet=gevent.getcurrent())
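The class can also be used as a context manager; the following is a minimal
sketch of that style (on exit it writes ./pybenchmark_<pid>_.cpuprofile to the
current directory, see __exit__ below):
>>> with GProfiler() as profiler:
...     my_expensive_code()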
"""
import os
import sys
import json
import timeit
try:
import gevent
except ImportError:
gevent = None
class Node(object):
def __init__(self, name, id_):
self.name = name
self.id_ = id_
self.children = {}
self.hitCount = 1
def serialize(self):
res = {
'functionName': self.name,
'hitCount': self.hitCount,
'children': [c.serialize() for c in self.children.values()],
'scriptId': '1',
'url': '',
'lineNumber': 1,
'columnNumber': 1,
'deoptReason': '',
'id': self.id_,
'callUID': self.id_
}
return res
def add(self, frames, id_gen):
if not frames:
self.hitCount += 1
return
head = frames[0]
child = self.children.get(head)
if child is None:
child = Node(name=head, id_=id_gen())
self.children[head] = child
child.add(frames[1:], id_gen)
class GProfiler(object):
def __init__(self, target_greenlet=None, interval=0.0001):
self.target_greenlet_id = (
id(target_greenlet) if target_greenlet else None)
self.interval = interval
self.started = None
self.last_profile = None
self.root = Node('head', 1)
self.nextId = 1
self.samples = []
self.timestamps = []
def _id_generator(self):
self.nextId += 1
return self.nextId
def _profile(self, frame, event, arg): # pylint: disable=W0613
if event == 'call':
self._record_frame(frame.f_back)
def _record_frame(self, frame):
if self.target_greenlet_id and id(gevent.getcurrent()) != self.target_greenlet_id:
return
now = timeit.default_timer()
if self.last_profile is not None:
if now - self.last_profile < self.interval:
return
self.last_profile = now
self.timestamps.append(int(1e6 * now))
stack = []
while frame is not None:
stack.append(self._format_frame(frame))
frame = frame.f_back
stack.reverse()
self.root.add(stack, self._id_generator)
self.samples.append(self.nextId)
@staticmethod
def _format_frame(frame):
return '{}({})'.format(frame.f_code.co_name,
frame.f_globals.get('__name__'))
def output(self):
if self.samples:
data = {
'startTime': self.started,
'endTime': 0.000001 * self.timestamps[-1],
'timestamps': self.timestamps,
'samples': self.samples,
'head': self.root.serialize()
}
else:
data = {}
return json.dumps(data)
def start(self):
sys.setprofile(self._profile)
self.started = timeit.default_timer()
print("# Running in profile mode. #")
@staticmethod
def stop():
sys.setprofile(None)
print("# Profile mode stopped. #")
def __enter__(self):
self.start()
return self
def __exit__(self, type_, value, traceback_):
self.stop()
filename = './pybenchmark_%s_.cpuprofile' % os.getpid()
with open(filename, 'w') as f:
f.write(self.output())
print(("Written profile file '%s'." % (filename)))
| mit | 7,351,859,670,591,429,000 | 6,043,023,548,087,257,000 | 27.647059 | 90 | 0.545688 | false |
sorenk/ansible | test/sanity/import/importer.py | 29 | 6122 | #!/usr/bin/env python
"""Import the given python module(s) and report error(s) encountered."""
from __future__ import absolute_import, print_function
import contextlib
import imp
import os
import re
import sys
import traceback
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import ansible.module_utils.basic
import ansible.module_utils.common.removed
class ImporterAnsibleModuleException(Exception):
"""Exception thrown during initialization of ImporterAnsibleModule."""
pass
class ImporterAnsibleModule(object):
"""Replacement for AnsibleModule to support import testing."""
def __init__(self, *args, **kwargs):
raise ImporterAnsibleModuleException()
# stop Ansible module execution during AnsibleModule instantiation
ansible.module_utils.basic.AnsibleModule = ImporterAnsibleModule
# no-op for _load_params since it may be called before instantiating AnsibleModule
ansible.module_utils.basic._load_params = lambda *args, **kwargs: {}
# no-op for removed_module since it is called in place of AnsibleModule instantiation
ansible.module_utils.common.removed.removed_module = lambda *args, **kwargs: None
def main():
"""Main program function."""
base_dir = os.getcwd()
messages = set()
for path in sys.argv[1:] or sys.stdin.read().splitlines():
test_python_module(path, base_dir, messages, False)
test_python_module(path, base_dir, messages, True)
if messages:
exit(10)
def test_python_module(path, base_dir, messages, ansible_module):
if ansible_module:
# importing modules with __main__ under Python 2.6 exits with status code 1
if sys.version_info < (2, 7):
return
# only run __main__ protected code for Ansible modules
if not path.startswith('lib/ansible/modules/'):
return
# async_wrapper is not an Ansible module
if path == 'lib/ansible/modules/utilities/logic/async_wrapper.py':
return
# run code protected by __name__ conditional
name = '__main__'
# show the Ansible module responsible for the exception, even if it was thrown in module_utils
filter_dir = os.path.join(base_dir, 'lib/ansible/modules')
else:
# do not run code protected by __name__ conditional
name = 'module_import_test'
# show the Ansible file responsible for the exception, even if it was thrown in 3rd party code
filter_dir = base_dir
capture = Capture()
try:
with open(path, 'r') as module_fd:
with capture_output(capture):
imp.load_module(name, module_fd, os.path.abspath(path), ('.py', 'r', imp.PY_SOURCE))
capture_report(path, capture, messages)
except ImporterAnsibleModuleException:
# module instantiated AnsibleModule without raising an exception
pass
except BaseException as ex: # pylint: disable=locally-disabled, broad-except
capture_report(path, capture, messages)
exc_type, _, exc_tb = sys.exc_info()
message = str(ex)
results = list(reversed(traceback.extract_tb(exc_tb)))
source = None
line = 0
offset = 0
for result in results:
if result[0].startswith(filter_dir):
source = result[0][len(base_dir) + 1:].replace('test/sanity/import/', '')
line = result[1] or 0
break
if not source:
# If none of our source files are found in the traceback, report the file we were testing.
# I haven't been able to come up with a test case that encounters this issue yet.
source = path
message += ' (in %s:%d)' % (results[-1][0], results[-1][1] or 0)
elif isinstance(ex, SyntaxError):
if ex.filename.endswith(path): # pylint: disable=locally-disabled, no-member
# A SyntaxError in the source we're importing will have the correct path, line and offset.
# However, the traceback will report the path to this importer.py script instead.
# We'll use the details from the SyntaxError in this case, as it's more accurate.
source = path
line = ex.lineno or 0 # pylint: disable=locally-disabled, no-member
offset = ex.offset or 0 # pylint: disable=locally-disabled, no-member
message = str(ex)
# Hack to remove the filename and line number from the message, if present.
message = message.replace(' (%s, line %d)' % (os.path.basename(path), line), '')
message = re.sub(r'\n *', ': ', message)
error = '%s:%d:%d: %s: %s' % (source, line, offset, exc_type.__name__, message)
report_message(error, messages)
class Capture(object):
"""Captured output and/or exception."""
def __init__(self):
self.stdout = StringIO()
self.stderr = StringIO()
def capture_report(path, capture, messages):
"""Report on captured output.
:type path: str
:type capture: Capture
:type messages: set[str]
"""
if capture.stdout.getvalue():
message = '%s:%d:%d: %s: %s' % (path, 0, 0, 'Output', 'Import resulted in output to stdout.')
report_message(message, messages)
if capture.stderr.getvalue():
message = '%s:%d:%d: %s: %s' % (path, 0, 0, 'Output', 'Import resulted in output to stderr.')
report_message(message, messages)
def report_message(message, messages):
"""Report message if not already reported.
:type message: str
:type messages: set[str]
"""
if message not in messages:
messages.add(message)
print(message)
@contextlib.contextmanager
def capture_output(capture):
"""Capture sys.stdout and sys.stderr.
:type capture: Capture
"""
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = capture.stdout
sys.stderr = capture.stderr
try:
yield
finally:
sys.stdout = old_stdout
sys.stderr = old_stderr
if __name__ == '__main__':
main()
| gpl-3.0 | 5,752,061,305,124,197,000 | -97,302,412,387,276,770 | 33.201117 | 106 | 0.633126 | false |
teeple/pns_server | work/install/Python-2.7.4/Lib/pyclbr.py | 139 | 13388 | """Parse a Python module and describe its classes and methods.
Parse enough of a Python file to recognize imports and class and
method definitions, and to find out the superclasses of a class.
The interface consists of a single function:
readmodule_ex(module [, path])
where module is the name of a Python module, and path is an optional
list of directories where the module is to be searched. If present,
path is prepended to the system search path sys.path. The return
value is a dictionary. The keys of the dictionary are the names of
the classes defined in the module (including classes that are defined
via the from XXX import YYY construct). The values are class
instances of the class Class defined here. One special key/value pair
is present for packages: the key '__path__' has a list as its value
which contains the package search path.
A class is described by the class Class in this module. Instances
of this class have the following instance variables:
module -- the module name
name -- the name of the class
super -- a list of super classes (Class instances)
methods -- a dictionary of methods
file -- the file in which the class was defined
lineno -- the line in the file on which the class statement occurred
The dictionary of methods uses the method names as keys and the line
numbers on which the method was defined as values.
If the name of a super class is not recognized, the corresponding
entry in the list of super classes is not a class instance but a
string giving the name of the super class. Since import statements
are recognized and imported modules are scanned as well, this
shouldn't happen often.
A function is described by the class Function in this module.
Instances of this class have the following instance variables:
module -- the module name
name -- the name of the class
file -- the file in which the class was defined
lineno -- the line in the file on which the class statement occurred
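Example (an illustrative sketch only; it assumes a module named "spam" can
be found on the search path):
    import pyclbr
    info = pyclbr.readmodule_ex("spam")
    for name, obj in info.items():
        if isinstance(obj, pyclbr.Class):
            print name, obj.super, sorted(obj.methods)
        elif isinstance(obj, pyclbr.Function):
            print name, obj.lineno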
"""
import sys
import imp
import tokenize
from token import NAME, DEDENT, OP
from operator import itemgetter
__all__ = ["readmodule", "readmodule_ex", "Class", "Function"]
_modules = {} # cache of modules we've seen
# each Python class is represented by an instance of this class
class Class:
'''Class to represent a Python class.'''
def __init__(self, module, name, super, file, lineno):
self.module = module
self.name = name
if super is None:
super = []
self.super = super
self.methods = {}
self.file = file
self.lineno = lineno
def _addmethod(self, name, lineno):
self.methods[name] = lineno
class Function:
'''Class to represent a top-level Python function'''
def __init__(self, module, name, file, lineno):
self.module = module
self.name = name
self.file = file
self.lineno = lineno
def readmodule(module, path=None):
'''Backwards compatible interface.
Call readmodule_ex() and then only keep Class objects from the
resulting dictionary.'''
res = {}
for key, value in _readmodule(module, path or []).items():
if isinstance(value, Class):
res[key] = value
return res
def readmodule_ex(module, path=None):
'''Read a module file and return a dictionary of classes.
Search for MODULE in PATH and sys.path, read and parse the
module and return a dictionary with one entry for each class
found in the module.
'''
return _readmodule(module, path or [])
def _readmodule(module, path, inpackage=None):
'''Do the hard work for readmodule[_ex].
If INPACKAGE is given, it must be the dotted name of the package in
which we are searching for a submodule, and then PATH must be the
package search path; otherwise, we are searching for a top-level
module, and PATH is combined with sys.path.
'''
# Compute the full module name (prepending inpackage if set)
if inpackage is not None:
fullmodule = "%s.%s" % (inpackage, module)
else:
fullmodule = module
# Check in the cache
if fullmodule in _modules:
return _modules[fullmodule]
# Initialize the dict for this module's contents
dict = {}
# Check if it is a built-in module; we don't do much for these
if module in sys.builtin_module_names and inpackage is None:
_modules[module] = dict
return dict
# Check for a dotted module name
i = module.rfind('.')
if i >= 0:
package = module[:i]
submodule = module[i+1:]
parent = _readmodule(package, path, inpackage)
if inpackage is not None:
package = "%s.%s" % (inpackage, package)
if not '__path__' in parent:
raise ImportError('No package named {}'.format(package))
return _readmodule(submodule, parent['__path__'], package)
# Search the path for the module
f = None
if inpackage is not None:
f, fname, (_s, _m, ty) = imp.find_module(module, path)
else:
f, fname, (_s, _m, ty) = imp.find_module(module, path + sys.path)
if ty == imp.PKG_DIRECTORY:
dict['__path__'] = [fname]
path = [fname] + path
f, fname, (_s, _m, ty) = imp.find_module('__init__', [fname])
_modules[fullmodule] = dict
if ty != imp.PY_SOURCE:
# not Python source, can't do anything with this module
f.close()
return dict
stack = [] # stack of (class, indent) pairs
g = tokenize.generate_tokens(f.readline)
try:
for tokentype, token, start, _end, _line in g:
if tokentype == DEDENT:
lineno, thisindent = start
# close nested classes and defs
while stack and stack[-1][1] >= thisindent:
del stack[-1]
elif token == 'def':
lineno, thisindent = start
# close previous nested classes and defs
while stack and stack[-1][1] >= thisindent:
del stack[-1]
tokentype, meth_name, start = g.next()[0:3]
if tokentype != NAME:
continue # Syntax error
if stack:
cur_class = stack[-1][0]
if isinstance(cur_class, Class):
# it's a method
cur_class._addmethod(meth_name, lineno)
# else it's a nested def
else:
# it's a function
dict[meth_name] = Function(fullmodule, meth_name,
fname, lineno)
stack.append((None, thisindent)) # Marker for nested fns
elif token == 'class':
lineno, thisindent = start
# close previous nested classes and defs
while stack and stack[-1][1] >= thisindent:
del stack[-1]
tokentype, class_name, start = g.next()[0:3]
if tokentype != NAME:
continue # Syntax error
# parse what follows the class name
tokentype, token, start = g.next()[0:3]
inherit = None
if token == '(':
names = [] # List of superclasses
# there's a list of superclasses
level = 1
super = [] # Tokens making up current superclass
while True:
tokentype, token, start = g.next()[0:3]
if token in (')', ',') and level == 1:
n = "".join(super)
if n in dict:
# we know this super class
n = dict[n]
else:
c = n.split('.')
if len(c) > 1:
# super class is of the form
# module.class: look in module for
# class
m = c[-2]
c = c[-1]
if m in _modules:
d = _modules[m]
if c in d:
n = d[c]
names.append(n)
super = []
if token == '(':
level += 1
elif token == ')':
level -= 1
if level == 0:
break
elif token == ',' and level == 1:
pass
# only use NAME and OP (== dot) tokens for type name
elif tokentype in (NAME, OP) and level == 1:
super.append(token)
# expressions in the base list are not supported
inherit = names
cur_class = Class(fullmodule, class_name, inherit,
fname, lineno)
if not stack:
dict[class_name] = cur_class
stack.append((cur_class, thisindent))
elif token == 'import' and start[1] == 0:
modules = _getnamelist(g)
for mod, _mod2 in modules:
try:
# Recursively read the imported module
if inpackage is None:
_readmodule(mod, path)
else:
try:
_readmodule(mod, path, inpackage)
except ImportError:
_readmodule(mod, [])
except:
# If we can't find or parse the imported module,
# too bad -- don't die here.
pass
elif token == 'from' and start[1] == 0:
mod, token = _getname(g)
if not mod or token != "import":
continue
names = _getnamelist(g)
try:
# Recursively read the imported module
d = _readmodule(mod, path, inpackage)
except:
# If we can't find or parse the imported module,
# too bad -- don't die here.
continue
# add any classes that were defined in the imported module
# to our name space if they were mentioned in the list
for n, n2 in names:
if n in d:
dict[n2 or n] = d[n]
elif n == '*':
# don't add names that start with _
for n in d:
if n[0] != '_':
dict[n] = d[n]
except StopIteration:
pass
f.close()
return dict
def _getnamelist(g):
# Helper to get a comma-separated list of dotted names plus 'as'
# clauses. Return a list of pairs (name, name2) where name2 is
# the 'as' name, or None if there is no 'as' clause.
names = []
while True:
name, token = _getname(g)
if not name:
break
if token == 'as':
name2, token = _getname(g)
else:
name2 = None
names.append((name, name2))
while token != "," and "\n" not in token:
token = g.next()[1]
if token != ",":
break
return names
def _getname(g):
# Helper to get a dotted name, return a pair (name, token) where
# name is the dotted name, or None if there was no dotted name,
# and token is the next input token.
parts = []
tokentype, token = g.next()[0:2]
if tokentype != NAME and token != '*':
return (None, token)
parts.append(token)
while True:
tokentype, token = g.next()[0:2]
if token != '.':
break
tokentype, token = g.next()[0:2]
if tokentype != NAME:
break
parts.append(token)
return (".".join(parts), token)
def _main():
# Main program for testing.
import os
mod = sys.argv[1]
if os.path.exists(mod):
path = [os.path.dirname(mod)]
mod = os.path.basename(mod)
if mod.lower().endswith(".py"):
mod = mod[:-3]
else:
path = []
dict = readmodule_ex(mod, path)
objs = dict.values()
objs.sort(lambda a, b: cmp(getattr(a, 'lineno', 0),
getattr(b, 'lineno', 0)))
for obj in objs:
if isinstance(obj, Class):
print "class", obj.name, obj.super, obj.lineno
methods = sorted(obj.methods.iteritems(), key=itemgetter(1))
for name, lineno in methods:
if name != "__path__":
print " def", name, lineno
elif isinstance(obj, Function):
print "def", obj.name, obj.lineno
if __name__ == "__main__":
_main()
| gpl-2.0 | -6,879,441,408,613,261,000 | -967,754,478,496,443,300 | 37.918605 | 76 | 0.51225 | false |