import inspect
import os
from oslo_utils import importutils
import osprofiler.opts as profiler
import six.moves as six
from nova import conf
from nova import test
class TestProfiler(test.NoDBTestCase):
def test_all_public_methods_are_traced(self):
# NOTE(rpodolyaka): osprofiler only wraps class methods when option
# CONF.profiler.enabled is set to True and the default value is False,
# which means in our usual test run we use original, not patched
        # classes. In order to test that we actually properly wrap the methods
        # we are interested in, this test case sets CONF.profiler.enabled to
# True and reloads all the affected Python modules (application of
# decorators and metaclasses is performed at module import time).
# Unfortunately, this leads to subtle failures of other test cases
# (e.g. super() is performed on a "new" version of a class instance
# created after a module reload while the class name is a reference to
# an "old" version of the class). Thus, this test is run in isolation.
if not os.getenv('TEST_OSPROFILER', False):
self.skipTest('TEST_OSPROFILER env variable is not set. '
'Skipping osprofiler tests...')
# reinitialize the metaclass after enabling osprofiler
profiler.set_defaults(conf.CONF)
self.flags(enabled=True, group='profiler')
six.reload_module(importutils.import_module('nova.manager'))
classes = [
'nova.compute.api.API',
'nova.compute.manager.ComputeManager',
'nova.compute.rpcapi.ComputeAPI',
'nova.conductor.manager.ComputeTaskManager',
'nova.conductor.manager.ConductorManager',
'nova.conductor.rpcapi.ComputeTaskAPI',
'nova.conductor.rpcapi.ConductorAPI',
'nova.image.api.API',
'nova.network.neutron.ClientWrapper',
'nova.scheduler.manager.SchedulerManager',
'nova.scheduler.rpcapi.SchedulerAPI',
'nova.virt.libvirt.vif.LibvirtGenericVIFDriver',
'nova.virt.libvirt.volume.volume.LibvirtBaseVolumeDriver',
]
for clsname in classes:
# give the metaclass and trace_cls() decorator a chance to patch
# methods of the classes above
six.reload_module(
importutils.import_module(clsname.rsplit('.', 1)[0]))
cls = importutils.import_class(clsname)
for attr, obj in cls.__dict__.items():
# only public methods are traced
if attr.startswith('_'):
continue
# only checks callables
if not (inspect.ismethod(obj) or inspect.isfunction(obj)):
continue
# osprofiler skips static methods
if isinstance(obj, staticmethod):
continue
self.assertTrue(getattr(obj, '__traced__', False), obj)
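# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original test): the assertion
# above relies on osprofiler marking every wrapped public method with a
# ``__traced__`` attribute.  A hypothetical decorator doing the same marking
# might look like this; the real wrapping is performed by osprofiler's
# trace_cls()/metaclass machinery, not by this helper.
def _mark_traced_example(func):
    """Return a wrapper flagged the way the test above expects."""
    def wrapper(*args, **kwargs):
        return func(*args, **kwargs)
    wrapper.__traced__ = True  # the attribute checked by assertTrue() above
    return wrapper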
| {
"content_hash": "f7325e79f9fe84664600a290e8d2288d",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 78,
"avg_line_length": 45.08955223880597,
"alnum_prop": 0.6262826878517047,
"repo_name": "rahulunair/nova",
"id": "0d2ccf17879a851d6a68632c8650e025c75af65b",
"size": "3657",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/tests/unit/test_profiler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PHP",
"bytes": "3325"
},
{
"name": "Python",
"bytes": "22804450"
},
{
"name": "Shell",
"bytes": "41649"
},
{
"name": "Smarty",
"bytes": "472764"
}
],
"symlink_target": ""
} |
import rospy
import actionlib
from vigir_synthesis_msgs.msg import *
import ltl_compilation_client
import ltl_synthesis_client
import sm_generate_client
class BehaviorSynthesisActionServer(object):
'''ROS Action server that handles the following processes:
* LTL Specification Compilation
* LTL Synthesis (resulting in an automaton)
* State Machine Generation/Instantiation
Depending on the synthesis request's options, all
    or a subset of the above steps will be carried out.
'''
# Messages that are used to publish feedback/result
_feedback = BehaviorSynthesisFeedback()
_result = BehaviorSynthesisResult()
def __init__(self, name):
self._action_name = name
self._as = actionlib.SimpleActionServer(self._action_name,
BehaviorSynthesisAction,
execute_cb = self.execute_cb,
auto_start = False)
self._as.start()
def execute_cb(self, goal):
'''The code to be executed when a BehaviorSynthesisActionGoal is received.'''
r = rospy.Rate(1) # FIX: What should this be?
success = True # start optimistically
# Acknowledge goal reception
self.set_and_publish_feedback("Received behavior synthesis request.")
# Examine the goal message
synthesis_goal = goal.request
synthesis_options = goal.synthesis_options
#TODO: receive a callback when a preempt request is received
if self._as.is_preempt_requested():
rospy.loginfo('%s: Preempted' % self._action_name)
self._as.set_preempted()
success = False
if success:
# Request LTL Specification Compilation from the corresponding server
# and also update and publish the appropriate feedback
ltl_spec, error_code_value, success = self.handle_ltl_specification_request(synthesis_goal)
if success:
# Request LTL Synthesis from the corresponding server
# and also update and publish the appropriate feedback
automaton, error_code_value, success = self.handle_ltl_synthesis_request(ltl_spec, synthesis_goal.name)
if success:
# Request State Machine Generation from the corresponding server
# and also update and publish the appropriate feedback
                    # TODO: how to get the yaml_config file?
sm, error_code_value, success = self.handle_sm_generation_request(automaton, synthesis_goal.system)
if success:
self._result.error_code = SynthesisErrorCodes(SynthesisErrorCodes.SUCCESS)
self._result.states = sm
rospy.loginfo('\033[92m%s: Succeeded\033[0m' % self._action_name)
self._as.set_succeeded(self._result)
else:
self._result.error_code = SynthesisErrorCodes(error_code_value)
rospy.logerr('%s: Failed' % self._action_name)
self._as.set_aborted(self._result)
def handle_ltl_specification_request(self, synthesis_goal):
'''
Makes a LTL Specification Compilation request
to the corresponding service and handles the response.
synthesis_goal: BehaviorSynthesisRequest A partial specification.
'''
system = synthesis_goal.system
goals = synthesis_goal.goal.replace(' ', '').split(',')
ics = synthesis_goal.initial_condition.replace(' ', '').split(',')
sm_outcomes = synthesis_goal.sm_outcomes
custom_ltl = synthesis_goal.ltl_specification #TODO: Handle this field
response = ltl_compilation_client.ltl_compilation_client(system,
goals,
ics,
sm_outcomes)
# Update success and publish feedback based on response
        if response.error_code.value == SynthesisErrorCodes.SUCCESS:
self.set_and_publish_feedback("Received LTL specification")
success = True
else:
self.set_and_publish_feedback("Did not receive LTL specification")
success = False
return response.ltl_specification, response.error_code.value, success
def handle_ltl_synthesis_request(self, ltl_spec, path_name):
'''
Makes a LTL Synthesis request
to the corresponding service and handles the response.
ltl_spec: LTLSpecification A complete LTL specification.
'''
response = ltl_synthesis_client.ltl_synthesis_client(ltl_spec, path_name)
if not response:
self.set_and_publish_feedback("The LTL Synthesis service failed!")
success = False
return None, SynthesisErrorCodes.SYNTHESIS_FAILED, success
if response.synthesizable:
self.set_and_publish_feedback("The LTL Specification is synthesizable")
success = True
else:
self.set_and_publish_feedback("The LTL Specification is unsynthesizable")
success = False
return response.automaton, response.error_code.value, success
def handle_sm_generation_request(self, synthesized_automata, system):
'''
Generate State Machine definitions for a given
robotic system based on a synthesized automaton.
@param synthesized_automata FSAutomaton The automaton to instantiate as a SM.
@param system string System name. e.g. "atlas"
'''
response = sm_generate_client.sm_generate_client(synthesized_automata, system)
# Update success and publish feedback based on response
        if response.error_code.value == SynthesisErrorCodes.SUCCESS:
self.set_and_publish_feedback("Generated State Machine definitions")
success = True
else:
self.set_and_publish_feedback("Unable to generate State Machine.")
success = False
return response.state_definition, response.error_code.value, success
def set_and_publish_feedback(self, status):
'''Helper method for updating and publishing feedback.'''
self._feedback.status = status
self._as.publish_feedback(self._feedback)
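# ---------------------------------------------------------------------------
# Editor's usage sketch (hypothetical, not part of the original node): a
# minimal client for this action server might look roughly like the function
# below.  The goal fields mirror what execute_cb() reads above (goal.request
# with system/goal/initial_condition); the BehaviorSynthesisGoal message name
# follows the usual actionlib naming convention and is an assumption, as are
# the server name and the illustrative field values.
def _example_synthesis_client(server_name='vigir_behavior_synthesis'):
    client = actionlib.SimpleActionClient(server_name, BehaviorSynthesisAction)
    client.wait_for_server()
    goal = BehaviorSynthesisGoal()            # assumed auto-generated goal type
    goal.request.system = 'atlas'             # illustrative values only
    goal.request.goal = 'pickup_object'
    goal.request.initial_condition = 'stand_prep'
    client.send_goal(goal)
    client.wait_for_result()
    return client.get_result()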
if __name__ == '__main__':
rospy.init_node('vigir_behavior_synthesis')
BehaviorSynthesisActionServer(rospy.get_name())
rospy.spin()
| {
"content_hash": "951d9c18b45cd52c8ba04a2ac4d0346c",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 115,
"avg_line_length": 41.025,
"alnum_prop": 0.6256855575868373,
"repo_name": "team-vigir/vigir_behavior_synthesis",
"id": "72c9fabe8496c24eecb53f7e50536a773b710542",
"size": "6588",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vigir_synthesis_manager/src/vigir_synthesis_manager/behavior_synthesis_server.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CMake",
"bytes": "5964"
},
{
"name": "Python",
"bytes": "192231"
},
{
"name": "Shell",
"bytes": "1820"
}
],
"symlink_target": ""
} |
"""
Tests for L{txdav.xml.base}.
"""
from twisted.trial.unittest import TestCase
from txdav.xml.base import decodeXMLName, encodeXMLName
from txdav.xml.base import WebDAVUnknownElement
from txdav.xml.parser import WebDAVDocument
class NameEncodeTests(TestCase):
"""
Name encoding tests.
"""
def test_decodeXMLName(self):
# Empty name
self.assertRaises(ValueError, decodeXMLName, "")
self.assertRaises(ValueError, decodeXMLName, "{}")
self.assertRaises(ValueError, decodeXMLName, "{x}")
# Weird bracket cases
self.assertRaises(ValueError, decodeXMLName, "{")
self.assertRaises(ValueError, decodeXMLName, "x{")
self.assertRaises(ValueError, decodeXMLName, "{x")
self.assertRaises(ValueError, decodeXMLName, "}")
self.assertRaises(ValueError, decodeXMLName, "x}")
self.assertRaises(ValueError, decodeXMLName, "}x")
self.assertRaises(ValueError, decodeXMLName, "{{}")
self.assertRaises(ValueError, decodeXMLName, "{{}}")
self.assertRaises(ValueError, decodeXMLName, "x{}")
# Empty namespace is OK
self.assertEquals(decodeXMLName("x"), (None, "x"))
self.assertEquals(decodeXMLName("{}x"), (None, "x"))
# Normal case
self.assertEquals(decodeXMLName("{namespace}name"), ("namespace", "name"))
def test_encodeXMLName(self):
# No namespace
self.assertEquals(encodeXMLName(None, "name"), "name")
self.assertEquals(encodeXMLName("", "name"), "name")
# Normal case
self.assertEquals(encodeXMLName("namespace", "name"), "{namespace}name")
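# ---------------------------------------------------------------------------
# Editor's sketch (not the txdav implementation): the tests above pin down
# Clark-notation behaviour, i.e. "{namespace}name" <-> (namespace, name) with
# an empty or missing namespace decoding to None.  A simplified stand-alone
# decoder consistent with those expectations:
def _clark_decode_sketch(name):
    if not name:
        raise ValueError("name is empty")
    if name.startswith("{"):
        namespace, _, local = name[1:].partition("}")
        if "{" in namespace or "}" in local or not local:
            raise ValueError("invalid name: %r" % (name,))
        return (namespace or None, local)
    if "{" in name or "}" in name:
        raise ValueError("invalid name: %r" % (name,))
    return (None, name)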
class WebDAVElementTestsMixin:
"""
Mixin for L{TestCase}s which test a L{WebDAVElement} subclass.
"""
def test_fromString(self):
"""
The XML representation of L{WebDAVDocument} can be parsed into a
L{WebDAVDocument} instance using L{WebDAVDocument.fromString}.
"""
doc = WebDAVDocument.fromString(self.serialized)
self.assertEquals(doc, WebDAVDocument(self.element))
def test_toxml(self):
"""
L{WebDAVDocument.toxml} returns a C{str} giving the XML representation
of the L{WebDAVDocument} instance.
"""
document = WebDAVDocument(self.element)
self.assertEquals(
document,
WebDAVDocument.fromString(document.toxml()))
class WebDAVUnknownElementTests(WebDAVElementTestsMixin, TestCase):
"""
Tests for L{WebDAVUnknownElement}.
"""
serialized = (
"""<?xml version="1.0" encoding="utf-8" ?>"""
"""<T:foo xmlns:T="http://twistedmatrix.com/"/>"""
)
element = WebDAVUnknownElement.withName(
"http://twistedmatrix.com/",
"foo"
)
| {
"content_hash": "9bdaf273ba2e4356ddda0473709bf69c",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 82,
"avg_line_length": 32.63529411764706,
"alnum_prop": 0.6434751261715934,
"repo_name": "macosforge/ccs-calendarserver",
"id": "78b154687db422eff69cc0a164bd0a26ed94e6fd",
"size": "3946",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "txdav/xml/test/test_base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class TokenValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="token", parent_name="table.stream", **kwargs):
super(TokenValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
no_blank=kwargs.pop("no_blank", True),
role=kwargs.pop("role", "info"),
strict=kwargs.pop("strict", True),
**kwargs
)
| {
"content_hash": "522c262a862db1d35037610e32875460",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 82,
"avg_line_length": 38.42857142857143,
"alnum_prop": 0.5910780669144982,
"repo_name": "plotly/python-api",
"id": "dd775062137cf463d3ff6cd3bb10b845a212cc0b",
"size": "538",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/table/stream/_token.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
from binwalk.core.compat import *
from binwalk.core.common import BlockFile
from binwalk.core.module import Module, Option, Kwarg
class Plotter(Module):
'''
Base class for visualizing binaries in Qt.
Other plotter classes are derived from this.
'''
VIEW_DISTANCE = 1024
MAX_2D_PLOT_POINTS = 12500
MAX_3D_PLOT_POINTS = 25000
TITLE = "Binary Visualization"
CLI = [
Option(short='3',
long='3D',
kwargs={'axis': 3, 'enabled': True},
description='Generate a 3D binary visualization'),
Option(short='2',
long='2D',
kwargs={'axis': 2, 'enabled': True},
description='Project data points onto 3D cube walls only'),
Option(short='V',
long='points',
type=int,
kwargs={'max_points': 0},
description='Set the maximum number of plotted data points'),
# Option(short='V',
# long='grids',
# kwargs={'show_grids' : True},
# description='Display the x-y-z grids in the resulting plot'),
]
KWARGS = [
Kwarg(name='axis', default=3),
Kwarg(name='max_points', default=0),
Kwarg(name='show_grids', default=False),
Kwarg(name='enabled', default=False),
]
# There isn't really any useful data to print to console. Disable header
# and result output.
HEADER = None
RESULT = None
def init(self):
import pyqtgraph.opengl as gl
from pyqtgraph.Qt import QtGui
self.verbose = self.config.verbose
self.offset = self.config.offset
self.length = self.config.length
self.plane_count = -1
self.plot_points = None
if self.axis == 2:
self.MAX_PLOT_POINTS = self.MAX_2D_PLOT_POINTS
self._generate_data_point = self._generate_2d_data_point
elif self.axis == 3:
self.MAX_PLOT_POINTS = self.MAX_3D_PLOT_POINTS
self._generate_data_point = self._generate_3d_data_point
else:
raise Exception(
"Invalid Plotter axis specified: %d. Must be one of: [2,3]" % self.axis)
if not self.max_points:
self.max_points = self.MAX_PLOT_POINTS
self.app = QtGui.QApplication([])
self.window = gl.GLViewWidget()
self.window.opts['distance'] = self.VIEW_DISTANCE
if len(self.config.target_files) == 1:
self.window.setWindowTitle(self.config.target_files[0])
def _print(self, message):
'''
Print console messages. For internal use only.
'''
if self.verbose:
print(message)
def _generate_plot_points(self, data_points):
'''
Generates plot points from a list of data points.
        @data_points - A dictionary containing each unique point and its frequency of occurrence.
Returns a set of plot points.
'''
total = 0
min_weight = 0
weightings = {}
plot_points = {}
# If the number of data points exceeds the maximum number of allowed data points, use a
        # weighting system to eliminate data points that occur less frequently.
if sum(data_points.values()) > self.max_points:
# First, generate a set of weight values 1 - 10
for i in range(1, 11):
weightings[i] = 0
            # Go through every data point and the number of times it occurs
for (point, count) in iterator(data_points):
# For each data point, compare it to each remaining weight
# value
for w in get_keys(weightings):
# If the number of times this data point occurred is >= the weight value,
# then increment the weight value. Since weight values are ordered lowest
# to highest, this means that more frequent data points also increment lower
# weight values. Thus, the more high-frequency data points there are, the
# more lower-frequency data points are eliminated.
if count >= w:
weightings[w] += 1
else:
break
# Throw out weight values that exceed the maximum number of
# data points
if weightings[w] > self.max_points:
del weightings[w]
# If there's only one weight value left, no sense in continuing
# the loop...
if len(weightings) == 1:
break
# The least weighted value is our minimum weight
min_weight = min(weightings)
# Get rid of all data points that occur less frequently than our
# minimum weight
for point in get_keys(data_points):
if data_points[point] < min_weight:
del data_points[point]
for point in sorted(data_points, key=data_points.get, reverse=True):
plot_points[point] = data_points[point]
# Register this as a result in case future modules need access to the raw point information,
# but mark plot as False to prevent the entropy module from
# attempting to overlay this data on its graph.
self.result(point=point, plot=False)
total += 1
if total >= self.max_points:
break
return plot_points
def _generate_data_point(self, data):
'''
Subclasses must override this to return the appropriate data point.
        @data - A string of data, self.axis bytes in length.
Returns a data point tuple.
'''
return (0, 0, 0)
def _generate_data_points(self, fp):
'''
        Generates a dictionary of data points and their frequency of occurrence.
@fp - The BlockFile object to generate data points from.
Returns a dictionary.
'''
i = 0
data_points = {}
self._print("Generating data points for %s" % fp.name)
# We don't need any extra data from BlockFile
fp.set_block_size(peek=0)
while True:
(data, dlen) = fp.read_block()
if not data or not dlen:
break
i = 0
while (i + (self.axis - 1)) < dlen:
point = self._generate_data_point(data[i:i + self.axis])
if has_key(data_points, point):
data_points[point] += 1
else:
data_points[point] = 1
i += 3
return data_points
def _generate_plot(self, plot_points):
import numpy as np
import pyqtgraph.opengl as gl
nitems = float(len(plot_points))
pos = np.empty((nitems, 3))
size = np.empty((nitems))
color = np.empty((nitems, 4))
i = 0
for (point, weight) in iterator(plot_points):
r = 0.0
g = 0.0
b = 0.0
pos[i] = point
frequency_percentage = (weight / nitems)
# Give points that occur more frequently a brighter color and larger point size.
# Frequency is determined as a percentage of total unique data
# points.
if frequency_percentage > .010:
size[i] = .20
r = 1.0
elif frequency_percentage > .005:
size[i] = .15
b = 1.0
elif frequency_percentage > .002:
size[i] = .10
g = 1.0
r = 1.0
else:
size[i] = .05
g = 1.0
color[i] = (r, g, b, 1.0)
i += 1
scatter_plot = gl.GLScatterPlotItem(
pos=pos, size=size, color=color, pxMode=False)
scatter_plot.translate(-127.5, -127.5, -127.5)
return scatter_plot
def plot(self, wait=True):
import pyqtgraph.opengl as gl
self.window.show()
if self.show_grids:
xgrid = gl.GLGridItem()
ygrid = gl.GLGridItem()
zgrid = gl.GLGridItem()
self.window.addItem(xgrid)
self.window.addItem(ygrid)
self.window.addItem(zgrid)
# Rotate x and y grids to face the correct direction
xgrid.rotate(90, 0, 1, 0)
ygrid.rotate(90, 1, 0, 0)
# Scale grids to the appropriate dimensions
xgrid.scale(12.8, 12.8, 12.8)
ygrid.scale(12.8, 12.8, 12.8)
zgrid.scale(12.8, 12.8, 12.8)
for fd in iter(self.next_file, None):
data_points = self._generate_data_points(fd)
self._print("Generating plot points from %d data points" %
len(data_points))
self.plot_points = self._generate_plot_points(data_points)
del data_points
self._print("Generating graph from %d plot points" %
len(self.plot_points))
self.window.addItem(self._generate_plot(self.plot_points))
if wait:
self.wait()
def wait(self):
from pyqtgraph.Qt import QtCore, QtGui
t = QtCore.QTimer()
t.start(50)
QtGui.QApplication.instance().exec_()
def _generate_3d_data_point(self, data):
'''
Plot data points within a 3D cube.
'''
return (ord(data[0]), ord(data[1]), ord(data[2]))
def _generate_2d_data_point(self, data):
'''
Plot data points projected on each cube face.
'''
self.plane_count += 1
if self.plane_count > 5:
self.plane_count = 0
if self.plane_count == 0:
return (0, ord(data[0]), ord(data[1]))
elif self.plane_count == 1:
return (ord(data[0]), 0, ord(data[1]))
elif self.plane_count == 2:
return (ord(data[0]), ord(data[1]), 0)
elif self.plane_count == 3:
return (255, ord(data[0]), ord(data[1]))
elif self.plane_count == 4:
return (ord(data[0]), 255, ord(data[1]))
elif self.plane_count == 5:
return (ord(data[0]), ord(data[1]), 255)
def run(self):
self.plot()
return True
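# ---------------------------------------------------------------------------
# Editor's illustration (standalone, not used by the module): a simplified
# version of the frequency-weighting idea in Plotter._generate_plot_points(),
# shown on a plain dictionary so the pruning behaviour is easier to follow.
# It is not byte-for-byte the same logic (the real method prunes weights as it
# iterates); max_points and the 1-10 weight range mirror the code above.
def _weighting_demo(data_points, max_points):
    """Return a copy of data_points with the least frequent entries pruned."""
    if sum(data_points.values()) <= max_points:
        return dict(data_points)
    weightings = dict((w, 0) for w in range(1, 11))
    for count in data_points.values():
        for w in sorted(weightings):
            if count >= w:
                weightings[w] += 1
            else:
                break
    # keep only the weight thresholds whose tally fits inside the point budget
    surviving = [w for w, tally in weightings.items() if tally <= max_points]
    min_weight = min(surviving) if surviving else max(weightings)
    return dict((p, c) for p, c in data_points.items() if c >= min_weight)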
| {
"content_hash": "9002d7a0c9a0c4c4607220a9fd57d27c",
"timestamp": "",
"source": "github",
"line_count": 319,
"max_line_length": 104,
"avg_line_length": 32.73354231974922,
"alnum_prop": 0.5315073740662708,
"repo_name": "sundhaug92/binwalk",
"id": "7fa1abbed5cc52c283ce85171f83d4e73f5cac39",
"size": "10489",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/binwalk/modules/binvis.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "301375"
},
{
"name": "Shell",
"bytes": "6291"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
import os
import re
import ast
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('qtk/__init__.py', 'rb') as f:
version = str(ast.literal_eval(_version_re.search(
f.read().decode('utf-8')).group(1)))
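# Editor's note: the regex above expects qtk/__init__.py to contain a literal
# assignment such as ``__version__ = '0.1.0'`` (the version value here is
# only an example).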
BASE_PATH = os.path.dirname(__file__)
def get_requirements(suffix=''):
with open(os.path.join(BASE_PATH, 'Requirements%s.txt' % suffix)) as f:
rv = f.read().splitlines()
return rv
setup(
name='qtk',
version=version,
url='https://github.com/gouthambs/qtk-python',
license='MIT',
author='Goutham Balaraman',
author_email='gouthaman.balaraman@gmail.com',
description='A QuantLib Python ToolKit',
long_description=__doc__,
packages=find_packages(),
zip_safe=False,
include_package_data=True,
platforms='any',
install_requires=get_requirements(),
tests_require=["nose"],
test_suite='nose.collector',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Topic :: Scientific/Engineering',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
| {
"content_hash": "0dd53ef073a9d5f36abccd15ab15f6e8",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 75,
"avg_line_length": 29.76,
"alnum_prop": 0.6223118279569892,
"repo_name": "gouthambs/qtk-python",
"id": "6885ae19c29c67677ee60a663ce448fe742b4380",
"size": "1488",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "140473"
}
],
"symlink_target": ""
} |
"""
https://leetcode.com/problems/smallest-subtree-with-all-the-deepest-nodes/
"""
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
from common.list_to_tree_node import listToTreeNode
from common.tree_node_to_list import treeNodeToList
class Solution:
def subtreeWithAllDeepest(self, root):
"""
:type root: TreeNode
:rtype: TreeNode
"""
stack = [root]
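        # Editor's note: BFS-style walk -- a node's child stays on the queue
        # only while that child still has children of its own, so the loop
        # ends once a single node remains whose descendants (if any) are all
        # leaves; that remaining node is returned as the answer below.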
while len(stack) > 1 or (stack[0].left and (stack[0].left.left or stack[0].left.right)) or (stack[0].right and (stack[0].right.left or stack[0].right.right)):
node = stack.pop(0)
if node.left and (node.left.left or node.left.right):
stack.append(node.left)
if node.right and (node.right.left or node.right.right):
stack.append(node.right)
return stack[0]
import unittest
class Test(unittest.TestCase):
def test(self):
solution = Solution()
self.assertEqual(treeNodeToList(solution.subtreeWithAllDeepest(
listToTreeNode([3, 5, 1, 6, 2, 0, 8, None, None, 7, 4])
)), [2, 7, 4])
self.assertEqual(treeNodeToList(solution.subtreeWithAllDeepest(
listToTreeNode([0, 2, 1, None, None, 3])
)), [1, 3])
# self.assertEqual(treeNodeToList(solution.subtreeWithAllDeepest(
# listToTreeNode([0, 1, 3, None, 2])
# )), [2])
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "a432bbbcd55f70966ca09a0bd52a8401",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 166,
"avg_line_length": 29.884615384615383,
"alnum_prop": 0.5926640926640927,
"repo_name": "vivaxy/algorithms",
"id": "20f1e906233296ed37671135a3d119de8f5825ba",
"size": "1554",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/problems/smallest_subtree_with_all_the_deepest_nodes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "130225"
},
{
"name": "Python",
"bytes": "272982"
},
{
"name": "Shell",
"bytes": "439"
}
],
"symlink_target": ""
} |
import numpy as np
import copy
def combine_predictions(Predictions, predictions_list, index_list):
"""Combine predictions predictions_list[index_list].
Parameters
----------
Predictions : a Predictions type
Needed to call combine.
predictions_list : list of instances of Predictions
index_list : list of integers
Indices of the submissions to combine (possibly with replacement).
Returns
-------
combined_predictions : instance of Predictions
"""
predictions_list_to_combine = [predictions_list[i] for i in index_list]
combined_predictions = Predictions.combine(predictions_list_to_combine)
return combined_predictions
def get_score_cv_bags(score_type, predictions_list, ground_truths,
test_is_list=None):
"""
Compute the bagged scores of the predictions in predictions_list.
test_is_list (list of list of integer indexes) controls which points
in which fold participate in the combination. We return the
combined predictions and a list of scores, where each element i is the
score of the combination of the first i+1 folds.
Parameters
----------
score_type : instance implementing BaseScoreType signature
predictions_list : list of instances of Predictions
ground_truths : instance of Predictions
test_is_list : list of list of integers
Indices of points that should be bagged in each prediction. If None,
the full prediction vectors will be bagged.
Returns
-------
combined_predictions : instance of Predictions
score_cv_bags : list of scores (typically floats)
"""
Predictions = type(ground_truths)
if test_is_list is None: # we combine the full list
test_is_list = [range(len(predictions.y_pred))
for predictions in predictions_list]
y_comb = np.array(
[Predictions(n_samples=len(ground_truths.y_pred))
for _ in predictions_list])
score_cv_bags = []
for i, test_is in enumerate(test_is_list):
# setting valid fold indexes of points to be combined
y_comb[i].set_valid_in_train(predictions_list[i], test_is)
# combine first i folds
combined_predictions = combine_predictions(
Predictions, y_comb[:i + 1], range(i + 1))
# get indexes of points with at least one prediction in
# combined_predictions
valid_indexes = combined_predictions.valid_indexes
# set valid slices in ground truth and predictions
ground_truths_local = copy.deepcopy(ground_truths)
ground_truths_local.set_slice(valid_indexes)
combined_predictions.set_slice(valid_indexes)
# score the combined predictions
score_of_prefix = score_type.score_function(
ground_truths_local, combined_predictions)
score_cv_bags.append(score_of_prefix)
# Alex' old suggestion: maybe use masked arrays rather than passing
# valid_indexes
# TODO: will crash if len(test_is_list) == 0
return combined_predictions, score_cv_bags
def _get_next_best_submission(predictions_list, ground_truths,
score_type, best_index_list,
min_improvement=0.0):
"""Find net best submission if added to predictions_list[best_index_list].
Find the model that minimizes the score if added to
predictions_list[best_index_list] using score_type.score_function.
If there is no model improving the input
combination, the input best_index_list is returned. Otherwise the index of
the best model is added to the list. We could also return the combined
prediction (for efficiency, so the combination would not have to be done
each time; right now the algo is quadratic), but I don't think any
meaningful rule will be associative, in which case we should redo the
combination from scratch each time the set changes. Since mostly
combination = mean, we could maintain the sum and the number of models, but
it would be a bit bulky. We'll see how this evolves.
Parameters
----------
predictions_list : list of instances of Predictions
Each element of the list is an instance of Predictions of a model
on the same (cross-validation valid) data points.
score_type : instance implementing BaseScoreType signature
The score to improve by adding one submission to the ensemble.
ground_truths : instance of Predictions
The ground truth.
best_index_list : list of integers
Indices of the current best model.
Returns
-------
best_index_list : list of integers
Indices of the models in the new combination. If the same as input,
no models were found improving the score.
"""
Predictions = type(ground_truths)
best_predictions = combine_predictions(
Predictions, predictions_list, best_index_list)
best_score = score_type.score_function(ground_truths, best_predictions)
best_index = -1
# Combination with replacement, what Caruana suggests. Basically, if a
# model is added several times, it's upweighted, leading to
# integer-weighted ensembles.
r = np.arange(len(predictions_list))
# Randomization doesn't matter, only in case of exact equality.
# np.random.shuffle(r)
# print r
for i in r:
# try to append the ith prediction to the current best predictions
new_index_list = np.append(best_index_list, i)
combined_predictions = combine_predictions(
Predictions, predictions_list, new_index_list)
new_score = score_type.score_function(
ground_truths, combined_predictions)
if score_type.is_lower_the_better:
is_improved = new_score < best_score - min_improvement
else:
is_improved = new_score > best_score + min_improvement
if is_improved:
best_index = i
best_score = new_score
if best_index > -1:
return np.append(best_index_list, best_index), best_score
else:
return best_index_list, best_score
def blend_on_fold(predictions_list, ground_truths_valid, score_type,
max_n_ensemble=80, min_improvement=0.0):
"""Construct the best model combination on a single fold.
Using greedy forward selection with replacement. See
http://www.cs.cornell.edu/~caruana/ctp/ct.papers/
caruana.icml04.icdm06long.pdf.
Then sets foldwise contributivity.
Parameters
----------
force_ensemble : boolean
To force include deleted models
"""
# The submissions must have is_to_ensemble set to True. It is for
    # forgetting models. Users can also delete models in which case
# we make is_valid false. We then only use these models if
# force_ensemble is True.
# We can further bag here which should be handled in config (or
# ramp table.) Or we could bag in get_next_best_single_fold
if len(predictions_list) == 0:
return None, None, None, None
valid_scores = [score_type.score_function(ground_truths_valid, predictions)
for predictions in predictions_list]
if score_type.is_lower_the_better:
best_prediction_index = np.argmin(valid_scores)
else:
best_prediction_index = np.argmax(valid_scores)
score = valid_scores[best_prediction_index]
best_index_list = np.array([best_prediction_index])
is_improved = True
while is_improved and len(best_index_list) < max_n_ensemble:
print('\t{}: {}'.format(best_index_list, score))
old_best_index_list = best_index_list
best_index_list, score = _get_next_best_submission(
predictions_list, ground_truths_valid, score_type, best_index_list,
min_improvement)
is_improved = len(best_index_list) != len(old_best_index_list)
return best_index_list
# we share a unit of 1. among the contributive submissions
# unit_contributivity = 1. / len(best_index_list)
# for i in best_index_list:
# selected_submissions_on_fold[i].contributivity +=\
# unit_contributivity
# combined_predictions = combine_predictions_list(
# predictions_list, index_list=best_index_list)
# best_predictions = predictions_list[best_index_list[0]]
# test_predictions_list = [
# submission_on_fold.test_predictions
# for submission_on_fold in selected_submissions_on_fold
# ]
# if any(test_predictions_list) is None:
# logger.error("Can't compute combined test score," +
# " some submissions are untested.")
# combined_test_predictions = None
# best_test_predictions = None
# else:
# combined_test_predictions = combine_predictions_list(
# test_predictions_list, index_list=best_index_list)
# best_test_predictions = test_predictions_list[best_index_list[0]]
# return combined_predictions, best_predictions,\
# combined_test_predictions, best_test_predictions
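# ---------------------------------------------------------------------------
# Editor's illustration (standalone, not used by the functions above): the
# greedy forward selection with replacement from blend_on_fold(), reduced to
# averaging plain numbers so the "keep adding the candidate that most
# improves the score, possibly the same one twice" behaviour is easy to see.
# Candidate values and the target are made up for the example.
def _greedy_blend_demo(candidates, target, max_n_ensemble=5):
    """Greedily pick candidate indices whose running mean approaches target.
    e.g. _greedy_blend_demo([0.2, 0.5, 0.9], target=0.7) returns [1, 2].
    """
    best = [min(range(len(candidates)),
                key=lambda i: abs(candidates[i] - target))]
    improved = True
    while improved and len(best) < max_n_ensemble:
        current = abs(np.mean([candidates[i] for i in best]) - target)
        scores = [abs(np.mean([candidates[j] for j in best + [i]]) - target)
                  for i in range(len(candidates))]
        improved = min(scores) < current
        if improved:
            best.append(int(np.argmin(scores)))
    return best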
| {
"content_hash": "15349d96ad12c4e4c98adbe70874f8e9",
"timestamp": "",
"source": "github",
"line_count": 210,
"max_line_length": 79,
"avg_line_length": 43.13809523809524,
"alnum_prop": 0.6692791698863009,
"repo_name": "paris-saclay-cds/ramp-workflow",
"id": "dac4b5fe60c64543268e9a43ef5542a0d75a750b",
"size": "9059",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rampwf/utils/combine.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "115957"
},
{
"name": "Makefile",
"bytes": "369"
},
{
"name": "Python",
"bytes": "354774"
},
{
"name": "Shell",
"bytes": "3960"
}
],
"symlink_target": ""
} |
"""A simple Google-style logging wrapper."""
import logging
import time
import traceback
import os
import gflags as flags
FLAGS = flags.FLAGS
def format_message(record):
try:
record_message = '%s' % (record.msg % record.args)
except TypeError:
record_message = record.msg
return record_message
class GlogFormatter(logging.Formatter):
LEVEL_MAP = {
logging.FATAL: 'F', # FATAL is alias of CRITICAL
logging.ERROR: 'E',
logging.WARN: 'W',
logging.INFO: 'I',
logging.DEBUG: 'D'
}
def __init__(self):
logging.Formatter.__init__(self)
def format(self, record):
try:
level = GlogFormatter.LEVEL_MAP[record.levelno]
except KeyError:
level = '?'
date = time.localtime(record.created)
date_usec = (record.created - int(record.created)) * 1e6
record_message = '%c%02d%02d %02d:%02d:%02d.%06d %s %s:%d] %s' % (
level, date.tm_mon, date.tm_mday, date.tm_hour, date.tm_min,
date.tm_sec, date_usec,
record.process if record.process is not None else '?????',
record.filename,
record.lineno,
format_message(record))
record.getMessage = lambda: record_message
return logging.Formatter.format(self, record)
logger = logging.getLogger()
handler = logging.StreamHandler()
def setLevel(newlevel):
logger.setLevel(newlevel)
logger.debug('Log level set to %s', newlevel)
def init():
setLevel(FLAGS.verbosity)
debug = logging.debug
info = logging.info
warning = logging.warning
warn = logging.warning
error = logging.error
exception = logging.exception
fatal = logging.fatal
log = logging.log
DEBUG = logging.DEBUG
INFO = logging.INFO
WARNING = logging.WARNING
WARN = logging.WARN
ERROR = logging.ERROR
FATAL = logging.FATAL
_level_names = {
DEBUG: 'DEBUG',
INFO: 'INFO',
WARN: 'WARN',
ERROR: 'ERROR',
FATAL: 'FATAL'
}
_level_letters = [name[0] for name in _level_names.values()]
GLOG_PREFIX_REGEX = (
r"""
(?x) ^
(?P<severity>[%s])
(?P<month>\d\d)(?P<day>\d\d)\s
(?P<hour>\d\d):(?P<minute>\d\d):(?P<second>\d\d)
\.(?P<microsecond>\d{6})\s+
(?P<process_id>-?\d+)\s
(?P<filename>[a-zA-Z<_][\w._<>-]+):(?P<line>\d+)
\]\s
""") % ''.join(_level_letters)
"""Regex you can use to parse glog line prefixes."""
handler.setFormatter(GlogFormatter())
logger.addHandler(handler)
class CaptureWarningsFlag(flags.BooleanFlag):
def __init__(self):
flags.BooleanFlag.__init__(self, 'glog_capture_warnings', True,
"Redirect warnings to log.warn messages")
def Parse(self, arg):
flags.BooleanFlag.Parse(self, arg)
logging.captureWarnings(self.value)
flags.DEFINE_flag(CaptureWarningsFlag())
class VerbosityParser(flags.ArgumentParser):
"""Sneakily use gflags parsing to get a simple callback."""
def Parse(self, arg):
try:
intarg = int(arg)
# Look up the name for this level (DEBUG, INFO, etc) if it exists
try:
level = logging._levelNames.get(intarg, intarg)
except AttributeError: # This was renamed somewhere b/w 2.7 and 3.4
level = logging._levelToName.get(intarg, intarg)
except ValueError:
level = arg
setLevel(level)
return level
flags.DEFINE(
parser=VerbosityParser(),
serializer=flags.ArgumentSerializer(),
name='verbosity',
default=logging.INFO,
help='Logging verbosity')
# Define functions emulating C++ glog check-macros
# https://htmlpreview.github.io/?https://github.com/google/glog/master/doc/glog.html#check
def format_stacktrace(stack):
"""Print a stack trace that is easier to read.
* Reduce paths to basename component
* Truncates the part of the stack after the check failure
"""
lines = []
for _, f in enumerate(stack):
fname = os.path.basename(f[0])
line = "\t%s:%d\t%s" % (fname + "::" + f[2], f[1], f[3])
lines.append(line)
return lines
class FailedCheckException(AssertionError):
"""Exception with message indicating check-failure location and values."""
def check_failed(message):
stack = traceback.extract_stack()
stack = stack[0:-2]
stacktrace_lines = format_stacktrace(stack)
filename, line_num, _, _ = stack[-1]
try:
raise FailedCheckException(message)
except FailedCheckException:
log_record = logger.makeRecord('CRITICAL', 50, filename, line_num,
message, None, None)
handler.handle(log_record)
log_record = logger.makeRecord('DEBUG', 10, filename, line_num,
'Check failed here:', None, None)
handler.handle(log_record)
for line in stacktrace_lines:
log_record = logger.makeRecord('DEBUG', 10, filename, line_num,
line, None, None)
handler.handle(log_record)
raise
return
def check(condition, message=None):
"""Raise exception with message if condition is False."""
if not condition:
if message is None:
message = "Check failed."
check_failed(message)
def check_eq(obj1, obj2, message=None):
"""Raise exception with message if obj1 != obj2."""
if obj1 != obj2:
if message is None:
message = "Check failed: %s != %s" % (str(obj1), str(obj2))
check_failed(message)
def check_ne(obj1, obj2, message=None):
"""Raise exception with message if obj1 == obj2."""
if obj1 == obj2:
if message is None:
message = "Check failed: %s == %s" % (str(obj1), str(obj2))
check_failed(message)
def check_le(obj1, obj2, message=None):
"""Raise exception with message if not (obj1 <= obj2)."""
if obj1 > obj2:
if message is None:
message = "Check failed: %s > %s" % (str(obj1), str(obj2))
check_failed(message)
def check_ge(obj1, obj2, message=None):
"""Raise exception with message unless (obj1 >= obj2)."""
if obj1 < obj2:
if message is None:
message = "Check failed: %s < %s" % (str(obj1), str(obj2))
check_failed(message)
def check_lt(obj1, obj2, message=None):
"""Raise exception with message unless (obj1 < obj2)."""
if obj1 >= obj2:
if message is None:
message = "Check failed: %s >= %s" % (str(obj1), str(obj2))
check_failed(message)
def check_gt(obj1, obj2, message=None):
"""Raise exception with message unless (obj1 > obj2)."""
if obj1 <= obj2:
if message is None:
message = "Check failed: %s <= %s" % (str(obj1), str(obj2))
check_failed(message)
def check_notnone(obj, message=None):
"""Raise exception with message if obj is None."""
if obj is None:
if message is None:
message = "Check failed: Object is None."
check_failed(message)
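# Editor's usage sketch: the check_* helpers above mirror glog's CHECK macros;
# they log the failure location and raise FailedCheckException when the
# condition does not hold, e.g.:
#
#     check_eq(len([1, 2, 3]), 3)           # passes silently
#     check_gt(0, 1, "0 is not > 1")        # logs and raises FailedCheckException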
| {
"content_hash": "cbdbd28ed3ef4a0c82896ae17e713d5c",
"timestamp": "",
"source": "github",
"line_count": 247,
"max_line_length": 90,
"avg_line_length": 28.663967611336034,
"alnum_prop": 0.5997175141242937,
"repo_name": "benley/python-glog",
"id": "1aa0f75fe4390a025e4d19fb2e9b105b050d994e",
"size": "7080",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "glog.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "9890"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals, absolute_import
import os
from ardublocklyserver.py23 import py23
from ardublocklyserver.compilersettings import ServerCompilerSettings
class SketchCreator(object):
"""
Creates an Arduino Sketch.
"""
#
# Metaclass methods
#
def __init__(self):
# Default sketch, blink builtin LED
self._sketch_default_code = \
'int led = 13;\n' \
'void setup() {\n' \
' pinMode(led, OUTPUT);\n' \
'}\n' \
'void loop() {\n' \
' digitalWrite(led, HIGH);\n' \
' delay(1000);\n' \
' digitalWrite(led, LOW);\n' \
' delay(1000);\n' \
'}\n'
#
# Creating files
#
def create_sketch(self, sketch_code=None):
"""
Creates the Arduino sketch with either the default blinky
code or the code defined in the input parameter.
:param sketch_code: Unicode string with the code for the sketch.
:return: Unicode string with full path to the sketch file
Return None indicates an error has occurred.
"""
sketch_path = self.build_sketch_path()
if isinstance(sketch_code, py23.string_type_compare)\
and sketch_code:
code_to_write = sketch_code
else:
code_to_write = self._sketch_default_code
try:
arduino_sketch = open(sketch_path, 'w')
arduino_sketch.write(code_to_write)
arduino_sketch.close()
except Exception as e:
sketch_path = None
print(e)
print('Arduino sketch could not be created!!!')
return sketch_path
#
# File and directories settings
#
def build_sketch_path(self):
"""
If a valid directory is saved in the settings, it creates the Arduino
        folder (if it does not exist already) and returns a string pointing
to the sketch path
:return: unicode string with full path to the sketch file
Return None indicates an error has occurred
"""
sketch_name = ServerCompilerSettings().sketch_name
sketch_directory = ServerCompilerSettings().sketch_dir
sketch_path = None
if os.path.isdir(sketch_directory):
sketch_path = os.path.join(sketch_directory, sketch_name)
if not os.path.exists(sketch_path):
os.makedirs(sketch_path)
sketch_path = os.path.join(sketch_path, sketch_name + '.ino')
else:
            print('The sketch directory in the settings does not exist!')
return sketch_path
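# ---------------------------------------------------------------------------
# Editor's usage sketch: how the class above is typically driven.  Whether a
# file is actually written depends on ServerCompilerSettings().sketch_dir
# pointing at an existing directory, so this is illustrative only.
def _create_default_sketch_example():
    creator = SketchCreator()
    # With no argument the default blink sketch is written; passing a unicode
    # string writes that code instead.  Returns the sketch path or None.
    return creator.create_sketch()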
| {
"content_hash": "9be1007533af577944ef714f8bff19fb",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 77,
"avg_line_length": 32.51807228915663,
"alnum_prop": 0.5739162652834383,
"repo_name": "adrgerez/ardublockly",
"id": "2a394cbb2ea452968fe6ce76847cd12c89cd2b46",
"size": "2975",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ardublocklyserver/sketchcreator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "31908"
},
{
"name": "HTML",
"bytes": "128755"
},
{
"name": "JavaScript",
"bytes": "3295283"
},
{
"name": "NSIS",
"bytes": "3760"
},
{
"name": "Python",
"bytes": "268060"
}
],
"symlink_target": ""
} |
from window import App
app = App()
app.updatemsg("startup")
app.run()
app.root.destroy()
| {
"content_hash": "dd574ebd716ea177a9e8b15e133bd20c",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 24,
"avg_line_length": 15,
"alnum_prop": 0.7111111111111111,
"repo_name": "brucelau-github/raspberry-pi-proj",
"id": "9c512a419aa71ee8ddf0e11ec71b28cada618c27",
"size": "90",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wintest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "50929"
}
],
"symlink_target": ""
} |
"""Dummy ProvPy ``prov-compare`` which mimics the behaviour of
ProvPy ``prov-compare``.
``prov-compare`` returns 2 if:
- No files.
- Files are not valid PROV documents.
``prov-compare`` returns 1 if:
- Files are valid PROV documents but not equivalent.
``prov-compare`` returns 0 if:
- Files are valid PROV documents and are equivalent.
This script behaves similarly (though it does no PROV validation).
If the inputs and formats are valid and the two files have
the same contents, it returns 0; otherwise it returns 1.
Usage::
    usage: prov_compare_dummy.py [-h] -f [FORMAT1] -F [FORMAT2] file1 file2
    Dummy ProvPy prov_compare.
    positional arguments:
      file1         File 1
      file2         File 2
    optional arguments:
      -h, --help    show this help message and exit
      -f [FORMAT1]  File 1 format - one of xml, json
      -F [FORMAT2]  File 2 format - one of xml, json
"""
# Copyright (c) 2015 University of Southampton
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import argparse
import filecmp
import os
import shutil
import sys
def compare(file1, format1, file2, format2):
"""
Mimic `prov-compare` behaviour.
:param file1: File
:type file1: str or unicode
:param format1: `file1` format
:type format1: str or unicode
:param file2: File
:type file2: str or unicode
:param format2: `file2` format
:type format2: str or unicode
"""
for file_name in [file1, file2]:
if not os.path.isfile(file_name):
print(("No such file " + file_name))
sys.exit(2)
formats = ["xml", "json"]
for format in [format1, format2]:
if format not in formats:
# Unsupported format
sys.exit(2)
if not filecmp.cmp(file1, file2, shallow=False):
# Documents do not match
sys.exit(1)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Dummy ProvPy prov_compare.")
parser.add_argument("-f", metavar="FORMAT1",
help="File 1 format - one of xml, json",
nargs="?",
required=True)
parser.add_argument("-F", metavar="FORMAT2",
help="File 2 format - one of xml, json",
nargs="?",
required=True)
parser.add_argument("file1", help="File 1")
parser.add_argument("file2", help="File 2")
args = parser.parse_args()
compare(args.file1, args.f, args.file2, args.F)
sys.exit(0)
| {
"content_hash": "5c91fea115b01592bd034e72cee26f5b",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 76,
"avg_line_length": 32.74766355140187,
"alnum_prop": 0.6786529680365296,
"repo_name": "prov-suite/interop-test-harness",
"id": "528c98169572cf958f01ddb153657eee65cdd96e",
"size": "3504",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "prov_interop/tests/provpy/prov_compare_dummy.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "7256"
},
{
"name": "HTML",
"bytes": "4209"
},
{
"name": "Makefile",
"bytes": "8132"
},
{
"name": "Python",
"bytes": "202636"
},
{
"name": "Shell",
"bytes": "3204"
}
],
"symlink_target": ""
} |
"""zest.releaser entry points to support projects using distutils2-like
setup.cfg files. The only actual functionality this adds is to update the
version option in a setup.cfg file, if it exists. If setup.cfg does not exist,
or does not contain a version option, then this does nothing.
TODO: d2to1 theoretically supports using a different filename for setup.cfg;
this does not support that. We could hack in support, though I'm not sure how
useful the original functionality is to begin with (and it might be removed) so
we ignore that for now.
TODO: There exists a proposal
(http://mail.python.org/pipermail/distutils-sig/2011-March/017628.html) to add
a 'version-from-file' option (or something of the like) to distutils2; if this
is added then support for it should be included here as well.
"""
import logging
import os
from .extern.six import moves as m
ConfigParser = m.configparser.ConfigParser
logger = logging.getLogger(__name__)
def update_setupcfg_version(filename, version):
"""Opens the given setup.cfg file, locates the version option in the
[metadata] section, updates it to the new version.
"""
setup_cfg = open(filename).readlines()
current_section = None
updated = False
for idx, line in enumerate(setup_cfg):
m = ConfigParser.SECTCRE.match(line)
if m:
if current_section == 'metadata':
# We already parsed the entire metadata section without finding
# a version line, and are now moving into a new section
break
current_section = m.group('header')
continue
if '=' not in line:
continue
opt, val = line.split('=', 1)
opt, val = opt.strip(), val.strip()
if current_section == 'metadata' and opt == 'version':
setup_cfg[idx] = 'version = %s\n' % version
updated = True
break
if updated:
open(filename, 'w').writelines(setup_cfg)
logger.info("Set %s's version to %r" % (os.path.basename(filename),
version))
def prereleaser_middle(data):
filename = os.path.join(data['workingdir'], 'setup.cfg')
if os.path.exists(filename):
update_setupcfg_version(filename, data['new_version'])
def postreleaser_middle(data):
filename = os.path.join(data['workingdir'], 'setup.cfg')
if os.path.exists(filename):
update_setupcfg_version(filename, data['dev_version'])
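# Editor's note: update_setupcfg_version() only rewrites a line shaped like
# the one below inside the [metadata] section of setup.cfg (values are
# illustrative):
#
#     [metadata]
#     name = example-project
#     version = 1.0.dev0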
| {
"content_hash": "7f01c29a270510da72ee720f8e12f518",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 79,
"avg_line_length": 34.666666666666664,
"alnum_prop": 0.6566506410256411,
"repo_name": "ioram7/keystone-federado-pgid2013",
"id": "896b6dbddde892313b462ddddde93b96989d5e3d",
"size": "2496",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "build/d2to1/d2to1/zestreleaser.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1841"
},
{
"name": "C",
"bytes": "10584735"
},
{
"name": "C++",
"bytes": "19231"
},
{
"name": "CSS",
"bytes": "172341"
},
{
"name": "JavaScript",
"bytes": "530938"
},
{
"name": "Python",
"bytes": "26306359"
},
{
"name": "Shell",
"bytes": "38138"
},
{
"name": "XSLT",
"bytes": "306125"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from quotations import models
class AuthorAdmin(admin.ModelAdmin):
search_fields = ['name']
class QuotationAdmin(admin.ModelAdmin):
search_fields = ['text', 'author__name']
admin.site.register(models.Author, AuthorAdmin)
admin.site.register(models.Quotation, QuotationAdmin)
| {
"content_hash": "f76028df3aed6f7757306d21ed6ec40b",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 53,
"avg_line_length": 21.533333333333335,
"alnum_prop": 0.7647058823529411,
"repo_name": "jessamynsmith/socialjusticebingo",
"id": "eed4ba8820cc0e8165ef8ac560f88f0a23f55b9a",
"size": "323",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "quotations/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1790"
},
{
"name": "HTML",
"bytes": "3414"
},
{
"name": "Python",
"bytes": "19887"
},
{
"name": "Shell",
"bytes": "763"
}
],
"symlink_target": ""
} |
"""
Environmental Exposures API
Environmental Exposures API
OpenAPI spec version: 1.0.0
Contact: stealey@renci.org
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.apis.default_api import DefaultApi
class TestDefaultApi(unittest.TestCase):
""" DefaultApi unit test stubs """
def setUp(self):
self.api = swagger_client.apis.default_api.DefaultApi()
def tearDown(self):
pass
def test_exposures_exposure_type_coordinates_get(self):
"""
Test case for exposures_exposure_type_coordinates_get
Get exposure location(s) as latitude, longitude coordinates
"""
pass
def test_exposures_exposure_type_dates_get(self):
"""
Test case for exposures_exposure_type_dates_get
Get exposure start date and end date range for exposure type
"""
pass
def test_exposures_exposure_type_scores_get(self):
"""
Test case for exposures_exposure_type_scores_get
Get exposure score for a given environmental factor at exposure location(s)
"""
pass
def test_exposures_exposure_type_values_get(self):
"""
Test case for exposures_exposure_type_values_get
Get exposure value for a given environmental factor at exposure location(s)
"""
pass
def test_exposures_get(self):
"""
Test case for exposures_get
Get list of exposure types
"""
pass
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "c09997431f7a2715bf368c6b4bffa613",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 83,
"avg_line_length": 26.870588235294118,
"alnum_prop": 0.670753064798599,
"repo_name": "ResearchSoftwareInstitute/greendatatranslator",
"id": "5bfd93a2dbfd5a4169fff95e51c4cc4c26527ad1",
"size": "2301",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/greentranslator/python-client/test/test_default_api.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "6248"
},
{
"name": "Makefile",
"bytes": "616"
},
{
"name": "Python",
"bytes": "130346"
},
{
"name": "Shell",
"bytes": "3605"
}
],
"symlink_target": ""
} |
import os
import sys
import MySQLdb
class DatabaseConnection(object):
'''Connect to OLRC's mysql server.'''
def __init__(self):
'''Initiate connection the database. If connection credentials are not
available or connection fails throw exception.'''
try:
self.db = MySQLdb.connect(
host=os.environ["MYSQL_HOST"],
user=os.environ["MYSQL_USER"],
passwd=os.environ["MYSQL_PASSWD"],
db=os.environ["MYSQL_DB"],
charset='utf8',
)
self.cursor = self.db.cursor()
except KeyError:
sys.exit("Please make sure all required environment variables"
" are set:\n$MYSQL_HOST\n$MYSQL_DB\n$MYSQL_USER\n"
"$MYSQL_PASSWD\n")
except MySQLdb.Error, e:
sys.exit("ERROR {0} IN CONNECTION: {1}".format(
e.args[0], e.args[1]
))
def get_cursor(self):
'''Return a cursor for the database.'''
return self.cursor
def create_table(self, table_name):
'''Given a table_name, create a table in the database.
'''
query = "CREATE TABLE {0} ( \
`id` INTEGER NOT NULL AUTO_INCREMENT,\
path VARCHAR(1000),\
uploaded BOOL DEFAULT '0',\
INDEX `path_index` (`id`)\
)".format(table_name)
try:
self.cursor.execute(query)
except MySQLdb.Error, e:
sys.exit("ERROR {0} IN TABLE CREATION: {1}".format(
e.args[0],
e.args[1]
))
def insert_path(self, path, table_name, alt=False):
'''Insert the given path to the table_name. If alt, create
the query with reversed quotes.'''
if not alt:
query = "INSERT INTO {0} (path) VALUES ('{1}');".format(
table_name,
path
)
else:
query = 'INSERT INTO {0} (path) VALUES ("{1}");'.format(
table_name,
path
)
self.cursor.execute(query)
self.db.commit()
def execute_query(self, query):
'''Execute the given query and return the cursor object.'''
try:
self.cursor.execute(query)
self.db.commit()
except MySQLdb.Error, e:
sys.exit("ERROR {0} IN QUERY: {1}\nQuery:{2}".format(
e.args[0],
e.args[1],
query
))
        return self.cursor
| {
"content_hash": "a9c0c75398be6365999673e3c0909b45",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 78,
"avg_line_length": 29.86046511627907,
"alnum_prop": 0.4976635514018692,
"repo_name": "cudevmaxwell/SwiftBulkUploader",
"id": "2285c130af0b5f85de18d72270c8128861df4b84",
"size": "2568",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "swiftbulkuploader/olrcdb.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1736"
},
{
"name": "Python",
"bytes": "39580"
}
],
"symlink_target": ""
} |
from os.path import abspath, dirname, basename, join
try:
import kendoui_backend
except ImportError:
import sys
sys.path.insert(0, "..")
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
ROOT_PATH = abspath(dirname(__file__))
PROJECT_NAME = basename(ROOT_PATH)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'test.db', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
TIME_ZONE = 'America/Chicago'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
USE_I18N = True
USE_L10N = True
MEDIA_ROOT = ''
MEDIA_URL = ''
STATIC_ROOT = ''
STATIC_URL = '/static/'
ADMIN_MEDIA_PREFIX = '/static/admin/'
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
SECRET_KEY = 'osewojz0pyn8j_&sa#(q&c&p%sj^x&brz($+e+5j@&j_l966o&'
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'example.urls'
TEMPLATE_DIRS = (
join(ROOT_PATH, 'templates')
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
# 'django.contrib.auth',
'django.contrib.contenttypes',
# 'django.contrib.sessions',
# 'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'json_utils',
'kendoui_backend',
'app',
)
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| {
"content_hash": "2a1425bdbd13b41d550c48ee7bd62ddd",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 122,
"avg_line_length": 26.041666666666668,
"alnum_prop": 0.62112,
"repo_name": "tnajdek/django-kendoui-backend",
"id": "08b3cf3a28e79fa1950942d2e52539ef9adc8485",
"size": "3125",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2266"
},
{
"name": "Python",
"bytes": "60861"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("account", "0001_user")]
operations = [
migrations.AddField(
model_name="user",
name="updated_at",
field=models.DateTimeField(
auto_now=True,
help_text="The time that the user was last updated.",
verbose_name="updated_at",
),
),
migrations.AlterField(
model_name="user",
name="created_at",
field=models.DateTimeField(
auto_now_add=True, verbose_name="created at"
),
),
]
| {
"content_hash": "23f97f59e607dc13e74f4d2535d6be8a",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 69,
"avg_line_length": 27.04,
"alnum_prop": 0.5133136094674556,
"repo_name": "knowmetools/km-api",
"id": "650e877a31bd3c342eee395c419b53f1cb497056",
"size": "725",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "km_api/account/migrations/0002_user_timestamps.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "746"
},
{
"name": "HTML",
"bytes": "7830"
},
{
"name": "JavaScript",
"bytes": "7009"
},
{
"name": "Python",
"bytes": "635776"
},
{
"name": "SCSS",
"bytes": "4662"
},
{
"name": "Shell",
"bytes": "1671"
}
],
"symlink_target": ""
} |
import argparse
import subprocess
PROJECT_VERSION = '0.1.0-rc.5' # Current version of the project (ViennaGrid for Python)
BOOST_VERSION = '1.53.0' # Version of Boost used for development
VIENNAGRID_VERSION = '1.0.1' # Version of ViennaGrid used for development
def replace_dirs(old_str, new_str, dirs):
paths = ' '.join(dirs)
cmd = "perl -p -i -e 's/%(old_str)s/%(new_str)s/g' `grep -Rl '%(old_str)s' %(paths)s`" % locals()
subprocess.call(cmd, shell=True)
def replace_file(old_str, new_str, paths):
cmd = "perl -p -i -e 's/%(old_str)s/%(new_str)s/g' %(paths)s" % locals()
subprocess.call(cmd, shell=True)
def main(project_version, boost_version, viennagrid_version):
dirs = ['doc', 'scripts', 'test', 'viennagrid-python']
old_versions = [PROJECT_VERSION, BOOST_VERSION, VIENNAGRID_VERSION]
new_versions = [project_version, boost_version, viennagrid_version]
for old_str, new_str in zip(old_versions, new_versions):
old_str = old_str.replace('.', '\.')
new_str = new_str.replace('.', '\.')
replace_dirs(old_str, new_str, dirs)
replace_file(old_str, new_str, 'src/wrapper.cpp')
replace_file(old_str, new_str, 'README.md')
replace_file(old_str, new_str, 'src/CMakeLists.txt')
old_version_short = '.'.join(PROJECT_VERSION.split('.')[0:2])
new_version_short = '.'.join(project_version.split('.')[0:2])
replace_file(old_version_short, new_version_short, 'src/CMakeLists.txt')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('project_version', help='version number of the next release of ViennaGrid for Python')
    parser.add_argument('boost_version', help='Boost version number to be used', nargs='?', default=BOOST_VERSION)
    parser.add_argument('viennagrid_version', help='ViennaGrid version number to be used', nargs='?', default=VIENNAGRID_VERSION)
args = parser.parse_args()
main(args.project_version, args.boost_version, args.viennagrid_version)
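# Example invocation (the version numbers are illustrative); the Boost and
# ViennaGrid arguments are optional and fall back to the defaults above:
#   python bump_version.py 0.1.0-rc.6 1.53.0 1.0.1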
| {
"content_hash": "84d50f00093b06fe69c65da3b368a3a3",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 121,
"avg_line_length": 47.170731707317074,
"alnum_prop": 0.6923474663908997,
"repo_name": "jonancm/viennagrid-python",
"id": "4599b5b166636e3730207ec438826c7bb08a27aa",
"size": "2051",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/bump_version.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "1287098"
},
{
"name": "Python",
"bytes": "434735"
},
{
"name": "Shell",
"bytes": "1916"
}
],
"symlink_target": ""
} |
from model.new_contact import New_contact
import re
class ContactHelper:
def __init__(self, app):
self.app = app
def fill_new_contact_form(self, new_contact):
wd = self.app.wd
self.open_add_new_contact()
self.fill_contact_form(new_contact)
# submit the form
wd.find_element_by_name("submit").click()
self.contact_list_cache = None
def fill_contact_form(self, contact):
wd = self.app.wd
self.change_field_value("firstname", contact.name)
self.change_field_value("lastname", contact.last_name)
self.change_field_value("home", contact.home_number)
self.change_field_value("address", contact.address)
self.change_field_value("mobile", contact.mobile_number)
self.change_field_value("work", contact.work_number)
self.change_field_value("phone2", contact.secondary_number)
self.change_field_value("email", contact.email)
self.change_field_value("email2", contact.email2)
self.change_field_value("email3", contact.email3)
def change_field_value(self, field_name, text):
wd = self.app.wd
if text is not None:
wd.find_element_by_name(field_name).click()
wd.find_element_by_name(field_name).clear()
wd.find_element_by_name(field_name).send_keys(text)
def open_add_new_contact(self):
wd = self.app.wd
wd.find_element_by_link_text("add new").click()
contact_list_cache = None
def get_contact_list(self):
if self.contact_list_cache is None:
wd = self.app.wd
self.open_home_page()
self.contact_list_cache = []
for row in wd.find_elements_by_name("entry"):
cells = row.find_elements_by_tag_name("td")
firstname = cells[2].text
lastname = cells[1].text
address = cells[3].text
all_emails = cells[4].text
id = cells[0].find_element_by_tag_name("input").get_attribute("value")
all_phones = cells[5].text
# print(all_phones)
self.contact_list_cache.append(New_contact(name=firstname, last_name=lastname, id=id, address=address,
all_phones_from_home_page=all_phones,
all_emails_from_home_page=all_emails))
return list(self.contact_list_cache)
def open_home_page(self):
wd = self.app.wd
if not(wd.current_url.endswith("/group.php") and len(wd.find_elements_by_name("new")) > 0):
wd.find_element_by_link_text("groups").click()
wd.find_element_by_link_text("home").click()
def count(self):
wd = self.app.wd
self.open_home_page()
return len(wd.find_elements_by_name("selected[]"))
def del_first_contact(self):
self.del_contact_by_index(0)
def del_contact_by_index(self, index):
wd = self.app.wd
self.open_home_page()
self.select_contact_by_index(index)
# submit deletion
wd.find_elements_by_class_name("left")[1].click()
wd.switch_to_alert().accept()
self.contact_list_cache = None
def select_first_contact(self):
wd = self.app.wd
wd.find_element_by_name("selected[]").click()
def select_contact_by_index(self, index):
wd = self.app.wd
wd.find_elements_by_name("selected[]")[index].click()
def modify_first_contact_name(self, new_name_data):
self.modify_some_contact_name(0, new_name_data )
def modify_some_contact_name(self, index, new_name_data):
wd = self.app.wd
self.open_contact_to_edit_by_index(index)
# enter new data
self.fill_contact_form(new_name_data)
        # submit modification form
wd.find_element_by_name("update").click()
self.contact_list_cache = None
def open_contact_to_edit_by_index(self, index):
wd = self.app.wd
self.open_home_page()
# open contact to modify
row = wd.find_elements_by_name("entry")[index]
cell = row.find_elements_by_tag_name("td")[7]
cell.find_element_by_tag_name("a").click()
def open_contact_to_view_by_index(self, index):
wd = self.app.wd
self.open_home_page()
        # open contact to view
row = wd.find_elements_by_name("entry")[index]
cell = row.find_elements_by_tag_name("td")[6]
cell.find_element_by_tag_name("a").click()
def get_contact_info_from_edit_page(self, index):
wd = self.app.wd
self.open_contact_to_edit_by_index(index)
name = wd.find_element_by_name("firstname").get_attribute("value")
last_name = wd.find_element_by_name("lastname").get_attribute("value")
id = wd.find_element_by_name("id").get_attribute("value")
home_number = wd.find_element_by_name("home").get_attribute("value")
work_number = wd.find_element_by_name("work").get_attribute("value")
mobile_number = wd.find_element_by_name("mobile").get_attribute("value")
address = wd.find_element_by_name("address").text
email = wd.find_element_by_name("email").get_attribute("value")
email2 = wd.find_element_by_name("email2").get_attribute("value")
email3 = wd.find_element_by_name("email3").get_attribute("value")
secondary_number = wd.find_element_by_name("phone2").get_attribute("value")
return New_contact(name = name, last_name = last_name, id = id, home_number = home_number, work_number = work_number,
mobile_number = mobile_number, secondary_number = secondary_number, address = address,
email=email, email2=email2, email3=email3)
def get_contact_from_view_page(self, index):
wd = self.app.wd
self.open_contact_to_view_by_index(index)
text = wd.find_element_by_id("content").text
home_number = re.search("H: (.*)", text).group(1)
mobile_number = re.search("M: (.*)", text).group(1)
work_number = re.search("W: (.*)", text).group(1)
secondary_number = re.search("P: (.*)", text).group(1)
return New_contact(home_number=home_number, work_number=work_number,
mobile_number=mobile_number, secondary_number=secondary_number)
def del_contact_by_id(self, id):
wd = self.app.wd
self.open_home_page()
self.select_contact_by_id(id)
# submit deletion
wd.find_elements_by_class_name("left")[1].click()
wd.switch_to_alert().accept()
self.contact_list_cache = None
def select_contact_by_id(self, id):
wd = self.app.wd
wd.find_element_by_css_selector("input[id='%s']" % id).click()
| {
"content_hash": "88bac066dcb620d2b63b6c4e91a5830f",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 125,
"avg_line_length": 42.515527950310556,
"alnum_prop": 0.5956172388604821,
"repo_name": "olga121/python_training",
"id": "142c563c3b58f0d8c885be5748d36def76a23eb5",
"size": "6845",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fixture/new_contact.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "34769"
}
],
"symlink_target": ""
} |
import sys
import os.path
import re
# import exceptions
class bjbTask:
# Class "constants"
TEXT = 0
CONTEXT = 1
STATUS = 2
DUE_DATE = 3
START_DATE = 4
# Class variables
db_file = "taskdb"
tasks = []
def __init__(self):
# Check for config file
if os.path.isfile(os.path.expanduser("~/.bjbtask")):
with open(os.path.expanduser("~/.bjbtask")) as f:
self.db_file = f.readline().strip()
# Read database
if os.path.isfile(os.path.expanduser(self.db_file)):
with open(os.path.expanduser(self.db_file), "r") as f:
for line in f:
bjbTask.tasks.append(line.strip().split(','))
else:
print("ERROR: Database file not found")
def arg_parse(self, argv):
if (len(argv) < 2):
print ("No command specified (try bjbtask.py help)")
else:
if ((argv[1] == 'add') or (argv[1] == 'a')):
self.add(argv[2:])
elif ((argv[1] == 'context') or (argv[1] == 'c')):
self.set_context(argv[2:])
elif ((argv[1] == 'list') or (argv[1] == 'l')):
self.list(argv[2:])
elif ((argv[1] == 'board') or (argv[1] == 'b')):
self.board(argv[2:])
elif argv[1] == 'start':
self.start(argv[2:])
elif argv[1] == 'done':
self.done(argv[2:])
elif ((argv[1] == 'del') or (argv[1] == 'delete')):
self.delete(argv[2:])
elif argv[1] == 'archive':
self.archive(argv[2:])
elif argv[1] == 'init':
self.init()
elif ((argv[1] == 'help') or (argv[1] == '?')):
self.help(argv[2:])
else:
print ("Command not recognised: {}".format(argv[1]))
self.save()
def add(self, argv):
# Check arguments for special commands
# --- Context handling
context_found = False
context = "--"
num = 0
for arg in argv:
m = re.search('@.*', arg)
if m != None:
context_found = True
context_position = num
context = argv[context_position]
num = num + 1
# Remove context from argument list
if context_found == True:
if context_position == 0:
argv = argv[1:]
else:
argv = argv[:(context_position)]
# --- End of Context handling
# Convert remaining argument list to a string
text = " ".join(argv)
task = [text, context, "--"]
bjbTask.tasks.append(task)
print ("Task added: {}".format(text))
if context_found == True:
print ("Task added in context: {}".format(context))
def set_context(self, argv):
        invalid = False
        try:
num = int(argv[0])
except ValueError:
print ("Error: Task number not valid")
num = len(bjbTask.tasks) + 1
invalid = True
if ((num > 0) and ((num <= len(bjbTask.tasks)))):
text = bjbTask.tasks[num - 1]
if len(argv) >= 2:
context = argv[1]
m = re.search('@.*', context)
if m == None:
context = '@' + context
bjbTask.tasks[num - 1][self.CONTEXT] = context
print ("Context set for task: {}".format(text))
else:
if invalid != True:
print ("Task mumber out of range: {}".format(num))
def list(self, argv):
print ("Current Tasks")
if len(argv) >= 1:
arg = argv[0]
else:
arg = '--'
num = 0
for task in bjbTask.tasks:
if (arg[0] == "@"):
if (arg == task[self.CONTEXT]):
print ("{:<3} {:30}".format((num + 1), task[self.TEXT]))
else:
if ((arg == 'all') or (task[self.STATUS] != 'DONE')):
if (arg == 'all'):
print ("{:<3} {:30} {:20} {}".format((num + 1), task[self.TEXT], task[self.CONTEXT], task[self.STATUS]))
else:
print ("{:<3} {:30} {}".format((num + 1), task[self.TEXT], task[self.CONTEXT]))
num = num + 1
def print_spaces(self, num):
for space in range(num):
print (" ", end = '')
def board(self, argv):
line = 0
backlog = []
started = []
done = []
context='--'
if len(argv) >= 1:
context = argv[0]
m = re.search('@.*', context)
if m == None:
context = '@' + context
for task in bjbTask.tasks:
if ((task[self.STATUS] == '--') and ((task[self.CONTEXT] == context) or (context == '--'))):
backlog.append(task[self.TEXT])
if ((task[self.STATUS] == 'STARTED') and ((task[self.CONTEXT] == context) or (context == '--'))):
started.append(task[self.TEXT])
if ((task[self.STATUS] == 'DONE') and ((task[self.CONTEXT] == context) or (context == '--'))):
done.append(task[self.TEXT])
print ("Backlog Started Done")
print ("------- ------- ----")
while ((line < len(backlog)) or (line < len(started)) or (line < len(done))):
if line < len(backlog):
print (backlog[line], end = '')
self.print_spaces(25 - len(backlog[line]))
else:
# print (" ", end = '')
self.print_spaces(25)
if line < len(started):
print (started[line], end = '')
self.print_spaces(25 - len(started[line]))
else:
# print (" ", end = '')
self.print_spaces(25)
if line < len(done):
print (done[line])
else:
print (" ")
line = line + 1
def done(self, argv):
        invalid = False
        try:
num = int(argv[0])
except ValueError:
print ("Error: Task number not valid")
num = len(bjbTask.tasks) + 1
invalid = True
if ((num > 0) and ((num <= len(bjbTask.tasks)))):
text = bjbTask.tasks[num - 1]
bjbTask.tasks[num - 1][self.STATUS] = "DONE"
print ("Task marked as done: {}".format(text))
else:
if invalid != True:
print ("Task mumber out of range: {}".format(num))
def start(self, argv):
        invalid = False
        try:
num = int(argv[0])
except ValueError:
print ("Error: Task number not valid")
num = len(bjbTask.tasks) + 1
invalid = True
if ((num > 0) and ((num <= len(bjbTask.tasks)))):
text = bjbTask.tasks[num - 1]
bjbTask.tasks[num - 1][self.STATUS] = "STARTED"
print ("Task marked as started: {}".format(text))
else:
if invalid != True:
print ("Task mumber out of range: {}".format(num))
def delete(self, argv):
        invalid = False
        try:
num = int(argv[0])
except ValueError:
print ("Error: Task number not valid")
num = len(bjbTask.tasks) + 1
invalid = True
if ((num > 0) and ((num <= len(bjbTask.tasks)))):
text = bjbTask.tasks[num - 1]
del(bjbTask.tasks[num - 1])
print ("Task deleted: {}".format(text))
else:
if invalid != True:
print ("Task mumber out of range: {}".format(num))
def archive(self, argv):
with open(os.path.expanduser("~/.bjbtask_archive"), "a") as f:
            # Iterate over a copy of the task list
for task in bjbTask.tasks[:]:
if task[self.STATUS] == "DONE":
f.write("{},{},{}\n".format(task[self.TEXT].strip(), task[self.CONTEXT], task[self.STATUS]))
print ("Task archived: {}".format(task))
bjbTask.tasks.remove(task)
def init(self):
f = open(os.path.expanduser(self.db_file),"w+")
f.close()
def help(self, argv):
if len(argv) < 1:
arg = "no arg"
else:
arg = argv[0]
if ((arg == 'add') or (arg == 'a')):
print ("bjbtask add command - add a task")
print (" add [@context] <task description> [@context]")
print (" Short command name a")
print ("Add a new task with the given description to the database")
print ("Put @context_name as the first or last word to make the task context @context_name")
if ((arg == 'context') or (arg == 'c')):
print ("bjbtask context command - set the context for a task")
print (" context <task number> <context>")
print (" Short command name c")
elif ((arg == 'list') or (arg == 'l')):
print ("bjbtask list command - list tasks")
print (" list [all]")
print (" Short command name l")
print ("Print all tasks prepended with a task number")
print ("The number is used to identify the task for other commands")
print ("with all modifier even completed tasks are displayed")
elif ((arg == 'board') or (arg == 'b')):
print ("bjbtask board command - show task board - optionaly filtered by context")
print (" board <context>")
print (" Short command name b")
elif arg == 'start':
print ("bjbtask start command - start a task")
print (" start <task number>")
elif arg == 'done':
print ("bjb task done commend - mark a task as done")
print (" done <task number>")
elif ((arg == 'del') or (arg == 'delete')):
print ("bjbtask delete command - delete a task")
print (" del <task number>")
print (" delete <task number>")
print ("Deletes the task with the given number")
elif arg == 'archive':
print ("bjbtask archive command - archive completed tasks")
print (" archive")
print ("Moves completed tasks into the archive file")
else:
print ("bjbtask commands")
print (" add - add a task")
print (" context - set the context for an existing task")
print (" list - list tasks")
print (" board - show a task (canban) board")
print (" start - mark a task as started")
print (" done - mark a task as done")
print (" del or delete - delete a task")
print (" archive - archive completed tasks")
def save(self):
# Overwrite file with all current data - THIS WILL NOT SCALE!!!!
with open(os.path.expanduser(self.db_file), "w") as f:
for task in bjbTask.tasks:
f.write("{},{},{}\n".format(task[self.TEXT].strip(), task[self.CONTEXT], task[self.STATUS]))
if __name__ == "__main__":
bjb_task = bjbTask()
bjb_task.arg_parse(sys.argv)
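# Example invocations, mirroring the built-in help (task text is made up):
#   python bjbtask.py add @home buy milk
#   python bjbtask.py list
#   python bjbtask.py start 1
#   python bjbtask.py done 1
#   python bjbtask.py archive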
| {
"content_hash": "eb346422ea918bccc52ef7d03b4ee7e8",
"timestamp": "",
"source": "github",
"line_count": 293,
"max_line_length": 128,
"avg_line_length": 38.98293515358362,
"alnum_prop": 0.466993521274733,
"repo_name": "barrybridgens/bjbtask",
"id": "50595c897b1f73232d82666d57d5b3e5006a915f",
"size": "11491",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bjbtask/bjbtask.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11491"
}
],
"symlink_target": ""
} |
import math
import numpy
import worldengine.astar
# Direction
NORTH = [0, -1]
NORTH_EAST = [1, -1]
EAST = [1, 0]
SOUTH_EAST = [1, 1]
SOUTH = [0, 1]
SOUTH_WEST = [-1, 1]
WEST = [-1, 0]
NORTH_WEST = [-1, -1]
CENTER = [0, 0]
DIR_NEIGHBORS = [NORTH, EAST, SOUTH, WEST]
DIR_NEIGHBORS_CENTER = [CENTER, NORTH, EAST, SOUTH, WEST]
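# Threshold on accumulated water flow: a cell can seed a river only once its
# flow reaches this value (see river_sources below).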
RIVER_TH = 0.02
def overflow(value, max_value):
return value % max_value
def in_circle(radius, center_x, center_y, x, y):
square_dist = ((center_x - x) ** 2 + (center_y - y) ** 2)
return square_dist <= radius ** 2
class ErosionSimulation(object):
def __init__(self):
self.wrap = True
def is_applicable(self, world):
return world.has_precipitations()
def execute(self, world, seed):
water_flow = numpy.zeros((world.height, world.width))
water_path = numpy.zeros((world.height, world.width), dtype=int)
river_list = []
lake_list = []
river_map = numpy.zeros((world.height, world.width))
lake_map = numpy.zeros((world.height, world.width))
# step one: water flow per cell based on rainfall
self.find_water_flow(world, water_path)
# step two: find river sources (seeds)
river_sources = self.river_sources(world, water_flow, water_path)
# step three: for each source, find a path to sea
for source in river_sources:
river = self.river_flow(source, world, river_list, lake_list)
if len(river) > 0:
river_list.append(river)
self.cleanUpFlow(river, world)
rx, ry = river[-1] # find last cell in river
if not world.is_ocean((rx, ry)):
lake_list.append(river[-1]) # river flowed into a lake
# step four: simulate erosion and updating river map
for river in river_list:
self.river_erosion(river, world)
self.rivermap_update(river, water_flow, river_map, world.layers['precipitation'].data)
# step five: rivers with no paths to sea form lakes
for lake in lake_list:
# print "Found lake at:",lake
lx, ly = lake
lake_map[ly, lx] = 0.1 # TODO: make this based on rainfall/flow
world.set_rivermap(river_map)
world.set_lakemap(lake_map)
def find_water_flow(self, world, water_path):
"""Find the flow direction for each cell in heightmap"""
# iterate through each cell
for x in range(world.width - 1):
for y in range(world.height - 1):
# search around cell for a direction
path = self.find_quick_path([x, y], world)
if path:
tx, ty = path
flow_dir = [tx - x, ty - y]
key = 0
for direction in DIR_NEIGHBORS_CENTER:
if direction == flow_dir:
water_path[y, x] = key
key += 1
def find_quick_path(self, river, world):
# Water flows based on cost, seeking the highest elevation difference
# highest positive number is the path of least resistance
# (lowest point)
# Cost
# *** 1,0 ***
# 0,1 *** 2,1
# *** 1,2 ***
x, y = river
new_path = []
lowest_elevation = world.layers['elevation'].data[y, x]
# lowestDirection = [0, 0]
for dx, dy in DIR_NEIGHBORS:
temp_dir = [x + dx, y + dy]
tx, ty = temp_dir
if not self.wrap and not world.contains(temp_dir):
continue
tx, ty = overflow(tx, world.width), overflow(ty, world.height)
elevation = world.layers['elevation'].data[ty, tx]
if elevation < lowest_elevation:
if world.contains(temp_dir):
pass
lowest_elevation = elevation
new_path = [tx, ty]
return new_path
@staticmethod
def river_sources(world, water_flow, water_path):
"""Find places on map where sources of river can be found"""
river_source_list = []
# Using the wind and rainfall data, create river 'seeds' by
# flowing rainfall along paths until a 'flow' threshold is reached
# and we have a beginning of a river... trickle->stream->river->sea
# step one: Using flow direction, follow the path for each cell
# adding the previous cell's flow to the current cell's flow.
# step two: We loop through the water flow map looking for cells
# above the water flow threshold. These are our river sources and
# we mark them as rivers. While looking, the cells with no
# out-going flow, above water flow threshold and are still
# above sea level are marked as 'sources'.
for x in range(0, world.width - 1):
for y in range(0, world.height - 1):
rain_fall = world.layers['precipitation'].data[y, x]
water_flow[y, x] = rain_fall
if water_path[y, x] == 0:
continue # ignore cells without flow direction
cx, cy = x, y # begin with starting location
neighbour_seed_found = False
# follow flow path to where it may lead
while not neighbour_seed_found:
# have we found a seed?
if world.is_mountain((cx, cy)) and water_flow[cy, cx] >= RIVER_TH:
# try not to create seeds around other seeds
for seed in river_source_list:
sx, sy = seed
if in_circle(9, cx, cy, sx, sy):
neighbour_seed_found = True
if neighbour_seed_found:
break # we do not want seeds for neighbors
river_source_list.append([cx, cy]) # river seed
break
# no path means dead end...
if water_path[cy, cx] == 0:
break # break out of loop
# follow path, add water flow from previous cell
dx, dy = DIR_NEIGHBORS_CENTER[water_path[cy, cx]]
nx, ny = cx + dx, cy + dy # calculate next cell
water_flow[ny, nx] += rain_fall
cx, cy = nx, ny # set current cell to next cell
return river_source_list
def river_flow(self, source, world, river_list, lake_list):
"""simulate fluid dynamics by using starting point and flowing to the
lowest available point"""
current_location = source
path = [source]
# start the flow
while True:
x, y = current_location
# is there a river nearby, flow into it
for dx, dy in DIR_NEIGHBORS:
ax, ay = x + dx, y + dy
if self.wrap:
ax, ay = overflow(ax, world.width), overflow(ay,
world.height)
for river in river_list:
if [ax, ay] in river:
merge = False
for rx, ry in river:
if [ax, ay] == [rx, ry]:
merge = True
path.append([rx, ry])
elif merge:
path.append([rx, ry])
return path # skip the rest, return path
# found a sea?
if world.is_ocean((x, y)):
break
# find our immediate lowest elevation and flow there
quick_section = self.find_quick_path(current_location, world)
if quick_section:
path.append(quick_section)
current_location = quick_section
continue # stop here and enter back into loop
is_wrapped, lower_elevation = self.findLowerElevation(
current_location, world)
if lower_elevation and not is_wrapped:
lower_path = worldengine.astar.PathFinder().find(
world.layers['elevation'].data, current_location, lower_elevation)
if lower_path:
path += lower_path
current_location = path[-1]
else:
break
elif lower_elevation and is_wrapped:
# TODO: make this more natural
max_radius = 40
cx, cy = current_location
lx, ly = lower_elevation
if x < 0 or y < 0 or x > world.width or y > world.height:
raise Exception(
"BUG: fix me... we shouldn't be here: %s %s" % (
current_location, lower_elevation))
if not in_circle(max_radius, cx, cy, lx, cy):
# are we wrapping on x axis?
if cx - lx < 0:
lx = 0 # move to left edge
nx = world.width - 1 # next step is wrapped around
else:
lx = world.width - 1 # move to right edge
nx = 0 # next step is wrapped around
ly = ny = int((cy + ly) / 2) # move halfway
elif not in_circle(max_radius, cx, cy, cx, ly):
# are we wrapping on y axis?
if cy - ly < 0:
ly = 0 # move to top edge
ny = world.height - 1 # next step is wrapped around
else:
ly = world.height - 1 # move to bottom edge
ny = 0 # next step is wrapped around
lx = nx = int((cx + lx) / 2) # move halfway
else:
raise Exception(
"BUG: fix me... we are not in circle: %s %s" % (
current_location, lower_elevation))
# find our way to the edge
edge_path = worldengine.astar.PathFinder().find(
world.layers['elevation'].data, [cx, cy], [lx, ly])
if not edge_path:
# can't find another other path, make it a lake
lake_list.append(current_location)
break
path += edge_path # add our newly found path
path.append([nx, ny]) # finally add our overflow to other side
current_location = path[-1]
# find our way to lowest position original found
lower_path = worldengine.astar.PathFinder().find(
world.layers['elevation'].data, current_location, lower_elevation)
path += lower_path
current_location = path[-1]
else: # can't find any other path, make it a lake
lake_list.append(current_location)
break # end of river
if not world.contains(current_location):
print("Why are we here:", current_location)
return path
def cleanUpFlow(self, river, world):
        '''Ensure that each point along the river is at an elevation equal to
        or lower than the previous one.'''
celevation = 1.0
for r in river:
rx, ry = r
relevation = world.layers['elevation'].data[ry, rx]
if relevation <= celevation:
celevation = relevation
elif relevation > celevation:
world.layers['elevation'].data[ry, rx] = celevation
return river
def findLowerElevation(self, source, world):
        '''Try to find a lower elevation within the range of an increasing
        circle's radius and return the best destination found.'''
x, y = source
currentRadius = 1
maxRadius = 40
lowestElevation = world.layers['elevation'].data[y, x]
destination = []
notFound = True
isWrapped = False
wrapped = []
while notFound and currentRadius <= maxRadius:
for cx in range(-currentRadius, currentRadius + 1):
for cy in range(-currentRadius, currentRadius + 1):
rx, ry = x + cx, y + cy
# are we within bounds?
if not self.wrap and not world.contains((rx, ry)):
continue
# are we within a circle?
if not in_circle(currentRadius, x, y, rx, ry):
continue
rx, ry = overflow(rx, world.width), overflow(ry,
world.height)
# if utilities.outOfBounds([x+cx, y+cy], self.size):
# print "Fixed:",x ,y, rx, ry
elevation = world.layers['elevation'].data[ry, rx]
# have we found a lower elevation?
if elevation < lowestElevation:
lowestElevation = elevation
destination = [rx, ry]
notFound = False
if not world.contains((x + cx, y + cy)):
wrapped.append(destination)
currentRadius += 1
if destination in wrapped:
isWrapped = True
# print "Wrapped lower elevation found:", rx, ry, "!"
return isWrapped, destination
def river_erosion(self, river, world):
""" Simulate erosion in heightmap based on river path.
* current location must be equal to or less than previous location
* riverbed is carved out by % of volume/flow
* sides of river are also eroded to slope into riverbed.
"""
# erosion around river, create river valley
for r in river:
rx, ry = r
radius = 2
for x in range(rx - radius, rx + radius):
for y in range(ry - radius, ry + radius):
if not self.wrap and not world.contains(
(x, y)): # ignore edges of map
continue
x, y = overflow(x, world.width), overflow(y, world.height)
curve = 1.0
if [x, y] == [0, 0]: # ignore center
continue
if [x, y] in river: # ignore river itself
continue
if world.layers['elevation'].data[y, x] <= world.layers['elevation'].data[ry, rx]:
# ignore areas lower than river itself
continue
if not in_circle(radius, rx, ry, x,
y): # ignore things outside a circle
continue
adx, ady = math.fabs(rx - x), math.fabs(ry - y)
if adx == 1 or ady == 1:
curve = 0.2
elif adx == 2 or ady == 2:
curve = 0.05
diff = world.layers['elevation'].data[ry, rx] - world.layers['elevation'].data[y, x]
newElevation = world.layers['elevation'].data[y, x] + (
diff * curve)
if newElevation <= world.layers['elevation'].data[ry, rx]:
                        print('newElevation is <= the river elevation, fix me...')
                        newElevation = world.layers['elevation'].data[ry, rx]
world.layers['elevation'].data[y, x] = newElevation
return
def rivermap_update(self, river, water_flow, rivermap, precipitations):
"""Update the rivermap with the rainfall that is to become
the waterflow"""
isSeed = True
px, py = (0, 0)
for x, y in river:
if isSeed:
rivermap[y, x] = water_flow[y, x]
isSeed = False
else:
rivermap[y, x] = precipitations[y, x] + rivermap[py, px]
px, py = x, y
| {
"content_hash": "ede1c0ccc4069050eea042514f2c83e1",
"timestamp": "",
"source": "github",
"line_count": 405,
"max_line_length": 104,
"avg_line_length": 40.35308641975308,
"alnum_prop": 0.49097472924187724,
"repo_name": "psi29a/worldengine",
"id": "a08a0bc27143862c080fbe0ee77048b9717f2802",
"size": "16343",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "worldengine/simulations/erosion.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "9096"
},
{
"name": "Makefile",
"bytes": "7429"
},
{
"name": "PowerShell",
"bytes": "3633"
},
{
"name": "Protocol Buffer",
"bytes": "3029"
},
{
"name": "Python",
"bytes": "270269"
},
{
"name": "Scheme",
"bytes": "24114"
},
{
"name": "Shell",
"bytes": "41"
}
],
"symlink_target": ""
} |
from django import template
from django.utils.safestring import mark_safe
register = template.Library()
@register.simple_tag
def upload_js():
return mark_safe("""
<!-- The template to display files available for upload -->
<script id="template-upload" type="text/x-tmpl">
{% for (var i=0, file; file=o.files[i]; i++) { %}
<tr class="template-upload fade">
<td>
<span class="preview"></span>
</td>
<td>
<p class="name">{%=file.name%}</p>
<strong class="error text-danger"></strong>
</td>
<td>
<p class="size">Processing...</p>
<div class="progress progress-striped active" role="progressbar" aria-valuemin="0" aria-valuemax="100" aria-valuenow="0"><div class="progress-bar progress-bar-success" style="width:0%;"></div></div>
</td>
<td>
{% if (!i && !o.options.autoUpload) { %}
<button class="btn btn-primary start" disabled>
<i class="glyphicon glyphicon-upload"></i>
<span>Start</span>
</button>
{% } %}
{% if (!i) { %}
<button class="btn btn-warning cancel">
<i class="glyphicon glyphicon-ban-circle"></i>
<span>Cancel</span>
</button>
{% } %}
</td>
</tr>
{% } %}
</script>
<!-- The template to display files available for download -->
<script id="template-download" type="text/x-tmpl">
{% for (var i=0, file; file=o.files[i]; i++) { %}
<tr class="template-download fade">
<td>
<span class="preview">
{% if (file.thumbnailUrl) { %}
<a href="{%=file.url%}" title="{%=file.name%}" download="{%=file.name%}" data-gallery><img src="{%=file.thumbnailUrl%}"></a>
{% } %}
</span>
</td>
<td>
<p class="name">
{% if (file.url) { %}
<a href="{%=file.url%}" title="{%=file.name%}" download="{%=file.name%}" {%=file.thumbnailUrl?'data-gallery':''%}>{%=file.name%}</a>
{% } else { %}
<span>{%=file.name%}</span>
{% } %}
</p>
{% if (file.error) { %}
<div><span class="label label-danger">Error</span> {%=file.error%}</div>
{% } %}
</td>
<td>
<span class="size">{%=o.formatFileSize(file.size)%}</span>
</td>
<td>
{% if (file.deleteUrl) { %}
<button class="btn btn-danger delete" data-type="{%=file.deleteType%}" data-url="{%=file.deleteUrl%}"{% if (file.deleteWithCredentials) { %} data-xhr-fields='{"withCredentials":true}'{% } %}>
<i class="glyphicon glyphicon-trash"></i>
<span>Delete</span>
</button>
<input type="checkbox" name="delete" value="1" class="toggle">
{% } else { %}
<button class="btn btn-warning cancel">
<i class="glyphicon glyphicon-ban-circle"></i>
<span>Cancel</span>
</button>
{% } %}
</td>
</tr>
{% } %}
</script>
""")
| {
"content_hash": "989941418ff0cc597b8d7a8f065b6240",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 210,
"avg_line_length": 36.37777777777778,
"alnum_prop": 0.46640195479535734,
"repo_name": "vsoch/docfish",
"id": "e086be7e79a49b8f2f1e9457e26c79ecad280f67",
"size": "3274",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docfish/apps/storage/templatetags/upload_tags1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1203987"
},
{
"name": "HTML",
"bytes": "679372"
},
{
"name": "JavaScript",
"bytes": "3447989"
},
{
"name": "Nginx",
"bytes": "1783"
},
{
"name": "Python",
"bytes": "322003"
},
{
"name": "Shell",
"bytes": "365"
}
],
"symlink_target": ""
} |
import os, sys
sys.path.append(os.getcwd())
import profiler
import time
import random
# this example essentially showcases the basic uses of
# - profiler.function_profiler
# - profiler.FunctionLogger.log_data
@profiler.function_profiler()
def foo():
return
foo()
foo()
@profiler.function_profiler()
def bar():
time.sleep(1)
return
bar()
class Woz(object):
@profiler.function_profiler()
def __init__(self):
self.x = 5
@profiler.function_profiler()
def some_instance_method(self):
return
@profiler.function_profiler()
def a_class_method():
time.sleep(random.random())
return
woz_instance = Woz()
woz_instance.some_instance_method()
another_woz_instance = Woz()
[Woz.a_class_method() for _ in range(10)]
profiler.FunctionLogger.log_data('stdout')
| {
"content_hash": "677222f9f00da89274c76de0e9c9c8ac",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 54,
"avg_line_length": 17.208333333333332,
"alnum_prop": 0.6767554479418886,
"repo_name": "Datamine/Function-Profiler",
"id": "c62d610961c2f7cbe86550b99b903e3adfcfd0c2",
"size": "954",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/basic-profiler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16019"
}
],
"symlink_target": ""
} |
"""Integration code for AFLplusplus fuzzer."""
import os
import shutil
from fuzzers.afl import fuzzer as afl_fuzzer
from fuzzers import utils
def get_cmplog_build_directory(target_directory):
"""Return path to CmpLog target directory."""
return os.path.join(target_directory, 'cmplog')
def get_uninstrumented_build_directory(target_directory):
"""Return path to CmpLog target directory."""
return os.path.join(target_directory, 'uninstrumented')
def build(*args): # pylint: disable=too-many-branches,too-many-statements
"""Build benchmark."""
# BUILD_MODES is not already supported by fuzzbench, meanwhile we provide
# a default configuration.
build_modes = list(args)
if 'BUILD_MODES' in os.environ:
build_modes = os.environ['BUILD_MODES'].split(',')
# Placeholder comment.
build_directory = os.environ['OUT']
# If nothing was set this is the default:
if not build_modes:
build_modes = ['tracepc', 'cmplog', 'dict2file']
# For bug type benchmarks we have to instrument via native clang pcguard :(
build_flags = os.environ['CFLAGS']
if build_flags.find(
'array-bounds'
) != -1 and 'qemu' not in build_modes and 'classic' not in build_modes:
build_modes[0] = 'native'
# Instrumentation coverage modes:
if 'lto' in build_modes:
os.environ['CC'] = '/afl/afl-clang-lto'
os.environ['CXX'] = '/afl/afl-clang-lto++'
edge_file = build_directory + '/aflpp_edges.txt'
os.environ['AFL_LLVM_DOCUMENT_IDS'] = edge_file
if os.path.isfile('/usr/local/bin/llvm-ranlib-13'):
os.environ['RANLIB'] = 'llvm-ranlib-13'
os.environ['AR'] = 'llvm-ar-13'
os.environ['AS'] = 'llvm-as-13'
elif os.path.isfile('/usr/local/bin/llvm-ranlib-12'):
os.environ['RANLIB'] = 'llvm-ranlib-12'
os.environ['AR'] = 'llvm-ar-12'
os.environ['AS'] = 'llvm-as-12'
else:
os.environ['RANLIB'] = 'llvm-ranlib'
os.environ['AR'] = 'llvm-ar'
os.environ['AS'] = 'llvm-as'
elif 'qemu' in build_modes:
os.environ['CC'] = 'clang'
os.environ['CXX'] = 'clang++'
elif 'gcc' in build_modes:
os.environ['CC'] = 'afl-gcc-fast'
os.environ['CXX'] = 'afl-g++-fast'
else:
os.environ['CC'] = '/afl/afl-clang-fast'
os.environ['CXX'] = '/afl/afl-clang-fast++'
print('AFL++ build: ')
print(build_modes)
if 'qemu' in build_modes or 'symcc' in build_modes:
os.environ['CFLAGS'] = ' '.join(utils.NO_SANITIZER_COMPAT_CFLAGS)
cxxflags = [utils.LIBCPLUSPLUS_FLAG] + utils.NO_SANITIZER_COMPAT_CFLAGS
os.environ['CXXFLAGS'] = ' '.join(cxxflags)
if 'tracepc' in build_modes or 'pcguard' in build_modes:
os.environ['AFL_LLVM_USE_TRACE_PC'] = '1'
elif 'classic' in build_modes:
os.environ['AFL_LLVM_INSTRUMENT'] = 'CLASSIC'
elif 'native' in build_modes:
os.environ['AFL_LLVM_INSTRUMENT'] = 'LLVMNATIVE'
# Instrumentation coverage options:
# Do not use a fixed map location (LTO only)
if 'dynamic' in build_modes:
os.environ['AFL_LLVM_MAP_DYNAMIC'] = '1'
# Use a fixed map location (LTO only)
if 'fixed' in build_modes:
os.environ['AFL_LLVM_MAP_ADDR'] = '0x10000'
# Generate an extra dictionary.
if 'dict2file' in build_modes or 'native' in build_modes:
os.environ['AFL_LLVM_DICT2FILE'] = build_directory + '/afl++.dict'
    # Enable context sensitivity for LLVM mode (non LTO only)
if 'ctx' in build_modes:
os.environ['AFL_LLVM_CTX'] = '1'
# Enable N-gram coverage for LLVM mode (non LTO only)
if 'ngram2' in build_modes:
os.environ['AFL_LLVM_NGRAM_SIZE'] = '2'
elif 'ngram3' in build_modes:
os.environ['AFL_LLVM_NGRAM_SIZE'] = '3'
elif 'ngram4' in build_modes:
os.environ['AFL_LLVM_NGRAM_SIZE'] = '4'
elif 'ngram5' in build_modes:
os.environ['AFL_LLVM_NGRAM_SIZE'] = '5'
elif 'ngram6' in build_modes:
os.environ['AFL_LLVM_NGRAM_SIZE'] = '6'
elif 'ngram7' in build_modes:
os.environ['AFL_LLVM_NGRAM_SIZE'] = '7'
elif 'ngram8' in build_modes:
os.environ['AFL_LLVM_NGRAM_SIZE'] = '8'
elif 'ngram16' in build_modes:
os.environ['AFL_LLVM_NGRAM_SIZE'] = '16'
if 'ctx1' in build_modes:
os.environ['AFL_LLVM_CTX_K'] = '1'
elif 'ctx2' in build_modes:
os.environ['AFL_LLVM_CTX_K'] = '2'
elif 'ctx3' in build_modes:
os.environ['AFL_LLVM_CTX_K'] = '3'
elif 'ctx4' in build_modes:
os.environ['AFL_LLVM_CTX_K'] = '4'
# Only one of the following OR cmplog
# enable laf-intel compare splitting
if 'laf' in build_modes:
os.environ['AFL_LLVM_LAF_SPLIT_SWITCHES'] = '1'
os.environ['AFL_LLVM_LAF_SPLIT_COMPARES'] = '1'
os.environ['AFL_LLVM_LAF_SPLIT_FLOATS'] = '1'
if 'autodict' not in build_modes:
os.environ['AFL_LLVM_LAF_TRANSFORM_COMPARES'] = '1'
if 'eclipser' in build_modes:
os.environ['FUZZER_LIB'] = '/libStandaloneFuzzTarget.a'
else:
os.environ['FUZZER_LIB'] = '/libAFLDriver.a'
# Some benchmarks like lcms
# (see: https://github.com/mm2/Little-CMS/commit/ab1093539b4287c233aca6a3cf53b234faceb792#diff-f0e6d05e72548974e852e8e55dffc4ccR212)
# fail to compile if the compiler outputs things to stderr in unexpected
# cases. Prevent these failures by using AFL_QUIET to stop afl-clang-fast
# from writing AFL specific messages to stderr.
os.environ['AFL_QUIET'] = '1'
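    # Use a larger-than-default coverage map, presumably sized so that large
    # benchmark targets do not overflow the shared-memory bitmap.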
os.environ['AFL_MAP_SIZE'] = '2621440'
src = os.getenv('SRC')
work = os.getenv('WORK')
with utils.restore_directory(src), utils.restore_directory(work):
# Restore SRC to its initial state so we can build again without any
# trouble. For some OSS-Fuzz projects, build_benchmark cannot be run
# twice in the same directory without this.
utils.build_benchmark()
if 'cmplog' in build_modes and 'qemu' not in build_modes:
        # CmpLog requires a build with different instrumentation.
new_env = os.environ.copy()
new_env['AFL_LLVM_CMPLOG'] = '1'
# For CmpLog build, set the OUT and FUZZ_TARGET environment
# variable to point to the new CmpLog build directory.
cmplog_build_directory = get_cmplog_build_directory(build_directory)
os.mkdir(cmplog_build_directory)
new_env['OUT'] = cmplog_build_directory
fuzz_target = os.getenv('FUZZ_TARGET')
if fuzz_target:
new_env['FUZZ_TARGET'] = os.path.join(cmplog_build_directory,
os.path.basename(fuzz_target))
print('Re-building benchmark for CmpLog fuzzing target')
utils.build_benchmark(env=new_env)
if 'symcc' in build_modes:
symcc_build_directory = get_uninstrumented_build_directory(
build_directory)
os.mkdir(symcc_build_directory)
        # symcc requires a build with different instrumentation.
new_env = os.environ.copy()
new_env['CC'] = '/symcc/build/symcc'
new_env['CXX'] = '/symcc/build/sym++'
new_env['SYMCC_OUTPUT_DIR'] = '/tmp'
new_env['CXXFLAGS'] = new_env['CXXFLAGS'].replace("-stlib=libc++", "")
new_env['FUZZER_LIB'] = '/libfuzzer-harness.o'
new_env['OUT'] = symcc_build_directory
new_env['SYMCC_LIBCXX_PATH'] = "/libcxx_native_build"
new_env['SYMCC_NO_SYMBOLIC_INPUT'] = "1"
new_env['SYMCC_SILENT'] = "1"
        # For the SymCC build, set the OUT and FUZZ_TARGET environment
        # variables to point to the new SymCC build directory.
new_env['OUT'] = symcc_build_directory
fuzz_target = os.getenv('FUZZ_TARGET')
if fuzz_target:
new_env['FUZZ_TARGET'] = os.path.join(symcc_build_directory,
os.path.basename(fuzz_target))
        print('Re-building benchmark for SymCC fuzzing target')
utils.build_benchmark(env=new_env)
shutil.copy('/afl/afl-fuzz', build_directory)
if os.path.exists('/afl/afl-qemu-trace'):
shutil.copy('/afl/afl-qemu-trace', build_directory)
if os.path.exists('/aflpp_qemu_driver_hook.so'):
shutil.copy('/aflpp_qemu_driver_hook.so', build_directory)
if os.path.exists('/get_frida_entry.sh'):
shutil.copy('/afl/afl-frida-trace.so', build_directory)
shutil.copy('/get_frida_entry.sh', build_directory)
# pylint: disable=too-many-arguments
def fuzz(input_corpus,
output_corpus,
target_binary,
flags=tuple(),
skip=False,
no_cmplog=False): # pylint: disable=too-many-arguments
"""Run fuzzer."""
# Calculate CmpLog binary path from the instrumented target binary.
target_binary_directory = os.path.dirname(target_binary)
cmplog_target_binary_directory = (
get_cmplog_build_directory(target_binary_directory))
target_binary_name = os.path.basename(target_binary)
cmplog_target_binary = os.path.join(cmplog_target_binary_directory,
target_binary_name)
afl_fuzzer.prepare_fuzz_environment(input_corpus)
    # uncomment this to enable libdislocator.
# os.environ['AFL_ALIGNED_ALLOC'] = '1' # align malloc to max_align_t
# os.environ['AFL_PRELOAD'] = '/afl/libdislocator.so'
flags = list(flags)
if os.path.exists('./afl++.dict'):
flags += ['-x', './afl++.dict']
# Move the following to skip for upcoming _double tests:
if os.path.exists(cmplog_target_binary) and no_cmplog is not False:
flags += ['-c', cmplog_target_binary]
if not skip:
os.environ['AFL_DISABLE_TRIM'] = "1"
# os.environ['AFL_FAST_CAL'] = '1'
os.environ['AFL_CMPLOG_ONLY_NEW'] = '1'
if 'ADDITIONAL_ARGS' in os.environ:
flags += os.environ['ADDITIONAL_ARGS'].split(' ')
os.environ['AFL_DISABLE_RP'] = '1'
afl_fuzzer.run_afl_fuzz(input_corpus,
output_corpus,
target_binary,
additional_flags=flags)
| {
"content_hash": "b54e259fb5ccc2861ccec1a95fb3d4cf",
"timestamp": "",
"source": "github",
"line_count": 255,
"max_line_length": 136,
"avg_line_length": 40.12549019607843,
"alnum_prop": 0.6134675527756059,
"repo_name": "google/fuzzbench",
"id": "b350390e21c69ee791d527276da82c35ccc4e66a",
"size": "10804",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fuzzers/aflpp_random_wrs_rf/fuzzer.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "17334"
},
{
"name": "C++",
"bytes": "37645"
},
{
"name": "Dockerfile",
"bytes": "337043"
},
{
"name": "HTML",
"bytes": "25840"
},
{
"name": "Jupyter Notebook",
"bytes": "578996"
},
{
"name": "Makefile",
"bytes": "2810"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "1222236"
},
{
"name": "Shell",
"bytes": "86157"
}
],
"symlink_target": ""
} |
import os
from app import create_app, db
from app.models import User, Follow, Role, Permission, Post, Comment, Api, Api_gps
from flask_script import Manager, Shell
from flask_migrate import Migrate, MigrateCommand
from werkzeug.contrib.fixers import ProxyFix
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
app.wsgi_app = ProxyFix(app.wsgi_app)
manager = Manager(app)
migrate = Migrate(app, db)
def make_shell_context():
return dict(app=app, db=db, User=User, Follow=Follow, Role=Role,
Permission=Permission, Post=Post, Comment=Comment, Api=Api, Api_gps=Api_gps)
manager.add_command("shell", Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)
@manager.command
def test():
"""Run the unit tests."""
import unittest
tests = unittest.TestLoader().discover('tests')
unittest.TextTestRunner(verbosity=2).run(tests)
if __name__ == '__main__':
manager.run()
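# Typical invocations (the "shell", "db" and "test" commands are registered
# above; "db" subcommands come from Flask-Migrate):
#   python manage.py shell
#   python manage.py db upgrade
#   python manage.py test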
| {
"content_hash": "15c6673794719e9e25184ff13e2f4c38",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 92,
"avg_line_length": 31.266666666666666,
"alnum_prop": 0.7185501066098081,
"repo_name": "Harold2017/myfirstflasky",
"id": "a3211f7c8557bd6936aa7715dc738091c838c1f3",
"size": "960",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1855"
},
{
"name": "HTML",
"bytes": "48411"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "166924"
}
],
"symlink_target": ""
} |
"""A Folder Resource."""
import json
from google.cloud.forseti.common.gcp_type import resource
class FolderLifecycleState(resource.LifecycleState):
"""Represents the Folder's LifecycleState."""
pass
class Folder(resource.Resource):
"""Folder Resource."""
RESOURCE_NAME_FMT = 'folders/%s'
def __init__(
self,
folder_id,
full_name=None,
data=None,
name=None,
display_name=None,
parent=None,
lifecycle_state=FolderLifecycleState.UNSPECIFIED):
"""Initialize.
Args:
folder_id (str): The folder id number.
            full_name (str): The full resource name and ancestry.
data (str): Resource representation of the folder.
name (str): The folder unique GCP name, i.e. "folders/{id}".
display_name (str): The folder display name.
parent (Resource): The parent Resource.
lifecycle_state (LifecycleState): The folder's lifecycle state.
"""
super(Folder, self).__init__(
resource_id=folder_id,
resource_type=resource.ResourceType.FOLDER,
name=name,
display_name=display_name,
parent=parent,
lifecycle_state=lifecycle_state)
self.full_name = full_name
self.data = data
@classmethod
def from_json(cls, parent, json_string):
"""Creates a Folder from a JSON string.
Args:
parent (Resource): resource this folder belongs to.
json_string (str): JSON string of a folder GCP resource.
Returns:
Folder: folder resource.
"""
folder_dict = json.loads(json_string)
name = folder_dict['name']
folder_id = name.split('/')[-1]
full_name = 'folder/{}/'.format(folder_id)
if parent:
full_name = '{}{}'.format(parent.full_name, full_name)
lifecycle = folder_dict.get('lifecycleState',
FolderLifecycleState.UNSPECIFIED)
return cls(
folder_id=folder_id,
full_name=full_name,
data=json_string,
name=name,
display_name=folder_dict.get('displayName'),
parent=parent,
lifecycle_state=lifecycle)
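# Minimal usage sketch (the JSON payload is illustrative, shaped like a Cloud
# Resource Manager folder resource):
#   folder = Folder.from_json(
#       None,
#       '{"name": "folders/1234", "displayName": "dev", '
#       '"lifecycleState": "ACTIVE"}')
#   # folder.full_name == 'folder/1234/'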
| {
"content_hash": "e0627dc42dd314c13a6a728369603c62",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 75,
"avg_line_length": 31.7027027027027,
"alnum_prop": 0.5626598465473146,
"repo_name": "forseti-security/forseti-security",
"id": "48bfc874ea2fd6a5ddbb9b340b7995d1339b1dc9",
"size": "2960",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "google/cloud/forseti/common/gcp_type/folder.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3652"
},
{
"name": "HCL",
"bytes": "37409"
},
{
"name": "JavaScript",
"bytes": "1833"
},
{
"name": "Jinja",
"bytes": "6379"
},
{
"name": "Makefile",
"bytes": "5427"
},
{
"name": "Open Policy Agent",
"bytes": "3600"
},
{
"name": "Python",
"bytes": "4140122"
},
{
"name": "Ruby",
"bytes": "37434"
},
{
"name": "Shell",
"bytes": "17062"
}
],
"symlink_target": ""
} |
import urllib2
import urllib
import urlparse
import base64
import argparse
import sys
import json
import ConfigParser
import os
import time
import atexit
import signal
import logging
import ssl
# maps a dynamic DNS provider name (used in the config file)
# to the name of the class that supports it
DNS_PROVIDER_MAP = {'linode': 'LinodeDNS'}
class CMError(Exception):
"""
Custom exception
"""
pass
# This class from
# http://www.jejik.com/articles/2007/02/a_simple_unix_linux_daemon_in_python/
class Daemon(object):
"""
A generic daemon class.
Usage: subclass the Daemon class and override the run() method
"""
def __init__(self, pidfile, stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'):
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.pidfile = pidfile
def daemonize(self):
"""
do the UNIX double-fork magic, see Stevens' "Advanced
Programming in the UNIX Environment" for details (ISBN 0201563177)
http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
"""
try:
pid = os.fork()
if pid > 0:
# exit first parent
sys.exit(0)
except OSError, e:
sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# decouple from parent environment
os.chdir("/")
os.setsid()
os.umask(0)
# do second fork
try:
pid = os.fork()
if pid > 0:
# exit from second parent
sys.exit(0)
except OSError, e:
sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# redirect standard file descriptors
sys.stdout.flush()
sys.stderr.flush()
# si = file(self.stdin, 'r')
# so = file(self.stdout, 'a+')
# se = file(self.stderr, 'a+', 0)
# os.dup2(si.fileno(), sys.stdin.fileno())
# os.dup2(so.fileno(), sys.stdout.fileno())
# os.dup2(se.fileno(), sys.stderr.fileno())
# write pidfile
atexit.register(self.delpid)
pid = str(os.getpid())
file(self.pidfile,'w+').write("%s\n" % pid)
def delpid(self):
os.remove(self.pidfile)
def start(self):
"""
Start the daemon
"""
# Check for a pidfile to see if the daemon already runs
try:
pf = open(self.pidfile, 'r')
pid = int(pf.read().strip())
pf.close()
except IOError as e:
pid = None
if pid:
message = "pidfile %s already exist. Daemon already running?\n"
sys.stderr.write(message % self.pidfile)
sys.exit(1)
# Start the daemon
self.daemonize()
self.run()
def stop(self):
"""
Stop the daemon
"""
# Get the pid from the pidfile
try:
pf = file(self.pidfile,'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
if not pid:
message = "pidfile %s does not exist. Daemon not running?\n"
sys.stderr.write(message % self.pidfile)
return # not an error in a restart
# Try killing the daemon process
try:
while 1:
os.kill(pid, signal.SIGTERM)
time.sleep(0.1)
except OSError, err:
err = str(err)
if err.find("No such process") > 0:
if os.path.exists(self.pidfile):
os.remove(self.pidfile)
else:
print str(err)
sys.exit(1)
def restart(self):
"""
Restart the daemon
"""
self.stop()
self.start()
def run(self):
"""
You should override this method when you subclass Daemon. It will be called after the process has been
daemonized by start() or restart().
"""
class Logger(object):
"""
Wrapper around logging
Supported levels:
- info
- warning
- error
"""
def __init__(self, filename):
"""
Create logger
Args:
filename: name of the log file
"""
self.logfile = os.path.abspath(filename)
self.logger = self._setup_logger()
def _setup_logger(self):
logger = logging.getLogger('littled')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
try:
handler = logging.FileHandler(self.logfile)
except IOError as e:
sys.exit(1)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
return logger
def rotate_handler(self):
self.log('info', 'Rotating logs.')
for handler in self.logger.handlers:
handler.close()
self.logger.removeHandler(handler)
self.logger = self._setup_logger()
self.log('info', 'New log file started.')
def log(self, level, message):
if self.logger:
if level == 'info':
self.logger.info(message)
elif level == 'warning':
self.logger.warning(message)
elif level == 'error':
self.logger.error(message)
class ChipmunkRequest(object):
# pylint: disable=too-many-instance-attributes
# This class uses these different attributes
"""
Class to handle requests to the Chipmunk API server.
"""
def __init__(self, config_dict):
"""
        Initialize
Args:
config_dict: dictionary containing the following keys:
username: string for username on chipmunkapi server
password: string for password on chipmunkapi server
server: API endpoint URL
method: method to call on the endpoint
"""
self.username = config_dict['username']
self.password = config_dict['password']
self.endpoint = config_dict['endpoint']
self.method = config_dict['method']
# the return value from the server
self.my_ip = None
self.status = None
        # determine if we have a new IP address. Initially set to true
# since we don't know what our "old" address was when we start up.
self.have_new_ip = True
@property
def method(self):
return self._method
@method.setter
def method(self, value):
if value == 'get':
self._method = 'get/ip.json'
elif value == 'set':
self._method = 'set'
else:
raise ValueError()
@property
def endpoint(self):
return self._endpoint
@endpoint.setter
def endpoint(self, value):
try:
# if the scheme is missing, assume it's http and try that
parsed_url = urlparse.urlsplit(value, 'http')
except AttributeError:
print 'Invalid server URL.'
sys.exit(1)
if parsed_url.netloc == '':
print 'Invalid server URL.'
sys.exit(1)
else:
self._endpoint = urlparse.urlunsplit(parsed_url)
def configure(self, config_dict):
"""
        Re-apply the connection settings from config_dict (same keys as __init__).
"""
self.username = config_dict['username']
self.password = config_dict['password']
self.endpoint = config_dict['endpoint']
self.method = config_dict['method']
def get_ip(self):
"""
Get the client's IP address from the Chipmunk API Server. The purpose
of all of this is to discover the client's public IP address. Depending
on the specific method called, the API Server may do more than just
return the client IP (see the API server for details).
Returns:
IP address as a string
"""
# initially set ip to self.my_ip to put at a known good starting point
ip = self.my_ip
url = self.endpoint + '/' + self.method
try:
request = urllib2.Request(url)
creds = base64.encodestring('%s:%s' % (self.username, self.password)).strip()
request.add_header('Authorization', 'Basic %s' % creds)
request.add_header('User-Agent', 'Chipmunk Client')
# In 2.7.9, changes were made to validate certs by default. This
# is obviously a "good" thing, but I want/need to mirror the
# non-validating behavior of < 2.7.9. Eventually this should be
# a config option, but for now, here is some evil to undo the good
# of validation.
if sys.version_info.micro >= 9:
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
result = urllib2.urlopen(request, context=ctx)
else:
result = urllib2.urlopen(request)
answer = result.read()
answer = json.loads(answer)
except urllib2.HTTPError as e:
message = 'Error connecting to Chipmunk API: ' + str(e)
raise CMError(message)
except urllib2.URLError as e:
message = 'Error connecting to Chipmunk API: ' + str(e.reason)
raise CMError(message)
# catch if the response isn't json
try:
ip = answer['Client_IP']
except ValueError as e:
raise CMError(str(e))
if self.method == 'set':
self.status = answer['status']
if ip == self.my_ip:
self.have_new_ip = False
else:
self.have_new_ip = True
# finally set the new ip as our ip
self.my_ip = ip
return self.my_ip
@staticmethod
def get_config_params(conf):
"""
Set up the configuration parameters needed to communicate with the
Chipmunk API server.
Args:
conf: a ConfigParser.ConfigParser() object
Returns:
A dictionary with the keys and values used to configure a
ChipmunkRequest instance.
"""
settings = dict()
try:
settings['username'] = conf.get('ChipmunkAPIServer', 'username')
settings['password'] = conf.get('ChipmunkAPIServer', 'password')
settings['method'] = conf.get('ChipmunkAPIServer', 'method')
settings['endpoint'] = conf.get('ChipmunkAPIServer', 'endpoint')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError) as e:
print >> sys.stderr, 'Problem with your configuration file.'
print >> sys.stderr, '\t' + str(e)
sys.exit(1)
return settings
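# A minimal usage sketch, assuming the request object is built directly from
# a dictionary; the credentials and endpoint are hypothetical example values
# (a real caller takes them from the [ChipmunkAPIServer] config section).
def _example_chipmunk_lookup():
    cm = ChipmunkRequest({'username': 'alice',
                          'password': 'secret',
                          'endpoint': 'https://chipmunk.example.com',
                          'method': 'get'})
    return cm.get_ip()  # raises CMError on connection or decoding problems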
class DNSProvider(object):
"""
Abstract class for implementing the ability to update
DNS providers.
"""
def __init__(self):
raise NotImplementedError
def update_dns(self):
"""
Make update to DNS entry
Should raise CMError() exceptions for problems with the connection or
data. The calling routine should handle this error appropriately.
Returns:
String suitable for display to end user indicating status of
the update
"""
raise NotImplementedError
def configure(self, config_data):
"""
Read the values from a dictionary and set the appropriate variables.
Args:
config_data: dictionary whose key:value pairs are used to
set the appropriate variables
"""
raise NotImplementedError
@staticmethod
def get_config_params(configp_dict):
"""
Setup the configuration parameters needed to communicate with the
DNS provider and make updates. This should be implemented for each
specific DNS provider, as the parameters for each are likely different.
Args:
configp_dict: a ConfigParser.ConfigParser() object
Returns:
A dictionary with keys and values that are used by the provider
specific implementation of this class.
"""
raise NotImplementedError
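# A minimal sketch of the shape a new provider implementation would take,
# assuming the same method contract as DNSProvider above. "ExampleDNS" and
# its config section/keys are hypothetical.
class ExampleDNS(DNSProvider):
    def __init__(self):
        self.api_key = ''
        self.host = ''
    def configure(self, config_data):
        self.api_key = config_data['api_key']
        self.host = config_data['host']
    def update_dns(self, new_ip_addr):
        # a real provider would call its API here and raise CMError on failure
        return 'No Update Needed'
    @staticmethod
    def get_config_params(configp_dict):
        return {'api_key': configp_dict.get('exampledns', 'api_key'),
                'host': configp_dict.get('exampledns', 'host')}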
class LinodeDNS(DNSProvider):
"""
Implement Linode DNS update
For details of the API see: https://www.linode.com/api
"""
def __init__(self):
self.api_key = ''
self.domain = ''
self.host = ''
self.endpoint = 'https://api.linode.com'
self.domain_id = None
self.resource_id = None
self.linode_ip = None
def configure(self, config_data):
"""
see super class
"""
self.api_key = config_data['api_key']
self.domain = config_data['domain']
self.host = config_data['host']
self.endpoint = config_data['endpoint']
self.api_key = config_data['api_key']
def update_dns(self, new_ip_addr):
"""
Perform DNS update.
Any connection errors are raised as CMError() exceptions. These
should be handled appropriately when ths method is called.
Args:
new_ip_addr: string with the new IP address to set
"""
self.domain_id = self._get_domain_id()
self.resource_id = self._get_resource_id()
self.linode_ip = self._get_linode_ip()
if new_ip_addr != self.linode_ip:
self._set_linode_ip(new_ip_addr)
return 'Success'
else:
return 'No Update Needed'
def _get_domain_id(self):
"""
Get the DomainID for the given domain.
https://www.linode.com/api/dns/domain.list
Returns: DomainID of domain
"""
if self.domain_id:
return self.domain_id
method = {'api_action': 'domain.list'}
data = self._make_api_call(method)
for entry in data:
if entry['DOMAIN'] == self.domain:
return entry['DOMAINID']
def _get_resource_id(self):
"""
Get the ResourceID of the entry for the given host.
https://www.linode.com/api/dns/domain.list
Returns: ResourceID for the host
"""
if self.resource_id:
return self.resource_id
method = {'api_action' : 'domain.resource.list',
'DomainID' : self.domain_id}
data = self._make_api_call(method)
for entry in data:
if entry['NAME'] == self.host:
return entry['RESOURCEID']
def _get_linode_ip(self):
"""
Get the current IP address set in Linode's DNS.
https://www.linode.com/api/dns/domain.resource.list
Returns: IP address (as a string) matching the hostname
"""
method = {'api_action' : 'domain.resource.list',
'DomainID' : self.domain_id}
data = self._make_api_call(method)
for entry in data:
if entry['NAME'] == self.host:
return entry['TARGET']
def _set_linode_ip(self, new_ip):
"""
Set the IP address Linode DNS.
https://www.linode.com/api/dns/domain.resource.update
Returns: ResourceID that was updated
"""
method = {'api_action': 'domain.resource.update',
'ResourceID' : self.resource_id,
'DomainID' : self.domain_id,
'Target': new_ip}
data = self._make_api_call(method)
return data['ResourceID']
def _make_api_call(self, query_dict):
"""
Make the actual API call to Linode.
Args:
query_dict: Dictionary containing the API methods and parameters.
This is converted into a URL query string
Returns: Data dictionary from linode API call.
"""
method = urllib.urlencode(query_dict)
url = self.endpoint + '/?api_key=' + self.api_key + '&' + method
answer = None
try:
request = urllib2.Request(url)
result = urllib2.urlopen(request)
answer = result.read()
except urllib2.HTTPError as e:
raise CMError('Error connecting to DNS provider: %s' % str(e))
except urllib2.URLError as e:
raise CMError('Error connecting to DNS provider: %s' % str(e.reason))
try:
answer = json.loads(answer)
except ValueError as e:
raise CMError('Error decoding JSON response: %s' % str(e))
if answer:
if len(answer['ERRORARRAY']) != 0:
message = answer['ERRORARRAY'][0]['ERRORMESSAGE']
raise CMError('Problem with DNS Update API call: %s' % str(message))
else:
return answer['DATA']
@staticmethod
def get_config_params(configp_dict):
"""
See superclass
"""
retval = dict()
try:
retval['api_key'] = configp_dict.get('linode', 'api_key')
retval['host'] = configp_dict.get('linode', 'host')
retval['domain'] = configp_dict.get('linode', 'domain')
retval['endpoint'] = configp_dict.get('linode', 'endpoint')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError) as e:
print >> sys.stderr, 'Problem with your configuration file.'
print >> sys.stderr, '\t' + str(e)
sys.exit(1)
return retval
class Config(object):
"""
Process configuration data
"""
def __init__(self):
self.config_file = None
# general settings
self.update_dns = False
self.daemonize = False
# params needed for CM API Server
self.cmapi = None
# params needed for DNS updates. The contents will vary by
# provider
self.dns_provider = None
self.dns_params = dict()
self.daemon = dict()
def _global_config(self, conf):
"""
setup the global config file options
"""
try:
update_dns = conf.get('Global', 'update_dns')
daemonize = conf.get('Global', 'daemonize')
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError) as e:
print >> sys.stderr, 'Problem with your configuration file.'
print >> sys.stderr, '\t' + str(e)
sys.exit(1)
# update_dns
if update_dns.lower() == 'yes':
self.update_dns = True
else:
self.update_dns = False
# daemonize
if daemonize.lower() == 'yes':
self.daemonize = True
else:
self.daemonize = False
def configure(self, config_file):
"""
Read configuration file for parameters and set up everything.
Args:
config_file: config file to read
"""
self.config_file = os.path.abspath(config_file)
conf = ConfigParser.ConfigParser()
conf.read(config_file)
# get global settings
self._global_config(conf)
# parameters for interacting with chipmunk API server
self.cmapi = ChipmunkRequest.get_config_params(conf)
# fill in appropriate dns provider values
if self.update_dns:
self.dns_provider = conf.get('dDNS', 'provider')
self.dns_params = globals()[DNS_PROVIDER_MAP[self.dns_provider]].get_config_params(conf)
# daemon parameters if we are going to be a daemon
if self.daemonize:
self.daemon['interval'] = conf.get('Daemon', 'interval')
self.daemon['pidfile'] = conf.get('Daemon', 'pidfile').strip("'").strip('"')
self.daemon['logfile'] = conf.get('Daemon', 'logfile').strip("'").strip('"')
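# A minimal sketch of the kind of INI file Config.configure() expects, based
# on the option lookups above. Every value is a hypothetical example, and the
# [dDNS] provider value is assumed to be a key of DNS_PROVIDER_MAP defined
# earlier in this module (here, one mapping to LinodeDNS).
_EXAMPLE_CONFIG = """
[Global]
update_dns = yes
daemonize = yes
[ChipmunkAPIServer]
username = alice
password = secret
endpoint = https://chipmunk.example.com
method = set
[dDNS]
provider = linode
[linode]
api_key = EXAMPLEAPIKEY
host = www
domain = example.com
endpoint = https://api.linode.com
[Daemon]
interval = 300
pidfile = '/var/run/chipmunkd.pid'
logfile = '/var/log/chipmunkd.log'
"""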
class CMDaemon(Daemon):
"""
Implementation of the Daemon class. Allows this to be run as a daemon.
"""
def __init__(self, conf):
"""
Setup the daemon version of things
Args:
conf: a Config object with everything initialized
"""
self.conf = conf
self.cm = ChipmunkRequest(self.conf.cmapi)
self.dnupdate = None
if self.conf.update_dns:
self.dnupdate = globals()[DNS_PROVIDER_MAP[self.conf.dns_provider]]()
self.dnupdate.configure(self.conf.dns_params)
self.logger = Logger(self.conf.daemon['logfile'])
# self.cm.logger = self.logger
# self.dnupdate.logger = self.logger
super(CMDaemon, self).__init__(self.conf.daemon['pidfile'])
# register signal handlers
signal.signal(signal.SIGHUP, self._sighup_handler)
signal.signal(signal.SIGTERM, self._sigterm_handler)
signal.signal(signal.SIGUSR1, self._sigusr1_handler)
def _sighup_handler(self, signum, frame):
"""
Handle a SIGHUP by reloading the config.
"""
self.logger.log('info', 'Reloading Configuration.')
self.conf.configure(self.conf.config_file)
self.cm.configure(self.conf.cmapi)
if self.dnupdate:
# if we have an object, just reconfigure it
self.dnupdate.configure(self.conf.dns_params)
elif self.conf.update_dns:
# need to create the object
self.dnupdate = globals()[DNS_PROVIDER_MAP[self.conf.dns_provider]]()
self.dnupdate.configure(self.conf.dns_params)
def _sigusr1_handler(self, signum, frame):
"""
Handle SIGUSR1 to allow log file rotation.
"""
self.logger.rotate_handler()
def _sigterm_handler(self, signum, frame):
self.logger.log('info', 'Exiting.')
sys.exit(0)
def run(self):
"""
Implement the run method to allow daemonization. Execute all
logic in here.
"""
self.logger.log('info', 'Starting.')
while True:
try:
self.cm.get_ip()
except CMError as e:
self.logger.log('error', 'Problem getting IP address: %s' % str(e))
time.sleep(int(self.conf.daemon['interval']))
continue
self.logger.log('info', 'Current IP is: %s ' % self.cm.my_ip)
if self.conf.update_dns:
if self.cm.have_new_ip:
try:
message = self.dnupdate.update_dns(self.cm.my_ip)
except CMError as e:
self.logger.log('error', 'Problem updating DNS: %s' % str(e))
# setting cm.my_ip to something it can't ever be will
# force a dns update the next time around
self.cm.my_ip = '0.0.0.0'
time.sleep(int(self.conf.daemon['interval']))
continue
self.logger.log('info', 'DNS Update Status: %s' % message)
# sleep for a period of time before checking again.
time.sleep(int(self.conf.daemon['interval']))
def single_run(conf):
"""
Run everything one time. Allows a one-shot command line update or
incorporation into a shell script. Prints status messages to stdout.
"""
cm = ChipmunkRequest(conf.cmapi)
try:
ip = cm.get_ip()
except CMError as e:
print >> sys.stderr, 'Problem getting IP address: %s' % str(e)
sys.exit(1)
print 'IP: %s' % (cm.my_ip)
if cm.status:
print 'Status: %s' % (cm.status)
if conf.update_dns:
dnupdate = globals()[DNS_PROVIDER_MAP[conf.dns_provider]]()
dnupdate.configure(conf.dns_params)
try:
update_result = dnupdate.update_dns(cm.my_ip)
except CMError as e:
print >> sys.stderr, 'Problem updating DNS: %s' % str(e)
sys.exit(1)
print 'DNS Update status: %s' % (update_result)
sys.exit(0)
def _get_args():
"""
Get command line arguments.
Returns: argparse Namespace
"""
parser = argparse.ArgumentParser()
parser.add_argument('-c',
'--config',
required=True,
help='Configuration File')
args = parser.parse_args()
return args
def main():
"""
main
"""
args = _get_args()
# load configuration data
conf = Config()
conf.configure(args.config)
if conf.daemonize:
daemon = CMDaemon(conf)
daemon.start()
else:
single_run(conf)
if __name__ == '__main__':
main()
| {
"content_hash": "ad7928b95b41a3d682f5fc5b1ef7d005",
"timestamp": "",
"source": "github",
"line_count": 766,
"max_line_length": 110,
"avg_line_length": 32.26109660574413,
"alnum_prop": 0.5581094205244416,
"repo_name": "theory14/chipmunk",
"id": "9e3be71082574f3e4ed78bf32208964c57194f32",
"size": "24735",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "client/chipmunkclient.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "45702"
},
{
"name": "Shell",
"bytes": "188"
},
{
"name": "Smarty",
"bytes": "183"
}
],
"symlink_target": ""
} |
from __future__ import division
import requests
from collections import defaultdict
from math import ceil
BASE_URL = "http://api.genius.com"
def search_songs(search, n=25):
pages = int(ceil(n / 25))
songs = []
for page in xrange(1, pages + 1):
r = requests.get(BASE_URL + "/search", params={"q": search, "page": page})
new_songs = [Song(x) for x in r.json()['response']['hits']]
if len(new_songs) == 0:
break
songs += new_songs
return songs
def search_artists(search):
songs = search_songs(search)
count = defaultdict(int)
artist_by_name = {}
for song in songs:
if song.artist.name.lower() == search.lower():
return song.artist
else:
artist_by_name[song.artist.name] = song.artist
count[song.artist.name] += 1
ranked = sorted(count, key=count.get, reverse=True)
if count[ranked[0]] > 1:
return artist_by_name[ranked[0]]
else:
return songs[0].artist
class Song:
def __init__(self, data):
try:
data = data['result']
except KeyError:
pass
self.artist = Artist(data['primary_artist'])
self.title = data['title']
self.id = data['id']
self._lyrics = None
self._description = None
@property
def lyrics(self):
if self._lyrics is None:
self.hydrate()
return self._lyrics
@property
def description(self):
if self._description is None:
self.hydrate()
return self._description
def hydrate(self):
r = requests.get(BASE_URL + "/songs/" + str(self.id))
self._lyrics = self._process_child(r.json()['response']['song']['lyrics']['dom'])
self._lyrics = [x for x in self._lyrics.split("\n") if x != ""]
self._description = self._process_child(r.json()['response']['song']['description']['dom'])
self._description = " ".join([x for x in self._description.split("\n") if x != ""])
def _process_child(self, child):
if type(child) is str or type(child) is unicode:
if len(child) > 0 and (child[0] == "[" or child[-1] == "]"):
return ""
else:
return child
else:
try:
return "".join([self._process_child(c) for c in child['children']])
except KeyError:
return "\n"
class Artist:
def __init__(self, data):
self.id = data['id']
self.name = data['name']
self.url = data['url']
self.image_url = data['image_url']
self._songs = None
def _get_songs(self):
self._songs = []
songs_remaining = True
page = 1
while songs_remaining:
r = requests.get(BASE_URL + "/artists/" + str(self.id) + "/songs", params={"page": page})
new_songs = [Song(x) for x in r.json()['response']['songs']]
if len(new_songs) == 0:
songs_remaining = False
else:
page += 1
self._songs += new_songs
@property
def songs(self):
if self._songs is None:
self._get_songs()
return self._songs
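# A minimal usage sketch, assuming a live Genius API; the query string is a
# hypothetical example. Note that lyrics and description are only fetched
# from the API on first access (via hydrate()).
def _example_usage():
    songs = search_songs("example query", n=5)
    if songs:
        first = songs[0]
        return first.artist.name, first.lyrics[:3]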
| {
"content_hash": "69b266219568c19fd45e7c086b0c9013",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 101,
"avg_line_length": 29.9375,
"alnum_prop": 0.5117804950790337,
"repo_name": "billletson/genius",
"id": "0c8e1bb93935194b1cfb09910f28c588420e741b",
"size": "3353",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "genius/genius.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4706"
}
],
"symlink_target": ""
} |
"""
Migration from moin--main--1.3 pre patch-332 to post patch-332.
In patch-332 we changed the format of page lists in user data file. They
are now tab separated instead of comma separated, and page names are not
quoted using file system quoting.
You can run the script multiple times with no damage.
Steps for a successful migration:
1. Stop your wiki
2. Make a backup of your wiki 'data' directory
WARNING: THIS SCRIPT MIGHT CORRUPT YOUR 'DATA' DIRECTORY. DON'T
COMPLAIN LATER, MAKE BACKUP NOW!
3. Move the wiki's 'data' directory to your working dir
4. Run this script from your working dir
5. If there was no error, you will find:
data.pre-mig9 - backup of original data directory
data - converted data dir
6. Verify conversion results (number of pages, size of logs,
attachments, number of backup copies) - everything should be
reasonable before you proceed.
NOTE: THE CACHE DIRECTORY IS NOT COPIED - DO NOT COPY IT, IT WILL BE
CREATED AND FILLED BY THE WIKI AUTOMATICALLY.
7. Move the converted data directory into your wiki. Do not simply copy
the converted stuff into the original or you will duplicate pages
and create chaos!
8. Fix permissions on your data directory, see HelpOnInstalling.
9. Test it - if something has gone wrong, you still have your backup.
@copyright: 2004 Thomas Waldmann
@license: GPL, see COPYING for details
"""
import os, sys, codecs
join = os.path.join
# Insert THIS moin dir first into sys path, or you might run another
# version of moin and get unpredicted results!
sys.path.insert(0, '../../../..')
from MoinMoin import wikiutil, user
from MoinMoin.script.migration import migutil
def convert_quicklinks(string):
""" Convert quicklinks from pre patch-332 to new format """
# No need to convert new style list
if '\t' in string:
return string
names = [name.strip() for name in string.split(',')]
names = [wikiutil.unquoteWikiname(name) for name in names if name != '']
string = user.encodeList(names)
return string
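# Note: values already in the new tab-separated format pass through
# unchanged, e.g. convert_quicklinks(u'FrontPage\tRecentChanges') returns its
# input, while a comma-separated pre-patch-332 value is split, unquoted with
# wikiutil.unquoteWikiname and re-encoded by user.encodeList.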
def convert_subscribed_pages(string):
""" Convert subscribed pages from pre patch-332 to new format """
# No need to convert new style list
if '\t' in string:
return string
# This might break pages that contain ',' in the name, we can't do
# anything about it. This was the reason we changed the format.
names = [name.strip() for name in string.split(',')]
string = user.encodeList(names)
return string
def convertUserData(text):
""" Convert user data
@param text: text of user file, unicode
@rtype: unicode
@return: converted user data
"""
lines = text.splitlines()
for i in range(len(lines)):
line = lines[i]
try:
key, value = line.split('=', 1)
except ValueError:
continue
if key == u'quicklinks':
value = convert_quicklinks(value)
elif key == u'subscribed_pages':
value = convert_subscribed_pages(value)
lines[i] = u'%s=%s' % (key, value)
# Join back, append newline to last line
text = u'\n'.join(lines) + u'\n'
return text
def convertUsers(srcdir, dstdir):
""" Convert users files
@param srcdir: old users dir
@param dstdir: new users dir
"""
charset = 'utf-8'
# Create dstdir
if not os.path.exists(dstdir):
try:
os.mkdir(dstdir)
except OSError:
migutil.fatalError("can't create user directory at '%s'" % dstdir)
if not os.path.isdir(srcdir):
migutil.fatalError("can't find user directory at '%s'" % srcdir)
for name in migutil.listdir(srcdir):
if name == 'README' or name.endswith('.trail'):
# Copy as is
migutil.copy_file(join(srcdir, name), join(dstdir, name))
else:
srcfile = join(srcdir, name)
f = codecs.open(srcfile, 'rb', charset)
text = f.read()
f.close()
text = convertUserData(text)
dstfile = join(dstdir, name)
f = codecs.open(dstfile, 'wb', charset)
f.write(text)
f.close()
print "Converted '%s' to '%s'" % (srcfile, dstfile)
if __name__ == '__main__':
# Backup original dir
datadir = 'data'
origdir = 'data.pre-mig9'
migutil.backup(datadir, origdir)
# Copy ALL stuff from original dir into new data dir. Don't change
# or drop anything from the original directory except cache files.
names = ['edit-log', 'event-log', 'intermap.txt', 'pages', 'plugin']
migutil.copy(names, origdir, datadir)
# Convert user directory
convertUsers(join(origdir, 'user'), join(datadir, 'user'))
| {
"content_hash": "39e2c729f0052601680c0305b4b0a547",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 78,
"avg_line_length": 30.063291139240505,
"alnum_prop": 0.6442105263157895,
"repo_name": "RealTimeWeb/wikisite",
"id": "e423d2ac5a22221751719f30fba864de5a058f94",
"size": "4772",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MoinMoin/script/old/migration/12_to_13_mig09.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP",
"bytes": "49395"
},
{
"name": "CSS",
"bytes": "204104"
},
{
"name": "ColdFusion",
"bytes": "142312"
},
{
"name": "Java",
"bytes": "491798"
},
{
"name": "JavaScript",
"bytes": "2107106"
},
{
"name": "Lasso",
"bytes": "23464"
},
{
"name": "Makefile",
"bytes": "4950"
},
{
"name": "PHP",
"bytes": "144585"
},
{
"name": "Perl",
"bytes": "44627"
},
{
"name": "Python",
"bytes": "7647140"
},
{
"name": "Shell",
"bytes": "335"
}
],
"symlink_target": ""
} |
import enum
@enum.unique
class ConnectionStatus(enum.Enum):
NOT_APPLICABLE = "NOT_APPLICABLE"
CALCULATING = "CALCULATING"
CONNECTED = "CONNECTED"
DISCONNECTED = "DISCONNECTED"
DOMAIN_DELETED = "DOMAIN_DELETED"
NOT_COMPLETE = "NOT_COMPLETE"
USER_IN_SOURCE = "USER_IN_SOURCE"
L7_APPLICATION = "L7_APPLICATION"
ANY_OR_A_CLASS = "ANY_OR_A_CLASS"
INTERNET = "INTERNET"
NON_CONTINUOUS_MASK = "NON_CONTINUOUS_MASK"
| {
"content_hash": "f9605caa3a2be4839e49f818ddf40448",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 47,
"avg_line_length": 28.375,
"alnum_prop": 0.6806167400881057,
"repo_name": "Tufin/pytos",
"id": "a903466459731bf0bfd596acaf3b861a85057000",
"size": "455",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pytos/secureapp/definitions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "11180"
},
{
"name": "Python",
"bytes": "1073816"
}
],
"symlink_target": ""
} |
"""
osisoftpy.find_tags_and_query
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Example script: search a PI Web API server for points and subscribe to
their current-value change signals.
"""
# Fix print functions
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from future.utils import iteritems
import arrow
import time
import osisoftpy # main package
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
# Connect and instantiate the webapi object for basic
# webapi = osisoftpy.webapi('https://sbb03.eecs.berkeley.edu/piwebapi',
# authtype='basic', username='albertxu',
# password='Welcome2pi')
# Connect and instantiate the webapi object for kerberos
# webapi = osisoftpy.webapi('https://piudnpiwebapi.sli.pge.com/piwebapi', authtype='kerberos', verifyssl=False)
# webapi = osisoftpy.webapi('https://dev.dstcontrols.com/piwebapi/', authtype='kerberos', verifyssl=False, hostname_override='api.osisoft.dstcontrols.local')
webapi = osisoftpy.webapi('https://dev.dstcontrols.com/piwebapi/')
# webapi = osisoftpy.webapi('https://gold.dstcontrols.local/piwebapi/')
print('Connected to {}'.format(webapi.links.get('Self')))
# Get a list of Points from the Web API:
points = webapi.points(query='name:CD* or name:SINU*', count=100)
# Get a list of point signals for the points we'd like to monitor for changes.
# We're passing in a list of points, and the Point's method we're monitoring.
signals = webapi.subscribe(points, 'current')
# a list to store modified points in:
updated_points = []
# When the monitored point's value changes, it emits a signal.
# This will call a function, known as the receiver. Here, we're creating a
# receiver named notify which will simply print out the changed Point's
# attributes, and saving the updated point to a list for us to use later.
def notify(sender):
msg = 'Current value for {} has changed to {}'
if sender not in updated_points:
updated_points.append(sender)
print(msg.format(sender.name, sender.current_value))
# Here is where we're connecting to the signals that will be emitted. We're
# going through the signals we retrieved earlier, and connecting to each
# one, passing in the reciver function we just defined
for (k, signal) in iteritems(signals):
signal.connect(notify)
# Here, we're creating a simple 500ms timer which will grab the latest value
# for each PI point. The point.current() method will emit the change signal
# when the value changes.
# we'll just run this until we receive 10 point changes:
starttime = time.time()
# for point in points:
# point.recorded(starttime='*-14d', endtime='*', maxcount=1000)
# points.current()
for point in points:
point.current()
print('The current value for {} is {}, recorded {}'.format(
point.name,
point.current_value.value,
arrow.get(point.current_value.timestamp).humanize()))
# for point in points:
# point.end()
# print('The end value for {} is {}, recorded {}'.format(
# point.name,
# point.end_value.value,
# arrow.get(point.end_value.timestamp).humanize()))
# for point in points:
# values = point.interpolated(starttime='*-14d', endtime='*', interval='1m')
# print('{} interpolated values for {} were retrieved; '
# 'the data ranges from {} to {}.'.format(
# values.__len__(),
# point.name,
# arrow.get(values[0].timestamp).humanize(),
# arrow.get(values[-1].timestamp).humanize()))
while updated_points.__len__() < 10:
for point in points:
point.current()
# run every 500 milliseconds
sleep = 1 / 2
time.sleep(sleep - ((time.time() - starttime) % sleep))
# print out the modified points
for point in updated_points:
print(point)
# obs = osisoftpy.observable(points)
# obs2 = points.current_observable()
#
# # obs.subscribe(on_next=lambda value: print("obs Received {0}".format(value)),
# # on_completed=lambda: print("obs Done!"),
# # on_error=lambda error: print("obs Error Occurred: {0}".format(error))
# # )
#
#
# obs2.subscribe(on_next=lambda value: print("obs2 Received {0}".format(value)),
# on_completed=lambda: print("obs2 Done!"),
# on_error=lambda error: print("obs2 Error Occurred: {0}".format(error))
# )
#
# print('foo')
# Send the Web API an Indexed Search query for tags named SINU*
# search_paramaters = {'q': "name:SINU*"}
# points = webapi.points(params=search_paramaters)
# points = webapi.foopoints(query='name:*SPF*', count=100)
quit()
# .map(lambda point: getInterpolatedValues(point)) \
# \
# \
for point in points:
print('getting data for %s...' % point.name)
# let's also get the last 2 weeks of data at 1 minute intervals...
interpolated_values = point.interpolated(starttime='*-14d', endtime='*',
interval='1m')
values[point.name] = interpolated_values
points.current()
for point in points:
print(point.name)
print('1: current value: %s' % point.current_value)
print('2: Return from current(): %s' % point.current())
print('3: current value: %s' % point.current_value)
print('4: Overwrite=False, current(): %s' % point.current(overwrite=False))
# for value in point:
# print(value.timestamp)
# for each point returned...
for point in (p for p in points):
# let's print out it's current value and timestamp...
print('Name: {}, current: {}, timestamp: {}'.format(
point.name, point.current.value, point.current.timestamp))
# let's also get the last 2 weeks of data at 1 minute intervals...
interpolated_values = point.interpolated(starttime='*-14d', endtime='*',
interval='1m')
# create some messages to be printed out...
points_msg = '{} PI points were retrieved.'.format(points.__len__())
summary_msg = ('{} interpolated values for {} were retrieved. '
'The data spans from {} to {}').format(
interpolated_values.__len__(),
point.name,
arrow.get(interpolated_values[0].timestamp).humanize(),
arrow.get(interpolated_values[-1].timestamp).humanize()
)
# and then do some simple numpy calls against the 2 weeks of data:
min = numpy.amin([v.value for v in interpolated_values])
max = numpy.amax([v.value for v in interpolated_values])
mean = numpy.mean([v.value for v in interpolated_values])
average = numpy.average([v.value for v in interpolated_values])
median = numpy.median([v.value for v in interpolated_values])
mode = numpy.median([v.value for v in interpolated_values])
stdev = numpy.std([v.value for v in interpolated_values])
variance = numpy.var([v.value for v in interpolated_values], ddof=False)
# now let's print out our results for each point
print('Summary: for {}'.format(point.name))
print('--------')
print('Current: {}'.format(point.current.value))
print('Min: {}'.format(min))
print('Max: {}'.format(max))
print('Mean: {}'.format(mean))
print('Average: {}'.format(average))
print('Median: {}'.format(median))
print('Mode: {}'.format(mode))
print('Stdev: {}'.format(stdev))
print('Variance: {}'.format(variance))
print(summary_msg)
# and print out a single summary for all points
print(points_msg)
| {
"content_hash": "273aa6e41e8de18d0a781d8ccba4f429",
"timestamp": "",
"source": "github",
"line_count": 204,
"max_line_length": 157,
"avg_line_length": 37.01960784313726,
"alnum_prop": 0.6518802966101694,
"repo_name": "dstcontrols/osisoftpy",
"id": "09ed9f8bdd16ad642e31ffe66b49293b29bd928c",
"size": "8180",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/find_tags_and_query.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "235835"
},
{
"name": "Shell",
"bytes": "2009"
}
],
"symlink_target": ""
} |
import optparse
import unittest
from webkitpy.common.system.output_capture import OutputCapture
from webkitpy.tool.commands.queries import PrintBaselines, PrintExpectations
from webkitpy.tool.mock_tool import MockWebKitPatch
class PrintExpectationsTest(unittest.TestCase):
def run_test(self, tests, expected_stdout, platform='test-win-win7', **kwargs):
options_defaults = {
'all': False, 'csv': False, 'full': False, 'platform': platform,
'include_keyword': [], 'exclude_keyword': [], 'paths': False,
}
options_defaults.update(kwargs)
options = optparse.Values(dict(**options_defaults))
tool = MockWebKitPatch()
tool.port_factory.all_port_names = lambda: [
'test-linux-trusty', 'test-linux-precise',
'test-mac-mac10.11', 'test-mac-mac10.10',
'test-win-win10', 'test-win-win7'
]
command = PrintExpectations()
oc = OutputCapture()
try:
oc.capture_output()
command.execute(options, tests, tool)
finally:
stdout, _, _ = oc.restore_output()
self.assertMultiLineEqual(stdout, expected_stdout)
def test_basic(self):
self.run_test(['failures/expected/text.html', 'failures/expected/timeout.html'],
('// For test-win-win7\n'
'failures/expected/text.html [ Failure ]\n'
'failures/expected/timeout.html [ Timeout ]\n'))
def test_multiple(self):
self.run_test(['failures/expected/text.html', 'failures/expected/timeout.html'],
('// For test-win-win10\n'
'failures/expected/text.html [ Failure ]\n'
'failures/expected/timeout.html [ Timeout ]\n'
'\n'
'// For test-win-win7\n'
'failures/expected/text.html [ Failure ]\n'
'failures/expected/timeout.html [ Timeout ]\n'),
platform='test-win-*')
def test_full(self):
self.run_test(['failures/expected/text.html', 'failures/expected/timeout.html'],
('// For test-win-win7\n'
'Bug(test) failures/expected/text.html [ Failure ]\n'
'Bug(test) failures/expected/timeout.html [ Timeout ]\n'),
full=True)
def test_exclude(self):
self.run_test(['failures/expected/text.html', 'failures/expected/crash.html'],
('// For test-win-win7\n'
'failures/expected/text.html [ Failure ]\n'),
exclude_keyword=['crash'])
def test_include(self):
self.run_test(['failures/expected/text.html', 'failures/expected/crash.html'],
('// For test-win-win7\n'
'failures/expected/crash.html\n'),
include_keyword=['crash'])
def test_csv(self):
self.run_test(['failures/expected/text.html', 'failures/expected/image.html'],
('test-win-win7,failures/expected/image.html,Bug(test),,FAIL\n'
'test-win-win7,failures/expected/text.html,Bug(test),,FAIL\n'),
csv=True)
def test_paths(self):
self.run_test([],
('/mock-checkout/LayoutTests/TestExpectations\n'
'LayoutTests/NeverFixTests\n'
'LayoutTests/StaleTestExpectations\n'
'LayoutTests/SlowTests\n'),
paths=True)
class PrintBaselinesTest(unittest.TestCase):
def setUp(self):
self.oc = None
self.tool = MockWebKitPatch()
self.test_port = self.tool.port_factory.get('test-win-win7')
self.tool.port_factory.get = lambda port_name=None: self.test_port
self.tool.port_factory.all_port_names = lambda: [
'test-linux-trusty', 'test-linux-precise',
'test-mac-mac10.11', 'test-mac-mac10.10',
'test-win-win10', 'test-win-win7'
]
def tearDown(self):
if self.oc:
self.restore_output()
def capture_output(self):
self.oc = OutputCapture()
self.oc.capture_output()
def restore_output(self):
stdout, stderr, logs = self.oc.restore_output()
self.oc = None
return (stdout, stderr, logs)
def test_basic(self):
command = PrintBaselines()
self.capture_output()
options = optparse.Values({'all': False, 'include_virtual_tests': False, 'csv': False, 'platform': None})
command.execute(options, ['passes/text.html'], self.tool)
stdout, _, _ = self.restore_output()
self.assertMultiLineEqual(stdout,
('// For test-win-win7\n'
'passes/text-expected.png\n'
'passes/text-expected.txt\n'))
def test_multiple(self):
command = PrintBaselines()
self.capture_output()
options = optparse.Values({'all': False, 'include_virtual_tests': False, 'csv': False, 'platform': 'test-win-*'})
command.execute(options, ['passes/text.html'], self.tool)
stdout, _, _ = self.restore_output()
self.assertMultiLineEqual(stdout,
('// For test-win-win10\n'
'passes/text-expected.png\n'
'passes/text-expected.txt\n'
'\n'
'// For test-win-win7\n'
'passes/text-expected.png\n'
'passes/text-expected.txt\n'))
def test_csv(self):
command = PrintBaselines()
self.capture_output()
options = optparse.Values({'all': False, 'platform': '*win7', 'csv': True, 'include_virtual_tests': False})
command.execute(options, ['passes/text.html'], self.tool)
stdout, _, _ = self.restore_output()
self.assertMultiLineEqual(stdout,
('test-win-win7,passes/text.html,None,png,passes/text-expected.png,None\n'
'test-win-win7,passes/text.html,None,txt,passes/text-expected.txt,None\n'))
| {
"content_hash": "a825c576a08355718737aa1de50ef2be",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 121,
"avg_line_length": 43.744827586206895,
"alnum_prop": 0.5390193914551474,
"repo_name": "google-ar/WebARonARCore",
"id": "d60a56e9d2201f248935c8317e6ab8887ed0fc20",
"size": "7931",
"binary": false,
"copies": "2",
"ref": "refs/heads/webarcore_57.0.2987.5",
"path": "third_party/WebKit/Tools/Scripts/webkitpy/tool/commands/queries_unittest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
import re
from random import randrange
from model.contact import Contact
def test_contact_on_the_home_page(app, db):
old_contacts = app.contact.get_contact_list()
contact_from_db = db.get_contact_list()
for i in range(len(contact_from_db)):
db_contact = contact_from_db[i]
ui_contact = (sorted(old_contacts, key=Contact.id_or_max))[i]
assert ui_contact.firstname == db_contact.firstname
assert ui_contact.lastname == db_contact.lastname
assert ui_contact.address == db_contact.address
assert ui_contact.all_emails_form_home_page == merge_emails_like_on_homepage(db_contact)
assert ui_contact.all_phones_from_home_page == merge_phones_like_on_homepage(db_contact)
def clear(s):
return re.sub("[() -]", "", s)
def merge_emails_like_on_homepage(db):
return "\n".join(filter(lambda x: x != "",(filter(lambda x: x is not None,[db.email, db.email2, db.email3]))))
def merge_phones_like_on_homepage(db):
return "\n".join(filter(lambda x: x != "",
map(lambda x: clear(x),
filter(lambda x: x is not None,[db.home, db.mobile, db.work,db.privatephone]))))
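# A minimal illustrative check, assuming only the attribute names used above
# (home, mobile, work, privatephone); the stand-in values are hypothetical.
class _FakeDbContact(object):
    home = "22 33-44"
    mobile = None
    work = "(48) 555"
    privatephone = ""
def test_merge_phones_example():
    # "() -" characters are stripped and empty/None values dropped
    assert merge_phones_like_on_homepage(_FakeDbContact()) == "223344\n48555"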
| {
"content_hash": "1924701a12790e7ed29f863f8bf21199",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 114,
"avg_line_length": 41.10344827586207,
"alnum_prop": 0.639261744966443,
"repo_name": "AnnaWyszomirska/lesson1_1",
"id": "407539ec9ae4b5f086208cfcedce8b70ce3c07b5",
"size": "1192",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_contact_fields.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Cucumber",
"bytes": "1671"
},
{
"name": "Python",
"bytes": "46437"
}
],
"symlink_target": ""
} |
import os
import struct
import subprocess
import configparser
CONFIG_FILE = os.path.join(os.environ['LOCALAPPDATA'], 'squeakvmlauncher.ini')
# TODO: make this platform independent
CONFIG_SECTION = 'VMs'
def launch_squeak(image_filename):
with open(image_filename, 'rb') as f:
image_version = read_magic_version_number(f)
vm_executable = vm_executable_for_version(image_version)
subprocess.call([vm_executable, image_filename])
def read_magic_version_number(file):
magic_word, = struct.unpack('i', file.read(4))
# TODO: add support for different endianness images
# TODO: add support for 64 bit images
return magic_word
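# A minimal illustrative check: packing a version number and reading it back
# shows what the magic-word lookup sees. 6504 is used purely as an example
# value here.
def _example_magic_roundtrip():
    import io
    data = io.BytesIO(struct.pack('i', 6504))
    return read_magic_version_number(data)  # -> 6504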
def vm_executable_for_version(image_version):
executable = lookup_executable(image_version)
if not executable:
executable = choose_with_gui(image_version)
save_executable(image_version, executable)
return executable
config_parser = configparser.RawConfigParser()
def lookup_executable(image_version):
global config_parser
config_parser.read(CONFIG_FILE)
try:
return config_parser.get(CONFIG_SECTION, str(image_version))
except (configparser.NoSectionError, configparser.NoOptionError):
return None
def choose_with_gui(image_version):
import tkinter
from tkinter import filedialog
root = tkinter.Tk()
root.withdraw()
return filedialog.askopenfilename(title='Choose a VM for image version ' + str(image_version))
def save_executable(image_version, executable):
if not config_parser.has_section(CONFIG_SECTION):
config_parser.add_section(CONFIG_SECTION)
config_parser.set(CONFIG_SECTION, str(image_version), executable)
with open(CONFIG_FILE, 'w') as config_file:
config_parser.write(config_file)
if __name__ == '__main__':
import sys
launch_squeak(sys.argv[1])
| {
"content_hash": "0d308c7bac065d1f57fd6e86c837a997",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 98,
"avg_line_length": 33.43636363636364,
"alnum_prop": 0.7188689505165851,
"repo_name": "j4yk/squeakvm-launcher",
"id": "7c0196e0f1390d98a8129c1077077d2b7ac0b5ee",
"size": "1849",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "squeakvmlauncher.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8449"
}
],
"symlink_target": ""
} |
"""
A script that uses f2py to generate the signature files used to make
the Cython BLAS and LAPACK wrappers from the fortran source code for
LAPACK and the reference BLAS.
To generate the BLAS wrapper signatures call:
python _cython_signature_generator.py blas <blas_directory> <out_file>
To generate the LAPACK wrapper signatures call:
python _cython_signature_generator.py lapack <lapack_src_directory> <out_file>
"""
import glob
from numpy.f2py import crackfortran
sig_types = {'integer': 'int',
'complex': 'c',
'double precision': 'd',
'real': 's',
'complex*16': 'z',
'double complex': 'z',
'character': 'char',
'logical': 'bint'}
def get_type(info, arg):
argtype = sig_types[info['vars'][arg]['typespec']]
if argtype == 'c' and info['vars'][arg].get('kindselector') is not None:
argtype = 'z'
return argtype
def make_signature(filename):
info = crackfortran.crackfortran(filename)[0]
name = info['name']
if info['block'] == 'subroutine':
return_type = 'void'
else:
return_type = get_type(info, name)
arglist = [' *'.join([get_type(info, arg), arg]) for arg in info['args']]
args = ', '.join(arglist)
# Eliminate strange variable naming that replaces rank with rank_bn.
args = args.replace('rank_bn', 'rank')
return '{0} {1}({2})\n'.format(return_type, name, args)
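# Illustrative example, assuming a reference-BLAS source such as daxpy.f:
# make_signature() would emit a line of the form
#   "void daxpy(int *n, d *da, d *dx, int *incx, d *dy, int *incy)\n"
# using the single-letter type codes from sig_types above.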
def get_sig_name(line):
return line.split('(')[0].split(' ')[-1]
def sigs_from_dir(directory, outfile, manual_wrappers=None, exclusions=None):
if directory[-1] in ['/', '\\']:
directory = directory[:-1]
files = glob.glob(directory + '/*.f*')
if exclusions is None:
exclusions = []
if manual_wrappers is not None:
exclusions += [get_sig_name(l) for l in manual_wrappers.split('\n')]
signatures = []
for filename in files:
name = filename.split('\\')[-1][:-2]
if name in exclusions:
continue
signatures.append(make_signature(filename))
if manual_wrappers is not None:
signatures += [l + '\n' for l in manual_wrappers.split('\n')]
signatures.sort(key=get_sig_name)
comment = ["# This file was generated by _cython_wrapper_generators.py.\n",
"# Do not edit this file directly.\n\n"]
with open(outfile, 'w') as f:
f.writelines(comment)
f.writelines(signatures)
# The signature that is used for zcgesv in lapack 3.1.0 and 3.1.1 changed
# in version 3.2.0. The version included in the clapack on OSX has the
# more recent signature though.
# slamch and dlamch are not in the lapack src directory, but, since they
# already have Python wrappers, we'll wrap them as well.
# The other manual signatures are used because the signature generating
# functions don't work when function pointer arguments are used.
lapack_manual_wrappers = '''void cgees(char *jobvs, char *sort, cselect1 *select, int *n, c *a, int *lda, int *sdim, c *w, c *vs, int *ldvs, c *work, int *lwork, s *rwork, bint *bwork, int *info)
void cgeesx(char *jobvs, char *sort, cselect1 *select, char *sense, int *n, c *a, int *lda, int *sdim, c *w, c *vs, int *ldvs, s *rconde, s *rcondv, c *work, int *lwork, s *rwork, bint *bwork, int *info)
void cgges(char *jobvsl, char *jobvsr, char *sort, cselect2 *selctg, int *n, c *a, int *lda, c *b, int *ldb, int *sdim, c *alpha, c *beta, c *vsl, int *ldvsl, c *vsr, int *ldvsr, c *work, int *lwork, s *rwork, bint *bwork, int *info)
void cggesx(char *jobvsl, char *jobvsr, char *sort, cselect2 *selctg, char *sense, int *n, c *a, int *lda, c *b, int *ldb, int *sdim, c *alpha, c *beta, c *vsl, int *ldvsl, c *vsr, int *ldvsr, s *rconde, s *rcondv, c *work, int *lwork, s *rwork, int *iwork, int *liwork, bint *bwork, int *info)
void dgees(char *jobvs, char *sort, dselect2 *select, int *n, d *a, int *lda, int *sdim, d *wr, d *wi, d *vs, int *ldvs, d *work, int *lwork, bint *bwork, int *info)
void dgeesx(char *jobvs, char *sort, dselect2 *select, char *sense, int *n, d *a, int *lda, int *sdim, d *wr, d *wi, d *vs, int *ldvs, d *rconde, d *rcondv, d *work, int *lwork, int *iwork, int *liwork, bint *bwork, int *info)
void dgges(char *jobvsl, char *jobvsr, char *sort, dselect3 *selctg, int *n, d *a, int *lda, d *b, int *ldb, int *sdim, d *alphar, d *alphai, d *beta, d *vsl, int *ldvsl, d *vsr, int *ldvsr, d *work, int *lwork, bint *bwork, int *info)
void dggesx(char *jobvsl, char *jobvsr, char *sort, dselect3 *selctg, char *sense, int *n, d *a, int *lda, d *b, int *ldb, int *sdim, d *alphar, d *alphai, d *beta, d *vsl, int *ldvsl, d *vsr, int *ldvsr, d *rconde, d *rcondv, d *work, int *lwork, int *iwork, int *liwork, bint *bwork, int *info)
d dlamch(char *cmach)
void ilaver(int *vers_major, int *vers_minor, int *vers_patch)
void sgees(char *jobvs, char *sort, sselect2 *select, int *n, s *a, int *lda, int *sdim, s *wr, s *wi, s *vs, int *ldvs, s *work, int *lwork, bint *bwork, int *info)
void sgeesx(char *jobvs, char *sort, sselect2 *select, char *sense, int *n, s *a, int *lda, int *sdim, s *wr, s *wi, s *vs, int *ldvs, s *rconde, s *rcondv, s *work, int *lwork, int *iwork, int *liwork, bint *bwork, int *info)
void sgges(char *jobvsl, char *jobvsr, char *sort, sselect3 *selctg, int *n, s *a, int *lda, s *b, int *ldb, int *sdim, s *alphar, s *alphai, s *beta, s *vsl, int *ldvsl, s *vsr, int *ldvsr, s *work, int *lwork, bint *bwork, int *info)
void sggesx(char *jobvsl, char *jobvsr, char *sort, sselect3 *selctg, char *sense, int *n, s *a, int *lda, s *b, int *ldb, int *sdim, s *alphar, s *alphai, s *beta, s *vsl, int *ldvsl, s *vsr, int *ldvsr, s *rconde, s *rcondv, s *work, int *lwork, int *iwork, int *liwork, bint *bwork, int *info)
s slamch(char *cmach)
void zgees(char *jobvs, char *sort, zselect1 *select, int *n, z *a, int *lda, int *sdim, z *w, z *vs, int *ldvs, z *work, int *lwork, d *rwork, bint *bwork, int *info)
void zgeesx(char *jobvs, char *sort, zselect1 *select, char *sense, int *n, z *a, int *lda, int *sdim, z *w, z *vs, int *ldvs, d *rconde, d *rcondv, z *work, int *lwork, d *rwork, bint *bwork, int *info)
void zgges(char *jobvsl, char *jobvsr, char *sort, zselect2 *selctg, int *n, z *a, int *lda, z *b, int *ldb, int *sdim, z *alpha, z *beta, z *vsl, int *ldvsl, z *vsr, int *ldvsr, z *work, int *lwork, d *rwork, bint *bwork, int *info)
void zggesx(char *jobvsl, char *jobvsr, char *sort, zselect2 *selctg, char *sense, int *n, z *a, int *lda, z *b, int *ldb, int *sdim, z *alpha, z *beta, z *vsl, int *ldvsl, z *vsr, int *ldvsr, d *rconde, d *rcondv, z *work, int *lwork, d *rwork, int *iwork, int *liwork, bint *bwork, int *info)'''
if __name__ == '__main__':
from sys import argv
libname, src_dir, outfile = argv[1:]
# Exclude scabs1 and sisnan since they aren't currently included
# in the scipy-specific ABI wrappers.
if libname.lower() == 'blas':
sigs_from_dir(src_dir, outfile, exclusions=['scabs1', 'xerbla'])
elif libname.lower() == 'lapack':
# Exclude all routines that do not have consistent interfaces from
# LAPACK 3.1.0 through 3.6.0.
# Also exclude routines with string arguments to avoid
# compatibility woes with different standards for string arguments.
# Exclude sisnan and slaneg since they aren't currently included in
# the ABI compatibility wrappers.
exclusions = ['sisnan', 'csrot', 'zdrot', 'ilaenv', 'iparmq', 'lsamen',
'xerbla', 'zcgesv', 'dlaisnan', 'slaisnan', 'dlazq3',
'dlazq4', 'slazq3', 'slazq4', 'dlasq3', 'dlasq4',
'slasq3', 'slasq4', 'dlasq5', 'slasq5', 'slaneg',
# Routines deprecated in LAPACK 3.6.0
'cgegs', 'cgegv', 'cgelsx', 'cgeqpf', 'cggsvd', 'cggsvp',
'clahrd', 'clatzm', 'ctzrqf', 'dgegs', 'dgegv', 'dgelsx',
'dgeqpf', 'dggsvd', 'dggsvp', 'dlahrd', 'dlatzm', 'dtzrqf',
'sgegs', 'sgegv', 'sgelsx', 'sgeqpf', 'sggsvd', 'sggsvp',
'slahrd', 'slatzm', 'stzrqf', 'zgegs', 'zgegv', 'zgelsx',
'zgeqpf', 'zggsvd', 'zggsvp', 'zlahrd', 'zlatzm', 'ztzrqf']
sigs_from_dir(src_dir, outfile, manual_wrappers=lapack_manual_wrappers,
exclusions=exclusions)
| {
"content_hash": "d5d4edaefb7c58ac458510439be3831d",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 297,
"avg_line_length": 63.908396946564885,
"alnum_prop": 0.6207596751075012,
"repo_name": "DailyActie/Surrogate-Model",
"id": "3e32f4ee3bff69c241712515ab32a5fa027911ff",
"size": "8372",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "01-codes/scipy-master/scipy/linalg/_cython_signature_generator.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "345"
},
{
"name": "Batchfile",
"bytes": "18746"
},
{
"name": "C",
"bytes": "13004913"
},
{
"name": "C++",
"bytes": "14692003"
},
{
"name": "CMake",
"bytes": "72831"
},
{
"name": "CSS",
"bytes": "303488"
},
{
"name": "Fortran",
"bytes": "7339415"
},
{
"name": "HTML",
"bytes": "854774"
},
{
"name": "Java",
"bytes": "38854"
},
{
"name": "JavaScript",
"bytes": "2432846"
},
{
"name": "Jupyter Notebook",
"bytes": "829689"
},
{
"name": "M4",
"bytes": "1379"
},
{
"name": "Makefile",
"bytes": "48708"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "PHP",
"bytes": "93585"
},
{
"name": "Pascal",
"bytes": "1449"
},
{
"name": "Perl",
"bytes": "1152272"
},
{
"name": "PowerShell",
"bytes": "17042"
},
{
"name": "Python",
"bytes": "34668203"
},
{
"name": "Roff",
"bytes": "5925"
},
{
"name": "Ruby",
"bytes": "92498"
},
{
"name": "Shell",
"bytes": "94698"
},
{
"name": "TeX",
"bytes": "156540"
},
{
"name": "TypeScript",
"bytes": "41691"
}
],
"symlink_target": ""
} |
"""
IPython -- An enhanced Interactive Python
One of Python's nicest features is its interactive interpreter. This allows
very fast testing of ideas without the overhead of creating test files as is
typical in most programming languages. However, the interpreter supplied with
the standard Python distribution is fairly primitive (and IDLE isn't really
much better).
IPython tries to:
i - provide an efficient environment for interactive work in Python
programming. It tries to address what we see as shortcomings of the standard
Python prompt, and adds many features to make interactive work much more
efficient.
ii - offer a flexible framework so that it can be used as the base
environment for other projects and problems where Python can be the
underlying language. Specifically scientific environments like Mathematica,
IDL and Mathcad inspired its design, but similar ideas can be useful in many
fields. Python is a fabulous language for implementing this kind of system
(due to its dynamic and introspective features), and with suitable libraries
entire systems could be built leveraging Python's power.
iii - serve as an embeddable, ready to go interpreter for your own programs.
IPython requires Python 2.4 or newer.
"""
#*****************************************************************************
# Copyright (C) 2008-2010 The IPython Development Team
# Copyright (C) 2001-2007 Fernando Perez. <fperez@colorado.edu>
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#*****************************************************************************
# Enforce proper version requirements
import sys
if sys.version[0:3] < '2.4':
raise ImportError('Python Version 2.4 or above is required for IPython.')
# Make it easy to import extensions - they are always directly on pythonpath.
# Therefore, non-IPython modules can be added to Extensions directory
import os
sys.path.append(os.path.dirname(__file__) + "/Extensions")
# Define what gets imported with a 'from IPython import *'
__all__ = ['ipapi','generics','ipstruct','Release','Shell']
# Load __all__ in IPython namespace so that a simple 'import IPython' gives
# access to them via IPython.<name>
glob,loc = globals(),locals()
for name in __all__:
#print 'Importing: ',name # dbg
__import__(name,glob,loc,[])
import Shell
# Release data
from IPython import Release # do it explicitly so pydoc can see it - pydoc bug
__author__ = '%s <%s>\n%s <%s>\n%s <%s>' % \
( Release.authors['Fernando'] + Release.authors['Janko'] + \
Release.authors['Nathan'] )
__license__ = Release.license
__version__ = Release.version
# Namespace cleanup
del name,glob,loc
| {
"content_hash": "8a36b1407c0ff05e8b1a193c3de3bb67",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 78,
"avg_line_length": 39.9,
"alnum_prop": 0.6931614751163623,
"repo_name": "toomoresuch/pysonengine",
"id": "ce4f990d792e649226889b8b89de929df396451a",
"size": "2817",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "eggs/ipython-0.10.1-py2.6.egg/IPython/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "401941"
},
{
"name": "JavaScript",
"bytes": "628757"
},
{
"name": "Python",
"bytes": "12919662"
},
{
"name": "Shell",
"bytes": "416"
},
{
"name": "VimL",
"bytes": "4587"
}
],
"symlink_target": ""
} |
from .resource import Resource
class GenericResource(Resource):
"""Resource information.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource ID
:vartype id: str
:ivar name: Resource name
:vartype name: str
:ivar type: Resource type
:vartype type: str
:param location: Resource location
:type location: str
:param tags: Resource tags
:type tags: dict[str, str]
:param plan: The plan of the resource.
:type plan: ~azure.mgmt.resource.resources.v2016_09_01.models.Plan
:param properties: The resource properties.
:type properties: object
:param kind: The kind of the resource.
:type kind: str
:param managed_by: ID of the resource that manages this resource.
:type managed_by: str
:param sku: The SKU of the resource.
:type sku: ~azure.mgmt.resource.resources.v2016_09_01.models.Sku
:param identity: The identity of the resource.
:type identity: ~azure.mgmt.resource.resources.v2016_09_01.models.Identity
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'kind': {'pattern': r'^[-\w\._,\(\)]+$'},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'plan': {'key': 'plan', 'type': 'Plan'},
'properties': {'key': 'properties', 'type': 'object'},
'kind': {'key': 'kind', 'type': 'str'},
'managed_by': {'key': 'managedBy', 'type': 'str'},
'sku': {'key': 'sku', 'type': 'Sku'},
'identity': {'key': 'identity', 'type': 'Identity'},
}
def __init__(self, location=None, tags=None, plan=None, properties=None, kind=None, managed_by=None, sku=None, identity=None):
super(GenericResource, self).__init__(location=location, tags=tags)
self.plan = plan
self.properties = properties
self.kind = kind
self.managed_by = managed_by
self.sku = sku
self.identity = identity
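# A minimal usage sketch; all field values are hypothetical and only keyword
# arguments defined by this class are used.
def _example_generic_resource():
    return GenericResource(
        location='eastus',
        tags={'env': 'dev'},
        properties={'exampleSetting': True},
        kind='storage')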
| {
"content_hash": "849f942c63b33ce571eb32dbe0e649b4",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 130,
"avg_line_length": 36.16129032258065,
"alnum_prop": 0.584299732381802,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "22b5c9ae7fe73bbeb100d32b6bb435fa701400c9",
"size": "2716",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "azure-mgmt-resource/azure/mgmt/resource/resources/v2016_09_01/models/generic_resource.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
} |
import six.moves.cPickle as pickle
import os
import shutil
import tempfile
import unittest
import numpy
import theano
from theano.compile.io import In
def test_function_dump():
v = theano.tensor.vector()
fct1 = theano.function([v], v + 1)
try:
tmpdir = tempfile.mkdtemp()
fname = os.path.join(tmpdir, 'test_function_dump.pkl')
theano.function_dump(fname, [v], v + 1)
f = open(fname, 'rb')
l = pickle.load(f)
f.close()
finally:
if tmpdir is not None:
shutil.rmtree(tmpdir)
fct2 = theano.function(**l)
x = [1, 2, 3]
assert numpy.allclose(fct1(x), fct2(x))
class TestFunctionIn(unittest.TestCase):
def test_in_strict(self):
a = theano.tensor.dvector()
b = theano.shared(7)
out = a + b
f = theano.function([In(a, strict=False)], out)
# works, rand generates float64 by default
f(numpy.random.rand(8))
# works, casting is allowed
f(numpy.array([1, 2, 3, 4], dtype='int32'))
f = theano.function([In(a, strict=True)], out)
try:
# fails, f expects float64
f(numpy.array([1, 2, 3, 4], dtype='int32'))
except TypeError:
pass
def test_explicit_shared_input(self):
# This is not a test of the In class per se, but the In class relies
# on the fact that shared variables cannot be explicit inputs
a = theano.shared(1.0)
self.assertRaises(TypeError, theano.function, [a], a + 1)
def test_in_shared_variable(self):
# Ensure that an error is raised if the In wrapped is used to wrap
# a shared variable
a = theano.shared(1.0)
a_wrapped = In(a, update=a + 1)
self.assertRaises(TypeError, theano.function, [a_wrapped])
def test_in_mutable(self):
a = theano.tensor.dvector()
a_out = a * 2 # assuming the op which makes this "in place" triggers
# using mutable=True will let f change the value in aval
f = theano.function([In(a, mutable=True)], a_out, mode='FAST_RUN')
aval = numpy.random.rand(10)
aval2 = aval.copy()
assert numpy.all(f(aval) == (aval2 * 2))
assert not numpy.all(aval == aval2)
# using mutable=False should leave the input untouched
f = theano.function([In(a, mutable=False)], a_out, mode='FAST_RUN')
aval = numpy.random.rand(10)
aval2 = aval.copy()
assert numpy.all(f(aval) == (aval2 * 2))
assert numpy.all(aval == aval2)
def test_in_update(self):
a = theano.tensor.dscalar('a')
f = theano.function([In(a, value=0.0, update=a + 1)], a,
mode='FAST_RUN')
# Ensure that, through the executions of the function, the state of the
# input is persistent and is updated as it should
assert f() == 0.0
assert f() == 1.0
assert f() == 2.0
def test_in_update_wrong_dtype(self):
# Ensure that an error is raised if an In-wrapped variables has
# an update of a different type
a = theano.tensor.dscalar('a')
b = theano.tensor.dvector('b')
self.assertRaises(TypeError, In, a, update=b)
def test_in_update_shared(self):
# Test that using both In() with updates and shared variables with
# updates in the same function behaves as expected
shared_var = theano.shared(1.0)
a = theano.tensor.dscalar('a')
a_wrapped = In(a, value=0.0, update=shared_var)
f = theano.function([a_wrapped], [], updates={shared_var: a},
mode='FAST_RUN')
# Ensure that, through the executions of the function, the state of
# the input and the shared variable are appropriate (after N execution,
# the values have swapped N times). This allows testing that the
# changes occur at the same time and one doesn't overwrite the other.
for i in range(5):
f()
assert numpy.allclose(shared_var.get_value(), i % 2)
def test_in_allow_downcast_int(self):
a = theano.tensor.wvector('a') # int16
b = theano.tensor.bvector('b') # int8
c = theano.tensor.bscalar('c') # int8
f = theano.function([In(a, allow_downcast=True),
In(b, allow_downcast=False),
In(c, allow_downcast=None)],
(a + b + c))
# Both values are in range. Since they're not ndarrays (but lists),
# they will be converted, and their value checked.
assert numpy.all(f([3], [6], 1) == 10)
# Values are in range, but a dtype too large has explicitly been given
# For performance reasons, no check of the data is explicitly performed
# (It might be OK to change this in the future.)
self.assertRaises(TypeError, f, [3], numpy.array([6], dtype='int16'),
1)
# Value too big for a, silently ignored
assert numpy.all(f([2 ** 20], numpy.ones(1, dtype='int8'), 1) == 2)
# Value too big for b, raises TypeError
self.assertRaises(TypeError, f, [3], [312], 1)
# Value too big for c, raises TypeError
self.assertRaises(TypeError, f, [3], [6], 806)
def test_in_allow_downcast_floatX(self):
a = theano.tensor.fscalar('a')
b = theano.tensor.fscalar('b')
c = theano.tensor.fscalar('c')
f = theano.function([In(a, allow_downcast=True),
In(b, allow_downcast=False),
In(c, allow_downcast=None)],
(a + b + c))
# If the values can be accurately represented, everything is OK
assert numpy.all(f(0, 0, 0) == 0)
# If allow_downcast is True, idem
assert numpy.allclose(f(0.1, 0, 0), 0.1)
# If allow_downcast is False, nope
self.assertRaises(TypeError, f, 0, 0.1, 0)
# If allow_downcast is None, it should work iff floatX=float32
if theano.config.floatX == 'float32':
assert numpy.allclose(f(0, 0, 0.1), 0.1)
else:
self.assertRaises(TypeError, f, 0, 0, 0.1)
def test_in_allow_downcast_vector_floatX(self):
a = theano.tensor.fvector('a')
b = theano.tensor.fvector('b')
c = theano.tensor.fvector('c')
f = theano.function([In(a, allow_downcast=True),
In(b, allow_downcast=False),
In(c, allow_downcast=None)],
(a + b + c))
# If the values can be accurately represented, everything is OK
z = [0]
assert numpy.all(f(z, z, z) == 0)
# If allow_downcast is True, idem
assert numpy.allclose(f([0.1], z, z), 0.1)
# If allow_downcast is False, nope
self.assertRaises(TypeError, f, z, [0.1], z)
# If allow_downcast is None, like False
self.assertRaises(TypeError, f, z, z, [0.1])
| {
"content_hash": "e6693ef1d99c962ea64013fa426d4fb1",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 79,
"avg_line_length": 36.329896907216494,
"alnum_prop": 0.5672531214528944,
"repo_name": "cmdunkers/DeeperMind",
"id": "1b023b2c478865e1297047ea9afb30d164fa34df",
"size": "7048",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "PythonEnv/lib/python2.7/site-packages/theano/compile/tests/test_function.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "653032"
},
{
"name": "C++",
"bytes": "3354338"
},
{
"name": "Cuda",
"bytes": "538188"
},
{
"name": "FORTRAN",
"bytes": "10375"
},
{
"name": "HTML",
"bytes": "124328"
},
{
"name": "Makefile",
"bytes": "214"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Python",
"bytes": "22186197"
},
{
"name": "Shell",
"bytes": "4377"
}
],
"symlink_target": ""
} |
from collections import OrderedDict
from .dynamicbinary import DynamicBinary
class BinaryStore:
def __init__(self):
self._binaries = OrderedDict()
def add_binary(self, binary, name=None):
if name is None:
name = binary.get_name()
if name in self._binaries:
raise ValueError("Duplicated binary '{}'".format(name))
self._binaries[name] = binary
def get_binary(self, bin_name):
return self._binaries.get(bin_name)
def set_base_address(self, bin_name, base_address):
binary = self._binaries[bin_name]
if not isinstance(binary, DynamicBinary):
raise ValueError("Cannot set base address for non-dynamic binary.")
binary.set_base_address(base_address)
def get_address(self, name, bin_name=None, offset=0):
if bin_name is None:
return self._search_all_binaries(name, offset)
binary = self._binaries.get(bin_name)
if binary is None:
raise ValueError("No binary with name '{}' exists".format(bin_name))
address = binary.get_address(name, offset)
if address is None:
raise ValueError("No symbol with name '{}' exists".format(name))
return address
def _search_all_binaries(self, name, offset):
for binary in self._binaries.values():
address = binary.get_address(name, offset)
if address is not None:
return address
raise ValueError("No symbol with name '{}' exists".format(name))
| {
"content_hash": "7a707b57746e4fd612975c88db30ada6",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 80,
"avg_line_length": 37.53658536585366,
"alnum_prop": 0.6231319038336582,
"repo_name": "fr3akout/pwny",
"id": "06fa536fa048720c0f2ad9852d214fe0166f2672",
"size": "1539",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pwny/binarystore.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15906"
}
],
"symlink_target": ""
} |
import pygame, math, random
from Entity import Entity
from Player import Player
class ShammyTowel(pygame.sprite.Sprite):
def __init__(self, pos = [0,0]):
pygame.sprite.Sprite.__init__(self, self.containers)
self.upImages = [pygame.image.load("RSC/Enemy Images/ththing/OH.png"),
pygame.image.load("RSC/Enemy Images/ththing/OHNO.png")]
self.changed = False
self.images = self.upImages
self.speedx = random.randint(-4,4)
self.speedy = random.randint(-4,4)
self.speed = [self.speedx, self.speedy]
self.didBounceX = False
self.didBounceY = False
self.frame = 0
self.maxFrame = len(self.images) - 1
self.waitCount = 0
self.maxWait = 60*.25
self.image = self.images[self.frame]
self.rect = self.image.get_rect()
self.place(pos)
self.radius = (int(self.rect.height/2.0 + self.rect.width/2.0)/2) - 1
self.living = True
def place(self, pos):
self.rect.center = pos
def update(*args):
self = args[0]
width = args[1]
height = args[2]
self.didBounceX = False
self.didBounceY = False
self.speed = [self.speedx, self.speedy]
self.move()
self.collideWall(width, height)
self.animate()
self.changed = False
def move(self):
self.rect = self.rect.move(self.speed)
def collideWall(self, width, height):
if not self.didBounceX:
#print "trying to hit Wall"
if self.rect.left < 0 or self.rect.right > width:
self.speedx = -self.speedx
self.didBounceX = True
#print "hit xWall"
if not self.didBounceY:
if self.rect.top < 0 or self.rect.bottom > height:
self.speedy = -self.speedy
self.didBounceY = True
#print "hit xWall"
def collideBlock(self, block):
self.speedx = random.randint(-4,4)
self.speedy = random.randint(-4,4)
def animate(self):
if self.waitCount < self.maxWait:
self.waitCount += 2
else:
self.waitCount = 0
self.changed = True
if self.frame < self.maxFrame:
self.frame += 1
else:
self.frame = 0
self.image = self.images[self.frame]
    def detect(self, player):
        # relies on distToPoint, detectionRadius and maxSpeed being defined
        # elsewhere; they are not set up in this class's __init__
        if self.distToPoint(player.rect.center) < self.detectionRadius:
            pX = player.rect.center[0]
            pY = player.rect.center[1]
zX = self.rect.center[0]
zY = self.rect.center[1]
if pX > zX:
self.speedx = self.maxSpeed
elif pX < zX:
self.speedx = -self.maxSpeed
else:
self.speedx = 0
if pY > zY:
self.speedy = self.maxSpeed
elif pY < zY:
self.speedy = -self.maxSpeed
else:
self.speedy = 0
| {
"content_hash": "5928dd4f316843e0786fe8927b260a2a",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 80,
"avg_line_length": 33.40860215053763,
"alnum_prop": 0.522690698422916,
"repo_name": "KRHS-GameProgramming-2014/Shamwow",
"id": "4bf3bcf09c23f4c952f93c162c8db8e876536796",
"size": "3107",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Shammy.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "42700"
}
],
"symlink_target": ""
} |
'''Autogenerated by get_gl_extensions script, do not edit!'''
from OpenGL import platform as _p
from OpenGL.GL import glget
EXTENSION_NAME = 'GL_ARB_ES3_compatibility'
_p.unpack_constants( """GL_COMPRESSED_RGB8_ETC2 0x9274
GL_COMPRESSED_SRGB8_ETC2 0x9275
GL_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2 0x9276
GL_COMPRESSED_SRGB8_PUNCHTHROUGH_ALPHA1_ETC2 0x9277
GL_COMPRESSED_RGBA8_ETC2_EAC 0x9278
GL_COMPRESSED_SRGB8_ALPHA8_ETC2_EAC 0x9279
GL_COMPRESSED_R11_EAC 0x9270
GL_COMPRESSED_SIGNED_R11_EAC 0x9271
GL_COMPRESSED_RG11_EAC 0x9272
GL_COMPRESSED_SIGNED_RG11_EAC 0x9273
GL_PRIMITIVE_RESTART_FIXED_INDEX 0x8D69
GL_ANY_SAMPLES_PASSED_CONSERVATIVE 0x8D6A
GL_MAX_ELEMENT_INDEX 0x8D6B""", globals())
def glInitEs3CompatibilityARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( EXTENSION_NAME )
| {
"content_hash": "8cbbfbbe6652ca7ba5a37f0509a75b91",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 71,
"avg_line_length": 38.69565217391305,
"alnum_prop": 0.802247191011236,
"repo_name": "frederica07/Dragon_Programming_Process",
"id": "b31c5ac8daea4a175ac3a2a99f4f4fbe9ddb61b6",
"size": "890",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PyOpenGL-3.0.2/OpenGL/raw/GL/ARB/ES3_compatibility.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "1548"
},
{
"name": "Python",
"bytes": "2558317"
}
],
"symlink_target": ""
} |
'''OpenGL extension KHR.cl_event
This module customises the behaviour of the
OpenGL.raw.EGL.KHR.cl_event to provide a more
Python-friendly API
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/KHR/cl_event.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.EGL import _types, _glgets
from OpenGL.raw.EGL.KHR.cl_event import *
from OpenGL.raw.EGL.KHR.cl_event import _EXTENSION_NAME
def glInitClEventKHR():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION | {
"content_hash": "47ecad7d3d8d5438fdb2dda9db8eee15",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 71,
"avg_line_length": 31.47826086956522,
"alnum_prop": 0.7817679558011049,
"repo_name": "alexus37/AugmentedRealityChess",
"id": "b784f08da1d7096daa47e69dfb9abb9e0b86a75e",
"size": "724",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "pythonAnimations/pyOpenGLChess/engineDirectory/oglc-env/lib/python2.7/site-packages/OpenGL/EGL/KHR/cl_event.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "158062"
},
{
"name": "C++",
"bytes": "267993"
},
{
"name": "CMake",
"bytes": "11319"
},
{
"name": "Fortran",
"bytes": "3707"
},
{
"name": "Makefile",
"bytes": "14618"
},
{
"name": "Python",
"bytes": "12813086"
},
{
"name": "Roff",
"bytes": "3310"
},
{
"name": "Shell",
"bytes": "3855"
}
],
"symlink_target": ""
} |
"""
rnn
Models built with TensorFlow for constructing a standard Recurrent Neural
Network (RNN) and a Long Short-Term Memory (LSTM) network.
"""
import tensorflow as tf
class _BaseRNN(object):
"""base class recurrent neural network"""
def __init__(self, step_size, state_size, num_classes):
self._step_size = step_size
self._state_size = state_size
self._n_class = num_classes
@property
def x(self):
"""feature vector"""
return tf.placeholder(dtype=tf.float32,
shape=(None, self._step_size),
name='feature')
@property
def y_(self):
"""true label, in one hot format"""
return tf.placeholder(dtype=tf.float32,
shape=(None, self._n_class),
name='multilabel')
@property
def _zero_state(self):
"""zero state for initial state and initial output"""
return tf.placeholder(dtype=tf.float32,
shape=(None, self._state_size))
def get_weight_variable(self, name):
"""create new or reuse weight variable by variable name."""
init = tf.truncated_normal_initializer(stddev=.1)
if 'W_hx' in name:
return tf.get_variable(name=name,
shape=[self._step_size, self._state_size],
initializer=init)
elif 'W_hh' in name:
return tf.get_variable(name=name,
shape=[self._state_size, self._state_size],
initializer=init)
else:
            raise RuntimeError('must specify W_hx or W_hh for rnn cell '
                               'weights. Such an elegant hack :).')
def get_bias_variable(self, name):
"""create new or reuse bias variable by variable name."""
init = tf.constant_initializer(0.0)
return tf.get_variable(name=name,
shape=[self._state_size],
initializer=init)
def static_rnn(self):
"""whereas dynamic rnn is known to be preferred due to its
flexibility, this static approach aims to lay foundation to dynamic rnn
implementation."""
rnn_inputs = tf.unstack(self.x, axis=1)
output = state = tf.zeros_like(self._zero_state, name='initial_state')
output_receiver = list()
if self.__class__.__name__ == 'RNN':
for rnn_input in rnn_inputs:
output, state = self(rnn_input=rnn_input, state=state)
output_receiver.append(output)
elif self.__class__.__name__ == 'LSTM':
for rnn_input in rnn_inputs:
output, state = self(cell_input=rnn_input,
cell_output=output,
cell_state=state)
output_receiver.append(output)
else:
raise Exception('_BaseRNN can not be called directly.')
return output_receiver, state
class RNN(_BaseRNN):
def __init__(self, step_size, state_size, num_classes):
super(RNN, self).__init__(step_size, state_size, num_classes)
def __call__(self, rnn_input, state):
"""RNN cell implementation to Colah's blog (2015)."""
with tf.variable_scope('default_rnn_cell', reuse=None):
W_hx = self.get_weight_variable(name='W_hx')
W_hh = self.get_weight_variable(name='W_hh')
b_h = self.get_bias_variable(name='b_h')
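        # vanilla RNN update: h_t = tanh(x_t . W_hx + h_{t-1} . W_hh + b_h);
        # the new hidden state doubles as the cell output below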
output = tf.tanh(tf.matmul(rnn_input, W_hx) +
tf.matmul(state, W_hh) +
b_h)
state = output
return output, state
class LSTM(_BaseRNN):
def __init__(self, step_size, state_size, num_classes):
super(LSTM, self).__init__(step_size, state_size, num_classes)
def __call__(self, cell_input, cell_output, cell_state):
"""LSTM cell implemented to Hochreiter & Schmidhuber (1997)"""
with tf.variable_scope('default_lstm_cell', reuse=None):
forget_W_hx = self.get_weight_variable(name='forget_W_hx')
forget_W_hh = self.get_weight_variable(name='forget_W_hh')
forget_b_h = self.get_bias_variable(name='forget_b_h')
input_W_hx = self.get_weight_variable(name='input_W_hx')
input_W_hh = self.get_weight_variable(name='input_W_hh')
input_b_h = self.get_bias_variable(name='input_b_h')
cell_state_W_hx = self.get_weight_variable(name='cell_state_W_hx')
cell_state_W_hh = self.get_weight_variable(name='cell_state_W_hh')
cell_state_b_h = self.get_bias_variable(name='cell_state_b_h')
output_W_hx = self.get_weight_variable(name='output_W_hx')
output_W_hh = self.get_weight_variable(name='output_W_hh')
output_b_h = self.get_bias_variable(name='output_b_h')
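        # standard LSTM gating: the forget and input gates (sigmoid) decide
        # what the cell state keeps and admits, the candidate state (tanh)
        # supplies new content, and the output gate (sigmoid) controls what
        # is exposed as the cell output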
forget_gate = tf.sigmoid(
tf.matmul(cell_input, forget_W_hx) +
tf.matmul(cell_output, forget_W_hh) +
forget_b_h)
input_gate = tf.sigmoid(
tf.matmul(cell_input, input_W_hx) +
tf.matmul(cell_output, input_W_hh) +
input_b_h)
cell_state_delta = tf.tanh(
tf.matmul(cell_input, cell_state_W_hx) +
tf.matmul(cell_output, cell_state_W_hh) +
cell_state_b_h)
# cell memory forgets old information and learns new information
cell_state_t = forget_gate * cell_state + input_gate * cell_state_delta
output_gate = tf.sigmoid(
tf.matmul(cell_input, output_W_hx) +
tf.matmul(cell_output, output_W_hh) +
output_b_h)
output = output_gate * tf.tanh(cell_state_t)
return output, cell_state_t
| {
"content_hash": "da921db790cba3d071d197a23d320e03",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 79,
"avg_line_length": 40.755102040816325,
"alnum_prop": 0.5384743782340177,
"repo_name": "minggli/chatbot",
"id": "909d27ad7997cf6618b3ff66c53120d9a8f35c83",
"size": "6015",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "chatbot/models/rnn.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "57979"
},
{
"name": "Shell",
"bytes": "185"
}
],
"symlink_target": ""
} |
import math
from copy import deepcopy
from fontTools.misc import transform
from fontParts.base.errors import FontPartsError
from fontParts.base import normalizers
# -------
# Helpers
# -------
class dynamicProperty(object):
"""
This implements functionality that is very similar
to Python's built in property function, but makes
it much easier for subclassing. Here is an example
of why this is needed:
class BaseObject(object):
_foo = 1
def _get_foo(self):
return self._foo
def _set_foo(self, value):
self._foo = value
foo = property(_get_foo, _set_foo)
class MyObject(BaseObject):
def _set_foo(self, value):
self._foo = value * 100
>>> m = MyObject()
>>> m.foo
1
>>> m.foo = 2
>>> m.foo
2
The expected value is 200. The _set_foo method
needs to be reregistered. Doing that also requires
reregistering the _get_foo method. It's possible
to do this, but it's messy and will make subclassing
less than ideal.
Using dynamicProperty solves this.
class BaseObject(object):
_foo = 1
foo = dynamicProperty("foo")
def _get_foo(self):
return self._foo
def _set_foo(self, value):
self._foo = value
class MyObject(BaseObject):
def _set_foo(self, value):
self._foo = value * 100
>>> m = MyObject()
>>> m.foo
1
>>> m.foo = 2
>>> m.foo
200
"""
def __init__(self, name, doc=None):
self.name = name
self.__doc__ = doc
self.getterName = "_get_" + name
self.setterName = "_set_" + name
def __get__(self, obj, cls):
getter = getattr(obj, self.getterName, None)
if getter is not None:
return getter()
else:
# obj is None when the property is accessed
# via the class instead of an instance
if obj is None:
return self
raise FontPartsError("no getter for %r" % self.name)
def __set__(self, obj, value):
setter = getattr(obj, self.setterName, None)
if setter is not None:
setter(value)
else:
raise FontPartsError("no setter for %r" % self.name)
def interpolate(a, b, v):
return a + (b - a) * v
# ------------
# Base Objects
# ------------
class BaseObject(object):
# --------------
# Initialization
# --------------
def __init__(self, *args, **kwargs):
self._init(*args, **kwargs)
def _init(self, *args, **kwargs):
"""
Subclasses may override this method.
"""
pass
# ----
# repr
# ----
def __repr__(self):
contents = self._reprContents()
if contents:
contents = " ".join(contents)
contents = " " + contents
else:
contents = ""
s = "<{className}{contents} at {address}>".format(
className=self.__class__.__name__,
contents=contents,
address=id(self)
)
return s
@classmethod
def _reprContents(cls):
"""
Subclasses may override this method to
provide a list of strings for inclusion
in ``__repr__``. If so, they should call
``super`` and append their additions
to the returned ``list``.
"""
return []
# --------
# equality
# --------
def __eq__(self, other):
"""
Subclasses may override this method.
"""
if isinstance(other, self.__class__):
return self.naked() is other.naked()
return NotImplemented
def __ne__(self, other):
"""
Subclasses must not override this method.
"""
equal = self.__eq__(other)
return NotImplemented if equal is NotImplemented else not equal
# ----
# Hash
# ----
def __hash__(self):
"""
Allow subclasses to be used in hashable collections.
Subclasses may override this method.
"""
return id(self.naked())
# ----
# Copy
# ----
copyClass = None
copyAttributes = ()
def copy(self):
"""
Copy this object into a new object of the same type.
The returned object will not have a parent object.
"""
copyClass = self.copyClass
if copyClass is None:
copyClass = self.__class__
copied = copyClass()
copied.copyData(self)
return copied
def copyData(self, source):
"""
Subclasses may override this method.
If so, they should call the super.
"""
for attr in self.copyAttributes:
selfValue = getattr(self, attr)
sourceValue = getattr(source, attr)
if isinstance(selfValue, BaseObject):
selfValue.copyData(sourceValue)
else:
setattr(self, attr, sourceValue)
# ----------
# Exceptions
# ----------
def raiseNotImplementedError(self):
"""
This exception needs to be raised frequently by
the base classes. So, it's here for convenience.
"""
raise NotImplementedError(
"The {className} subclass does not implement this method."
.format(className=self.__class__.__name__)
)
# ---------------------
# Environment Fallbacks
# ---------------------
def changed(self, *args, **kwargs):
"""
Tell the environment that something has changed in
the object. The behavior of this method will vary
from environment to environment.
>>> obj.changed()
"""
def naked(self):
"""
Return the environment's native object
that has been wrapped by this object.
        >>> lowLevelObj = obj.naked()
"""
self.raiseNotImplementedError()
class BaseDict(BaseObject):
keyNormalizer = None
valueNormalizer = None
def copyData(self, source):
super(BaseDict, self).copyData(source)
self.update(source)
def __len__(self):
value = self._len()
return value
def _len(self):
"""
Subclasses may override this method.
"""
return len(self.keys())
def keys(self):
keys = self._keys()
if self.keyNormalizer is not None:
keys = [self.keyNormalizer.__func__(key) for key in keys]
return keys
def _keys(self):
"""
Subclasses may override this method.
"""
return [k for k, v in self.items()]
def items(self):
items = self._items()
if self.keyNormalizer is not None and self.valueNormalizer is not None:
            items = [
                (self.keyNormalizer.__func__(key),
                 self.valueNormalizer.__func__(value))
                for (key, value) in items
            ]
        return items
def _items(self):
"""
Subclasses must override this method.
"""
self.raiseNotImplementedError()
def values(self):
values = self._values()
if self.valueNormalizer is not None:
values = [self.valueNormalizer.__func__(value) for value in values]
return values
def _values(self):
"""
Subclasses may override this method.
"""
return [v for k, v in self.items()]
def __contains__(self, key):
if self.keyNormalizer is not None:
key = self.keyNormalizer.__func__(key)
return self._contains(key)
def _contains(self, key):
"""
Subclasses must override this method.
"""
self.raiseNotImplementedError()
has_key = __contains__
def __setitem__(self, key, value):
if self.keyNormalizer is not None:
key = self.keyNormalizer.__func__(key)
if self.valueNormalizer is not None:
value = self.valueNormalizer.__func__(value)
self._setItem(key, value)
def _setItem(self, key, value):
"""
Subclasses must override this method.
"""
self.raiseNotImplementedError()
def __getitem__(self, key):
if self.keyNormalizer is not None:
key = self.keyNormalizer.__func__(key)
value = self._getItem(key)
if self.valueNormalizer is not None:
value = self.valueNormalizer.__func__(value)
return value
def _getItem(self, key):
"""
Subclasses must override this method.
"""
self.raiseNotImplementedError()
def get(self, key, default=None):
if self.keyNormalizer is not None:
key = self.keyNormalizer.__func__(key)
if default is not None and self.valueNormalizer is not None:
default = self.valueNormalizer.__func__(default)
value = self._get(key, default=default)
if value is not default and self.valueNormalizer is not None:
value = self.valueNormalizer.__func__(value)
return value
def _get(self, key, default=None):
"""
Subclasses may override this method.
"""
if key in self:
return self[key]
return default
def __delitem__(self, key):
if self.keyNormalizer is not None:
key = self.keyNormalizer.__func__(key)
self._delItem(key)
def _delItem(self, key):
"""
Subclasses must override this method.
"""
self.raiseNotImplementedError()
def pop(self, key, default=None):
if self.keyNormalizer is not None:
key = self.keyNormalizer.__func__(key)
if default is not None and self.valueNormalizer is not None:
default = self.valueNormalizer.__func__(default)
value = self._pop(key, default=default)
if self.valueNormalizer is not None:
value = self.valueNormalizer.__func__(value)
return value
def _pop(self, key, default=None):
"""
Subclasses may override this method.
"""
value = default
if key in self:
value = self[key]
del self[key]
return value
def __iter__(self):
return self._iter()
def _iter(self):
"""
Subclasses may override this method.
"""
keys = self.keys()
while keys:
key = keys[0]
yield key
keys = keys[1:]
def update(self, other):
other = deepcopy(other)
if self.keyNormalizer is not None and self.valueNormalizer is not None:
d = {}
for key, value in other.items():
key = self.keyNormalizer.__func__(key)
value = self.valueNormalizer.__func__(value)
d[key] = value
            other = d
self._update(other)
def _update(self, other):
"""
Subclasses may override this method.
"""
for key, value in other.items():
self[key] = value
def clear(self):
self._clear()
def _clear(self):
"""
Subclasses may override this method.
"""
for key in self.keys():
del self[key]
class TransformationMixin(object):
# ---------------
# Transformations
# ---------------
def transformBy(self, matrix, origin=None):
"""
Transform the object.
>>> obj.transformBy((0.5, 0, 0, 2.0, 10, 0))
>>> obj.transformBy((0.5, 0, 0, 2.0, 10, 0), origin=(500, 500))
**matrix** must be a :ref:`type-transformation`.
        **origin** defines the point at which the transformation
should originate. It must be a :ref:`type-coordinate`
or ``None``. The default is ``(0, 0)``.
"""
matrix = normalizers.normalizeTransformationMatrix(matrix)
if origin is None:
origin = (0, 0)
origin = normalizers.normalizeCoordinateTuple(origin)
if origin is not None:
t = transform.Transform()
oX, oY = origin
t = t.translate(oX, oY)
t = t.transform(matrix)
t = t.translate(-oX, -oY)
matrix = tuple(t)
self._transformBy(matrix)
def _transformBy(self, matrix, **kwargs):
"""
This is the environment implementation of
:meth:`BaseObject.transformBy`.
**matrix** will be a :ref:`type-transformation`.
that has been normalized with
:func:`normalizers.normalizeTransformationMatrix`.
Subclasses must override this method.
"""
self.raiseNotImplementedError()
def moveBy(self, value):
"""
Move the object.
>>> obj.transformBy((10, 0))
**value** must be an iterable containing two
:ref:`type-int-float` values defining the x and y
values to move the object by.
"""
value = normalizers.normalizeTransformationOffset(value)
self._moveBy(value)
def _moveBy(self, value, **kwargs):
"""
This is the environment implementation of
:meth:`BaseObject.moveBy`.
**value** will be an iterable containing two
:ref:`type-int-float` values defining the x and y
values to move the object by. It will have been
normalized with :func:`normalizers.normalizeTransformationOffset`.
Subclasses may override this method.
"""
x, y = value
t = transform.Offset(x, y)
self.transformBy(tuple(t), **kwargs)
def scaleBy(self, value, origin=None):
"""
Scale the object.
>>> obj.transformBy(2.0)
>>> obj.transformBy((0.5, 2.0), origin=(500, 500))
**value** must be an iterable containing two
:ref:`type-int-float` values defining the x and y
values to scale the object by. **origin** defines the
        point at which the scale should originate. It must be
a :ref:`type-coordinate` or ``None``. The default is
``(0, 0)``.
"""
value = normalizers.normalizeTransformationScale(value)
if origin is None:
origin = (0, 0)
origin = normalizers.normalizeCoordinateTuple(origin)
self._scaleBy(value, origin=origin)
def _scaleBy(self, value, origin=None, **kwargs):
"""
This is the environment implementation of
:meth:`BaseObject.scaleBy`.
**value** will be an iterable containing two
:ref:`type-int-float` values defining the x and y
values to scale the object by. It will have been
normalized with :func:`normalizers.normalizeTransformationScale`.
**origin** will be a :ref:`type-coordinate` defining
        the point at which the scale should originate.
Subclasses may override this method.
"""
x, y = value
t = transform.Identity.scale(x=x, y=y)
self.transformBy(tuple(t), origin=origin, **kwargs)
def rotateBy(self, value, origin=None):
"""
Rotate the object.
>>> obj.transformBy(45)
>>> obj.transformBy(45, origin=(500, 500))
        **value** must be a :ref:`type-int-float` value
defining the angle to rotate the object by. **origin**
        defines the point at which the rotation should originate.
It must be a :ref:`type-coordinate` or ``None``.
The default is ``(0, 0)``.
"""
value = normalizers.normalizeRotationAngle(value)
if origin is None:
origin = (0, 0)
origin = normalizers.normalizeCoordinateTuple(origin)
self._rotateBy(value, origin=origin)
def _rotateBy(self, value, origin=None, **kwargs):
"""
This is the environment implementation of
:meth:`BaseObject.rotateBy`.
**value** will be a :ref:`type-int-float` value
defining the value to rotate the object by.
It will have been normalized with
:func:`normalizers.normalizeRotationAngle`.
**origin** will be a :ref:`type-coordinate` defining
        the point at which the rotation should originate.
Subclasses may override this method.
"""
a = math.radians(value)
t = transform.Identity.rotate(a)
self.transformBy(tuple(t), origin=origin, **kwargs)
def skewBy(self, value, origin=None):
"""
Skew the object.
>>> obj.skewBy(11)
>>> obj.skewBy((25, 10), origin=(500, 500))
        **value** must be one of the following:
* single :ref:`type-int-float` indicating the
value to skew the x direction by.
        * iterable containing type :ref:`type-int-float`
defining the values to skew the x and y directions by.
        **origin** defines the point at which the skew should
originate. It must be a :ref:`type-coordinate` or
``None``. The default is ``(0, 0)``.
"""
value = normalizers.normalizeTransformationSkewAngle(value)
if origin is None:
origin = (0, 0)
origin = normalizers.normalizeCoordinateTuple(origin)
self._skewBy(value, origin=origin)
def _skewBy(self, value, origin=None, **kwargs):
"""
This is the environment implementation of
:meth:`BaseObject.skewBy`.
**value** will be an iterable containing two
:ref:`type-int-float` values defining the x and y
values to skew the object by. It will have been
normalized with :func:`normalizers.normalizeTransformationSkewAngle`.
**origin** will be a :ref:`type-coordinate` defining
        the point at which the skew should originate.
Subclasses may override this method.
"""
x, y = value
x = math.radians(x)
y = math.radians(y)
t = transform.Identity.skew(x=x, y=y)
self.transformBy(tuple(t), origin=origin, **kwargs)
class InterpolationMixin(object):
# -------------
# Compatibility
# -------------
compatibilityReporterClass = None
def isCompatible(self, other, cls):
"""
Evaluate interpolation compatibility with other.
"""
if not isinstance(other, cls):
raise TypeError(
"""Compatibility between an instance of %r and an \
instance of %r can not be checked."""
% (cls.__name__, other.__class__.__name__))
reporter = self.compatibilityReporterClass(self, other)
self._isCompatible(other, reporter)
return not reporter.fatal, reporter
def _isCompatible(self, other, reporter):
"""
Subclasses must override this method.
"""
self.raiseNotImplementedError()
class SelectionMixin(object):
# -------------
# Selected Flag
# -------------
selected = dynamicProperty(
"base_selected",
"""
The object's selection state.
>>> obj.selected
False
>>> obj.selected = True
"""
)
def _get_base_selected(self):
value = self._get_selected()
value = normalizers.normalizeBoolean(value)
return value
def _set_base_selected(self, value):
value = normalizers.normalizeBoolean(value)
self._set_selected(value)
def _get_selected(self):
"""
This is the environment implementation of
:attr:`BaseObject.selected`. This must return a
**boolean** representing the selection state
of the object. The value will be normalized
with :func:`normalizers.normalizeBoolean`.
Subclasses must override this method if they
implement object selection.
"""
self.raiseNotImplementedError()
def _set_selected(self, value):
"""
This is the environment implementation of
:attr:`BaseObject.selected`. **value** will
be a **boolean** representing the object's
selection state. The value will have been
normalized with :func:`normalizers.normalizeBoolean`.
Subclasses must override this method if they
implement object selection.
"""
self.raiseNotImplementedError()
# -----------
# Sub-Objects
# -----------
@classmethod
def _getSelectedSubObjects(cls, subObjects):
selected = [obj for obj in subObjects if obj.selected]
return selected
@classmethod
def _setSelectedSubObjects(cls, subObjects, selected):
for obj in subObjects:
obj.selected = obj in selected
class PointPositionMixin(object):
"""
    This adds a ``position`` attribute as a dynamicProperty,
for use as a mixin with objects that have ``x`` and ``y``
attributes.
"""
position = dynamicProperty("base_position", "The point position.")
def _get_base_position(self):
value = self._get_position()
value = normalizers.normalizeCoordinateTuple(value)
return value
def _set_base_position(self, value):
value = normalizers.normalizeCoordinateTuple(value)
self._set_position(value)
def _get_position(self):
"""
Subclasses may override this method.
"""
return (self.x, self.y)
def _set_position(self, value):
"""
Subclasses may override this method.
"""
pX, pY = self.position
x, y = value
dX = x - pX
dY = y - pY
self.moveBy((dX, dY))
class IdentifierMixin(object):
# identifier
identifier = dynamicProperty(
"base_identifier",
"""
The unique identifier for the object.
This value will be an :ref:`type-identifier` or a ``None``.
This attribute is read only. ::
>>> object.identifier
'ILHGJlygfds'
To request an identifier if it does not exist use
`object.getIdentifier()`
"""
)
def _get_base_identifier(self):
value = self._get_identifier()
if value is not None:
value = normalizers.normalizeIdentifier(value)
return value
def _get_identifier(self):
"""
This is the environment implementation of
:attr:`BaseObject.identifier`. This must
return an :ref:`type-identifier`. If
the native object does not have an identifier
assigned one should be assigned and returned.
Subclasses must override this method.
"""
self.raiseNotImplementedError()
def getIdentifier(self):
"""
        Create a new, unique identifier and assign it to the object.
If the object already has an identifier, the existing one should
be returned.
"""
return self._getIdentifier()
def _getIdentifier(self):
"""
Subclasses must override this method.
"""
self.raiseNotImplementedError()
def _setIdentifier(self, value):
"""
This method is used internally to force a specific
identifier onto an object in certain situations.
Subclasses that allow setting an identifier to a
specific value may override this method.
"""
pass
def reference(obj):
# import weakref
# return weakref.ref(obj)
def wrapper():
return obj
return wrapper
| {
"content_hash": "76aefe6dbbc048856b062dbfe77186b8",
"timestamp": "",
"source": "github",
"line_count": 837,
"max_line_length": 79,
"avg_line_length": 28.174432497013143,
"alnum_prop": 0.5607666864557713,
"repo_name": "robofab-developers/fontParts",
"id": "4f814cb78832fa4146b90fc39a3c05d51649819c",
"size": "23582",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Lib/fontParts/base/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "913380"
},
{
"name": "Shell",
"bytes": "1837"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from .models import Genre, Instrument, Song
admin.site.register(Genre)
admin.site.register(Instrument)
admin.site.register(Song)
# admin.site.register(Followers)
| {
"content_hash": "42283eb34ee0165022d30e7e50d16b2f",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 43,
"avg_line_length": 24.625,
"alnum_prop": 0.8071065989847716,
"repo_name": "ajay2611/mmb",
"id": "56cc369f63491d423777cb8420ec89165c2fb46d",
"size": "197",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mmb_repo/mmb_data/admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "192692"
},
{
"name": "HTML",
"bytes": "86164"
},
{
"name": "JavaScript",
"bytes": "228636"
},
{
"name": "PHP",
"bytes": "92328"
},
{
"name": "Python",
"bytes": "75855"
},
{
"name": "Shell",
"bytes": "3620"
}
],
"symlink_target": ""
} |
'''
Some default compression transforms (gzip, compress, etc).
@author: Michael Eddington
@version: $Id: compress.py 2020 2010-04-14 23:13:14Z meddingt $
'''
#
# Copyright (c) Michael Eddington
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Authors:
# Michael Eddington (mike@phed.org)
# $Id: compress.py 2020 2010-04-14 23:13:14Z meddingt $
import zlib
try:
import bz2
except:
pass
from Peach.transformer import Transformer
class GzipCompress(Transformer):
'''
Gzip compression transform. Also allows for compression level
selection (default is 6).
'''
def __init__(self, level = 6):
'''
@type level: number
@param level: level is an integer from 1 to 9 controlling the level
of compression; 1 is fastest and produces the least compression, 9
is slowest and produces the most. The default value is 6.
'''
Transformer.__init__(self)
self._level = level
self._wbits = 15
def realEncode(self, data):
return zlib.compress(data, self._level)
def realDecode(self, data):
return zlib.decompress(data, self._wbits)
class GzipDecompress(Transformer):
'''
Gzip decompression transform.
'''
def __init__(self, wbits = 15):
'''
@type wbits: number
@param wbits: The absolute value of wbits is the base two logarithm
of the size of the history buffer (the ``window size'') used when
compressing data. Its absolute value should be between 8 and 15 for
the most recent versions of the zlib library, larger values resulting
in better compression at the expense of greater memory usage. The
default value is 15. When wbits is negative, the standard gzip
header is suppressed; this is an undocumented feature of the zlib
library, used for compatibility with unzip's compression file format.
'''
Transformer.__init__(self)
self._wbits = wbits
self._level = 6
def realEncode(self, data):
return zlib.decompress(data, self._wbits)
def realDecode(self, data):
return zlib.compress(data, self._level)
class Bz2Compress(Transformer):
'''
bzip2 compression transform. Also allows for compression level
selection (default is 9).
'''
def __init__(self, level = 9):
'''
@type level: number
@param level: The compresslevel parameter, if given, must be a number
between 1 and 9; the default is 9.
'''
Transformer.__init__(self)
self._level = level
def realEncode(self, data):
return bz2.compress(data, self._level)
def realDecode(self, data):
return bz2.decompress(data)
class Bz2Decompress(Transformer):
'''
bzip2 decompression transform.
'''
def realEncode(self, data):
return bz2.decompress(data)
def realDecode(self, data):
return bz2.compress(data, 6)
# end
| {
"content_hash": "c086ceebfc5b19ca70e590cb1b7cf009",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 79,
"avg_line_length": 28.49230769230769,
"alnum_prop": 0.728401727861771,
"repo_name": "thecrackofdawn/Peach2.3",
"id": "0755b22fad200f16e75f78ea57303fdb1ce8b634",
"size": "3705",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Peach/Transformers/compress.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "29972"
},
{
"name": "C++",
"bytes": "21544"
},
{
"name": "CSS",
"bytes": "18213"
},
{
"name": "JavaScript",
"bytes": "10581"
},
{
"name": "Objective-C",
"bytes": "403"
},
{
"name": "Python",
"bytes": "25902756"
},
{
"name": "Shell",
"bytes": "898"
},
{
"name": "XSLT",
"bytes": "18658"
}
],
"symlink_target": ""
} |
"""linkedlist.py: Class representing linkedlist"""
from listnode import ListNode
__author__ = 'Rohit Sinha'
class LinkedList:
def __init__(self):
self.head = None
def is_empty(self):
return self.head is None
def add(self, item):
temp = ListNode(item)
if self.head is None:
self.head = temp
else:
cur = self.head
# traverse to the last node and append there
while cur.get_next() is not None:
cur = cur.get_next()
cur.set_next(temp)
def size(self):
cur = self.head
count = 0
while cur is not None:
count += 1
cur = cur.get_next()
return count
def search(self, key):
cur = self.head
found = False
while cur is not None and not found:
if cur.get_data() == key:
found = True
else:
cur = cur.get_next()
return found
def remove(self, item):
cur = self.head
prev = None
found = False
# if the element is present then delete it
while cur is not None and not found:
if cur.get_data() == item:
found = True
else:
prev = cur
cur = cur.get_next()
        if not found:
            return
        if prev is None:
            self.head = cur.get_next()
        else:
            prev.set_next(cur.get_next())
if __name__ == '__main__':
list = LinkedList()
print(list.is_empty())
list.add(1)
list.add(2)
list.add(3)
list.add(4)
print(list.size())
print(list.search(2))
print(list.search(5))
list.remove(2)
print(list.search(2))
list.remove(5) | {
"content_hash": "352486f594708522379b1b0a5961702d",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 56,
"avg_line_length": 23.157894736842106,
"alnum_prop": 0.5011363636363636,
"repo_name": "rohitsinha54/Learning-Python",
"id": "077ebc721775f77d1f7e1eaa5540660ded4890ca",
"size": "1783",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "algorithms/linkedlist.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11455"
}
],
"symlink_target": ""
} |
"""Copyright 2015 Roger R Labbe Jr.
FilterPy library.
http://github.com/rlabbe/filterpy
Documentation at:
https://filterpy.readthedocs.org
Supporting book at:
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the readme.MD file
for more information.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from numpy import dot
from filterpy.common import pretty_str
class GHFilterOrder(object):
""" A g-h filter of aspecified order 0, 1, or 2.
Strictly speaking, the g-h filter is order 1, and the 2nd order
filter is called the g-h-k filter. I'm not aware of any filter name
that encompasses orders 0, 1, and 2 under one name, or I would use it.
Parameters
----------
x0 : 1D np.array or scalar
Initial value for the filter state. Each value can be a scalar
or a np.array.
You can use a scalar for x0. If order > 0, then 0.0 is assumed
for the higher order terms.
x[0] is the value being tracked
x[1] is the first derivative (for order 1 and 2 filters)
x[2] is the second derivative (for order 2 filters)
dt : scalar
timestep
order : int
order of the filter. Defines the order of the system
0 - assumes system of form x = a_0 + a_1*t
1 - assumes system of form x = a_0 +a_1*t + a_2*t^2
2 - assumes system of form x = a_0 +a_1*t + a_2*t^2 + a_3*t^3
g : float
filter g gain parameter.
h : float, optional
filter h gain parameter, order 1 and 2 only
k : float, optional
filter k gain parameter, order 2 only
    Attributes
    ----------
x : np.array
State of the filter.
x[0] is the value being tracked
x[1] is the derivative of x[0] (order 1 and 2 only)
x[2] is the 2nd derivative of x[0] (order 2 only)
This is always an np.array, even for order 0 where you can
initialize x0 with a scalar.
y : np.array
Residual - difference between the measurement and the prediction
dt : scalar
timestep
order : int
order of the filter. Defines the order of the system
0 - assumes system of form x = a_0 + a_1*t
1 - assumes system of form x = a_0 +a_1*t + a_2*t^2
2 - assumes system of form x = a_0 +a_1*t + a_2*t^2 + a_3*t^3
g : float
filter g gain parameter.
h : float
filter h gain parameter, order 1 and 2 only
k : float
filter k gain parameter, order 2 only
z : 1D np.array or scalar
measurement passed into update()
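
    Examples
    --------
    A minimal, illustrative sketch (the initial value, gains and time step
    below are arbitrary choices, not recommendations):

    >>> f = GHFilterOrder(x0=0., dt=1., order=1, g=.5, h=.2)
    >>> f.update(z=1.)
    >>> float(f.x[0]), float(f.x[1])
    (0.5, 0.2)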
"""
def __init__(self, x0, dt, order, g, h=None, k=None):
""" Creates a g-h filter of order 0, 1, or 2.
"""
if order < 0 or order > 2:
raise ValueError('order must be between 0 and 2')
if np.isscalar(x0):
self.x = np.zeros(order+1)
self.x[0] = x0
else:
self.x = np.copy(x0.astype(float))
self.dt = dt
self.order = order
self.g = g
self.h = h
self.k = k
self.y = np.zeros(len(self.x)) # residual
self.z = np.zeros(len(self.x)) # last measurement
def update(self, z, g=None, h=None, k=None):
"""
Update the filter with measurement z. z must be the same type
or treatable as the same type as self.x[0].
"""
if self.order == 0:
if g is None:
g = self.g
self.y = z - self.x[0]
self.x += dot(g, self.y)
elif self.order == 1:
if g is None:
g = self.g
if h is None:
h = self.h
x = self.x[0]
dx = self.x[1]
dxdt = dot(dx, self.dt)
self.y = z - (x + dxdt)
self.x[0] = x + dxdt + g*self.y
self.x[1] = dx + h*self.y / self.dt
self.z = z
else: # order == 2
if g is None:
g = self.g
if h is None:
h = self.h
if k is None:
k = self.k
x = self.x[0]
dx = self.x[1]
ddx = self.x[2]
dxdt = dot(dx, self.dt)
T2 = self.dt**2.
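            # residual against the constant-acceleration prediction, followed
            # by the g, h and k gain corrections to x, dx and ddx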
            self.y = z - (x + dxdt + 0.5*ddx*T2)
self.x[0] = x + dxdt + 0.5*ddx*T2 + g*self.y
self.x[1] = dx + ddx*self.dt + h*self.y / self.dt
self.x[2] = ddx + 2*k*self.y / (self.dt**2)
def __repr__(self):
return '\n'.join([
'GHFilterOrder object',
pretty_str('dt', self.dt),
pretty_str('order', self.order),
pretty_str('x', self.x),
pretty_str('g', self.g),
pretty_str('h', self.h),
pretty_str('k', self.k),
pretty_str('y', self.y),
pretty_str('z', self.z)
])
class GHFilter(object):
"""
Implements the g-h filter. The topic is too large to cover in
this comment. See my book "Kalman and Bayesian Filters in Python" [1]
or Eli Brookner's "Tracking and Kalman Filters Made Easy" [2].
A few basic examples are below, and the tests in ./gh_tests.py may
give you more ideas on use.
Parameters
----------
x : 1D np.array or scalar
Initial value for the filter state. Each value can be a scalar
or a np.array.
        A scalar tracks a single value; an array tracks several
        independent values at once (see the Examples below).
dx : 1D np.array or scalar
Initial value for the derivative of the filter state.
dt : scalar
time step
g : float
filter g gain parameter.
h : float
filter h gain parameter.
Attributes
----------
x : 1D np.array or scalar
filter state
dx : 1D np.array or scalar
derivative of the filter state.
x_prediction : 1D np.array or scalar
predicted filter state
dx_prediction : 1D np.array or scalar
predicted derivative of the filter state.
dt : scalar
time step
g : float
filter g gain parameter.
h : float
filter h gain parameter.
y : np.array, or scalar
residual (difference between measurement and prior)
z : np.array, or scalar
measurement passed into update()
Examples
--------
Create a basic filter for a scalar value with g=.8, h=.2.
    Initialize to 0, with a derivative (velocity) of 0.
>>> from filterpy.gh import GHFilter
>>> f = GHFilter (x=0., dx=0., dt=1., g=.8, h=.2)
Incorporate the measurement of 1
>>> f.update(z=1)
(0.8, 0.2)
Incorporate a measurement of 2 with g=1 and h=0.01
>>> f.update(z=2, g=1, h=0.01)
(2.0, 0.21000000000000002)
Create a filter with two independent variables.
>>> from numpy import array
>>> f = GHFilter (x=array([0,1]), dx=array([0,0]), dt=1, g=.8, h=.02)
and update with the measurements (2,4)
    >>> f.update(array([2,4]))
(array([ 1.6, 3.4]), array([ 0.04, 0.06]))
References
----------
[1] Labbe, "Kalman and Bayesian Filters in Python"
http://rlabbe.github.io/Kalman-and-Bayesian-Filters-in-Python
[2] Brookner, "Tracking and Kalman Filters Made Easy". John Wiley and
Sons, 1998.
"""
def __init__(self, x, dx, dt, g, h):
self.x = x
self.dx = dx
self.dt = dt
self.g = g
self.h = h
self.dx_prediction = self.dx
self.x_prediction = self.x
if np.ndim(x) == 0:
self.y = 0. # residual
self.z = 0.
else:
self.y = np.zeros(len(x))
self.z = np.zeros(len(x))
def update(self, z, g=None, h=None):
"""
performs the g-h filter predict and update step on the
measurement z. Modifies the member variables listed below,
        and returns the state of x and dx as a tuple as a convenience.
**Modified Members**
x
filtered state variable
dx
derivative (velocity) of x
residual
difference between the measurement and the prediction for x
x_prediction
predicted value of x before incorporating the measurement z.
dx_prediction
predicted value of the derivative of x before incorporating the
measurement z.
Parameters
----------
z : any
the measurement
g : scalar (optional)
Override the fixed self.g value for this update
h : scalar (optional)
Override the fixed self.h value for this update
Returns
-------
x filter output for x
dx filter output for dx (derivative of x
"""
if g is None:
g = self.g
if h is None:
h = self.h
#prediction step
self.dx_prediction = self.dx
self.x_prediction = self.x + (self.dx*self.dt)
# update step
self.y = z - self.x_prediction
self.dx = self.dx_prediction + h * self.y / self.dt
self.x = self.x_prediction + g * self.y
return (self.x, self.dx)
def batch_filter(self, data, save_predictions=False, saver=None):
"""
Given a sequenced list of data, performs g-h filter
with a fixed g and h. See update() if you need to vary g and/or h.
Uses self.x and self.dx to initialize the filter, but DOES NOT
alter self.x and self.dx during execution, allowing you to use this
        class multiple times without resetting self.x and self.dx. I'm not sure
how often you would need to do that, but the capability is there.
More exactly, none of the class member variables are modified
by this function, in distinct contrast to update(), which changes
most of them.
Parameters
----------
data : list like
contains the data to be filtered.
save_predictions : boolean
the predictions will be saved and returned if this is true
saver : filterpy.common.Saver, optional
filterpy.common.Saver object. If provided, saver.save() will be
called after every epoch
Returns
-------
results : np.array shape (n+1, 2), where n=len(data)
contains the results of the filter, where
results[i,0] is x , and
results[i,1] is dx (derivative of x)
First entry is the initial values of x and dx as set by __init__.
predictions : np.array shape(n), optional
            the predictions for each step in the filter. Only returned if
save_predictions == True
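
        Examples
        --------
        A small illustrative run (the data values below are arbitrary):

        >>> f = GHFilter(x=0., dx=0., dt=1., g=.8, h=.2)
        >>> results = f.batch_filter([1., 2., 3.])
        >>> results.shape
        (4, 2)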
"""
x = self.x
dx = self.dx
n = len(data)
results = np.zeros((n+1, 2))
results[0, 0] = x
results[0, 1] = dx
if save_predictions:
predictions = np.zeros(n)
# optimization to avoid n computations of h / dt
h_dt = self.h / self.dt
for i, z in enumerate(data):
#prediction step
x_est = x + (dx * self.dt)
# update step
residual = z - x_est
dx = dx + h_dt * residual # i.e. dx = dx + h * residual / dt
x = x_est + self.g * residual
results[i+1, 0] = x
results[i+1, 1] = dx
if save_predictions:
predictions[i] = x_est
if saver is not None:
saver.save()
if save_predictions:
return results, predictions
return results
def VRF_prediction(self):
r"""
Returns the Variance Reduction Factor of the prediction
step of the filter. The VRF is the
normalized variance for the filter, as given in the equation below.
.. math::
VRF(\hat{x}_{n+1,n}) = \\frac{VAR(\hat{x}_{n+1,n})}{\sigma^2_x}
References
----------
Asquith, "Weight Selection in First Order Linear Filters"
        Report No RG-TR-69-12, U.S. Army Missile Command. Redstone Arsenal, Al.
November 24, 1970.
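
        Examples
        --------
        A quick numeric check of the formula (the gains are arbitrary):

        >>> f = GHFilter(x=0., dx=0., dt=1., g=.8, h=.2)
        >>> round(f.VRF_prediction(), 4)
        1.0455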
"""
g = self.g
h = self.h
return (2*g**2 + 2*h + g*h) / (g*(4 - 2*g - h))
def VRF(self):
r"""
Returns the Variance Reduction Factor (VRF) of the state variable
of the filter (x) and its derivatives (dx, ddx). The VRF is the
normalized variance for the filter, as given in the equations below.
.. math::
VRF(\hat{x}_{n,n}) = \\frac{VAR(\hat{x}_{n,n})}{\sigma^2_x}
VRF(\hat{\dot{x}}_{n,n}) = \\frac{VAR(\hat{\dot{x}}_{n,n})}{\sigma^2_x}
VRF(\hat{\ddot{x}}_{n,n}) = \\frac{VAR(\hat{\ddot{x}}_{n,n})}{\sigma^2_x}
Returns
-------
vrf_x VRF of x state variable
vrf_dx VRF of the dx state variable (derivative of x)
"""
g = self.g
h = self.h
den = g*(4 - 2*g - h)
vx = (2*g**2 + 2*h - 3*g*h) / den
vdx = 2*h**2 / (self.dt**2 * den)
return (vx, vdx)
def __repr__(self):
return '\n'.join([
'GHFilter object',
pretty_str('dt', self.dt),
pretty_str('g', self.g),
pretty_str('h', self.h),
pretty_str('x', self.x),
pretty_str('dx', self.dx),
pretty_str('x_prediction', self.x_prediction),
pretty_str('dx_prediction', self.dx_prediction),
pretty_str('y', self.y),
pretty_str('z', self.z)
])
class GHKFilter(object):
"""
Implements the g-h-k filter.
Parameters
----------
x : 1D np.array or scalar
Initial value for the filter state. Each value can be a scalar
or a np.array.
        A scalar tracks a single value; an array tracks several
        independent values at once.
dx : 1D np.array or scalar
Initial value for the derivative of the filter state.
ddx : 1D np.array or scalar
Initial value for the second derivative of the filter state.
dt : scalar
time step
g : float
filter g gain parameter.
h : float
filter h gain parameter.
k : float
filter k gain parameter.
Attributes
----------
x : 1D np.array or scalar
filter state
dx : 1D np.array or scalar
derivative of the filter state.
ddx : 1D np.array or scalar
second derivative of the filter state.
x_prediction : 1D np.array or scalar
predicted filter state
dx_prediction : 1D np.array or scalar
predicted derivative of the filter state.
ddx_prediction : 1D np.array or scalar
        predicted second derivative of the filter state.
dt : scalar
time step
g : float
filter g gain parameter.
h : float
filter h gain parameter.
k : float
filter k gain parameter.
y : np.array, or scalar
residual (difference between measurement and prior)
z : np.array, or scalar
measurement passed into update()
References
----------
Brookner, "Tracking and Kalman Filters Made Easy". John Wiley and
Sons, 1998.
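
    Examples
    --------
    A minimal, illustrative sketch (the gains and time step below are
    arbitrary choices, not recommendations):

    >>> f = GHKFilter(x=0., dx=0., ddx=0., dt=1., g=.4, h=.1, k=.01)
    >>> f.update(z=1.)
    (0.4, 0.1)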
"""
def __init__(self, x, dx, ddx, dt, g, h, k):
self.x = x
self.dx = dx
self.ddx = ddx
self.x_prediction = self.x
self.dx_prediction = self.dx
self.ddx_prediction = self.ddx
self.dt = dt
self.g = g
self.h = h
self.k = k
if np.ndim(x) == 0:
self.y = 0. # residual
self.z = 0.
else:
self.y = np.zeros(len(x))
self.z = np.zeros(len(x))
def update(self, z, g=None, h=None, k=None):
"""
Performs the g-h filter predict and update step on the
measurement z.
On return, self.x, self.dx, self.y, and self.x_prediction
will have been updated with the results of the computation. For
        convenience, self.x and self.dx are returned in a tuple.
Parameters
----------
z : scalar
the measurement
g : scalar (optional)
Override the fixed self.g value for this update
h : scalar (optional)
Override the fixed self.h value for this update
k : scalar (optional)
Override the fixed self.k value for this update
Returns
-------
x filter output for x
dx filter output for dx (derivative of x
"""
if g is None:
g = self.g
if h is None:
h = self.h
if k is None:
k = self.k
dt = self.dt
dt_sqr = dt**2
#prediction step
self.ddx_prediction = self.ddx
self.dx_prediction = self.dx + self.ddx*dt
self.x_prediction = self.x + self.dx*dt + .5*self.ddx*(dt_sqr)
# update step
self.y = z - self.x_prediction
self.ddx = self.ddx_prediction + 2*k*self.y / dt_sqr
self.dx = self.dx_prediction + h * self.y / dt
self.x = self.x_prediction + g * self.y
return (self.x, self.dx)
def batch_filter(self, data, save_predictions=False):
"""
Performs g-h filter with a fixed g and h.
Uses self.x and self.dx to initialize the filter, but DOES NOT
alter self.x and self.dx during execution, allowing you to use this
        class multiple times without resetting self.x and self.dx. I'm not sure
how often you would need to do that, but the capability is there.
More exactly, none of the class member variables are modified
by this function.
Parameters
----------
data : list_like
contains the data to be filtered.
save_predictions : boolean
The predictions will be saved and returned if this is true
Returns
-------
results : np.array shape (n+1, 2), where n=len(data)
contains the results of the filter, where
results[i,0] is x , and
results[i,1] is dx (derivative of x)
First entry is the initial values of x and dx as set by __init__.
predictions : np.array shape(n), or None
the predictions for each step in the filter. Only returned if
save_predictions == True
"""
x = self.x
dx = self.dx
n = len(data)
results = np.zeros((n+1, 2))
results[0, 0] = x
results[0, 1] = dx
if save_predictions:
predictions = np.zeros(n)
# optimization to avoid n computations of h / dt
h_dt = self.h / self.dt
for i, z in enumerate(data):
#prediction step
x_est = x + (dx*self.dt)
# update step
residual = z - x_est
dx = dx + h_dt * residual # i.e. dx = dx + h * residual / dt
x = x_est + self.g * residual
results[i+1, 0] = x
results[i+1, 1] = dx
if save_predictions:
predictions[i] = x_est
if save_predictions:
return results, predictions
return results
def VRF_prediction(self):
r"""
Returns the Variance Reduction Factor for x of the prediction
step of the filter.
This implements the equation
.. math::
VRF(\hat{x}_{n+1,n}) = \\frac{VAR(\hat{x}_{n+1,n})}{\sigma^2_x}
References
----------
Asquith and Woods, "Total Error Minimization in First
and Second Order Prediction Filters" Report No RE-TR-70-17, U.S.
        Army Missile Command. Redstone Arsenal, AL. November 24, 1970.
"""
g = self.g
h = self.h
k = self.k
gh2 = 2*g + h
return ((g*k*(gh2-4)+ h*(g*gh2+2*h)) /
(2*k - (g*(h+k)*(gh2-4))))
def bias_error(self, dddx):
"""
Returns the bias error given the specified constant jerk(dddx)
Parameters
----------
dddx : type(self.x)
3rd derivative (jerk) of the state variable x.
References
----------
Asquith and Woods, "Total Error Minimization in First
and Second Order Prediction Filters" Report No RE-TR-70-17, U.S.
        Army Missile Command. Redstone Arsenal, AL. November 24, 1970.
"""
return -self.dt**3 * dddx / (2*self.k)
def VRF(self):
r"""
Returns the Variance Reduction Factor (VRF) of the state variable
of the filter (x) and its derivatives (dx, ddx). The VRF is the
normalized variance for the filter, as given in the equations below.
.. math::
VRF(\hat{x}_{n,n}) = \\frac{VAR(\hat{x}_{n,n})}{\sigma^2_x}
VRF(\hat{\dot{x}}_{n,n}) = \\frac{VAR(\hat{\dot{x}}_{n,n})}{\sigma^2_x}
VRF(\hat{\ddot{x}}_{n,n}) = \\frac{VAR(\hat{\ddot{x}}_{n,n})}{\sigma^2_x}
Returns
-------
vrf_x : type(x)
VRF of x state variable
vrf_dx : type(x)
VRF of the dx state variable (derivative of x)
vrf_ddx : type(x)
VRF of the ddx state variable (second derivative of x)
"""
g = self.g
h = self.h
k = self.k
# common subexpressions in the equations pulled out for efficiency,
# they don't 'mean' anything.
        hg4 = 4 - 2*g - h
ghk = g*h + g*k - 2*k
vx = (2*h*(2*(g**2) + 2*h - 3*g*h) - 2*g*k*hg4) / (2*k - g*(h+k) * hg4)
vdx = (2*(h**3) - 4*(h**2)*k + 4*(k**2)*(2-g)) / (2*hg4*ghk)
vddx = 8*h*(k**2) / ((self.dt**4)*hg4*ghk)
return (vx, vdx, vddx)
def __repr__(self):
return '\n'.join([
'GHFilter object',
pretty_str('dt', self.dt),
pretty_str('g', self.g),
pretty_str('h', self.h),
pretty_str('k', self.k),
pretty_str('x', self.x),
pretty_str('dx', self.dx),
pretty_str('ddx', self.ddx),
pretty_str('x_prediction', self.x_prediction),
pretty_str('dx_prediction', self.dx_prediction),
            pretty_str('ddx_prediction', self.ddx_prediction),
pretty_str('y', self.y),
pretty_str('z', self.z)
])
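# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original filterpy source): a minimal
# example of running GHKFilter on a few noisy samples. The gains g, h and k
# below are illustrative assumptions, not tuned recommendations.
def _ghk_usage_sketch():
    f = GHKFilter(x=0., dx=0., ddx=0., dt=1., g=.4, h=.1, k=.01)
    estimates = []
    for z in [1.0, 2.1, 3.9, 7.2, 11.1]:
        x, dx = f.update(z)  # predict from the model, then correct with z
        estimates.append((x, dx, f.ddx))
    return estimates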
def optimal_noise_smoothing(g):
""" provides g,h,k parameters for optimal smoothing of noise for a given
value of g. This is due to Polge and Bhagavan[1].
Parameters
----------
g : float
value for g for which we will optimize for
Returns
-------
(g,h,k) : (float, float, float)
values for g,h,k that provide optimal smoothing of noise
Examples
--------
.. code-block:: Python
from filterpy.gh import GHKFilter, optimal_noise_smoothing
g,h,k = optimal_noise_smoothing(g)
f = GHKFilter(0,0,0,1,g,h,k)
f.update(1.)
References
----------
[1] Polge and Bhagavan. "A Study of the g-h-k Tracking Filter".
Report No. RE-CR-76-1. University of Alabama in Huntsville.
July, 1975
"""
    h = ((2*g**3 - 4*g**2) + (4*g**6 - 64*g**5 + 64*g**4)**.5) / (8*(1-g))
k = (h*(2-g) - g**2) / g
return (g, h, k)
def least_squares_parameters(n):
""" An order 1 least squared filter can be computed by a g-h filter
by varying g and h over time according to the formulas below, where
the first measurement is at n=0, the second is at n=1, and so on:
.. math::
h_n = \\frac{6}{(n+2)(n+1)}
g_n = \\frac{2(2n+1)}{(n+2)(n+1)}
Parameters
----------
n : int
the nth measurement, starting at 0 (i.e. first measurement has n==0)
Returns
-------
(g,h) : (float, float)
g and h parameters for this time step for the least-squares filter
Examples
--------
.. code-block:: Python
from filterpy.gh import GHFilter, least_squares_parameters
lsf = GHFilter (0, 0, 1, 0, 0)
z = 10
for i in range(10):
g,h = least_squares_parameters(i)
lsf.update(z, g, h)
"""
den = (n+2)*(n+1)
g = (2*(2*n + 1)) / den
h = 6 / den
return (g, h)
def critical_damping_parameters(theta, order=2):
r""" Computes values for g and h (and k for g-h-k filter) for a
critically damped filter.
The idea here is to create a filter that reduces the influence of
old data as new data comes in. This allows the filter to track a
moving target better. This goes by different names. It may be called the
    discounted least-squares g-h filter, a fading-memory polynomial filter
of order 1, or a critically damped g-h filter.
In a normal least-squares filter we compute the error for each point as
.. math::
\epsilon_t = (z-\\hat{x})^2
    For a critically damped filter we reduce the influence of each error by
.. math::
\\theta^{t-i}
where
.. math::
0 <= \\theta <= 1
In other words the last error is scaled by theta, the next to last by
theta squared, the next by theta cubed, and so on.
Parameters
----------
theta : float, 0 <= theta <= 1
scaling factor for previous terms
order : int, 2 (default) or 3
order of filter to create the parameters for. g and h will be
calculated for the order 2, and g, h, and k for order 3.
Returns
-------
g : scalar
optimal value for g in the g-h or g-h-k filter
h : scalar
optimal value for h in the g-h or g-h-k filter
k : scalar
        optimal value for k in the g-h-k filter
Examples
--------
.. code-block:: Python
from filterpy.gh import GHFilter, critical_damping_parameters
g,h = critical_damping_parameters(0.3)
critical_filter = GHFilter(0, 0, 1, g, h)
References
----------
Brookner, "Tracking and Kalman Filters Made Easy". John Wiley and
Sons, 1998.
Polge and Bhagavan. "A Study of the g-h-k Tracking Filter".
Report No. RE-CR-76-1. University of Alabama in Huntsville.
July, 1975
"""
if theta < 0 or theta > 1:
raise ValueError('theta must be between 0 and 1')
if order == 2:
return (1. - theta**2, (1. - theta)**2)
if order == 3:
return (1. - theta**3, 1.5*(1.-theta**2)*(1.-theta), .5*(1 - theta)**3)
raise ValueError('bad order specified: {}'.format(order))
def benedict_bornder_constants(g, critical=False):
""" Computes the g,h constants for a Benedict-Bordner filter, which
minimizes transient errors for a g-h filter.
Returns the values g,h for a specified g. Strictly speaking, only h
is computed, g is returned unchanged.
The default formula for the Benedict-Bordner allows ringing. We can
"nearly" critically damp it; ringing will be reduced, but not entirely
eliminated at the cost of reduced performance.
Parameters
----------
g : float
scaling factor g for the filter
critical : boolean, default False
Attempts to critically damp the filter.
Returns
-------
g : float
scaling factor g (same as the g that was passed in)
h : float
scaling factor h that minimizes the transient errors
Examples
--------
.. code-block:: Python
from filterpy.gh import GHFilter, benedict_bornder_constants
g, h = benedict_bornder_constants(.855)
f = GHFilter(0, 0, 1, g, h)
References
----------
Brookner, "Tracking and Kalman Filters Made Easy". John Wiley and
Sons, 1998.
"""
g_sqr = g**2
if critical:
return (g, 0.8 * (2. - g_sqr - 2*(1-g_sqr)**.5) / g_sqr)
return (g, g_sqr / (2.-g))
| {
"content_hash": "e8cc454ee0f19a831090b8e6b03ca687",
"timestamp": "",
"source": "github",
"line_count": 1074,
"max_line_length": 85,
"avg_line_length": 26.399441340782122,
"alnum_prop": 0.5441046802807463,
"repo_name": "rlabbe/filterpy",
"id": "260109ff711893fa26e01550a3f087da7d665ef0",
"size": "28611",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "filterpy/gh/gh_filter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "472"
},
{
"name": "Python",
"bytes": "508002"
},
{
"name": "Shell",
"bytes": "674"
}
],
"symlink_target": ""
} |
import re
# These are the regular expressions used to check if an option should be
# used to find a parent entity. The preferred way is using the "parent"
# prefix, as it avoids potential conflicts with other options, but we
# need to support the "identifier" suffix without the "parent" prefix
# for backwards compatibility.
PARENT_ID_OPTION_EXPRESSIONS = [
re.compile(r"^--parent-(?P<type>.+)-(identifier|name)$"),
re.compile(r"^--(?P<type>.+)-identifier$"),
]
class OptionHelper(object):
@staticmethod
def is_parent_id_option(option):
"""
Checks if the given option name is a reference to a parent
entity.
"""
for parent_id_option_expression in PARENT_ID_OPTION_EXPRESSIONS:
if parent_id_option_expression.match(option):
return True
return False
@staticmethod
def get_parent_id_type(option):
"""
Extracts the name of the type from an option that is a reference to
a parent entity. For example, if the option is "--parent-host-name"
this method will return "host".
"""
for parent_id_option_expression in PARENT_ID_OPTION_EXPRESSIONS:
match = parent_id_option_expression.match(option)
if match is not None:
return match.group("type")
return None
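# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original module): how the two class
# methods above are expected to behave. The option names are made up purely
# for illustration.
def _option_helper_sketch():
    assert OptionHelper.is_parent_id_option("--parent-host-name")
    assert OptionHelper.is_parent_id_option("--cluster-identifier")
    assert not OptionHelper.is_parent_id_option("--name")
    assert OptionHelper.get_parent_id_type("--parent-host-name") == "host"
    assert OptionHelper.get_parent_id_type("--cluster-identifier") == "cluster"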
| {
"content_hash": "26758469080afcb8178b1dd81b1e25c9",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 75,
"avg_line_length": 34.56410256410256,
"alnum_prop": 0.6431750741839762,
"repo_name": "oVirt/ovirt-engine-cli",
"id": "edf626e093502d7a0590730baa68f054a290c9b8",
"size": "1932",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/ovirtcli/utils/optionhelper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "870"
},
{
"name": "Python",
"bytes": "365026"
},
{
"name": "Shell",
"bytes": "309"
}
],
"symlink_target": ""
} |
import os
import unittest
import sys
from contextlib import contextmanager
from io import StringIO
from version.version import Version, VersionUtils, parse_version
from version.cli import main
class TestSemanticVersion(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.version = Version("pyversion")
cls.v1 = parse_version("1!1.2.3")
cls.v2 = parse_version("1.2.3.post2")
cls.v3 = parse_version("1.2.3.a1")
cls.v4 = parse_version("1.2.a1")
cls.v5 = parse_version("2014b")
cls.v6 = parse_version("2.1.3.45.654")
cls.v7 = parse_version("1.2.3.g39485hdjk")
def test_version_obj(self):
v = Version("pyversion")
v2 = VersionUtils.increment(v)
self.assertNotEqual(v, v2)
self.assertEqual(v, self.version)
def test_unknown_package(self):
v = Version("jd85kd9f0")
v2 = VersionUtils.increment(v)
self.assertNotEqual(v, v2)
def test_release_version_override(self):
os.environ["RELEASE_VERSION"] = "2.4.5.6.7.8"
v = Version("pyversion")
v2 = VersionUtils.increment(v)
self.assertNotEqual(v, v2)
self.assertEqual(v, self.version)
self.assertEqual(v2, "2.4.5.6.7.8")
def test_increment_epoch(self):
os.environ["RELEASE_TYPE"] = "epoch"
v1 = VersionUtils.increment(self.v1)
v2 = VersionUtils.increment(self.v2)
v3 = VersionUtils.increment(self.v3)
v4 = VersionUtils.increment(self.v4)
v5 = VersionUtils.increment(self.v5)
v6 = VersionUtils.increment(self.v6)
self.assertEqual(v1, "2!1.0.0")
self.assertEqual(v2, "1!1.0.0")
self.assertEqual(v3, "1!1.0.0")
self.assertEqual(v4, "1!1.0.0")
self.assertEqual(v5, "1!1.0.0")
self.assertEqual(v6, "1!1.0.0")
def test_increment_micro(self):
os.environ["RELEASE_TYPE"] = "micro"
v1 = VersionUtils.increment(self.v1)
v2 = VersionUtils.increment(self.v2)
v3 = VersionUtils.increment(self.v3)
v4 = VersionUtils.increment(self.v4)
v5 = VersionUtils.increment(self.v5)
v6 = VersionUtils.increment(self.v6)
self.assertEqual(v1, "1!1.2.4")
self.assertEqual(v2, "1.2.4")
self.assertEqual(v3, "1.2.3")
self.assertEqual(v4, "1.2.1")
self.assertEqual(v5, "2014.0.1")
self.assertEqual(v6, "2.1.4")
def test_increment_minor(self):
os.environ["RELEASE_TYPE"] = "minor"
v1 = VersionUtils.increment(self.v1)
v2 = VersionUtils.increment(self.v2)
v3 = VersionUtils.increment(self.v3)
v4 = VersionUtils.increment(self.v4)
v5 = VersionUtils.increment(self.v5)
v6 = VersionUtils.increment(self.v6)
self.assertEqual(v1, "1!1.3.0")
self.assertEqual(v2, "1.3.0")
self.assertEqual(v3, "1.3.0")
self.assertEqual(v4, "1.3")
self.assertEqual(v5, "2014.1")
self.assertEqual(v6, "2.2.0")
def test_increment_major(self):
os.environ["RELEASE_TYPE"] = "major"
v1 = VersionUtils.increment(self.v1)
v2 = VersionUtils.increment(self.v2)
v3 = VersionUtils.increment(self.v3)
v4 = VersionUtils.increment(self.v4)
v5 = VersionUtils.increment(self.v5)
v6 = VersionUtils.increment(self.v6)
self.assertEqual(v1, "1!2.0.0")
self.assertEqual(v2, "2.0.0")
self.assertEqual(v3, "2.0.0")
self.assertEqual(v4, "2.0")
self.assertEqual(v5, "2015")
self.assertEqual(v6, "3.0.0")
def test_increment_pre_release(self):
os.environ["RELEASE_TYPE"] = "pre"
v1 = VersionUtils.increment(self.v1)
v2 = VersionUtils.increment(self.v2)
v3 = VersionUtils.increment(self.v3)
v4 = VersionUtils.increment(self.v4)
v5 = VersionUtils.increment(self.v5)
v6 = VersionUtils.increment(self.v6)
self.assertEqual(v1, "1!1.2.4.pre1")
self.assertEqual(v2, "1.2.4.pre1")
self.assertEqual(v3, "1.2.3a2")
self.assertEqual(v4, "1.2a2")
self.assertEqual(v5, "2014b1")
self.assertEqual(v6, "2.1.4.pre1")
def test_increment_dev_release(self):
os.environ["RELEASE_TYPE"] = "dev"
v1 = VersionUtils.increment(self.v1)
v2 = VersionUtils.increment(self.v2)
v3 = VersionUtils.increment(self.v3)
v4 = VersionUtils.increment(self.v4)
v5 = VersionUtils.increment(self.v5)
v6 = VersionUtils.increment(self.v6)
@contextmanager
def capture_output(self):
new_out, new_err = StringIO(), StringIO()
old_out, old_err = sys.stdout, sys.stderr
try:
sys.stdout, sys.stderr = new_out, new_err
yield sys.stdout, sys.stderr
finally:
sys.stdout, sys.stderr = old_out, old_err
def test_cli_pypi_package(self):
test_package = "flask-ask"
with self.capture_output() as (out, err):
main([test_package])
output = out.getvalue().strip()
self.assertEqual(
output.split("\n")[0],
f"get_version_from_pkg_resources: The '{test_package}' distribution was not found and is required by the application",
)
with self.capture_output() as (out, err):
main([test_package, "increment"])
output_inc = out.getvalue().strip()
self.assertEqual(output_inc.split("\n")[1][:-1], output.split("\n")[1][:-1])
def test_unknown_cli(self):
test_package = "unknown_package"
with self.capture_output() as (out, err):
main([test_package])
output = out.getvalue().strip()
self.assertEqual(
output.split("\n")[0],
f"get_version_from_pkg_resources: The '{test_package}' distribution was not found and is required by the application",
)
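# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original test module): the pattern the
# tests above rely on, shown outside of unittest. RELEASE_TYPE selects which
# part of the version VersionUtils.increment bumps; the expected strings
# simply restate values already asserted in the tests above.
def _increment_sketch():
    os.environ.pop("RELEASE_VERSION", None)  # clear any override set earlier
    os.environ["RELEASE_TYPE"] = "minor"
    assert VersionUtils.increment(parse_version("1.2.3.post2")) == "1.3.0"
    os.environ["RELEASE_TYPE"] = "major"
    assert VersionUtils.increment(parse_version("2014b")) == "2015"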
| {
"content_hash": "d00431dd21ef57c97b4f923317af95f5",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 130,
"avg_line_length": 37.22012578616352,
"alnum_prop": 0.6013856032443393,
"repo_name": "rocktavious/pyversion",
"id": "c31a1e7677e5e7c18aac1b4221c630397e693096",
"size": "5918",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "version_tests/test_version.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18368"
}
],
"symlink_target": ""
} |
import json
from unittest.mock import ANY, MagicMock, Mock, call, patch
import pytest
import tornado
from jupyterlab_git.git import Git
from jupyterlab_git.handlers import NAMESPACE, setup_handlers, GitHandler
from .testutils import assert_http_error, maybe_future
from tornado.httpclient import HTTPClientError
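# ---------------------------------------------------------------------------
# Editor's note (not part of the original test module): ``maybe_future`` is
# imported from .testutils and its implementation is not shown here. The mocks
# below assign ``maybe_future(value)`` as a return_value that the handler code
# then awaits, so any wrapper producing an awaitable works. A minimal stand-in
# under that assumption might look like:
async def _maybe_future_sketch(value):
    # Calling this coroutine function yields an awaitable whose result is
    # ``value``, mirroring how the mocked Git methods are consumed.
    return value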
def test_mapping_added():
mock_web_app = Mock()
mock_web_app.settings = {"base_url": "nb_base_url"}
setup_handlers(mock_web_app)
mock_web_app.add_handlers.assert_called_once_with(".*", ANY)
@pytest.mark.parametrize(
"path, with_cm", (("url", False), ("url/to/path", False), ("url/to/path", True))
)
def test_GitHandler_url2localpath(path, with_cm, jp_web_app, jp_root_dir):
req = tornado.httputil.HTTPServerRequest()
req.connection = MagicMock()
handler = GitHandler(jp_web_app, req)
if with_cm:
assert (
str(jp_root_dir / path),
handler.contents_manager,
) == handler.url2localpath(path, with_cm)
else:
assert str(jp_root_dir / path) == handler.url2localpath(path, with_cm)
@patch("jupyterlab_git.handlers.GitAllHistoryHandler.git", spec=Git)
async def test_all_history_handler_localbranch(mock_git, jp_fetch, jp_root_dir):
# Given
show_top_level = {"code": 0, "path": "foo"}
branch = "branch_foo"
log = "log_foo"
status = "status_foo"
local_path = jp_root_dir / "test_path"
mock_git.show_top_level.return_value = maybe_future(show_top_level)
mock_git.branch.return_value = maybe_future(branch)
mock_git.log.return_value = maybe_future(log)
mock_git.status.return_value = maybe_future(status)
# When
body = {"history_count": 25}
response = await jp_fetch(
NAMESPACE, local_path.name, "all_history", body=json.dumps(body), method="POST"
)
# Then
mock_git.show_top_level.assert_called_with(str(local_path))
mock_git.branch.assert_called_with(str(local_path))
mock_git.log.assert_called_with(str(local_path), 25)
mock_git.status.assert_called_with(str(local_path))
assert response.code == 200
payload = json.loads(response.body)
assert payload == {
"code": show_top_level["code"],
"data": {
"show_top_level": show_top_level,
"branch": branch,
"log": log,
"status": status,
},
}
@patch("jupyterlab_git.git.execute")
async def test_git_show_prefix(mock_execute, jp_fetch, jp_root_dir):
# Given
path = "path/to/repo"
local_path = jp_root_dir / "test_path"
mock_execute.return_value = maybe_future((0, str(path), ""))
# When
response = await jp_fetch(
NAMESPACE,
local_path.name + "/subfolder",
"show_prefix",
body="{}",
method="POST",
)
# Then
assert response.code == 200
payload = json.loads(response.body)
assert payload["path"] == str(path)
mock_execute.assert_has_calls(
[
call(
["git", "rev-parse", "--show-prefix"],
cwd=str(local_path / "subfolder"),
),
]
)
async def test_git_show_prefix_for_excluded_path(
jp_fetch, jp_server_config, jp_root_dir
):
local_path = jp_root_dir / "ignored-path"
try:
response = await jp_fetch(
NAMESPACE,
local_path.name + "/subdir",
"show_prefix",
body="{}",
method="POST",
)
except HTTPClientError as e:
assert e.code == 404
@patch("jupyterlab_git.git.execute")
async def test_git_show_prefix_not_a_git_repo(mock_execute, jp_fetch, jp_root_dir):
# Given
local_path = jp_root_dir / "test_path"
mock_execute.return_value = maybe_future(
(128, "", "fatal: not a git repository (or any")
)
# When
response = await jp_fetch(
NAMESPACE,
local_path.name + "/subfolder",
"show_prefix",
body="{}",
method="POST",
)
# Then
assert response.code == 200
payload = json.loads(response.body)
assert payload["path"] is None
mock_execute.assert_has_calls(
[
call(
["git", "rev-parse", "--show-prefix"],
cwd=str(local_path / "subfolder"),
),
]
)
@patch("jupyterlab_git.git.execute")
async def test_git_show_top_level(mock_execute, jp_fetch, jp_root_dir):
# Given
path = "path/to/repo"
local_path = jp_root_dir / "test_path"
mock_execute.return_value = maybe_future((0, str(path), ""))
# When
response = await jp_fetch(
NAMESPACE,
local_path.name + "/subfolder",
"show_top_level",
body="{}",
method="POST",
)
# Then
assert response.code == 200
payload = json.loads(response.body)
assert payload["path"] == str(path)
mock_execute.assert_has_calls(
[
call(
["git", "rev-parse", "--show-toplevel"],
cwd=str(local_path / "subfolder"),
),
]
)
@patch("jupyterlab_git.git.execute")
async def test_git_show_top_level_not_a_git_repo(mock_execute, jp_fetch, jp_root_dir):
# Given
local_path = jp_root_dir / "test_path"
mock_execute.return_value = maybe_future(
(128, "", "fatal: not a git repository (or any")
)
# When
response = await jp_fetch(
NAMESPACE,
local_path.name + "/subfolder",
"show_top_level",
body="{}",
method="POST",
)
# Then
assert response.code == 200
payload = json.loads(response.body)
assert payload["path"] is None
mock_execute.assert_has_calls(
[
call(
["git", "rev-parse", "--show-toplevel"],
cwd=str(local_path / "subfolder"),
),
]
)
@patch("jupyterlab_git.handlers.GitBranchHandler.git", spec=Git)
async def test_branch_handler_localbranch(mock_git, jp_fetch, jp_root_dir):
# Given
local_path = jp_root_dir / "test_path"
branch = {
"code": 0,
"branches": [
{
"is_current_branch": True,
"is_remote_branch": False,
"name": "feature-foo",
"upstream": "origin/feature-foo",
"top_commit": "abcdefghijklmnopqrstuvwxyz01234567890123",
"tag": None,
},
{
"is_current_branch": False,
"is_remote_branch": False,
"name": "master",
"upstream": "origin/master",
"top_commit": "abcdefghijklmnopqrstuvwxyz01234567890123",
"tag": None,
},
{
"is_current_branch": False,
"is_remote_branch": False,
"name": "feature-bar",
"upstream": None,
"top_commit": "01234567899999abcdefghijklmnopqrstuvwxyz",
"tag": None,
},
{
"is_current_branch": False,
"is_remote_branch": True,
"name": "origin/feature-foo",
"upstream": None,
"top_commit": "abcdefghijklmnopqrstuvwxyz01234567890123",
"tag": None,
},
{
"is_current_branch": False,
"is_remote_branch": True,
"name": "origin/master",
"upstream": None,
"top_commit": "abcdefghijklmnopqrstuvwxyz01234567890123",
"tag": None,
},
],
}
mock_git.branch.return_value = maybe_future(branch)
# When
response = await jp_fetch(
NAMESPACE, local_path.name, "branch", body="{}", method="POST"
)
# Then
mock_git.branch.assert_called_with(str(local_path))
assert response.code == 200
payload = json.loads(response.body)
assert payload == {"code": 0, "branches": branch["branches"]}
@patch("jupyterlab_git.handlers.GitLogHandler.git", spec=Git)
async def test_log_handler(mock_git, jp_fetch, jp_root_dir):
# Given
local_path = jp_root_dir / "test_path"
log = {"code": 0, "commits": []}
mock_git.log.return_value = maybe_future(log)
# When
body = {"history_count": 20}
response = await jp_fetch(
NAMESPACE, local_path.name, "log", body=json.dumps(body), method="POST"
)
# Then
mock_git.log.assert_called_with(str(local_path), 20, None)
assert response.code == 200
payload = json.loads(response.body)
assert payload == log
@patch("jupyterlab_git.handlers.GitLogHandler.git", spec=Git)
async def test_log_handler_no_history_count(mock_git, jp_fetch, jp_root_dir):
# Given
local_path = jp_root_dir / "test_path"
log = {"code": 0, "commits": []}
mock_git.log.return_value = maybe_future(log)
# When
response = await jp_fetch(
NAMESPACE, local_path.name, "log", body="{}", method="POST"
)
# Then
mock_git.log.assert_called_with(str(local_path), 25, None)
assert response.code == 200
payload = json.loads(response.body)
assert payload == log
@patch("jupyterlab_git.handlers.GitPushHandler.git", spec=Git)
async def test_push_handler_localbranch(mock_git, jp_fetch, jp_root_dir):
# Given
local_path = jp_root_dir / "test_path"
mock_git.get_current_branch.return_value = maybe_future("localbranch")
mock_git.get_upstream_branch.return_value = maybe_future(
{"code": 0, "remote_short_name": ".", "remote_branch": "localbranch"}
)
mock_git.push.return_value = maybe_future({"code": 0})
# When
response = await jp_fetch(
NAMESPACE, local_path.name, "push", body="{}", method="POST"
)
# Then
mock_git.get_current_branch.assert_called_with(str(local_path))
mock_git.get_upstream_branch.assert_called_with(str(local_path), "localbranch")
mock_git.push.assert_called_with(
".", "HEAD:localbranch", str(local_path), None, False, False
)
assert response.code == 200
payload = json.loads(response.body)
assert payload == {"code": 0}
@patch("jupyterlab_git.handlers.GitPushHandler.git", spec=Git)
async def test_push_handler_remotebranch(mock_git, jp_fetch, jp_root_dir):
# Given
local_path = jp_root_dir / "test_path"
mock_git.get_current_branch.return_value = maybe_future("foo/bar")
upstream = {
"code": 0,
"remote_short_name": "origin/something",
"remote_branch": "remote-branch-name",
}
mock_git.get_upstream_branch.return_value = maybe_future(upstream)
mock_git.push.return_value = maybe_future({"code": 0})
# When
response = await jp_fetch(
NAMESPACE, local_path.name, "push", body="{}", method="POST"
)
# Then
mock_git.get_current_branch.assert_called_with(str(local_path))
mock_git.get_upstream_branch.assert_called_with(str(local_path), "foo/bar")
mock_git.push.assert_called_with(
"origin/something",
"HEAD:remote-branch-name",
str(local_path),
None,
False,
False,
)
assert response.code == 200
payload = json.loads(response.body)
assert payload == {"code": 0}
@patch("jupyterlab_git.handlers.GitPushHandler.git", spec=Git)
async def test_push_handler_noupstream(mock_git, jp_fetch, jp_root_dir):
# Given
local_path = jp_root_dir / "test_path"
mock_git.get_current_branch.return_value = maybe_future("foo")
upstream = {
"code": 128,
"command": "",
"message": "fatal: no upstream configured for branch 'foo'",
}
mock_git.get_upstream_branch.return_value = maybe_future(upstream)
mock_git.config.return_value = maybe_future({"options": dict()})
mock_git.remote_show.return_value = maybe_future({})
mock_git.push.return_value = maybe_future({"code": 0})
# When
with pytest.raises(tornado.httpclient.HTTPClientError) as e:
await jp_fetch(NAMESPACE, local_path.name, "push", body="{}", method="POST")
response = e.value.response
# Then
mock_git.get_current_branch.assert_called_with(str(local_path))
mock_git.get_upstream_branch.assert_called_with(str(local_path), "foo")
mock_git.config.assert_called_with(str(local_path))
mock_git.remote_show.assert_called_with(str(local_path))
mock_git.push.assert_not_called()
assert response.code == 500
payload = json.loads(response.body)
assert payload == {
"code": 128,
"message": "fatal: The current branch foo has no upstream branch.",
"remotes": list(),
}
@patch("jupyterlab_git.handlers.GitPushHandler.git", spec=Git)
async def test_push_handler_multipleupstream(mock_git, jp_fetch, jp_root_dir):
# Given
local_path = jp_root_dir / "test_path"
remotes = ["origin", "upstream"]
mock_git.get_current_branch.return_value = maybe_future("foo")
upstream = {"code": -1, "message": "oups"}
mock_git.get_upstream_branch.return_value = maybe_future(upstream)
mock_git.config.return_value = maybe_future({"options": dict()})
mock_git.remote_show.return_value = maybe_future({"remotes": remotes})
mock_git.push.return_value = maybe_future({"code": 0})
# When
with pytest.raises(tornado.httpclient.HTTPClientError) as e:
await jp_fetch(NAMESPACE, local_path.name, "push", body="{}", method="POST")
response = e.value.response
# Then
mock_git.get_current_branch.assert_called_with(str(local_path))
mock_git.get_upstream_branch.assert_called_with(str(local_path), "foo")
mock_git.config.assert_called_with(str(local_path))
mock_git.remote_show.assert_called_with(str(local_path))
mock_git.push.assert_not_called()
assert response.code == 500
payload = json.loads(response.body)
assert payload == {
"code": 128,
"message": "fatal: The current branch foo has no upstream branch.",
"remotes": remotes,
}
@patch("jupyterlab_git.handlers.GitPushHandler.git", spec=Git)
async def test_push_handler_noupstream_unique_remote(mock_git, jp_fetch, jp_root_dir):
# Given
local_path = jp_root_dir / "test_path"
remote = "origin"
mock_git.get_current_branch.return_value = maybe_future("foo")
upstream = {"code": -1, "message": "oups"}
mock_git.get_upstream_branch.return_value = maybe_future(upstream)
mock_git.config.return_value = maybe_future({"options": dict()})
mock_git.remote_show.return_value = maybe_future({"remotes": [remote]})
mock_git.push.return_value = maybe_future({"code": 0})
# When
response = await jp_fetch(
NAMESPACE, local_path.name, "push", body="{}", method="POST"
)
# Then
mock_git.get_current_branch.assert_called_with(str(local_path))
mock_git.get_upstream_branch.assert_called_with(str(local_path), "foo")
mock_git.config.assert_called_with(str(local_path))
mock_git.remote_show.assert_called_with(str(local_path))
mock_git.push.assert_called_with(
remote,
"foo",
str(local_path),
None,
set_upstream=True,
force=False,
)
assert response.code == 200
payload = json.loads(response.body)
assert payload == {"code": 0}
@patch("jupyterlab_git.handlers.GitPushHandler.git", spec=Git)
async def test_push_handler_noupstream_pushdefault(mock_git, jp_fetch, jp_root_dir):
# Given
local_path = jp_root_dir / "test_path"
remote = "rorigin"
mock_git.get_current_branch.return_value = maybe_future("foo")
upstream = {"code": -1, "message": "oups"}
mock_git.get_upstream_branch.return_value = maybe_future(upstream)
mock_git.config.return_value = maybe_future(
{"options": {"remote.pushdefault": remote}}
)
mock_git.remote_show.return_value = maybe_future({"remotes": [remote, "upstream"]})
mock_git.push.return_value = maybe_future({"code": 0})
# When
response = await jp_fetch(
NAMESPACE, local_path.name, "push", body="{}", method="POST"
)
# Then
mock_git.get_current_branch.assert_called_with(str(local_path))
mock_git.get_upstream_branch.assert_called_with(str(local_path), "foo")
mock_git.config.assert_called_with(str(local_path))
mock_git.remote_show.assert_called_with(str(local_path))
mock_git.push.assert_called_with(
remote,
"foo",
str(local_path),
None,
set_upstream=True,
force=False,
)
assert response.code == 200
payload = json.loads(response.body)
assert payload == {"code": 0}
@patch("jupyterlab_git.handlers.GitPushHandler.git", spec=Git)
async def test_push_handler_noupstream_pass_remote_nobranch(
mock_git, jp_fetch, jp_root_dir
):
# Given
local_path = jp_root_dir / "test_path"
mock_git.get_current_branch.return_value = maybe_future("foo")
upstream = {"code": -1, "message": "oups"}
mock_git.get_upstream_branch.return_value = maybe_future(upstream)
mock_git.config.return_value = maybe_future({"options": dict()})
mock_git.remote_show.return_value = maybe_future({})
mock_git.push.return_value = maybe_future({"code": 0})
remote = "online"
# When
body = {"remote": remote}
response = await jp_fetch(
NAMESPACE, local_path.name, "push", body=json.dumps(body), method="POST"
)
# Then
mock_git.get_current_branch.assert_called_with(str(local_path))
mock_git.get_upstream_branch.assert_called_with(str(local_path), "foo")
mock_git.config.assert_not_called()
mock_git.remote_show.assert_not_called()
mock_git.push.assert_called_with(
remote, "HEAD:foo", str(local_path), None, True, False
)
assert response.code == 200
payload = json.loads(response.body)
assert payload == {"code": 0}
@patch("jupyterlab_git.handlers.GitPushHandler.git", spec=Git)
async def test_push_handler_noupstream_pass_remote_branch(
mock_git, jp_fetch, jp_root_dir
):
# Given
local_path = jp_root_dir / "test_path"
mock_git.get_current_branch.return_value = maybe_future("foo")
upstream = {"code": -1, "message": "oups"}
mock_git.get_upstream_branch.return_value = maybe_future(upstream)
mock_git.config.return_value = maybe_future({"options": dict()})
mock_git.remote_show.return_value = maybe_future({})
mock_git.push.return_value = maybe_future({"code": 0})
remote = "online"
remote_branch = "onfoo"
# When
body = {"remote": "/".join((remote, remote_branch))}
response = await jp_fetch(
NAMESPACE, local_path.name, "push", body=json.dumps(body), method="POST"
)
# Then
mock_git.get_current_branch.assert_called_with(str(local_path))
mock_git.get_upstream_branch.assert_called_with(str(local_path), "foo")
mock_git.config.assert_not_called()
mock_git.remote_show.assert_not_called()
mock_git.push.assert_called_with(
remote, "HEAD:" + remote_branch, str(local_path), None, True, False
)
assert response.code == 200
payload = json.loads(response.body)
assert payload == {"code": 0}
@patch("jupyterlab_git.handlers.GitUpstreamHandler.git", spec=Git)
async def test_upstream_handler_forward_slashes(mock_git, jp_fetch, jp_root_dir):
# Given
local_path = jp_root_dir / "test_path"
mock_git.get_current_branch.return_value = maybe_future("foo/bar")
upstream = {
"code": 0,
"remote_short_name": "origin/something",
"remote_branch": "foo/bar",
}
mock_git.get_upstream_branch.return_value = maybe_future(upstream)
# When
response = await jp_fetch(
NAMESPACE, local_path.name, "upstream", body="{}", method="POST"
)
# Then
mock_git.get_current_branch.assert_called_with(str(local_path))
mock_git.get_upstream_branch.assert_called_with(str(local_path), "foo/bar")
assert response.code == 200
payload = json.loads(response.body)
assert payload == upstream
@patch("jupyterlab_git.handlers.GitUpstreamHandler.git", spec=Git)
async def test_upstream_handler_localbranch(mock_git, jp_fetch, jp_root_dir):
# Given
local_path = jp_root_dir / "test_path"
mock_git.get_current_branch.return_value = maybe_future("foo/bar")
upstream = {"code": 0, "remote_short_name": ".", "remote_branch": "foo/bar"}
mock_git.get_upstream_branch.return_value = maybe_future(upstream)
# When
response = await jp_fetch(
NAMESPACE, local_path.name, "upstream", body="{}", method="POST"
)
# Then
mock_git.get_current_branch.assert_called_with(str(local_path))
mock_git.get_upstream_branch.assert_called_with(str(local_path), "foo/bar")
assert response.code == 200
payload = json.loads(response.body)
assert payload == upstream
@patch("jupyterlab_git.git.execute")
async def test_content(mock_execute, jp_fetch, jp_root_dir):
# Given
local_path = jp_root_dir / "test_path"
filename = "my/file"
content = "dummy content file\nwith multiple lines"
mock_execute.side_effect = [
maybe_future((0, "1\t1\t{}".format(filename), "")),
maybe_future((0, content, "")),
]
# When
body = {
"filename": filename,
"reference": {"git": "previous"},
}
response = await jp_fetch(
NAMESPACE, local_path.name, "content", body=json.dumps(body), method="POST"
)
# Then
assert response.code == 200
payload = json.loads(response.body)
assert payload["content"] == content
mock_execute.assert_has_calls(
[
call(
["git", "show", "{}:{}".format("previous", filename)],
cwd=str(local_path),
),
],
)
async def test_content_working(jp_fetch, jp_root_dir):
# Given
local_path = jp_root_dir / "test_path"
filename = "my/file"
content = "dummy content file\nwith multiple lines"
dummy_file = local_path / filename
dummy_file.parent.mkdir(parents=True)
dummy_file.write_text(content)
# When
body = {
"filename": filename,
"reference": {"special": "WORKING"},
}
response = await jp_fetch(
NAMESPACE, local_path.name, "content", body=json.dumps(body), method="POST"
)
# Then
assert response.code == 200
payload = json.loads(response.body)
assert payload["content"] == content
async def test_content_notebook_working(jp_fetch, jp_root_dir):
# Given
local_path = jp_root_dir / "test_path"
filename = "my/notebook.ipynb"
content = """{
"cells": [],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"mimetype": "text/x-python",
"name": "python"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
"""
dummy_file = local_path / filename
dummy_file.parent.mkdir(parents=True)
dummy_file.write_text(content)
# When
body = {
"filename": filename,
"reference": {"special": "WORKING"},
}
response = await jp_fetch(
NAMESPACE, local_path.name, "content", body=json.dumps(body), method="POST"
)
# Then
assert response.code == 200
payload = json.loads(response.body)
assert payload["content"] == content
@patch("jupyterlab_git.git.execute")
async def test_content_index(mock_execute, jp_fetch, jp_root_dir):
# Given
local_path = jp_root_dir / "test_path"
filename = "my/file"
content = "dummy content file\nwith multiple lines"
mock_execute.side_effect = [
maybe_future((0, "1\t1\t{}".format(filename), "")),
maybe_future((0, content, "")),
]
# When
body = {
"filename": filename,
"reference": {"special": "INDEX"},
}
response = await jp_fetch(
NAMESPACE, local_path.name, "content", body=json.dumps(body), method="POST"
)
# Then
assert response.code == 200
payload = json.loads(response.body)
assert payload["content"] == content
mock_execute.assert_has_calls(
[
call(
["git", "show", "{}:{}".format("", filename)],
cwd=str(local_path),
),
],
)
@patch("jupyterlab_git.git.execute")
async def test_content_base(mock_execute, jp_fetch, jp_root_dir):
# Given
local_path = jp_root_dir / "test_path"
filename = "my/file"
content = "dummy content file\nwith multiple lines"
obj_ref = "915bb14609daab65e5304e59d89c626283ae49fc"
mock_execute.side_effect = [
maybe_future(
(
0,
"100644 {1} 1 {0}\x00100644 285bdbc14e499b85ec407512a3bb3992fa3d4082 2 {0}\x00100644 66ac842dfb0b5c20f757111d6b3edd56d80622b4 3 {0}\x00".format(
filename, obj_ref
),
"",
)
),
maybe_future((0, content, "")),
]
# When
body = {
"filename": filename,
"reference": {"special": "BASE"},
}
response = await jp_fetch(
NAMESPACE, local_path.name, "content", body=json.dumps(body), method="POST"
)
# Then
assert response.code == 200
payload = json.loads(response.body)
assert payload["content"] == content
mock_execute.assert_has_calls(
[
call(
["git", "show", obj_ref],
cwd=str(local_path),
),
],
)
@patch("jupyterlab_git.git.execute")
async def test_content_unknown_special(mock_execute, jp_fetch, jp_root_dir):
# Given
local_path = jp_root_dir / "test_path"
filename = "my/file"
content = "dummy content file\nwith multiple lines"
mock_execute.side_effect = [
maybe_future((0, "1\t1\t{}".format(filename), "")),
maybe_future((0, content, "")),
]
# When
body = {
"filename": filename,
"reference": {"special": "unknown"},
}
with pytest.raises(tornado.httpclient.HTTPClientError) as e:
await jp_fetch(
NAMESPACE, local_path.name, "content", body=json.dumps(body), method="POST"
)
assert_http_error(e, 500, expected_message="unknown special ref")
@patch("jupyterlab_git.git.execute")
async def test_content_show_handled_error(mock_execute, jp_fetch, jp_root_dir):
# Given
local_path = jp_root_dir / "test_path"
filename = "my/file"
mock_execute.return_value = maybe_future(
(
-1,
"",
"fatal: Path '{}' does not exist (neither on disk nor in the index)".format(
filename
),
)
)
# When
body = {
"filename": filename,
"reference": {"git": "current"},
}
response = await jp_fetch(
NAMESPACE, local_path.name, "content", body=json.dumps(body), method="POST"
)
# Then
assert response.code == 200
payload = json.loads(response.body)
assert payload["content"] == ""
@patch("jupyterlab_git.git.execute")
async def test_content_binary(mock_execute, jp_fetch, jp_root_dir):
# Given
local_path = jp_root_dir / "test_path"
filename = "my/file"
mock_execute.return_value = maybe_future((0, "-\t-\t{}".format(filename), ""))
# When
body = {
"filename": filename,
"reference": {"git": "current"},
}
# Then
with pytest.raises(tornado.httpclient.HTTPClientError) as e:
await jp_fetch(
NAMESPACE, local_path.name, "content", body=json.dumps(body), method="POST"
)
assert_http_error(e, 500, expected_message="file is not UTF-8")
@patch("jupyterlab_git.git.execute")
async def test_content_show_unhandled_error(mock_execute, jp_fetch, jp_root_dir):
# Given
local_path = jp_root_dir / "test_path"
filename = "my/file"
mock_execute.return_value = maybe_future((-1, "", "Dummy error"))
# When
body = {
"filename": filename,
"reference": {"git": "current"},
}
# Then
with pytest.raises(tornado.httpclient.HTTPClientError) as e:
await jp_fetch(
NAMESPACE, local_path.name, "content", body=json.dumps(body), method="POST"
)
assert_http_error(e, 500, expected_message="Dummy error")
@patch("jupyterlab_git.git.execute")
async def test_content_getcontent_deleted_file(mock_execute, jp_fetch, jp_root_dir):
# Given
local_path = jp_root_dir / "test_path"
filename = "my/absent_file"
content = "dummy content file\nwith multiple lines"
# When
body = {
"filename": filename,
"reference": {"special": "WORKING"},
}
response = await jp_fetch(
NAMESPACE, local_path.name, "content", body=json.dumps(body), method="POST"
)
# Then
assert response.code == 200
payload = json.loads(response.body)
assert payload["content"] == ""
| {
"content_hash": "5984b3ae6c72cbb4bce0a2834da177c7",
"timestamp": "",
"source": "github",
"line_count": 954,
"max_line_length": 166,
"avg_line_length": 30.243186582809223,
"alnum_prop": 0.6031470955219742,
"repo_name": "jupyterlab/jupyterlab-git",
"id": "d9626795f48fbebe3a9e50c6a45bbccfe7f773d7",
"size": "28852",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jupyterlab_git/tests/test_handlers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "15091"
},
{
"name": "JavaScript",
"bytes": "2954"
},
{
"name": "Jupyter Notebook",
"bytes": "12291"
},
{
"name": "Python",
"bytes": "239443"
},
{
"name": "TypeScript",
"bytes": "594889"
}
],
"symlink_target": ""
} |
"""Define resources for the BigQuery Routines API."""
from google.protobuf import json_format
import six
import google.cloud._helpers
from google.cloud.bigquery import _helpers
import google.cloud.bigquery_v2.types
class Routine(object):
"""Resource representing a user-defined routine.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/routines
Args:
routine_ref (Union[str, google.cloud.bigquery.routine.RoutineReference]):
A pointer to a routine. If ``routine_ref`` is a string, it must
            include a project ID, dataset ID, and routine ID, each separated
by ``.``.
``**kwargs`` (Dict):
Initial property values.
"""
_PROPERTY_TO_API_FIELD = {
"arguments": "arguments",
"body": "definitionBody",
"created": "creationTime",
"etag": "etag",
"imported_libraries": "importedLibraries",
"language": "language",
"modified": "lastModifiedTime",
"reference": "routineReference",
"return_type": "returnType",
"type_": "routineType",
}
def __init__(self, routine_ref, **kwargs):
if isinstance(routine_ref, six.string_types):
routine_ref = RoutineReference.from_string(routine_ref)
self._properties = {"routineReference": routine_ref.to_api_repr()}
for property_name in kwargs:
setattr(self, property_name, kwargs[property_name])
@property
def reference(self):
"""google.cloud.bigquery.routine.RoutineReference: Reference
describing the ID of this routine.
"""
return RoutineReference.from_api_repr(
self._properties[self._PROPERTY_TO_API_FIELD["reference"]]
)
@property
def path(self):
"""str: URL path for the routine's APIs."""
return self.reference.path
@property
def project(self):
"""str: ID of the project containing the routine."""
return self.reference.project
@property
def dataset_id(self):
"""str: ID of dataset containing the routine."""
return self.reference.dataset_id
@property
def routine_id(self):
"""str: The routine ID."""
return self.reference.routine_id
@property
def etag(self):
"""str: ETag for the resource (:data:`None` until set from the
server).
Read-only.
"""
return self._properties.get(self._PROPERTY_TO_API_FIELD["etag"])
@property
def type_(self):
"""str: The fine-grained type of the routine.
See:
https://cloud.google.com/bigquery/docs/reference/rest/v2/routines#RoutineType
"""
return self._properties.get(self._PROPERTY_TO_API_FIELD["type_"])
@type_.setter
def type_(self, value):
self._properties[self._PROPERTY_TO_API_FIELD["type_"]] = value
@property
def created(self):
"""Optional[datetime.datetime]: Datetime at which the routine was
created (:data:`None` until set from the server).
Read-only.
"""
value = self._properties.get(self._PROPERTY_TO_API_FIELD["created"])
if value is not None and value != 0:
# value will be in milliseconds.
return google.cloud._helpers._datetime_from_microseconds(
1000.0 * float(value)
)
@property
def modified(self):
"""Optional[datetime.datetime]: Datetime at which the routine was
last modified (:data:`None` until set from the server).
Read-only.
"""
value = self._properties.get(self._PROPERTY_TO_API_FIELD["modified"])
if value is not None and value != 0:
# value will be in milliseconds.
return google.cloud._helpers._datetime_from_microseconds(
1000.0 * float(value)
)
@property
def language(self):
"""Optional[str]: The language of the routine.
Defaults to ``SQL``.
"""
return self._properties.get(self._PROPERTY_TO_API_FIELD["language"])
@language.setter
def language(self, value):
self._properties[self._PROPERTY_TO_API_FIELD["language"]] = value
@property
def arguments(self):
"""List[google.cloud.bigquery.routine.RoutineArgument]: Input/output
argument of a function or a stored procedure.
In-place modification is not supported. To set, replace the entire
property value with the modified list of
:class:`~google.cloud.bigquery.routine.RoutineArgument` objects.
"""
resources = self._properties.get(self._PROPERTY_TO_API_FIELD["arguments"], [])
return [RoutineArgument.from_api_repr(resource) for resource in resources]
@arguments.setter
def arguments(self, value):
if not value:
resource = []
else:
resource = [argument.to_api_repr() for argument in value]
self._properties[self._PROPERTY_TO_API_FIELD["arguments"]] = resource
@property
def return_type(self):
"""google.cloud.bigquery_v2.types.StandardSqlDataType: Return type of
the routine.
If absent, the return type is inferred from
:attr:`~google.cloud.bigquery.routine.Routine.body` at query time in
each query that references this routine. If present, then the
evaluated result will be cast to the specified returned type at query
time.
See:
https://cloud.google.com/bigquery/docs/reference/rest/v2/routines#Routine.FIELDS.return_type
"""
resource = self._properties.get(self._PROPERTY_TO_API_FIELD["return_type"])
if not resource:
return resource
output = google.cloud.bigquery_v2.types.StandardSqlDataType()
output = json_format.ParseDict(resource, output, ignore_unknown_fields=True)
return output
@return_type.setter
def return_type(self, value):
if value:
resource = json_format.MessageToDict(value)
else:
resource = None
self._properties[self._PROPERTY_TO_API_FIELD["return_type"]] = resource
@property
def imported_libraries(self):
"""List[str]: The path of the imported JavaScript libraries.
The :attr:`~google.cloud.bigquery.routine.Routine.language` must
        equal ``JAVASCRIPT``.
Examples:
Set the ``imported_libraries`` to a list of Google Cloud Storage
URIs.
.. code-block:: python
routine = bigquery.Routine("proj.dataset.routine_id")
routine.imported_libraries = [
"gs://cloud-samples-data/bigquery/udfs/max-value.js",
]
"""
return self._properties.get(
self._PROPERTY_TO_API_FIELD["imported_libraries"], []
)
@imported_libraries.setter
def imported_libraries(self, value):
if not value:
resource = []
else:
resource = value
self._properties[self._PROPERTY_TO_API_FIELD["imported_libraries"]] = resource
@property
def body(self):
"""str: The body of the routine."""
return self._properties.get(self._PROPERTY_TO_API_FIELD["body"])
@body.setter
def body(self, value):
self._properties[self._PROPERTY_TO_API_FIELD["body"]] = value
@classmethod
def from_api_repr(cls, resource):
"""Factory: construct a routine given its API representation.
Args:
resource (Dict[str, object]):
Resource, as returned from the API.
Returns:
google.cloud.bigquery.routine.Routine:
Python object, as parsed from ``resource``.
"""
ref = cls(RoutineReference.from_api_repr(resource["routineReference"]))
ref._properties = resource
return ref
def to_api_repr(self):
"""Construct the API resource representation of this routine.
Returns:
Dict[str, object]: Routine represented as an API resource.
"""
return self._properties
def _build_resource(self, filter_fields):
"""Generate a resource for ``update``."""
return _helpers._build_resource_from_properties(self, filter_fields)
def __repr__(self):
return "Routine('{}.{}.{}')".format(
self.project, self.dataset_id, self.routine_id
)
class RoutineArgument(object):
"""Input/output argument of a function or a stored procedure.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/routines#argument
Args:
``**kwargs`` (Dict):
Initial property values.
"""
_PROPERTY_TO_API_FIELD = {
"data_type": "dataType",
"kind": "argumentKind",
# Even though it's not necessary for field mapping to map when the
# property name equals the resource name, we add these here so that we
# have an exhaustive list of all properties.
"name": "name",
"mode": "mode",
}
def __init__(self, **kwargs):
self._properties = {}
for property_name in kwargs:
setattr(self, property_name, kwargs[property_name])
@property
def name(self):
"""Optional[str]: Name of this argument.
Can be absent for function return argument.
"""
return self._properties.get(self._PROPERTY_TO_API_FIELD["name"])
@name.setter
def name(self, value):
self._properties[self._PROPERTY_TO_API_FIELD["name"]] = value
@property
def kind(self):
"""Optional[str]: The kind of argument, for example ``FIXED_TYPE`` or
``ANY_TYPE``.
See:
https://cloud.google.com/bigquery/docs/reference/rest/v2/routines#Argument.FIELDS.argument_kind
"""
return self._properties.get(self._PROPERTY_TO_API_FIELD["kind"])
@kind.setter
def kind(self, value):
self._properties[self._PROPERTY_TO_API_FIELD["kind"]] = value
@property
def mode(self):
"""Optional[str]: The input/output mode of the argument."""
return self._properties.get(self._PROPERTY_TO_API_FIELD["mode"])
@mode.setter
def mode(self, value):
self._properties[self._PROPERTY_TO_API_FIELD["mode"]] = value
@property
def data_type(self):
"""Optional[google.cloud.bigquery_v2.types.StandardSqlDataType]: Type
of a variable, e.g., a function argument.
See:
https://cloud.google.com/bigquery/docs/reference/rest/v2/routines#Argument.FIELDS.data_type
"""
resource = self._properties.get(self._PROPERTY_TO_API_FIELD["data_type"])
if not resource:
return resource
output = google.cloud.bigquery_v2.types.StandardSqlDataType()
output = json_format.ParseDict(resource, output, ignore_unknown_fields=True)
return output
@data_type.setter
def data_type(self, value):
if value:
resource = json_format.MessageToDict(value)
else:
resource = None
self._properties[self._PROPERTY_TO_API_FIELD["data_type"]] = resource
@classmethod
def from_api_repr(cls, resource):
"""Factory: construct a routine argument given its API representation.
Args:
resource (Dict[str, object]): Resource, as returned from the API.
Returns:
google.cloud.bigquery.routine.RoutineArgument:
Python object, as parsed from ``resource``.
"""
ref = cls()
ref._properties = resource
return ref
def to_api_repr(self):
"""Construct the API resource representation of this routine argument.
Returns:
Dict[str, object]: Routine argument represented as an API resource.
"""
return self._properties
def __eq__(self, other):
if not isinstance(other, RoutineArgument):
return NotImplemented
return self._properties == other._properties
def __ne__(self, other):
return not self == other
def __repr__(self):
all_properties = [
"{}={}".format(property_name, repr(getattr(self, property_name)))
for property_name in sorted(self._PROPERTY_TO_API_FIELD)
]
return "RoutineArgument({})".format(", ".join(all_properties))
class RoutineReference(object):
"""A pointer to a routine.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/routines#routinereference
"""
def __init__(self):
self._properties = {}
@property
def project(self):
"""str: ID of the project containing the routine."""
return self._properties["projectId"]
@property
def dataset_id(self):
"""str: ID of dataset containing the routine."""
return self._properties["datasetId"]
@property
def routine_id(self):
"""str: The routine ID."""
return self._properties["routineId"]
@property
def path(self):
"""str: URL path for the routine's APIs."""
return "/projects/%s/datasets/%s/routines/%s" % (
self.project,
self.dataset_id,
self.routine_id,
)
@classmethod
def from_api_repr(cls, resource):
"""Factory: construct a routine reference given its API representation.
Args:
resource (Dict[str, object]):
Routine reference representation returned from the API.
Returns:
google.cloud.bigquery.routine.RoutineReference:
Routine reference parsed from ``resource``.
"""
ref = cls()
ref._properties = resource
return ref
@classmethod
def from_string(cls, routine_id, default_project=None):
"""Factory: construct a routine reference from routine ID string.
Args:
routine_id (str):
A routine ID in standard SQL format. If ``default_project``
                is not specified, this must include a project ID, dataset
ID, and routine ID, each separated by ``.``.
default_project (str):
Optional. The project ID to use when ``routine_id`` does not
include a project ID.
Returns:
google.cloud.bigquery.routine.RoutineReference:
Routine reference parsed from ``routine_id``.
Raises:
ValueError:
If ``routine_id`` is not a fully-qualified routine ID in
standard SQL format.
"""
proj, dset, routine = _helpers._parse_3_part_id(
routine_id, default_project=default_project, property_name="routine_id"
)
return cls.from_api_repr(
{"projectId": proj, "datasetId": dset, "routineId": routine}
)
def to_api_repr(self):
"""Construct the API resource representation of this routine reference.
Returns:
Dict[str, object]: Routine reference represented as an API resource.
"""
return self._properties
def __eq__(self, other):
"""Two RoutineReferences are equal if they point to the same routine."""
if not isinstance(other, RoutineReference):
return NotImplemented
return str(self) == str(other)
def __hash__(self):
return hash(str(self))
def __ne__(self, other):
return not self == other
def __repr__(self):
return "RoutineReference.from_string('{}')".format(str(self))
def __str__(self):
"""String representation of the reference.
This is a fully-qualified ID, including the project ID and dataset ID.
"""
return "{}.{}.{}".format(self.project, self.dataset_id, self.routine_id)
| {
"content_hash": "329cdafa40dc3306e3335b68d055152b",
"timestamp": "",
"source": "github",
"line_count": 490,
"max_line_length": 103,
"avg_line_length": 32.289795918367346,
"alnum_prop": 0.6013778283402856,
"repo_name": "tseaver/google-cloud-python",
"id": "044368e751082307a8ee59cadc30daf5703c1a2b",
"size": "16424",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bigquery/google/cloud/bigquery/routine.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1094"
},
{
"name": "Python",
"bytes": "30519057"
},
{
"name": "Shell",
"bytes": "9148"
}
],
"symlink_target": ""
} |
from core.himesis import Himesis
import uuid
class HState2CProcDef(Himesis):
def __init__(self):
"""
Creates the himesis graph representing the DSLTrans rule State2CProcDef.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HState2CProcDef, self).__init__(name='HState2CProcDef', num_nodes=0, edges=[])
# Set the graph attributes
self["mm__"] = ['HimesisMM']
self["name"] = """State2CProcDef"""
self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'State2CProcDef')
# match model. We only support one match model
self.add_node()
self.vs[0]["mm__"] = """MatchModel"""
# apply model node
self.add_node()
self.vs[1]["mm__"] = """ApplyModel"""
# paired with relation between match and apply models
self.add_node()
self.vs[2]["mm__"] = """paired_with"""
# match class State() node
self.add_node()
self.vs[3]["mm__"] = """State"""
self.vs[3]["attr1"] = """+"""
# match_contains node for class State()
self.add_node()
self.vs[4]["mm__"] = """match_contains"""
# match class Transition() node
self.add_node()
self.vs[5]["mm__"] = """Transition"""
self.vs[5]["attr1"] = """1"""
# match_contains node for class Transition()
self.add_node()
self.vs[6]["mm__"] = """match_contains"""
# match class EntryPoint() node
self.add_node()
self.vs[7]["mm__"] = """EntryPoint"""
self.vs[7]["attr1"] = """1"""
# match_contains node for class EntryPoint()
self.add_node()
self.vs[8]["mm__"] = """match_contains"""
# match class StateMachine() node
self.add_node()
self.vs[9]["mm__"] = """StateMachine"""
self.vs[9]["attr1"] = """1"""
# match_contains node for class StateMachine()
self.add_node()
self.vs[10]["mm__"] = """match_contains"""
# apply class LocalDef() node
self.add_node()
self.vs[11]["mm__"] = """LocalDef"""
self.vs[11]["attr1"] = """1"""
# apply_contains node for class LocalDef()
self.add_node()
self.vs[12]["mm__"] = """apply_contains"""
# apply class ProcDef() node
self.add_node()
self.vs[13]["mm__"] = """ProcDef"""
self.vs[13]["attr1"] = """1"""
# apply_contains node for class ProcDef()
self.add_node()
self.vs[14]["mm__"] = """apply_contains"""
# apply class Name() node
self.add_node()
self.vs[15]["mm__"] = """Name"""
self.vs[15]["attr1"] = """1"""
# apply_contains node for class Name()
self.add_node()
self.vs[16]["mm__"] = """apply_contains"""
# apply class Name() node
self.add_node()
self.vs[17]["mm__"] = """Name"""
self.vs[17]["attr1"] = """1"""
# apply_contains node for class Name()
self.add_node()
self.vs[18]["mm__"] = """apply_contains"""
# apply class Name() node
self.add_node()
self.vs[19]["mm__"] = """Name"""
self.vs[19]["attr1"] = """1"""
# apply_contains node for class Name()
self.add_node()
self.vs[20]["mm__"] = """apply_contains"""
# apply class Name() node
self.add_node()
self.vs[21]["mm__"] = """Name"""
self.vs[21]["attr1"] = """1"""
# apply_contains node for class Name()
self.add_node()
self.vs[22]["mm__"] = """apply_contains"""
# apply class ConditionSet() node
self.add_node()
self.vs[23]["mm__"] = """ConditionSet"""
self.vs[23]["attr1"] = """1"""
# apply_contains node for class ConditionSet()
self.add_node()
self.vs[24]["mm__"] = """apply_contains"""
# apply class Inst() node
self.add_node()
self.vs[25]["mm__"] = """Inst"""
self.vs[25]["attr1"] = """1"""
# apply_contains node for class Inst()
self.add_node()
self.vs[26]["mm__"] = """apply_contains"""
# apply class Name() node
self.add_node()
self.vs[27]["mm__"] = """Name"""
self.vs[27]["attr1"] = """1"""
# apply_contains node for class Name()
self.add_node()
self.vs[28]["mm__"] = """apply_contains"""
# apply class Name() node
self.add_node()
self.vs[29]["mm__"] = """Name"""
self.vs[29]["attr1"] = """1"""
# apply_contains node for class Name()
self.add_node()
self.vs[30]["mm__"] = """apply_contains"""
# apply class Name() node
self.add_node()
self.vs[31]["mm__"] = """Name"""
self.vs[31]["attr1"] = """1"""
# apply_contains node for class Name()
self.add_node()
self.vs[32]["mm__"] = """apply_contains"""
# apply class Name() node
self.add_node()
self.vs[33]["mm__"] = """Name"""
self.vs[33]["attr1"] = """1"""
# apply_contains node for class Name()
self.add_node()
self.vs[34]["mm__"] = """apply_contains"""
# match association State--initialTransition-->Transition node
self.add_node()
self.vs[35]["attr1"] = """initialTransition"""
self.vs[35]["mm__"] = """directLink_S"""
# match association Transition--dest-->EntryPoint node
self.add_node()
self.vs[36]["attr1"] = """dest"""
self.vs[36]["mm__"] = """directLink_S"""
# match association EntryPoint--owningStateMachine-->StateMachine node
self.add_node()
self.vs[37]["attr1"] = """owningStateMachine"""
self.vs[37]["mm__"] = """directLink_S"""
# apply association LocalDef--def-->ProcDef node
self.add_node()
self.vs[38]["attr1"] = """def"""
self.vs[38]["mm__"] = """directLink_T"""
# apply association ProcDef--channelNames-->Name node
self.add_node()
self.vs[39]["attr1"] = """channelNames"""
self.vs[39]["mm__"] = """directLink_T"""
# apply association ProcDef--channelNames-->Name node
self.add_node()
self.vs[40]["attr1"] = """channelNames"""
self.vs[40]["mm__"] = """directLink_T"""
# apply association ProcDef--channelNames-->Name node
self.add_node()
self.vs[41]["attr1"] = """channelNames"""
self.vs[41]["mm__"] = """directLink_T"""
# apply association ProcDef--channelNames-->Name node
self.add_node()
self.vs[42]["attr1"] = """channelNames"""
self.vs[42]["mm__"] = """directLink_T"""
# apply association ProcDef--p-->ConditionSet node
self.add_node()
self.vs[43]["attr1"] = """p"""
self.vs[43]["mm__"] = """directLink_T"""
# apply association ConditionSet--alternative-->Inst node
self.add_node()
self.vs[44]["attr1"] = """alternative"""
self.vs[44]["mm__"] = """directLink_T"""
# apply association Inst--channelNames-->Name node
self.add_node()
self.vs[45]["attr1"] = """channelNames"""
self.vs[45]["mm__"] = """directLink_T"""
# apply association Inst--channelNames-->Name node
self.add_node()
self.vs[46]["attr1"] = """channelNames"""
self.vs[46]["mm__"] = """directLink_T"""
# apply association Inst--channelNames-->Name node
self.add_node()
self.vs[47]["attr1"] = """channelNames"""
self.vs[47]["mm__"] = """directLink_T"""
# apply association Inst--channelNames-->Name node
self.add_node()
self.vs[48]["attr1"] = """channelNames"""
self.vs[48]["mm__"] = """directLink_T"""
# backward association State---->LocalDef node
self.add_node()
self.vs[49]["mm__"] = """backward_link"""
# Add the edges
self.add_edges([
(0,4), # matchmodel -> match_contains
(4,3), # match_contains -> match_class State()
(0,6), # matchmodel -> match_contains
(6,5), # match_contains -> match_class Transition()
(0,8), # matchmodel -> match_contains
(8,7), # match_contains -> match_class EntryPoint()
(0,10), # matchmodel -> match_contains
(10,9), # match_contains -> match_class StateMachine()
(1,12), # applymodel -> apply_contains
(12,11), # apply_contains -> apply_class LocalDef()
(1,14), # applymodel -> apply_contains
(14,13), # apply_contains -> apply_class ProcDef()
(1,16), # applymodel -> apply_contains
(16,15), # apply_contains -> apply_class Name()
(1,18), # applymodel -> apply_contains
(18,17), # apply_contains -> apply_class Name()
(1,20), # applymodel -> apply_contains
(20,19), # apply_contains -> apply_class Name()
(1,22), # applymodel -> apply_contains
(22,21), # apply_contains -> apply_class Name()
(1,24), # applymodel -> apply_contains
(24,23), # apply_contains -> apply_class ConditionSet()
(1,26), # applymodel -> apply_contains
(26,25), # apply_contains -> apply_class Inst()
(1,28), # applymodel -> apply_contains
(28,27), # apply_contains -> apply_class Name()
(1,30), # applymodel -> apply_contains
(30,29), # apply_contains -> apply_class Name()
(1,32), # applymodel -> apply_contains
(32,31), # apply_contains -> apply_class Name()
(1,34), # applymodel -> apply_contains
(34,33), # apply_contains -> apply_class Name()
(3,35), # match_class State() -> association initialTransition
(35,5), # association initialTransition -> match_class Transition()
(5,36), # match_class Transition() -> association dest
(36,7), # association dest -> match_class EntryPoint()
(7,37), # match_class EntryPoint() -> association owningStateMachine
(37,9), # association owningStateMachine -> match_class StateMachine()
(11,38), # apply_class LocalDef() -> association def
(38,13), # association def -> apply_class ProcDef()
(13,39), # apply_class ProcDef() -> association channelNames
(39,15), # association channelNames -> apply_class Name()
(13,40), # apply_class ProcDef() -> association channelNames
(40,17), # association channelNames -> apply_class Name()
(13,41), # apply_class ProcDef() -> association channelNames
(41,19), # association channelNames -> apply_class Name()
(13,42), # apply_class ProcDef() -> association channelNames
(42,21), # association channelNames -> apply_class Name()
(13,43), # apply_class ProcDef() -> association p
(43,23), # association p -> apply_class ConditionSet()
(23,44), # apply_class ConditionSet() -> association alternative
(44,25), # association alternative -> apply_class Inst()
(25,45), # apply_class Inst() -> association channelNames
(45,27), # association channelNames -> apply_class Name()
(25,46), # apply_class Inst() -> association channelNames
(46,29), # association channelNames -> apply_class Name()
(25,47), # apply_class Inst() -> association channelNames
(47,31), # association channelNames -> apply_class Name()
(25,48), # apply_class Inst() -> association channelNames
(48,33), # association channelNames -> apply_class Name()
(11,49), # apply_class LocalDef() -> backward_association
            (49,3), # backward_association -> match_class State()
(0,2), # matchmodel -> pairedwith
(2,1) # pairedwith -> applyModel
])
# Add the attribute equations
self["equations"] = [((3,'isComposite'),('constant','true')), ((13,'name'),('constant','C')), ((15,'literal'),('constant','exit')), ((17,'literal'),('constant','exack')), ((19,'literal'),('constant','enp')), ((21,'literal'),('constant','sh')), ((23,'__ApplyAttribute'),('constant','condsetcompstate')), ((25,'name'),('concat',(('constant','S'),(9,'name')))), ((27,'literal'),('constant','exit_in')), ((29,'literal'),('constant','exack_in')), ((31,'literal'),('concat',(('constant','A'),('concat',((7,'name'),('constant','A')))))), ((33,'literal'),('constant','sh_in')), ]
| {
"content_hash": "4eaa100758c99a5590114be23e4ddcd5",
"timestamp": "",
"source": "github",
"line_count": 308,
"max_line_length": 579,
"avg_line_length": 42.51948051948052,
"alnum_prop": 0.5047342700061087,
"repo_name": "levilucio/SyVOLT",
"id": "a0ac2a7f682ae4a6a09659cb5ba9e53f67b434f6",
"size": "13096",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "UMLRT2Kiltera_MM/transformation/handbuilt/HState2CProcDef.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "166159"
},
{
"name": "Python",
"bytes": "34207588"
},
{
"name": "Shell",
"bytes": "1118"
}
],
"symlink_target": ""
} |
'''Tests for shell.py parser'''
from __future__ import (unicode_literals, division, absolute_import, print_function)
import sys
if sys.version_info < (3,):
from io import BytesIO as StrIO
else:
from io import StringIO as StrIO
from powerline.commands.main import get_argparser, finish_args
from tests import TestCase
from tests.lib import replace_attr
class TestParser(TestCase):
def test_main_err(self):
parser = get_argparser()
out = StrIO()
err = StrIO()
def flush():
out.truncate(0)
err.truncate(0)
with replace_attr(sys, 'stdout', out, 'stderr', err):
for raising_args, raising_reg in [
([], 'too few arguments|the following arguments are required: ext'),
(['-r'], 'expected one argument'),
(['shell', '-r'], 'expected one argument'),
(['shell', '-w'], 'expected one argument'),
(['shell', '-c'], 'expected one argument'),
(['shell', '-t'], 'expected one argument'),
(['shell', '-p'], 'expected one argument'),
(['shell', '-R'], 'expected one argument'),
(['shell', '--renderer-module'], 'expected one argument'),
(['shell', '--width'], 'expected one argument'),
(['shell', '--last-exit-code'], 'expected one argument'),
(['shell', '--last-pipe-status'], 'expected one argument'),
(['shell', '--config-override'], 'expected one argument'),
(['shell', '--theme-override'], 'expected one argument'),
(['shell', '--config-path'], 'expected one argument'),
(['shell', '--renderer-arg'], 'expected one argument'),
(['shell', '--jobnum'], 'expected one argument'),
(['-r', '.zsh'], 'too few arguments|the following arguments are required: ext'),
(['shell', '--last-exit-code', 'i'], 'invalid int_or_sig value'),
(['shell', '--last-pipe-status', '1 i'], 'invalid <lambda> value'),
]:
self.assertRaises(SystemExit, parser.parse_args, raising_args)
self.assertFalse(out.getvalue())
self.assertRegexpMatches(err.getvalue(), raising_reg)
flush()
def test_main_normal(self):
parser = get_argparser()
out = StrIO()
err = StrIO()
with replace_attr(sys, 'stdout', out, 'stderr', err):
for argv, expargs in [
(['shell'], {'ext': ['shell']}),
(['shell', '-r', '.zsh'], {'ext': ['shell'], 'renderer_module': '.zsh'}),
([
'shell',
'left',
'-r', '.zsh',
'--last-exit-code', '10',
'--last-pipe-status', '10 20 30',
'--jobnum=10',
'-w', '100',
'-c', 'common.term_truecolor=true',
'-c', 'common.spaces=4',
'-t', 'default.segment_data.hostname.before=H:',
'-p', '.',
'-p', '..',
'-R', 'smth={"abc":"def"}',
], {
'ext': ['shell'],
'side': 'left',
'renderer_module': '.zsh',
'last_exit_code': 10,
'last_pipe_status': [10, 20, 30],
'jobnum': 10,
'width': 100,
'config_override': {'common': {'term_truecolor': True, 'spaces': 4}},
'theme_override': {
'default': {
'segment_data': {
'hostname': {
'before': 'H:'
}
}
}
},
'config_path': ['.', '..'],
'renderer_arg': {'smth': {'abc': 'def'}},
}),
(['shell', '-R', 'arg=true'], {'ext': ['shell'], 'renderer_arg': {'arg': True}}),
(['shell', '-R', 'arg=true', '-R', 'arg='], {'ext': ['shell'], 'renderer_arg': {}}),
(['shell', '-R', 'arg='], {'ext': ['shell'], 'renderer_arg': {}}),
(['shell', '-t', 'default.segment_info={"hostname": {}}'], {
'ext': ['shell'],
'theme_override': {
'default': {
'segment_info': {
'hostname': {}
}
}
},
}),
(['shell', '-c', 'common={ }'], {'ext': ['shell'], 'config_override': {'common': {}}}),
]:
args = parser.parse_args(argv)
finish_args({}, args)
for key, val in expargs.items():
self.assertEqual(getattr(args, key), val)
for key, val in args.__dict__.items():
if key not in expargs:
self.assertFalse(val, msg='key {0} is {1} while it should be something false'.format(key, val))
self.assertFalse(err.getvalue() + out.getvalue(), msg='unexpected output: {0!r} {1!r}'.format(
err.getvalue(),
out.getvalue(),
))
if __name__ == '__main__':
from tests import main
main()
| {
"content_hash": "b7faad56613b57d5b526793e40d9da3b",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 108,
"avg_line_length": 35.201550387596896,
"alnum_prop": 0.5029729134551861,
"repo_name": "bartvm/powerline",
"id": "af3562a3357147a6c880253ed3ea675d76b646ab",
"size": "4572",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/test_cmdline.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "5840"
},
{
"name": "C",
"bytes": "3781"
},
{
"name": "Lua",
"bytes": "400"
},
{
"name": "Python",
"bytes": "729067"
},
{
"name": "Shell",
"bytes": "47568"
},
{
"name": "VimL",
"bytes": "16969"
}
],
"symlink_target": ""
} |
from figment import Component
import random
class Sticky(Component):
"""A difficult-to-drop item."""
def __init__(self, stickiness=0.5):
self.stickiness = stickiness
def to_dict(self):
return {"stickiness": self.stickiness}
def roll_for_drop(self):
return random.random() < self.stickiness
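
# Minimal usage sketch (not part of the original component): roll_for_drop()
# is a single Bernoulli trial, returning True with probability `stickiness`.
#
#   sticky = Sticky(stickiness=0.9)
#   sticky.roll_for_drop()   # True about 9 times out of 10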
| {
"content_hash": "05ec076a71a347e38b86e88a81cf4f3e",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 48,
"avg_line_length": 22.333333333333332,
"alnum_prop": 0.6507462686567164,
"repo_name": "vreon/figment",
"id": "b9a82e2cd8ce1071fb17777dd7cb0f4ec77de5c4",
"size": "335",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/theworldfoundry/theworldfoundry/components/sticky.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30206"
}
],
"symlink_target": ""
} |
from testlib import *
from auxly.filesys import makedirs, delete, isempty, copy
##==============================================================#
## SECTION: Class Definitions #
##==============================================================#
class TestCase(BaseTest):
def test_makedirs_1(test):
"""Make a single dir."""
path = DIR[0]
test.assertFalse(op.exists(path))
test.assertTrue(makedirs(path))
test.assertTrue(op.isdir(path))
test.assertTrue(isempty(path))
test.assertTrue(delete(path))
test.assertFalse(op.exists(path))
def test_makedirs_2(test):
"""Make nested dirs."""
path = op.join(DIR[0], DIR[1])
test.assertIsNone(isempty(path))
test.assertFalse(op.exists(path))
test.assertTrue(makedirs(path))
test.assertTrue(op.exists(path))
test.assertTrue(op.isdir(path))
test.assertFalse(isempty(DIR[0]))
test.assertTrue(isempty(path))
test.assertTrue(delete(path))
test.assertIsNone(isempty(path))
test.assertFalse(op.exists(path))
test.assertTrue(op.isdir(DIR[0]))
test.assertTrue(delete(DIR[0]))
##==============================================================#
## SECTION: Main Body #
##==============================================================#
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "95077bdec2819697c16128cccfbd962a",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 65,
"avg_line_length": 35.90243902439025,
"alnum_prop": 0.47690217391304346,
"repo_name": "jeffrimko/Auxly",
"id": "af5b52eaaf0b21db8b0d739b4ec129b1660d8bb8",
"size": "1671",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/filesys_test_1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2962"
},
{
"name": "Python",
"bytes": "70432"
}
],
"symlink_target": ""
} |
"""A module of potentially useful class types"""
import inspect
__all__ = ['Dict', 'immutableobject', 'Singleton']
__author__ = 'Jon Nappi'
class Dict(dict):
"""A *dict* builtin subclass. Currently the only additional functionality
that this class provides is the addition of the __iadd__ method, which
provides += functionality to dictionaries. ie:
::
>>> d1 = Dict(one=1, two=2)
>>> d2 = Dict(three=3, four=4)
>>> d1 += d2
>>> d1
... {'one': 1, 'three': 3, 'two': 2, 'four': 4}
It's important to note that keys in the right-most dictionary will take
precedence over keys in the left-most one. ie, if the same key exists in
    d2 as d1, then the value for d1 will be overridden on +='ing them together
"""
def __iadd__(self, other):
        if not isinstance(other, dict):
            return NotImplemented
for key, val in other.items():
self[key] = val
return self
# noinspection PyPep8Naming
class immutableobject:
"""This classtype provides a means through which to create a class with
    immutable attributes. That is, once this class has been instantiated, you
can no longer update the values of this class's attributes.
::
>>> class Foo(immutableobject):
... def __init__(self, x, y):
... self.x = x
... self.y = y
... super(Foo, self).__init__()
>>> f = Foo(1, 2)
>>> f.x
... 1
>>> f.x = 5
>>> f.x
... 1
You can optionally specify the `fail_quietly` flag in `immutableobject`'s
__init__method to False. This will then raise TypeError's when a user
attempts to update the value for an immutable attribute. Please note that
this TypeError convention was adopted from the way builtin tuples'
immutability behaves.
"""
__initialized = False
def __init__(self, fail_quietly=True):
self._fail_quietly = fail_quietly
self.__initialized = True
def __setattr__(self, key, value):
if not self.__initialized:
super(immutableobject, self).__setattr__(key, value)
elif not self._fail_quietly:
try:
name = str(self.__class__).split('.')[-1][:-2]
except (AttributeError, IndexError, Exception):
name = 'immutableobject'
msg = '{} object does not support item assignment'
raise TypeError(msg.format(name))
class _Singleton(type):
"""A metaclass for providing singleton-type functionality to all instances
of this type.
"""
_instances = {}
_meta_key = None
def __call__(cls, *args, **kwargs):
cls._prep_meta_key()
if cls._meta_key not in cls._instances:
# super(_Singleton, cls) evaluates to type; *args/**kwargs get
# passed to class __init__ method via type.__call__
cls._instances[cls._meta_key] = \
super(_Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls._meta_key]
def _prep_meta_key(cls):
"""Make sure that our meta_key has been setup properly"""
if cls not in cls._meta_key:
# We want the class type as part of the key, to ensure that there
# can be more than one type of Singleton
cls._meta_key += (cls,)
def new(cls, *args, **kwargs):
"""Overwrite any existing instances of this Singleton type and replace
it with a new instance
"""
cls._prep_meta_key()
if cls._meta_key in cls._instances:
cls._instances.pop(cls._meta_key)
return cls.__call__(*args, **kwargs)
def delete(cls):
"""Delete any existing Singleton of this type, if one exists"""
cls._prep_meta_key()
if cls._meta_key in cls._instances:
obj = cls._instances.pop(cls._meta_key)
del obj
_Singleton._meta_key = inspect.getmro(_Singleton)
class Singleton(_Singleton('SingletonMeta', (object,), {})):
"""A Singleton type for ensuring that only one type of any given object
exists at any given time. For example
::
>>> class Foo(Singleton):
... def __init__(self, x):
... super(Foo, self).__init__()
... self.x = x
>>> f = Foo(5)
>>> f.x
... 5
>>> f = Foo(8675309)
>>> f.x
... 5
"""
pass
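
# Illustrative sketch (not part of the original module) of the `new()` and
# `delete()` helpers provided by the _Singleton metaclass, reusing the `Foo`
# subclass from the Singleton docstring above:
#
#   >>> f = Foo(5)
#   >>> Foo(8675309).x       # the cached instance is returned unchanged
#   ... 5
#   >>> Foo.new(7).x         # new() replaces the cached instance
#   ... 7
#   >>> Foo.delete()         # delete() drops it; the next call rebuilds
#   >>> Foo(9).x
#   ... 9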
| {
"content_hash": "206bbed389394b75aa64677e5dd8aab9",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 78,
"avg_line_length": 34.19230769230769,
"alnum_prop": 0.5635545556805399,
"repo_name": "moogar0880/python-misc",
"id": "7eeb9b88456eb273b0ffd1f1b96b0752dd55a43a",
"size": "4469",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "misc/classtypes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "11411"
}
],
"symlink_target": ""
} |
from construct import OptionalGreedyRange, Sequence, StringAdapter, Peek
from construct import LengthValueAdapter, Struct, Switch, Container, IfThenElse
from construct import PascalString
from construct import MetaField, SBInt8, SBInt32, UBInt8, UBInt16
from construct import UBInt32, UBInt64
from twisted.internet import protocol, reactor
from varint import VarInt
from hammerencodings import ucs2
from codecs import register
register(ucs2)
def ProtoStringNetty(name):
    return PascalString(name, length_field=VarInt("length"))
class DoubleAdapter(LengthValueAdapter):
def _encode(self, obj, context):
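        # The length prefix counts UCS-2 code units, i.e. half the number of
        # bytes in the encoded string.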
return len(obj) / 2, obj
def ProtoString(name):
sa = StringAdapter(
DoubleAdapter(
Sequence(
name,
UBInt16("length"),
MetaField("data", lambda ctx: ctx["length"] * 2)
)
),
encoding="ucs2"
)
return sa
handshake_netty_4 = Struct(
"handshake",
VarInt("protocol"),
ProtoStringNetty("host"),
UBInt16("port"),
VarInt("state")
)
packets_netty = {
0x00: handshake_netty_4
}
packets_by_name_netty = {
"handshake": 0x00
}
handshake22 = Struct(
"handshake22",
ProtoString("username")
)
handshake39 = Struct(
"handshake39",
SBInt8("protocol"),
ProtoString("username"),
ProtoString("host"),
SBInt32("port")
)
handshake_packet = Struct(
"handshake_packet",
Peek(SBInt8("peekedVersion")),
IfThenElse(
"old_handshake",
lambda ctx: (ctx.peekedVersion >= 38 and ctx.peekedVersion <= 78),
handshake39,
handshake22
)
)
login22 = Struct(
"login22",
UBInt64("22-unused-long"),
UBInt32("22-unused-int"),
UBInt8("22-unused-sbyte1"),
UBInt8("22-unused-sbyte2"),
UBInt8("22-unused-byte1"),
UBInt8("22-unused-byte2")
)
login28 = Struct(
"login28",
ProtoString("28-unused-emptystring"),
UBInt32("28-unused-int1"),
UBInt32("28-unused-int2"),
UBInt8("28-unused-sbyte1"),
UBInt8("28-unused-byte1"),
UBInt8("28-unused-byte2")
)
login_packet = Struct(
"login",
UBInt32("protocol"),
ProtoString("username"),
Switch(
"usused-matter",
lambda ctx: ctx.protocol,
{
22: login22,
23: login22,
28: login28,
29: login28,
},
default=UBInt8("UNKNOWN-PROTOCOL")
)
)
packets_by_name = {
"login": 0x01,
"handshake": 0x02,
}
packets = {
0x01: login_packet,
0x02: handshake_packet,
}
packet_netty = Struct(
"full_packet",
VarInt("length"),
VarInt("header"),
Switch("payload", lambda ctx: ctx.header, packets_netty)
)
packet = Struct(
"full_packet",
UBInt8("header"),
Switch("payload", lambda ctx: ctx.header, packets)
)
packet_stream = Struct(
"packet_stream",
Peek(UBInt8("peeked")),
OptionalGreedyRange(
IfThenElse(
"old_or_new",
lambda ctx: ctx.peeked not in [chr(1), chr(2)],
packet_netty,
packet,
)
),
OptionalGreedyRange(
UBInt8("leftovers")
)
)
def make_packet(packet, *args, **kwargs):
if packet not in packets_by_name:
print "Couldn't create unsupported packet: %s" % packet
return ""
header = packets_by_name[packet]
print "0%.2x" % header
for arg in args:
kwargs.update(dict(arg))
container = Container(**kwargs)
payload = packets[header].build(container)
print "Making packet: <%s> (0x%.2x)" % (packet, header)
print payload
return chr(header)+payload
def parse_packets(buff):
container = packet_stream.parse(buff)
l = [(i.header, i.payload) for i in container.old_or_new]
leftovers = "".join(chr(i) for i in container.leftovers)
for header, payload in l:
print "Parsed packet 0x%.2x" % header
print payload
return l, leftovers
class Hammer(protocol.Protocol):
buff = ""
protocol_found = False
def write_packet(self, header, **payload):
self.transport.write(make_packet(header, **payload))
def dataReceived(self, data):
self.buff += data
packets, self.buff = parse_packets(self.buff)
for header, payload in packets:
if header == packets_by_name["handshake"]:
if 'protocol' in payload.old_handshake.keys():
self.protocol_found = True
print "protocol: %d" % payload.old_handshake.protocol
else:
container = Container(username="-")
payload = handshake22.build(container)
self.transport.write(chr(header)+payload)
elif (header == packets_by_name["login"] and
not self.protocol_found):
self.protocol_found = True
print "protocol: %d" % payload.protocol
elif header == packets_by_name_netty["handshake"]:
if payload.state == 2:
self.protocol_found = True
print "protocol: %d" % payload.protocol
def main():
factory = protocol.ServerFactory()
factory.protocol = Hammer
reactor.listenTCP(25565, factory)
reactor.run()
if __name__ == '__main__':
main()
| {
"content_hash": "a292ec46e737ab473637ae255e191501",
"timestamp": "",
"source": "github",
"line_count": 231,
"max_line_length": 79,
"avg_line_length": 23.064935064935064,
"alnum_prop": 0.5957207207207207,
"repo_name": "bravoserver/hammer",
"id": "4455b3583226be106215f289e92eeafdcc2a76d5",
"size": "5328",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "8956"
}
],
"symlink_target": ""
} |
import placebo
import boto3
import os
import functools
from placebo.serializer import Format
def placebo_session(function):
"""
Decorator to help do testing with placebo.
Simply wrap the function you want to test and make sure to add
a "session" argument so the decorator can pass the placebo session.
Accepts the following environment variables to configure placebo:
PLACEBO_MODE: set to "record" to record AWS calls and save them
PLACEBO_PROFILE: optionally set an AWS credential profile to record with
PLACEBO_DIR: set the directory to record to / read from
"""
@functools.wraps(function)
def wrapper(*args, **kwargs):
session_kwargs = {
'region_name': os.environ.get('AWS_DEFAULT_REGION', 'us-east-1')
}
profile_name = os.environ.get('PLACEBO_PROFILE', None)
if profile_name:
session_kwargs['profile_name'] = profile_name
session = boto3.Session(**session_kwargs)
self = args[0]
prefix = self.__class__.__name__ + '.' + function.__name__
base_dir = os.environ.get(
"PLACEBO_DIR", os.path.join(os.getcwd(), "placebo"))
record_dir = os.path.join(base_dir, prefix)
record_format = os.environ.get('PLACEBO_FORMAT', Format.DEFAULT)
if not os.path.exists(record_dir):
os.makedirs(record_dir)
pill = placebo.attach(session, data_path=record_dir,
record_format=record_format)
if os.environ.get('PLACEBO_MODE') == 'record':
pill.record()
else:
pill.playback()
kwargs['session'] = session
return function(*args, **kwargs)
return wrapper
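
# Illustrative usage (not part of the original module); the test class and
# AWS calls below are hypothetical:
#
#   class TestEC2(unittest.TestCase):
#       @placebo_session
#       def test_describe_instances(self, session):
#           ec2 = session.client('ec2')
#           ec2.describe_instances()
#
# With PLACEBO_MODE=record the boto3 responses are saved under PLACEBO_DIR;
# in any other mode they are played back from the saved files.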
| {
"content_hash": "dfe9d542d22d334fe2c8bbf11ac4aa08",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 76,
"avg_line_length": 31.88888888888889,
"alnum_prop": 0.624274099883856,
"repo_name": "garnaat/placebo",
"id": "3b717013cc355a44896af686ecab35ec35f4c703",
"size": "2309",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "placebo/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "35813"
}
],
"symlink_target": ""
} |
import json
import logging
import os
import re
import webapp2
import uuid
from perf_insights import trace_info
from perf_insights import cloud_config
import third_party.cloudstorage as gcs
from google.appengine.api import datastore_errors
default_retry_params = gcs.RetryParams(initial_delay=0.2,
max_delay=5.0,
backoff_factor=2,
max_retry_period=15)
gcs.set_default_retry_params(default_retry_params)
class UploadPage(webapp2.RequestHandler):
def get(self):
self.response.out.write("""
<html><body>
<head><title>Performance Insights - Trace Uploader</title></head>
<form action="/upload" enctype="multipart/form-data" method="post">
<div><input type="file" name="trace"/></div>
<div><input type="submit" value="Upload"></div>
</form><hr>
</body></html>""")
def post(self):
trace_uuid = str(uuid.uuid4())
gcs_path = '/%s/%s.gz' % (
cloud_config.Get().trace_upload_bucket, trace_uuid)
gcs_file = gcs.open(gcs_path,
'w',
content_type='application/octet-stream',
options={},
retry_params=default_retry_params)
gcs_file.write(self.request.get('trace'))
gcs_file.close()
trace_object = trace_info.TraceInfo(id=trace_uuid)
trace_object.remote_addr = os.environ["REMOTE_ADDR"]
for arg in self.request.arguments():
arg_key = arg.replace('-', '_').lower()
if arg_key in trace_object._properties:
try:
setattr(trace_object, arg_key, self.request.get(arg))
except datastore_errors.BadValueError:
pass
scenario_config = self.request.get('config')
if scenario_config:
config_json = json.loads(scenario_config)
if 'scenario_name' in config_json:
trace_object.scenario_name = config_json['scenario_name']
tags_string = self.request.get('tags')
if tags_string:
# Tags are comma separated and should only include alphanumeric + '-'.
if re.match('^[a-zA-Z0-9-,]+$', tags_string):
trace_object.tags = tags_string.split(',')
else:
logging.warning('The provided tags string includes one or more invalid'
' characters and will be ignored')
trace_object.ver = self.request.get('product-version')
trace_object.put()
self.response.write(trace_uuid)
app = webapp2.WSGIApplication([('/upload', UploadPage)])
| {
"content_hash": "1c9dcde31779e1f814c138e33f0bb4c6",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 79,
"avg_line_length": 33.282051282051285,
"alnum_prop": 0.597457627118644,
"repo_name": "SummerLW/Perf-Insight-Report",
"id": "c1179bc1518c706755c3c80c8df0d653497105e1",
"size": "2763",
"binary": false,
"copies": "1",
"ref": "refs/heads/test",
"path": "perf_insights/perf_insights/endpoints/upload.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "3598"
},
{
"name": "C++",
"bytes": "6411"
},
{
"name": "CSS",
"bytes": "14952"
},
{
"name": "HTML",
"bytes": "27508823"
},
{
"name": "JavaScript",
"bytes": "75587"
},
{
"name": "Python",
"bytes": "4638631"
},
{
"name": "Shell",
"bytes": "2124"
}
],
"symlink_target": ""
} |
import os
import cv2
import sys
import h5py
import parmap
import argparse
import numpy as np
import pandas as pd
from tqdm import tqdm as tqdm
import matplotlib.pylab as plt
def dodgeV2(image, mask):
return cv2.divide(image, 255-mask, scale=256)
def make_sketch(filename):
"""
Create sketch for the RGB image
"""
img = cv2.imread(filename)
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img_gray_inv = 255 - img_gray
img_blur = cv2.GaussianBlur(img_gray_inv, ksize=(21, 21), sigmaX=0, sigmaY=0)
img_blend = dodgeV2(img_gray, img_blur)
return img_blend
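
# Quick usage sketch (illustrative only; "face.jpg" is a hypothetical path):
#
#   sketch = make_sketch("face.jpg")        # single-channel pencil-sketch image
#   cv2.imwrite("face_sketch.jpg", sketch)  # same height/width as the input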
def format_sketch(filename, size):
sketch = make_sketch(filename)
sketch = sketch[20:220, 20:220]
sketch = cv2.resize(sketch, (size, size), interpolation=cv2.INTER_AREA)
sketch = sketch.reshape((1, size, size, 1)).transpose(0, 3, 1, 2)
return sketch
def format_image(img_path, size):
"""
Load img with opencv and reshape
"""
img_color = cv2.imread(img_path)
img_color = img_color[20:220, 20:220, :] # crop to center around face (empirical values)
img_color = cv2.resize(img_color, (size, size), interpolation=cv2.INTER_AREA)
img_color = img_color.reshape((1, size, size, 3)).transpose(0, 3, 1, 2)
img_color = img_color[:, ::-1, :, :] # BGR to RGB
return img_color
def parse_attributes():
attr_file = os.path.join(raw_dir, "lfw_attributes.txt")
arr = []
with open(attr_file, "r") as f:
lines = f.readlines()
list_col_names = lines[1].rstrip().split("\t")[1:]
for l in lines[2:]:
arr.append(l.rstrip().split("\t"))
arr = np.array(arr)
df = pd.DataFrame(arr, columns=list_col_names)
col_float = df.columns.values[2:]
for c in col_float:
df[c] = df[c].astype(np.float32)
df["imagenum"] = df.imagenum.apply(lambda x: x.zfill(4))
df["person"] = df.person.apply(lambda x: "_".join(x.split(" ")))
df["image_path"] = df.person + "/" + df.person + "_" + df.imagenum + ".jpg"
df["image_path"] = df["image_path"].apply(lambda x: os.path.join(raw_dir, "lfw-deepfunneled", x))
df.to_csv(os.path.join(data_dir, "lfw_processed_attributes.csv"), index=False)
return df
def build_HDF5(size):
"""
Gather the data in a single HDF5 file.
"""
    df_attr = parse_attributes()
list_col_labels = [c for c in df_attr.columns.values
if c not in ["person", "imagenum", "image_path"]]
# Put train data in HDF5
hdf5_file = os.path.join(data_dir, "lfw_%s_data.h5" % size)
with h5py.File(hdf5_file, "w") as hfw:
data_color = hfw.create_dataset("lfw_%s_color" % size,
(0, 3, size, size),
maxshape=(None, 3, size, size),
dtype=np.uint8)
data_sketch = hfw.create_dataset("lfw_%s_sketch" % size,
(0, 1, size, size),
maxshape=(None, 1, size, size),
dtype=np.uint8)
label = hfw.create_dataset("labels", data=df_attr[list_col_labels].values)
label.attrs["label_names"] = list_col_labels
arr_img = df_attr.image_path.values
num_files = len(arr_img)
chunk_size = 1000
num_chunks = num_files / chunk_size
arr_chunks = np.array_split(np.arange(num_files), num_chunks)
for chunk_idx in tqdm(arr_chunks):
list_img_path = arr_img[chunk_idx].tolist()
output = parmap.map(format_image, list_img_path, size)
output_sketch = parmap.map(format_sketch, list_img_path, size)
arr_img_color = np.concatenate(output, axis=0)
arr_img_sketch = np.concatenate(output_sketch, axis=0)
# Resize HDF5 dataset
data_color.resize(data_color.shape[0] + arr_img_color.shape[0], axis=0)
data_color[-arr_img_color.shape[0]:] = arr_img_color.astype(np.uint8)
data_sketch.resize(data_sketch.shape[0] + arr_img_sketch.shape[0], axis=0)
data_sketch[-arr_img_sketch.shape[0]:] = arr_img_sketch.astype(np.uint8)
def compute_vgg(size, batch_size=32):
"""
get VGG feature
"""
from keras.applications import vgg16
from keras.applications.imagenet_utils import preprocess_input
from keras.models import Model
# load data
hdf5_file = os.path.join(data_dir, "lfw_%s_data.h5" % size)
with h5py.File(hdf5_file, "a") as hf:
X = hf["lfw_%s_color" % size][:].astype(np.float32)
X = preprocess_input(X)
X = np.transpose(X,(0,2,3,1))
# compute features
base_model = vgg16.VGG16(weights='imagenet', include_top=False)
model = Model(input=base_model.input, output=base_model.get_layer('block2_conv2').output)
vgg16_feat = model.predict(X, batch_size=batch_size, verbose=1)
hf.create_dataset('lfw_%s_vgg' % size, data=vgg16_feat)
def get_bw(size):
"""
get black and white images
"""
# load data
hdf5_file = os.path.join(data_dir, "lfw_%s_data.h5" % size)
with h5py.File(hdf5_file, "a") as hf:
img = hf["lfw_%s_color" % size][:].astype(np.float32).transpose((0,2,3,1))
bw = np.dot(img[...,:3], [0.299, 0.587, 0.114])
print(bw.shape, 'is the shape of B&W images')
hf.create_dataset('lfw_%s_bw' % size, data=bw)
def check_HDF5(size):
"""
Plot images with landmarks to check the processing
"""
# Get hdf5 file
hdf5_file = os.path.join(data_dir, "lfw_%s_data.h5" % size)
with h5py.File(hdf5_file, "r") as hf:
data_color = hf["data"]
label = hf["labels"]
attrs = label.attrs["label_names"]
for i in range(data_color.shape[0]):
plt.figure(figsize=(20, 10))
img = data_color[i, :, :, :].transpose(1,2,0)[:, :, ::-1]
# Get the 10 labels with highest values
idx = label[i].argsort()[-10:]
plt.xlabel(", ".join(attrs[idx]), fontsize=12)
plt.imshow(img)
plt.show()
plt.clf()
plt.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Build dataset')
# parser.add_argument("keras_model_path", type=str,
# help="Path to keras deep-learning-models directory")
parser.add_argument('--img_size', default=64, type=int,
help='Desired Width == Height')
parser.add_argument('--do_plot', default=False, type=bool,
help='Whether to visualize saved images')
parser.add_argument('--batch_size', default=32, type=int,
help='Batch size for VGG predictions')
args = parser.parse_args()
raw_dir = "../../data/raw"
data_dir = "../../data/processed"
for d in [raw_dir, data_dir]:
if not os.path.exists(d):
os.makedirs(d)
build_HDF5(args.img_size)
get_bw(args.img_size)
compute_vgg(args.img_size)
if args.do_plot:
check_HDF5(args.img_size)
| {
"content_hash": "c3126e80b017876bd4bb7021b34a0680",
"timestamp": "",
"source": "github",
"line_count": 213,
"max_line_length": 105,
"avg_line_length": 33.713615023474176,
"alnum_prop": 0.5724829410945551,
"repo_name": "TengdaHan/Convolutional_Sketch_Inversion",
"id": "9d37760bb693239fb0ae799e159badb43af01ec4",
"size": "7181",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/data/make_dataset.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19130"
}
],
"symlink_target": ""
} |
import json
from twisted.trial import unittest
from nacl.public import PrivateKey, PublicKey, Box
from .common import TwoNodeMixin
from ..mailbox import channel
from ..mailbox.delivery import createMsgA, ReturnTransport, OutboundHTTPTransport
from ..mailbox.server import parseMsgA, parseMsgB
class Transports(TwoNodeMixin, unittest.TestCase):
def test_create_from_channel(self):
nA, nB, entA, entB = self.make_connected_nodes()
c = channel.OutboundChannel(nA.db, entA["id"])
transports = c.createTransports()
self.failUnlessEqual(len(transports), 2)
classes = set([t.__class__ for t in transports])
self.failUnlessEqual(classes, set([ReturnTransport,
OutboundHTTPTransport]))
self.failUnless(isinstance(transports[0], ReturnTransport))
def test_msgA(self):
nA, nB, entA, entB = self.make_connected_nodes()
msgC = "msgC"
trec = json.loads(entA["their_channel_record_json"])["transports"][0]
msgA = createMsgA(trec, msgC)
pubkey1_s, boxed = parseMsgA(msgA)
tpriv_hex = self.tports2["local"]["retrieval"]["privkey"]
tpriv = PrivateKey(tpriv_hex.decode("hex"))
b = Box(tpriv, PublicKey(pubkey1_s))
msgB = b.decrypt(boxed)
MSTT, msgC2 = parseMsgB(msgB)
self.failUnlessEqual(msgC, msgC2)
# TODO: use a stable fake TT private key instead of randomly
# generating one (and throwing it away) in Agent.build_transports(),
# so we can decrypt it here and make sure it matches
def test_local(self):
nA, nB, entA, entB = self.make_connected_nodes(transport="local")
chanAB = json.loads(entA["their_channel_record_json"])
transportsAB = chanAB["transports"]
self.failUnlessEqual(len(transportsAB), 1)
self.failUnlessEqual(transportsAB[0]["type"], "http")
local_tids = [row["id"]
for row in (nA.db.execute("SELECT * FROM"
" mailbox_server_transports")
.fetchall())
if row["symkey"] is None]
self.failUnlessEqual(len(local_tids), 1)
tid_1 = nA.agent.mailbox_server.get_local_transport()
tid_2 = nA.agent.mailbox_server.get_local_transport()
self.failUnlessEqual(tid_1, tid_2)
def test_send_local(self):
nA, nB, entA, entB = self.make_connected_nodes(transport="local")
#chanAB = json.loads(entA["their_channel_record_json"])
messages = []
def message_received(mbid, msgC):
messages.append((mbid,msgC))
nB.agent.msgC_received = message_received
d = nA.agent.send_message(entA["id"], {"hi": "world"})
def _sent(res):
self.failUnlessEqual(len(messages), 1)
self.failUnlessEqual(messages[0][0], "local")
d.addCallback(_sent)
return d
def test_send_local_payload(self):
nA, nB, entA, entB = self.make_connected_nodes(transport="local")
P1 = {"hi": "world"}
P2 = {"hi": "2"}
payloads = []
def payload_received(tid, seqnum, payload_json):
payloads.append((tid,seqnum,payload_json))
nB.agent.payload_received = payload_received
d = nA.agent.send_message(entA["id"], P1)
def _sent(res):
self.failUnlessEqual(len(payloads), 1)
tid,seqnum,payload_json = payloads[0]
self.failUnlessEqual(tid, entB["id"])
self.failUnlessEqual(seqnum, 1)
self.failUnlessEqual(json.loads(payload_json), P1)
d.addCallback(_sent)
# now bounce node B and confirm that it can grab the server port when
# it comes back up
d.addCallback(lambda _: nB.disownServiceParent())
d.addCallback(lambda _: self.startNode(nB.basedir))
def _new_nodeB(new_nB):
new_nB.agent.payload_received = payload_received
d.addCallback(_new_nodeB)
d.addCallback(lambda _: nA.agent.send_message(entA["id"], P2))
def _sent2(res):
self.failUnlessEqual(len(payloads), 2)
tid,seqnum,payload_json = payloads[1]
self.failUnlessEqual(tid, entB["id"])
self.failUnlessEqual(seqnum, 2)
self.failUnlessEqual(json.loads(payload_json), P2)
d.addCallback(_sent2)
return d
def test_send_local_payload_stored(self):
nA, nB, entA, entB = self.make_connected_nodes(transport="local")
P1 = {"hi": "world"}
d = nA.agent.send_message(entA["id"], P1)
def _sent(res):
c = nB.db.execute("SELECT * FROM inbound_messages")
rows = c.fetchall()
self.failUnlessEqual(len(rows), 1)
self.failUnlessEqual(rows[0]["id"], 1) # global msgid
self.failUnlessEqual(rows[0]["cid"], entB["id"])
self.failUnlessEqual(rows[0]["seqnum"], 1)
self.failUnlessEqual(json.loads(rows[0]["payload_json"]), P1)
d.addCallback(_sent)
d.addCallback(lambda _: nB.agent.command_fetch_all_messages())
def _fetched(messages):
self.failUnlessEqual(len(messages), 1)
self.failUnlessEqual(messages[0]["id"], 1)
self.failUnlessEqual(messages[0]["cid"], entB["id"])
self.failUnlessEqual(messages[0]["seqnum"], 1)
self.failUnlessEqual(messages[0]["payload"], P1)
d.addCallback(_fetched)
return d
| {
"content_hash": "76414374dbc1333e967555982ebf21a5",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 81,
"avg_line_length": 41.92424242424242,
"alnum_prop": 0.5999277195518612,
"repo_name": "warner/petmail",
"id": "5404feed31c7e2d25137b13624a0c7c9aae19b53",
"size": "5534",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/petmail/test/test_transport.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1894"
},
{
"name": "Emacs Lisp",
"bytes": "5279"
},
{
"name": "HTML",
"bytes": "9968"
},
{
"name": "JavaScript",
"bytes": "19036"
},
{
"name": "Makefile",
"bytes": "3247"
},
{
"name": "Python",
"bytes": "437895"
}
],
"symlink_target": ""
} |
from CIM14.ENTSOE.Dynamics.IEC61970.Wires.WiresSynchronousMachine import WiresSynchronousMachine
from CIM14.ENTSOE.Dynamics.IEC61970.Wires.WiresRegulatingControl import WiresRegulatingControl
from CIM14.ENTSOE.Dynamics.IEC61970.Wires.WiresRegulatingCondEq import WiresRegulatingCondEq
nsURI = "http://iec.ch/TC57/2009/CIM-schema-cim14?profile=http://iec.ch/TC57/2007/profile#Wires"
nsPrefix = "cimWires"
| {
"content_hash": "d743da3f5455da2efb96c1896f0452ed",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 96,
"avg_line_length": 58,
"alnum_prop": 0.8522167487684729,
"repo_name": "rwl/PyCIM",
"id": "4dda8023a8455d9bc81e768c2feee92268798a18",
"size": "1507",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CIM14/ENTSOE/Dynamics/IEC61970/Wires/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7420564"
}
],
"symlink_target": ""
} |
import os
import unittest
import cobra
import cobra.auth as c_auth
import cobra.auth.shadowfile as c_auth_shadow
import cobra.tests as c_tests
class CobraBasicTest(unittest.TestCase):
#def setUp(self):
#pass
#def tearDown(self):
#pass
def test_cobra_proxy(self):
testobj = c_tests.TestObject()
daemon = cobra.CobraDaemon(port=60600)
objname = daemon.shareObject( testobj )
daemon.fireThread()
t = cobra.CobraProxy('cobra://localhost:60600/%s' % objname)
c_tests.accessTestObject( t )
daemon.stopServer()
def test_cobra_msgpack(self):
try:
import msgpack
except ImportError:
self.skipTest('No msgpack installed!')
testobj = c_tests.TestObject()
daemon = cobra.CobraDaemon(port=60610, msgpack=True)
objname = daemon.shareObject( testobj )
daemon.fireThread()
t = cobra.CobraProxy('cobra://localhost:60610/%s?msgpack=1' % objname)
c_tests.accessTestObject( t )
daemon.stopServer()
def test_cobra_authentication(self):
testobj = c_tests.TestObject()
daemon = cobra.CobraDaemon(port=60601)
daemon.setAuthModule( c_auth.CobraAuthenticator() )
daemon.fireThread()
objname = daemon.shareObject( testobj )
# Lets fail because of no-auth first
try:
p = cobra.CobraProxy('cobra://localhost:60601/%s' % objname)
raise Exception('Allowed un-authd connection!')
except cobra.CobraAuthException as e:
pass
# Now fail with wrong auth
try:
p = cobra.CobraProxy('cobra://localhost:60601/%s' % objname, authinfo={})
raise Exception('Allowed bad-auth connection!')
except cobra.CobraAuthException as e:
pass
        # Now let's succeed
authinfo = { 'user':'invisigoth', 'passwd':'secret' }
t = cobra.CobraProxy('cobra://localhost:60601/%s' % objname, authinfo=authinfo)
c_tests.accessTestObject( t )
daemon.stopServer()
def test_cobra_shadowauth(self):
testobj = c_tests.TestObject()
daemon = cobra.CobraDaemon(port=60602)
shadowfile = c_tests.testFileName('shadowpass.txt')
authmod = c_auth_shadow.ShadowFileAuth( shadowfile )
daemon.setAuthModule( authmod )
daemon.fireThread()
objname = daemon.shareObject( testobj )
        # Now let's succeed
authinfo = { 'user':'invisigoth', 'passwd':'secret' }
t = cobra.CobraProxy('cobra://localhost:60602/%s' % objname, authinfo=authinfo)
c_tests.accessTestObject(t)
self.assertEqual( t.getUser(), 'invisigoth')
daemon.stopServer()
def test_cobra_refcount(self):
testobj = c_tests.TestObject()
daemon = cobra.CobraDaemon(port=60660)
objname = daemon.shareObject( testobj, doref=True )
daemon.fireThread()
with cobra.CobraProxy('cobra://localhost:60660/%s' % objname) as t:
c_tests.accessTestObject( t )
self.assertIsNone( daemon.getSharedObject( objname ) )
daemon.stopServer()
#def test_cobra_ssl(self):
#def test_cobra_ssl_clientcert(self):
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "2d9125621f4db870aed1bc0b7443ab2d",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 87,
"avg_line_length": 28.264957264957264,
"alnum_prop": 0.618385243423042,
"repo_name": "bat-serjo/vivisect",
"id": "c6e6eaed6700b1aaf76ea6aca53cef8d3da742ba",
"size": "3307",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cobra/tests/testbasic.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "167795"
},
{
"name": "CSS",
"bytes": "15980"
},
{
"name": "Makefile",
"bytes": "355"
},
{
"name": "Python",
"bytes": "11662904"
},
{
"name": "Shell",
"bytes": "476"
}
],
"symlink_target": ""
} |
"""
Copyright 2017 Pani Networks Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from os import path
from setuptools import setup, find_packages
from codecs import open
import vpcrouter
here = path.abspath(path.dirname(__file__))
try:
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
except (IOError):
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name = 'vpcrouter',
version = vpcrouter.__version__,
url = "http://github.com/romana/vpc-router/",
license = "Apache Software License",
author = "Juergen Brendel",
author_email = "jbrendel@paninetworks.com",
description = "Automated route management, backup routes and "
"route failover for Amazon VPC environments",
long_description = long_description,
packages = find_packages(),
include_package_data = True,
entry_points = {
'console_scripts' : ['vpcrouter=vpcrouter.main:main'],
},
install_requires = [
'argparse==1.2.1',
'boto==2.47.0',
'bottle==0.12.13',
'netaddr==0.7.19',
'wsgiref==0.1.2',
'watchdog==0.8.3',
'multiping>=1.0.4',
'ipaddress>=1.0.17'
],
classifiers = [
'Programming Language :: Python',
'Natural Language :: English',
'Environment :: No Input/Output (Daemon)',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Topic :: System :: Clustering',
'Topic :: System :: Networking'
]
)
| {
"content_hash": "e8d2fe161fcf4861f7058faad3034c30",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 75,
"avg_line_length": 32.083333333333336,
"alnum_prop": 0.6064935064935065,
"repo_name": "romana/vpc-router",
"id": "24785a007acbf290d91cdd16b7683da21ecad9e4",
"size": "2310",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "212538"
},
{
"name": "Shell",
"bytes": "761"
}
],
"symlink_target": ""
} |
import sys
import os
import iopc
import ops
import pprint
def buildModule(pkg_enabled, pkg_name, pkg_args, local_repo_path, output_path):
if pkg_enabled == 1:
print "===Env===[" + pkg_name + "]"
ops.mkdir(output_path)
if(os.path.exists(local_repo_path)):
build_pkg = None
print pkg_name, iopc.PACKAGE_CFG, local_repo_path
build_pkg = ops.loadModule(pkg_name, iopc.PACKAGE_CFG, [local_repo_path])
args = {"pkg_name": pkg_name, "pkg_path": local_repo_path, "output_path": output_path, "pkg_args": pkg_args}
build_pkg.MAIN_ENV(args)
iopc.add_selected_package(pkg_name)
else:
print local_repo_path + " Not exist!"
def Main(args):
cfg = iopc.getCfg(args)
account = iopc.getAccount(args)
params = iopc.getParams(args)
output_dir = iopc.getOutputDir(args)
is_single_package = iopc.isSinglePackage(args)
single_package_name = iopc.getSinglePackageName(args)
packages_dir = cfg['packages_dir']
packages = cfg['packages']
for pkg in packages:
pkg_name = pkg['name']
pkg_enabled = pkg['enabled']
pkg_args = pkg['args']
local_repo_path = os.path.abspath(packages_dir + os.sep + pkg_name)
output_path = os.path.abspath(output_dir + os.sep + pkg_name)
if is_single_package:
if single_package_name == pkg_name:
buildModule(pkg_enabled, pkg_name, pkg_args, local_repo_path, output_path)
return
else:
buildModule(pkg_enabled, pkg_name, pkg_args, local_repo_path, output_path)
| {
"content_hash": "88691b9e7a58a482b5e2093d38ab32be",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 120,
"avg_line_length": 38.80952380952381,
"alnum_prop": 0.6110429447852761,
"repo_name": "YuanYuLin/PackMan_IOPC",
"id": "675b713bdb9c44289303cf546aa4897e49fd90bd",
"size": "1630",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyiopc/iopc_env.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "14803"
},
{
"name": "Lua",
"bytes": "1709"
},
{
"name": "Python",
"bytes": "137178"
},
{
"name": "Shell",
"bytes": "25425"
}
],
"symlink_target": ""
} |
import fnmatch
import os
import sys
import codecs
def process_file(path):
    with open(path, 'rt') as file:
        content = file.read()
    if content.startswith(codecs.BOM_UTF8):
        with open(path, 'wt') as file:
            file.write(content[len(codecs.BOM_UTF8):])
sources = ['Sources/Internal', 'Modules', 'Programs']
for source in sources:
for root, dirnames, filenames in os.walk("../../../" + source):
for ext in ['cpp', 'h', 'mm']:
for filename in fnmatch.filter(filenames, '*.'+ext):
file = os.path.join(root, filename)
process_file(file)
| {
"content_hash": "6a3f25b1d4905f188246ad083743820f",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 64,
"avg_line_length": 30.428571428571427,
"alnum_prop": 0.5993740219092332,
"repo_name": "dava/dava.engine",
"id": "7bdad2bd4a072a85b8179863e38b570a382a8b01",
"size": "662",
"binary": false,
"copies": "1",
"ref": "refs/heads/development",
"path": "Bin/RepoTools/Scripts/remove_BOM.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Ada",
"bytes": "89080"
},
{
"name": "Assembly",
"bytes": "166572"
},
{
"name": "Batchfile",
"bytes": "18562"
},
{
"name": "C",
"bytes": "61621347"
},
{
"name": "C#",
"bytes": "574524"
},
{
"name": "C++",
"bytes": "50229645"
},
{
"name": "CLIPS",
"bytes": "5291"
},
{
"name": "CMake",
"bytes": "11439187"
},
{
"name": "CSS",
"bytes": "32773"
},
{
"name": "Cuda",
"bytes": "37073"
},
{
"name": "DIGITAL Command Language",
"bytes": "27303"
},
{
"name": "Emacs Lisp",
"bytes": "44259"
},
{
"name": "Fortran",
"bytes": "8835"
},
{
"name": "GLSL",
"bytes": "3726"
},
{
"name": "Go",
"bytes": "1235"
},
{
"name": "HTML",
"bytes": "8621333"
},
{
"name": "Java",
"bytes": "232072"
},
{
"name": "JavaScript",
"bytes": "2560"
},
{
"name": "Lua",
"bytes": "43080"
},
{
"name": "M4",
"bytes": "165145"
},
{
"name": "Makefile",
"bytes": "1349214"
},
{
"name": "Mathematica",
"bytes": "4633"
},
{
"name": "Module Management System",
"bytes": "15224"
},
{
"name": "Objective-C",
"bytes": "1909821"
},
{
"name": "Objective-C++",
"bytes": "498191"
},
{
"name": "Pascal",
"bytes": "99390"
},
{
"name": "Perl",
"bytes": "396608"
},
{
"name": "Python",
"bytes": "782784"
},
{
"name": "QML",
"bytes": "43105"
},
{
"name": "QMake",
"bytes": "156"
},
{
"name": "Roff",
"bytes": "71083"
},
{
"name": "Ruby",
"bytes": "22742"
},
{
"name": "SAS",
"bytes": "16030"
},
{
"name": "Shell",
"bytes": "2482394"
},
{
"name": "Slash",
"bytes": "117430"
},
{
"name": "Smalltalk",
"bytes": "5908"
},
{
"name": "TeX",
"bytes": "428489"
},
{
"name": "Vim script",
"bytes": "133255"
},
{
"name": "Visual Basic",
"bytes": "54056"
},
{
"name": "WebAssembly",
"bytes": "13987"
}
],
"symlink_target": ""
} |
from test_framework.mininode import *
from test_framework.test_framework import moorecointestframework
from test_framework.util import *
import logging
'''
in this test we connect to one node over p2p, send it numerous inv's, and
compare the resulting number of getdata requests to a max allowed value. we
test for exceeding 128 blocks in flight, which was the limit an 0.9 client will
reach. [0.10 clients shouldn't request more than 16 from a single peer.]
'''
max_requests = 128
class testmanager(nodeconncb):
# set up nodeconncb callbacks, overriding base class
def on_getdata(self, conn, message):
self.log.debug("got getdata %s" % repr(message))
# log the requests
for inv in message.inv:
if inv.hash not in self.blockreqcounts:
self.blockreqcounts[inv.hash] = 0
self.blockreqcounts[inv.hash] += 1
def on_close(self, conn):
if not self.disconnectokay:
raise earlydisconnecterror(0)
def __init__(self):
nodeconncb.__init__(self)
self.log = logging.getlogger("blockrelaytest")
self.create_callback_map()
def add_new_connection(self, connection):
self.connection = connection
self.blockreqcounts = {}
self.disconnectokay = false
def run(self):
try:
fail = false
self.connection.rpc.generate(1) # leave ibd
numblockstogenerate = [ 8, 16, 128, 1024 ]
for count in range(len(numblockstogenerate)):
current_invs = []
for i in range(numblockstogenerate[count]):
current_invs.append(cinv(2, random.randrange(0, 1<<256)))
if len(current_invs) >= 50000:
self.connection.send_message(msg_inv(current_invs))
current_invs = []
if len(current_invs) > 0:
self.connection.send_message(msg_inv(current_invs))
# wait and see how many blocks were requested
time.sleep(2)
total_requests = 0
with mininode_lock:
for key in self.blockreqcounts:
total_requests += self.blockreqcounts[key]
if self.blockreqcounts[key] > 1:
raise assertionerror("error, test failed: block %064x requested more than once" % key)
if total_requests > max_requests:
raise assertionerror("error, too many blocks (%d) requested" % total_requests)
print "round %d: success (total requests: %d)" % (count, total_requests)
except assertionerror as e:
print "test failed: ", e.args
self.disconnectokay = true
self.connection.disconnect_node()
class maxblocksinflighttest(moorecointestframework):
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("moorecoind", "moorecoind"),
help="binary to test max block requests behavior")
def setup_chain(self):
print "initializing test directory "+self.options.tmpdir
initialize_chain_clean(self.options.tmpdir, 1)
def setup_network(self):
self.nodes = start_nodes(1, self.options.tmpdir,
extra_args=[['-debug', '-whitelist=127.0.0.1']],
binary=[self.options.testbinary])
def run_test(self):
test = testmanager()
test.add_new_connection(nodeconn('127.0.0.1', p2p_port(0), self.nodes[0], test))
networkthread().start() # start up network handling in another thread
test.run()
if __name__ == '__main__':
maxblocksinflighttest().main()
| {
"content_hash": "e152bad243e2c9e0f598d45a7cdfbb4c",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 114,
"avg_line_length": 40.38947368421053,
"alnum_prop": 0.5882199635131613,
"repo_name": "moorecoin/MooreCoinMiningAlgorithm",
"id": "b88200c12da7470781825c43b479127a727229be",
"size": "4006",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qa/rpc-tests/maxblocksinflight.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "508092"
},
{
"name": "C++",
"bytes": "3658254"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "Groff",
"bytes": "19799"
},
{
"name": "HTML",
"bytes": "50621"
},
{
"name": "Java",
"bytes": "2102"
},
{
"name": "Makefile",
"bytes": "59162"
},
{
"name": "Objective-C++",
"bytes": "7238"
},
{
"name": "Protocol Buffer",
"bytes": "2308"
},
{
"name": "Python",
"bytes": "405919"
},
{
"name": "QMake",
"bytes": "2019"
},
{
"name": "Shell",
"bytes": "40662"
},
{
"name": "TypeScript",
"bytes": "5864828"
}
],
"symlink_target": ""
} |
"""
Copyright 2018 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import threading
import time
from icetea_lib.bench import Bench
from interface import interfaceUp, interfaceDown
#from mbed_clitest.tools import test_case
#from mbed_clitest.TestStepError import SkippedTestcaseException
from icetea_lib.TestStepError import TestStepFail
class Testcase(Bench):
def __init__(self):
Bench.__init__(self,
name="TCPSOCKET_ACCEPT",
title = "TCPSOCKET_ACCEPT",
purpose = "Test that TCPSocket::bind(), TCPSocket::listen() and TCPSocket::accept() works",
status = "released",
component= ["mbed-os", "netsocket"],
type="smoke",
subtype="socket",
requirements={
"duts": {
'*': { #requirements for all nodes
"count":2,
"type": "hardware",
"application": {
"name": "TEST_APPS-device-socket_app"
}
},
"1": {"nick": "dut1"},
"2": {"nick": "dut2"}
}
}
)
def setup(self):
interface = interfaceUp(self, ["dut1"])
self.server_ip = interface["dut1"]["ip"]
interface = interfaceUp(self, ["dut2"])
self.client_ip = interface["dut2"]["ip"]
def clientThread(self):
self.logger.info("Starting")
        time.sleep(5)  # wait for the server side to reach accept()
self.command("dut2", "socket " + str(self.client_socket_id) + " open")
self.command("dut2", "socket " + str(self.client_socket_id) + " connect " + str(self.server_ip) + " " + str(self.used_port))
def case(self):
self.used_port = 2000
response = self.command("dut1", "socket new TCPSocket")
self.server_base_socket_id = int(response.parsed['socket_id'])
self.command("dut1", "socket " + str(self.server_base_socket_id) + " open")
response = self.command("dut1", "socket " + str(self.server_base_socket_id) + " bind port " + str(self.used_port), report_cmd_fail = False)
if response.retcode == -1:
if (response.verify_trace("NSAPI_ERROR_UNSUPPORTED", break_in_fail = False)):
raise SkippedTestcaseException("UNSUPPORTED")
else:
                raise TestStepFail("Bind port failed")
self.command("dut1", "socket " + str(self.server_base_socket_id) + " listen")
response = self.command("dut2", "socket new TCPSocket")
self.client_socket_id = int(response.parsed['socket_id'])
#Create a thread which calls client connect()
t = threading.Thread(name='clientThread', target=self.clientThread)
t.start()
response = self.command("dut1", "socket " + str(self.server_base_socket_id) + " accept")
t.join()
self.accept_socket_id = int(response.parsed['socket_id'])
if response.timedelta < 5.0: # Check that socket accept call blocks
raise TestStepFail("Longer response time expected")
self.command("dut1", "socket " + str(self.accept_socket_id) + " send hello")
response = self.command("dut2", "socket " + str(self.client_socket_id) + " recv 5")
data = response.parsed['data'].replace(":","")
if data != "hello":
raise TestStepFail("Received data doesn't match the sent data")
def teardown(self):
response = self.command("dut2", "socket " + str(self.client_socket_id) + " delete")
response = self.command("dut1", "socket " + str(self.server_base_socket_id) + " delete")
response = self.command("dut1", "socket " + str(self.accept_socket_id) + " close")
interfaceDown(self, ["dut1"])
interfaceDown(self, ["dut2"])
| {
"content_hash": "bd23f6c58c06f29cd3208e28d08fa203",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 147,
"avg_line_length": 43,
"alnum_prop": 0.5651601579640193,
"repo_name": "andcor02/mbed-os",
"id": "49cee2ff1e3184555dd10ca31603e91cfd580f00",
"size": "4558",
"binary": false,
"copies": "13",
"ref": "refs/heads/master",
"path": "TEST_APPS/testcases/netsocket/TCPSOCKET_ACCEPT.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "6601399"
},
{
"name": "Batchfile",
"bytes": "22"
},
{
"name": "C",
"bytes": "295194591"
},
{
"name": "C++",
"bytes": "9038670"
},
{
"name": "CMake",
"bytes": "5285"
},
{
"name": "HTML",
"bytes": "2063156"
},
{
"name": "Makefile",
"bytes": "103497"
},
{
"name": "Objective-C",
"bytes": "460244"
},
{
"name": "Perl",
"bytes": "2589"
},
{
"name": "Python",
"bytes": "38809"
},
{
"name": "Shell",
"bytes": "16862"
},
{
"name": "XSLT",
"bytes": "5596"
}
],
"symlink_target": ""
} |
import threading
from scapy.all import *
import subprocess as sp
import Queue
import time
class Sniffer(threading.Thread):
def __init__(self, queue, *args, **kwargs):
threading.Thread.__init__(self, *args, **kwargs)
self.__queue = queue
sp.Popen(['hostapd', '/etc/hostapd/hostapd.conf'])
def run(self):
def record(packet, ignore = set()):
self.__queue.put(("WiFi", packet.src, time.time()))
sniff(prn=record)
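# Illustrative usage sketch (not part of the original module; it assumes hostapd is
# configured at the path above and the process has permission to sniff):
#
#   sightings = Queue.Queue()
#   Sniffer(sightings).start()
#   medium, mac, seen_at = sightings.get()   # e.g. ("WiFi", "aa:bb:cc:dd:ee:ff", 1.5e9)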
| {
"content_hash": "ce445c3c182abfe2adeba15a3542d80c",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 63,
"avg_line_length": 29.4375,
"alnum_prop": 0.6157112526539278,
"repo_name": "zdavidli/HomeHoneypot",
"id": "7e60f4dff109864a198f6841b5055900935eae01",
"size": "494",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wifisniffer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "7543"
},
{
"name": "Python",
"bytes": "6139"
}
],
"symlink_target": ""
} |
from django.core.exceptions import PermissionDenied, ImproperlyConfigured
from mellow.roles import Role
def minimum_role_required(role):
def _minimum_role_required(view_func):
def wrapped_view_func(request, *args, **kwargs):
# Ensure that the user is authenticated
if not request.user.is_authenticated():
raise PermissionDenied
# Get the required role and the user's role
required_role = Role.get_role(role)
user_role = Role.get_role(request.user.role)
if not user_role.supersedes(required_role):
raise PermissionDenied
return view_func(request, *args, **kwargs)
return wrapped_view_func
return _minimum_role_required
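# Illustrative usage sketch (not part of the original module; the view function
# and the 'manager' role name are hypothetical and must exist in your Role setup):
#
#   @minimum_role_required('manager')
#   def dashboard(request):
#       ...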
class MinimumRoleRequiredMixin(object):
role = None
def get_required_role(self):
if self.role:
return Role.get_role(self.role)
else:
raise ImproperlyConfigured("Views which inherit from MinimumRoleRequiredMixin must have a \"role\" member.")
def get_user_role(self, request):
return Role.get_role(request.user.role)
def dispatch(self, request, **kwargs):
# Ensure that the user is authenticated
if not request.user.is_authenticated():
raise PermissionDenied
# Get the required role and the user's role
required_role = self.get_required_role()
user_role = self.get_user_role(request)
if not user_role.supersedes(required_role):
raise PermissionDenied
return super(MinimumRoleRequiredMixin, self).dispatch(request, **kwargs)
| {
"content_hash": "dc28010d473ba94f287f920103f80b98",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 120,
"avg_line_length": 32.1764705882353,
"alnum_prop": 0.6489945155393053,
"repo_name": "flagshipenterprise/django-mellow-auth",
"id": "570b736066c03438163fe1515b105d153c31302e",
"size": "1641",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mellow/mixins.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "32212"
}
],
"symlink_target": ""
} |
import contextlib
@contextlib.contextmanager
def nested(*contexts):
with contextlib.ExitStack() as stack:
yield [stack.enter_context(c) for c in contexts]
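# Illustrative usage sketch (not part of the original module): enter several
# context managers in a single `with` statement, mirroring the contextlib.nested
# helper that Python 2 provided:
#
#   with nested(open('a.txt'), open('b.txt')) as (fa, fb):
#       ...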
| {
"content_hash": "b5bb3b160cc18eb19fc5ed17dbc863ea",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 56,
"avg_line_length": 24.142857142857142,
"alnum_prop": 0.7337278106508875,
"repo_name": "openstack/oslo.db",
"id": "47cf511727062724e08d4fd34739f4f9995b2007",
"size": "810",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oslo_db/tests/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "660750"
},
{
"name": "Shell",
"bytes": "2301"
}
],
"symlink_target": ""
} |
"""
An example plugin for Spock
Demonstrates the following functionality:
- Receiving chat messages
- Sending chat commands
- Using inventory
- Moving to location
- Triggering a periodic event using a timer
- Registering for an event upon startup
- Placing blocks
- Reading blocks
"""
__author__ = 'Cosmo Harrigan, Morgan Creekmore'
import logging
# Import any modules that you need in your plugin
from spock.mcmap import mapdata
from spock.plugins.base import PluginBase
from spock.utils import pl_announce
from spock.vector import Vector3
# Required import
logger = logging.getLogger('spock')
# The bot will walk to this starting position. Set it to a sensible
# location for your world file. The format is: (x, y, z)
TARGET_COORDINATES = Vector3(10, 2, 10)
# Required class decorator
@pl_announce('ExamplePlugin')
class ExamplePlugin(PluginBase):
# Require other plugins that you want use later in the plugin
requires = ('Movement', 'Timers', 'World', 'ClientInfo', 'Inventory',
'Interact', 'Chat')
# Example of registering an event handler
# Event types are enumerated here:
# https://github.com/SpockBotMC/SpockBot/blob/master/spock/mcp
# /mcdata.py#L213
events = {
# This event will be triggered when a chat message is received
# from the server
'PLAY<Chat Message': 'chat_event_handler',
# This event will be triggered after authentication when the bot
# joins the game
'client_join_game': 'perform_initial_actions',
}
def __init__(self, ploader, settings):
# Used to init the PluginBase
super(ExamplePlugin, self).__init__(ploader, settings)
# Example of registering a timer that triggers a method periodically
frequency = 5 # Number of seconds between triggers
self.timers.reg_event_timer(frequency, self.periodic_event_handler)
def perform_initial_actions(self, name, data):
"""Sends a chat message, then moves to target coordinates."""
# Send a chat message
self.chat.chat('Bot active')
# Walk to target coordinates
self.movement.move_location = TARGET_COORDINATES
def chat_event_handler(self, name, data):
"""Called when a chat message occurs in the game"""
logger.info('Chat message received: {0}'.format(data))
def periodic_event_handler(self):
"""Triggered every 5 seconds by a timer"""
# Search the hotbar for cobblestone
slot = self.inventory.find_slot(4, self.inventory.window.hotbar_slots)
logger.info(slot)
# Switch to slot with cobblestone
if slot is not None:
self.inventory.select_active_slot(slot)
# Switch to first slot because there is no cobblestone in hotbar
else:
self.inventory.select_active_slot(0)
logger.info('My position: {0} pitch: {1} yaw: {2}'.format(
self.clientinfo.position,
self.clientinfo.position.pitch,
self.clientinfo.position.yaw))
# Place a block in front of the player
self.interact.place_block(self.clientinfo.position
+ Vector3(-1, -1, 0))
# Read a block under the player
block_pos = self.clientinfo.position
block_id, meta = self.world.get_block(block_pos.x,
block_pos.y,
block_pos.z)
block_at = mapdata.get_block(block_id, meta)
self.interact.chat('Found block %s at %s' % (block_at.display_name,
block_pos))
| {
"content_hash": "db9f3da015f392eac630a125bc32f3fa",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 78,
"avg_line_length": 35.18269230769231,
"alnum_prop": 0.6381525006832468,
"repo_name": "MrSwiss/SpockBot",
"id": "4106fdba8bd41588d07ace8a6054fafcd148d52d",
"size": "3659",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/basic/example_plugin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "283412"
}
],
"symlink_target": ""
} |
#!/usr/bin/env python
import sys
import os
from heapq import heappush, heappop
from collections import defaultdict
filenames = sys.argv[1:]
if len(filenames) == 0:
print "-- Huffman encoding generation tool -- "
print "Usage: ./HuffGen.py file1.txt file2.txt file3.txt ..."
sys.exit(1)
hist = defaultdict(int)
def addLine(line):
"""
Analyze the frequency of letters in \p line.
"""
for c in line: hist[c] += 1
# Read all of the input files and analyze the content of the files.
for f in filenames:
for line in open(f):
addLine(line.rstrip('\n').strip())
# Sort all of the characters by their appearance frequency.
sorted_chars = sorted(hist.items(), key=lambda x: x[1] * len(x[0]) , reverse=True)
class Node:
""" This is a node in the Huffman tree """
def __init__(self, hits, value = None, l = None, r = None):
self.hit = hits # Number of occurrences for this node.
self.left = l # Left subtree.
self.right = r # Right subtree.
self.val = value # Character value for leaf nodes.
def merge(Left, Right):
""" This is the merge phase of the huffman encoding algorithm
This (static) method creates a new node that combines \p Left and \p Right.
"""
return Node(Left.hit + Right.hit, None, Left, Right)
  def __cmp__(self, other):
    """ Compare this node to another node based on their frequency. """
    # heapq needs a genuine three-way comparison here; returning a boolean
    # would defeat the min-heap ordering by frequency.
    return cmp(self.hit, other.hit)
def getMaxEncodingLength(self):
""" Return the length of the longest possible encoding word"""
v = 0
if self.left:
v = max(v, 1 + self.left .getMaxEncodingLength())
if self.right:
v = max(v, 1 + self.right.getMaxEncodingLength())
return v
def generate_decoder(self, depth):
"""
Generate the CPP code for the decoder.
"""
space = " " * depth
if self.val:
return space + "return {'%s', %d};" % (str(self.val), depth)
T = """{0}if ((tailbits & 1) == {1}) {{\n{0} tailbits/=2;\n{2}\n{0}}}"""
sb = ""
if self.left:
sb += T.format(space, 0, self.left .generate_decoder(depth + 1)) + "\n"
if self.right:
sb += T.format(space, 1, self.right.generate_decoder(depth + 1))
return sb
def generate_encoder(self, stack):
"""
Generate the CPP code for the encoder.
"""
if self.val:
sb = "if (ch == '" + str(self.val) +"') {"
sb += "/*" + "".join(map(str, reversed(stack))) + "*/ "
# Encode the bit stream as a numeric value. Updating the APInt in one go
# is much faster than inserting one bit at a time.
numeric_val = 0
for bit in reversed(stack): numeric_val = numeric_val * 2 + bit
# num_bits - the number of bits that we use in the bitstream.
# bits - the numeric value of the bits that we encode in the bitstream.
sb += "bits = %d; num_bits = %d; " % (numeric_val, len(stack))
sb += "return; }\n"
return sb
sb = ""
if self.left:
sb += self.left .generate_encoder(stack + [0])
if self.right:
sb += self.right.generate_encoder(stack + [1])
return sb
# Only accept these characters into the tree.
charset = r"0123456789_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ$"
# Convert the characters and frequencies to a list of trees
# where each tree is a node that holds a single character.
nodes = []
for c in sorted_chars:
if c[0] in charset:
n = Node(c[1],c[0])
heappush(nodes, n)
# This is the Merge phase of the Huffman algorithm:
while len(nodes) > 1:
v1 = heappop(nodes)
v2 = heappop(nodes)
nv = Node.merge(v1, v2)
heappush(nodes, nv)
# Calculate the reverse mapping between the char to its index.
index_of_char = ["-1"] * 256
idx = 0
for c in charset:
index_of_char[ord(c)] = str(idx)
idx+=1
print "#ifndef SWIFT_MANGLER_HUFFMAN_H"
print "#define SWIFT_MANGLER_HUFFMAN_H"
print "#include <assert.h>"
print "#include <utility>"
print "#include \"llvm/ADT/APInt.h\""
print "using APInt = llvm::APInt;"
print "// This file is autogenerated. Do not modify this file."
print "// Processing text files:", " ".join([os.path.basename(f) for f in filenames])
print "namespace Huffman {"
print "// The charset that the fragment indices can use:"
print "const unsigned CharsetLength = %d;" % len(charset)
print "const unsigned LongestEncodingLength = %d;" % (nodes[0].getMaxEncodingLength())
print "const char *Charset = \"%s\";" % charset
print "const int IndexOfChar[] = {", ",".join(index_of_char),"};"
print "std::pair<char, unsigned> variable_decode(uint64_t tailbits) {\n", nodes[0].generate_decoder(0), "\n assert(false); return {0, 0};\n}"
print "void variable_encode(uint64_t &bits, uint64_t &num_bits, char ch) {\n", nodes[0].generate_encoder([]),"assert(false);\n}"
print "} // namespace"
print "#endif /* SWIFT_MANGLER_HUFFMAN_H */"
| {
"content_hash": "deaa7d5d1fb64a337c424c49ccbb271f",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 141,
"avg_line_length": 34.142857142857146,
"alnum_prop": 0.6422594142259415,
"repo_name": "adrfer/swift",
"id": "5e0cadc6bcd0c6b0770a25c357534cbcaa37deb3",
"size": "4780",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/name-compression/HuffGen.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "39747"
},
{
"name": "C++",
"bytes": "17630958"
},
{
"name": "CMake",
"bytes": "199423"
},
{
"name": "DTrace",
"bytes": "1427"
},
{
"name": "Emacs Lisp",
"bytes": "33812"
},
{
"name": "LLVM",
"bytes": "45324"
},
{
"name": "Makefile",
"bytes": "1841"
},
{
"name": "Objective-C",
"bytes": "166580"
},
{
"name": "Objective-C++",
"bytes": "150686"
},
{
"name": "Perl",
"bytes": "2219"
},
{
"name": "Python",
"bytes": "290022"
},
{
"name": "Ruby",
"bytes": "2087"
},
{
"name": "Shell",
"bytes": "110627"
},
{
"name": "Swift",
"bytes": "10708685"
},
{
"name": "VimL",
"bytes": "10707"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class OutliercolorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="outliercolor", parent_name="violin.marker", **kwargs
):
super(OutliercolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "style"),
**kwargs,
)
| {
"content_hash": "75510ff9638d8d54079c532292fe6f9f",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 79,
"avg_line_length": 33.23076923076923,
"alnum_prop": 0.6180555555555556,
"repo_name": "plotly/plotly.py",
"id": "3ffa778f15a382f8ec069ec7a29f9b8ffd90e108",
"size": "432",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/violin/marker/_outliercolor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import sys
import os
import re
import cv2
import numpy as np
class IkaUtils(object):
@staticmethod
def isWindows():
try:
os.uname()
except AttributeError:
return True
return False
@staticmethod
def dprint(text):
print(text, file=sys.stderr)
@staticmethod
def baseDirectory():
base_directory = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
base_directory = re.sub('[\\/]+$', '', base_directory)
if os.path.isfile(base_directory):
# In this case, this version of IkaLog is py2exe'd,
# and base_directory is still pointing at the executable.
base_directory = os.path.dirname(base_directory)
return base_directory
# Find the local player.
#
# @param context IkaLog Context.
    # @return The player information (dictionary) if found.
@staticmethod
def getMyEntryFromContext(context):
for e in context['game']['players']:
if e['me']:
return e
return None
# Get player's title.
#
# @param playerEntry The player.
# @return Title in string. Returns None if playerEntry doesn't have title data.
@staticmethod
def playerTitle(playerEntry):
if playerEntry is None:
return None
if not (('gender' in playerEntry) and ('prefix' in playerEntry)):
return None
prefix = re.sub('の', '', playerEntry['prefix'])
return "%s%s" % (prefix, playerEntry['gender'])
@staticmethod
def map2text(map, unknown=None, lang="ja"):
if map is None:
if unknown is None:
unknown = "?"
return unknown
return map['name']
@staticmethod
def rule2text(rule, unknown=None, lang="ja"):
if rule is None:
if unknown is None:
unknown = "?"
return unknown
return rule['name']
@staticmethod
def cropImageGray(img, left, top, width, height):
if len(img.shape) > 2 and img.shape[2] != 1:
return cv2.cvtColor(
img[top:top + height, left:left + width],
cv2.COLOR_BGR2GRAY
)
return img[top:top + height, left:left + width]
@staticmethod
def matchWithMask(img, mask, threshold=99.0, orig_threshold=70.0, debug=False):
if len(img.shape) > 2 and img.shape[2] != 1:
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Check false-positive
orig_hist = cv2.calcHist([img], [0], None, [3], [0, 256])
match2 = orig_hist[2] / np.sum(orig_hist)
if match2 > orig_threshold:
# False-Positive condition.
#print("original %f > orig_threshold %f" % (match2, orig_threshold))
return False
ret, thresh1 = cv2.threshold(img, 230, 255, cv2.THRESH_BINARY)
added = thresh1 + mask
hist = cv2.calcHist([added], [0], None, [3], [0, 256])
match = hist[2] / np.sum(hist)
if debug and (match > threshold):
print("match2 %f match %f > threshold %f" %
(match2, match, threshold))
cv2.imshow('match_img', img)
cv2.imshow('match_mask', mask)
cv2.imshow('match_added', added)
# cv2.waitKey()
return match > threshold
@staticmethod
def loadMask(file, left, top, width, height):
mask = cv2.imread(file)
if mask is None:
            print("Failed to load mask data %s" % file)
            # raise an exception
mask = mask[top:top + height, left:left + width]
# BGR to GRAY
if mask.shape[2] > 1:
mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
return mask
@staticmethod
def getWinLoseText(won, win_text="勝ち", lose_text="負け", unknown_text="不明"):
if won is None:
return unknown_text
return win_text if won else lose_text
@staticmethod
def writeScreenshot(destfile, frame):
try:
cv2.imwrite(destfile, frame)
return os.path.isfile(destfile)
except:
print("Screenshot: failed")
return False
| {
"content_hash": "3b9226b7fe285579338fe29f82823037",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 84,
"avg_line_length": 29.01360544217687,
"alnum_prop": 0.564126611957796,
"repo_name": "mzsm/IkaLog",
"id": "0914c79046b5043e34d3fbb8de81b57ed24c6979",
"size": "4978",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ikalog/utils/ikautils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "233853"
}
],
"symlink_target": ""
} |
"""
ServiceType module
"""
from ovs.dal.dataobject import DataObject
from ovs.dal.structures import Property
class ServiceType(DataObject):
"""
A ServiceType represents some kind of service that needs to be managed by the framework.
"""
__properties = [Property('name', str, doc='Name of the ServiceType.')]
__relations = []
__dynamics = []
| {
"content_hash": "20e6bad9553cc75c3e4e79b2310ff01f",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 92,
"avg_line_length": 26.214285714285715,
"alnum_prop": 0.6839237057220708,
"repo_name": "sql-analytics/openvstorage",
"id": "4ee54c2cb2e3bcadbbf70264b175310bf1886be2",
"size": "948",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ovs/dal/hybrids/servicetype.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "23516"
},
{
"name": "CSS",
"bytes": "10475"
},
{
"name": "Diff",
"bytes": "816"
},
{
"name": "HTML",
"bytes": "186526"
},
{
"name": "JavaScript",
"bytes": "710424"
},
{
"name": "Makefile",
"bytes": "1269"
},
{
"name": "Python",
"bytes": "1633348"
},
{
"name": "Shell",
"bytes": "10567"
}
],
"symlink_target": ""
} |
class Solution(object):
def findSubsequences(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
def findSubsequencesHelper(nums, pos, seq, result):
if len(seq) >= 2:
result.append(list(seq))
lookup = set()
for i in xrange(pos, len(nums)):
if (not seq or nums[i] >= seq[-1]) and \
nums[i] not in lookup:
lookup.add(nums[i])
seq.append(nums[i])
findSubsequencesHelper(nums, i+1, seq, result)
seq.pop()
result, seq = [], []
findSubsequencesHelper(nums, 0, seq, result)
return result
| {
"content_hash": "b82f42cebec2d8e8491683a15e00ab33",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 66,
"avg_line_length": 34.857142857142854,
"alnum_prop": 0.4685792349726776,
"repo_name": "kamyu104/LeetCode",
"id": "c546bf9d13ad8a33bbd5b65d81d5c77989283ed8",
"size": "1405",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/increasing-subsequences.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "1008761"
},
{
"name": "Go",
"bytes": "1907"
},
{
"name": "Java",
"bytes": "8367"
},
{
"name": "Python",
"bytes": "1421980"
},
{
"name": "SQLPL",
"bytes": "822"
},
{
"name": "Shell",
"bytes": "3218"
}
],
"symlink_target": ""
} |
""" Defines the UStr type and HasUniqueStrings mixin class for efficiently
creating lists of objects containing traits whose string values must be
unique within the list.
"""
#-------------------------------------------------------------------------------
# Imports:
#-------------------------------------------------------------------------------
from __future__ import absolute_import
from .trait_base import is_str
from .has_traits import HasTraits
from .trait_value import TraitValue, TypeValue
from .trait_types import List
from .trait_handlers import TraitType, NoDefaultSpecified
#-------------------------------------------------------------------------------
# 'UStr' class:
#-------------------------------------------------------------------------------
class UStr ( TraitType ):
""" Trait type that ensures that a value assigned to a trait is unique
within the list it belongs to.
"""
#: The type value to assign to restore the original list item type when a
#: list item is removed from the monitored list:
str_type = TraitValue()
#: The informational text describing the trait:
info_text = 'a unique string'
def __init__ ( self, owner, list_name, str_name,
default_value = NoDefaultSpecified, **metadata ):
""" Initializes the type.
"""
super( UStr, self ).__init__( default_value, **metadata )
self.owner = owner
self.list_name = list_name
self.str_name = str_name
self.ustr_type = TypeValue( self )
self.names = dict( [ ( getattr( item, str_name ), item )
for item in getattr( owner, list_name ) ] )
self.roots = {}
self.available = {}
owner.on_trait_change( self._items_modified, list_name + '[]' )
def validate ( self, object, name, value ):
""" Ensures that a value being assigned to a trait is a unique string.
"""
if isinstance( value, basestring ):
names = self.names
old_name = getattr( object, name )
if names.get( old_name ) is object:
self._remove( old_name )
if value not in names:
names[ value ] = object
return value
available = self.available.get( value )
while True:
if available is None:
new_value = None
break
index = available.pop()
if len( available ) == 0:
del self.available[ value ]
available = None
new_value = '%s_%d' % ( value, index )
if new_value not in names:
break
if new_value is None:
self.roots[ value ] = index = \
self.roots.setdefault( value, 1 ) + 1
new_value = '%s_%d' % ( value, index )
names[ new_value ] = object
return new_value
self.error( object, name, value )
def _remove ( self, name ):
""" Removes a specified name.
"""
self.names.pop( name, None )
col = name.rfind( '_' )
if col >= 0:
try:
index = int( name[ col + 1: ] )
prefix = name[ : col ]
if prefix in self.roots:
if prefix not in self.available:
self.available[ prefix ] = set()
self.available[ prefix ].add( index )
except:
pass
def _items_modified ( self, object, name, removed, added ):
""" Handles items being added to or removed from the monitored list.
"""
str_name = self.str_name
str_type = self.str_type
ustr_type = self.ustr_type
for item in removed:
setattr( item, str_name, str_type )
self._remove( getattr( item, str_name ) )
for item in added:
setattr( item, str_name, ustr_type )
setattr( item, str_name, getattr( item, str_name ) )
#-------------------------------------------------------------------------------
# 'HasUniqueStrings' class:
#-------------------------------------------------------------------------------
class HasUniqueStrings ( HasTraits ):
""" Mixin or base class for objects containing lists with items containing
string valued traits that must be unique.
List traits within the class that contain items which have string traits
which must be unique should indicate this by attaching metadata of the
form::
unique_string = 'trait1, trait2, ..., traitn'
where each 'traiti' value is the name of a trait within each list item
that must contain unique string data.
For example::
usa = List( State, unique_string = 'name, abbreviation' )
"""
#-- Private Traits ---------------------------------------------------------
# List of UStr traits that have been attached to object list traits:
_ustr_traits = List
#-- HasTraits Object Initializer -------------------------------------------
def traits_init ( self ):
""" Adds any UStrMonitor objects to list traits with 'unique_string'
metadata.
"""
super( HasUniqueStrings, self ).traits_init()
for name, trait in self.traits( unique_string = is_str ).items():
for str_name in trait.unique_string.split( ',' ):
self._ustr_traits.append( UStr( self, name, str_name.strip() ) )
items = getattr( self, name )
if len( items ) > 0:
setattr( self, name, [] )
setattr( self, name, items )
| {
"content_hash": "1761d7d58f01285ac5075f2f59037cfb",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 80,
"avg_line_length": 35.98757763975155,
"alnum_prop": 0.49309630652399034,
"repo_name": "burnpanck/traits",
"id": "970aada5ea8a5c490930d5a0ed2c7bcb17d8349e",
"size": "6420",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "traits/ustr_trait.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "660"
},
{
"name": "C",
"bytes": "186780"
},
{
"name": "Python",
"bytes": "1085281"
}
],
"symlink_target": ""
} |
class ActiveSliceItemController(object):
def __init__(self, active_slice_item, model_part):
self._active_slice_item = active_slice_item
self._model_part = model_part
self.connectSignals()
def connectSignals(self):
a_s_i = self._active_slice_item
m_p = self._model_part
m_p.partActiveSliceResizeSignal.connect(a_s_i.updateRectSlot)
m_p.partActiveSliceIndexSignal.connect(a_s_i.updateIndexSlot)
m_p.partStrandChangedSignal.connect(a_s_i.strandChangedSlot)
# end def
def disconnectSignals(self):
a_s_i = self._active_slice_item
m_p = self._model_part
m_p.partActiveSliceResizeSignal.disconnect(a_s_i.updateRectSlot)
m_p.partActiveSliceIndexSignal.disconnect(a_s_i.updateIndexSlot)
m_p.partStrandChangedSignal.disconnect(a_s_i.strandChangedSlot)
# end def
# end class | {
"content_hash": "1dc1b26a16a8bdf131675a5cc9abdf7b",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 72,
"avg_line_length": 37.125,
"alnum_prop": 0.6835016835016835,
"repo_name": "JMMolenaar/cadnano2.5",
"id": "36e14b2fef65135cba5b37e4df8d7f68f8dd22fb",
"size": "891",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cadnano/gui/controllers/itemcontrollers/activesliceitemcontroller.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2645"
},
{
"name": "Python",
"bytes": "1501551"
},
{
"name": "QMake",
"bytes": "3719"
}
],
"symlink_target": ""
} |
from unittest import TestCase
import simplejson as json
from app.storage.storage_encryption import StorageEncryption
# pylint: disable=W0212
class TestStorageEncryption(TestCase):
def setUp(self):
super().setUp()
self.encrypter = StorageEncryption('user_id', 'user_ik', 'pepper')
def test_encrypted_storage_requires_user_id(self):
with self.assertRaises(ValueError):
StorageEncryption(None, 'key', 'pepper')
def test_encrypted_storage_requires_user_ik(self):
with self.assertRaises(ValueError):
StorageEncryption('1', None, 'pepper')
def test_generate_key(self):
key1 = StorageEncryption('user1', 'user_ik_1', 'pepper').key._key['k']
key2 = StorageEncryption('user1', 'user_ik_1', 'pepper').key._key['k']
key3 = StorageEncryption('user2', 'user_ik_2', 'pepper').key._key['k']
self.assertEqual(key1, key2)
self.assertNotEqual(key1, key3)
self.assertNotEqual(key2, key3)
def test_generate_key_different_user_ids(self):
key1 = StorageEncryption('user1', 'user_ik_1', 'pepper').key._key['k']
key2 = StorageEncryption('user1', 'user_ik_1', 'pepper').key._key['k']
key3 = StorageEncryption('user2', 'user_ik_1', 'pepper').key._key['k']
self.assertEqual(key1, key2)
self.assertNotEqual(key1, key3)
self.assertNotEqual(key2, key3)
def test_generate_key_different_user_iks(self):
key1 = StorageEncryption('user1', 'user_ik_1', 'pepper').key._key['k']
key2 = StorageEncryption('user1', 'user_ik_1', 'pepper').key._key['k']
key3 = StorageEncryption('user1', 'user_ik_2', 'pepper').key._key['k']
self.assertEqual(key1, key2)
self.assertNotEqual(key1, key3)
self.assertNotEqual(key2, key3)
def test_encryption_decryption(self):
data = {
'data1': 'Test Data One',
'data2': 'Test Data Two'
}
encrypted_data = self.encrypter.encrypt_data(data)
self.assertNotEqual(encrypted_data, data)
self.assertIsInstance(encrypted_data, str)
decrypted_data = self.encrypter.decrypt_data(encrypted_data)
decrypted_data = json.loads(decrypted_data)
self.assertEqual(data, decrypted_data)
def test_no_pepper(self):
with self.assertRaises(ValueError):
self.encrypter = StorageEncryption('user_id', 'user_ik', None)
| {
"content_hash": "bc434b4695db008e911b39ff5f3f7402",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 78,
"avg_line_length": 39.90163934426229,
"alnum_prop": 0.637633525061627,
"repo_name": "ONSdigital/eq-survey-runner",
"id": "d8a2893b115ffd6a31ada54d2875ed9f9f619e98",
"size": "2434",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/app/storage/test_storage_encryption.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "520"
},
{
"name": "HTML",
"bytes": "236859"
},
{
"name": "JavaScript",
"bytes": "423942"
},
{
"name": "Python",
"bytes": "1409591"
},
{
"name": "SCSS",
"bytes": "25858"
},
{
"name": "Shell",
"bytes": "10196"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
url(r'^$', 'boilerplate.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
(r'^static/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.STATIC_ROOT}),
(r'^media/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),
)
| {
"content_hash": "2c2e26bb7613c772657c32402f3317e0",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 67,
"avg_line_length": 31.555555555555557,
"alnum_prop": 0.6461267605633803,
"repo_name": "vellonce/django1.6-bootstrap",
"id": "fffef42d5859e4e037b470c4e5a06cfda0d81d4a",
"size": "568",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "boilerplate/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4903"
}
],
"symlink_target": ""
} |
import errno
import os
import shutil
import struct
from binascii import hexlify, unhexlify
from collections import defaultdict
from configparser import ConfigParser
from datetime import datetime
from functools import partial
from itertools import islice
import msgpack
from .constants import * # NOQA
from .hashindex import NSIndex
from .helpers import Error, ErrorWithTraceback, IntegrityError, format_file_size, parse_file_size
from .helpers import Location
from .helpers import ProgressIndicatorPercent
from .helpers import bin_to_hex
from .helpers import hostname_is_unique
from .helpers import secure_erase, truncate_and_unlink
from .locking import Lock, LockError, LockErrorT
from .logger import create_logger
from .lrucache import LRUCache
from .platform import SaveFile, SyncFile, sync_dir, safe_fadvise
from .algorithms.checksums import crc32
from .crypto.file_integrity import IntegrityCheckedFile, FileIntegrityError
logger = create_logger(__name__)
MAGIC = b'BORG_SEG'
MAGIC_LEN = len(MAGIC)
TAG_PUT = 0
TAG_DELETE = 1
TAG_COMMIT = 2
LIST_SCAN_LIMIT = 100000 # repo.list() / .scan() result count limit the borg client uses
FreeSpace = partial(defaultdict, int)
class Repository:
"""
Filesystem based transactional key value store
Transactionality is achieved by using a log (aka journal) to record changes. The log is a series of numbered files
called segments. Each segment is a series of log entries. The segment number together with the offset of each
entry relative to its segment start establishes an ordering of the log entries. This is the "definition" of
time for the purposes of the log.
Log entries are either PUT, DELETE or COMMIT.
A COMMIT is always the final log entry in a segment and marks all data from the beginning of the log until the
segment ending with the COMMIT as committed and consistent. The segment number of a segment ending with a COMMIT
is called the transaction ID of that commit, and a segment ending with a COMMIT is called committed.
When reading from a repository it is first checked whether the last segment is committed. If it is not, then
all segments after the last committed segment are deleted; they contain log entries whose consistency is not
established by a COMMIT.
Note that the COMMIT can't establish consistency by itself, but only manages to do so with proper support from
the platform (including the hardware). See platform.base.SyncFile for details.
A PUT inserts a key-value pair. The value is stored in the log entry, hence the repository implements
full data logging, meaning that all data is consistent, not just metadata (which is common in file systems).
A DELETE marks a key as deleted.
For a given key only the last entry regarding the key, which is called current (all other entries are called
superseded), is relevant: If there is no entry or the last entry is a DELETE then the key does not exist.
Otherwise the last PUT defines the value of the key.
By superseding a PUT (with either another PUT or a DELETE) the log entry becomes obsolete. A segment containing
such obsolete entries is called sparse, while a segment containing no such entries is called compact.
Sparse segments can be compacted and thereby disk space freed. This destroys the transaction for which the
    superseded entries were current.
On disk layout:
dir/README
dir/config
dir/data/<X // SEGMENTS_PER_DIR>/<X>
dir/index.X
dir/hints.X
File system interaction
-----------------------
LoggedIO generally tries to rely on common behaviours across transactional file systems.
Segments that are deleted are truncated first, which avoids problems if the FS needs to
allocate space to delete the dirent of the segment. This mostly affects CoW file systems,
traditional journaling file systems have a fairly good grip on this problem.
Note that deletion, i.e. unlink(2), is atomic on every file system that uses inode reference
    counts, which includes pretty much all of them. To remove a dirent the inode's refcount has
to be decreased, but you can't decrease the refcount before removing the dirent nor can you
decrease the refcount after removing the dirent. File systems solve this with a lock,
and by ensuring it all stays within the same FS transaction.
Truncation is generally not atomic in itself, and combining truncate(2) and unlink(2) is of
course never guaranteed to be atomic. Truncation in a classic extent-based FS is done in
roughly two phases, first the extents are removed then the inode is updated. (In practice
this is of course way more complex).
LoggedIO gracefully handles truncate/unlink splits as long as the truncate resulted in
a zero length file. Zero length segments are considered to not exist, while LoggedIO.cleanup()
will still get rid of them.
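    Example (an illustrative sketch only; it assumes a local repository path, and uses
    the object-level put/get accessors defined further down in this class):
        with Repository('/path/to/repo', create=True, exclusive=True) as repo:
            key = bytes(32)               # object ids are 32-byte keys
            repo.put(key, b'some data')   # appended to the current segment as a PUT entry
            repo.commit()                 # writes the COMMIT entry, compacts, saves the index
            data = repo.get(key)          # looked up in the index as a (segment, offset) pair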
"""
class DoesNotExist(Error):
"""Repository {} does not exist."""
class AlreadyExists(Error):
"""A repository already exists at {}."""
class InvalidRepository(Error):
"""{} is not a valid repository. Check repo config."""
class CheckNeeded(ErrorWithTraceback):
"""Inconsistency detected. Please run "borg check {}"."""
class ObjectNotFound(ErrorWithTraceback):
"""Object with key {} not found in repository {}."""
def __init__(self, id, repo):
if isinstance(id, bytes):
id = bin_to_hex(id)
super().__init__(id, repo)
class InsufficientFreeSpaceError(Error):
"""Insufficient free space to complete transaction (required: {}, available: {})."""
class StorageQuotaExceeded(Error):
"""The storage quota ({}) has been exceeded ({}). Try deleting some archives."""
def __init__(self, path, create=False, exclusive=False, lock_wait=None, lock=True,
append_only=False, storage_quota=None):
self.path = os.path.abspath(path)
self._location = Location('file://%s' % self.path)
self.io = None # type: LoggedIO
self.lock = None
self.index = None
# This is an index of shadowed log entries during this transaction. Consider the following sequence:
# segment_n PUT A, segment_x DELETE A
# After the "DELETE A" in segment_x the shadow index will contain "A -> [n]".
self.shadow_index = {}
self._active_txn = False
self.lock_wait = lock_wait
self.do_lock = lock
self.do_create = create
self.created = False
self.exclusive = exclusive
self.append_only = append_only
self.storage_quota = storage_quota
self.storage_quota_use = 0
self.transaction_doomed = None
def __del__(self):
if self.lock:
self.close()
assert False, "cleanup happened in Repository.__del__"
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__, self.path)
def __enter__(self):
if self.do_create:
self.do_create = False
self.create(self.path)
self.created = True
self.open(self.path, bool(self.exclusive), lock_wait=self.lock_wait, lock=self.do_lock)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is not None:
no_space_left_on_device = exc_type is OSError and exc_val.errno == errno.ENOSPC
# The ENOSPC could have originated somewhere else besides the Repository. The cleanup is always safe, unless
# EIO or FS corruption ensues, which is why we specifically check for ENOSPC.
if self._active_txn and no_space_left_on_device:
logger.warning('No space left on device, cleaning up partial transaction to free space.')
cleanup = True
else:
cleanup = False
self._rollback(cleanup=cleanup)
self.close()
@property
def id_str(self):
return bin_to_hex(self.id)
def check_can_create_repository(self, path):
"""
Raise self.AlreadyExists if a repository already exists at *path* or any parent directory.
Checking parent directories is done for two reasons:
(1) It's just a weird thing to do, and usually not intended. A Borg using the "parent" repository
may be confused, or we may accidentally put stuff into the "data/" or "data/<n>/" directories.
(2) When implementing repository quotas (which we currently don't), it's important to prohibit
folks from creating quota-free repositories. Since no one can create a repository within another
repository, user's can only use the quota'd repository, when their --restrict-to-path points
at the user's repository.
"""
if os.path.exists(path) and (not os.path.isdir(path) or os.listdir(path)):
raise self.AlreadyExists(path)
while True:
# Check all parent directories for Borg's repository README
previous_path = path
# Thus, path = previous_path/..
path = os.path.abspath(os.path.join(previous_path, os.pardir))
if path == previous_path:
# We reached the root of the directory hierarchy (/.. = / and C:\.. = C:\).
break
try:
# Use binary mode to avoid troubles if a README contains some stuff not in our locale
with open(os.path.join(path, 'README'), 'rb') as fd:
# Read only the first ~100 bytes (if any), in case some README file we stumble upon is large.
readme_head = fd.read(100)
# The first comparison captures our current variant (REPOSITORY_README), the second comparison
# is an older variant of the README file (used by 1.0.x).
if b'Borg Backup repository' in readme_head or b'Borg repository' in readme_head:
raise self.AlreadyExists(path)
except OSError:
# Ignore FileNotFound, PermissionError, ...
pass
def create(self, path):
"""Create a new empty repository at `path`
"""
self.check_can_create_repository(path)
if not os.path.exists(path):
os.mkdir(path)
with open(os.path.join(path, 'README'), 'w') as fd:
fd.write(REPOSITORY_README)
os.mkdir(os.path.join(path, 'data'))
config = ConfigParser(interpolation=None)
config.add_section('repository')
config.set('repository', 'version', '1')
config.set('repository', 'segments_per_dir', str(DEFAULT_SEGMENTS_PER_DIR))
config.set('repository', 'max_segment_size', str(DEFAULT_MAX_SEGMENT_SIZE))
config.set('repository', 'append_only', str(int(self.append_only)))
if self.storage_quota:
config.set('repository', 'storage_quota', str(self.storage_quota))
else:
config.set('repository', 'storage_quota', '0')
config.set('repository', 'additional_free_space', '0')
config.set('repository', 'id', bin_to_hex(os.urandom(32)))
self.save_config(path, config)
def save_config(self, path, config):
config_path = os.path.join(path, 'config')
old_config_path = os.path.join(path, 'config.old')
if os.path.isfile(old_config_path):
logger.warning("Old config file not securely erased on previous config update")
secure_erase(old_config_path)
if os.path.isfile(config_path):
try:
os.link(config_path, old_config_path)
except OSError as e:
if e.errno in (errno.EMLINK, errno.ENOSYS, errno.EPERM):
logger.warning("Hardlink failed, cannot securely erase old config file")
else:
raise
with SaveFile(config_path) as fd:
config.write(fd)
if os.path.isfile(old_config_path):
secure_erase(old_config_path)
def save_key(self, keydata):
assert self.config
keydata = keydata.decode('utf-8') # remote repo: msgpack issue #99, getting bytes
self.config.set('repository', 'key', keydata)
self.save_config(self.path, self.config)
def load_key(self):
keydata = self.config.get('repository', 'key')
return keydata.encode('utf-8') # remote repo: msgpack issue #99, returning bytes
def get_free_nonce(self):
if not self.lock.got_exclusive_lock():
raise AssertionError("bug in code, exclusive lock should exist here")
nonce_path = os.path.join(self.path, 'nonce')
try:
with open(nonce_path, 'r') as fd:
return int.from_bytes(unhexlify(fd.read()), byteorder='big')
except FileNotFoundError:
return None
def commit_nonce_reservation(self, next_unreserved, start_nonce):
if not self.lock.got_exclusive_lock():
raise AssertionError("bug in code, exclusive lock should exist here")
if self.get_free_nonce() != start_nonce:
raise Exception("nonce space reservation with mismatched previous state")
nonce_path = os.path.join(self.path, 'nonce')
with SaveFile(nonce_path, binary=False) as fd:
fd.write(bin_to_hex(next_unreserved.to_bytes(8, byteorder='big')))
def destroy(self):
"""Destroy the repository at `self.path`
"""
if self.append_only:
raise ValueError(self.path + " is in append-only mode")
self.close()
os.remove(os.path.join(self.path, 'config')) # kill config first
shutil.rmtree(self.path)
def get_index_transaction_id(self):
indices = sorted(int(fn[6:])
for fn in os.listdir(self.path)
if fn.startswith('index.') and fn[6:].isdigit() and os.stat(os.path.join(self.path, fn)).st_size != 0)
if indices:
return indices[-1]
else:
return None
def check_transaction(self):
index_transaction_id = self.get_index_transaction_id()
segments_transaction_id = self.io.get_segments_transaction_id()
if index_transaction_id is not None and segments_transaction_id is None:
# we have a transaction id from the index, but we did not find *any*
# commit in the segment files (thus no segments transaction id).
# this can happen if a lot of segment files are lost, e.g. due to a
# filesystem or hardware malfunction. it means we have no identifiable
# valid (committed) state of the repo which we could use.
msg = '%s" - although likely this is "beyond repair' % self.path # dirty hack
raise self.CheckNeeded(msg)
# Attempt to automatically rebuild index if we crashed between commit
# tag write and index save
if index_transaction_id != segments_transaction_id:
if index_transaction_id is not None and index_transaction_id > segments_transaction_id:
replay_from = None
else:
replay_from = index_transaction_id
self.replay_segments(replay_from, segments_transaction_id)
def get_transaction_id(self):
self.check_transaction()
return self.get_index_transaction_id()
def break_lock(self):
Lock(os.path.join(self.path, 'lock')).break_lock()
def open(self, path, exclusive, lock_wait=None, lock=True):
self.path = path
if not os.path.isdir(path):
raise self.DoesNotExist(path)
if lock:
self.lock = Lock(os.path.join(path, 'lock'), exclusive, timeout=lock_wait, kill_stale_locks=hostname_is_unique()).acquire()
else:
self.lock = None
self.config = ConfigParser(interpolation=None)
self.config.read(os.path.join(self.path, 'config'))
if 'repository' not in self.config.sections() or self.config.getint('repository', 'version') != 1:
self.close()
raise self.InvalidRepository(path)
self.max_segment_size = self.config.getint('repository', 'max_segment_size')
self.segments_per_dir = self.config.getint('repository', 'segments_per_dir')
self.additional_free_space = parse_file_size(self.config.get('repository', 'additional_free_space', fallback=0))
# append_only can be set in the constructor
# it shouldn't be overridden (True -> False) here
self.append_only = self.append_only or self.config.getboolean('repository', 'append_only', fallback=False)
if self.storage_quota is None:
# self.storage_quota is None => no explicit storage_quota was specified, use repository setting.
self.storage_quota = self.config.getint('repository', 'storage_quota', fallback=0)
self.id = unhexlify(self.config.get('repository', 'id').strip())
self.io = LoggedIO(self.path, self.max_segment_size, self.segments_per_dir)
def close(self):
if self.lock:
if self.io:
self.io.close()
self.io = None
self.lock.release()
self.lock = None
def commit(self, save_space=False):
"""Commit transaction
"""
# save_space is not used anymore, but stays for RPC/API compatibility.
if self.transaction_doomed:
exception = self.transaction_doomed
self.rollback()
raise exception
self.check_free_space()
self.log_storage_quota()
self.io.write_commit()
if not self.append_only:
self.compact_segments()
self.write_index()
self.rollback()
def _read_integrity(self, transaction_id, key):
integrity_file = 'integrity.%d' % transaction_id
integrity_path = os.path.join(self.path, integrity_file)
try:
with open(integrity_path, 'rb') as fd:
integrity = msgpack.unpack(fd)
except FileNotFoundError:
return
if integrity.get(b'version') != 2:
logger.warning('Unknown integrity data version %r in %s', integrity.get(b'version'), integrity_file)
return
return integrity[key].decode()
def open_index(self, transaction_id, auto_recover=True):
if transaction_id is None:
return NSIndex()
index_path = os.path.join(self.path, 'index.%d' % transaction_id)
integrity_data = self._read_integrity(transaction_id, b'index')
try:
with IntegrityCheckedFile(index_path, write=False, integrity_data=integrity_data) as fd:
return NSIndex.read(fd)
except (ValueError, OSError, FileIntegrityError) as exc:
logger.warning('Repository index missing or corrupted, trying to recover from: %s', exc)
os.unlink(index_path)
if not auto_recover:
raise
self.prepare_txn(self.get_transaction_id())
# don't leave an open transaction around
self.commit()
return self.open_index(self.get_transaction_id())
def prepare_txn(self, transaction_id, do_cleanup=True):
self._active_txn = True
if not self.lock.got_exclusive_lock():
if self.exclusive is not None:
# self.exclusive is either True or False, thus a new client is active here.
# if it is False and we get here, the caller did not use exclusive=True although
# it is needed for a write operation. if it is True and we get here, something else
# went very wrong, because we should have a exclusive lock, but we don't.
raise AssertionError("bug in code, exclusive lock should exist here")
# if we are here, this is an old client talking to a new server (expecting lock upgrade).
# or we are replaying segments and might need a lock upgrade for that.
try:
self.lock.upgrade()
except (LockError, LockErrorT):
# if upgrading the lock to exclusive fails, we do not have an
# active transaction. this is important for "serve" mode, where
# the repository instance lives on - even if exceptions happened.
self._active_txn = False
raise
if not self.index or transaction_id is None:
try:
self.index = self.open_index(transaction_id, auto_recover=False)
except (ValueError, OSError, FileIntegrityError) as exc:
logger.warning('Checking repository transaction due to previous error: %s', exc)
self.check_transaction()
self.index = self.open_index(transaction_id, auto_recover=False)
if transaction_id is None:
self.segments = {} # XXX bad name: usage_count_of_segment_x = self.segments[x]
self.compact = FreeSpace() # XXX bad name: freeable_space_of_segment_x = self.compact[x]
self.storage_quota_use = 0
self.shadow_index.clear()
else:
if do_cleanup:
self.io.cleanup(transaction_id)
hints_path = os.path.join(self.path, 'hints.%d' % transaction_id)
index_path = os.path.join(self.path, 'index.%d' % transaction_id)
integrity_data = self._read_integrity(transaction_id, b'hints')
try:
with IntegrityCheckedFile(hints_path, write=False, integrity_data=integrity_data) as fd:
hints = msgpack.unpack(fd)
except (msgpack.UnpackException, msgpack.ExtraData, FileNotFoundError, FileIntegrityError) as e:
logger.warning('Repository hints file missing or corrupted, trying to recover: %s', e)
if not isinstance(e, FileNotFoundError):
os.unlink(hints_path)
# index must exist at this point
os.unlink(index_path)
self.check_transaction()
self.prepare_txn(transaction_id)
return
if hints[b'version'] == 1:
logger.debug('Upgrading from v1 hints.%d', transaction_id)
self.segments = hints[b'segments']
self.compact = FreeSpace()
self.storage_quota_use = 0
for segment in sorted(hints[b'compact']):
logger.debug('Rebuilding sparse info for segment %d', segment)
self._rebuild_sparse(segment)
logger.debug('Upgrade to v2 hints complete')
elif hints[b'version'] != 2:
raise ValueError('Unknown hints file version: %d' % hints[b'version'])
else:
self.segments = hints[b'segments']
self.compact = FreeSpace(hints[b'compact'])
self.storage_quota_use = hints.get(b'storage_quota_use', 0)
self.log_storage_quota()
# Drop uncommitted segments in the shadow index
for key, shadowed_segments in self.shadow_index.items():
for segment in list(shadowed_segments):
if segment > transaction_id:
shadowed_segments.remove(segment)
def write_index(self):
def flush_and_sync(fd):
fd.flush()
os.fsync(fd.fileno())
def rename_tmp(file):
os.rename(file + '.tmp', file)
hints = {
b'version': 2,
b'segments': self.segments,
b'compact': self.compact,
b'storage_quota_use': self.storage_quota_use,
}
integrity = {
# Integrity version started at 2, the current hints version.
# Thus, integrity version == hints version, for now.
b'version': 2,
}
transaction_id = self.io.get_segments_transaction_id()
assert transaction_id is not None
# Log transaction in append-only mode
if self.append_only:
with open(os.path.join(self.path, 'transactions'), 'a') as log:
print('transaction %d, UTC time %s' % (transaction_id, datetime.utcnow().isoformat()), file=log)
# Write hints file
hints_name = 'hints.%d' % transaction_id
hints_file = os.path.join(self.path, hints_name)
with IntegrityCheckedFile(hints_file + '.tmp', filename=hints_name, write=True) as fd:
msgpack.pack(hints, fd)
flush_and_sync(fd)
integrity[b'hints'] = fd.integrity_data
# Write repository index
index_name = 'index.%d' % transaction_id
index_file = os.path.join(self.path, index_name)
with IntegrityCheckedFile(index_file + '.tmp', filename=index_name, write=True) as fd:
# XXX: Consider using SyncFile for index write-outs.
self.index.write(fd)
flush_and_sync(fd)
integrity[b'index'] = fd.integrity_data
# Write integrity file, containing checksums of the hints and index files
integrity_name = 'integrity.%d' % transaction_id
integrity_file = os.path.join(self.path, integrity_name)
with open(integrity_file + '.tmp', 'wb') as fd:
msgpack.pack(integrity, fd)
flush_and_sync(fd)
# Rename the integrity file first
rename_tmp(integrity_file)
sync_dir(self.path)
# Rename the others after the integrity file is hypothetically on disk
rename_tmp(hints_file)
rename_tmp(index_file)
sync_dir(self.path)
# Remove old auxiliary files
current = '.%d' % transaction_id
for name in os.listdir(self.path):
if not name.startswith(('index.', 'hints.', 'integrity.')):
continue
if name.endswith(current):
continue
os.unlink(os.path.join(self.path, name))
self.index = None
def check_free_space(self):
"""Pre-commit check for sufficient free space to actually perform the commit."""
# As a baseline we take four times the current (on-disk) index size.
# At this point the index may only be updated by compaction, which won't resize it.
# We still apply a factor of four so that a later, separate invocation can free space
# (journaling all deletes for all chunks is one index size) or still make minor additions
# (which may grow the index up to twice its current size).
# Note that in a subsequent operation the committed index is still on-disk, therefore we
# arrive at index_size * (1 + 2 + 1).
# In that order: journaled deletes (1), hashtable growth (2), persisted index (1).
required_free_space = self.index.size() * 4
# Conservatively estimate hints file size:
# 10 bytes for each segment-refcount pair, 10 bytes for each segment-space pair
# Assume maximum of 5 bytes per integer. Segment numbers will usually be packed more densely (1-3 bytes),
# as will refcounts and free space integers. For 5 MiB segments this estimate is good to ~20 PB repo size.
# Add 4K to generously account for constant format overhead.
hints_size = len(self.segments) * 10 + len(self.compact) * 10 + 4096
required_free_space += hints_size
required_free_space += self.additional_free_space
if not self.append_only:
full_segment_size = self.max_segment_size + MAX_OBJECT_SIZE
if len(self.compact) < 10:
# This is mostly for the test suite to avoid overestimated free space needs. This can be annoying
# if TMP is a small-ish tmpfs.
compact_working_space = sum(self.io.segment_size(segment) - free for segment, free in self.compact.items())
logger.debug('check_free_space: few segments, not requiring a full free segment')
compact_working_space = min(compact_working_space, full_segment_size)
logger.debug('check_free_space: calculated working space for compact as %d bytes', compact_working_space)
required_free_space += compact_working_space
else:
# Keep one full worst-case segment free in non-append-only mode
required_free_space += full_segment_size
try:
st_vfs = os.statvfs(self.path)
except OSError as os_error:
logger.warning('Failed to check free space before committing: ' + str(os_error))
return
# f_bavail: even as root - don't touch the Federal Block Reserve!
free_space = st_vfs.f_bavail * st_vfs.f_bsize
logger.debug('check_free_space: required bytes {}, free bytes {}'.format(required_free_space, free_space))
if free_space < required_free_space:
if self.created:
logger.error('Not enough free space to initialize repository at this location.')
self.destroy()
else:
self._rollback(cleanup=True)
formatted_required = format_file_size(required_free_space)
formatted_free = format_file_size(free_space)
raise self.InsufficientFreeSpaceError(formatted_required, formatted_free)
def log_storage_quota(self):
if self.storage_quota:
logger.info('Storage quota: %s out of %s used.',
format_file_size(self.storage_quota_use), format_file_size(self.storage_quota))
def compact_segments(self):
"""Compact sparse segments by copying data into new segments
"""
if not self.compact:
return
index_transaction_id = self.get_index_transaction_id()
segments = self.segments
unused = [] # list of segments, that are not used anymore
logger = create_logger('borg.debug.compact_segments')
def complete_xfer(intermediate=True):
# complete the current transfer (when some target segment is full)
nonlocal unused
# commit the new, compact, used segments
segment = self.io.write_commit(intermediate=intermediate)
logger.debug('complete_xfer: wrote %scommit at segment %d', 'intermediate ' if intermediate else '', segment)
# get rid of the old, sparse, unused segments. free space.
for segment in unused:
logger.debug('complete_xfer: deleting unused segment %d', segment)
assert self.segments.pop(segment) == 0, 'Corrupted segment reference count - corrupted index or hints'
self.io.delete_segment(segment)
del self.compact[segment]
unused = []
logger.debug('compaction started.')
pi = ProgressIndicatorPercent(total=len(self.compact), msg='Compacting segments %3.0f%%', step=1,
msgid='repository.compact_segments')
for segment, freeable_space in sorted(self.compact.items()):
if not self.io.segment_exists(segment):
logger.warning('segment %d not found, but listed in compaction data', segment)
del self.compact[segment]
pi.show()
continue
segment_size = self.io.segment_size(segment)
if segment_size > 0.2 * self.max_segment_size and freeable_space < 0.15 * segment_size:
logger.debug('not compacting segment %d (only %d bytes are sparse)', segment, freeable_space)
pi.show()
continue
segments.setdefault(segment, 0)
logger.debug('compacting segment %d with usage count %d and %d freeable bytes',
segment, segments[segment], freeable_space)
for tag, key, offset, data in self.io.iter_objects(segment, include_data=True):
if tag == TAG_COMMIT:
continue
in_index = self.index.get(key)
is_index_object = in_index == (segment, offset)
if tag == TAG_PUT and is_index_object:
try:
new_segment, offset = self.io.write_put(key, data, raise_full=True)
except LoggedIO.SegmentFull:
complete_xfer()
new_segment, offset = self.io.write_put(key, data)
self.index[key] = new_segment, offset
segments.setdefault(new_segment, 0)
segments[new_segment] += 1
segments[segment] -= 1
elif tag == TAG_PUT and not is_index_object:
# If this is a PUT shadowed by a later tag, then it will be gone when this segment is deleted after
# this loop. Therefore it is removed from the shadow index.
try:
self.shadow_index[key].remove(segment)
except (KeyError, ValueError):
pass
elif tag == TAG_DELETE and not in_index:
# If the shadow index doesn't contain this key, then we can't say if there's a shadowed older tag,
# therefore we do not drop the delete, but write it to a current segment.
shadowed_put_exists = key not in self.shadow_index or any(
# If the key is in the shadow index and there is any segment with an older PUT of this
# key, we have a shadowed put.
shadowed < segment for shadowed in self.shadow_index[key])
delete_is_not_stable = index_transaction_id is None or segment > index_transaction_id
if shadowed_put_exists or delete_is_not_stable:
# (introduced in 6425d16aa84be1eaaf88)
# This is needed to avoid object un-deletion if we crash between the commit and the deletion
# of old segments in complete_xfer().
#
# However, this only happens if the crash also affects the FS to the effect that file deletions
# did not materialize consistently after journal recovery. If they always materialize in-order
# then this is not a problem, because the old segment containing a deleted object would be deleted
# before the segment containing the delete.
#
# Consider the following series of operations if we would not do this, ie. this entire if:
# would be removed.
# Columns are segments, lines are different keys (line 1 = some key, line 2 = some other key)
# Legend: P=TAG_PUT, D=TAG_DELETE, c=commit, i=index is written for latest commit
#
# Segment | 1 | 2 | 3
# --------+-------+-----+------
# Key 1 | P | D |
# Key 2 | P | | P
# commits | c i | c | c i
# --------+-------+-----+------
# ^- compact_segments starts
# ^- complete_xfer commits, after that complete_xfer deletes
# segments 1 and 2 (and then the index would be written).
#
# Now we crash. But only segment 2 gets deleted, while segment 1 is still around. Now key 1
# is suddenly undeleted (because the delete in segment 2 is now missing).
# Again, note the requirement here. We delete these in the correct order that this doesn't happen,
# and only if the FS materialization of these deletes is reordered or parts dropped this can happen.
# In this case it doesn't cause outright corruption, 'just' an index count mismatch, which will be
# fixed by borg-check --repair.
#
# Note that in this check the index state is the proxy for a "most definitely settled" repository state,
# ie. the assumption is that *all* operations on segments <= index state are completed and stable.
try:
new_segment, size = self.io.write_delete(key, raise_full=True)
except LoggedIO.SegmentFull:
complete_xfer()
new_segment, size = self.io.write_delete(key)
self.compact[new_segment] += size
segments.setdefault(new_segment, 0)
assert segments[segment] == 0, 'Corrupted segment reference count - corrupted index or hints'
unused.append(segment)
pi.show()
pi.finish()
complete_xfer(intermediate=False)
logger.debug('compaction completed.')
def replay_segments(self, index_transaction_id, segments_transaction_id):
# fake an old client, so that in case we do not have an exclusive lock yet, prepare_txn will upgrade the lock:
remember_exclusive = self.exclusive
self.exclusive = None
self.prepare_txn(index_transaction_id, do_cleanup=False)
try:
segment_count = sum(1 for _ in self.io.segment_iterator())
pi = ProgressIndicatorPercent(total=segment_count, msg='Replaying segments %3.0f%%',
msgid='repository.replay_segments')
for i, (segment, filename) in enumerate(self.io.segment_iterator()):
pi.show(i)
if index_transaction_id is not None and segment <= index_transaction_id:
continue
if segment > segments_transaction_id:
break
objects = self.io.iter_objects(segment)
self._update_index(segment, objects)
pi.finish()
self.write_index()
finally:
self.exclusive = remember_exclusive
self.rollback()
def _update_index(self, segment, objects, report=None):
"""some code shared between replay_segments and check"""
self.segments[segment] = 0
for tag, key, offset, size in objects:
if tag == TAG_PUT:
try:
# If this PUT supersedes an older PUT, mark the old segment for compaction and count the free space
s, _ = self.index[key]
self.compact[s] += size
self.segments[s] -= 1
except KeyError:
pass
self.index[key] = segment, offset
self.segments[segment] += 1
self.storage_quota_use += size
elif tag == TAG_DELETE:
try:
# if the deleted PUT is not in the index, there is nothing to clean up
s, offset = self.index.pop(key)
except KeyError:
pass
else:
if self.io.segment_exists(s):
# the old index is not necessarily valid for this transaction (e.g. compaction); if the segment
# is already gone, then it was already compacted.
self.segments[s] -= 1
size = self.io.read(s, offset, key, read_data=False)
self.storage_quota_use -= size
self.compact[s] += size
elif tag == TAG_COMMIT:
continue
else:
msg = 'Unexpected tag {} in segment {}'.format(tag, segment)
if report is None:
raise self.CheckNeeded(msg)
else:
report(msg)
if self.segments[segment] == 0:
self.compact[segment] += self.io.segment_size(segment)
def _rebuild_sparse(self, segment):
"""Rebuild sparse bytes count for a single segment relative to the current index."""
self.compact[segment] = 0
if self.segments[segment] == 0:
self.compact[segment] += self.io.segment_size(segment)
return
for tag, key, offset, size in self.io.iter_objects(segment, read_data=False):
if tag == TAG_PUT:
if self.index.get(key, (-1, -1)) != (segment, offset):
# This PUT is superseded later
self.compact[segment] += size
elif tag == TAG_DELETE:
# The outcome of the DELETE has been recorded in the PUT branch already
self.compact[segment] += size
def check(self, repair=False, save_space=False):
"""Check repository consistency
This method verifies all segment checksums and makes sure
the index is consistent with the data stored in the segments.
"""
if self.append_only and repair:
raise ValueError(self.path + " is in append-only mode")
error_found = False
def report_error(msg):
nonlocal error_found
error_found = True
logger.error(msg)
logger.info('Starting repository check')
assert not self._active_txn
try:
transaction_id = self.get_transaction_id()
current_index = self.open_index(transaction_id)
logger.debug('Read committed index of transaction %d', transaction_id)
except Exception as exc:
transaction_id = self.io.get_segments_transaction_id()
current_index = None
logger.debug('Failed to read committed index (%s)', exc)
if transaction_id is None:
logger.debug('No segments transaction found')
transaction_id = self.get_index_transaction_id()
if transaction_id is None:
logger.debug('No index transaction found, trying latest segment')
transaction_id = self.io.get_latest_segment()
if transaction_id is None:
report_error('This repository contains no valid data.')
return False
if repair:
self.io.cleanup(transaction_id)
segments_transaction_id = self.io.get_segments_transaction_id()
logger.debug('Segment transaction is %s', segments_transaction_id)
logger.debug('Determined transaction is %s', transaction_id)
self.prepare_txn(None) # self.index, self.compact, self.segments all empty now!
segment_count = sum(1 for _ in self.io.segment_iterator())
logger.debug('Found %d segments', segment_count)
pi = ProgressIndicatorPercent(total=segment_count, msg='Checking segments %3.1f%%', step=0.1,
msgid='repository.check')
for i, (segment, filename) in enumerate(self.io.segment_iterator()):
pi.show(i)
if segment > transaction_id:
continue
try:
objects = list(self.io.iter_objects(segment))
except IntegrityError as err:
report_error(str(err))
objects = []
if repair:
self.io.recover_segment(segment, filename)
objects = list(self.io.iter_objects(segment))
self._update_index(segment, objects, report_error)
pi.finish()
# self.index, self.segments, self.compact now reflect the state of the segment files up to <transaction_id>
# We might need to add a commit tag if no committed segment is found
if repair and segments_transaction_id is None:
report_error('Adding commit tag to segment {}'.format(transaction_id))
self.io.segment = transaction_id + 1
self.io.write_commit()
logger.info('Starting repository index check')
if current_index and not repair:
# current_index = "as found on disk"
# self.index = "as rebuilt in-memory from segments"
if len(current_index) != len(self.index):
report_error('Index object count mismatch.')
logger.error('committed index: %d objects', len(current_index))
logger.error('rebuilt index: %d objects', len(self.index))
line_format = '%-64s %-16s %-16s'
not_found = '<not found>'
logger.warning(line_format, 'ID', 'rebuilt index', 'committed index')
for key, value in self.index.iteritems():
current_value = current_index.get(key, not_found)
if current_value != value:
logger.warning(line_format, bin_to_hex(key), value, current_value)
for key, current_value in current_index.iteritems():
if key in self.index:
continue
value = self.index.get(key, not_found)
if current_value != value:
logger.warning(line_format, bin_to_hex(key), value, current_value)
elif current_index:
for key, value in self.index.iteritems():
if current_index.get(key, (-1, -1)) != value:
report_error('Index mismatch for key {}. {} != {}'.format(key, value, current_index.get(key, (-1, -1))))
if repair:
self.compact_segments()
self.write_index()
self.rollback()
if error_found:
if repair:
logger.info('Completed repository check, errors found and repaired.')
else:
logger.error('Completed repository check, errors found.')
else:
logger.info('Completed repository check, no problems found.')
return not error_found or repair
def _rollback(self, *, cleanup):
"""
"""
if cleanup:
self.io.cleanup(self.io.get_segments_transaction_id())
self.index = None
self._active_txn = False
self.transaction_doomed = None
def rollback(self):
# note: when used in remote mode, this is time limited, see RemoteRepository.shutdown_time.
self._rollback(cleanup=False)
def __len__(self):
if not self.index:
self.index = self.open_index(self.get_transaction_id())
return len(self.index)
def __contains__(self, id):
if not self.index:
self.index = self.open_index(self.get_transaction_id())
return id in self.index
def list(self, limit=None, marker=None):
"""
list <limit> IDs starting from after id <marker> - in index (pseudo-random) order.
"""
if not self.index:
self.index = self.open_index(self.get_transaction_id())
return [id_ for id_, _ in islice(self.index.iteritems(marker=marker), limit)]
def scan(self, limit=None, marker=None):
"""
list <limit> IDs starting from after id <marker> - in on-disk order, so that a client
fetching data in this order does linear reads and reuses stuff from disk cache.
We rely on repository.check() having run already (either now or some time before) and that:
- if we are called from a borg check command, self.index is a valid, fresh, in-sync repo index.
- if we are called from elsewhere, either self.index or the on-disk index is valid and in-sync.
- the repository segments are valid (no CRC errors).
if we encounter CRC errors in segment entry headers, the rest of the segment is skipped.
"""
if limit is not None and limit < 1:
raise ValueError('please use limit > 0 or limit = None')
if not self.index:
transaction_id = self.get_transaction_id()
self.index = self.open_index(transaction_id)
at_start = marker is None
# smallest valid seg is <uint32> 0, smallest valid offs is <uint32> 8
start_segment, start_offset = (0, 0) if at_start else self.index[marker]
result = []
for segment, filename in self.io.segment_iterator(start_segment):
obj_iterator = self.io.iter_objects(segment, start_offset, read_data=False, include_data=False)
while True:
try:
tag, id, offset, size = next(obj_iterator)
except (StopIteration, IntegrityError):
# either end-of-segment or an error - we can not seek to objects at
# higher offsets than one that has an error in the header fields
break
if start_offset > 0:
# we are using a marker and the marker points to the last object we have already
# returned in the previous scan() call - thus, we need to skip this one object.
# also, for the next segment, we need to start at offset 0.
start_offset = 0
continue
if tag == TAG_PUT and (segment, offset) == self.index.get(id):
# we have found an existing and current object
result.append(id)
if len(result) == limit:
return result
return result
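# A minimal usage sketch (not part of the original class; 'repository' and 'process'
# are placeholder names): fetch IDs in on-disk order, 1000 at a time, resuming each
# call from the last ID returned by the previous one.
#
#   marker = None
#   while True:
#       ids = repository.scan(limit=1000, marker=marker)
#       if not ids:
#           break
#       for id_ in ids:
#           process(id_)
#       marker = ids[-1]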
def get(self, id):
if not self.index:
self.index = self.open_index(self.get_transaction_id())
try:
segment, offset = self.index[id]
return self.io.read(segment, offset, id)
except KeyError:
raise self.ObjectNotFound(id, self.path) from None
def get_many(self, ids, is_preloaded=False):
for id_ in ids:
yield self.get(id_)
def put(self, id, data, wait=True):
"""put a repo object
Note: when doing calls with wait=False this gets async and caller must
deal with async results / exceptions later.
"""
if not self._active_txn:
self.prepare_txn(self.get_transaction_id())
try:
segment, offset = self.index[id]
except KeyError:
pass
else:
self.segments[segment] -= 1
size = self.io.read(segment, offset, id, read_data=False)
self.storage_quota_use -= size
self.compact[segment] += size
segment, size = self.io.write_delete(id)
self.compact[segment] += size
self.segments.setdefault(segment, 0)
segment, offset = self.io.write_put(id, data)
self.storage_quota_use += len(data) + self.io.put_header_fmt.size
self.segments.setdefault(segment, 0)
self.segments[segment] += 1
self.index[id] = segment, offset
if self.storage_quota and self.storage_quota_use > self.storage_quota:
self.transaction_doomed = self.StorageQuotaExceeded(
format_file_size(self.storage_quota), format_file_size(self.storage_quota_use))
raise self.transaction_doomed
def delete(self, id, wait=True):
"""delete a repo object
Note: when doing calls with wait=False this gets async and caller must
deal with async results / exceptions later.
"""
if not self._active_txn:
self.prepare_txn(self.get_transaction_id())
try:
segment, offset = self.index.pop(id)
except KeyError:
raise self.ObjectNotFound(id, self.path) from None
self.shadow_index.setdefault(id, []).append(segment)
self.segments[segment] -= 1
size = self.io.read(segment, offset, id, read_data=False)
self.storage_quota_use -= size
self.compact[segment] += size
segment, size = self.io.write_delete(id)
self.compact[segment] += size
self.segments.setdefault(segment, 0)
def async_response(self, wait=True):
"""Get one async result (only applies to remote repositories).
async commands (== calls with wait=False, e.g. delete and put) have no results,
but may raise exceptions. These async exceptions must get collected later via
async_response() calls. Repeat the call until it returns None.
The previous calls might either return one (non-None) result or raise an exception.
If wait=True is given and there are outstanding responses, it will wait for them
to arrive. With wait=False, it will only return already received responses.
"""
def preload(self, ids):
"""Preload objects (only applies to remote repositories)
"""
class LoggedIO:
class SegmentFull(Exception):
"""raised when a segment is full, before opening next"""
header_fmt = struct.Struct('<IIB')
assert header_fmt.size == 9
put_header_fmt = struct.Struct('<IIB32s')
assert put_header_fmt.size == 41
header_no_crc_fmt = struct.Struct('<IB')
assert header_no_crc_fmt.size == 5
crc_fmt = struct.Struct('<I')
assert crc_fmt.size == 4
_commit = header_no_crc_fmt.pack(9, TAG_COMMIT)
COMMIT = crc_fmt.pack(crc32(_commit)) + _commit
def __init__(self, path, limit, segments_per_dir, capacity=90):
self.path = path
self.fds = LRUCache(capacity,
dispose=self.close_fd)
self.segment = 0
self.limit = limit
self.segments_per_dir = segments_per_dir
self.offset = 0
self._write_fd = None
def close(self):
self.close_segment()
self.fds.clear()
self.fds = None # Just to make sure we're disabled
def close_fd(self, fd):
safe_fadvise(fd.fileno(), 0, 0, 'DONTNEED')
fd.close()
def segment_iterator(self, segment=None, reverse=False):
if segment is None:
segment = 0 if not reverse else 2 ** 32 - 1
data_path = os.path.join(self.path, 'data')
start_segment_dir = segment // self.segments_per_dir
dirs = os.listdir(data_path)
if not reverse:
dirs = [dir for dir in dirs if dir.isdigit() and int(dir) >= start_segment_dir]
else:
dirs = [dir for dir in dirs if dir.isdigit() and int(dir) <= start_segment_dir]
dirs = sorted(dirs, key=int, reverse=reverse)
for dir in dirs:
filenames = os.listdir(os.path.join(data_path, dir))
if not reverse:
filenames = [filename for filename in filenames if filename.isdigit() and int(filename) >= segment]
else:
filenames = [filename for filename in filenames if filename.isdigit() and int(filename) <= segment]
filenames = sorted(filenames, key=int, reverse=reverse)
for filename in filenames:
# Note: Do not filter out logically deleted segments (see "File system interaction" above),
# since this is used by cleanup and txn state detection as well.
yield int(filename), os.path.join(data_path, dir, filename)
def get_latest_segment(self):
for segment, filename in self.segment_iterator(reverse=True):
return segment
return None
def get_segments_transaction_id(self):
"""Return the last committed segment.
"""
for segment, filename in self.segment_iterator(reverse=True):
if self.is_committed_segment(segment):
return segment
return None
def cleanup(self, transaction_id):
"""Delete segment files left by aborted transactions
"""
self.segment = transaction_id + 1
for segment, filename in self.segment_iterator(reverse=True):
if segment > transaction_id:
truncate_and_unlink(filename)
else:
break
def is_committed_segment(self, segment):
"""Check if segment ends with a COMMIT_TAG tag
"""
try:
iterator = self.iter_objects(segment)
except IntegrityError:
return False
with open(self.segment_filename(segment), 'rb') as fd:
try:
fd.seek(-self.header_fmt.size, os.SEEK_END)
except OSError as e:
# return False if segment file is empty or too small
if e.errno == errno.EINVAL:
return False
raise e
if fd.read(self.header_fmt.size) != self.COMMIT:
return False
seen_commit = False
while True:
try:
tag, key, offset, _ = next(iterator)
except IntegrityError:
return False
except StopIteration:
break
if tag == TAG_COMMIT:
seen_commit = True
continue
if seen_commit:
return False
return seen_commit
def segment_filename(self, segment):
return os.path.join(self.path, 'data', str(segment // self.segments_per_dir), str(segment))
def get_write_fd(self, no_new=False, raise_full=False):
if not no_new and self.offset and self.offset > self.limit:
if raise_full:
raise self.SegmentFull
self.close_segment()
if not self._write_fd:
if self.segment % self.segments_per_dir == 0:
dirname = os.path.join(self.path, 'data', str(self.segment // self.segments_per_dir))
if not os.path.exists(dirname):
os.mkdir(dirname)
sync_dir(os.path.join(self.path, 'data'))
self._write_fd = SyncFile(self.segment_filename(self.segment), binary=True)
self._write_fd.write(MAGIC)
self.offset = MAGIC_LEN
return self._write_fd
def get_fd(self, segment):
try:
return self.fds[segment]
except KeyError:
fd = open(self.segment_filename(segment), 'rb')
self.fds[segment] = fd
return fd
def close_segment(self):
# set self._write_fd to None early to guard against reentry from error handling code paths:
fd, self._write_fd = self._write_fd, None
if fd is not None:
self.segment += 1
self.offset = 0
fd.close()
def delete_segment(self, segment):
if segment in self.fds:
del self.fds[segment]
try:
truncate_and_unlink(self.segment_filename(segment))
except FileNotFoundError:
pass
def segment_exists(self, segment):
filename = self.segment_filename(segment)
# When deleting segments, they are first truncated. If truncate(2) and unlink(2) are split
# across FS transactions, then logically deleted segments will show up as truncated.
return os.path.exists(filename) and os.path.getsize(filename)
def segment_size(self, segment):
return os.path.getsize(self.segment_filename(segment))
def iter_objects(self, segment, offset=0, include_data=False, read_data=True):
"""
Return object iterator for *segment*.
If read_data is False then include_data must be False as well.
Integrity checks are skipped: all data obtained from the iterator must be considered informational.
The iterator returns four-tuples of (tag, key, offset, data|size).
"""
fd = self.get_fd(segment)
fd.seek(offset)
if offset == 0:
# we are touching this segment for the first time, check the MAGIC.
# Repository.scan() calls us with segment > 0 when it continues an ongoing iteration
# from a marker position - but then we have checked the magic before already.
if fd.read(MAGIC_LEN) != MAGIC:
raise IntegrityError('Invalid segment magic [segment {}, offset {}]'.format(segment, 0))
offset = MAGIC_LEN
header = fd.read(self.header_fmt.size)
while header:
size, tag, key, data = self._read(fd, self.header_fmt, header, segment, offset,
(TAG_PUT, TAG_DELETE, TAG_COMMIT),
read_data=read_data)
if include_data:
yield tag, key, offset, data
else:
yield tag, key, offset, size
offset += size
# we must get the fd via get_fd() here again as we yielded to our caller and it might
# have triggered closing of the fd we had before (e.g. by calling io.read() for
# different segment(s)).
# by calling get_fd() here again we also make our fd "recently used" so it likely
# does not get kicked out of self.fds LRUcache.
fd = self.get_fd(segment)
fd.seek(offset)
header = fd.read(self.header_fmt.size)
def recover_segment(self, segment, filename):
if segment in self.fds:
del self.fds[segment]
with open(filename, 'rb') as fd:
# XXX: Rather use mmap, this loads the entire segment (up to 500 MB by default) into memory.
data = memoryview(fd.read())
os.rename(filename, filename + '.beforerecover')
logger.info('attempting to recover ' + filename)
with open(filename, 'wb') as fd:
fd.write(MAGIC)
while len(data) >= self.header_fmt.size:
crc, size, tag = self.header_fmt.unpack(data[:self.header_fmt.size])
if size < self.header_fmt.size or size > len(data):
data = data[1:]
continue
if crc32(data[4:size]) & 0xffffffff != crc:
data = data[1:]
continue
fd.write(data[:size])
data = data[size:]
def read(self, segment, offset, id, read_data=True):
"""
Read entry from *segment* at *offset* with *id*.
If read_data is False the size of the entry is returned instead and integrity checks are skipped.
The return value should thus be considered informational.
"""
if segment == self.segment and self._write_fd:
self._write_fd.sync()
fd = self.get_fd(segment)
fd.seek(offset)
header = fd.read(self.put_header_fmt.size)
size, tag, key, data = self._read(fd, self.put_header_fmt, header, segment, offset, (TAG_PUT, ), read_data)
if id != key:
raise IntegrityError('Invalid segment entry header, is not for wanted id [segment {}, offset {}]'.format(
segment, offset))
return data if read_data else size
def _read(self, fd, fmt, header, segment, offset, acceptable_tags, read_data=True):
# some code shared by read() and iter_objects()
try:
hdr_tuple = fmt.unpack(header)
except struct.error as err:
raise IntegrityError('Invalid segment entry header [segment {}, offset {}]: {}'.format(
segment, offset, err)) from None
if fmt is self.put_header_fmt:
crc, size, tag, key = hdr_tuple
elif fmt is self.header_fmt:
crc, size, tag = hdr_tuple
key = None
else:
raise TypeError("_read called with unsupported format")
if size > MAX_OBJECT_SIZE:
# if you get this on an archive made with borg < 1.0.7 and millions of files and
# you need to restore it, you can disable this check by using "if False:" above.
raise IntegrityError('Invalid segment entry size {} - too big [segment {}, offset {}]'.format(
size, segment, offset))
if size < fmt.size:
raise IntegrityError('Invalid segment entry size {} - too small [segment {}, offset {}]'.format(
size, segment, offset))
length = size - fmt.size
if read_data:
data = fd.read(length)
if len(data) != length:
raise IntegrityError('Segment entry data short read [segment {}, offset {}]: expected {}, got {} bytes'.format(
segment, offset, length, len(data)))
if crc32(data, crc32(memoryview(header)[4:])) & 0xffffffff != crc:
raise IntegrityError('Segment entry checksum mismatch [segment {}, offset {}]'.format(
segment, offset))
if key is None and tag in (TAG_PUT, TAG_DELETE):
key, data = data[:32], data[32:]
else:
if key is None and tag in (TAG_PUT, TAG_DELETE):
key = fd.read(32)
length -= 32
if len(key) != 32:
raise IntegrityError('Segment entry key short read [segment {}, offset {}]: expected {}, got {} bytes'.format(
segment, offset, 32, len(key)))
oldpos = fd.tell()
seeked = fd.seek(length, os.SEEK_CUR) - oldpos
data = None
if seeked != length:
raise IntegrityError('Segment entry data short seek [segment {}, offset {}]: expected {}, got {} bytes'.format(
segment, offset, length, seeked))
if tag not in acceptable_tags:
raise IntegrityError('Invalid segment entry header, did not get acceptable tag [segment {}, offset {}]'.format(
segment, offset))
return size, tag, key, data
def write_put(self, id, data, raise_full=False):
data_size = len(data)
if data_size > MAX_DATA_SIZE:
# this would push the segment entry size beyond MAX_OBJECT_SIZE.
raise IntegrityError('More than allowed put data [{} > {}]'.format(data_size, MAX_DATA_SIZE))
fd = self.get_write_fd(raise_full=raise_full)
size = data_size + self.put_header_fmt.size
offset = self.offset
header = self.header_no_crc_fmt.pack(size, TAG_PUT)
crc = self.crc_fmt.pack(crc32(data, crc32(id, crc32(header))) & 0xffffffff)
fd.write(b''.join((crc, header, id, data)))
self.offset += size
return self.segment, offset
def write_delete(self, id, raise_full=False):
fd = self.get_write_fd(raise_full=raise_full)
header = self.header_no_crc_fmt.pack(self.put_header_fmt.size, TAG_DELETE)
crc = self.crc_fmt.pack(crc32(id, crc32(header)) & 0xffffffff)
fd.write(b''.join((crc, header, id)))
self.offset += self.put_header_fmt.size
return self.segment, self.put_header_fmt.size
def write_commit(self, intermediate=False):
if intermediate:
# Intermediate commits go directly into the current segment - this makes checking their validity more
# expensive, but is faster and reduces clobber.
fd = self.get_write_fd()
fd.sync()
else:
self.close_segment()
fd = self.get_write_fd()
header = self.header_no_crc_fmt.pack(self.header_fmt.size, TAG_COMMIT)
crc = self.crc_fmt.pack(crc32(header) & 0xffffffff)
fd.write(b''.join((crc, header)))
self.close_segment()
return self.segment - 1 # close_segment() increments it
# MAX_OBJECT_SIZE = <20 MiB (MAX_DATA_SIZE) + 41 bytes for a Repository PUT header, which consists of
# a 1 byte tag ID, 4 byte CRC, 4 byte size and 32 bytes for the ID.
MAX_OBJECT_SIZE = MAX_DATA_SIZE + LoggedIO.put_header_fmt.size
assert MAX_OBJECT_SIZE == 20971520 == 20 * 1024 * 1024
| {
"content_hash": "20c1a5c66bb6cd7c4bf7527198c5e886",
"timestamp": "",
"source": "github",
"line_count": 1417,
"max_line_length": 135,
"avg_line_length": 47.91037402964008,
"alnum_prop": 0.5914802103433546,
"repo_name": "edgewood/borg",
"id": "f73e9cf54c9d29ca1072783b9476bd95c10953e6",
"size": "67889",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/borg/repository.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "169469"
},
{
"name": "HTML",
"bytes": "66027"
},
{
"name": "Python",
"bytes": "1245891"
},
{
"name": "Shell",
"bytes": "2192"
}
],
"symlink_target": ""
} |
import os
from asyncio import ensure_future
from aiopg.sa import create_engine
from sqlalchemy import (
Column,
Integer,
MetaData,
String,
Table,
)
from urllib import parse
# postgres is not a standard urllib.parse URL
parse.uses_netloc.append("postgres")
metadata = MetaData()
player_stats = Table(
'player_stats',
metadata,
Column('id', Integer, primary_key=True),
Column('open_ts', Integer),
Column('close_ts', Integer),
Column('state', String),
Column('game_id', String),
)
async def create_player_stats_table(conn):
return await conn.execute('''CREATE TABLE IF NOT EXISTS player_stats (
id serial PRIMARY KEY,
open_ts bigint DEFAULT NULL,
close_ts bigint DEFAULT NULL,
state varchar(255) DEFAULT NULL
)''')
async def add_game_id_player_stats(conn):
return await conn.execute('''ALTER TABLE player_stats ADD COLUMN IF NOT EXISTS game_id varchar(255) DEFAULT NULL;
''')
async def async_db_call(fn):
url = parse.urlparse(os.environ.get("DATABASE_URL", "postgres://localhost:5432/supers"))
engine_attrs = {
'database': url.path[1:],
'user': url.username,
'password': url.password,
'host': url.hostname,
'port': url.port,
}
async with create_engine(**engine_attrs) as engine:
async with engine.acquire() as conn:
return await fn(conn)
def setup_and_migrate_db(ioloop):
return all([
ioloop.run_until_complete(ensure_future(async_db_call(create_player_stats_table))),
ioloop.run_until_complete(ensure_future(async_db_call(add_game_id_player_stats))),
])
| {
"content_hash": "826d8b9073ecf74cf7bd340b47af2271",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 117,
"avg_line_length": 27.633333333333333,
"alnum_prop": 0.6550060313630881,
"repo_name": "philkjacobs/superlatives",
"id": "4a21aaa9bcf7f814d085fc8bb7e0a9514be1fc75",
"size": "1658",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tables.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4701"
},
{
"name": "HTML",
"bytes": "691"
},
{
"name": "JavaScript",
"bytes": "38380"
},
{
"name": "Python",
"bytes": "18605"
}
],
"symlink_target": ""
} |
import os
import unittest
import archinfo
import ailment
import angr
from angr.analyses.decompiler.peephole_optimizations import ConstantDereferences
test_location = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', 'binaries', 'tests')
class TestPeepholeOptimizations(unittest.TestCase):
def test_constant_dereference(self):
# a = *(A) :=> a = the variable at address A iff
# - A is a pointer that points to a read-only section.
proj = angr.Project(os.path.join(test_location, "armel", "decompiler", "rm"), auto_load_libs=False)
expr = ailment.Expr.Load(None, ailment.Expr.Const(None, None, 0xa000, proj.arch.bits),
proj.arch.bytes, archinfo.Endness.LE, ins_addr=0x400100)
opt = ConstantDereferences(proj, proj.kb, 0)
optimized = opt.optimize(expr)
assert isinstance(optimized, ailment.Const)
assert optimized.value == 0x183f8
assert optimized.tags.get('ins_addr', None) == 0x400100, "Peephole optimizer lost tags."
# multiple cases that no optimization should happen
# a. Loading a pointer from a writable location
expr = ailment.Expr.Load(None, ailment.Expr.Const(None, None, 0x21df4, proj.arch.bits), 1, archinfo.Endness.LE)
opt = ConstantDereferences(proj, proj.kb, 0)
optimized = opt.optimize(expr)
assert optimized is None
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "5691288783a49ba3e7ac6784a1d1bd39",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 119,
"avg_line_length": 39.432432432432435,
"alnum_prop": 0.6641535298149417,
"repo_name": "angr/angr",
"id": "4275177baedf41f1ab31ef0704dfda58eb058f5e",
"size": "1512",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_peephole_optimizations.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "6694"
},
{
"name": "C++",
"bytes": "146292"
},
{
"name": "Makefile",
"bytes": "946"
},
{
"name": "Python",
"bytes": "27717304"
}
],
"symlink_target": ""
} |
"""Helpers for LCN component."""
from __future__ import annotations
import asyncio
from copy import deepcopy
from itertools import chain
import re
from typing import Tuple, Type, Union, cast
import pypck
import voluptuous as vol
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_ADDRESS,
CONF_BINARY_SENSORS,
CONF_COVERS,
CONF_DEVICES,
CONF_DOMAIN,
CONF_ENTITIES,
CONF_HOST,
CONF_IP_ADDRESS,
CONF_LIGHTS,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
CONF_SENSORS,
CONF_SOURCE,
CONF_SWITCHES,
CONF_USERNAME,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers import device_registry as dr
from homeassistant.helpers.typing import ConfigType
from .const import (
BINSENSOR_PORTS,
CONF_CLIMATES,
CONF_CONNECTIONS,
CONF_DIM_MODE,
CONF_DOMAIN_DATA,
CONF_HARDWARE_SERIAL,
CONF_HARDWARE_TYPE,
CONF_OUTPUT,
CONF_RESOURCE,
CONF_SCENES,
CONF_SK_NUM_TRIES,
CONF_SOFTWARE_SERIAL,
CONNECTION,
DEFAULT_NAME,
DOMAIN,
LED_PORTS,
LOGICOP_PORTS,
OUTPUT_PORTS,
S0_INPUTS,
SETPOINTS,
THRESHOLDS,
VARIABLES,
)
# typing
AddressType = Tuple[int, int, bool]
DeviceConnectionType = Union[
pypck.module.ModuleConnection, pypck.module.GroupConnection
]
InputType = Type[pypck.inputs.Input]
# Regex for address validation
PATTERN_ADDRESS = re.compile(
"^((?P<conn_id>\\w+)\\.)?s?(?P<seg_id>\\d+)\\.(?P<type>m|g)?(?P<id>\\d+)$"
)
DOMAIN_LOOKUP = {
CONF_BINARY_SENSORS: "binary_sensor",
CONF_CLIMATES: "climate",
CONF_COVERS: "cover",
CONF_LIGHTS: "light",
CONF_SCENES: "scene",
CONF_SENSORS: "sensor",
CONF_SWITCHES: "switch",
}
def get_device_connection(
hass: HomeAssistant, address: AddressType, config_entry: ConfigEntry
) -> DeviceConnectionType | None:
"""Return a lcn device_connection."""
host_connection = hass.data[DOMAIN][config_entry.entry_id][CONNECTION]
addr = pypck.lcn_addr.LcnAddr(*address)
return host_connection.get_address_conn(addr)
def get_resource(domain_name: str, domain_data: ConfigType) -> str:
"""Return the resource for the specified domain_data."""
if domain_name in ("switch", "light"):
return cast(str, domain_data["output"])
if domain_name in ("binary_sensor", "sensor"):
return cast(str, domain_data["source"])
if domain_name == "cover":
return cast(str, domain_data["motor"])
if domain_name == "climate":
return f'{domain_data["source"]}.{domain_data["setpoint"]}'
if domain_name == "scene":
return f'{domain_data["register"]}.{domain_data["scene"]}'
raise ValueError("Unknown domain")
def get_device_model(domain_name: str, domain_data: ConfigType) -> str:
"""Return the model for the specified domain_data."""
if domain_name in ("switch", "light"):
return "Output" if domain_data[CONF_OUTPUT] in OUTPUT_PORTS else "Relay"
if domain_name in ("binary_sensor", "sensor"):
if domain_data[CONF_SOURCE] in BINSENSOR_PORTS:
return "Binary Sensor"
if domain_data[CONF_SOURCE] in chain(
VARIABLES, SETPOINTS, THRESHOLDS, S0_INPUTS
):
return "Variable"
if domain_data[CONF_SOURCE] in LED_PORTS:
return "Led"
if domain_data[CONF_SOURCE] in LOGICOP_PORTS:
return "Logical Operation"
return "Key"
if domain_name == "cover":
return "Motor"
if domain_name == "climate":
return "Regulator"
if domain_name == "scene":
return "Scene"
raise ValueError("Unknown domain")
def generate_unique_id(
entry_id: str,
address: AddressType,
resource: str | None = None,
) -> str:
"""Generate a unique_id from the given parameters."""
unique_id = entry_id
is_group = "g" if address[2] else "m"
unique_id += f"-{is_group}{address[0]:03d}{address[1]:03d}"
if resource:
unique_id += f"-{resource}".lower()
return unique_id
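# Illustrative example (entry_id made up): generate_unique_id("abc123", (0, 7, False), "output1")
# returns "abc123-m000007-output1"; the same address as a group, (0, 7, True), would use
# the "g" prefix instead of "m".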
def import_lcn_config(lcn_config: ConfigType) -> list[ConfigType]:
"""Convert lcn settings from configuration.yaml to config_entries data.
Create a list of config_entry data structures like:
"data": {
"host": "pchk",
"ip_address": "192.168.2.41",
"port": 4114,
"username": "lcn",
"password": "lcn,
"sk_num_tries: 0,
"dim_mode: "STEPS200",
"devices": [
{
"address": (0, 7, False)
"name": "",
"hardware_serial": -1,
"software_serial": -1,
"hardware_type": -1
}, ...
],
"entities": [
{
"address": (0, 7, False)
"name": "Light_Output1",
"resource": "output1",
"domain": "light",
"domain_data": {
"output": "OUTPUT1",
"dimmable": True,
"transition": 5000.0
}
}, ...
]
}
"""
data = {}
for connection in lcn_config[CONF_CONNECTIONS]:
host = {
CONF_HOST: connection[CONF_NAME],
CONF_IP_ADDRESS: connection[CONF_HOST],
CONF_PORT: connection[CONF_PORT],
CONF_USERNAME: connection[CONF_USERNAME],
CONF_PASSWORD: connection[CONF_PASSWORD],
CONF_SK_NUM_TRIES: connection[CONF_SK_NUM_TRIES],
CONF_DIM_MODE: connection[CONF_DIM_MODE],
CONF_DEVICES: [],
CONF_ENTITIES: [],
}
data[connection[CONF_NAME]] = host
for confkey, domain_config in lcn_config.items():
if confkey == CONF_CONNECTIONS:
continue
domain = DOMAIN_LOOKUP[confkey]
# loop over entities in configuration.yaml
for domain_data in domain_config:
# remove name and address from domain_data
entity_name = domain_data.pop(CONF_NAME)
address, host_name = domain_data.pop(CONF_ADDRESS)
if host_name is None:
host_name = DEFAULT_NAME
# check if we have a new device config
for device_config in data[host_name][CONF_DEVICES]:
if address == device_config[CONF_ADDRESS]:
break
else: # create new device_config
device_config = {
CONF_ADDRESS: address,
CONF_NAME: "",
CONF_HARDWARE_SERIAL: -1,
CONF_SOFTWARE_SERIAL: -1,
CONF_HARDWARE_TYPE: -1,
}
data[host_name][CONF_DEVICES].append(device_config)
# insert entity config
resource = get_resource(domain, domain_data).lower()
for entity_config in data[host_name][CONF_ENTITIES]:
if (
address == entity_config[CONF_ADDRESS]
and resource == entity_config[CONF_RESOURCE]
and domain == entity_config[CONF_DOMAIN]
):
break
else: # create new entity_config
entity_config = {
CONF_ADDRESS: address,
CONF_NAME: entity_name,
CONF_RESOURCE: resource,
CONF_DOMAIN: domain,
CONF_DOMAIN_DATA: domain_data.copy(),
}
data[host_name][CONF_ENTITIES].append(entity_config)
return list(data.values())
def register_lcn_host_device(hass: HomeAssistant, config_entry: ConfigEntry) -> None:
"""Register LCN host for given config_entry in device registry."""
device_registry = dr.async_get(hass)
device_registry.async_get_or_create(
config_entry_id=config_entry.entry_id,
identifiers={(DOMAIN, config_entry.entry_id)},
manufacturer="Issendorff",
name=config_entry.title,
model="PCHK",
)
def register_lcn_address_devices(
hass: HomeAssistant, config_entry: ConfigEntry
) -> None:
"""Register LCN modules and groups defined in config_entry as devices in device registry.
The name of all given device_connections is collected and the devices
are updated.
"""
device_registry = dr.async_get(hass)
host_identifiers = (DOMAIN, config_entry.entry_id)
for device_config in config_entry.data[CONF_DEVICES]:
address = device_config[CONF_ADDRESS]
device_name = device_config[CONF_NAME]
identifiers = {(DOMAIN, generate_unique_id(config_entry.entry_id, address))}
if device_config[CONF_ADDRESS][2]: # is group
device_model = f"LCN group (g{address[0]:03d}{address[1]:03d})"
sw_version = None
else: # is module
hardware_type = device_config[CONF_HARDWARE_TYPE]
if hardware_type in pypck.lcn_defs.HARDWARE_DESCRIPTIONS:
hardware_name = pypck.lcn_defs.HARDWARE_DESCRIPTIONS[hardware_type]
else:
hardware_name = pypck.lcn_defs.HARDWARE_DESCRIPTIONS[-1]
device_model = f"{hardware_name} (m{address[0]:03d}{address[1]:03d})"
sw_version = f"{device_config[CONF_SOFTWARE_SERIAL]:06X}"
device_registry.async_get_or_create(
config_entry_id=config_entry.entry_id,
identifiers=identifiers,
via_device=host_identifiers,
manufacturer="Issendorff",
sw_version=sw_version,
name=device_name,
model=device_model,
)
async def async_update_device_config(
device_connection: DeviceConnectionType, device_config: ConfigType
) -> None:
"""Fill missing values in device_config with infos from LCN bus."""
# fetch serial info if device is module
if not (is_group := device_config[CONF_ADDRESS][2]): # is module
await device_connection.serial_known
if device_config[CONF_HARDWARE_SERIAL] == -1:
device_config[CONF_HARDWARE_SERIAL] = device_connection.hardware_serial
if device_config[CONF_SOFTWARE_SERIAL] == -1:
device_config[CONF_SOFTWARE_SERIAL] = device_connection.software_serial
if device_config[CONF_HARDWARE_TYPE] == -1:
device_config[CONF_HARDWARE_TYPE] = device_connection.hardware_type.value
# fetch name if device is module
if device_config[CONF_NAME] != "":
return
device_name = ""
if not is_group:
device_name = await device_connection.request_name()
if is_group or device_name == "":
module_type = "Group" if is_group else "Module"
device_name = (
f"{module_type} "
f"{device_config[CONF_ADDRESS][0]:03d}/"
f"{device_config[CONF_ADDRESS][1]:03d}"
)
device_config[CONF_NAME] = device_name
async def async_update_config_entry(
hass: HomeAssistant, config_entry: ConfigEntry
) -> None:
"""Fill missing values in config_entry with infos from LCN bus."""
device_configs = deepcopy(config_entry.data[CONF_DEVICES])
coros = []
for device_config in device_configs:
device_connection = get_device_connection(
hass, device_config[CONF_ADDRESS], config_entry
)
coros.append(async_update_device_config(device_connection, device_config))
await asyncio.gather(*coros)
new_data = {**config_entry.data, CONF_DEVICES: device_configs}
# schedule config_entry for save
hass.config_entries.async_update_entry(config_entry, data=new_data)
def has_unique_host_names(hosts: list[ConfigType]) -> list[ConfigType]:
"""Validate that all connection names are unique.
Use 'pchk' as default connection_name (or add a numeric suffix if
'pchk' is already in use).
"""
suffix = 0
for host in hosts:
if host.get(CONF_NAME) is None:
if suffix == 0:
host[CONF_NAME] = DEFAULT_NAME
else:
host[CONF_NAME] = f"{DEFAULT_NAME}{suffix:d}"
suffix += 1
schema = vol.Schema(vol.Unique())
schema([host.get(CONF_NAME) for host in hosts])
return hosts
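# For example, two hosts configured without a name are assigned "pchk" and "pchk1"
# respectively (DEFAULT_NAME plus a numeric suffix for every further unnamed host).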
def is_address(value: str) -> tuple[AddressType, str]:
"""Validate the given address string.
Examples for S000M005 at myhome:
myhome.s000.m005
myhome.s0.m5
myhome.0.5 ("m" is implicit if missing)
Examples for s000g011
myhome.0.g11
myhome.s0.g11
"""
if matcher := PATTERN_ADDRESS.match(value):
is_group = matcher.group("type") == "g"
addr = (int(matcher.group("seg_id")), int(matcher.group("id")), is_group)
conn_id = matcher.group("conn_id")
return addr, conn_id
raise ValueError(f"{value} is not a valid address string")
def is_states_string(states_string: str) -> list[str]:
"""Validate the given states string and return states list."""
if len(states_string) != 8:
raise ValueError("Invalid length of states string")
states = {"1": "ON", "0": "OFF", "T": "TOGGLE", "-": "NOCHANGE"}
return [states[state_string] for state_string in states_string]
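# For illustration, is_states_string("10T-10T-") maps to
# ['ON', 'OFF', 'TOGGLE', 'NOCHANGE', 'ON', 'OFF', 'TOGGLE', 'NOCHANGE'].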
| {
"content_hash": "85d5752951debb14156c19f587a84203",
"timestamp": "",
"source": "github",
"line_count": 399,
"max_line_length": 93,
"avg_line_length": 33.090225563909776,
"alnum_prop": 0.5966068317806559,
"repo_name": "jawilson/home-assistant",
"id": "b879c2d3f725b48f1fc7fb0b712a453878e665e7",
"size": "13203",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "homeassistant/components/lcn/helpers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2782"
},
{
"name": "Python",
"bytes": "40129467"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.utils.translation import ugettext as _
from ..constants import Frequency
from ..constants import Day
# TODO: would like to implement the commented out choices below. See:
# https://github.com/infoagetech/django-recurrence/issues/1
FREQUENCY_CHOICES = (
(Frequency.ONCE, _('Never')),
(Frequency.DAILY, _('Daily')),
(Frequency.WEEKLY, _('Weekly')),
(Frequency.MONTHLY, _('Monthly')),
# ('semi_annual', _('Semi-Annual')),
(Frequency.YEARLY, _('Yearly')),
# ('every_other_week', _('Every Other Week')),
# ('same_day_of_month', _('Same Day of the Month')),
# ('first_day_of_month', _('First Day of the Month')),
# ('last_day_of_month', _('Last Day of the Month')),
)
WEEKDAY_CHOICES = (
(Day.SUNDAY, 'Sunday'),
(Day.MONDAY, 'Monday'),
(Day.TUESDAY, 'Tuesday'),
(Day.WEDNESDAY, 'Wednesday'),
(Day.THURSDAY, 'Thursday'),
(Day.FRIDAY, 'Friday'),
(Day.SATURDAY, 'Saturday'),
)
| {
"content_hash": "2e8dd48abc5451d78a24fd8c45361cfd",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 69,
"avg_line_length": 32,
"alnum_prop": 0.626008064516129,
"repo_name": "InfoAgeTech/django-recurrences",
"id": "52a550355bf1df32fdf2069374573b7bd26207ab",
"size": "992",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_recurrences/forms/choices.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2042"
},
{
"name": "JavaScript",
"bytes": "3574"
},
{
"name": "Makefile",
"bytes": "5591"
},
{
"name": "Python",
"bytes": "89763"
},
{
"name": "Shell",
"bytes": "5109"
}
],
"symlink_target": ""
} |
import matplotlib.pyplot as plt
import numpy as np
def gen_data(n, start=0, end=10):
x = np.linspace(start, end, n)
y = np.sin(10*x) - x*x
return y
def gen_data_osc(n):
return np.array([1024 + (-2)**(-i/100) for i in range(n)])
def gen_data_rand(n):
return np.random.randn(n) + 0.3*np.linspace(0, 10, n)
def calc_cov(X, Y):
return np.sum((X - np.average(X))*(Y - np.average(Y))) / (X.shape[0] - 1)
def angular_coef(X,Y):
return calc_cov(X,Y)/calc_cov(X,X)
def linear_coef(a, X, Y):
return np.average(Y) - a*np.average(X)
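# Quick sanity check of the least-squares fit (made-up numbers): for X = [0, 1, 2] and
# Y = [1, 3, 5], calc_cov(X, Y) = 2 and calc_cov(X, X) = 1, so angular_coef gives a = 2
# and linear_coef gives b = 3 - 2*1 = 1, i.e. the fitted line is y = 2x + 1.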
def kg_coef(est, measurement):
return est / (est + measurement)
def kg_iter(prev, measurement):
return prev + kg_coef(prev, measurement) * (measurement - prev)
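# Example with made-up values: kg_iter(prev=10.0, measurement=12.0) blends the two as
# 10.0 + (10.0 / (10.0 + 12.0)) * (12.0 - 10.0), which is roughly 10.91.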
def calc_cov_step(X, Y, X_avg, Y_avg, conv_sum, data_size):
return (conv_sum + ((X - X_avg)*(Y - Y_avg))) / (data_size - 1)
def angular_coef_step(X, Y, X_sum, Y_sum, XY_conv_sum, XX_conv_sum, data_size):
X_avg = X_sum/data_size
Y_avg = Y_sum/data_size
return calc_cov_step(X, Y, X_avg, Y_avg, XY_conv_sum, data_size)/calc_cov_step(X, X, X_avg, X_avg, XX_conv_sum, data_size)
def linear_coef_step(a, X_sum, Y_sum, data_size):
X_avg = X_sum/data_size
Y_avg = Y_sum/data_size
return Y_avg - a*X_avg
count = 100
end = 100
time = np.linspace(0, end, count)
data = gen_data(count)
delta = end / count
preds = []
kg_preds = []
preds_step = []
kg_preds_step = []
X_sum = time[0]
Y_sum = data[0]
data_size = 1
XY_conv_sum = 0
XX_conv_sum = 0
a_step = 0
b_step = 0
for i in range(1, count):
#Update data sum
X_sum += time[i-1]
Y_sum += data[i-1]
#Calculate AVG
X_avg = X_sum/data_size
Y_avg = Y_sum/data_size
#Calculate angular and linear coeficient using iterative function
a_step = angular_coef_step(time[i-1], data[i-1], X_sum, Y_sum, XY_conv_sum, XX_conv_sum, data_size)
b_step = linear_coef_step(a_step, X_sum, Y_sum, data_size)
#Update XY and XX conv sum
XY_conv_sum += ((time[i-1] - X_avg)*(data[i-1] - Y_avg))
XX_conv_sum += ((time[i-1] - X_avg)*(time[i-1] - X_avg))
#Calculate angular and linear coeficient using old function
a = angular_coef(time[:i], data[:i])
b = linear_coef(a, time[:i], data[:i])
#Calculating prediction using Min. Quad.
prediction = (time[i]+delta)*a + b
prediction_step = (time[i-1]+delta)*a_step + b_step
#Calculating prediction using Kalman Filter
kg_prediction = kg_iter(prediction, data[i-1])
kg_prediction_step = kg_iter(prediction_step, data[i-1])
#Creating arrays of data to create Graph
preds.append(prediction)
kg_preds.append(kg_prediction)
preds_step.append(prediction_step)
kg_preds_step.append(kg_prediction_step)
#Update data size
data_size += 1
#calculate Min. Quad. line
minQuadLine = time*a + b
#calculate iterative Min. Quad. line
minQuadLineWithDecay = time*a_step + b_step
plt.scatter(time, data, label="Medições", color="#FF5850")
plt.scatter(time[1:], preds, label="Est. Min. Quad.", color="#62B21C")
plt.scatter(time[1:], kg_preds, label="Est. Kalman", color="#C000FF")
plt.scatter(time[1:], preds_step, label="Est. Min. Quad. Iterativo", color="#1CB262")
plt.scatter(time[1:], kg_preds_step, label="Est. Kalman Iterativo", color="#FF00C0")
plt.plot(time, minQuadLine, label="Min. Quad. Final", color="#36A1FF")
plt.plot(time, minQuadLineWithDecay, label="Min. Quad. Final Iterativo", color="#FFA136")
plt.xlabel("Tempo")
plt.ylabel("Temperatura")
plt.title("Aproximação Por Kalman Filter Iterativo Sem Memorização")
# Add a legend to the plot.
plt.legend()
plt.show() | {
"content_hash": "b7ded47bd87b9373bb26b0862c59b3f0",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 126,
"avg_line_length": 30.43089430894309,
"alnum_prop": 0.6246326476088699,
"repo_name": "Raphael-C-Almeida/Wireless-Sensor-Network",
"id": "63a0f9990b68a3bf75e0260c6761a7abb8819c3c",
"size": "3749",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Data Fusion Test/Kalman Filter Iterativo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "45333"
},
{
"name": "JavaScript",
"bytes": "1084"
},
{
"name": "Jupyter Notebook",
"bytes": "803836"
},
{
"name": "Python",
"bytes": "42754"
}
],
"symlink_target": ""
} |
"""
giraffe-facts.py: process a GAM file from the new minimizer-based mapper (vg giraffe) and report runtime statistics by filter.
"""
import argparse
import os
import sys
import time
import subprocess
import collections
import io
import itertools
import json
import random
import math
# Force output to UTF-8. Eventually we can use reconfigure() if we drop 3.6
# and earlier.
# We need to do this before these streams get picked as any argument default
# values.
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf8')
sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding='utf8')
# We depend on our local histogram.py
import histogram
FACTS = ["Giraffes are the tallest living terrestrial animal.",
"There are nine subspecies of giraffe, each occupying separate regions of Africa. Some researchers consider some subspecies to be separate species entirely.",
"Giraffes' horn-like structures are called 'ossicones'. They consist mostly of ossified cartilage.",
"Male giraffes compete for dominance in fights in which they strike each other with their necks.",
"There are more than 1600 giraffes in captivity worldwide.",
"The name 'giraffe' has roots in the Arabic 'zarafah', meaning 'fast-walker'.",
"Before the name 'giraffe' came into standard use, giraffes were commonly called 'camelopards'.",
"There are 10 known extinct species of giraffe.",
"The closest living relative to the giraffe is the okapi, an endangered hoofed mammal from the Congo.",
"Full grown giraffes are usually between 14 and 18 feet tall.",
"The tallest recorded giraffe was 19.3 feet tall.",
"Adult male giraffes weigh an average of 2628 lbs., whereas females weight 1825 lbs.",
"Giraffes have the ability to close their nostrils to protect against sandstorms and ants.",
"Giraffes have 18-inch-long prehensile tongues, which they use for grasping foliage and for grooming.",
"Male giraffes' spots grow darker as they age.",
"Under their fur coat, giraffes have grey skin.",
"Female giraffes have hair on their ossicones, whereas males' ossicones are bald.",
"Giraffes use the weight of their head to maintain their balance when they gallop.",
"Giraffes can run at 37 miles per hour for short distances, and 31 miles per hour for several miles.",
"Giraffes sleep for about half an hour a day.",
"Giraffes have the same number of vertebrae as most mammals. The length of their neck comes from longer vertebrae (over 10 inches each).",
"Giraffes' neck is fairly short at birth, probably to make birthing easier for mothers.",
"A giraffe's heart can weigh more than 25 lbs.",
"Giraffes have structures like check valves in their necks' veins to prevent blood from rushing to their head when they bend down to drink.",
"Giraffes have a four-chambered stomach similar to cattle.",
"An adult girafffe can eat 75 lbs. of foliage per day.",
"While generally herbivorous, giraffes have been observed eating meat and bone from carcasses.",
"The giraffe's gestation period is 14 months.",
"Newborn giraffes are about 6 feet tall.",
"Giraffes are lions' most common prey.",
"Most of giraffes' mounting behavior is between two males, often after a fight for dominance.",
"Giraffes allow red-billed oxpeckers (a bird species) to perch on them to feed on ticks.",
"Egyptian heiroglyphs use the giraffe as a character, pronounced 'sr'.",
"Designers of suits for fighter pilots studied giraffe skin, since figher pilots are also at risk of passing out when blood rushes to the legs.",
"The Humr people of Sudan use giraffe liver to create a putatively hallucinogenic drink called 'umm nyolokh'. The drink's psychoactive properties may come from the giraffe's diet of acacia plants.",
"The giraffe is the national animal of Tanzania.",
"There are around 100,000 giraffes in the wild as of 2016.",
"Giraffes only need to drink every few days. Most of their water comes from the vegetation they eat.",
"Giraffes give birth standing up, so newborn giraffes fall over 5 feet upon being born.",
"Giraffes usually sleep standing upright.",
"Male giraffes detect oestrus in females by tasting their urine.",
"June 21 is World Giraffe Day.",
"Toys R' Us has used Geoffrey the Giraffe as its mascot since 1965, although earlier advertisements in the 1950's used another giraffe: Dr. G. Raffe.",
"Giraffe hooves are 1 foot in diameter.",
"About 50% of giraffe calves die in their first year, mostly due to predation.",
"Kirahvi sanoo öri öri öri öri öri öri.",
"The giraffe's average walking speed is 10 miles per hour.",
"The giraffe's tongue is colored dark blue.",
"Some of giraffes' vocalizations are too low to be heard by human ears.",
"Giraffes have never been observed swimming.",
"Mozambique requires power lines to be 39 feet high so giraffes can safely pass underneath."]
def parse_args(args):
"""
Takes in the command-line arguments list (args), and returns a nice argparse
result with fields for all the options.
Borrows heavily from the argparse documentation examples:
<http://docs.python.org/library/argparse.html>
"""
# Construct the parser (which is stored in parser)
# Module docstring lives in __doc__
# See http://python-forum.com/pythonforum/viewtopic.php?f=3&t=36847
# And a formatter class so our examples in the docstring look good. Isn't it
# convenient how we already wrapped it to 80 characters?
# See http://docs.python.org/library/argparse.html#formatter-class
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("--input", type=argparse.FileType('r'), default=sys.stdin,
help="line-oriented JSON GAM to process")
parser.add_argument("outdir",
help="directory to place output in")
# The command line arguments start with the program name, which we don't
# want to treat as an argument for argparse. So we remove it.
args = args[1:]
return parser.parse_args(args)
def sniff_params(read):
"""
Given a read dict parsed from JSON, compute a mapping parameter dict for the read.
The read will have param_XXX annotations. Turn those into a dict from XXX to value.
These should be the same for every read.
"""
# This is the annotation dict from the read
annot = read.get('annotation', {})
# This is the params dict to fill in
params = {}
for annot_name in annot.keys():
if annot_name.startswith('param_'):
# Split the param annotations on underscore
(_, param_name) = annot_name.split('_')
# Save the values under the name
params[param_name] = annot[annot_name]
return params
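# Illustrative sketch, not part of the original pipeline: with a hypothetical read
# annotated using 'param_<name>' keys (names containing no further underscores),
#   read = {'annotation': {'param_hit-cap': 10, 'param_distance-limit': 200, 'mapq': 60}}
#   sniff_params(read)  # -> {'hit-cap': 10, 'distance-limit': 200}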
def make_stats(read):
"""
Given a read dict parsed from JSON, compute a stats OrderedDict for the read.
Run on an empty dict, makes a zero-value stats dict.
A stats dict maps from filter name to a Counter of filter stats.
The filter stats include:
- 'passed_count_total' which is the count of results passing the
filter.
- 'failed_count_total' which is the count of results failing the
filter.
- 'passed_count_correct' which is the count of correct results passing
the filter.
- 'failed_count_correct' which is the count of correct results failing
the filter.
Additionally, each of these '_count_' stats has a '_size_' version,
describing the total size of all items meeting the specified criteria (as
opposed to the number of items).
For the 'seed' stage, correctness information is not yet available, so only
the '_total' values will be defined. '_correct' values will be set to None
(instead of 0).
The Counter for a filter also has sub-Counters embedded in it for
expressing distributions of filter statistic values, to assist in filter
design.
- 'statistic_distribution_correct': statistic value counts for items
deemed correct
- 'statistic_distribution_noncorrect': statistic value counts for items
not deemed correct
NaN values of the statistics are filtered out.
Filters appear in the OrderedDict in an order corresponding to their filter
number in the GAM.
"""
# This is the annotation dict from the read
annot = read.get('annotation', {})
# This will map from filter number int to filter name
filters_by_index = {}
# This will map from filter name to Counter of filter stats
filter_stats = collections.defaultdict(collections.Counter)
for annot_name in annot.keys():
# For each annotation
if annot_name.startswith('filter_'):
# If it is an individual filter info item
# Names look like 'filter_2_cluster-score-threshold_cluster_passed_size_correct'
# Break into components on underscores
(_, filter_num, filter_name, filter_stage, filter_status, filter_accounting, filter_metric) = annot_name.split('_')
# Collect integers
filter_num = int(filter_num)
filter_stat_value = annot[annot_name]
# Record the filter being at this index if not known already
filters_by_index[filter_num] = filter_name
if filter_stage == 'minimizer':
# We are filtering items produced by the minimizer stage.
# At the minimizer stage, correct and incorrect are not defined yet.
if filter_metric == 'correct':
# Make sure we didn't get any counts
assert filter_stat_value == 0
# None out the correct stat so we can detect this when making the table
filter_stat_value = None
# Record the stat value
filter_stats[filter_name]['{}_{}_{}'.format(filter_status, filter_accounting, filter_metric)] = filter_stat_value
elif annot_name.startswith('filterstats_'):
# It is a whole collection of correct or not-necessarily-correct filter statistic distribution values, for plotting.
# Break into components on underscores (correctness will be 'correct' or 'noncorrect'
(_, filter_num, filter_name, filter_stage, filter_correctness) = annot_name.split('_')
distribution = collections.Counter()
for item in annot[annot_name]:
# Parse all the statistic values
item = float(item)
if math.isnan(item):
# Discard NANs
continue
# Count all instances of the same value
distribution[item] += 1
# Save the statistic distribution
filter_stats[filter_name]['statistic_distribution_{}'.format(filter_correctness)] = distribution
elif annot_name.startswith('last_correct_stage'):
stage = annot[annot_name]
if stage == 'none':
filter_stats['hard-hit-cap']['last_correct_stage'] = 1
elif stage == 'cluster':
filter_stats['cluster-coverage']['last_correct_stage'] = 1
elif stage == 'extend':
filter_stats['extension-set']['last_correct_stage'] = 1
elif stage == 'align':
filter_stats['max-alignments']['last_correct_stage'] = 1
# Now put them all in this OrderedDict in order
ordered_stats = collections.OrderedDict()
for filter_index in sorted(filters_by_index.keys()):
filter_name = filters_by_index[filter_index]
ordered_stats[filter_name] = filter_stats[filter_name]
return ordered_stats
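# Sketch of the shape of one returned entry (filter name and numbers hypothetical):
#   stats['cluster-coverage'] == Counter({
#       'passed_count_total': 12, 'failed_count_total': 3,
#       'passed_count_correct': 1, 'failed_count_correct': 0,
#       'statistic_distribution_correct': Counter({0.9: 1}),
#       'statistic_distribution_noncorrect': Counter({0.4: 2, 0.1: 1})})
# Filters at the minimizer stage carry None in the *_correct slots instead of counts.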
def add_in_stats(destination, addend):
"""
Add the addend stats dict into the destination stats dict.
Implements += for stats dicts.
"""
for k, v in addend.items():
if v is None:
# None will replace anything and propagate through
destination[k] = None
elif isinstance(v, dict):
# Recurse into dict
add_in_stats(destination[k], v)
else:
# Use real += and hope it works
destination[k] += v
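# Minimal sketch of the intended semantics (values hypothetical):
#   dest = {'f': collections.Counter({'passed_count_total': 2})}
#   add_in_stats(dest, {'f': {'passed_count_total': 3}})
#   # dest['f']['passed_count_total'] is now 5; a None in the addend overwrites.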
def read_line_oriented_json(lines):
"""
For each line in the given stream, yield it as a parsed JSON object.
"""
for line in lines:
yield json.loads(line)
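# Usage sketch (file name hypothetical): stream reads one at a time instead of
# loading the whole file, e.g.
#   for read in read_line_oriented_json(open('mapped.json')):
#       stats = make_stats(read)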
class Table(object):
"""
Format a table of output nicely in fixed-width text.
"""
# Interface
def __init__(self, widths, out=sys.stdout):
"""
Start a table with the given column widths (a list of integers) in
characters, printing to the given stream.
"""
# Remember the base widths
self.widths = widths
# Remember the out stream
self.out = out
# Remember the previous actual column widths used, if any.
# None if no row has been produced.
self.last_widths = None
# Remember if we need a dividing line
self.need_line = False
def line(self):
"""
Say to divide the previous row from the next row.
"""
self.need_line = True
def row(self, values, justify='l', merge=None, line_top=False, line_bottom=False):
"""
Given a list of values, one per column, for up to the number of columns
in the table, draw a table row.
Justify can be 'l', 'r', 'c' or a list/string of those per value.
If merge is given, it must be a list of the number of cells to merge
horizontally for each value.
Different merge values without a line_top separator will look bad.
If line_top is set, divide from the previous row.
If line_bottom is set, divide from the next row.
"""
# Compute new merged widths
merged_widths = self.compute_merges(merge)
# Start or continue the table
if self.last_widths is None:
# Start the table
self.start(merged_widths)
elif self.need_line or line_top:
# Divide from the previous row.
self.sep(self.last_widths, merged_widths)
# Print the actual row
self.cells(values, justify, merged_widths)
# Remember this row's widths for next time.
self.last_widths = merged_widths
# Remember if we need a line
self.need_line = line_bottom
def close(self):
"""
Close off the table at the bottom.
"""
if self.last_widths is None:
self.last_widths = self.widths
self.end(self.last_widths)
self.last_widths = None
def inner_width(self):
"""
Get the total width of the table across all columns, between the outer edges.
"""
return sum(self.widths) + len(self.widths) - 1
# Internal methods
def box(self, part):
"""
Return the box-drawing character to draw the given part of a box.
Parts are {(t)op, (m)iddle, (b)ottom} crossed with {(l)eft, (m)iddle,
(r)ight} as two-character strings, plus (v)ertical and (h)orizontal as one-character strings.
"""
skin = {
'tl': '┌',
'tm': '┬',
'tr': '┐',
'bl': '└',
'bm': '┴',
'br': '┘',
'ml': '├',
'mm': '┼',
'mr': '┤',
'v': '│',
'h': '─'
}
return skin[part]
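# Quick reference for the part codes defined above:
#   box('tl') == '┌', box('mm') == '┼', box('h') == '─'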
def horizontal(self, left, junction, right, column, widths=None):
"""
Print a line across (either top, middle, or bottom).
Takes the leftmost, between-column, rightmost, and in-column characters
as box() character ID strings.
Can use a specified widths list, usually self.widths.
"""
if widths is None:
widths = self.widths
# Start edge
self.out.write(self.box(left))
for i, width in enumerate(widths):
# For each column
# Do its top line
self.out.write(self.box(column) * width)
if i + 1 != len(widths):
# Do the separator
self.out.write(self.box(junction))
# End edge
self.out.write(self.box(right))
self.out.write('\n')
def start(self, widths_after):
"""
Print an opening line at the top of the table.
Needs to know the widths of the cells on the next table line.
"""
self.horizontal('tl', 'tm', 'tr', 'h', widths_after)
def end(self, widths_before):
"""
Print a closing line at the bottom of the table.
Needs to know the widths of the cells on the previous table line.
"""
self.horizontal('bl', 'bm', 'br', 'h', widths_before)
def sep(self, widths_before, widths_after):
"""
Print a middle separator line across the table.
Needs to know the widths of the cells on the previous and next table lines.
Both sets of widths must describe a table of the same total width.
"""
# Start edge
self.out.write(self.box('ml'))
# Compute total width (cells and separators), not counting final border
total_width = sum(widths_before) + len(widths_before) - 1
# Track what cell we are in on top
before_cursor = 0
# And what column its trailing border is at
before_border = widths_before[before_cursor]
# Track what cell we are in on the bottom
after_cursor = 0
# And what column its trailing border is at
after_border = widths_after[after_cursor]
# Track what column of internal table width we are in.
col = 0
while col < total_width:
if col == before_border:
if col == after_border:
# Junction on both sides
char = self.box('mm')
# Advance after
after_cursor += 1
after_border += widths_after[after_cursor] + 1
else:
# Junction on top only
char = self.box('bm')
# Advance before
before_cursor += 1
before_border += widths_before[before_cursor] + 1
elif col == after_border:
# Junction on bottom only
char = self.box('tm')
# Advance after
after_cursor += 1
after_border += widths_after[after_cursor] + 1
else:
# No junction
char = self.box('h')
# Print the character
self.out.write(char)
# Go to the next column
col += 1
# End edge
self.out.write(self.box('mr'))
self.out.write('\n')
def compute_merges(self, merges=None):
"""
Given a list of cell counts to merge horizontally, compute new widths from self.widths.
If merges is None, use self.widths.
"""
widths = self.widths
if merges is not None:
new_widths = []
width_cursor = 0
for merge in merges:
# Compute a new column by merging the given number of old columns.
merged_width = 0
for i in range(merge):
# Take the widths of all cells
merged_width += widths[width_cursor]
width_cursor += 1
# Take the separating columns between cells
merged_width += merge - 1
new_widths.append(merged_width)
while width_cursor < len(widths):
# Copy any unmerged columns
new_widths.append(widths[width_cursor])
width_cursor += 1
widths = new_widths
return widths
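# Worked example (widths hypothetical): if self.widths == [10, 7, 7], then
# compute_merges([2, 1]) folds the first two columns and the separator between
# them into one, returning [18, 7]; compute_merges(None) returns [10, 7, 7].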
def cells(self, values, justify, widths):
"""
Given a list of values, one per column, for up to the number of columns
in the table, draw a table row.
Justify can be 'l', 'r', 'c', or a list/string of those per value.
Column count/widths must be passed.
"""
# Start the row
self.out.write(self.box('v'))
for i, (value, width) in enumerate(itertools.zip_longest(values, widths)):
# For each item and its column and width...
if width is None:
# Too many items
raise RuntimeError("Ran out of table width values ({}) for {} columns".format(
len(widths), len(values)))
# Compute the item string
item_string = str(value) if value is not None else ''
# Decide on justification for this item
if justify == 'l':
item_just = 'l'
elif justify == 'r':
item_just = 'r'
elif justify == 'c':
item_just = 'c'
elif i < len(justify):
item_just = justify[i]
else:
item_just = 'l'
# Actually justify it in a field of the necessary width
if item_just == 'l':
justified_item = item_string.ljust(width)
elif item_just == 'r':
justified_item = item_string.rjust(width)
elif item_just == 'c':
justified_item = item_string.center(width)
else:
raise RuntimeError('Invalid justification: {}'.format(item_just))
# Output the content
self.out.write(justified_item)
if (i + 1 != len(widths)):
# This isn't the last item. Do a separator.
self.out.write(self.box('v'))
# End the row
# TODO: Same as the separator
self.out.write(self.box('v'))
self.out.write('\n')
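# Minimal usage sketch for Table (widths and values hypothetical):
#   t = Table([9, 7])
#   t.row(['Filter', 'Passing'], 'c', line_bottom=True)
#   t.row(['gap-fill', '0.95'], 'lr')
#   t.close()
# draws a two-column box with unicode borders and a divider under the header.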
def print_table(read_count, stats_total, params=None, out=sys.stdout):
"""
Take the read count, the accumulated total stats dict, and an optional dict
of mapping parameters corresponding to values for filters.
Print a nicely formatted table to the given stream.
"""
if stats_total is None:
# Handle the empty case
assert(read_count == 0)
out.write('No reads.\n')
return
# Now do a table
# First header line for each column
headers = []
# Second header line for each column
headers2 = []
# Column min widths from headers
header_widths = []
# Compute filter row headings
filter_headings = stats_total.keys()
if params is not None:
# Annotate each filter with its parameter value
annotated_headings = []
for heading in filter_headings:
# For each filter
# It may be a compound thing||thing filter
parts = heading.split('||')
# We will fill this with all the relevant filter cutoff values
filter_values = []
for part in parts:
if part in params:
filter_values.append(params[part])
if len(filter_values) == 0:
# No parameters
annotated_headings.append(heading)
else:
# Annotate with the parameters
annotated_headings.append('{} ({})'.format(heading, ', '.join((str(x) for x in filter_values))))
filter_headings = annotated_headings
# How long is the longest filter name
filter_width = max((len(x) for x in filter_headings))
# Leave room for the header
filter_header = "Filter"
filter_width = max(filter_width, len(filter_header))
# And for the "Overall" entry
filter_overall = "Overall"
filter_width = max(filter_width, len(filter_overall))
headers.append(filter_header)
headers2.append('')
header_widths.append(filter_width)
# And the passing count columns (average)
passing_header = "Passing"
passing_header2 = "(/Read)"
passing_width = max(len(passing_header), len(passing_header2))
headers.append(passing_header)
headers2.append(passing_header2)
header_widths.append(passing_width)
# And the failing count columns (average)
failing_header = "Failing"
failing_header2 = "(/Read)"
failing_width = max(len(failing_header), len(failing_header2))
headers.append(failing_header)
headers2.append(failing_header2)
header_widths.append(failing_width)
# And the number of correct reads lost at each stage
lost_stage_header = "Lost"
lost_stage_header2 = "reads"
lost_stage_reads = [x for x in (stats_total[filter_name].get('last_correct_stage', 0) for filter_name in stats_total.keys()) if x is not None]
max_stage = max(lost_stage_reads)
overall_lost_stage = sum(lost_stage_reads)
lost_stage_width = max(len(lost_stage_header), len(lost_stage_header2), len(str(max_stage)), len(str(overall_lost_stage)))
headers.append(lost_stage_header)
headers2.append(lost_stage_header2)
header_widths.append(lost_stage_width)
# And the correct result lost count header
lost_header = "Lost"
lost_header2 = ""
# How big a number will we need to hold?
# Look at the reads lost at all filters
# Account for None values for stages that don't have correctness defined yet.
lost_reads = [x for x in (stats_total[filter_name]['failed_count_correct'] for filter_name in stats_total.keys()) if x is not None]
max_filter_stop = max(lost_reads)
# How many correct reads are lost overall by filters?
overall_lost = sum(lost_reads)
lost_width = max(len(lost_header), len(lost_header2), len(str(max_filter_stop)), len(str(overall_lost)))
headers.append(lost_header)
headers2.append(lost_header2)
header_widths.append(lost_width)
# And the total rejected count header
rejected_header = "Cut"
rejected_header2 = ""
# How big a number will we need to hold?
# Look at the reads rejected at all filters
rejected_reads = [stats_total[filter_name]['failed_count_total'] for filter_name in stats_total.keys()]
max_filter_stop = max(rejected_reads)
# How many incorrect reads are rejected overall by filters?
overall_rejected = sum(rejected_reads)
rejected_width = max(len(rejected_header), len(rejected_header2), len(str(max_filter_stop)), len(str(overall_rejected)))
headers.append(rejected_header)
headers2.append(rejected_header2)
header_widths.append(rejected_width)
# Now do precision and recall
# How should we format them?
pr_format = '{:.2f}'
precision_header = "P"
precision_header2 = ""
precision_width = max(len(precision_header), len(precision_header2), len(pr_format.format(1.0)), len('N/A'))
headers.append(precision_header)
headers2.append(precision_header2)
header_widths.append(precision_width)
recall_header = "R"
recall_header2 = ""
recall_width = max(len(recall_header), len(recall_header2), len(pr_format.format(1.0)), len('N/A'))
headers.append(recall_header)
headers2.append(recall_header2)
header_widths.append(recall_width)
# Start the table
table = Table(header_widths)
table.row(["Giraffe Facts"], 'c', merge=[len(header_widths)])
table.line()
table.row(['Reads' + str(read_count).rjust(table.inner_width() - 5)], merge=[len(header_widths)])
table.line()
table.row(headers, 'c')
table.row(headers2, 'c')
table.line()
for i, filter_name in enumerate(stats_total.keys()):
# Grab average results passing this filter per read
total_passing = stats_total[filter_name]['passed_count_total']
average_passing = total_passing / read_count if read_count != 0 else float('NaN')
# Grab average results failing this filter per read
total_failing = stats_total[filter_name]['failed_count_total']
average_failing = total_failing / read_count if read_count != 0 else float('NaN')
# Grab reads that are lost.
# No reads are lost at the final stage.
lost = stats_total[filter_name]['failed_count_correct']
lost_stage = stats_total[filter_name]['last_correct_stage']
# And reads that are rejected at all
rejected = stats_total[filter_name]['failed_count_total']
if lost is None:
# Correctness is not defined yet.
# TODO: have a way to see if the correct mapping never shows up.
lost = 'N/A'
# Compute precision
try:
precision = pr_format.format(stats_total[filter_name]['passed_count_correct'] /
stats_total[filter_name]['passed_count_total'])
except:
precision = 'N/A'
# Compute recall
try:
recall = pr_format.format(stats_total[filter_name]['passed_count_correct'] /
(stats_total[filter_name]['passed_count_correct'] +
stats_total[filter_name]['failed_count_correct']))
except:
recall = 'N/A'
row = [filter_headings[i]]
align = 'c'
# Add the provenance columns
row += ['{:.2f}'.format(average_passing), '{:.2f}'.format(average_failing), lost_stage, lost, rejected,
precision, recall]
align += 'rrrrrr'
# Output the finished row
table.row(row, align)
table.line()
# Compose the overall row
row = [filter_overall]
align = 'c'
# Add the provenance columns
row += ['', '', overall_lost_stage, overall_lost, overall_rejected, '', '']
align += 'rr'
table.row(row, align)
# Close off table
table.close()
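# How this gets driven from main() below (values hypothetical): stats_total is
# the sum of make_stats() over every read and params comes from sniff_params():
#   print_table(read_count=1000, stats_total=stats_total, params={'hit-cap': 10})
# With read_count == 0 and stats_total None it simply prints 'No reads.'.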
def plot_filter_statistic_histograms(out_dir, stats_total):
"""
For each filter in the stats dict, see if it has nonempty
'statistic_distribution_correct' and/or 'statistic_distribution_noncorrect'
Counters. Then if so, plot a histogram comparing correct and noncorrect
distributions, or just the noncorrect distribution if that is the only one
available (because correctness isn't known).
Store histograms in out_dir.
"""
for filter_name in stats_total.keys():
correct_counter = stats_total[filter_name]['statistic_distribution_correct']
noncorrect_counter = stats_total[filter_name]['statistic_distribution_noncorrect']
if not ((isinstance(correct_counter, dict) and len(correct_counter) > 0) or
(isinstance(noncorrect_counter, dict) and len(noncorrect_counter) > 0)):
# No stats to plot
continue
# Open a TSV file to draw a histogram from
tsv_path = os.path.join(out_dir, 'stat_{}.tsv'.format(filter_name))
tsv = open(tsv_path, 'w')
# Some stages don't have correctness annotation. So we track if we saw
# correct and noncorrect things to identify them.
have_correct = False
have_noncorrect = False
if isinstance(correct_counter, dict) and len(correct_counter) > 0:
# We have correct item stats.
have_correct = True
for value, count in correct_counter.items():
# Output format: label, value, repeats
tsv.write('correct\t{}\t{}\n'.format(value, count))
if isinstance(noncorrect_counter, dict) and len(noncorrect_counter) > 0:
# We have noncorrect item stats.
have_noncorrect = True
for value, count in noncorrect_counter.items():
# Output format: label, value, repeats
tsv.write('noncorrect\t{}\t{}\n'.format(value, count))
tsv.close()
# Now make the plot
svg_path = os.path.join(out_dir, 'stat_{}.svg'.format(filter_name))
args = ['histogram.py', tsv_path, '--save', svg_path,
'--title', '{} Statistic Histogram'.format(filter_name),
'--x_label', 'Statistic Value',
'--bins', '20',
'--y_label', 'Frequency']
if have_correct and have_noncorrect:
args.append('--legend_overlay')
args.append('best')
args.append('--categories')
args.append('correct')
args.append('noncorrect')
histogram.main(args)
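# The intermediate stat_<filter>.tsv written above has three tab-separated
# columns: label, statistic value, count. For example (values hypothetical):
#   correct     0.9   12
#   noncorrect  0.4   3
# histogram.py then renders each TSV into the matching stat_<filter>.svg.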
def main(args):
"""
Parses command line arguments and do the work of the program.
"args" specifies the program arguments, with args[0] being the executable
name. The return value should be used as the program's exit code.
"""
print(random.choice(FACTS), file = sys.stderr)
options = parse_args(args) # This holds the nicely-parsed options object
# Make the output directory if it doesn't exist
os.makedirs(options.outdir, exist_ok=True)
# Make a place to total up all the stats
stats_total = None
# Count all the reads
read_count = 0
# Record mapping parameters from at least one read
params = None
for read in read_line_oriented_json(options.input):
if params is None:
# Go get the mapping parameters
params = sniff_params(read)
# For the stats dict for each read
stats = make_stats(read)
if stats_total is None:
stats_total = stats
else:
# Sum up all the stats
add_in_stats(stats_total, stats)
# Count the read
read_count += 1
# After processing all the reads
# Print the table now in case plotting fails
print_table(read_count, stats_total, params)
# Make filter statistic histograms
plot_filter_statistic_histograms(options.outdir, stats_total)
def entrypoint():
"""
0-argument entry point for setuptools to call.
"""
# Provide main with its arguments and handle exit codes
sys.exit(main(sys.argv))
if __name__ == "__main__" :
entrypoint()
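# Example invocation (paths hypothetical): pipe line-oriented JSON reads in and
# name an output directory for the per-filter histograms, e.g.
#   vg view -aj mapped.gam | python giraffe-facts.py report_dir
# The table goes to stdout; one random giraffe fact goes to stderr.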
| {
"content_hash": "adb41317284a37bd4ea70b1327600924",
"timestamp": "",
"source": "github",
"line_count": 927,
"max_line_length": 207,
"avg_line_length": 38.47033441208198,
"alnum_prop": 0.5875161236049576,
"repo_name": "ekg/vg",
"id": "312137731451c60f786bd3a92f9e4f21a792aa2f",
"size": "35712",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/giraffe-facts.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "1011958"
},
{
"name": "Makefile",
"bytes": "16607"
},
{
"name": "Protocol Buffer",
"bytes": "5795"
},
{
"name": "Python",
"bytes": "1227"
},
{
"name": "Shell",
"bytes": "51806"
}
],
"symlink_target": ""
} |
import pytest
from polyaxon import settings
from polyaxon.proxies.schemas.gateway import get_base_config
from polyaxon.utils.test_utils import BaseTestCase
@pytest.mark.proxies_mark
class TestGatewayBase(BaseTestCase):
SET_PROXIES_SETTINGS = True
def test_gateway_base_config(self):
expected = r"""
listen 8000;
error_log /polyaxon/logs/error.log warn;
gzip on;
gzip_disable "msie6";
gzip_types *;
gzip_proxied any;
charset utf-8;
client_max_body_size 0;
client_body_buffer_size 50m;
client_body_in_file_only clean;
sendfile on;
send_timeout 650;
keepalive_timeout 650;
uwsgi_read_timeout 650;
uwsgi_send_timeout 650;
client_header_timeout 650;
proxy_read_timeout 650;
keepalive_requests 10000;
error_page 500 502 503 504 /static/errors/50x.html;
error_page 401 403 /static/errors/permission.html;
error_page 404 /static/errors/404.html;
location = /robots.txt {
rewrite ^ /static/robots.txt;
}
location = /favicon.ico {
rewrite ^ /static/images/favicon.ico;
}
location /healthz/ {
access_log off;
return 200 "healthy";
}
location /streams/ {
proxy_pass http://polyaxon-polyaxon-streams;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Origin "";
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_buffering off;
}
location /k8s/ {
proxy_pass http://polyaxon-polyaxon-streams;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Origin "";
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_buffering off;
}
location ~ /services/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) {
proxy_pass http://plx-operation-$4.$1.svc.cluster.local;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_hide_header X-Frame-Options;
proxy_set_header Origin "";
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_buffering off;
}
location ~ /rewrite-services/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) {
rewrite_log on;
rewrite ^/rewrite-services/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) /$5 break;
proxy_pass http://plx-operation-$4.$1.svc.cluster.local;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_hide_header X-Frame-Options;
proxy_set_header Origin "";
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_buffering off;
}
location ~ /external/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) {
proxy_pass http://plx-operation-$4-ext.$1.svc.cluster.local;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_hide_header X-Frame-Options;
proxy_set_header Origin "";
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_buffering off;
}
location ~ /rewrite-external/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) {
rewrite_log on;
rewrite ^/rewrite-external/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) /$5 break;
proxy_pass http://plx-operation-$4-ext.$1.svc.cluster.local;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_hide_header X-Frame-Options;
proxy_set_header Origin "";
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_buffering off;
}
location = / {
proxy_pass http://polyaxon-polyaxon-api;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Origin "";
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header Host $http_host;
proxy_buffering off;
}
location /api/v1/ {
proxy_pass http://polyaxon-polyaxon-api;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Origin "";
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header Host $http_host;
proxy_buffering off;
}
location /ui/ {
proxy_pass http://polyaxon-polyaxon-api;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Origin "";
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header Host $http_host;
proxy_buffering off;
}
location /sso/ {
proxy_pass http://polyaxon-polyaxon-api;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Origin "";
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header Host $http_host;
proxy_buffering off;
}
location /static/ {
proxy_pass http://polyaxon-polyaxon-api;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Origin "";
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header Host $http_host;
proxy_buffering off;
}
""" # noqa
settings.PROXIES_CONFIG.auth_enabled = False
settings.PROXIES_CONFIG.dns_use_resolver = False
assert get_base_config() == expected
def test_gateway_base_config_with_auth_and_dns(self):
expected = r"""
listen 8000;
error_log /polyaxon/logs/error.log warn;
gzip on;
gzip_disable "msie6";
gzip_types *;
gzip_proxied any;
charset utf-8;
client_max_body_size 0;
client_body_buffer_size 50m;
client_body_in_file_only clean;
sendfile on;
send_timeout 650;
keepalive_timeout 650;
uwsgi_read_timeout 650;
uwsgi_send_timeout 650;
client_header_timeout 650;
proxy_read_timeout 650;
keepalive_requests 10000;
error_page 500 502 503 504 /static/errors/50x.html;
error_page 401 403 /static/errors/permission.html;
error_page 404 /static/errors/404.html;
location = /robots.txt {
rewrite ^ /static/robots.txt;
}
location = /favicon.ico {
rewrite ^ /static/images/favicon.ico;
}
location /healthz/ {
access_log off;
return 200 "healthy";
}
location = /auth/v1/ {
resolver coredns.kube-system.svc.cluster.local valid=5s;
proxy_pass http://polyaxon-polyaxon-api;
proxy_pass_request_body off;
proxy_set_header Content-Length "";
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Origin-URI $request_uri;
proxy_set_header X-Origin-Method $request_method;
proxy_set_header Host $http_host;
internal;
}
location /streams/ {
auth_request /auth/v1/;
auth_request_set $auth_status $upstream_status;
resolver coredns.kube-system.svc.cluster.local valid=5s;
proxy_pass http://polyaxon-polyaxon-streams;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Origin "";
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_buffering off;
}
location /k8s/ {
auth_request /auth/v1/;
auth_request_set $auth_status $upstream_status;
resolver coredns.kube-system.svc.cluster.local valid=5s;
proxy_pass http://polyaxon-polyaxon-streams;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Origin "";
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_buffering off;
}
location ~ /services/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) {
auth_request /auth/v1/;
auth_request_set $auth_status $upstream_status;
resolver coredns.kube-system.svc.cluster.local valid=5s;
proxy_pass http://plx-operation-$4.$1.svc.cluster.local;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_hide_header X-Frame-Options;
proxy_set_header Origin "";
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_buffering off;
}
location ~ /rewrite-services/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) {
auth_request /auth/v1/;
auth_request_set $auth_status $upstream_status;
resolver coredns.kube-system.svc.cluster.local valid=5s;
rewrite_log on;
rewrite ^/rewrite-services/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) /$5 break;
proxy_pass http://plx-operation-$4.$1.svc.cluster.local;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_hide_header X-Frame-Options;
proxy_set_header Origin "";
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_buffering off;
}
location ~ /external/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) {
resolver coredns.kube-system.svc.cluster.local valid=5s;
proxy_pass http://plx-operation-$4-ext.$1.svc.cluster.local;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_hide_header X-Frame-Options;
proxy_set_header Origin "";
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_buffering off;
}
location ~ /rewrite-external/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) {
resolver coredns.kube-system.svc.cluster.local valid=5s;
rewrite_log on;
rewrite ^/rewrite-external/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) /$5 break;
proxy_pass http://plx-operation-$4-ext.$1.svc.cluster.local;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_hide_header X-Frame-Options;
proxy_set_header Origin "";
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_buffering off;
}
location = / {
resolver coredns.kube-system.svc.cluster.local valid=5s;
proxy_pass http://polyaxon-polyaxon-api;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Origin "";
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header Host $http_host;
proxy_buffering off;
}
location /api/v1/ {
resolver coredns.kube-system.svc.cluster.local valid=5s;
proxy_pass http://polyaxon-polyaxon-api;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Origin "";
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header Host $http_host;
proxy_buffering off;
}
location /ui/ {
resolver coredns.kube-system.svc.cluster.local valid=5s;
proxy_pass http://polyaxon-polyaxon-api;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Origin "";
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header Host $http_host;
proxy_buffering off;
}
location /sso/ {
resolver coredns.kube-system.svc.cluster.local valid=5s;
proxy_pass http://polyaxon-polyaxon-api;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Origin "";
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header Host $http_host;
proxy_buffering off;
}
location /static/ {
resolver coredns.kube-system.svc.cluster.local valid=5s;
proxy_pass http://polyaxon-polyaxon-api;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Origin "";
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header Host $http_host;
proxy_buffering off;
}
location /_admin/ {
resolver coredns.kube-system.svc.cluster.local valid=5s;
proxy_pass http://polyaxon-polyaxon-api;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Origin "";
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header Host $http_host;
proxy_buffering off;
}
""" # noqa
settings.PROXIES_CONFIG.ui_admin_enabled = True
settings.PROXIES_CONFIG.auth_enabled = True
settings.PROXIES_CONFIG.auth_use_resolver = True
settings.PROXIES_CONFIG.api_use_resolver = True
settings.PROXIES_CONFIG.dns_use_resolver = True
settings.PROXIES_CONFIG.dns_prefix = "coredns.kube-system"
settings.PROXIES_CONFIG.dns_custom_cluster = "cluster.local"
assert get_base_config() == expected
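# To eyeball a rendered config outside the test suite, the same pattern works
# interactively (flag values hypothetical): set fields on settings.PROXIES_CONFIG
# (auth_enabled, dns_use_resolver, dns_prefix, ...) and print(get_base_config());
# every location block asserted above is produced by that single call.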
| {
"content_hash": "d583e1c8119c65c1a16a2399018deb23",
"timestamp": "",
"source": "github",
"line_count": 459,
"max_line_length": 102,
"avg_line_length": 28.84095860566449,
"alnum_prop": 0.6555370901948935,
"repo_name": "polyaxon/polyaxon",
"id": "61955ab8b8483898f4ec31c426af68d8f6be5a9c",
"size": "13842",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/tests/test_proxies/test_gateway/test_base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1989"
},
{
"name": "Python",
"bytes": "5201898"
},
{
"name": "Shell",
"bytes": "1565"
}
],
"symlink_target": ""
} |
import logging
from settings import *
from time import sleep
from request.queue import StopRequestQueue
from app.component.modifier.blinker import blinker_pool
from board import States
logger = logging.getLogger(__name__)
class Program:
board = None
scheduler = None
service = None
queue = None
def __init__(self, board, scheduler):
self.board = board
self.scheduler = scheduler
def set_service(self, service):
self.service = service
def enqueue(self, factory):
self.queue = StopRequestQueue(self.service)
self.queue.add_request(factory.get_approaching(INBOUND_STOP_1, INBOUND_STOP_1_PIN, INBOUND_STOP_1_PIN_CLUSTER))
self.queue.add_request(factory.get_proximity(INBOUND_STOP_2, INBOUND_STOP_2_PIN))
self.queue.add_request(factory.get_proximity(INBOUND_STOP_3, INBOUND_STOP_3_PIN))
self.queue.add_request(factory.get_approaching(OUTBOUND_STOP_1, OUTBOUND_STOP_1_PIN, OUTBOUND_STOP_1_PIN_CLUSTER))
self.queue.add_request(factory.get_proximity(OUTBOUND_STOP_2, OUTBOUND_STOP_2_PIN))
self.queue.add_request(factory.get_proximity(OUTBOUND_STOP_3, OUTBOUND_STOP_3_PIN))
def start(self):
self.board.run_startup_seq()
self.board.reset()
while self.scheduler.check():
if self.service.resource_available():
logger.info('Service available.')
if self.board.is_in_error_state():
self.board.reset()
self.queue.start()
else:
logger.info('Service unavailable.')
# TODO: Have a limit to unavailability response before exiting?
blinker_pool.stop_all()
self.board.run_resource_error()
sleep(60)
self.stop()
# exit.
# allow cron job to restart process at 6am to do its job :)
def stop(self):
blinker_pool.drain()
if self.queue is not None:
self.queue.stop()
self.board.run_shutdown_seq()
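# Wiring sketch; constructing the collaborators is application-specific and the
# names below are hypothetical:
#   program = Program(board, scheduler)
#   program.set_service(service)
#   program.enqueue(request_factory)
#   try:
#       program.start()
#   finally:
#       program.stop()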
| {
"content_hash": "7da3f61c52a29a96e121ff13592ae9f9",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 118,
"avg_line_length": 32.70175438596491,
"alnum_prop": 0.6925965665236051,
"repo_name": "infrared5/massroute-pi",
"id": "e6007ad9aa373decee9518881416beb7ce718412",
"size": "1864",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/program.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "31809"
},
{
"name": "Shell",
"bytes": "892"
}
],
"symlink_target": ""
} |
import sys
from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, overload
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._managed_cluster_snapshots_operations import (
build_create_or_update_request,
build_delete_request,
build_get_request,
build_list_by_resource_group_request,
build_list_request,
build_update_tags_request,
)
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ManagedClusterSnapshotsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.containerservice.v2022_08_03_preview.aio.ContainerServiceClient`'s
:attr:`managed_cluster_snapshots` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list(self, **kwargs: Any) -> AsyncIterable["_models.ManagedClusterSnapshot"]:
"""Gets a list of managed cluster snapshots in the specified subscription.
Gets a list of managed cluster snapshots in the specified subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ManagedClusterSnapshot or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.v2022_08_03_preview.models.ManagedClusterSnapshot]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", "2022-08-03-preview")
) # type: Literal["2022-08-03-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.ManagedClusterSnapshotListResult]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ManagedClusterSnapshotListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/managedclustersnapshots"} # type: ignore
@distributed_trace
def list_by_resource_group(
self, resource_group_name: str, **kwargs: Any
) -> AsyncIterable["_models.ManagedClusterSnapshot"]:
"""Lists managed cluster snapshots in the specified subscription and resource group.
Lists managed cluster snapshots in the specified subscription and resource group.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ManagedClusterSnapshot or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.v2022_08_03_preview.models.ManagedClusterSnapshot]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", "2022-08-03-preview")
) # type: Literal["2022-08-03-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.ManagedClusterSnapshotListResult]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_resource_group_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_by_resource_group.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ManagedClusterSnapshotListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list_by_resource_group.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedclustersnapshots"} # type: ignore
@distributed_trace_async
async def get(self, resource_group_name: str, resource_name: str, **kwargs: Any) -> _models.ManagedClusterSnapshot:
"""Gets a managed cluster snapshot.
Gets a managed cluster snapshot.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagedClusterSnapshot or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2022_08_03_preview.models.ManagedClusterSnapshot
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", "2022-08-03-preview")
) # type: Literal["2022-08-03-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.ManagedClusterSnapshot]
request = build_get_request(
resource_group_name=resource_group_name,
resource_name=resource_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("ManagedClusterSnapshot", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedclustersnapshots/{resourceName}"} # type: ignore
@overload
async def create_or_update(
self,
resource_group_name: str,
resource_name: str,
parameters: _models.ManagedClusterSnapshot,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.ManagedClusterSnapshot:
"""Creates or updates a managed cluster snapshot.
Creates or updates a managed cluster snapshot.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:param parameters: The managed cluster snapshot to create or update. Required.
:type parameters:
~azure.mgmt.containerservice.v2022_08_03_preview.models.ManagedClusterSnapshot
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagedClusterSnapshot or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2022_08_03_preview.models.ManagedClusterSnapshot
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def create_or_update(
self,
resource_group_name: str,
resource_name: str,
parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.ManagedClusterSnapshot:
"""Creates or updates a managed cluster snapshot.
Creates or updates a managed cluster snapshot.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:param parameters: The managed cluster snapshot to create or update. Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagedClusterSnapshot or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2022_08_03_preview.models.ManagedClusterSnapshot
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def create_or_update(
self,
resource_group_name: str,
resource_name: str,
parameters: Union[_models.ManagedClusterSnapshot, IO],
**kwargs: Any
) -> _models.ManagedClusterSnapshot:
"""Creates or updates a managed cluster snapshot.
Creates or updates a managed cluster snapshot.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:param parameters: The managed cluster snapshot to create or update. Is either a model type or
an IO type. Required.
:type parameters:
~azure.mgmt.containerservice.v2022_08_03_preview.models.ManagedClusterSnapshot or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagedClusterSnapshot or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2022_08_03_preview.models.ManagedClusterSnapshot
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", "2022-08-03-preview")
) # type: Literal["2022-08-03-preview"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.ManagedClusterSnapshot]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IO, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "ManagedClusterSnapshot")
request = build_create_or_update_request(
resource_group_name=resource_group_name,
resource_name=resource_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.create_or_update.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize("ManagedClusterSnapshot", pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize("ManagedClusterSnapshot", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedclustersnapshots/{resourceName}"} # type: ignore
@overload
async def update_tags(
self,
resource_group_name: str,
resource_name: str,
parameters: _models.TagsObject,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.ManagedClusterSnapshot:
"""Updates tags on a managed cluster snapshot.
Updates tags on a managed cluster snapshot.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:param parameters: Parameters supplied to the Update managed cluster snapshot Tags operation.
Required.
:type parameters: ~azure.mgmt.containerservice.v2022_08_03_preview.models.TagsObject
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagedClusterSnapshot or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2022_08_03_preview.models.ManagedClusterSnapshot
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def update_tags(
self,
resource_group_name: str,
resource_name: str,
parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.ManagedClusterSnapshot:
"""Updates tags on a managed cluster snapshot.
Updates tags on a managed cluster snapshot.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:param parameters: Parameters supplied to the Update managed cluster snapshot Tags operation.
Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagedClusterSnapshot or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2022_08_03_preview.models.ManagedClusterSnapshot
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def update_tags(
self, resource_group_name: str, resource_name: str, parameters: Union[_models.TagsObject, IO], **kwargs: Any
) -> _models.ManagedClusterSnapshot:
"""Updates tags on a managed cluster snapshot.
Updates tags on a managed cluster snapshot.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:param parameters: Parameters supplied to the Update managed cluster snapshot Tags operation.
Is either a model type or an IO type. Required.
:type parameters: ~azure.mgmt.containerservice.v2022_08_03_preview.models.TagsObject or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagedClusterSnapshot or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2022_08_03_preview.models.ManagedClusterSnapshot
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", "2022-08-03-preview")
) # type: Literal["2022-08-03-preview"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.ManagedClusterSnapshot]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IO, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "TagsObject")
request = build_update_tags_request(
resource_group_name=resource_group_name,
resource_name=resource_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.update_tags.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("ManagedClusterSnapshot", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedclustersnapshots/{resourceName}"} # type: ignore
@distributed_trace_async
async def delete( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, resource_name: str, **kwargs: Any
) -> None:
"""Deletes a managed cluster snapshot.
Deletes a managed cluster snapshot.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", "2022-08-03-preview")
) # type: Literal["2022-08-03-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[None]
request = build_delete_request(
resource_group_name=resource_group_name,
resource_name=resource_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.delete.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedclustersnapshots/{resourceName}"} # type: ignore
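# ---------------------------------------------------------------------------
# Illustrative usage sketch only; it is not part of the generated SDK surface.
# It assumes the async ContainerServiceClient exposes this operation group as
# ``managed_cluster_snapshots`` and that azure-identity is installed; you may
# also need to pin the client to the 2022-08-03-preview API version. Adjust
# the names to your environment before relying on it.
# ---------------------------------------------------------------------------
async def _example_update_and_delete_snapshot(subscription_id: str) -> None:
    # Local imports so importing this module does not require azure-identity.
    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.containerservice.aio import ContainerServiceClient
    from azure.mgmt.containerservice.v2022_08_03_preview.models import TagsObject

    async with DefaultAzureCredential() as credential:
        async with ContainerServiceClient(credential, subscription_id) as client:
            ops = client.managed_cluster_snapshots  # assumed attribute name
            # Replace the tags on an existing snapshot, then delete it.
            snapshot = await ops.update_tags(
                "my-resource-group", "my-snapshot", TagsObject(tags={"env": "dev"})
            )
            print(snapshot.tags)
            await ops.delete("my-resource-group", "my-snapshot")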
| {
"content_hash": "64b8d624d6d6b78c6c66d4a8e67cb849",
"timestamp": "",
"source": "github",
"line_count": 656,
"max_line_length": 201,
"avg_line_length": 44.17530487804878,
"alnum_prop": 0.6409814003243728,
"repo_name": "Azure/azure-sdk-for-python",
"id": "d1a8ca104afea8cf07db7da6c1c7229a0c805e97",
"size": "29479",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2022_08_03_preview/aio/operations/_managed_cluster_snapshots_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
'''Wrapper for pulse
Generated with:
tools/genwrappers.py pulseaudio
Do not modify this file.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import ctypes
from ctypes import *
import pyglet.lib
_lib = pyglet.lib.load_library('pulse')
_int_types = (c_int16, c_int32)
if hasattr(ctypes, 'c_int64'):
# Some builds of ctypes apparently do not have c_int64
# defined; it's a pretty good bet that these builds do not
# have 64-bit pointers.
_int_types += (ctypes.c_int64,)
for t in _int_types:
if sizeof(t) == sizeof(c_size_t):
c_ptrdiff_t = t
class c_void(Structure):
# c_void_p is a buggy return type, converting to int, so
# POINTER(None) == c_void_p is actually written as
# POINTER(c_void), so it can be treated as a real pointer.
_fields_ = [('dummy', c_int)]
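# Illustrative note (not emitted by genwrappers.py): declaring opaque handles
# as POINTER(c_void) keeps them as real ctypes pointer objects, whereas a bare
# c_void_p restype would collapse them to plain integers. For example:
_example_null_handle = POINTER(c_void)()               # a NULL opaque pointer
_example_handle_is_set = bool(_example_null_handle)    # False until assigned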
# /usr/include/pulse/version.h:40
pa_get_library_version = _lib.pa_get_library_version
pa_get_library_version.restype = c_char_p
pa_get_library_version.argtypes = []
PA_API_VERSION = 12 # /usr/include/pulse/version.h:46
PA_PROTOCOL_VERSION = 30 # /usr/include/pulse/version.h:50
PA_MAJOR = 6 # /usr/include/pulse/version.h:53
PA_MINOR = 0 # /usr/include/pulse/version.h:56
PA_MICRO = 0 # /usr/include/pulse/version.h:59
PA_CHANNELS_MAX = 32 # /usr/include/pulse/sample.h:128
PA_RATE_MAX = 192000 # /usr/include/pulse/sample.h:131
enum_pa_sample_format = c_int
PA_SAMPLE_U8 = 0
PA_SAMPLE_ALAW = 1
PA_SAMPLE_ULAW = 2
PA_SAMPLE_S16LE = 3
PA_SAMPLE_S16BE = 4
PA_SAMPLE_FLOAT32LE = 5
PA_SAMPLE_FLOAT32BE = 6
PA_SAMPLE_S32LE = 7
PA_SAMPLE_S32BE = 8
PA_SAMPLE_S24LE = 9
PA_SAMPLE_S24BE = 10
PA_SAMPLE_S24_32LE = 11
PA_SAMPLE_S24_32BE = 12
PA_SAMPLE_MAX = 13
PA_SAMPLE_INVALID = -1
pa_sample_format_t = enum_pa_sample_format # /usr/include/pulse/sample.h:179
class struct_pa_sample_spec(Structure):
__slots__ = [
'format',
'rate',
'channels',
]
struct_pa_sample_spec._fields_ = [
('format', pa_sample_format_t),
('rate', c_uint32),
('channels', c_uint8),
]
pa_sample_spec = struct_pa_sample_spec # /usr/include/pulse/sample.h:257
pa_usec_t = c_uint64 # /usr/include/pulse/sample.h:260
# /usr/include/pulse/sample.h:263
pa_bytes_per_second = _lib.pa_bytes_per_second
pa_bytes_per_second.restype = c_size_t
pa_bytes_per_second.argtypes = [POINTER(pa_sample_spec)]
# /usr/include/pulse/sample.h:266
pa_frame_size = _lib.pa_frame_size
pa_frame_size.restype = c_size_t
pa_frame_size.argtypes = [POINTER(pa_sample_spec)]
# /usr/include/pulse/sample.h:269
pa_sample_size = _lib.pa_sample_size
pa_sample_size.restype = c_size_t
pa_sample_size.argtypes = [POINTER(pa_sample_spec)]
# /usr/include/pulse/sample.h:273
pa_sample_size_of_format = _lib.pa_sample_size_of_format
pa_sample_size_of_format.restype = c_size_t
pa_sample_size_of_format.argtypes = [pa_sample_format_t]
# /usr/include/pulse/sample.h:278
pa_bytes_to_usec = _lib.pa_bytes_to_usec
pa_bytes_to_usec.restype = pa_usec_t
pa_bytes_to_usec.argtypes = [c_uint64, POINTER(pa_sample_spec)]
# /usr/include/pulse/sample.h:283
pa_usec_to_bytes = _lib.pa_usec_to_bytes
pa_usec_to_bytes.restype = c_size_t
pa_usec_to_bytes.argtypes = [pa_usec_t, POINTER(pa_sample_spec)]
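# ---------------------------------------------------------------------------
# Illustrative helper (not part of the generated bindings): shows how the raw
# pa_sample_spec struct and the conversion routines declared above fit
# together. The format/rate/channel values are only examples.
# ---------------------------------------------------------------------------
def _example_sample_spec_math():
    spec = pa_sample_spec()
    spec.format = PA_SAMPLE_S16LE
    spec.rate = 44100
    spec.channels = 2
    bps = pa_bytes_per_second(byref(spec))            # 44100 * 2 ch * 2 bytes = 176400
    frame = pa_frame_size(byref(spec))                # one sample per channel = 4 bytes
    one_second = pa_bytes_to_usec(bps, byref(spec))   # 1000000 microseconds
    return bps, frame, one_second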
# /usr/include/pulse/sample.h:288
pa_sample_spec_init = _lib.pa_sample_spec_init
pa_sample_spec_init.restype = POINTER(pa_sample_spec)
pa_sample_spec_init.argtypes = [POINTER(pa_sample_spec)]
# /usr/include/pulse/sample.h:291
pa_sample_format_valid = _lib.pa_sample_format_valid
pa_sample_format_valid.restype = c_int
pa_sample_format_valid.argtypes = [c_uint]
# /usr/include/pulse/sample.h:294
pa_sample_rate_valid = _lib.pa_sample_rate_valid
pa_sample_rate_valid.restype = c_int
pa_sample_rate_valid.argtypes = [c_uint32]
# /usr/include/pulse/sample.h:298
pa_channels_valid = _lib.pa_channels_valid
pa_channels_valid.restype = c_int
pa_channels_valid.argtypes = [c_uint8]
# /usr/include/pulse/sample.h:301
pa_sample_spec_valid = _lib.pa_sample_spec_valid
pa_sample_spec_valid.restype = c_int
pa_sample_spec_valid.argtypes = [POINTER(pa_sample_spec)]
# /usr/include/pulse/sample.h:304
pa_sample_spec_equal = _lib.pa_sample_spec_equal
pa_sample_spec_equal.restype = c_int
pa_sample_spec_equal.argtypes = [POINTER(pa_sample_spec), POINTER(pa_sample_spec)]
# /usr/include/pulse/sample.h:307
pa_sample_format_to_string = _lib.pa_sample_format_to_string
pa_sample_format_to_string.restype = c_char_p
pa_sample_format_to_string.argtypes = [pa_sample_format_t]
# /usr/include/pulse/sample.h:310
pa_parse_sample_format = _lib.pa_parse_sample_format
pa_parse_sample_format.restype = pa_sample_format_t
pa_parse_sample_format.argtypes = [c_char_p]
PA_SAMPLE_SPEC_SNPRINT_MAX = 32 # /usr/include/pulse/sample.h:317
# /usr/include/pulse/sample.h:320
pa_sample_spec_snprint = _lib.pa_sample_spec_snprint
pa_sample_spec_snprint.restype = c_char_p
pa_sample_spec_snprint.argtypes = [c_char_p, c_size_t, POINTER(pa_sample_spec)]
PA_BYTES_SNPRINT_MAX = 11 # /usr/include/pulse/sample.h:327
# /usr/include/pulse/sample.h:330
pa_bytes_snprint = _lib.pa_bytes_snprint
pa_bytes_snprint.restype = c_char_p
pa_bytes_snprint.argtypes = [c_char_p, c_size_t, c_uint]
# /usr/include/pulse/sample.h:334
pa_sample_format_is_le = _lib.pa_sample_format_is_le
pa_sample_format_is_le.restype = c_int
pa_sample_format_is_le.argtypes = [pa_sample_format_t]
# /usr/include/pulse/sample.h:338
pa_sample_format_is_be = _lib.pa_sample_format_is_be
pa_sample_format_is_be.restype = c_int
pa_sample_format_is_be.argtypes = [pa_sample_format_t]
enum_pa_context_state = c_int
PA_CONTEXT_UNCONNECTED = 0
PA_CONTEXT_CONNECTING = 1
PA_CONTEXT_AUTHORIZING = 2
PA_CONTEXT_SETTING_NAME = 3
PA_CONTEXT_READY = 4
PA_CONTEXT_FAILED = 5
PA_CONTEXT_TERMINATED = 6
pa_context_state_t = enum_pa_context_state # /usr/include/pulse/def.h:45
enum_pa_stream_state = c_int
PA_STREAM_UNCONNECTED = 0
PA_STREAM_CREATING = 1
PA_STREAM_READY = 2
PA_STREAM_FAILED = 3
PA_STREAM_TERMINATED = 4
pa_stream_state_t = enum_pa_stream_state # /usr/include/pulse/def.h:74
enum_pa_operation_state = c_int
PA_OPERATION_RUNNING = 0
PA_OPERATION_DONE = 1
PA_OPERATION_CANCELLED = 2
pa_operation_state_t = enum_pa_operation_state # /usr/include/pulse/def.h:102
enum_pa_context_flags = c_int
PA_CONTEXT_NOFLAGS = 0
PA_CONTEXT_NOAUTOSPAWN = 1
PA_CONTEXT_NOFAIL = 2
pa_context_flags_t = enum_pa_context_flags # /usr/include/pulse/def.h:122
enum_pa_direction = c_int
PA_DIRECTION_OUTPUT = 1
PA_DIRECTION_INPUT = 2
pa_direction_t = enum_pa_direction # /usr/include/pulse/def.h:137
enum_pa_device_type = c_int
PA_DEVICE_TYPE_SINK = 0
PA_DEVICE_TYPE_SOURCE = 1
pa_device_type_t = enum_pa_device_type # /usr/include/pulse/def.h:148
enum_pa_stream_direction = c_int
PA_STREAM_NODIRECTION = 0
PA_STREAM_PLAYBACK = 1
PA_STREAM_RECORD = 2
PA_STREAM_UPLOAD = 3
pa_stream_direction_t = enum_pa_stream_direction # /usr/include/pulse/def.h:161
enum_pa_stream_flags = c_int
PA_STREAM_NOFLAGS = 0
PA_STREAM_START_CORKED = 1
PA_STREAM_INTERPOLATE_TIMING = 2
PA_STREAM_NOT_MONOTONIC = 4
PA_STREAM_AUTO_TIMING_UPDATE = 8
PA_STREAM_NO_REMAP_CHANNELS = 16
PA_STREAM_NO_REMIX_CHANNELS = 32
PA_STREAM_FIX_FORMAT = 64
PA_STREAM_FIX_RATE = 128
PA_STREAM_FIX_CHANNELS = 256
PA_STREAM_DONT_MOVE = 512
PA_STREAM_VARIABLE_RATE = 1024
PA_STREAM_PEAK_DETECT = 2048
PA_STREAM_START_MUTED = 4096
PA_STREAM_ADJUST_LATENCY = 8192
PA_STREAM_EARLY_REQUESTS = 16384
PA_STREAM_DONT_INHIBIT_AUTO_SUSPEND = 32768
PA_STREAM_START_UNMUTED = 65536
PA_STREAM_FAIL_ON_SUSPEND = 131072
PA_STREAM_RELATIVE_VOLUME = 262144
PA_STREAM_PASSTHROUGH = 524288
pa_stream_flags_t = enum_pa_stream_flags # /usr/include/pulse/def.h:355
class struct_pa_buffer_attr(Structure):
__slots__ = [
'maxlength',
'tlength',
'prebuf',
'minreq',
'fragsize',
]
struct_pa_buffer_attr._fields_ = [
('maxlength', c_uint32),
('tlength', c_uint32),
('prebuf', c_uint32),
('minreq', c_uint32),
('fragsize', c_uint32),
]
pa_buffer_attr = struct_pa_buffer_attr # /usr/include/pulse/def.h:452
enum_pa_error_code = c_int
PA_OK = 0
PA_ERR_ACCESS = 1
PA_ERR_COMMAND = 2
PA_ERR_INVALID = 3
PA_ERR_EXIST = 4
PA_ERR_NOENTITY = 5
PA_ERR_CONNECTIONREFUSED = 6
PA_ERR_PROTOCOL = 7
PA_ERR_TIMEOUT = 8
PA_ERR_AUTHKEY = 9
PA_ERR_INTERNAL = 10
PA_ERR_CONNECTIONTERMINATED = 11
PA_ERR_KILLED = 12
PA_ERR_INVALIDSERVER = 13
PA_ERR_MODINITFAILED = 14
PA_ERR_BADSTATE = 15
PA_ERR_NODATA = 16
PA_ERR_VERSION = 17
PA_ERR_TOOLARGE = 18
PA_ERR_NOTSUPPORTED = 19
PA_ERR_UNKNOWN = 20
PA_ERR_NOEXTENSION = 21
PA_ERR_OBSOLETE = 22
PA_ERR_NOTIMPLEMENTED = 23
PA_ERR_FORKED = 24
PA_ERR_IO = 25
PA_ERR_BUSY = 26
PA_ERR_MAX = 27
pa_error_code_t = enum_pa_error_code # /usr/include/pulse/def.h:484
enum_pa_subscription_mask = c_int
PA_SUBSCRIPTION_MASK_NULL = 0
PA_SUBSCRIPTION_MASK_SINK = 1
PA_SUBSCRIPTION_MASK_SOURCE = 2
PA_SUBSCRIPTION_MASK_SINK_INPUT = 4
PA_SUBSCRIPTION_MASK_SOURCE_OUTPUT = 8
PA_SUBSCRIPTION_MASK_MODULE = 16
PA_SUBSCRIPTION_MASK_CLIENT = 32
PA_SUBSCRIPTION_MASK_SAMPLE_CACHE = 64
PA_SUBSCRIPTION_MASK_SERVER = 128
PA_SUBSCRIPTION_MASK_AUTOLOAD = 256
PA_SUBSCRIPTION_MASK_CARD = 512
PA_SUBSCRIPTION_MASK_ALL = 767
pa_subscription_mask_t = enum_pa_subscription_mask # /usr/include/pulse/def.h:554
enum_pa_subscription_event_type = c_int
PA_SUBSCRIPTION_EVENT_SINK = 0
PA_SUBSCRIPTION_EVENT_SOURCE = 1
PA_SUBSCRIPTION_EVENT_SINK_INPUT = 2
PA_SUBSCRIPTION_EVENT_SOURCE_OUTPUT = 3
PA_SUBSCRIPTION_EVENT_MODULE = 4
PA_SUBSCRIPTION_EVENT_CLIENT = 5
PA_SUBSCRIPTION_EVENT_SAMPLE_CACHE = 6
PA_SUBSCRIPTION_EVENT_SERVER = 7
PA_SUBSCRIPTION_EVENT_AUTOLOAD = 8
PA_SUBSCRIPTION_EVENT_CARD = 9
PA_SUBSCRIPTION_EVENT_FACILITY_MASK = 15
PA_SUBSCRIPTION_EVENT_NEW = 0
PA_SUBSCRIPTION_EVENT_CHANGE = 16
PA_SUBSCRIPTION_EVENT_REMOVE = 32
PA_SUBSCRIPTION_EVENT_TYPE_MASK = 48
pa_subscription_event_type_t = enum_pa_subscription_event_type # /usr/include/pulse/def.h:605
class struct_pa_timing_info(Structure):
__slots__ = [
'timestamp',
'synchronized_clocks',
'sink_usec',
'source_usec',
'transport_usec',
'playing',
'write_index_corrupt',
'write_index',
'read_index_corrupt',
'read_index',
'configured_sink_usec',
'configured_source_usec',
'since_underrun',
]
class struct_timeval(Structure):
__slots__ = [
]
struct_timeval._fields_ = [
('_opaque_struct', c_int)
]
struct_pa_timing_info._fields_ = [
('timestamp', struct_timeval),
('synchronized_clocks', c_int),
('sink_usec', pa_usec_t),
('source_usec', pa_usec_t),
('transport_usec', pa_usec_t),
('playing', c_int),
('write_index_corrupt', c_int),
('write_index', c_int64),
('read_index_corrupt', c_int),
('read_index', c_int64),
('configured_sink_usec', pa_usec_t),
('configured_source_usec', pa_usec_t),
('since_underrun', c_int64),
]
pa_timing_info = struct_pa_timing_info # /usr/include/pulse/def.h:725
class struct_pa_spawn_api(Structure):
__slots__ = [
'prefork',
'postfork',
'atfork',
]
struct_pa_spawn_api._fields_ = [
('prefork', POINTER(CFUNCTYPE(None))),
('postfork', POINTER(CFUNCTYPE(None))),
('atfork', POINTER(CFUNCTYPE(None))),
]
pa_spawn_api = struct_pa_spawn_api # /usr/include/pulse/def.h:749
enum_pa_seek_mode = c_int
PA_SEEK_RELATIVE = 0
PA_SEEK_ABSOLUTE = 1
PA_SEEK_RELATIVE_ON_READ = 2
PA_SEEK_RELATIVE_END = 3
pa_seek_mode_t = enum_pa_seek_mode # /usr/include/pulse/def.h:764
enum_pa_sink_flags = c_int
PA_SINK_NOFLAGS = 0
PA_SINK_HW_VOLUME_CTRL = 1
PA_SINK_LATENCY = 2
PA_SINK_HARDWARE = 4
PA_SINK_NETWORK = 8
PA_SINK_HW_MUTE_CTRL = 16
PA_SINK_DECIBEL_VOLUME = 32
PA_SINK_FLAT_VOLUME = 64
PA_SINK_DYNAMIC_LATENCY = 128
PA_SINK_SET_FORMATS = 256
pa_sink_flags_t = enum_pa_sink_flags # /usr/include/pulse/def.h:829
enum_pa_sink_state = c_int
PA_SINK_INVALID_STATE = -1
PA_SINK_RUNNING = 0
PA_SINK_IDLE = 1
PA_SINK_SUSPENDED = 2
PA_SINK_INIT = -2
PA_SINK_UNLINKED = -3
pa_sink_state_t = enum_pa_sink_state # /usr/include/pulse/def.h:875
enum_pa_source_flags = c_int
PA_SOURCE_NOFLAGS = 0
PA_SOURCE_HW_VOLUME_CTRL = 1
PA_SOURCE_LATENCY = 2
PA_SOURCE_HARDWARE = 4
PA_SOURCE_NETWORK = 8
PA_SOURCE_HW_MUTE_CTRL = 16
PA_SOURCE_DECIBEL_VOLUME = 32
PA_SOURCE_DYNAMIC_LATENCY = 64
PA_SOURCE_FLAT_VOLUME = 128
pa_source_flags_t = enum_pa_source_flags # /usr/include/pulse/def.h:946
enum_pa_source_state = c_int
PA_SOURCE_INVALID_STATE = -1
PA_SOURCE_RUNNING = 0
PA_SOURCE_IDLE = 1
PA_SOURCE_SUSPENDED = 2
PA_SOURCE_INIT = -2
PA_SOURCE_UNLINKED = -3
pa_source_state_t = enum_pa_source_state # /usr/include/pulse/def.h:991
pa_free_cb_t = CFUNCTYPE(None, POINTER(None)) # /usr/include/pulse/def.h:1014
enum_pa_port_available = c_int
PA_PORT_AVAILABLE_UNKNOWN = 0
PA_PORT_AVAILABLE_NO = 1
PA_PORT_AVAILABLE_YES = 2
pa_port_available_t = enum_pa_port_available # /usr/include/pulse/def.h:1040
class struct_pa_mainloop_api(Structure):
__slots__ = [
]
struct_pa_mainloop_api._fields_ = [
('_opaque_struct', c_int)
]
class struct_pa_mainloop_api(Structure):
__slots__ = [
]
struct_pa_mainloop_api._fields_ = [
('_opaque_struct', c_int)
]
pa_mainloop_api = struct_pa_mainloop_api # /usr/include/pulse/mainloop-api.h:47
enum_pa_io_event_flags = c_int
PA_IO_EVENT_NULL = 0
PA_IO_EVENT_INPUT = 1
PA_IO_EVENT_OUTPUT = 2
PA_IO_EVENT_HANGUP = 4
PA_IO_EVENT_ERROR = 8
pa_io_event_flags_t = enum_pa_io_event_flags # /usr/include/pulse/mainloop-api.h:56
class struct_pa_io_event(Structure):
__slots__ = [
]
struct_pa_io_event._fields_ = [
('_opaque_struct', c_int)
]
class struct_pa_io_event(Structure):
__slots__ = [
]
struct_pa_io_event._fields_ = [
('_opaque_struct', c_int)
]
pa_io_event = struct_pa_io_event # /usr/include/pulse/mainloop-api.h:59
pa_io_event_cb_t = CFUNCTYPE(None, POINTER(pa_mainloop_api), POINTER(pa_io_event), c_int, pa_io_event_flags_t, POINTER(None)) # /usr/include/pulse/mainloop-api.h:61
pa_io_event_destroy_cb_t = CFUNCTYPE(None, POINTER(pa_mainloop_api), POINTER(pa_io_event), POINTER(None)) # /usr/include/pulse/mainloop-api.h:63
class struct_pa_time_event(Structure):
__slots__ = [
]
struct_pa_time_event._fields_ = [
('_opaque_struct', c_int)
]
class struct_pa_time_event(Structure):
__slots__ = [
]
struct_pa_time_event._fields_ = [
('_opaque_struct', c_int)
]
pa_time_event = struct_pa_time_event # /usr/include/pulse/mainloop-api.h:66
class struct_timeval(Structure):
__slots__ = [
]
struct_timeval._fields_ = [
('_opaque_struct', c_int)
]
class struct_timeval(Structure):
__slots__ = [
]
struct_timeval._fields_ = [
('_opaque_struct', c_int)
]
pa_time_event_cb_t = CFUNCTYPE(None, POINTER(pa_mainloop_api), POINTER(pa_time_event), POINTER(struct_timeval), POINTER(None)) # /usr/include/pulse/mainloop-api.h:68
pa_time_event_destroy_cb_t = CFUNCTYPE(None, POINTER(pa_mainloop_api), POINTER(pa_time_event), POINTER(None)) # /usr/include/pulse/mainloop-api.h:70
class struct_pa_defer_event(Structure):
__slots__ = [
]
struct_pa_defer_event._fields_ = [
('_opaque_struct', c_int)
]
class struct_pa_defer_event(Structure):
__slots__ = [
]
struct_pa_defer_event._fields_ = [
('_opaque_struct', c_int)
]
pa_defer_event = struct_pa_defer_event # /usr/include/pulse/mainloop-api.h:73
pa_defer_event_cb_t = CFUNCTYPE(None, POINTER(pa_mainloop_api), POINTER(pa_defer_event), POINTER(None)) # /usr/include/pulse/mainloop-api.h:75
pa_defer_event_destroy_cb_t = CFUNCTYPE(None, POINTER(pa_mainloop_api), POINTER(pa_defer_event), POINTER(None)) # /usr/include/pulse/mainloop-api.h:77
# /usr/include/pulse/mainloop-api.h:120
pa_mainloop_api_once = _lib.pa_mainloop_api_once
pa_mainloop_api_once.restype = None
pa_mainloop_api_once.argtypes = [POINTER(pa_mainloop_api), CFUNCTYPE(None, POINTER(pa_mainloop_api), POINTER(None)), POINTER(None)]
enum_pa_channel_position = c_int
PA_CHANNEL_POSITION_INVALID = -1
PA_CHANNEL_POSITION_MONO = 0
PA_CHANNEL_POSITION_FRONT_LEFT = 1
PA_CHANNEL_POSITION_FRONT_RIGHT = 2
PA_CHANNEL_POSITION_FRONT_CENTER = 3
PA_CHANNEL_POSITION_LEFT = 0
PA_CHANNEL_POSITION_RIGHT = 0
PA_CHANNEL_POSITION_CENTER = 0
PA_CHANNEL_POSITION_REAR_CENTER = 1
PA_CHANNEL_POSITION_REAR_LEFT = 2
PA_CHANNEL_POSITION_REAR_RIGHT = 3
PA_CHANNEL_POSITION_LFE = 4
PA_CHANNEL_POSITION_SUBWOOFER = 0
PA_CHANNEL_POSITION_FRONT_LEFT_OF_CENTER = 1
PA_CHANNEL_POSITION_FRONT_RIGHT_OF_CENTER = 2
PA_CHANNEL_POSITION_SIDE_LEFT = 3
PA_CHANNEL_POSITION_SIDE_RIGHT = 4
PA_CHANNEL_POSITION_AUX0 = 5
PA_CHANNEL_POSITION_AUX1 = 6
PA_CHANNEL_POSITION_AUX2 = 7
PA_CHANNEL_POSITION_AUX3 = 8
PA_CHANNEL_POSITION_AUX4 = 9
PA_CHANNEL_POSITION_AUX5 = 10
PA_CHANNEL_POSITION_AUX6 = 11
PA_CHANNEL_POSITION_AUX7 = 12
PA_CHANNEL_POSITION_AUX8 = 13
PA_CHANNEL_POSITION_AUX9 = 14
PA_CHANNEL_POSITION_AUX10 = 15
PA_CHANNEL_POSITION_AUX11 = 16
PA_CHANNEL_POSITION_AUX12 = 17
PA_CHANNEL_POSITION_AUX13 = 18
PA_CHANNEL_POSITION_AUX14 = 19
PA_CHANNEL_POSITION_AUX15 = 20
PA_CHANNEL_POSITION_AUX16 = 21
PA_CHANNEL_POSITION_AUX17 = 22
PA_CHANNEL_POSITION_AUX18 = 23
PA_CHANNEL_POSITION_AUX19 = 24
PA_CHANNEL_POSITION_AUX20 = 25
PA_CHANNEL_POSITION_AUX21 = 26
PA_CHANNEL_POSITION_AUX22 = 27
PA_CHANNEL_POSITION_AUX23 = 28
PA_CHANNEL_POSITION_AUX24 = 29
PA_CHANNEL_POSITION_AUX25 = 30
PA_CHANNEL_POSITION_AUX26 = 31
PA_CHANNEL_POSITION_AUX27 = 32
PA_CHANNEL_POSITION_AUX28 = 33
PA_CHANNEL_POSITION_AUX29 = 34
PA_CHANNEL_POSITION_AUX30 = 35
PA_CHANNEL_POSITION_AUX31 = 36
PA_CHANNEL_POSITION_TOP_CENTER = 37
PA_CHANNEL_POSITION_TOP_FRONT_LEFT = 38
PA_CHANNEL_POSITION_TOP_FRONT_RIGHT = 39
PA_CHANNEL_POSITION_TOP_FRONT_CENTER = 40
PA_CHANNEL_POSITION_TOP_REAR_LEFT = 41
PA_CHANNEL_POSITION_TOP_REAR_RIGHT = 42
PA_CHANNEL_POSITION_TOP_REAR_CENTER = 43
PA_CHANNEL_POSITION_MAX = 44
pa_channel_position_t = enum_pa_channel_position # /usr/include/pulse/channelmap.h:147
pa_channel_position_mask_t = c_uint64 # /usr/include/pulse/channelmap.h:210
enum_pa_channel_map_def = c_int
PA_CHANNEL_MAP_AIFF = 0
PA_CHANNEL_MAP_ALSA = 1
PA_CHANNEL_MAP_AUX = 2
PA_CHANNEL_MAP_WAVEEX = 3
PA_CHANNEL_MAP_OSS = 4
PA_CHANNEL_MAP_DEF_MAX = 5
PA_CHANNEL_MAP_DEFAULT = 0
pa_channel_map_def_t = enum_pa_channel_map_def # /usr/include/pulse/channelmap.h:247
class struct_pa_channel_map(Structure):
__slots__ = [
'channels',
'map',
]
struct_pa_channel_map._fields_ = [
('channels', c_uint8),
('map', pa_channel_position_t * 32),
]
pa_channel_map = struct_pa_channel_map # /usr/include/pulse/channelmap.h:268
# /usr/include/pulse/channelmap.h:273
pa_channel_map_init = _lib.pa_channel_map_init
pa_channel_map_init.restype = POINTER(pa_channel_map)
pa_channel_map_init.argtypes = [POINTER(pa_channel_map)]
# /usr/include/pulse/channelmap.h:276
pa_channel_map_init_mono = _lib.pa_channel_map_init_mono
pa_channel_map_init_mono.restype = POINTER(pa_channel_map)
pa_channel_map_init_mono.argtypes = [POINTER(pa_channel_map)]
# /usr/include/pulse/channelmap.h:279
pa_channel_map_init_stereo = _lib.pa_channel_map_init_stereo
pa_channel_map_init_stereo.restype = POINTER(pa_channel_map)
pa_channel_map_init_stereo.argtypes = [POINTER(pa_channel_map)]
# /usr/include/pulse/channelmap.h:285
pa_channel_map_init_auto = _lib.pa_channel_map_init_auto
pa_channel_map_init_auto.restype = POINTER(pa_channel_map)
pa_channel_map_init_auto.argtypes = [POINTER(pa_channel_map), c_uint, pa_channel_map_def_t]
# /usr/include/pulse/channelmap.h:291
pa_channel_map_init_extend = _lib.pa_channel_map_init_extend
pa_channel_map_init_extend.restype = POINTER(pa_channel_map)
pa_channel_map_init_extend.argtypes = [POINTER(pa_channel_map), c_uint, pa_channel_map_def_t]
# /usr/include/pulse/channelmap.h:294
pa_channel_position_to_string = _lib.pa_channel_position_to_string
pa_channel_position_to_string.restype = c_char_p
pa_channel_position_to_string.argtypes = [pa_channel_position_t]
# /usr/include/pulse/channelmap.h:297
pa_channel_position_from_string = _lib.pa_channel_position_from_string
pa_channel_position_from_string.restype = pa_channel_position_t
pa_channel_position_from_string.argtypes = [c_char_p]
# /usr/include/pulse/channelmap.h:300
pa_channel_position_to_pretty_string = _lib.pa_channel_position_to_pretty_string
pa_channel_position_to_pretty_string.restype = c_char_p
pa_channel_position_to_pretty_string.argtypes = [pa_channel_position_t]
PA_CHANNEL_MAP_SNPRINT_MAX = 336 # /usr/include/pulse/channelmap.h:307
# /usr/include/pulse/channelmap.h:310
pa_channel_map_snprint = _lib.pa_channel_map_snprint
pa_channel_map_snprint.restype = c_char_p
pa_channel_map_snprint.argtypes = [c_char_p, c_size_t, POINTER(pa_channel_map)]
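# ---------------------------------------------------------------------------
# Illustrative helper (not part of the generated bindings): builds a stereo
# channel map and renders it with the snprint binding declared just above.
# ---------------------------------------------------------------------------
def _example_stereo_channel_map():
    cmap = pa_channel_map()
    pa_channel_map_init_stereo(byref(cmap))
    buf = create_string_buffer(PA_CHANNEL_MAP_SNPRINT_MAX)
    pa_channel_map_snprint(buf, sizeof(buf), byref(cmap))
    return buf.value.decode('ascii')                  # e.g. 'front-left,front-right'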
# /usr/include/pulse/channelmap.h:316
pa_channel_map_parse = _lib.pa_channel_map_parse
pa_channel_map_parse.restype = POINTER(pa_channel_map)
pa_channel_map_parse.argtypes = [POINTER(pa_channel_map), c_char_p]
# /usr/include/pulse/channelmap.h:319
pa_channel_map_equal = _lib.pa_channel_map_equal
pa_channel_map_equal.restype = c_int
pa_channel_map_equal.argtypes = [POINTER(pa_channel_map), POINTER(pa_channel_map)]
# /usr/include/pulse/channelmap.h:322
pa_channel_map_valid = _lib.pa_channel_map_valid
pa_channel_map_valid.restype = c_int
pa_channel_map_valid.argtypes = [POINTER(pa_channel_map)]
# /usr/include/pulse/channelmap.h:326
pa_channel_map_compatible = _lib.pa_channel_map_compatible
pa_channel_map_compatible.restype = c_int
pa_channel_map_compatible.argtypes = [POINTER(pa_channel_map), POINTER(pa_sample_spec)]
# /usr/include/pulse/channelmap.h:329
pa_channel_map_superset = _lib.pa_channel_map_superset
pa_channel_map_superset.restype = c_int
pa_channel_map_superset.argtypes = [POINTER(pa_channel_map), POINTER(pa_channel_map)]
# /usr/include/pulse/channelmap.h:334
pa_channel_map_can_balance = _lib.pa_channel_map_can_balance
pa_channel_map_can_balance.restype = c_int
pa_channel_map_can_balance.argtypes = [POINTER(pa_channel_map)]
# /usr/include/pulse/channelmap.h:339
pa_channel_map_can_fade = _lib.pa_channel_map_can_fade
pa_channel_map_can_fade.restype = c_int
pa_channel_map_can_fade.argtypes = [POINTER(pa_channel_map)]
# /usr/include/pulse/channelmap.h:345
pa_channel_map_to_name = _lib.pa_channel_map_to_name
pa_channel_map_to_name.restype = c_char_p
pa_channel_map_to_name.argtypes = [POINTER(pa_channel_map)]
# /usr/include/pulse/channelmap.h:350
pa_channel_map_to_pretty_name = _lib.pa_channel_map_to_pretty_name
pa_channel_map_to_pretty_name.restype = c_char_p
pa_channel_map_to_pretty_name.argtypes = [POINTER(pa_channel_map)]
# /usr/include/pulse/channelmap.h:354
pa_channel_map_has_position = _lib.pa_channel_map_has_position
pa_channel_map_has_position.restype = c_int
pa_channel_map_has_position.argtypes = [POINTER(pa_channel_map), pa_channel_position_t]
# /usr/include/pulse/channelmap.h:357
pa_channel_map_mask = _lib.pa_channel_map_mask
pa_channel_map_mask.restype = pa_channel_position_mask_t
pa_channel_map_mask.argtypes = [POINTER(pa_channel_map)]
class struct_pa_operation(Structure):
__slots__ = [
]
struct_pa_operation._fields_ = [
('_opaque_struct', c_int)
]
class struct_pa_operation(Structure):
__slots__ = [
]
struct_pa_operation._fields_ = [
('_opaque_struct', c_int)
]
pa_operation = struct_pa_operation # /usr/include/pulse/operation.h:33
pa_operation_notify_cb_t = CFUNCTYPE(None, POINTER(pa_operation), POINTER(None)) # /usr/include/pulse/operation.h:36
# /usr/include/pulse/operation.h:39
pa_operation_ref = _lib.pa_operation_ref
pa_operation_ref.restype = POINTER(pa_operation)
pa_operation_ref.argtypes = [POINTER(pa_operation)]
# /usr/include/pulse/operation.h:42
pa_operation_unref = _lib.pa_operation_unref
pa_operation_unref.restype = None
pa_operation_unref.argtypes = [POINTER(pa_operation)]
# /usr/include/pulse/operation.h:49
pa_operation_cancel = _lib.pa_operation_cancel
pa_operation_cancel.restype = None
pa_operation_cancel.argtypes = [POINTER(pa_operation)]
# /usr/include/pulse/operation.h:52
pa_operation_get_state = _lib.pa_operation_get_state
pa_operation_get_state.restype = pa_operation_state_t
pa_operation_get_state.argtypes = [POINTER(pa_operation)]
# /usr/include/pulse/operation.h:60
pa_operation_set_state_callback = _lib.pa_operation_set_state_callback
pa_operation_set_state_callback.restype = None
pa_operation_set_state_callback.argtypes = [POINTER(pa_operation), pa_operation_notify_cb_t, POINTER(None)]
class struct_pa_context(Structure):
__slots__ = [
]
struct_pa_context._fields_ = [
('_opaque_struct', c_int)
]
class struct_pa_context(Structure):
__slots__ = [
]
struct_pa_context._fields_ = [
('_opaque_struct', c_int)
]
pa_context = struct_pa_context # /usr/include/pulse/context.h:154
pa_context_notify_cb_t = CFUNCTYPE(None, POINTER(pa_context), POINTER(None)) # /usr/include/pulse/context.h:157
pa_context_success_cb_t = CFUNCTYPE(None, POINTER(pa_context), c_int, POINTER(None)) # /usr/include/pulse/context.h:160
class struct_pa_proplist(Structure):
__slots__ = [
]
struct_pa_proplist._fields_ = [
('_opaque_struct', c_int)
]
class struct_pa_proplist(Structure):
__slots__ = [
]
struct_pa_proplist._fields_ = [
('_opaque_struct', c_int)
]
pa_proplist = struct_pa_proplist # /usr/include/pulse/proplist.h:272
pa_context_event_cb_t = CFUNCTYPE(None, POINTER(pa_context), c_char_p, POINTER(pa_proplist), POINTER(None)) # /usr/include/pulse/context.h:167
# /usr/include/pulse/context.h:172
pa_context_new = _lib.pa_context_new
pa_context_new.restype = POINTER(pa_context)
pa_context_new.argtypes = [POINTER(pa_mainloop_api), c_char_p]
# /usr/include/pulse/context.h:177
pa_context_new_with_proplist = _lib.pa_context_new_with_proplist
pa_context_new_with_proplist.restype = POINTER(pa_context)
pa_context_new_with_proplist.argtypes = [POINTER(pa_mainloop_api), c_char_p, POINTER(pa_proplist)]
# /usr/include/pulse/context.h:180
pa_context_unref = _lib.pa_context_unref
pa_context_unref.restype = None
pa_context_unref.argtypes = [POINTER(pa_context)]
# /usr/include/pulse/context.h:183
pa_context_ref = _lib.pa_context_ref
pa_context_ref.restype = POINTER(pa_context)
pa_context_ref.argtypes = [POINTER(pa_context)]
# /usr/include/pulse/context.h:186
pa_context_set_state_callback = _lib.pa_context_set_state_callback
pa_context_set_state_callback.restype = None
pa_context_set_state_callback.argtypes = [POINTER(pa_context), pa_context_notify_cb_t, POINTER(None)]
# /usr/include/pulse/context.h:190
pa_context_set_event_callback = _lib.pa_context_set_event_callback
pa_context_set_event_callback.restype = None
pa_context_set_event_callback.argtypes = [POINTER(pa_context), pa_context_event_cb_t, POINTER(None)]
# /usr/include/pulse/context.h:193
pa_context_errno = _lib.pa_context_errno
pa_context_errno.restype = c_int
pa_context_errno.argtypes = [POINTER(pa_context)]
# /usr/include/pulse/context.h:196
pa_context_is_pending = _lib.pa_context_is_pending
pa_context_is_pending.restype = c_int
pa_context_is_pending.argtypes = [POINTER(pa_context)]
# /usr/include/pulse/context.h:199
pa_context_get_state = _lib.pa_context_get_state
pa_context_get_state.restype = pa_context_state_t
pa_context_get_state.argtypes = [POINTER(pa_context)]
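# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the generated bindings): a state callback
# of the pa_context_notify_cb_t shape declared above. Keep the CFUNCTYPE
# object referenced for as long as it is registered with
# pa_context_set_state_callback, or ctypes may release it.
# ---------------------------------------------------------------------------
def _on_context_state(context, userdata):
    state = pa_context_get_state(context)
    if state == PA_CONTEXT_READY:
        pass                                          # safe to create streams here
_example_context_state_cb = pa_context_notify_cb_t(_on_context_state)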
# /usr/include/pulse/context.h:209
pa_context_connect = _lib.pa_context_connect
pa_context_connect.restype = c_int
pa_context_connect.argtypes = [POINTER(pa_context), c_char_p, pa_context_flags_t, POINTER(pa_spawn_api)]
# /usr/include/pulse/context.h:212
pa_context_disconnect = _lib.pa_context_disconnect
pa_context_disconnect.restype = None
pa_context_disconnect.argtypes = [POINTER(pa_context)]
# /usr/include/pulse/context.h:215
pa_context_drain = _lib.pa_context_drain
pa_context_drain.restype = POINTER(pa_operation)
pa_context_drain.argtypes = [POINTER(pa_context), pa_context_notify_cb_t, POINTER(None)]
# /usr/include/pulse/context.h:220
pa_context_exit_daemon = _lib.pa_context_exit_daemon
pa_context_exit_daemon.restype = POINTER(pa_operation)
pa_context_exit_daemon.argtypes = [POINTER(pa_context), pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/context.h:223
pa_context_set_default_sink = _lib.pa_context_set_default_sink
pa_context_set_default_sink.restype = POINTER(pa_operation)
pa_context_set_default_sink.argtypes = [POINTER(pa_context), c_char_p, pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/context.h:226
pa_context_set_default_source = _lib.pa_context_set_default_source
pa_context_set_default_source.restype = POINTER(pa_operation)
pa_context_set_default_source.argtypes = [POINTER(pa_context), c_char_p, pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/context.h:229
pa_context_is_local = _lib.pa_context_is_local
pa_context_is_local.restype = c_int
pa_context_is_local.argtypes = [POINTER(pa_context)]
# /usr/include/pulse/context.h:232
pa_context_set_name = _lib.pa_context_set_name
pa_context_set_name.restype = POINTER(pa_operation)
pa_context_set_name.argtypes = [POINTER(pa_context), c_char_p, pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/context.h:235
pa_context_get_server = _lib.pa_context_get_server
pa_context_get_server.restype = c_char_p
pa_context_get_server.argtypes = [POINTER(pa_context)]
# /usr/include/pulse/context.h:238
pa_context_get_protocol_version = _lib.pa_context_get_protocol_version
pa_context_get_protocol_version.restype = c_uint32
pa_context_get_protocol_version.argtypes = [POINTER(pa_context)]
# /usr/include/pulse/context.h:241
pa_context_get_server_protocol_version = _lib.pa_context_get_server_protocol_version
pa_context_get_server_protocol_version.restype = c_uint32
pa_context_get_server_protocol_version.argtypes = [POINTER(pa_context)]
enum_pa_update_mode = c_int
PA_UPDATE_SET = 0
PA_UPDATE_MERGE = 1
PA_UPDATE_REPLACE = 2
pa_update_mode_t = enum_pa_update_mode # /usr/include/pulse/proplist.h:337
# /usr/include/pulse/context.h:248
pa_context_proplist_update = _lib.pa_context_proplist_update
pa_context_proplist_update.restype = POINTER(pa_operation)
pa_context_proplist_update.argtypes = [POINTER(pa_context), pa_update_mode_t, POINTER(pa_proplist), pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/context.h:251
pa_context_proplist_remove = _lib.pa_context_proplist_remove
pa_context_proplist_remove.restype = POINTER(pa_operation)
pa_context_proplist_remove.argtypes = [POINTER(pa_context), POINTER(c_char_p), pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/context.h:256
pa_context_get_index = _lib.pa_context_get_index
pa_context_get_index.restype = c_uint32
pa_context_get_index.argtypes = [POINTER(pa_context)]
# /usr/include/pulse/context.h:260
pa_context_rttime_new = _lib.pa_context_rttime_new
pa_context_rttime_new.restype = POINTER(pa_time_event)
pa_context_rttime_new.argtypes = [POINTER(pa_context), pa_usec_t, pa_time_event_cb_t, POINTER(None)]
# /usr/include/pulse/context.h:264
pa_context_rttime_restart = _lib.pa_context_rttime_restart
pa_context_rttime_restart.restype = None
pa_context_rttime_restart.argtypes = [POINTER(pa_context), POINTER(pa_time_event), pa_usec_t]
# /usr/include/pulse/context.h:279
pa_context_get_tile_size = _lib.pa_context_get_tile_size
pa_context_get_tile_size.restype = c_size_t
pa_context_get_tile_size.argtypes = [POINTER(pa_context), POINTER(pa_sample_spec)]
# /usr/include/pulse/context.h:287
pa_context_load_cookie_from_file = _lib.pa_context_load_cookie_from_file
pa_context_load_cookie_from_file.restype = c_int
pa_context_load_cookie_from_file.argtypes = [POINTER(pa_context), c_char_p]
pa_volume_t = c_uint32 # /usr/include/pulse/volume.h:120
class struct_pa_cvolume(Structure):
__slots__ = [
'channels',
'values',
]
struct_pa_cvolume._fields_ = [
('channels', c_uint8),
('values', pa_volume_t * 32),
]
pa_cvolume = struct_pa_cvolume # /usr/include/pulse/volume.h:151
# /usr/include/pulse/volume.h:154
pa_cvolume_equal = _lib.pa_cvolume_equal
pa_cvolume_equal.restype = c_int
pa_cvolume_equal.argtypes = [POINTER(pa_cvolume), POINTER(pa_cvolume)]
# /usr/include/pulse/volume.h:159
pa_cvolume_init = _lib.pa_cvolume_init
pa_cvolume_init.restype = POINTER(pa_cvolume)
pa_cvolume_init.argtypes = [POINTER(pa_cvolume)]
# /usr/include/pulse/volume.h:168
pa_cvolume_set = _lib.pa_cvolume_set
pa_cvolume_set.restype = POINTER(pa_cvolume)
pa_cvolume_set.argtypes = [POINTER(pa_cvolume), c_uint, pa_volume_t]
PA_CVOLUME_SNPRINT_MAX = 320 # /usr/include/pulse/volume.h:175
# /usr/include/pulse/volume.h:178
pa_cvolume_snprint = _lib.pa_cvolume_snprint
pa_cvolume_snprint.restype = c_char_p
pa_cvolume_snprint.argtypes = [c_char_p, c_size_t, POINTER(pa_cvolume)]
PA_SW_CVOLUME_SNPRINT_DB_MAX = 448 # /usr/include/pulse/volume.h:185
# /usr/include/pulse/volume.h:188
pa_sw_cvolume_snprint_dB = _lib.pa_sw_cvolume_snprint_dB
pa_sw_cvolume_snprint_dB.restype = c_char_p
pa_sw_cvolume_snprint_dB.argtypes = [c_char_p, c_size_t, POINTER(pa_cvolume)]
PA_CVOLUME_SNPRINT_VERBOSE_MAX = 1984 # /usr/include/pulse/volume.h:194
# /usr/include/pulse/volume.h:200
pa_cvolume_snprint_verbose = _lib.pa_cvolume_snprint_verbose
pa_cvolume_snprint_verbose.restype = c_char_p
pa_cvolume_snprint_verbose.argtypes = [c_char_p, c_size_t, POINTER(pa_cvolume), POINTER(pa_channel_map), c_int]
PA_VOLUME_SNPRINT_MAX = 10 # /usr/include/pulse/volume.h:207
# /usr/include/pulse/volume.h:210
pa_volume_snprint = _lib.pa_volume_snprint
pa_volume_snprint.restype = c_char_p
pa_volume_snprint.argtypes = [c_char_p, c_size_t, pa_volume_t]
PA_SW_VOLUME_SNPRINT_DB_MAX = 11 # /usr/include/pulse/volume.h:217
# /usr/include/pulse/volume.h:220
pa_sw_volume_snprint_dB = _lib.pa_sw_volume_snprint_dB
pa_sw_volume_snprint_dB.restype = c_char_p
pa_sw_volume_snprint_dB.argtypes = [c_char_p, c_size_t, pa_volume_t]
PA_VOLUME_SNPRINT_VERBOSE_MAX = 35 # /usr/include/pulse/volume.h:226
# /usr/include/pulse/volume.h:231
pa_volume_snprint_verbose = _lib.pa_volume_snprint_verbose
pa_volume_snprint_verbose.restype = c_char_p
pa_volume_snprint_verbose.argtypes = [c_char_p, c_size_t, pa_volume_t, c_int]
# /usr/include/pulse/volume.h:234
pa_cvolume_avg = _lib.pa_cvolume_avg
pa_cvolume_avg.restype = pa_volume_t
pa_cvolume_avg.argtypes = [POINTER(pa_cvolume)]
# /usr/include/pulse/volume.h:241
pa_cvolume_avg_mask = _lib.pa_cvolume_avg_mask
pa_cvolume_avg_mask.restype = pa_volume_t
pa_cvolume_avg_mask.argtypes = [POINTER(pa_cvolume), POINTER(pa_channel_map), pa_channel_position_mask_t]
# /usr/include/pulse/volume.h:244
pa_cvolume_max = _lib.pa_cvolume_max
pa_cvolume_max.restype = pa_volume_t
pa_cvolume_max.argtypes = [POINTER(pa_cvolume)]
# /usr/include/pulse/volume.h:251
pa_cvolume_max_mask = _lib.pa_cvolume_max_mask
pa_cvolume_max_mask.restype = pa_volume_t
pa_cvolume_max_mask.argtypes = [POINTER(pa_cvolume), POINTER(pa_channel_map), pa_channel_position_mask_t]
# /usr/include/pulse/volume.h:254
pa_cvolume_min = _lib.pa_cvolume_min
pa_cvolume_min.restype = pa_volume_t
pa_cvolume_min.argtypes = [POINTER(pa_cvolume)]
# /usr/include/pulse/volume.h:261
pa_cvolume_min_mask = _lib.pa_cvolume_min_mask
pa_cvolume_min_mask.restype = pa_volume_t
pa_cvolume_min_mask.argtypes = [POINTER(pa_cvolume), POINTER(pa_channel_map), pa_channel_position_mask_t]
# /usr/include/pulse/volume.h:264
pa_cvolume_valid = _lib.pa_cvolume_valid
pa_cvolume_valid.restype = c_int
pa_cvolume_valid.argtypes = [POINTER(pa_cvolume)]
# /usr/include/pulse/volume.h:267
pa_cvolume_channels_equal_to = _lib.pa_cvolume_channels_equal_to
pa_cvolume_channels_equal_to.restype = c_int
pa_cvolume_channels_equal_to.argtypes = [POINTER(pa_cvolume), pa_volume_t]
# /usr/include/pulse/volume.h:278
pa_sw_volume_multiply = _lib.pa_sw_volume_multiply
pa_sw_volume_multiply.restype = pa_volume_t
pa_sw_volume_multiply.argtypes = [pa_volume_t, pa_volume_t]
# /usr/include/pulse/volume.h:283
pa_sw_cvolume_multiply = _lib.pa_sw_cvolume_multiply
pa_sw_cvolume_multiply.restype = POINTER(pa_cvolume)
pa_sw_cvolume_multiply.argtypes = [POINTER(pa_cvolume), POINTER(pa_cvolume), POINTER(pa_cvolume)]
# /usr/include/pulse/volume.h:289
pa_sw_cvolume_multiply_scalar = _lib.pa_sw_cvolume_multiply_scalar
pa_sw_cvolume_multiply_scalar.restype = POINTER(pa_cvolume)
pa_sw_cvolume_multiply_scalar.argtypes = [POINTER(pa_cvolume), POINTER(pa_cvolume), pa_volume_t]
# /usr/include/pulse/volume.h:295
pa_sw_volume_divide = _lib.pa_sw_volume_divide
pa_sw_volume_divide.restype = pa_volume_t
pa_sw_volume_divide.argtypes = [pa_volume_t, pa_volume_t]
# /usr/include/pulse/volume.h:300
pa_sw_cvolume_divide = _lib.pa_sw_cvolume_divide
pa_sw_cvolume_divide.restype = POINTER(pa_cvolume)
pa_sw_cvolume_divide.argtypes = [POINTER(pa_cvolume), POINTER(pa_cvolume), POINTER(pa_cvolume)]
# /usr/include/pulse/volume.h:306
pa_sw_cvolume_divide_scalar = _lib.pa_sw_cvolume_divide_scalar
pa_sw_cvolume_divide_scalar.restype = POINTER(pa_cvolume)
pa_sw_cvolume_divide_scalar.argtypes = [POINTER(pa_cvolume), POINTER(pa_cvolume), pa_volume_t]
# /usr/include/pulse/volume.h:309
pa_sw_volume_from_dB = _lib.pa_sw_volume_from_dB
pa_sw_volume_from_dB.restype = pa_volume_t
pa_sw_volume_from_dB.argtypes = [c_double]
# /usr/include/pulse/volume.h:312
pa_sw_volume_to_dB = _lib.pa_sw_volume_to_dB
pa_sw_volume_to_dB.restype = c_double
pa_sw_volume_to_dB.argtypes = [pa_volume_t]
# /usr/include/pulse/volume.h:316
pa_sw_volume_from_linear = _lib.pa_sw_volume_from_linear
pa_sw_volume_from_linear.restype = pa_volume_t
pa_sw_volume_from_linear.argtypes = [c_double]
# /usr/include/pulse/volume.h:319
pa_sw_volume_to_linear = _lib.pa_sw_volume_to_linear
pa_sw_volume_to_linear.restype = c_double
pa_sw_volume_to_linear.argtypes = [pa_volume_t]
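# ---------------------------------------------------------------------------
# Illustrative helper (not part of the generated bindings): converts a dB
# attenuation into a pa_volume_t and applies it uniformly across a stereo
# pa_cvolume. The -6 dB default is only an example value.
# ---------------------------------------------------------------------------
def _example_stereo_volume_from_dB(db=-6.0, channels=2):
    vol = pa_sw_volume_from_dB(db)                    # software volume for the given dB
    cvol = pa_cvolume()
    pa_cvolume_set(byref(cvol), channels, vol)        # same volume on every channel
    return pa_sw_volume_to_linear(vol), cvol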
# /usr/include/pulse/volume.h:329
pa_cvolume_remap = _lib.pa_cvolume_remap
pa_cvolume_remap.restype = POINTER(pa_cvolume)
pa_cvolume_remap.argtypes = [POINTER(pa_cvolume), POINTER(pa_channel_map), POINTER(pa_channel_map)]
# /usr/include/pulse/volume.h:333
pa_cvolume_compatible = _lib.pa_cvolume_compatible
pa_cvolume_compatible.restype = c_int
pa_cvolume_compatible.argtypes = [POINTER(pa_cvolume), POINTER(pa_sample_spec)]
# /usr/include/pulse/volume.h:337
pa_cvolume_compatible_with_channel_map = _lib.pa_cvolume_compatible_with_channel_map
pa_cvolume_compatible_with_channel_map.restype = c_int
pa_cvolume_compatible_with_channel_map.argtypes = [POINTER(pa_cvolume), POINTER(pa_channel_map)]
# /usr/include/pulse/volume.h:344
pa_cvolume_get_balance = _lib.pa_cvolume_get_balance
pa_cvolume_get_balance.restype = c_float
pa_cvolume_get_balance.argtypes = [POINTER(pa_cvolume), POINTER(pa_channel_map)]
# /usr/include/pulse/volume.h:355
pa_cvolume_set_balance = _lib.pa_cvolume_set_balance
pa_cvolume_set_balance.restype = POINTER(pa_cvolume)
pa_cvolume_set_balance.argtypes = [POINTER(pa_cvolume), POINTER(pa_channel_map), c_float]
# /usr/include/pulse/volume.h:362
pa_cvolume_get_fade = _lib.pa_cvolume_get_fade
pa_cvolume_get_fade.restype = c_float
pa_cvolume_get_fade.argtypes = [POINTER(pa_cvolume), POINTER(pa_channel_map)]
# /usr/include/pulse/volume.h:373
pa_cvolume_set_fade = _lib.pa_cvolume_set_fade
pa_cvolume_set_fade.restype = POINTER(pa_cvolume)
pa_cvolume_set_fade.argtypes = [POINTER(pa_cvolume), POINTER(pa_channel_map), c_float]
# /usr/include/pulse/volume.h:378
pa_cvolume_scale = _lib.pa_cvolume_scale
pa_cvolume_scale.restype = POINTER(pa_cvolume)
pa_cvolume_scale.argtypes = [POINTER(pa_cvolume), pa_volume_t]
# /usr/include/pulse/volume.h:384
pa_cvolume_scale_mask = _lib.pa_cvolume_scale_mask
pa_cvolume_scale_mask.restype = POINTER(pa_cvolume)
pa_cvolume_scale_mask.argtypes = [POINTER(pa_cvolume), pa_volume_t, POINTER(pa_channel_map), pa_channel_position_mask_t]
# /usr/include/pulse/volume.h:391
pa_cvolume_set_position = _lib.pa_cvolume_set_position
pa_cvolume_set_position.restype = POINTER(pa_cvolume)
pa_cvolume_set_position.argtypes = [POINTER(pa_cvolume), POINTER(pa_channel_map), pa_channel_position_t, pa_volume_t]
# /usr/include/pulse/volume.h:397
pa_cvolume_get_position = _lib.pa_cvolume_get_position
pa_cvolume_get_position.restype = pa_volume_t
pa_cvolume_get_position.argtypes = [POINTER(pa_cvolume), POINTER(pa_channel_map), pa_channel_position_t]
# /usr/include/pulse/volume.h:402
pa_cvolume_merge = _lib.pa_cvolume_merge
pa_cvolume_merge.restype = POINTER(pa_cvolume)
pa_cvolume_merge.argtypes = [POINTER(pa_cvolume), POINTER(pa_cvolume), POINTER(pa_cvolume)]
# /usr/include/pulse/volume.h:406
pa_cvolume_inc_clamp = _lib.pa_cvolume_inc_clamp
pa_cvolume_inc_clamp.restype = POINTER(pa_cvolume)
pa_cvolume_inc_clamp.argtypes = [POINTER(pa_cvolume), pa_volume_t, pa_volume_t]
# /usr/include/pulse/volume.h:410
pa_cvolume_inc = _lib.pa_cvolume_inc
pa_cvolume_inc.restype = POINTER(pa_cvolume)
pa_cvolume_inc.argtypes = [POINTER(pa_cvolume), pa_volume_t]
# /usr/include/pulse/volume.h:414
pa_cvolume_dec = _lib.pa_cvolume_dec
pa_cvolume_dec.restype = POINTER(pa_cvolume)
pa_cvolume_dec.argtypes = [POINTER(pa_cvolume), pa_volume_t]
class struct_pa_stream(Structure):
__slots__ = [
]
struct_pa_stream._fields_ = [
('_opaque_struct', c_int)
]
class struct_pa_stream(Structure):
__slots__ = [
]
struct_pa_stream._fields_ = [
('_opaque_struct', c_int)
]
pa_stream = struct_pa_stream # /usr/include/pulse/stream.h:335
pa_stream_success_cb_t = CFUNCTYPE(None, POINTER(pa_stream), c_int, POINTER(None)) # /usr/include/pulse/stream.h:338
pa_stream_request_cb_t = CFUNCTYPE(None, POINTER(pa_stream), c_size_t, POINTER(None)) # /usr/include/pulse/stream.h:341
pa_stream_notify_cb_t = CFUNCTYPE(None, POINTER(pa_stream), POINTER(None)) # /usr/include/pulse/stream.h:344
pa_stream_event_cb_t = CFUNCTYPE(None, POINTER(pa_stream), c_char_p, POINTER(pa_proplist), POINTER(None)) # /usr/include/pulse/stream.h:352
# /usr/include/pulse/stream.h:357
pa_stream_new = _lib.pa_stream_new
pa_stream_new.restype = POINTER(pa_stream)
pa_stream_new.argtypes = [POINTER(pa_context), c_char_p, POINTER(pa_sample_spec), POINTER(pa_channel_map)]
# /usr/include/pulse/stream.h:366
pa_stream_new_with_proplist = _lib.pa_stream_new_with_proplist
pa_stream_new_with_proplist.restype = POINTER(pa_stream)
pa_stream_new_with_proplist.argtypes = [POINTER(pa_context), c_char_p, POINTER(pa_sample_spec), POINTER(pa_channel_map), POINTER(pa_proplist)]
class struct_pa_format_info(Structure):
__slots__ = [
'encoding',
'plist',
]
enum_pa_encoding = c_int
PA_ENCODING_ANY = 0
PA_ENCODING_PCM = 1
PA_ENCODING_AC3_IEC61937 = 2
PA_ENCODING_EAC3_IEC61937 = 3
PA_ENCODING_MPEG_IEC61937 = 4
PA_ENCODING_DTS_IEC61937 = 5
PA_ENCODING_MPEG2_AAC_IEC61937 = 6
PA_ENCODING_MAX = 7
PA_ENCODING_INVALID = -1
pa_encoding_t = enum_pa_encoding # /usr/include/pulse/format.h:64
struct_pa_format_info._fields_ = [
('encoding', pa_encoding_t),
('plist', POINTER(pa_proplist)),
]
pa_format_info = struct_pa_format_info # /usr/include/pulse/format.h:91
# /usr/include/pulse/stream.h:377
pa_stream_new_extended = _lib.pa_stream_new_extended
pa_stream_new_extended.restype = POINTER(pa_stream)
pa_stream_new_extended.argtypes = [POINTER(pa_context), c_char_p, POINTER(POINTER(pa_format_info)), c_uint, POINTER(pa_proplist)]
# /usr/include/pulse/stream.h:385
pa_stream_unref = _lib.pa_stream_unref
pa_stream_unref.restype = None
pa_stream_unref.argtypes = [POINTER(pa_stream)]
# /usr/include/pulse/stream.h:388
pa_stream_ref = _lib.pa_stream_ref
pa_stream_ref.restype = POINTER(pa_stream)
pa_stream_ref.argtypes = [POINTER(pa_stream)]
# /usr/include/pulse/stream.h:391
pa_stream_get_state = _lib.pa_stream_get_state
pa_stream_get_state.restype = pa_stream_state_t
pa_stream_get_state.argtypes = [POINTER(pa_stream)]
# /usr/include/pulse/stream.h:394
pa_stream_get_context = _lib.pa_stream_get_context
pa_stream_get_context.restype = POINTER(pa_context)
pa_stream_get_context.argtypes = [POINTER(pa_stream)]
# /usr/include/pulse/stream.h:400
pa_stream_get_index = _lib.pa_stream_get_index
pa_stream_get_index.restype = c_uint32
pa_stream_get_index.argtypes = [POINTER(pa_stream)]
# /usr/include/pulse/stream.h:411
pa_stream_get_device_index = _lib.pa_stream_get_device_index
pa_stream_get_device_index.restype = c_uint32
pa_stream_get_device_index.argtypes = [POINTER(pa_stream)]
# /usr/include/pulse/stream.h:422
pa_stream_get_device_name = _lib.pa_stream_get_device_name
pa_stream_get_device_name.restype = c_char_p
pa_stream_get_device_name.argtypes = [POINTER(pa_stream)]
# /usr/include/pulse/stream.h:428
pa_stream_is_suspended = _lib.pa_stream_is_suspended
pa_stream_is_suspended.restype = c_int
pa_stream_is_suspended.argtypes = [POINTER(pa_stream)]
# /usr/include/pulse/stream.h:432
pa_stream_is_corked = _lib.pa_stream_is_corked
pa_stream_is_corked.restype = c_int
pa_stream_is_corked.argtypes = [POINTER(pa_stream)]
# /usr/include/pulse/stream.h:458
pa_stream_connect_playback = _lib.pa_stream_connect_playback
pa_stream_connect_playback.restype = c_int
pa_stream_connect_playback.argtypes = [POINTER(pa_stream), c_char_p, POINTER(pa_buffer_attr), pa_stream_flags_t, POINTER(pa_cvolume), POINTER(pa_stream)]
# /usr/include/pulse/stream.h:467
pa_stream_connect_record = _lib.pa_stream_connect_record
pa_stream_connect_record.restype = c_int
pa_stream_connect_record.argtypes = [POINTER(pa_stream), c_char_p, POINTER(pa_buffer_attr), pa_stream_flags_t]
# /usr/include/pulse/stream.h:474
pa_stream_disconnect = _lib.pa_stream_disconnect
pa_stream_disconnect.restype = c_int
pa_stream_disconnect.argtypes = [POINTER(pa_stream)]
# /usr/include/pulse/stream.h:508
pa_stream_begin_write = _lib.pa_stream_begin_write
pa_stream_begin_write.restype = c_int
pa_stream_begin_write.argtypes = [POINTER(pa_stream), POINTER(POINTER(None)), POINTER(c_size_t)]
# /usr/include/pulse/stream.h:522
pa_stream_cancel_write = _lib.pa_stream_cancel_write
pa_stream_cancel_write.restype = c_int
pa_stream_cancel_write.argtypes = [POINTER(pa_stream)]
# /usr/include/pulse/stream.h:547
pa_stream_write = _lib.pa_stream_write
pa_stream_write.restype = c_int
pa_stream_write.argtypes = [POINTER(pa_stream), POINTER(None), c_size_t, pa_free_cb_t, c_int64, pa_seek_mode_t]
# /usr/include/pulse/stream.h:557
pa_stream_write_ext_free = _lib.pa_stream_write_ext_free
pa_stream_write_ext_free.restype = c_int
pa_stream_write_ext_free.argtypes = [POINTER(pa_stream), POINTER(None), c_size_t, pa_free_cb_t, POINTER(None), c_int64, pa_seek_mode_t]
# /usr/include/pulse/stream.h:582
pa_stream_peek = _lib.pa_stream_peek
pa_stream_peek.restype = c_int
pa_stream_peek.argtypes = [POINTER(pa_stream), POINTER(POINTER(None)), POINTER(c_size_t)]
# /usr/include/pulse/stream.h:589
pa_stream_drop = _lib.pa_stream_drop
pa_stream_drop.restype = c_int
pa_stream_drop.argtypes = [POINTER(pa_stream)]
# /usr/include/pulse/stream.h:592
pa_stream_writable_size = _lib.pa_stream_writable_size
pa_stream_writable_size.restype = c_size_t
pa_stream_writable_size.argtypes = [POINTER(pa_stream)]
# /usr/include/pulse/stream.h:595
pa_stream_readable_size = _lib.pa_stream_readable_size
pa_stream_readable_size.restype = c_size_t
pa_stream_readable_size.argtypes = [POINTER(pa_stream)]
# /usr/include/pulse/stream.h:601
pa_stream_drain = _lib.pa_stream_drain
pa_stream_drain.restype = POINTER(pa_operation)
pa_stream_drain.argtypes = [POINTER(pa_stream), pa_stream_success_cb_t, POINTER(None)]
# /usr/include/pulse/stream.h:607
pa_stream_update_timing_info = _lib.pa_stream_update_timing_info
pa_stream_update_timing_info.restype = POINTER(pa_operation)
pa_stream_update_timing_info.argtypes = [POINTER(pa_stream), pa_stream_success_cb_t, POINTER(None)]
# /usr/include/pulse/stream.h:610
pa_stream_set_state_callback = _lib.pa_stream_set_state_callback
pa_stream_set_state_callback.restype = None
pa_stream_set_state_callback.argtypes = [POINTER(pa_stream), pa_stream_notify_cb_t, POINTER(None)]
# /usr/include/pulse/stream.h:614
pa_stream_set_write_callback = _lib.pa_stream_set_write_callback
pa_stream_set_write_callback.restype = None
pa_stream_set_write_callback.argtypes = [POINTER(pa_stream), pa_stream_request_cb_t, POINTER(None)]
# /usr/include/pulse/stream.h:617
pa_stream_set_read_callback = _lib.pa_stream_set_read_callback
pa_stream_set_read_callback.restype = None
pa_stream_set_read_callback.argtypes = [POINTER(pa_stream), pa_stream_request_cb_t, POINTER(None)]
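# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the generated bindings): a write-request
# callback matching pa_stream_request_cb_t above. PulseAudio calls it when
# `nbytes` more bytes may be written; real code would feed data to
# pa_stream_write from here. Keep the CFUNCTYPE object alive while it is
# registered via pa_stream_set_write_callback.
# ---------------------------------------------------------------------------
def _on_stream_writable(stream, nbytes, userdata):
    pass                                              # generate/copy up to `nbytes` bytes
_example_stream_write_cb = pa_stream_request_cb_t(_on_stream_writable)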
# /usr/include/pulse/stream.h:620
pa_stream_set_overflow_callback = _lib.pa_stream_set_overflow_callback
pa_stream_set_overflow_callback.restype = None
pa_stream_set_overflow_callback.argtypes = [POINTER(pa_stream), pa_stream_notify_cb_t, POINTER(None)]
# /usr/include/pulse/stream.h:626
pa_stream_get_underflow_index = _lib.pa_stream_get_underflow_index
pa_stream_get_underflow_index.restype = c_int64
pa_stream_get_underflow_index.argtypes = [POINTER(pa_stream)]
# /usr/include/pulse/stream.h:629
pa_stream_set_underflow_callback = _lib.pa_stream_set_underflow_callback
pa_stream_set_underflow_callback.restype = None
pa_stream_set_underflow_callback.argtypes = [POINTER(pa_stream), pa_stream_notify_cb_t, POINTER(None)]
# /usr/include/pulse/stream.h:636
pa_stream_set_started_callback = _lib.pa_stream_set_started_callback
pa_stream_set_started_callback.restype = None
pa_stream_set_started_callback.argtypes = [POINTER(pa_stream), pa_stream_notify_cb_t, POINTER(None)]
# /usr/include/pulse/stream.h:641
pa_stream_set_latency_update_callback = _lib.pa_stream_set_latency_update_callback
pa_stream_set_latency_update_callback.restype = None
pa_stream_set_latency_update_callback.argtypes = [POINTER(pa_stream), pa_stream_notify_cb_t, POINTER(None)]
# /usr/include/pulse/stream.h:648
pa_stream_set_moved_callback = _lib.pa_stream_set_moved_callback
pa_stream_set_moved_callback.restype = None
pa_stream_set_moved_callback.argtypes = [POINTER(pa_stream), pa_stream_notify_cb_t, POINTER(None)]
# /usr/include/pulse/stream.h:658
pa_stream_set_suspended_callback = _lib.pa_stream_set_suspended_callback
pa_stream_set_suspended_callback.restype = None
pa_stream_set_suspended_callback.argtypes = [POINTER(pa_stream), pa_stream_notify_cb_t, POINTER(None)]
# /usr/include/pulse/stream.h:662
pa_stream_set_event_callback = _lib.pa_stream_set_event_callback
pa_stream_set_event_callback.restype = None
pa_stream_set_event_callback.argtypes = [POINTER(pa_stream), pa_stream_event_cb_t, POINTER(None)]
# /usr/include/pulse/stream.h:669
pa_stream_set_buffer_attr_callback = _lib.pa_stream_set_buffer_attr_callback
pa_stream_set_buffer_attr_callback.restype = None
pa_stream_set_buffer_attr_callback.argtypes = [POINTER(pa_stream), pa_stream_notify_cb_t, POINTER(None)]
# /usr/include/pulse/stream.h:681
pa_stream_cork = _lib.pa_stream_cork
pa_stream_cork.restype = POINTER(pa_operation)
pa_stream_cork.argtypes = [POINTER(pa_stream), c_int, pa_stream_success_cb_t, POINTER(None)]
# /usr/include/pulse/stream.h:686
pa_stream_flush = _lib.pa_stream_flush
pa_stream_flush.restype = POINTER(pa_operation)
pa_stream_flush.argtypes = [POINTER(pa_stream), pa_stream_success_cb_t, POINTER(None)]
# /usr/include/pulse/stream.h:690
pa_stream_prebuf = _lib.pa_stream_prebuf
pa_stream_prebuf.restype = POINTER(pa_operation)
pa_stream_prebuf.argtypes = [POINTER(pa_stream), pa_stream_success_cb_t, POINTER(None)]
# /usr/include/pulse/stream.h:695
pa_stream_trigger = _lib.pa_stream_trigger
pa_stream_trigger.restype = POINTER(pa_operation)
pa_stream_trigger.argtypes = [POINTER(pa_stream), pa_stream_success_cb_t, POINTER(None)]
# /usr/include/pulse/stream.h:698
pa_stream_set_name = _lib.pa_stream_set_name
pa_stream_set_name.restype = POINTER(pa_operation)
pa_stream_set_name.argtypes = [POINTER(pa_stream), c_char_p, pa_stream_success_cb_t, POINTER(None)]
# /usr/include/pulse/stream.h:731
pa_stream_get_time = _lib.pa_stream_get_time
pa_stream_get_time.restype = c_int
pa_stream_get_time.argtypes = [POINTER(pa_stream), POINTER(pa_usec_t)]
# /usr/include/pulse/stream.h:745
pa_stream_get_latency = _lib.pa_stream_get_latency
pa_stream_get_latency.restype = c_int
pa_stream_get_latency.argtypes = [POINTER(pa_stream), POINTER(pa_usec_t), POINTER(c_int)]
# /usr/include/pulse/stream.h:761
pa_stream_get_timing_info = _lib.pa_stream_get_timing_info
pa_stream_get_timing_info.restype = POINTER(pa_timing_info)
pa_stream_get_timing_info.argtypes = [POINTER(pa_stream)]
# /usr/include/pulse/stream.h:764
pa_stream_get_sample_spec = _lib.pa_stream_get_sample_spec
pa_stream_get_sample_spec.restype = POINTER(pa_sample_spec)
pa_stream_get_sample_spec.argtypes = [POINTER(pa_stream)]
# /usr/include/pulse/stream.h:767
pa_stream_get_channel_map = _lib.pa_stream_get_channel_map
pa_stream_get_channel_map.restype = POINTER(pa_channel_map)
pa_stream_get_channel_map.argtypes = [POINTER(pa_stream)]
# /usr/include/pulse/stream.h:770
pa_stream_get_format_info = _lib.pa_stream_get_format_info
pa_stream_get_format_info.restype = POINTER(pa_format_info)
pa_stream_get_format_info.argtypes = [POINTER(pa_stream)]
# /usr/include/pulse/stream.h:780
pa_stream_get_buffer_attr = _lib.pa_stream_get_buffer_attr
pa_stream_get_buffer_attr.restype = POINTER(pa_buffer_attr)
pa_stream_get_buffer_attr.argtypes = [POINTER(pa_stream)]
# /usr/include/pulse/stream.h:790
pa_stream_set_buffer_attr = _lib.pa_stream_set_buffer_attr
pa_stream_set_buffer_attr.restype = POINTER(pa_operation)
pa_stream_set_buffer_attr.argtypes = [POINTER(pa_stream), POINTER(pa_buffer_attr), pa_stream_success_cb_t, POINTER(None)]
# /usr/include/pulse/stream.h:797
pa_stream_update_sample_rate = _lib.pa_stream_update_sample_rate
pa_stream_update_sample_rate.restype = POINTER(pa_operation)
pa_stream_update_sample_rate.argtypes = [POINTER(pa_stream), c_uint32, pa_stream_success_cb_t, POINTER(None)]
# /usr/include/pulse/stream.h:805
pa_stream_proplist_update = _lib.pa_stream_proplist_update
pa_stream_proplist_update.restype = POINTER(pa_operation)
pa_stream_proplist_update.argtypes = [POINTER(pa_stream), pa_update_mode_t, POINTER(pa_proplist), pa_stream_success_cb_t, POINTER(None)]
# /usr/include/pulse/stream.h:809
pa_stream_proplist_remove = _lib.pa_stream_proplist_remove
pa_stream_proplist_remove.restype = POINTER(pa_operation)
pa_stream_proplist_remove.argtypes = [POINTER(pa_stream), POINTER(c_char_p), pa_stream_success_cb_t, POINTER(None)]
# /usr/include/pulse/stream.h:815
pa_stream_set_monitor_stream = _lib.pa_stream_set_monitor_stream
pa_stream_set_monitor_stream.restype = c_int
pa_stream_set_monitor_stream.argtypes = [POINTER(pa_stream), c_uint32]
# /usr/include/pulse/stream.h:820
pa_stream_get_monitor_stream = _lib.pa_stream_get_monitor_stream
pa_stream_get_monitor_stream.restype = c_uint32
pa_stream_get_monitor_stream.argtypes = [POINTER(pa_stream)]
class struct_pa_sink_port_info(Structure):
__slots__ = [
'name',
'description',
'priority',
'available',
]
struct_pa_sink_port_info._fields_ = [
('name', c_char_p),
('description', c_char_p),
('priority', c_uint32),
('available', c_int),
]
pa_sink_port_info = struct_pa_sink_port_info # /usr/include/pulse/introspect.h:232
class struct_pa_sink_info(Structure):
__slots__ = [
'name',
'index',
'description',
'sample_spec',
'channel_map',
'owner_module',
'volume',
'mute',
'monitor_source',
'monitor_source_name',
'latency',
'driver',
'flags',
'proplist',
'configured_latency',
'base_volume',
'state',
'n_volume_steps',
'card',
'n_ports',
'ports',
'active_port',
'n_formats',
'formats',
]
struct_pa_sink_info._fields_ = [
('name', c_char_p),
('index', c_uint32),
('description', c_char_p),
('sample_spec', pa_sample_spec),
('channel_map', pa_channel_map),
('owner_module', c_uint32),
('volume', pa_cvolume),
('mute', c_int),
('monitor_source', c_uint32),
('monitor_source_name', c_char_p),
('latency', pa_usec_t),
('driver', c_char_p),
('flags', pa_sink_flags_t),
('proplist', POINTER(pa_proplist)),
('configured_latency', pa_usec_t),
('base_volume', pa_volume_t),
('state', pa_sink_state_t),
('n_volume_steps', c_uint32),
('card', c_uint32),
('n_ports', c_uint32),
('ports', POINTER(POINTER(pa_sink_port_info))),
('active_port', POINTER(pa_sink_port_info)),
('n_formats', c_uint8),
('formats', POINTER(POINTER(pa_format_info))),
]
pa_sink_info = struct_pa_sink_info # /usr/include/pulse/introspect.h:262
pa_sink_info_cb_t = CFUNCTYPE(None, POINTER(pa_context), POINTER(pa_sink_info), c_int, POINTER(None)) # /usr/include/pulse/introspect.h:265
# /usr/include/pulse/introspect.h:268
pa_context_get_sink_info_by_name = _lib.pa_context_get_sink_info_by_name
pa_context_get_sink_info_by_name.restype = POINTER(pa_operation)
pa_context_get_sink_info_by_name.argtypes = [POINTER(pa_context), c_char_p, pa_sink_info_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:271
pa_context_get_sink_info_by_index = _lib.pa_context_get_sink_info_by_index
pa_context_get_sink_info_by_index.restype = POINTER(pa_operation)
pa_context_get_sink_info_by_index.argtypes = [POINTER(pa_context), c_uint32, pa_sink_info_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:274
pa_context_get_sink_info_list = _lib.pa_context_get_sink_info_list
pa_context_get_sink_info_list.restype = POINTER(pa_operation)
pa_context_get_sink_info_list.argtypes = [POINTER(pa_context), pa_sink_info_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:277
pa_context_set_sink_volume_by_index = _lib.pa_context_set_sink_volume_by_index
pa_context_set_sink_volume_by_index.restype = POINTER(pa_operation)
pa_context_set_sink_volume_by_index.argtypes = [POINTER(pa_context), c_uint32, POINTER(pa_cvolume), pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:280
pa_context_set_sink_volume_by_name = _lib.pa_context_set_sink_volume_by_name
pa_context_set_sink_volume_by_name.restype = POINTER(pa_operation)
pa_context_set_sink_volume_by_name.argtypes = [POINTER(pa_context), c_char_p, POINTER(pa_cvolume), pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:283
pa_context_set_sink_mute_by_index = _lib.pa_context_set_sink_mute_by_index
pa_context_set_sink_mute_by_index.restype = POINTER(pa_operation)
pa_context_set_sink_mute_by_index.argtypes = [POINTER(pa_context), c_uint32, c_int, pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:286
pa_context_set_sink_mute_by_name = _lib.pa_context_set_sink_mute_by_name
pa_context_set_sink_mute_by_name.restype = POINTER(pa_operation)
pa_context_set_sink_mute_by_name.argtypes = [POINTER(pa_context), c_char_p, c_int, pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:289
pa_context_suspend_sink_by_name = _lib.pa_context_suspend_sink_by_name
pa_context_suspend_sink_by_name.restype = POINTER(pa_operation)
pa_context_suspend_sink_by_name.argtypes = [POINTER(pa_context), c_char_p, c_int, pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:292
pa_context_suspend_sink_by_index = _lib.pa_context_suspend_sink_by_index
pa_context_suspend_sink_by_index.restype = POINTER(pa_operation)
pa_context_suspend_sink_by_index.argtypes = [POINTER(pa_context), c_uint32, c_int, pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:295
pa_context_set_sink_port_by_index = _lib.pa_context_set_sink_port_by_index
pa_context_set_sink_port_by_index.restype = POINTER(pa_operation)
pa_context_set_sink_port_by_index.argtypes = [POINTER(pa_context), c_uint32, c_char_p, pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:298
pa_context_set_sink_port_by_name = _lib.pa_context_set_sink_port_by_name
pa_context_set_sink_port_by_name.restype = POINTER(pa_operation)
pa_context_set_sink_port_by_name.argtypes = [POINTER(pa_context), c_char_p, c_char_p, pa_context_success_cb_t, POINTER(None)]
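
# --- Illustrative usage sketch (added example; not part of the generated bindings) ---
# The sink-introspection calls above are asynchronous: the caller installs a
# ctypes callback of type pa_sink_info_cb_t, starts the listing operation, and
# drives a mainloop until the returned pa_operation completes.  The helper below
# is a minimal, hedged example of that pattern; `context` and `mainloop` are
# assumed to have been created and connected elsewhere.
def _example_list_sink_names(context, mainloop):
    names = []

    def _on_sink_info(ctx, info, eol, userdata):
        # eol != 0 marks the end-of-list sentinel; `info` is only valid before it.
        if not eol:
            names.append(info.contents.name)

    cb = pa_sink_info_cb_t(_on_sink_info)   # keep a reference so it is not GC'd
    op = pa_context_get_sink_info_list(context, cb, None)
    while pa_operation_get_state(op) == PA_OPERATION_RUNNING:
        pa_mainloop_iterate(mainloop, 1, None)
    pa_operation_unref(op)
    return names
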
class struct_pa_source_port_info(Structure):
__slots__ = [
'name',
'description',
'priority',
'available',
]
struct_pa_source_port_info._fields_ = [
('name', c_char_p),
('description', c_char_p),
('priority', c_uint32),
('available', c_int),
]
pa_source_port_info = struct_pa_source_port_info # /usr/include/pulse/introspect.h:312
class struct_pa_source_info(Structure):
__slots__ = [
'name',
'index',
'description',
'sample_spec',
'channel_map',
'owner_module',
'volume',
'mute',
'monitor_of_sink',
'monitor_of_sink_name',
'latency',
'driver',
'flags',
'proplist',
'configured_latency',
'base_volume',
'state',
'n_volume_steps',
'card',
'n_ports',
'ports',
'active_port',
'n_formats',
'formats',
]
struct_pa_source_info._fields_ = [
('name', c_char_p),
('index', c_uint32),
('description', c_char_p),
('sample_spec', pa_sample_spec),
('channel_map', pa_channel_map),
('owner_module', c_uint32),
('volume', pa_cvolume),
('mute', c_int),
('monitor_of_sink', c_uint32),
('monitor_of_sink_name', c_char_p),
('latency', pa_usec_t),
('driver', c_char_p),
('flags', pa_source_flags_t),
('proplist', POINTER(pa_proplist)),
('configured_latency', pa_usec_t),
('base_volume', pa_volume_t),
('state', pa_source_state_t),
('n_volume_steps', c_uint32),
('card', c_uint32),
('n_ports', c_uint32),
('ports', POINTER(POINTER(pa_source_port_info))),
('active_port', POINTER(pa_source_port_info)),
('n_formats', c_uint8),
('formats', POINTER(POINTER(pa_format_info))),
]
pa_source_info = struct_pa_source_info # /usr/include/pulse/introspect.h:342
pa_source_info_cb_t = CFUNCTYPE(None, POINTER(pa_context), POINTER(pa_source_info), c_int, POINTER(None)) # /usr/include/pulse/introspect.h:345
# /usr/include/pulse/introspect.h:348
pa_context_get_source_info_by_name = _lib.pa_context_get_source_info_by_name
pa_context_get_source_info_by_name.restype = POINTER(pa_operation)
pa_context_get_source_info_by_name.argtypes = [POINTER(pa_context), c_char_p, pa_source_info_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:351
pa_context_get_source_info_by_index = _lib.pa_context_get_source_info_by_index
pa_context_get_source_info_by_index.restype = POINTER(pa_operation)
pa_context_get_source_info_by_index.argtypes = [POINTER(pa_context), c_uint32, pa_source_info_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:354
pa_context_get_source_info_list = _lib.pa_context_get_source_info_list
pa_context_get_source_info_list.restype = POINTER(pa_operation)
pa_context_get_source_info_list.argtypes = [POINTER(pa_context), pa_source_info_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:357
pa_context_set_source_volume_by_index = _lib.pa_context_set_source_volume_by_index
pa_context_set_source_volume_by_index.restype = POINTER(pa_operation)
pa_context_set_source_volume_by_index.argtypes = [POINTER(pa_context), c_uint32, POINTER(pa_cvolume), pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:360
pa_context_set_source_volume_by_name = _lib.pa_context_set_source_volume_by_name
pa_context_set_source_volume_by_name.restype = POINTER(pa_operation)
pa_context_set_source_volume_by_name.argtypes = [POINTER(pa_context), c_char_p, POINTER(pa_cvolume), pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:363
pa_context_set_source_mute_by_index = _lib.pa_context_set_source_mute_by_index
pa_context_set_source_mute_by_index.restype = POINTER(pa_operation)
pa_context_set_source_mute_by_index.argtypes = [POINTER(pa_context), c_uint32, c_int, pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:366
pa_context_set_source_mute_by_name = _lib.pa_context_set_source_mute_by_name
pa_context_set_source_mute_by_name.restype = POINTER(pa_operation)
pa_context_set_source_mute_by_name.argtypes = [POINTER(pa_context), c_char_p, c_int, pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:369
pa_context_suspend_source_by_name = _lib.pa_context_suspend_source_by_name
pa_context_suspend_source_by_name.restype = POINTER(pa_operation)
pa_context_suspend_source_by_name.argtypes = [POINTER(pa_context), c_char_p, c_int, pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:372
pa_context_suspend_source_by_index = _lib.pa_context_suspend_source_by_index
pa_context_suspend_source_by_index.restype = POINTER(pa_operation)
pa_context_suspend_source_by_index.argtypes = [POINTER(pa_context), c_uint32, c_int, pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:375
pa_context_set_source_port_by_index = _lib.pa_context_set_source_port_by_index
pa_context_set_source_port_by_index.restype = POINTER(pa_operation)
pa_context_set_source_port_by_index.argtypes = [POINTER(pa_context), c_uint32, c_char_p, pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:378
pa_context_set_source_port_by_name = _lib.pa_context_set_source_port_by_name
pa_context_set_source_port_by_name.restype = POINTER(pa_operation)
pa_context_set_source_port_by_name.argtypes = [POINTER(pa_context), c_char_p, c_char_p, pa_context_success_cb_t, POINTER(None)]
class struct_pa_server_info(Structure):
__slots__ = [
'user_name',
'host_name',
'server_version',
'server_name',
'sample_spec',
'default_sink_name',
'default_source_name',
'cookie',
'channel_map',
]
struct_pa_server_info._fields_ = [
('user_name', c_char_p),
('host_name', c_char_p),
('server_version', c_char_p),
('server_name', c_char_p),
('sample_spec', pa_sample_spec),
('default_sink_name', c_char_p),
('default_source_name', c_char_p),
('cookie', c_uint32),
('channel_map', pa_channel_map),
]
pa_server_info = struct_pa_server_info # /usr/include/pulse/introspect.h:397
pa_server_info_cb_t = CFUNCTYPE(None, POINTER(pa_context), POINTER(pa_server_info), POINTER(None)) # /usr/include/pulse/introspect.h:400
# /usr/include/pulse/introspect.h:403
pa_context_get_server_info = _lib.pa_context_get_server_info
pa_context_get_server_info.restype = POINTER(pa_operation)
pa_context_get_server_info.argtypes = [POINTER(pa_context), pa_server_info_cb_t, POINTER(None)]
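
# --- Illustrative usage sketch (added example; not part of the generated bindings) ---
# pa_context_get_server_info() follows the same asynchronous shape as the other
# introspection calls, but its callback carries no end-of-list flag: it fires
# exactly once with a pa_server_info record.  A hedged example of reading the
# default sink name; `context` and `mainloop` are assumed to exist already.
def _example_default_sink_name(context, mainloop):
    result = []

    def _on_server_info(ctx, info, userdata):
        result.append(info.contents.default_sink_name)

    cb = pa_server_info_cb_t(_on_server_info)
    op = pa_context_get_server_info(context, cb, None)
    while pa_operation_get_state(op) == PA_OPERATION_RUNNING:
        pa_mainloop_iterate(mainloop, 1, None)
    pa_operation_unref(op)
    return result[0] if result else None
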
class struct_pa_module_info(Structure):
__slots__ = [
'index',
'name',
'argument',
'n_used',
'auto_unload',
'proplist',
]
struct_pa_module_info._fields_ = [
('index', c_uint32),
('name', c_char_p),
('argument', c_char_p),
('n_used', c_uint32),
('auto_unload', c_int),
('proplist', POINTER(pa_proplist)),
]
pa_module_info = struct_pa_module_info # /usr/include/pulse/introspect.h:421
pa_module_info_cb_t = CFUNCTYPE(None, POINTER(pa_context), POINTER(pa_module_info), c_int, POINTER(None)) # /usr/include/pulse/introspect.h:424
# /usr/include/pulse/introspect.h:427
pa_context_get_module_info = _lib.pa_context_get_module_info
pa_context_get_module_info.restype = POINTER(pa_operation)
pa_context_get_module_info.argtypes = [POINTER(pa_context), c_uint32, pa_module_info_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:430
pa_context_get_module_info_list = _lib.pa_context_get_module_info_list
pa_context_get_module_info_list.restype = POINTER(pa_operation)
pa_context_get_module_info_list.argtypes = [POINTER(pa_context), pa_module_info_cb_t, POINTER(None)]
pa_context_index_cb_t = CFUNCTYPE(None, POINTER(pa_context), c_uint32, POINTER(None)) # /usr/include/pulse/introspect.h:433
# /usr/include/pulse/introspect.h:436
pa_context_load_module = _lib.pa_context_load_module
pa_context_load_module.restype = POINTER(pa_operation)
pa_context_load_module.argtypes = [POINTER(pa_context), c_char_p, c_char_p, pa_context_index_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:439
pa_context_unload_module = _lib.pa_context_unload_module
pa_context_unload_module.restype = POINTER(pa_operation)
pa_context_unload_module.argtypes = [POINTER(pa_context), c_uint32, pa_context_success_cb_t, POINTER(None)]
class struct_pa_client_info(Structure):
__slots__ = [
'index',
'name',
'owner_module',
'driver',
'proplist',
]
struct_pa_client_info._fields_ = [
('index', c_uint32),
('name', c_char_p),
('owner_module', c_uint32),
('driver', c_char_p),
('proplist', POINTER(pa_proplist)),
]
pa_client_info = struct_pa_client_info # /usr/include/pulse/introspect.h:454
pa_client_info_cb_t = CFUNCTYPE(None, POINTER(pa_context), POINTER(pa_client_info), c_int, POINTER(None)) # /usr/include/pulse/introspect.h:457
# /usr/include/pulse/introspect.h:460
pa_context_get_client_info = _lib.pa_context_get_client_info
pa_context_get_client_info.restype = POINTER(pa_operation)
pa_context_get_client_info.argtypes = [POINTER(pa_context), c_uint32, pa_client_info_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:463
pa_context_get_client_info_list = _lib.pa_context_get_client_info_list
pa_context_get_client_info_list.restype = POINTER(pa_operation)
pa_context_get_client_info_list.argtypes = [POINTER(pa_context), pa_client_info_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:466
pa_context_kill_client = _lib.pa_context_kill_client
pa_context_kill_client.restype = POINTER(pa_operation)
pa_context_kill_client.argtypes = [POINTER(pa_context), c_uint32, pa_context_success_cb_t, POINTER(None)]
class struct_pa_card_profile_info(Structure):
__slots__ = [
'name',
'description',
'n_sinks',
'n_sources',
'priority',
]
struct_pa_card_profile_info._fields_ = [
('name', c_char_p),
('description', c_char_p),
('n_sinks', c_uint32),
('n_sources', c_uint32),
('priority', c_uint32),
]
pa_card_profile_info = struct_pa_card_profile_info # /usr/include/pulse/introspect.h:479
class struct_pa_card_profile_info2(Structure):
__slots__ = [
'name',
'description',
'n_sinks',
'n_sources',
'priority',
'available',
]
struct_pa_card_profile_info2._fields_ = [
('name', c_char_p),
('description', c_char_p),
('n_sinks', c_uint32),
('n_sources', c_uint32),
('priority', c_uint32),
('available', c_int),
]
pa_card_profile_info2 = struct_pa_card_profile_info2 # /usr/include/pulse/introspect.h:496
class struct_pa_card_port_info(Structure):
__slots__ = [
'name',
'description',
'priority',
'available',
'direction',
'n_profiles',
'profiles',
'proplist',
'latency_offset',
'profiles2',
]
struct_pa_card_port_info._fields_ = [
('name', c_char_p),
('description', c_char_p),
('priority', c_uint32),
('available', c_int),
('direction', c_int),
('n_profiles', c_uint32),
('profiles', POINTER(POINTER(pa_card_profile_info))),
('proplist', POINTER(pa_proplist)),
('latency_offset', c_int64),
('profiles2', POINTER(POINTER(pa_card_profile_info2))),
]
pa_card_port_info = struct_pa_card_port_info # /usr/include/pulse/introspect.h:512
class struct_pa_card_info(Structure):
__slots__ = [
'index',
'name',
'owner_module',
'driver',
'n_profiles',
'profiles',
'active_profile',
'proplist',
'n_ports',
'ports',
'profiles2',
'active_profile2',
]
struct_pa_card_info._fields_ = [
('index', c_uint32),
('name', c_char_p),
('owner_module', c_uint32),
('driver', c_char_p),
('n_profiles', c_uint32),
('profiles', POINTER(pa_card_profile_info)),
('active_profile', POINTER(pa_card_profile_info)),
('proplist', POINTER(pa_proplist)),
('n_ports', c_uint32),
('ports', POINTER(POINTER(pa_card_port_info))),
('profiles2', POINTER(POINTER(pa_card_profile_info2))),
('active_profile2', POINTER(pa_card_profile_info2)),
]
pa_card_info = struct_pa_card_info # /usr/include/pulse/introspect.h:530
pa_card_info_cb_t = CFUNCTYPE(None, POINTER(pa_context), POINTER(pa_card_info), c_int, POINTER(None)) # /usr/include/pulse/introspect.h:533
# /usr/include/pulse/introspect.h:536
pa_context_get_card_info_by_index = _lib.pa_context_get_card_info_by_index
pa_context_get_card_info_by_index.restype = POINTER(pa_operation)
pa_context_get_card_info_by_index.argtypes = [POINTER(pa_context), c_uint32, pa_card_info_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:539
pa_context_get_card_info_by_name = _lib.pa_context_get_card_info_by_name
pa_context_get_card_info_by_name.restype = POINTER(pa_operation)
pa_context_get_card_info_by_name.argtypes = [POINTER(pa_context), c_char_p, pa_card_info_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:542
pa_context_get_card_info_list = _lib.pa_context_get_card_info_list
pa_context_get_card_info_list.restype = POINTER(pa_operation)
pa_context_get_card_info_list.argtypes = [POINTER(pa_context), pa_card_info_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:545
pa_context_set_card_profile_by_index = _lib.pa_context_set_card_profile_by_index
pa_context_set_card_profile_by_index.restype = POINTER(pa_operation)
pa_context_set_card_profile_by_index.argtypes = [POINTER(pa_context), c_uint32, c_char_p, pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:548
pa_context_set_card_profile_by_name = _lib.pa_context_set_card_profile_by_name
pa_context_set_card_profile_by_name.restype = POINTER(pa_operation)
pa_context_set_card_profile_by_name.argtypes = [POINTER(pa_context), c_char_p, c_char_p, pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:551
pa_context_set_port_latency_offset = _lib.pa_context_set_port_latency_offset
pa_context_set_port_latency_offset.restype = POINTER(pa_operation)
pa_context_set_port_latency_offset.argtypes = [POINTER(pa_context), c_char_p, c_char_p, c_int64, pa_context_success_cb_t, POINTER(None)]
class struct_pa_sink_input_info(Structure):
__slots__ = [
'index',
'name',
'owner_module',
'client',
'sink',
'sample_spec',
'channel_map',
'volume',
'buffer_usec',
'sink_usec',
'resample_method',
'driver',
'mute',
'proplist',
'corked',
'has_volume',
'volume_writable',
'format',
]
struct_pa_sink_input_info._fields_ = [
('index', c_uint32),
('name', c_char_p),
('owner_module', c_uint32),
('client', c_uint32),
('sink', c_uint32),
('sample_spec', pa_sample_spec),
('channel_map', pa_channel_map),
('volume', pa_cvolume),
('buffer_usec', pa_usec_t),
('sink_usec', pa_usec_t),
('resample_method', c_char_p),
('driver', c_char_p),
('mute', c_int),
('proplist', POINTER(pa_proplist)),
('corked', c_int),
('has_volume', c_int),
('volume_writable', c_int),
('format', POINTER(pa_format_info)),
]
pa_sink_input_info = struct_pa_sink_input_info # /usr/include/pulse/introspect.h:579
pa_sink_input_info_cb_t = CFUNCTYPE(None, POINTER(pa_context), POINTER(pa_sink_input_info), c_int, POINTER(None)) # /usr/include/pulse/introspect.h:582
# /usr/include/pulse/introspect.h:585
pa_context_get_sink_input_info = _lib.pa_context_get_sink_input_info
pa_context_get_sink_input_info.restype = POINTER(pa_operation)
pa_context_get_sink_input_info.argtypes = [POINTER(pa_context), c_uint32, pa_sink_input_info_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:588
pa_context_get_sink_input_info_list = _lib.pa_context_get_sink_input_info_list
pa_context_get_sink_input_info_list.restype = POINTER(pa_operation)
pa_context_get_sink_input_info_list.argtypes = [POINTER(pa_context), pa_sink_input_info_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:591
pa_context_move_sink_input_by_name = _lib.pa_context_move_sink_input_by_name
pa_context_move_sink_input_by_name.restype = POINTER(pa_operation)
pa_context_move_sink_input_by_name.argtypes = [POINTER(pa_context), c_uint32, c_char_p, pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:594
pa_context_move_sink_input_by_index = _lib.pa_context_move_sink_input_by_index
pa_context_move_sink_input_by_index.restype = POINTER(pa_operation)
pa_context_move_sink_input_by_index.argtypes = [POINTER(pa_context), c_uint32, c_uint32, pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:597
pa_context_set_sink_input_volume = _lib.pa_context_set_sink_input_volume
pa_context_set_sink_input_volume.restype = POINTER(pa_operation)
pa_context_set_sink_input_volume.argtypes = [POINTER(pa_context), c_uint32, POINTER(pa_cvolume), pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:600
pa_context_set_sink_input_mute = _lib.pa_context_set_sink_input_mute
pa_context_set_sink_input_mute.restype = POINTER(pa_operation)
pa_context_set_sink_input_mute.argtypes = [POINTER(pa_context), c_uint32, c_int, pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:603
pa_context_kill_sink_input = _lib.pa_context_kill_sink_input
pa_context_kill_sink_input.restype = POINTER(pa_operation)
pa_context_kill_sink_input.argtypes = [POINTER(pa_context), c_uint32, pa_context_success_cb_t, POINTER(None)]
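
# --- Illustrative usage sketch (added example; not part of the generated bindings) ---
# Setting a per-stream volume combines pa_cvolume_set() (declared earlier in
# this module) with pa_context_set_sink_input_volume() above.  The success
# callback must stay referenced until the operation finishes, which the wait
# loop below guarantees.  `context`, `mainloop`, the channel count and the raw
# volume value are assumed inputs; this is a hedged sketch, not pyglet's own
# playback code path.
def _example_set_sink_input_volume(context, mainloop, sink_input_index,
                                   channels, raw_volume):
    cv = pa_cvolume()
    pa_cvolume_set(cv, channels, raw_volume)

    acks = []
    ok_cb = pa_context_success_cb_t(
        lambda ctx, success, userdata: acks.append(success))
    op = pa_context_set_sink_input_volume(context, sink_input_index, cv,
                                          ok_cb, None)
    while pa_operation_get_state(op) == PA_OPERATION_RUNNING:
        pa_mainloop_iterate(mainloop, 1, None)
    pa_operation_unref(op)
    return bool(acks and acks[0])
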
class struct_pa_source_output_info(Structure):
__slots__ = [
'index',
'name',
'owner_module',
'client',
'source',
'sample_spec',
'channel_map',
'buffer_usec',
'source_usec',
'resample_method',
'driver',
'proplist',
'corked',
'volume',
'mute',
'has_volume',
'volume_writable',
'format',
]
struct_pa_source_output_info._fields_ = [
('index', c_uint32),
('name', c_char_p),
('owner_module', c_uint32),
('client', c_uint32),
('source', c_uint32),
('sample_spec', pa_sample_spec),
('channel_map', pa_channel_map),
('buffer_usec', pa_usec_t),
('source_usec', pa_usec_t),
('resample_method', c_char_p),
('driver', c_char_p),
('proplist', POINTER(pa_proplist)),
('corked', c_int),
('volume', pa_cvolume),
('mute', c_int),
('has_volume', c_int),
('volume_writable', c_int),
('format', POINTER(pa_format_info)),
]
pa_source_output_info = struct_pa_source_output_info # /usr/include/pulse/introspect.h:631
pa_source_output_info_cb_t = CFUNCTYPE(None, POINTER(pa_context), POINTER(pa_source_output_info), c_int, POINTER(None)) # /usr/include/pulse/introspect.h:634
# /usr/include/pulse/introspect.h:637
pa_context_get_source_output_info = _lib.pa_context_get_source_output_info
pa_context_get_source_output_info.restype = POINTER(pa_operation)
pa_context_get_source_output_info.argtypes = [POINTER(pa_context), c_uint32, pa_source_output_info_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:640
pa_context_get_source_output_info_list = _lib.pa_context_get_source_output_info_list
pa_context_get_source_output_info_list.restype = POINTER(pa_operation)
pa_context_get_source_output_info_list.argtypes = [POINTER(pa_context), pa_source_output_info_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:643
pa_context_move_source_output_by_name = _lib.pa_context_move_source_output_by_name
pa_context_move_source_output_by_name.restype = POINTER(pa_operation)
pa_context_move_source_output_by_name.argtypes = [POINTER(pa_context), c_uint32, c_char_p, pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:646
pa_context_move_source_output_by_index = _lib.pa_context_move_source_output_by_index
pa_context_move_source_output_by_index.restype = POINTER(pa_operation)
pa_context_move_source_output_by_index.argtypes = [POINTER(pa_context), c_uint32, c_uint32, pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:649
pa_context_set_source_output_volume = _lib.pa_context_set_source_output_volume
pa_context_set_source_output_volume.restype = POINTER(pa_operation)
pa_context_set_source_output_volume.argtypes = [POINTER(pa_context), c_uint32, POINTER(pa_cvolume), pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:652
pa_context_set_source_output_mute = _lib.pa_context_set_source_output_mute
pa_context_set_source_output_mute.restype = POINTER(pa_operation)
pa_context_set_source_output_mute.argtypes = [POINTER(pa_context), c_uint32, c_int, pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:655
pa_context_kill_source_output = _lib.pa_context_kill_source_output
pa_context_kill_source_output.restype = POINTER(pa_operation)
pa_context_kill_source_output.argtypes = [POINTER(pa_context), c_uint32, pa_context_success_cb_t, POINTER(None)]
class struct_pa_stat_info(Structure):
__slots__ = [
'memblock_total',
'memblock_total_size',
'memblock_allocated',
'memblock_allocated_size',
'scache_size',
]
struct_pa_stat_info._fields_ = [
('memblock_total', c_uint32),
('memblock_total_size', c_uint32),
('memblock_allocated', c_uint32),
('memblock_allocated_size', c_uint32),
('scache_size', c_uint32),
]
pa_stat_info = struct_pa_stat_info # /usr/include/pulse/introspect.h:670
pa_stat_info_cb_t = CFUNCTYPE(None, POINTER(pa_context), POINTER(pa_stat_info), POINTER(None)) # /usr/include/pulse/introspect.h:673
# /usr/include/pulse/introspect.h:676
pa_context_stat = _lib.pa_context_stat
pa_context_stat.restype = POINTER(pa_operation)
pa_context_stat.argtypes = [POINTER(pa_context), pa_stat_info_cb_t, POINTER(None)]
class struct_pa_sample_info(Structure):
__slots__ = [
'index',
'name',
'volume',
'sample_spec',
'channel_map',
'duration',
'bytes',
'lazy',
'filename',
'proplist',
]
struct_pa_sample_info._fields_ = [
('index', c_uint32),
('name', c_char_p),
('volume', pa_cvolume),
('sample_spec', pa_sample_spec),
('channel_map', pa_channel_map),
('duration', pa_usec_t),
('bytes', c_uint32),
('lazy', c_int),
('filename', c_char_p),
('proplist', POINTER(pa_proplist)),
]
pa_sample_info = struct_pa_sample_info # /usr/include/pulse/introspect.h:696
pa_sample_info_cb_t = CFUNCTYPE(None, POINTER(pa_context), POINTER(pa_sample_info), c_int, POINTER(None)) # /usr/include/pulse/introspect.h:699
# /usr/include/pulse/introspect.h:702
pa_context_get_sample_info_by_name = _lib.pa_context_get_sample_info_by_name
pa_context_get_sample_info_by_name.restype = POINTER(pa_operation)
pa_context_get_sample_info_by_name.argtypes = [POINTER(pa_context), c_char_p, pa_sample_info_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:705
pa_context_get_sample_info_by_index = _lib.pa_context_get_sample_info_by_index
pa_context_get_sample_info_by_index.restype = POINTER(pa_operation)
pa_context_get_sample_info_by_index.argtypes = [POINTER(pa_context), c_uint32, pa_sample_info_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:708
pa_context_get_sample_info_list = _lib.pa_context_get_sample_info_list
pa_context_get_sample_info_list.restype = POINTER(pa_operation)
pa_context_get_sample_info_list.argtypes = [POINTER(pa_context), pa_sample_info_cb_t, POINTER(None)]
enum_pa_autoload_type = c_int
PA_AUTOLOAD_SINK = 0
PA_AUTOLOAD_SOURCE = 1
pa_autoload_type_t = enum_pa_autoload_type # /usr/include/pulse/introspect.h:720
class struct_pa_autoload_info(Structure):
__slots__ = [
'index',
'name',
'type',
'module',
'argument',
]
struct_pa_autoload_info._fields_ = [
('index', c_uint32),
('name', c_char_p),
('type', pa_autoload_type_t),
('module', c_char_p),
('argument', c_char_p),
]
pa_autoload_info = struct_pa_autoload_info # /usr/include/pulse/introspect.h:731
pa_autoload_info_cb_t = CFUNCTYPE(None, POINTER(pa_context), POINTER(pa_autoload_info), c_int, POINTER(None)) # /usr/include/pulse/introspect.h:734
# /usr/include/pulse/introspect.h:737
pa_context_get_autoload_info_by_name = _lib.pa_context_get_autoload_info_by_name
pa_context_get_autoload_info_by_name.restype = POINTER(pa_operation)
pa_context_get_autoload_info_by_name.argtypes = [POINTER(pa_context), c_char_p, pa_autoload_type_t, pa_autoload_info_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:740
pa_context_get_autoload_info_by_index = _lib.pa_context_get_autoload_info_by_index
pa_context_get_autoload_info_by_index.restype = POINTER(pa_operation)
pa_context_get_autoload_info_by_index.argtypes = [POINTER(pa_context), c_uint32, pa_autoload_info_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:743
pa_context_get_autoload_info_list = _lib.pa_context_get_autoload_info_list
pa_context_get_autoload_info_list.restype = POINTER(pa_operation)
pa_context_get_autoload_info_list.argtypes = [POINTER(pa_context), pa_autoload_info_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:746
pa_context_add_autoload = _lib.pa_context_add_autoload
pa_context_add_autoload.restype = POINTER(pa_operation)
pa_context_add_autoload.argtypes = [POINTER(pa_context), c_char_p, pa_autoload_type_t, c_char_p, c_char_p, pa_context_index_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:749
pa_context_remove_autoload_by_name = _lib.pa_context_remove_autoload_by_name
pa_context_remove_autoload_by_name.restype = POINTER(pa_operation)
pa_context_remove_autoload_by_name.argtypes = [POINTER(pa_context), c_char_p, pa_autoload_type_t, pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:752
pa_context_remove_autoload_by_index = _lib.pa_context_remove_autoload_by_index
pa_context_remove_autoload_by_index.restype = POINTER(pa_operation)
pa_context_remove_autoload_by_index.argtypes = [POINTER(pa_context), c_uint32, pa_context_success_cb_t, POINTER(None)]
pa_context_subscribe_cb_t = CFUNCTYPE(None, POINTER(pa_context), pa_subscription_event_type_t, c_uint32, POINTER(None)) # /usr/include/pulse/subscribe.h:73
# /usr/include/pulse/subscribe.h:76
pa_context_subscribe = _lib.pa_context_subscribe
pa_context_subscribe.restype = POINTER(pa_operation)
pa_context_subscribe.argtypes = [POINTER(pa_context), pa_subscription_mask_t, pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/subscribe.h:79
pa_context_set_subscribe_callback = _lib.pa_context_set_subscribe_callback
pa_context_set_subscribe_callback.restype = None
pa_context_set_subscribe_callback.argtypes = [POINTER(pa_context), pa_context_subscribe_cb_t, POINTER(None)]
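
# --- Illustrative usage sketch (added example; not part of the generated bindings) ---
# Event subscription is a two-step affair: install a pa_context_subscribe_cb_t
# with pa_context_set_subscribe_callback(), then enable the facilities of
# interest with pa_context_subscribe().  The hedged helper below watches sink
# events only; the caller must keep the returned callback objects alive for as
# long as the subscription is active, and `context` is assumed to be connected.
def _example_subscribe_to_sink_events(context, on_change):
    def _on_event(ctx, event_type, index, userdata):
        facility = event_type & PA_SUBSCRIPTION_EVENT_FACILITY_MASK
        kind = event_type & PA_SUBSCRIPTION_EVENT_TYPE_MASK
        if facility == PA_SUBSCRIPTION_EVENT_SINK:
            on_change(index, kind == PA_SUBSCRIPTION_EVENT_REMOVE)

    event_cb = pa_context_subscribe_cb_t(_on_event)
    ok_cb = pa_context_success_cb_t(lambda ctx, success, userdata: None)
    pa_context_set_subscribe_callback(context, event_cb, None)
    op = pa_context_subscribe(context, PA_SUBSCRIPTION_MASK_SINK, ok_cb, None)
    pa_operation_unref(op)
    return event_cb, ok_cb
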
pa_context_play_sample_cb_t = CFUNCTYPE(None, POINTER(pa_context), c_uint32, POINTER(None)) # /usr/include/pulse/scache.h:85
# /usr/include/pulse/scache.h:88
pa_stream_connect_upload = _lib.pa_stream_connect_upload
pa_stream_connect_upload.restype = c_int
pa_stream_connect_upload.argtypes = [POINTER(pa_stream), c_size_t]
# /usr/include/pulse/scache.h:93
pa_stream_finish_upload = _lib.pa_stream_finish_upload
pa_stream_finish_upload.restype = c_int
pa_stream_finish_upload.argtypes = [POINTER(pa_stream)]
# /usr/include/pulse/scache.h:96
pa_context_remove_sample = _lib.pa_context_remove_sample
pa_context_remove_sample.restype = POINTER(pa_operation)
pa_context_remove_sample.argtypes = [POINTER(pa_context), c_char_p, pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/scache.h:101
pa_context_play_sample = _lib.pa_context_play_sample
pa_context_play_sample.restype = POINTER(pa_operation)
pa_context_play_sample.argtypes = [POINTER(pa_context), c_char_p, c_char_p, pa_volume_t, pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/scache.h:113
pa_context_play_sample_with_proplist = _lib.pa_context_play_sample_with_proplist
pa_context_play_sample_with_proplist.restype = POINTER(pa_operation)
pa_context_play_sample_with_proplist.argtypes = [POINTER(pa_context), c_char_p, c_char_p, pa_volume_t, POINTER(pa_proplist), pa_context_play_sample_cb_t, POINTER(None)]
# /usr/include/pulse/error.h:33
pa_strerror = _lib.pa_strerror
pa_strerror.restype = c_char_p
pa_strerror.argtypes = [c_int]
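
# --- Illustrative usage sketch (added example; not part of the generated bindings) ---
# pa_strerror() maps the PA_ERR_* codes declared earlier in this module to
# static strings.  Combined with pa_context_errno(), it yields a readable
# description of the last failure on a context; a minimal, hedged helper:
def _example_describe_last_error(context):
    return pa_strerror(pa_context_errno(context))
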
# /usr/include/pulse/xmalloc.h:39
pa_xmalloc = _lib.pa_xmalloc
pa_xmalloc.restype = POINTER(c_void)
pa_xmalloc.argtypes = [c_size_t]
# /usr/include/pulse/xmalloc.h:42
pa_xmalloc0 = _lib.pa_xmalloc0
pa_xmalloc0.restype = POINTER(c_void)
pa_xmalloc0.argtypes = [c_size_t]
# /usr/include/pulse/xmalloc.h:45
pa_xrealloc = _lib.pa_xrealloc
pa_xrealloc.restype = POINTER(c_void)
pa_xrealloc.argtypes = [POINTER(None), c_size_t]
# /usr/include/pulse/xmalloc.h:48
pa_xfree = _lib.pa_xfree
pa_xfree.restype = None
pa_xfree.argtypes = [POINTER(None)]
# /usr/include/pulse/xmalloc.h:51
pa_xstrdup = _lib.pa_xstrdup
pa_xstrdup.restype = c_char_p
pa_xstrdup.argtypes = [c_char_p]
# /usr/include/pulse/xmalloc.h:54
pa_xstrndup = _lib.pa_xstrndup
pa_xstrndup.restype = c_char_p
pa_xstrndup.argtypes = [c_char_p, c_size_t]
# /usr/include/pulse/xmalloc.h:57
pa_xmemdup = _lib.pa_xmemdup
pa_xmemdup.restype = POINTER(c_void)
pa_xmemdup.argtypes = [POINTER(None), c_size_t]
# /usr/include/pulse/utf8.h:35
pa_utf8_valid = _lib.pa_utf8_valid
pa_utf8_valid.restype = c_char_p
pa_utf8_valid.argtypes = [c_char_p]
# /usr/include/pulse/utf8.h:38
pa_ascii_valid = _lib.pa_ascii_valid
pa_ascii_valid.restype = c_char_p
pa_ascii_valid.argtypes = [c_char_p]
# /usr/include/pulse/utf8.h:41
pa_utf8_filter = _lib.pa_utf8_filter
pa_utf8_filter.restype = c_char_p
pa_utf8_filter.argtypes = [c_char_p]
# /usr/include/pulse/utf8.h:44
pa_ascii_filter = _lib.pa_ascii_filter
pa_ascii_filter.restype = c_char_p
pa_ascii_filter.argtypes = [c_char_p]
# /usr/include/pulse/utf8.h:47
pa_utf8_to_locale = _lib.pa_utf8_to_locale
pa_utf8_to_locale.restype = c_char_p
pa_utf8_to_locale.argtypes = [c_char_p]
# /usr/include/pulse/utf8.h:50
pa_locale_to_utf8 = _lib.pa_locale_to_utf8
pa_locale_to_utf8.restype = c_char_p
pa_locale_to_utf8.argtypes = [c_char_p]
class struct_pa_threaded_mainloop(Structure):
__slots__ = [
]
struct_pa_threaded_mainloop._fields_ = [
('_opaque_struct', c_int)
]
class struct_pa_threaded_mainloop(Structure):
__slots__ = [
]
struct_pa_threaded_mainloop._fields_ = [
('_opaque_struct', c_int)
]
pa_threaded_mainloop = struct_pa_threaded_mainloop # /usr/include/pulse/thread-mainloop.h:246
# /usr/include/pulse/thread-mainloop.h:251
pa_threaded_mainloop_new = _lib.pa_threaded_mainloop_new
pa_threaded_mainloop_new.restype = POINTER(pa_threaded_mainloop)
pa_threaded_mainloop_new.argtypes = []
# /usr/include/pulse/thread-mainloop.h:256
pa_threaded_mainloop_free = _lib.pa_threaded_mainloop_free
pa_threaded_mainloop_free.restype = None
pa_threaded_mainloop_free.argtypes = [POINTER(pa_threaded_mainloop)]
# /usr/include/pulse/thread-mainloop.h:259
pa_threaded_mainloop_start = _lib.pa_threaded_mainloop_start
pa_threaded_mainloop_start.restype = c_int
pa_threaded_mainloop_start.argtypes = [POINTER(pa_threaded_mainloop)]
# /usr/include/pulse/thread-mainloop.h:263
pa_threaded_mainloop_stop = _lib.pa_threaded_mainloop_stop
pa_threaded_mainloop_stop.restype = None
pa_threaded_mainloop_stop.argtypes = [POINTER(pa_threaded_mainloop)]
# /usr/include/pulse/thread-mainloop.h:271
pa_threaded_mainloop_lock = _lib.pa_threaded_mainloop_lock
pa_threaded_mainloop_lock.restype = None
pa_threaded_mainloop_lock.argtypes = [POINTER(pa_threaded_mainloop)]
# /usr/include/pulse/thread-mainloop.h:274
pa_threaded_mainloop_unlock = _lib.pa_threaded_mainloop_unlock
pa_threaded_mainloop_unlock.restype = None
pa_threaded_mainloop_unlock.argtypes = [POINTER(pa_threaded_mainloop)]
# /usr/include/pulse/thread-mainloop.h:285
pa_threaded_mainloop_wait = _lib.pa_threaded_mainloop_wait
pa_threaded_mainloop_wait.restype = None
pa_threaded_mainloop_wait.argtypes = [POINTER(pa_threaded_mainloop)]
# /usr/include/pulse/thread-mainloop.h:292
pa_threaded_mainloop_signal = _lib.pa_threaded_mainloop_signal
pa_threaded_mainloop_signal.restype = None
pa_threaded_mainloop_signal.argtypes = [POINTER(pa_threaded_mainloop), c_int]
# /usr/include/pulse/thread-mainloop.h:298
pa_threaded_mainloop_accept = _lib.pa_threaded_mainloop_accept
pa_threaded_mainloop_accept.restype = None
pa_threaded_mainloop_accept.argtypes = [POINTER(pa_threaded_mainloop)]
# /usr/include/pulse/thread-mainloop.h:302
pa_threaded_mainloop_get_retval = _lib.pa_threaded_mainloop_get_retval
pa_threaded_mainloop_get_retval.restype = c_int
pa_threaded_mainloop_get_retval.argtypes = [POINTER(pa_threaded_mainloop)]
# /usr/include/pulse/thread-mainloop.h:307
pa_threaded_mainloop_get_api = _lib.pa_threaded_mainloop_get_api
pa_threaded_mainloop_get_api.restype = POINTER(pa_mainloop_api)
pa_threaded_mainloop_get_api.argtypes = [POINTER(pa_threaded_mainloop)]
# /usr/include/pulse/thread-mainloop.h:310
pa_threaded_mainloop_in_thread = _lib.pa_threaded_mainloop_in_thread
pa_threaded_mainloop_in_thread.restype = c_int
pa_threaded_mainloop_in_thread.argtypes = [POINTER(pa_threaded_mainloop)]
# /usr/include/pulse/thread-mainloop.h:313
pa_threaded_mainloop_set_name = _lib.pa_threaded_mainloop_set_name
pa_threaded_mainloop_set_name.restype = None
pa_threaded_mainloop_set_name.argtypes = [POINTER(pa_threaded_mainloop), c_char_p]
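
# --- Illustrative usage sketch (added example; not part of the generated bindings) ---
# The threaded mainloop runs the event loop in its own thread, so every libpulse
# call made from application code must be bracketed by
# pa_threaded_mainloop_lock()/_unlock(), and completion is awaited with
# pa_threaded_mainloop_wait() against a pa_threaded_mainloop_signal() issued
# from a callback.  A hedged lifecycle sketch (no context is connected here):
def _example_threaded_mainloop_lifecycle():
    ml = pa_threaded_mainloop_new()
    api = pa_threaded_mainloop_get_api(ml)   # would be handed to pa_context_new()
    if pa_threaded_mainloop_start(ml) != 0:
        pa_threaded_mainloop_free(ml)
        raise RuntimeError('could not start the PulseAudio mainloop thread')
    pa_threaded_mainloop_lock(ml)
    try:
        # Issue asynchronous requests here.  A callback running on the loop
        # thread would call pa_threaded_mainloop_signal(ml, 0), and this thread
        # would block in pa_threaded_mainloop_wait(ml) until that happens.
        pass
    finally:
        pa_threaded_mainloop_unlock(ml)
    pa_threaded_mainloop_stop(ml)
    pa_threaded_mainloop_free(ml)
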
class struct_pa_mainloop(Structure):
__slots__ = [
]
struct_pa_mainloop._fields_ = [
('_opaque_struct', c_int)
]
class struct_pa_mainloop(Structure):
__slots__ = [
]
struct_pa_mainloop._fields_ = [
('_opaque_struct', c_int)
]
pa_mainloop = struct_pa_mainloop # /usr/include/pulse/mainloop.h:78
# /usr/include/pulse/mainloop.h:81
pa_mainloop_new = _lib.pa_mainloop_new
pa_mainloop_new.restype = POINTER(pa_mainloop)
pa_mainloop_new.argtypes = []
# /usr/include/pulse/mainloop.h:84
pa_mainloop_free = _lib.pa_mainloop_free
pa_mainloop_free.restype = None
pa_mainloop_free.argtypes = [POINTER(pa_mainloop)]
# /usr/include/pulse/mainloop.h:89
pa_mainloop_prepare = _lib.pa_mainloop_prepare
pa_mainloop_prepare.restype = c_int
pa_mainloop_prepare.argtypes = [POINTER(pa_mainloop), c_int]
# /usr/include/pulse/mainloop.h:92
pa_mainloop_poll = _lib.pa_mainloop_poll
pa_mainloop_poll.restype = c_int
pa_mainloop_poll.argtypes = [POINTER(pa_mainloop)]
# /usr/include/pulse/mainloop.h:96
pa_mainloop_dispatch = _lib.pa_mainloop_dispatch
pa_mainloop_dispatch.restype = c_int
pa_mainloop_dispatch.argtypes = [POINTER(pa_mainloop)]
# /usr/include/pulse/mainloop.h:99
pa_mainloop_get_retval = _lib.pa_mainloop_get_retval
pa_mainloop_get_retval.restype = c_int
pa_mainloop_get_retval.argtypes = [POINTER(pa_mainloop)]
# /usr/include/pulse/mainloop.h:107
pa_mainloop_iterate = _lib.pa_mainloop_iterate
pa_mainloop_iterate.restype = c_int
pa_mainloop_iterate.argtypes = [POINTER(pa_mainloop), c_int, POINTER(c_int)]
# /usr/include/pulse/mainloop.h:110
pa_mainloop_run = _lib.pa_mainloop_run
pa_mainloop_run.restype = c_int
pa_mainloop_run.argtypes = [POINTER(pa_mainloop), POINTER(c_int)]
# /usr/include/pulse/mainloop.h:115
pa_mainloop_get_api = _lib.pa_mainloop_get_api
pa_mainloop_get_api.restype = POINTER(pa_mainloop_api)
pa_mainloop_get_api.argtypes = [POINTER(pa_mainloop)]
# /usr/include/pulse/mainloop.h:118
pa_mainloop_quit = _lib.pa_mainloop_quit
pa_mainloop_quit.restype = None
pa_mainloop_quit.argtypes = [POINTER(pa_mainloop), c_int]
# /usr/include/pulse/mainloop.h:121
pa_mainloop_wakeup = _lib.pa_mainloop_wakeup
pa_mainloop_wakeup.restype = None
pa_mainloop_wakeup.argtypes = [POINTER(pa_mainloop)]
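
# --- Illustrative usage sketch (added example; not part of the generated bindings) ---
# The plain (single-threaded) mainloop is driven explicitly with
# pa_mainloop_iterate().  A hedged bootstrap showing how the loop, its API
# vtable and a context fit together; the application name, flags and polling
# pattern are illustrative only, and error handling is elided.
def _example_connect_context(app_name=b'lib_pulseaudio-example'):
    ml = pa_mainloop_new()
    api = pa_mainloop_get_api(ml)
    ctx = pa_context_new(api, app_name)
    pa_context_connect(ctx, None, 0, None)   # default server, no flags, no spawn API
    terminal_states = (PA_CONTEXT_READY, PA_CONTEXT_FAILED, PA_CONTEXT_TERMINATED)
    while pa_context_get_state(ctx) not in terminal_states:
        pa_mainloop_iterate(ml, 1, None)
    return ml, ctx
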
class struct_pollfd(Structure):
__slots__ = [
]
struct_pollfd._fields_ = [
('_opaque_struct', c_int)
]
class struct_pollfd(Structure):
__slots__ = [
]
struct_pollfd._fields_ = [
('_opaque_struct', c_int)
]
pa_poll_func = CFUNCTYPE(c_int, POINTER(struct_pollfd), c_ulong, c_int, POINTER(None)) # /usr/include/pulse/mainloop.h:124
# /usr/include/pulse/mainloop.h:127
pa_mainloop_set_poll_func = _lib.pa_mainloop_set_poll_func
pa_mainloop_set_poll_func.restype = None
pa_mainloop_set_poll_func.argtypes = [POINTER(pa_mainloop), pa_poll_func, POINTER(None)]
class struct_pa_signal_event(Structure):
__slots__ = [
]
struct_pa_signal_event._fields_ = [
('_opaque_struct', c_int)
]
class struct_pa_signal_event(Structure):
__slots__ = [
]
struct_pa_signal_event._fields_ = [
('_opaque_struct', c_int)
]
pa_signal_event = struct_pa_signal_event # /usr/include/pulse/mainloop-signal.h:39
pa_signal_cb_t = CFUNCTYPE(None, POINTER(pa_mainloop_api), POINTER(pa_signal_event), c_int, POINTER(None)) # /usr/include/pulse/mainloop-signal.h:42
pa_signal_destroy_cb_t = CFUNCTYPE(None, POINTER(pa_mainloop_api), POINTER(pa_signal_event), POINTER(None)) # /usr/include/pulse/mainloop-signal.h:45
# /usr/include/pulse/mainloop-signal.h:48
pa_signal_init = _lib.pa_signal_init
pa_signal_init.restype = c_int
pa_signal_init.argtypes = [POINTER(pa_mainloop_api)]
# /usr/include/pulse/mainloop-signal.h:51
pa_signal_done = _lib.pa_signal_done
pa_signal_done.restype = None
pa_signal_done.argtypes = []
# /usr/include/pulse/mainloop-signal.h:54
pa_signal_new = _lib.pa_signal_new
pa_signal_new.restype = POINTER(pa_signal_event)
pa_signal_new.argtypes = [c_int, pa_signal_cb_t, POINTER(None)]
# /usr/include/pulse/mainloop-signal.h:57
pa_signal_free = _lib.pa_signal_free
pa_signal_free.restype = None
pa_signal_free.argtypes = [POINTER(pa_signal_event)]
# /usr/include/pulse/mainloop-signal.h:60
pa_signal_set_destroy = _lib.pa_signal_set_destroy
pa_signal_set_destroy.restype = None
pa_signal_set_destroy.argtypes = [POINTER(pa_signal_event), pa_signal_destroy_cb_t]
# /usr/include/pulse/util.h:35
pa_get_user_name = _lib.pa_get_user_name
pa_get_user_name.restype = c_char_p
pa_get_user_name.argtypes = [c_char_p, c_size_t]
# /usr/include/pulse/util.h:38
pa_get_host_name = _lib.pa_get_host_name
pa_get_host_name.restype = c_char_p
pa_get_host_name.argtypes = [c_char_p, c_size_t]
# /usr/include/pulse/util.h:41
pa_get_fqdn = _lib.pa_get_fqdn
pa_get_fqdn.restype = c_char_p
pa_get_fqdn.argtypes = [c_char_p, c_size_t]
# /usr/include/pulse/util.h:44
pa_get_home_dir = _lib.pa_get_home_dir
pa_get_home_dir.restype = c_char_p
pa_get_home_dir.argtypes = [c_char_p, c_size_t]
# /usr/include/pulse/util.h:48
pa_get_binary_name = _lib.pa_get_binary_name
pa_get_binary_name.restype = c_char_p
pa_get_binary_name.argtypes = [c_char_p, c_size_t]
# /usr/include/pulse/util.h:52
pa_path_get_filename = _lib.pa_path_get_filename
pa_path_get_filename.restype = c_char_p
pa_path_get_filename.argtypes = [c_char_p]
# /usr/include/pulse/util.h:55
pa_msleep = _lib.pa_msleep
pa_msleep.restype = c_int
pa_msleep.argtypes = [c_ulong]
class struct_timeval(Structure):
__slots__ = [
]
struct_timeval._fields_ = [
('_opaque_struct', c_int)
]
class struct_timeval(Structure):
__slots__ = [
]
struct_timeval._fields_ = [
('_opaque_struct', c_int)
]
# /usr/include/pulse/timeval.h:61
pa_gettimeofday = _lib.pa_gettimeofday
pa_gettimeofday.restype = POINTER(struct_timeval)
pa_gettimeofday.argtypes = [POINTER(struct_timeval)]
class struct_timeval(Structure):
__slots__ = [
]
struct_timeval._fields_ = [
('_opaque_struct', c_int)
]
class struct_timeval(Structure):
__slots__ = [
]
struct_timeval._fields_ = [
('_opaque_struct', c_int)
]
# /usr/include/pulse/timeval.h:65
pa_timeval_diff = _lib.pa_timeval_diff
pa_timeval_diff.restype = pa_usec_t
pa_timeval_diff.argtypes = [POINTER(struct_timeval), POINTER(struct_timeval)]
class struct_timeval(Structure):
__slots__ = [
]
struct_timeval._fields_ = [
('_opaque_struct', c_int)
]
class struct_timeval(Structure):
__slots__ = [
]
struct_timeval._fields_ = [
('_opaque_struct', c_int)
]
# /usr/include/pulse/timeval.h:68
pa_timeval_cmp = _lib.pa_timeval_cmp
pa_timeval_cmp.restype = c_int
pa_timeval_cmp.argtypes = [POINTER(struct_timeval), POINTER(struct_timeval)]
class struct_timeval(Structure):
__slots__ = [
]
struct_timeval._fields_ = [
('_opaque_struct', c_int)
]
# /usr/include/pulse/timeval.h:71
pa_timeval_age = _lib.pa_timeval_age
pa_timeval_age.restype = pa_usec_t
pa_timeval_age.argtypes = [POINTER(struct_timeval)]
class struct_timeval(Structure):
__slots__ = [
]
struct_timeval._fields_ = [
('_opaque_struct', c_int)
]
class struct_timeval(Structure):
__slots__ = [
]
struct_timeval._fields_ = [
('_opaque_struct', c_int)
]
# /usr/include/pulse/timeval.h:74
pa_timeval_add = _lib.pa_timeval_add
pa_timeval_add.restype = POINTER(struct_timeval)
pa_timeval_add.argtypes = [POINTER(struct_timeval), pa_usec_t]
class struct_timeval(Structure):
__slots__ = [
]
struct_timeval._fields_ = [
('_opaque_struct', c_int)
]
class struct_timeval(Structure):
__slots__ = [
]
struct_timeval._fields_ = [
('_opaque_struct', c_int)
]
# /usr/include/pulse/timeval.h:77
pa_timeval_sub = _lib.pa_timeval_sub
pa_timeval_sub.restype = POINTER(struct_timeval)
pa_timeval_sub.argtypes = [POINTER(struct_timeval), pa_usec_t]
class struct_timeval(Structure):
__slots__ = [
]
struct_timeval._fields_ = [
('_opaque_struct', c_int)
]
class struct_timeval(Structure):
__slots__ = [
]
struct_timeval._fields_ = [
('_opaque_struct', c_int)
]
# /usr/include/pulse/timeval.h:80
pa_timeval_store = _lib.pa_timeval_store
pa_timeval_store.restype = POINTER(struct_timeval)
pa_timeval_store.argtypes = [POINTER(struct_timeval), pa_usec_t]
class struct_timeval(Structure):
__slots__ = [
]
struct_timeval._fields_ = [
('_opaque_struct', c_int)
]
# /usr/include/pulse/timeval.h:83
pa_timeval_load = _lib.pa_timeval_load
pa_timeval_load.restype = pa_usec_t
pa_timeval_load.argtypes = [POINTER(struct_timeval)]
__all__ = ['pa_get_library_version', 'PA_API_VERSION', 'PA_PROTOCOL_VERSION',
'PA_MAJOR', 'PA_MINOR', 'PA_MICRO', 'PA_CHANNELS_MAX', 'PA_RATE_MAX',
'pa_sample_format_t', 'PA_SAMPLE_U8', 'PA_SAMPLE_ALAW', 'PA_SAMPLE_ULAW',
'PA_SAMPLE_S16LE', 'PA_SAMPLE_S16BE', 'PA_SAMPLE_FLOAT32LE',
'PA_SAMPLE_FLOAT32BE', 'PA_SAMPLE_S32LE', 'PA_SAMPLE_S32BE',
'PA_SAMPLE_S24LE', 'PA_SAMPLE_S24BE', 'PA_SAMPLE_S24_32LE',
'PA_SAMPLE_S24_32BE', 'PA_SAMPLE_MAX', 'PA_SAMPLE_INVALID', 'pa_sample_spec',
'pa_usec_t', 'pa_bytes_per_second', 'pa_frame_size', 'pa_sample_size',
'pa_sample_size_of_format', 'pa_bytes_to_usec', 'pa_usec_to_bytes',
'pa_sample_spec_init', 'pa_sample_format_valid', 'pa_sample_rate_valid',
'pa_channels_valid', 'pa_sample_spec_valid', 'pa_sample_spec_equal',
'pa_sample_format_to_string', 'pa_parse_sample_format',
'PA_SAMPLE_SPEC_SNPRINT_MAX', 'pa_sample_spec_snprint',
'PA_BYTES_SNPRINT_MAX', 'pa_bytes_snprint', 'pa_sample_format_is_le',
'pa_sample_format_is_be', 'pa_context_state_t', 'PA_CONTEXT_UNCONNECTED',
'PA_CONTEXT_CONNECTING', 'PA_CONTEXT_AUTHORIZING', 'PA_CONTEXT_SETTING_NAME',
'PA_CONTEXT_READY', 'PA_CONTEXT_FAILED', 'PA_CONTEXT_TERMINATED',
'pa_stream_state_t', 'PA_STREAM_UNCONNECTED', 'PA_STREAM_CREATING',
'PA_STREAM_READY', 'PA_STREAM_FAILED', 'PA_STREAM_TERMINATED',
'pa_operation_state_t', 'PA_OPERATION_RUNNING', 'PA_OPERATION_DONE',
'PA_OPERATION_CANCELLED', 'pa_context_flags_t', 'PA_CONTEXT_NOFLAGS',
'PA_CONTEXT_NOAUTOSPAWN', 'PA_CONTEXT_NOFAIL', 'pa_direction_t',
'PA_DIRECTION_OUTPUT', 'PA_DIRECTION_INPUT', 'pa_device_type_t',
'PA_DEVICE_TYPE_SINK', 'PA_DEVICE_TYPE_SOURCE', 'pa_stream_direction_t',
'PA_STREAM_NODIRECTION', 'PA_STREAM_PLAYBACK', 'PA_STREAM_RECORD',
'PA_STREAM_UPLOAD', 'pa_stream_flags_t', 'PA_STREAM_NOFLAGS',
'PA_STREAM_START_CORKED', 'PA_STREAM_INTERPOLATE_TIMING',
'PA_STREAM_NOT_MONOTONIC', 'PA_STREAM_AUTO_TIMING_UPDATE',
'PA_STREAM_NO_REMAP_CHANNELS', 'PA_STREAM_NO_REMIX_CHANNELS',
'PA_STREAM_FIX_FORMAT', 'PA_STREAM_FIX_RATE', 'PA_STREAM_FIX_CHANNELS',
'PA_STREAM_DONT_MOVE', 'PA_STREAM_VARIABLE_RATE', 'PA_STREAM_PEAK_DETECT',
'PA_STREAM_START_MUTED', 'PA_STREAM_ADJUST_LATENCY',
'PA_STREAM_EARLY_REQUESTS', 'PA_STREAM_DONT_INHIBIT_AUTO_SUSPEND',
'PA_STREAM_START_UNMUTED', 'PA_STREAM_FAIL_ON_SUSPEND',
'PA_STREAM_RELATIVE_VOLUME', 'PA_STREAM_PASSTHROUGH', 'pa_buffer_attr',
'pa_error_code_t', 'PA_OK', 'PA_ERR_ACCESS', 'PA_ERR_COMMAND',
'PA_ERR_INVALID', 'PA_ERR_EXIST', 'PA_ERR_NOENTITY',
'PA_ERR_CONNECTIONREFUSED', 'PA_ERR_PROTOCOL', 'PA_ERR_TIMEOUT',
'PA_ERR_AUTHKEY', 'PA_ERR_INTERNAL', 'PA_ERR_CONNECTIONTERMINATED',
'PA_ERR_KILLED', 'PA_ERR_INVALIDSERVER', 'PA_ERR_MODINITFAILED',
'PA_ERR_BADSTATE', 'PA_ERR_NODATA', 'PA_ERR_VERSION', 'PA_ERR_TOOLARGE',
'PA_ERR_NOTSUPPORTED', 'PA_ERR_UNKNOWN', 'PA_ERR_NOEXTENSION',
'PA_ERR_OBSOLETE', 'PA_ERR_NOTIMPLEMENTED', 'PA_ERR_FORKED', 'PA_ERR_IO',
'PA_ERR_BUSY', 'PA_ERR_MAX', 'pa_subscription_mask_t',
'PA_SUBSCRIPTION_MASK_NULL', 'PA_SUBSCRIPTION_MASK_SINK',
'PA_SUBSCRIPTION_MASK_SOURCE', 'PA_SUBSCRIPTION_MASK_SINK_INPUT',
'PA_SUBSCRIPTION_MASK_SOURCE_OUTPUT', 'PA_SUBSCRIPTION_MASK_MODULE',
'PA_SUBSCRIPTION_MASK_CLIENT', 'PA_SUBSCRIPTION_MASK_SAMPLE_CACHE',
'PA_SUBSCRIPTION_MASK_SERVER', 'PA_SUBSCRIPTION_MASK_AUTOLOAD',
'PA_SUBSCRIPTION_MASK_CARD', 'PA_SUBSCRIPTION_MASK_ALL',
'pa_subscription_event_type_t', 'PA_SUBSCRIPTION_EVENT_SINK',
'PA_SUBSCRIPTION_EVENT_SOURCE', 'PA_SUBSCRIPTION_EVENT_SINK_INPUT',
'PA_SUBSCRIPTION_EVENT_SOURCE_OUTPUT', 'PA_SUBSCRIPTION_EVENT_MODULE',
'PA_SUBSCRIPTION_EVENT_CLIENT', 'PA_SUBSCRIPTION_EVENT_SAMPLE_CACHE',
'PA_SUBSCRIPTION_EVENT_SERVER', 'PA_SUBSCRIPTION_EVENT_AUTOLOAD',
'PA_SUBSCRIPTION_EVENT_CARD', 'PA_SUBSCRIPTION_EVENT_FACILITY_MASK',
'PA_SUBSCRIPTION_EVENT_NEW', 'PA_SUBSCRIPTION_EVENT_CHANGE',
'PA_SUBSCRIPTION_EVENT_REMOVE', 'PA_SUBSCRIPTION_EVENT_TYPE_MASK',
'pa_timing_info', 'pa_spawn_api', 'pa_seek_mode_t', 'PA_SEEK_RELATIVE',
'PA_SEEK_ABSOLUTE', 'PA_SEEK_RELATIVE_ON_READ', 'PA_SEEK_RELATIVE_END',
'pa_sink_flags_t', 'PA_SINK_NOFLAGS', 'PA_SINK_HW_VOLUME_CTRL',
'PA_SINK_LATENCY', 'PA_SINK_HARDWARE', 'PA_SINK_NETWORK',
'PA_SINK_HW_MUTE_CTRL', 'PA_SINK_DECIBEL_VOLUME', 'PA_SINK_FLAT_VOLUME',
'PA_SINK_DYNAMIC_LATENCY', 'PA_SINK_SET_FORMATS', 'pa_sink_state_t',
'PA_SINK_INVALID_STATE', 'PA_SINK_RUNNING', 'PA_SINK_IDLE',
'PA_SINK_SUSPENDED', 'PA_SINK_INIT', 'PA_SINK_UNLINKED', 'pa_source_flags_t',
'PA_SOURCE_NOFLAGS', 'PA_SOURCE_HW_VOLUME_CTRL', 'PA_SOURCE_LATENCY',
'PA_SOURCE_HARDWARE', 'PA_SOURCE_NETWORK', 'PA_SOURCE_HW_MUTE_CTRL',
'PA_SOURCE_DECIBEL_VOLUME', 'PA_SOURCE_DYNAMIC_LATENCY',
'PA_SOURCE_FLAT_VOLUME', 'pa_source_state_t', 'PA_SOURCE_INVALID_STATE',
'PA_SOURCE_RUNNING', 'PA_SOURCE_IDLE', 'PA_SOURCE_SUSPENDED',
'PA_SOURCE_INIT', 'PA_SOURCE_UNLINKED', 'pa_free_cb_t', 'pa_port_available_t',
'PA_PORT_AVAILABLE_UNKNOWN', 'PA_PORT_AVAILABLE_NO', 'PA_PORT_AVAILABLE_YES',
'pa_mainloop_api', 'pa_io_event_flags_t', 'PA_IO_EVENT_NULL',
'PA_IO_EVENT_INPUT', 'PA_IO_EVENT_OUTPUT', 'PA_IO_EVENT_HANGUP',
'PA_IO_EVENT_ERROR', 'pa_io_event', 'pa_io_event_cb_t',
'pa_io_event_destroy_cb_t', 'pa_time_event', 'pa_time_event_cb_t',
'pa_time_event_destroy_cb_t', 'pa_defer_event', 'pa_defer_event_cb_t',
'pa_defer_event_destroy_cb_t', 'pa_mainloop_api_once',
'pa_channel_position_t', 'PA_CHANNEL_POSITION_INVALID',
'PA_CHANNEL_POSITION_MONO', 'PA_CHANNEL_POSITION_FRONT_LEFT',
'PA_CHANNEL_POSITION_FRONT_RIGHT', 'PA_CHANNEL_POSITION_FRONT_CENTER',
'PA_CHANNEL_POSITION_LEFT', 'PA_CHANNEL_POSITION_RIGHT',
'PA_CHANNEL_POSITION_CENTER', 'PA_CHANNEL_POSITION_REAR_CENTER',
'PA_CHANNEL_POSITION_REAR_LEFT', 'PA_CHANNEL_POSITION_REAR_RIGHT',
'PA_CHANNEL_POSITION_LFE', 'PA_CHANNEL_POSITION_SUBWOOFER',
'PA_CHANNEL_POSITION_FRONT_LEFT_OF_CENTER',
'PA_CHANNEL_POSITION_FRONT_RIGHT_OF_CENTER', 'PA_CHANNEL_POSITION_SIDE_LEFT',
'PA_CHANNEL_POSITION_SIDE_RIGHT', 'PA_CHANNEL_POSITION_AUX0',
'PA_CHANNEL_POSITION_AUX1', 'PA_CHANNEL_POSITION_AUX2',
'PA_CHANNEL_POSITION_AUX3', 'PA_CHANNEL_POSITION_AUX4',
'PA_CHANNEL_POSITION_AUX5', 'PA_CHANNEL_POSITION_AUX6',
'PA_CHANNEL_POSITION_AUX7', 'PA_CHANNEL_POSITION_AUX8',
'PA_CHANNEL_POSITION_AUX9', 'PA_CHANNEL_POSITION_AUX10',
'PA_CHANNEL_POSITION_AUX11', 'PA_CHANNEL_POSITION_AUX12',
'PA_CHANNEL_POSITION_AUX13', 'PA_CHANNEL_POSITION_AUX14',
'PA_CHANNEL_POSITION_AUX15', 'PA_CHANNEL_POSITION_AUX16',
'PA_CHANNEL_POSITION_AUX17', 'PA_CHANNEL_POSITION_AUX18',
'PA_CHANNEL_POSITION_AUX19', 'PA_CHANNEL_POSITION_AUX20',
'PA_CHANNEL_POSITION_AUX21', 'PA_CHANNEL_POSITION_AUX22',
'PA_CHANNEL_POSITION_AUX23', 'PA_CHANNEL_POSITION_AUX24',
'PA_CHANNEL_POSITION_AUX25', 'PA_CHANNEL_POSITION_AUX26',
'PA_CHANNEL_POSITION_AUX27', 'PA_CHANNEL_POSITION_AUX28',
'PA_CHANNEL_POSITION_AUX29', 'PA_CHANNEL_POSITION_AUX30',
'PA_CHANNEL_POSITION_AUX31', 'PA_CHANNEL_POSITION_TOP_CENTER',
'PA_CHANNEL_POSITION_TOP_FRONT_LEFT', 'PA_CHANNEL_POSITION_TOP_FRONT_RIGHT',
'PA_CHANNEL_POSITION_TOP_FRONT_CENTER', 'PA_CHANNEL_POSITION_TOP_REAR_LEFT',
'PA_CHANNEL_POSITION_TOP_REAR_RIGHT', 'PA_CHANNEL_POSITION_TOP_REAR_CENTER',
'PA_CHANNEL_POSITION_MAX', 'pa_channel_position_mask_t',
'pa_channel_map_def_t', 'PA_CHANNEL_MAP_AIFF', 'PA_CHANNEL_MAP_ALSA',
'PA_CHANNEL_MAP_AUX', 'PA_CHANNEL_MAP_WAVEEX', 'PA_CHANNEL_MAP_OSS',
'PA_CHANNEL_MAP_DEF_MAX', 'PA_CHANNEL_MAP_DEFAULT', 'pa_channel_map',
'pa_channel_map_init', 'pa_channel_map_init_mono',
'pa_channel_map_init_stereo', 'pa_channel_map_init_auto',
'pa_channel_map_init_extend', 'pa_channel_position_to_string',
'pa_channel_position_from_string', 'pa_channel_position_to_pretty_string',
'PA_CHANNEL_MAP_SNPRINT_MAX', 'pa_channel_map_snprint',
'pa_channel_map_parse', 'pa_channel_map_equal', 'pa_channel_map_valid',
'pa_channel_map_compatible', 'pa_channel_map_superset',
'pa_channel_map_can_balance', 'pa_channel_map_can_fade',
'pa_channel_map_to_name', 'pa_channel_map_to_pretty_name',
'pa_channel_map_has_position', 'pa_channel_map_mask', 'pa_operation',
'pa_operation_notify_cb_t', 'pa_operation_ref', 'pa_operation_unref',
'pa_operation_cancel', 'pa_operation_get_state',
'pa_operation_set_state_callback', 'pa_context', 'pa_context_notify_cb_t',
'pa_context_success_cb_t', 'pa_context_event_cb_t', 'pa_context_new',
'pa_context_new_with_proplist', 'pa_context_unref', 'pa_context_ref',
'pa_context_set_state_callback', 'pa_context_set_event_callback',
'pa_context_errno', 'pa_context_is_pending', 'pa_context_get_state',
'pa_context_connect', 'pa_context_disconnect', 'pa_context_drain',
'pa_context_exit_daemon', 'pa_context_set_default_sink',
'pa_context_set_default_source', 'pa_context_is_local', 'pa_context_set_name',
'pa_context_get_server', 'pa_context_get_protocol_version',
'pa_context_get_server_protocol_version', 'PA_UPDATE_SET', 'PA_UPDATE_MERGE',
'PA_UPDATE_REPLACE', 'pa_context_proplist_update',
'pa_context_proplist_remove', 'pa_context_get_index', 'pa_context_rttime_new',
'pa_context_rttime_restart', 'pa_context_get_tile_size',
'pa_context_load_cookie_from_file', 'pa_volume_t', 'pa_cvolume',
'pa_cvolume_equal', 'pa_cvolume_init', 'pa_cvolume_set',
'PA_CVOLUME_SNPRINT_MAX', 'pa_cvolume_snprint',
'PA_SW_CVOLUME_SNPRINT_DB_MAX', 'pa_sw_cvolume_snprint_dB',
'PA_CVOLUME_SNPRINT_VERBOSE_MAX', 'pa_cvolume_snprint_verbose',
'PA_VOLUME_SNPRINT_MAX', 'pa_volume_snprint', 'PA_SW_VOLUME_SNPRINT_DB_MAX',
'pa_sw_volume_snprint_dB', 'PA_VOLUME_SNPRINT_VERBOSE_MAX',
'pa_volume_snprint_verbose', 'pa_cvolume_avg', 'pa_cvolume_avg_mask',
'pa_cvolume_max', 'pa_cvolume_max_mask', 'pa_cvolume_min',
'pa_cvolume_min_mask', 'pa_cvolume_valid', 'pa_cvolume_channels_equal_to',
'pa_sw_volume_multiply', 'pa_sw_cvolume_multiply',
'pa_sw_cvolume_multiply_scalar', 'pa_sw_volume_divide',
'pa_sw_cvolume_divide', 'pa_sw_cvolume_divide_scalar', 'pa_sw_volume_from_dB',
'pa_sw_volume_to_dB', 'pa_sw_volume_from_linear', 'pa_sw_volume_to_linear',
'pa_cvolume_remap', 'pa_cvolume_compatible',
'pa_cvolume_compatible_with_channel_map', 'pa_cvolume_get_balance',
'pa_cvolume_set_balance', 'pa_cvolume_get_fade', 'pa_cvolume_set_fade',
'pa_cvolume_scale', 'pa_cvolume_scale_mask', 'pa_cvolume_set_position',
'pa_cvolume_get_position', 'pa_cvolume_merge', 'pa_cvolume_inc_clamp',
'pa_cvolume_inc', 'pa_cvolume_dec', 'pa_stream', 'pa_stream_success_cb_t',
'pa_stream_request_cb_t', 'pa_stream_notify_cb_t', 'pa_stream_event_cb_t',
'pa_stream_new', 'pa_stream_new_with_proplist', 'PA_ENCODING_ANY',
'PA_ENCODING_PCM', 'PA_ENCODING_AC3_IEC61937', 'PA_ENCODING_EAC3_IEC61937',
'PA_ENCODING_MPEG_IEC61937', 'PA_ENCODING_DTS_IEC61937',
'PA_ENCODING_MPEG2_AAC_IEC61937', 'PA_ENCODING_MAX', 'PA_ENCODING_INVALID',
'pa_stream_new_extended', 'pa_stream_unref', 'pa_stream_ref',
'pa_stream_get_state', 'pa_stream_get_context', 'pa_stream_get_index',
'pa_stream_get_device_index', 'pa_stream_get_device_name',
'pa_stream_is_suspended', 'pa_stream_is_corked', 'pa_stream_connect_playback',
'pa_stream_connect_record', 'pa_stream_disconnect', 'pa_stream_begin_write',
'pa_stream_cancel_write', 'pa_stream_write', 'pa_stream_write_ext_free',
'pa_stream_peek', 'pa_stream_drop', 'pa_stream_writable_size',
'pa_stream_readable_size', 'pa_stream_drain', 'pa_stream_update_timing_info',
'pa_stream_set_state_callback', 'pa_stream_set_write_callback',
'pa_stream_set_read_callback', 'pa_stream_set_overflow_callback',
'pa_stream_get_underflow_index', 'pa_stream_set_underflow_callback',
'pa_stream_set_started_callback', 'pa_stream_set_latency_update_callback',
'pa_stream_set_moved_callback', 'pa_stream_set_suspended_callback',
'pa_stream_set_event_callback', 'pa_stream_set_buffer_attr_callback',
'pa_stream_cork', 'pa_stream_flush', 'pa_stream_prebuf', 'pa_stream_trigger',
'pa_stream_set_name', 'pa_stream_get_time', 'pa_stream_get_latency',
'pa_stream_get_timing_info', 'pa_stream_get_sample_spec',
'pa_stream_get_channel_map', 'pa_stream_get_format_info',
'pa_stream_get_buffer_attr', 'pa_stream_set_buffer_attr',
'pa_stream_update_sample_rate', 'pa_stream_proplist_update',
'pa_stream_proplist_remove', 'pa_stream_set_monitor_stream',
'pa_stream_get_monitor_stream', 'pa_sink_port_info', 'pa_sink_info',
'pa_sink_info_cb_t', 'pa_context_get_sink_info_by_name',
'pa_context_get_sink_info_by_index', 'pa_context_get_sink_info_list',
'pa_context_set_sink_volume_by_index', 'pa_context_set_sink_volume_by_name',
'pa_context_set_sink_mute_by_index', 'pa_context_set_sink_mute_by_name',
'pa_context_suspend_sink_by_name', 'pa_context_suspend_sink_by_index',
'pa_context_set_sink_port_by_index', 'pa_context_set_sink_port_by_name',
'pa_source_port_info', 'pa_source_info', 'pa_source_info_cb_t',
'pa_context_get_source_info_by_name', 'pa_context_get_source_info_by_index',
'pa_context_get_source_info_list', 'pa_context_set_source_volume_by_index',
'pa_context_set_source_volume_by_name', 'pa_context_set_source_mute_by_index',
'pa_context_set_source_mute_by_name', 'pa_context_suspend_source_by_name',
'pa_context_suspend_source_by_index', 'pa_context_set_source_port_by_index',
'pa_context_set_source_port_by_name', 'pa_server_info', 'pa_server_info_cb_t',
'pa_context_get_server_info', 'pa_module_info', 'pa_module_info_cb_t',
'pa_context_get_module_info', 'pa_context_get_module_info_list',
'pa_context_index_cb_t', 'pa_context_load_module', 'pa_context_unload_module',
'pa_client_info', 'pa_client_info_cb_t', 'pa_context_get_client_info',
'pa_context_get_client_info_list', 'pa_context_kill_client',
'pa_card_profile_info', 'pa_card_profile_info2', 'pa_card_port_info',
'pa_card_info', 'pa_card_info_cb_t', 'pa_context_get_card_info_by_index',
'pa_context_get_card_info_by_name', 'pa_context_get_card_info_list',
'pa_context_set_card_profile_by_index', 'pa_context_set_card_profile_by_name',
'pa_context_set_port_latency_offset', 'pa_sink_input_info',
'pa_sink_input_info_cb_t', 'pa_context_get_sink_input_info',
'pa_context_get_sink_input_info_list', 'pa_context_move_sink_input_by_name',
'pa_context_move_sink_input_by_index', 'pa_context_set_sink_input_volume',
'pa_context_set_sink_input_mute', 'pa_context_kill_sink_input',
'pa_source_output_info', 'pa_source_output_info_cb_t',
'pa_context_get_source_output_info', 'pa_context_get_source_output_info_list',
'pa_context_move_source_output_by_name',
'pa_context_move_source_output_by_index',
'pa_context_set_source_output_volume', 'pa_context_set_source_output_mute',
'pa_context_kill_source_output', 'pa_stat_info', 'pa_stat_info_cb_t',
'pa_context_stat', 'pa_sample_info', 'pa_sample_info_cb_t',
'pa_context_get_sample_info_by_name', 'pa_context_get_sample_info_by_index',
'pa_context_get_sample_info_list', 'pa_autoload_type_t', 'PA_AUTOLOAD_SINK',
'PA_AUTOLOAD_SOURCE', 'pa_autoload_info', 'pa_autoload_info_cb_t',
'pa_context_get_autoload_info_by_name',
'pa_context_get_autoload_info_by_index', 'pa_context_get_autoload_info_list',
'pa_context_add_autoload', 'pa_context_remove_autoload_by_name',
'pa_context_remove_autoload_by_index', 'pa_context_subscribe_cb_t',
'pa_context_subscribe', 'pa_context_set_subscribe_callback',
'pa_context_play_sample_cb_t', 'pa_stream_connect_upload',
'pa_stream_finish_upload', 'pa_context_remove_sample',
'pa_context_play_sample', 'pa_context_play_sample_with_proplist',
'pa_strerror', 'pa_xmalloc', 'pa_xmalloc0', 'pa_xrealloc', 'pa_xfree',
'pa_xstrdup', 'pa_xstrndup', 'pa_xmemdup', '_pa_xnew_internal',
'_pa_xnew0_internal', '_pa_xnewdup_internal', '_pa_xrenew_internal',
'pa_utf8_valid', 'pa_ascii_valid', 'pa_utf8_filter', 'pa_ascii_filter',
'pa_utf8_to_locale', 'pa_locale_to_utf8', 'pa_threaded_mainloop',
'pa_threaded_mainloop_new', 'pa_threaded_mainloop_free',
'pa_threaded_mainloop_start', 'pa_threaded_mainloop_stop',
'pa_threaded_mainloop_lock', 'pa_threaded_mainloop_unlock',
'pa_threaded_mainloop_wait', 'pa_threaded_mainloop_signal',
'pa_threaded_mainloop_accept', 'pa_threaded_mainloop_get_retval',
'pa_threaded_mainloop_get_api', 'pa_threaded_mainloop_in_thread',
'pa_threaded_mainloop_set_name', 'pa_mainloop', 'pa_mainloop_new',
'pa_mainloop_free', 'pa_mainloop_prepare', 'pa_mainloop_poll',
'pa_mainloop_dispatch', 'pa_mainloop_get_retval', 'pa_mainloop_iterate',
'pa_mainloop_run', 'pa_mainloop_get_api', 'pa_mainloop_quit',
'pa_mainloop_wakeup', 'pa_poll_func', 'pa_mainloop_set_poll_func',
'pa_signal_event', 'pa_signal_cb_t', 'pa_signal_destroy_cb_t',
'pa_signal_init', 'pa_signal_done', 'pa_signal_new', 'pa_signal_free',
'pa_signal_set_destroy', 'pa_get_user_name', 'pa_get_host_name',
'pa_get_fqdn', 'pa_get_home_dir', 'pa_get_binary_name',
'pa_path_get_filename', 'pa_msleep', 'pa_gettimeofday', 'pa_timeval_diff',
'pa_timeval_cmp', 'pa_timeval_age', 'pa_timeval_add', 'pa_timeval_sub',
'pa_timeval_store', 'pa_timeval_load']
| {
"content_hash": "cd051d5b3ee4b30ae58b89932f8dc7f4",
"timestamp": "",
"source": "github",
"line_count": 2950,
"max_line_length": 168,
"avg_line_length": 40.10677966101695,
"alnum_prop": 0.7162828043781431,
"repo_name": "nicememory/pie",
"id": "73ca198d321b61b8368d93acd466c91e38e2f4ce",
"size": "118315",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "pyglet/pyglet/media/drivers/pulse/lib_pulseaudio.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "5318"
},
{
"name": "C",
"bytes": "6624"
},
{
"name": "CSS",
"bytes": "1828"
},
{
"name": "HTML",
"bytes": "9229"
},
{
"name": "JavaScript",
"bytes": "6751"
},
{
"name": "Makefile",
"bytes": "5773"
},
{
"name": "PHP",
"bytes": "2190"
},
{
"name": "Python",
"bytes": "9377528"
},
{
"name": "Shell",
"bytes": "664"
},
{
"name": "Vim script",
"bytes": "2952"
}
],
"symlink_target": ""
} |
"""
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2010 Nathanael C. Fritz
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
import logging
try:
import queue
except ImportError:
import Queue as queue
from sleekxmpp.xmlstream import StanzaBase, RESPONSE_TIMEOUT
from sleekxmpp.xmlstream.handler.base import BaseHandler
log = logging.getLogger(__name__)
class Waiter(BaseHandler):
"""
The Waiter handler allows an event handler to block
until a particular stanza has been received. The handler
    is given either the matched stanza, or False if the
    waiter timed out.
Methods:
check_delete -- Overrides BaseHandler.check_delete
prerun -- Overrides BaseHandler.prerun
run -- Overrides BaseHandler.run
wait -- Wait for a stanza to arrive and return it to
an event handler.
"""
def __init__(self, name, matcher, stream=None):
"""
Create a new Waiter.
Arguments:
name -- The name of the waiter.
matcher -- A matcher object to detect the desired stanza.
stream -- Optional XMLStream instance to monitor.
"""
BaseHandler.__init__(self, name, matcher, stream=stream)
self._payload = queue.Queue()
def prerun(self, payload):
"""
Store the matched stanza.
Overrides BaseHandler.prerun
Arguments:
payload -- The matched stanza object.
"""
self._payload.put(payload)
def run(self, payload):
"""
Do not process this handler during the main event loop.
Overrides BaseHandler.run
Arguments:
payload -- The matched stanza object.
"""
pass
def wait(self, timeout=RESPONSE_TIMEOUT):
"""
Block an event handler while waiting for a stanza to arrive.
Be aware that this will impact performance if called from a
non-threaded event handler.
Will return either the received stanza, or False if the waiter
timed out.
Arguments:
timeout -- The number of seconds to wait for the stanza to
arrive. Defaults to the global default timeout
value sleekxmpp.xmlstream.RESPONSE_TIMEOUT.
"""
try:
stanza = self._payload.get(True, timeout)
except queue.Empty:
stanza = False
log.warning("Timed out waiting for %s" % self.name)
self.stream.removeHandler(self.name)
return stanza
def check_delete(self):
"""
Always remove waiters after use.
Overrides BaseHandler.check_delete
"""
return True
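# --- Editor's illustrative sketch (not part of the original SleekXMPP file) ---
# The helper below shows the intended usage pattern of Waiter: register it
# against a matcher, send the stanza, then block until the reply (or False on
# timeout) comes back. The id-based matcher and the ``xmpp_client`` argument
# are assumptions made for illustration only.
from sleekxmpp.xmlstream.matcher import MatcherId

def wait_for_reply(xmpp_client, iq, timeout=RESPONSE_TIMEOUT):
    """Send ``iq`` and block until a stanza with a matching id arrives."""
    waiter = Waiter('Waiter_%s' % iq['id'], MatcherId(iq['id']),
                    stream=xmpp_client)
    xmpp_client.registerHandler(waiter)
    xmpp_client.send(iq)
    # Returns the matched stanza, or False if the timeout elapsed.
    return waiter.wait(timeout)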
| {
"content_hash": "eba711aa836196af649d2f57f57177c9",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 70,
"avg_line_length": 27.752475247524753,
"alnum_prop": 0.6036389582590082,
"repo_name": "skinkie/SleekXMPP--XEP-0080-",
"id": "a4bc3545ae6c231ab5be3140efae5d9f64fd5bff",
"size": "2803",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sleekxmpp/xmlstream/handler/waiter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "526767"
}
],
"symlink_target": ""
} |
'''
A script to test quicksort, mergesort and bogosort
'''
import random
import quicksort
import mergesort
import bogosort
from sys import argv
data = list(range(int(argv[-1])))
random.shuffle(data)

if argv[-2] == 'q':
    quicksort.quicksort(data)
elif argv[-2] == 'm':
    mergesort.mergesort(data)
elif argv[-2] == 'b':
    bogosort.bogosort(data)
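# Example invocation (editor's note; the quicksort, mergesort and bogosort
# modules are assumed to live alongside this script):
#     python test.py q 10000   # quicksort 10000 shuffled integers
#     python test.py m 10000   # mergesort
#     python test.py b 8       # bogosort -- keep the input tiny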
| {
"content_hash": "2eace303489b078ef8ce4450613aed90",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 50,
"avg_line_length": 17.7,
"alnum_prop": 0.7005649717514124,
"repo_name": "Bolt64/my_code",
"id": "a53fb06311289c771413c3a77c233954dc9515cd",
"size": "374",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "algorithms/test.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "57094"
},
{
"name": "C++",
"bytes": "12255"
},
{
"name": "Haskell",
"bytes": "27215"
},
{
"name": "Jupyter Notebook",
"bytes": "18418"
},
{
"name": "Python",
"bytes": "308871"
},
{
"name": "Racket",
"bytes": "3888"
},
{
"name": "Rust",
"bytes": "22856"
},
{
"name": "Scala",
"bytes": "51026"
},
{
"name": "Shell",
"bytes": "514"
},
{
"name": "Vim script",
"bytes": "341"
}
],
"symlink_target": ""
} |
def is_simple_node(graph, node):
"""A node is "Simple" if none of the following is true
- it has multiple inputs (it joins chains together)
- it has no inputs (it's a root node)
- it has multiple outputs (it splits chains apart)
- it has no outputs (it's a leaf node)
    Keyword arguments:
    graph -- A networkx DiGraph
    node -- A networkx DiGraph node
    """
return graph.in_degree(node) == 1 and graph.out_degree(node) == 1
def simplified(graph):
"""Simplify a CallGraph by coalescing call chains and dropping
any unreferenced calls.
Keyword arguments:
graph -- A networkx DiGraph
"""
g = graph.full_copy()
for n in graph:
if is_simple_node(graph, n):
(pre,) = g.predecessors(n)
(suc,) = g.successors(n)
g.add_edge(pre, suc)
g.remove_node(n)
return g
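# --- Editor's illustrative sketch (not part of the original module) ---
# On a plain chain a -> b -> c -> d, only ``b`` and ``c`` are "simple": they
# sit in the middle of the chain, so coalescing removes them and leaves a
# single edge a -> d. The plain DiGraph below only demonstrates
# ``is_simple_node``; ``simplified`` itself expects a CallGraph that provides
# ``full_copy()``.
def _demo_chain():
    import networkx as nx
    g = nx.DiGraph()
    g.add_edges_from([('a', 'b'), ('b', 'c'), ('c', 'd')])
    return [n for n in g if is_simple_node(g, n)]   # ['b', 'c']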
| {
"content_hash": "59d0006d1488c9f90a928d2849efddb1",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 69,
"avg_line_length": 27.322580645161292,
"alnum_prop": 0.6080283353010626,
"repo_name": "musec/py-cdg",
"id": "e98e72cf0f8889926f2a60cf3d3e0e31b5eee70e",
"size": "1742",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cdg/simplify.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "80"
},
{
"name": "Python",
"bytes": "25253"
}
],
"symlink_target": ""
} |
"""test_args module contains test for input arguments.
It checks if input argument passed by user is valid or not.
If any invalid data is found,
the called function in test_args returns false"""
import numpy as np
from . import pose
from . import super_pose
def is_mat_list(list_matrices):
"""is_mat_list checks(arg1) checks if arg1
is a list containing numpy matrix data type elements or not.
If not, False is returned."""
flag = True
if isinstance(list_matrices, list):
for matrix in list_matrices:
if not isinstance(matrix, np.matrix):
flag = False
# TODO Check for matrix dimensions?
else:
flag = False
return flag
def unit_check(unit):
if unit == 'rad' or unit == 'deg':
pass
else:
raise AssertionError("Invalid unit value passed. Must be 'rad' or 'deg' only.")
def is_vector(item):
assert isinstance(item, np.matrix)
assert (item.shape[0] == 1 and item.shape[1] > 1) \
or (item.shape[0] > 1 and item.shape[1] == 1)
# ------------------------------------------------------
# TRANSFORMS CHECKS
# ------------------------------------------------------
def rpy2r(theta, order):
pass # TODO Implement this
def np2vtk(mat):
pass # TODO
def tr2angvec(tr, unit):
pass # TODO
def tr2eul(tr, unit, flip):
pass # TODO
def tr2rpy(tr, unit, order):
pass # TODO
# ------------------------------------------------------
# SUPER POSE CHECKS
# ------------------------------------------------------
def valid_pose(obj):
# TODO -- Check if its a valid pose object
assert isinstance(obj, super_pose.SuperPose)
def super_pose_appenditem(obj, item):
valid_pose(obj)
if isinstance(item, super_pose.SuperPose):
assert type(obj) is type(item)
elif isinstance(item, np.matrix):
# TODO valid matrix check ?
pass
else:
raise AssertionError('Invalid data type of item to append. '
'Data types allowed: numpy matrix and super_pose.SuperPose')
def super_pose_multiply_check(obj, other):
if isinstance(other, super_pose.SuperPose):
assert type(obj) is type(other)
assert (obj.length == other.length) \
or (obj.length == 1 and other.length > 1) \
or (obj.length > 1 and other.length == 1)
elif isinstance(other, np.matrix):
assert other.shape[1] == 1 # Should be vector
assert obj.shape[1] == other.shape[0] # Inner Dimensions must match
def super_pose_divide_check(obj, other):
assert type(obj) is type(other)
assert obj.length == other.length or obj.length == 1 or other.length == 1
def super_pose_add_sub_check(obj, other):
valid_pose(obj)
valid_pose(other)
assert type(obj) is type(other)
assert obj.length == 1 and other.length == 1
# TODO Allow pose list ?
def super_pose_subclass_check(obj, other):
pass
# ----------------- POSE.SO2 CHECKS -------------------------
def so2_angle_list_check(ang_list):
for each in ang_list:
assert isinstance(each, int) or isinstance(each, float)
def so2_valid(obj):
assert type(obj) is pose.SO2
for each in obj:
assert each.shape == (2, 2)
assert abs(np.linalg.det(each) - 1) < np.spacing([1])[0]
def so2_input_matrix(args_in):
    det = np.linalg.det(args_in)  # TODO determinant is computed but not yet validated
def so2_input_types_check(args_in):
assert isinstance(args_in, np.matrix) \
or isinstance(args_in, list) \
or isinstance(args_in, int) \
or isinstance(args_in, float) \
or isinstance(args_in, pose.SO2) \
or args_in is None
def so2_interp_check(obj1, obj2, s):
assert type(obj2) is pose.SO2
assert 0 <= s <= 1
assert obj1.length == obj2.length # TODO Intended behaviour ?
# ----------------- POSE.SO2 CHECKS ----------------------------
# ----------------- POSE.SE2 CHECKS ----------------------------
def se2_constructor_args_check(x, y, rot, theta, so2, se2):
pass
def se2_valid(obj):
# TODO
pass
# ----------------- POSE.SE2 CHECKS ----------------------------
# ----------------- POSE.SO3 CHECKS ----------------------------
def so3_constructor_args_check(args_in):
pass
# ----------------- POSE.SO3 CHECKS ----------------------------
# ----------------- SUPER POSE CHECKS ---------------------------
| {
"content_hash": "816276364638924de740889df3c9649e",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 89,
"avg_line_length": 27.01219512195122,
"alnum_prop": 0.5471783295711061,
"repo_name": "adityadua24/robopy",
"id": "2511fa18ce37d9b4674d6fc13636c019214d9ded",
"size": "4471",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "robopy/base/check_args.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "205735"
}
],
"symlink_target": ""
} |
from django.conf.urls import include, url
from django.contrib import admin
from jsframework import urls as js_urls
urlpatterns = [
# Examples:
# url(r'^$', 'django_drf_starter_project.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^exercise/', include('exercise_customization_database.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^api/', include('exercise_customization_database.api.urls')),
url(r'', include(js_urls)),
]
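# Editor's note (illustrative, not part of the original URLconf): requests are
# matched top-down, so /exercise/... and /api/... are dispatched to the
# exercise_customization_database URLconfs, /admin/ to the admin site, and any
# other path falls through to the jsframework URLs included last.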
| {
"content_hash": "1da0e6d58bce22985d0fcf9921e0f18b",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 72,
"avg_line_length": 32.8,
"alnum_prop": 0.6686991869918699,
"repo_name": "itsClay/Exercise-Customization-Database",
"id": "6d046abe2669919de0f6d82f10780d6b86bfccae",
"size": "492",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_drf_starter_project/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "187"
},
{
"name": "HTML",
"bytes": "3944"
},
{
"name": "JavaScript",
"bytes": "2582"
},
{
"name": "Python",
"bytes": "15565"
}
],
"symlink_target": ""
} |
from __future__ import print_function, unicode_literals
from future.builtins import input, int
from optparse import make_option
try:
from urllib.parse import urlparse
except:
from urlparse import urlparse
from django.contrib.auth import get_user_model
from django.contrib.redirects.models import Redirect
from django.contrib.sites.models import Site
from django.core.management.base import BaseCommand, CommandError
from django.utils.encoding import force_text
from django.utils.html import strip_tags
from zhiliao.blog.models import BlogPost, BlogCategory
from zhiliao.conf import settings
from zhiliao.core.models import CONTENT_STATUS_DRAFT
from zhiliao.core.models import CONTENT_STATUS_PUBLISHED
from zhiliao.generic.models import AssignedKeyword, Keyword, ThreadedComment
from zhiliao.pages.models import RichTextPage
from zhiliao.utils.html import decode_entities
User = get_user_model()
class BaseImporterCommand(BaseCommand):
"""
    Base importer command for blogging-platform-specific management
commands to subclass when importing blog posts into Mezzanine.
The ``handle_import`` method should be overridden to provide the
import mechanism specific to the blogging platform being dealt with.
"""
option_list = BaseCommand.option_list + (
make_option("-m", "--mezzanine-user", dest="mezzanine_user",
help="Mezzanine username to assign the imported blog posts to."),
make_option("--noinput", action="store_false", dest="interactive",
default=True, help="Do NOT prompt for input of any kind. "
"Fields will be truncated if too long."),
make_option("-n", "--navigation", action="store_true",
dest="in_navigation", help="Add any imported pages to navigation"),
make_option("-f", "--footer", action="store_true", dest="in_footer",
help="Add any imported pages to footer navigation"),
)
def __init__(self, **kwargs):
self.posts = []
self.pages = []
super(BaseImporterCommand, self).__init__(**kwargs)
def add_post(self, title=None, content=None, old_url=None, pub_date=None,
tags=None, categories=None, comments=None):
"""
Adds a post to the post list for processing.
- ``title`` and ``content`` are strings for the post.
- ``old_url`` is a string that a redirect will be created for.
- ``pub_date`` is assumed to be a ``datetime`` object.
- ``tags`` and ``categories`` are sequences of strings.
- ``comments`` is a sequence of dicts - each dict should be the
return value of ``add_comment``.
"""
if not title:
title = strip_tags(content).split(". ")[0]
title = decode_entities(title)
if categories is None:
categories = []
if tags is None:
tags = []
if comments is None:
comments = []
self.posts.append({
"title": force_text(title),
"publish_date": pub_date,
"content": force_text(content),
"categories": categories,
"tags": tags,
"comments": comments,
"old_url": old_url,
})
return self.posts[-1]
def add_page(self, title=None, content=None, old_url=None,
tags=None, old_id=None, old_parent_id=None):
"""
Adds a page to the list of pages to be imported - used by the
Wordpress importer.
"""
if not title:
text = decode_entities(strip_tags(content)).replace("\n", " ")
title = text.split(". ")[0]
if tags is None:
tags = []
self.pages.append({
"title": title,
"content": content,
"tags": tags,
"old_url": old_url,
"old_id": old_id,
"old_parent_id": old_parent_id,
})
def add_comment(self, post=None, name=None, email=None, pub_date=None,
website=None, body=None):
"""
Adds a comment to the post provided.
"""
if post is None:
if not self.posts:
raise CommandError("Cannot add comments without posts")
post = self.posts[-1]
post["comments"].append({
"user_name": name,
"user_email": email,
"submit_date": pub_date,
"user_url": website,
"comment": body,
})
def trunc(self, model, prompt, **fields):
"""
Truncates fields values for the given model. Prompts for a new
value if truncation occurs.
"""
for field_name, value in fields.items():
field = model._meta.get_field(field_name)
max_length = getattr(field, "max_length", None)
if not max_length:
continue
elif not prompt:
fields[field_name] = value[:max_length]
continue
while len(value) > max_length:
encoded_value = value.encode("utf-8")
new_value = input("The value for the field %s.%s exceeds "
"its maximum length of %s chars: %s\n\nEnter a new value "
"for it, or press return to have it truncated: " %
(model.__name__, field_name, max_length, encoded_value))
value = new_value if new_value else value[:max_length]
fields[field_name] = value
return fields
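    # Editor's note (illustrative, not part of the original importer): with
    # prompting disabled, ``trunc`` silently clips each value to the model
    # field's ``max_length``. Assuming BlogPost.title has max_length=500:
    #
    #     data = self.trunc(BlogPost, False, title="x" * 600)
    #     len(data["title"])   # -> 500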
def handle(self, *args, **options):
"""
Processes the converted data into the Mezzanine database correctly.
Attributes:
            mezzanine_user: the user the imported posts and pages are assigned to
date_format: the format the dates are in for posts and comments
"""
mezzanine_user = options.get("mezzanine_user")
site = Site.objects.get_current()
verbosity = int(options.get("verbosity", 1))
prompt = options.get("interactive")
# Validate the Mezzanine user.
if mezzanine_user is None:
raise CommandError("No Mezzanine user has been specified")
try:
mezzanine_user = User.objects.get(username=mezzanine_user)
except User.DoesNotExist:
raise CommandError("Invalid Mezzanine user: %s" % mezzanine_user)
# Run the subclassed ``handle_import`` and save posts, tags,
# categories, and comments to the DB.
self.handle_import(options)
for post_data in self.posts:
categories = post_data.pop("categories")
tags = post_data.pop("tags")
comments = post_data.pop("comments")
old_url = post_data.pop("old_url")
post_data = self.trunc(BlogPost, prompt, **post_data)
initial = {
"title": post_data.pop("title"),
"user": mezzanine_user,
}
if post_data["publish_date"] is None:
post_data["status"] = CONTENT_STATUS_DRAFT
post, created = BlogPost.objects.get_or_create(**initial)
for k, v in post_data.items():
setattr(post, k, v)
post.save()
if created and verbosity >= 1:
print("Imported post: %s" % post)
for name in categories:
cat = self.trunc(BlogCategory, prompt, title=name)
if not cat["title"]:
continue
cat, created = BlogCategory.objects.get_or_create(**cat)
if created and verbosity >= 1:
print("Imported category: %s" % cat)
post.categories.add(cat)
for comment in comments:
comment = self.trunc(ThreadedComment, prompt, **comment)
comment["site"] = site
post.comments.add(ThreadedComment(**comment))
if verbosity >= 1:
print("Imported comment by: %s" % comment["user_name"])
self.add_meta(post, tags, prompt, verbosity, old_url)
# Create any pages imported (Wordpress can include pages)
in_menus = []
footer = [menu[0] for menu in settings.PAGE_MENU_TEMPLATES
if menu[-1] == "pages/menus/footer.html"]
if options["in_navigation"]:
in_menus = [menu[0] for menu in settings.PAGE_MENU_TEMPLATES]
if footer and not options["in_footer"]:
in_menus.remove(footer[0])
elif footer and options["in_footer"]:
in_menus = footer
parents = []
for page in self.pages:
tags = page.pop("tags")
old_url = page.pop("old_url")
old_id = page.pop("old_id")
old_parent_id = page.pop("old_parent_id")
page = self.trunc(RichTextPage, prompt, **page)
page["status"] = CONTENT_STATUS_PUBLISHED
page["in_menus"] = in_menus
page, created = RichTextPage.objects.get_or_create(**page)
if created and verbosity >= 1:
print("Imported page: %s" % page)
self.add_meta(page, tags, prompt, verbosity, old_url)
parents.append({
'old_id': old_id,
'old_parent_id': old_parent_id,
'page': page,
})
for obj in parents:
if obj['old_parent_id']:
for parent in parents:
if parent['old_id'] == obj['old_parent_id']:
obj['page'].parent = parent['page']
obj['page'].save()
break
def add_meta(self, obj, tags, prompt, verbosity, old_url=None):
"""
Adds tags and a redirect for the given obj, which is a blog
post or a page.
"""
for tag in tags:
keyword = self.trunc(Keyword, prompt, title=tag)
keyword, created = Keyword.objects.get_or_create_iexact(**keyword)
obj.keywords.add(AssignedKeyword(keyword=keyword))
if created and verbosity >= 1:
print("Imported tag: %s" % keyword)
if old_url is not None:
old_path = urlparse(old_url).path
if not old_path.strip("/"):
return
redirect = self.trunc(Redirect, prompt, old_path=old_path)
redirect['site'] = Site.objects.get_current()
redirect, created = Redirect.objects.get_or_create(**redirect)
redirect.new_path = obj.get_absolute_url()
redirect.save()
if created and verbosity >= 1:
print("Created redirect for: %s" % old_url)
def handle_import(self, options):
"""
Should be overridden by subclasses - performs the conversion from
the originating data source into the lists of posts and comments
ready for processing.
"""
raise NotImplementedError
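# --- Editor's illustrative sketch (not part of the original module) ---
# A minimal subclass only needs to override ``handle_import`` and feed the
# base class through ``add_post``/``add_comment``. The flat JSON layout and
# the ``json_file`` option below are assumptions made purely for
# illustration; real importers live under management/commands/ and name the
# class ``Command``.
class ExampleJsonImporterCommand(BaseImporterCommand):

    def handle_import(self, options):
        import json
        with open(options.get("json_file", "posts.json")) as f:
            entries = json.load(f)
        for entry in entries:
            post = self.add_post(
                title=entry.get("title"),
                content=entry.get("body", ""),
                pub_date=None,                     # imported as a draft
                tags=entry.get("tags", []),
                categories=entry.get("categories", []),
                old_url=entry.get("url"),
            )
            for comment in entry.get("comments", []):
                self.add_comment(post=post,
                                 name=comment.get("author"),
                                 email=comment.get("email"),
                                 body=comment.get("text"))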
| {
"content_hash": "5f87816c478a0b0c364142c0e12ad5a1",
"timestamp": "",
"source": "github",
"line_count": 269,
"max_line_length": 79,
"avg_line_length": 40.825278810408925,
"alnum_prop": 0.562921143689674,
"repo_name": "gladgod/zhiliao",
"id": "02cc6257d78840b30e5ad8d347ee3965117528f2",
"size": "10982",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zhiliao/blog/management/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "385074"
},
{
"name": "HTML",
"bytes": "237361"
},
{
"name": "JavaScript",
"bytes": "1117640"
},
{
"name": "Nginx",
"bytes": "1095"
},
{
"name": "Python",
"bytes": "707535"
},
{
"name": "Shell",
"bytes": "4572"
}
],
"symlink_target": ""
} |