function: string (length 11 to 56k)
repo_name: string (length 5 to 60)
features: sequence
def noun_chunks(doclike: Union[Doc, Span]) -> Iterator[Span]:
    """Detect base noun phrases from a dependency parse. Works on both Doc and Span."""
    # fmt: off
    labels = ["nsubj", "nsubj:pass", "obj", "iobj", "ROOT", "appos", "nmod", "nmod:poss"]
    # fmt: on
    doc = doclike.doc  # Ensure works on both Doc and Span.
    if not doc.has_annotation("DEP"):
        raise ValueError(Errors.E029)
    np_deps = [doc.vocab.strings[label] for label in labels]
    conj = doc.vocab.strings.add("conj")
    np_label = doc.vocab.strings.add("NP")
    prev_end = -1
    for i, word in enumerate(doclike):
        if word.pos not in (NOUN, PROPN, PRON):
            continue
        # Prevent nested chunks from being produced
        if word.left_edge.i <= prev_end:
            continue
        if word.dep in np_deps:
            prev_end = word.right_edge.i
            yield word.left_edge.i, word.right_edge.i + 1, np_label
        elif word.dep == conj:
            head = word.head
            while head.dep == conj and head.head.i < head.i:
                head = head.head
            # If the head is an NP, and we're coordinated to it, we're an NP
            if head.dep in np_deps:
                prev_end = word.right_edge.i
                yield word.left_edge.i, word.right_edge.i + 1, np_label
spacy-io/spaCy
[ 25459, 4045, 25459, 98, 1404400540 ]
def g(x):
    if x == 0:
        return 0
    return 1
ktok07b6/polyphony
[ 90, 7, 90, 1, 1448846734 ]
def f(v, i, j, k):
    if i == 0:
        return v
    elif i == 1:
        return v
    elif i == 2:
        h(g(j) + g(k))
        return v
    elif i == 3:
        for m in range(j):
            v += 2
        return v
    else:
        for n in range(i):
            v += 1
        return v
ktok07b6/polyphony
[ 90, 7, 90, 1, 1448846734 ]
def test():
    assert 1 == if28(0, 1, 1, 0, 0)
    assert 2 == if28(0, 2, 0, 0, 0)
    assert 3 == if28(0, 3, 1, 0, 0)
    assert 4 == if28(0, 4, 2, 0, 0)
    assert 5 == if28(0, 5, 2, 1, 1)
    assert 6 == if28(0, 6, 2, 2, 2)
    assert 7 == if28(0, 7, 3, 0, 0)
    assert 10 == if28(0, 8, 3, 1, 1)
    assert 13 == if28(0, 9, 3, 2, 2)
    assert 14 == if28(0, 10, 4, 0, 0)
ktok07b6/polyphony
[ 90, 7, 90, 1, 1448846734 ]
def __init__(self, domain, *args, **kwargs):
    super(BaseWorkflowQuerySet, self).__init__(*args, **kwargs)
    Domain.check(domain)
    self.domain = domain
botify-labs/python-simple-workflow
[ 18, 4, 18, 15, 1364383242 ]
def domain(self):
    if not hasattr(self, '_domain'):
        self._domain = None
    return self._domain
botify-labs/python-simple-workflow
[ 18, 4, 18, 15, 1364383242 ]
def domain(self, value):
    # Avoiding circular import
    from swf.models.domain import Domain

    if not isinstance(value, Domain):
        err = "domain property has to be of "\
              "swf.model.domain.Domain type, not %r"\
              % type(value)
        raise TypeError(err)
    self._domain = value
botify-labs/python-simple-workflow
[ 18, 4, 18, 15, 1364383242 ]
def _list_items(self, *args, **kwargs):
    response = {'nextPageToken': None}
    while 'nextPageToken' in response:
        response = self._list(
            *args,
            next_page_token=response['nextPageToken'],
            **kwargs
        )
        for item in response[self._infos_plural]:
            yield item
botify-labs/python-simple-workflow
[ 18, 4, 18, 15, 1364383242 ]
def to_WorkflowType(self, domain, workflow_info, **kwargs):
    # Not using get_subkey so that it raises explicitly when, for
    # example, the workflowType name doesn't exist
    return WorkflowType(
        domain,
        workflow_info['workflowType']['name'],
        workflow_info['workflowType']['version'],
        status=workflow_info['status'],
        **kwargs
    )
botify-labs/python-simple-workflow
[ 18, 4, 18, 15, 1364383242 ]
def get_or_create(self, name, version,
                  status=REGISTERED,
                  creation_date=0.0,
                  deprecation_date=0.0,
                  task_list=None,
                  child_policy=CHILD_POLICIES.TERMINATE,
                  execution_timeout='300',
                  decision_tasks_timeout='300',
                  description=None,
                  *args, **kwargs):
    """Fetches, or creates the WorkflowType with ``name`` and ``version``

    When trying to fetch a matching workflow type, only the name and
    version parameters are taken into account. If you want to make sure
    that, should the workflow type need to be created, it is created with
    specific values, provide them explicitly.

    :param name: name of the workflow type
    :type name: String

    :param version: workflow type version
    :type version: String

    :param status: workflow type status
    :type status: swf.core.ConnectedSWFObject.{REGISTERED, DEPRECATED}

    :param creation_date: creation date of the current WorkflowType
    :type creation_date: float (timestamp)

    :param deprecation_date: deprecation date of WorkflowType
    :type deprecation_date: float (timestamp)

    :param task_list: task list to use for scheduling decision tasks
                      for executions of this workflow type
    :type task_list: String

    :param child_policy: policy to use for the child workflow executions
                         when a workflow execution of this type is terminated
    :type child_policy: CHILD_POLICIES.{TERMINATE | REQUEST_CANCEL | ABANDON}

    :param execution_timeout: maximum duration for executions of this
                              workflow type
    :type execution_timeout: String

    :param decision_tasks_timeout: maximum duration of decision tasks
                                   for this workflow type
    :type decision_tasks_timeout: String

    :param description: Textual description of the workflow type
    :type description: String

    :returns: Fetched or created WorkflowType model object
    :rtype: WorkflowType
    """
    try:
        return self.get(name, version,
                        task_list=task_list,
                        child_policy=child_policy,
                        execution_timeout=execution_timeout,
                        decision_tasks_timeout=decision_tasks_timeout)
    except DoesNotExistError:
        try:
            return self.create(
                name, version,
                status=status,
                creation_date=creation_date,
                deprecation_date=deprecation_date,
                task_list=task_list,
                child_policy=child_policy,
                execution_timeout=execution_timeout,
                decision_tasks_timeout=decision_tasks_timeout,
                description=description,
            )
        # A race condition can happen if two workflows try to register the same type
        except AlreadyExistsError:
            return self.get(name, version,
                            task_list=task_list,
                            child_policy=child_policy,
                            execution_timeout=execution_timeout,
                            decision_tasks_timeout=decision_tasks_timeout)
botify-labs/python-simple-workflow
[ 18, 4, 18, 15, 1364383242 ]
def filter(self, domain=None, registration_status=REGISTERED, name=None, *args, **kwargs):
    """Filters workflow types based on the ``domain`` they belong to,
    their ``status``, and/or their ``name``

    :param domain: domain the workflow type belongs to
    :type domain: swf.models.domain.Domain

    :param registration_status: workflow type registration status to match.
                                Valid values are:
                                * ``swf.constants.REGISTERED``
                                * ``swf.constants.DEPRECATED``
    :type registration_status: string

    :param name: workflow type name to match
    :type name: string

    :returns: list of matched WorkflowType model objects
    :rtype: list
    """
    # As WorkflowTypeQuery has to be built against a specific domain
    # name, the domain filter is optional, not mandatory.
    domain = domain or self.domain
    return [self.to_WorkflowType(domain, wf) for wf
            in self._list_items(domain.name, registration_status, name=name)]
botify-labs/python-simple-workflow
[ 18, 4, 18, 15, 1364383242 ]
def create(self, name, version, status=REGISTERED, creation_date=0.0, deprecation_date=0.0, task_list=None, child_policy=CHILD_POLICIES.TERMINATE, execution_timeout='300', decision_tasks_timeout='300', description=None, *args, **kwargs): """Creates a new remote workflow type and returns the created WorkflowType model instance. :param name: name of the workflow type :type name: String :param version: workflow type version :type version: String :param status: workflow type status :type status: swf.core.ConnectedSWFObject.{REGISTERED, DEPRECATED} :param creation_date: creation date of the current WorkflowType :type creation_date: float (timestamp) :param deprecation_date: deprecation date of WorkflowType :type deprecation_date: float (timestamp) :param task_list: task list to use for scheduling decision tasks for executions of this workflow type :type task_list: String :param child_policy: policy to use for the child workflow executions when a workflow execution of this type is terminated :type child_policy: CHILD_POLICIES.{TERMINATE | REQUEST_CANCEL | ABANDON} :param execution_timeout: maximum duration for executions of this workflow type :type execution_timeout: String :param decision_tasks_timeout: maximum duration of decision tasks for this workflow type :type decision_tasks_timeout: String :param description: Textual description of the workflow type :type description: String """ workflow_type = WorkflowType( self.domain, name, version, status=status, creation_date=creation_date, deprecation_date=deprecation_date, task_list=task_list, child_policy=child_policy, execution_timeout=execution_timeout, decision_tasks_timeout=decision_tasks_timeout, description=description ) workflow_type.save() return workflow_type
botify-labs/python-simple-workflow
[ 18, 4, 18, 15, 1364383242 ]
def _is_valid_status_param(self, status, param):
    statuses = {
        WorkflowExecution.STATUS_OPEN: set([
            'oldest_date',
            'latest_date',
        ]),
        WorkflowExecution.STATUS_CLOSED: set([
            'start_latest_date',
            'start_oldest_date',
            'close_latest_date',
            'close_oldest_date',
            'close_status',
        ]),
    }
    return param in statuses.get(status, set())
botify-labs/python-simple-workflow
[ 18, 4, 18, 15, 1364383242 ]
def list_workflow_executions(self, status, *args, **kwargs):
    statuses = {
        WorkflowExecution.STATUS_OPEN: 'open',
        WorkflowExecution.STATUS_CLOSED: 'closed',
    }

    # boto.swf.list_closed_workflow_executions expects a mandatory
    # `start_oldest_date` kwarg, while boto.swf.list_open_workflow_executions
    # expects a mandatory `oldest_date` kwarg.
    if status == WorkflowExecution.STATUS_OPEN:
        kwargs['oldest_date'] = kwargs.pop('start_oldest_date')

    try:
        method = 'list_{}_workflow_executions'.format(statuses[status])
        return getattr(self.connection, method)(*args, **kwargs)
    except KeyError:
        raise ValueError("Unknown status provided: %s" % status)
botify-labs/python-simple-workflow
[ 18, 4, 18, 15, 1364383242 ]
def to_WorkflowExecution(self, domain, execution_info, **kwargs): workflow_type = WorkflowType( self.domain, execution_info['workflowType']['name'], execution_info['workflowType']['version'] ) return WorkflowExecution( domain, get_subkey(execution_info, ['execution', 'workflowId']), # workflow_id run_id=get_subkey(execution_info, ['execution', 'runId']), workflow_type=workflow_type, status=execution_info.get('executionStatus'), close_status=execution_info.get('closeStatus'), tag_list=execution_info.get('tagList'), start_timestamp=execution_info.get('startTimestamp'), close_timestamp=execution_info.get('closeTimestamp'), cancel_requested=execution_info.get('cancelRequested'), parent=execution_info.get('parent'), **kwargs )
botify-labs/python-simple-workflow
[ 18, 4, 18, 15, 1364383242 ]
def filter(self, status=WorkflowExecution.STATUS_OPEN,
           tag=None,
           workflow_id=None,
           workflow_type_name=None,
           workflow_type_version=None,
           *args, **kwargs):
    """Filters workflow executions based on the provided criteria kwargs

    :param status: workflow executions with the provided status will be kept.
                   Valid values are:
                   * ``swf.models.WorkflowExecution.STATUS_OPEN``
                   * ``swf.models.WorkflowExecution.STATUS_CLOSED``
    :type status: string

    :param tag: workflow executions containing the tag will be kept
    :type tag: String

    :param workflow_id: workflow executions attached to the id will be kept
    :type workflow_id: String

    :param workflow_type_name: workflow executions attached to the workflow type
                               with provided name will be kept
    :type workflow_type_name: String

    :param workflow_type_version: workflow executions attached to the workflow type
                                  of the provided version will be kept
    :type workflow_type_version: String

    **Be aware that** querying over status allows the usage of status-specific kwargs

    * STATUS_OPEN

    :param start_latest_date: latest start or close date and time to return (in days)
    :type start_latest_date: int

    * STATUS_CLOSED

    :param start_latest_date: workflow executions that meet the start time criteria
                              of the filter are kept (in days)
    :type start_latest_date: int

    :param start_oldest_date: workflow executions that meet the start time criteria
                              of the filter are kept (in days)
    :type start_oldest_date: int

    :param close_latest_date: workflow executions that meet the close time criteria
                              of the filter are kept (in days)
    :type close_latest_date: int

    :param close_oldest_date: workflow executions that meet the close time criteria
                              of the filter are kept (in days)
    :type close_oldest_date: int

    :param close_status: must match the close status of an execution for it
                         to meet the criteria of this filter. Valid values are:
                         * ``CLOSE_STATUS_COMPLETED``
                         * ``CLOSE_STATUS_FAILED``
                         * ``CLOSE_STATUS_CANCELED``
                         * ``CLOSE_STATUS_TERMINATED``
                         * ``CLOSE_STATUS_CONTINUED_AS_NEW``
                         * ``CLOSE_TIMED_OUT``
    :type close_status: string

    :returns: workflow executions objects list
    :rtype: list
    """
    # As WorkflowTypeQuery has to be built against a specific domain
    # name, the domain filter is optional, not mandatory.
    invalid_kwargs = self._validate_status_parameters(status, kwargs)
    if invalid_kwargs:
        err_msg = 'Invalid keyword arguments supplied: {}'.format(
            ', '.join(invalid_kwargs))
        raise InvalidKeywordArgumentError(err_msg)

    if status == WorkflowExecution.STATUS_OPEN:
        oldest_date = kwargs.pop('oldest_date', 30)
    else:
        # The SWF docs on ListClosedWorkflowExecutions state that:
        #
        #   "startTimeFilter and closeTimeFilter are mutually exclusive"
        #
        # so we must figure out if we have to add a default value for
        # start_oldest_date or not.
        if "close_latest_date" in kwargs or "close_oldest_date" in kwargs:
            default_oldest_date = None
        else:
            default_oldest_date = 30
        oldest_date = kwargs.pop('start_oldest_date', default_oldest_date)

    # Compute a timestamp from the delta in days we got from params.
    # If oldest_date is blank at this point, it's because we didn't want
    # it, so leave it blank and assume the user provided another time filter.
    if oldest_date:
        start_oldest_date = int(datetime_timestamp(past_day(oldest_date)))
    else:
        start_oldest_date = None

    return [self.to_WorkflowExecution(self.domain, wfe) for wfe
            in self._list_items(
                *args,
                domain=self.domain.name,
                status=status,
                workflow_id=workflow_id,
                workflow_name=workflow_type_name,
                workflow_version=workflow_type_version,
                start_oldest_date=start_oldest_date,
                tag=tag,
                **kwargs
            )]
botify-labs/python-simple-workflow
[ 18, 4, 18, 15, 1364383242 ]
def network_resnet(octree, flags, training=True, reuse=None): depth = flags.depth channels = [2048, 1024, 512, 256, 128, 64, 32, 16, 8] with tf.variable_scope("ocnn_resnet", reuse=reuse): data = octree_property(octree, property_name="feature", dtype=tf.float32, depth=depth, channel=flags.channel) data = tf.reshape(data, [1, flags.channel, -1, 1]) with tf.variable_scope("conv1"): data = octree_conv_bn_relu(data, octree, depth, channels[depth], training) for d in range(depth, 2, -1): for i in range(0, flags.resblock_num): with tf.variable_scope('resblock_%d_%d' % (d, i)): data = octree_resblock(data, octree, d, channels[d], 1, training) with tf.variable_scope('max_pool_%d' % d): data, _ = octree_max_pool(data, octree, d) with tf.variable_scope("global_average"): data = octree_full_voxel(data, depth=2) data = tf.reduce_mean(data, 2)
microsoft/O-CNN
[ 618, 170, 618, 2, 1497326634 ]
def network_ocnn(octree, flags, training=True, reuse=None): depth = flags.depth channels = [512, 256, 128, 64, 32, 16, 8, 4, 2] with tf.variable_scope("ocnn", reuse=reuse): data = octree_property(octree, property_name="feature", dtype=tf.float32, depth=depth, channel=flags.channel) data = tf.reshape(data, [1, flags.channel, -1, 1]) for d in range(depth, 2, -1): with tf.variable_scope('depth_%d' % d): data = octree_conv_bn_relu(data, octree, d, channels[d], training) data, _ = octree_max_pool(data, octree, d) with tf.variable_scope("full_voxel"): data = octree_full_voxel(data, depth=2) data = tf.layers.dropout(data, rate=0.5, training=training) with tf.variable_scope("fc1"): data = fc_bn_relu(data, channels[2], training=training) data = tf.layers.dropout(data, rate=0.5, training=training) with tf.variable_scope("fc2"): logit = dense(data, flags.nout, use_bias=True) return logit
microsoft/O-CNN
[ 618, 170, 618, 2, 1497326634 ]
def create(params, env=None, headers=None): return request.send('post', request.uri_path("plans"), params, env, headers)
chargebee/chargebee-python
[ 34, 29, 34, 14, 1349501361 ]
def update(id, params=None, env=None, headers=None): return request.send('post', request.uri_path("plans",id), params, env, headers)
chargebee/chargebee-python
[ 34, 29, 34, 14, 1349501361 ]
def list(params=None, env=None, headers=None): return request.send_list_request('get', request.uri_path("plans"), params, env, headers)
chargebee/chargebee-python
[ 34, 29, 34, 14, 1349501361 ]
def retrieve(id, env=None, headers=None): return request.send('get', request.uri_path("plans",id), None, env, headers)
chargebee/chargebee-python
[ 34, 29, 34, 14, 1349501361 ]
def delete(id, env=None, headers=None): return request.send('post', request.uri_path("plans",id,"delete"), None, env, headers)
chargebee/chargebee-python
[ 34, 29, 34, 14, 1349501361 ]
def copy(params, env=None, headers=None): return request.send('post', request.uri_path("plans","copy"), params, env, headers)
chargebee/chargebee-python
[ 34, 29, 34, 14, 1349501361 ]
def get_openstack_nova_client(config): return get_openstack_clients(config)[0]
CSC-IT-Center-for-Science/pouta-blueprints
[ 7, 6, 7, 71, 1418128274 ]
def get_openstack_cinder_client(config): return get_openstack_clients(config)[2]
CSC-IT-Center-for-Science/pouta-blueprints
[ 7, 6, 7, 71, 1418128274 ]
def _format_nics(nics):
    """Create a networks data structure for python-novaclient.

    **Note** "auto" is the safest default to pass to novaclient

    :param nics: either None, one of the strings "auto" or "none", or a string
                 with a comma-separated list of nic IDs from OpenStack.
    :return: A data structure that can be passed as nics
    """
    if not nics:
        return "auto"
    if nics == "none":
        return "none"
    if nics.lower() == "auto":
        return "auto"

    return [{"net-id": item, "v4-fixed-ip": ""} for item in nics.strip().split(",")]
CSC-IT-Center-for-Science/pouta-blueprints
[ 7, 6, 7, 71, 1418128274 ]
def execute(self, server_id, config): logging.debug("getting server %s" % server_id) nc = get_openstack_nova_client(config) return nc.servers.get(server_id)
CSC-IT-Center-for-Science/pouta-blueprints
[ 7, 6, 7, 71, 1418128274 ]
def execute(self, image_name, config): logging.debug("getting image %s" % image_name) nc = get_openstack_nova_client(config) return nc.glance.find_image(image_name)
CSC-IT-Center-for-Science/pouta-blueprints
[ 7, 6, 7, 71, 1418128274 ]
def execute(self, image_name, config): logging.debug("getting images") nc = get_openstack_nova_client(config) return nc.glance.list()
CSC-IT-Center-for-Science/pouta-blueprints
[ 7, 6, 7, 71, 1418128274 ]
def execute(self, flavor_name, config): logging.debug("getting flavor %s" % flavor_name) nc = get_openstack_nova_client(config) return nc.flavors.find(name=flavor_name)
CSC-IT-Center-for-Science/pouta-blueprints
[ 7, 6, 7, 71, 1418128274 ]
def execute(self, flavor_name, config): logging.debug("getting flavors") nc = get_openstack_nova_client(config) return nc.flavors.list()
CSC-IT-Center-for-Science/pouta-blueprints
[ 7, 6, 7, 71, 1418128274 ]
def execute(self, display_name, master_sg_name, config): logging.debug("create security group %s" % display_name) security_group_name = display_name nc = get_openstack_neutron_client(config) self.secgroup = nc.create_security_group({"security_group": { "name": security_group_name, "description": "Security group generated by Pebbles" }}) self.secgroup_id = self.secgroup["security_group"]["id"] self.secgroup_name = self.secgroup["security_group"]["name"] if master_sg_name: master_sg = nc.find_resource("security_group", master_sg_name) nc.create_security_group_rule({"security_group_rule": dict( security_group_id=self.secgroup_id, protocol='tcp', ethertype='ipv4', port_range_min=1, direction='ingress', port_range_max=65535, remote_group_id=master_sg["id"] )}) nc.create_security_group_rule({"security_group_rule": dict( security_group_id=self.secgroup_id, protocol='udp', ethertype='ipv4', port_range_min=1, direction='ingress', port_range_max=65535, remote_group_id=master_sg["id"] )}) nc.create_security_group_rule({"security_group_rule": dict( security_group_id=self.secgroup_id, protocol='icmp', ethertype='ipv4', port_range_min=1, direction='ingress', port_range_max=255, remote_group_id=master_sg["id"] )}) logging.info("Created security group %s" % self.secgroup_id) return self.secgroup_id
CSC-IT-Center-for-Science/pouta-blueprints
[ 7, 6, 7, 71, 1418128274 ]
def execute(self, display_name, image, root_volume_size, config):
    if root_volume_size:
        logging.debug("creating a root volume for instance %s from image %s" % (display_name, image))
        nc = get_openstack_cinder_client(config)
        volume_name = '%s-root' % display_name
        volume = nc.volumes.create(
            size=root_volume_size,
            imageRef=image.id,
            name=volume_name
        )
        self.volume_id = volume.id
        retries = 0
        while nc.volumes.get(volume.id).status not in ('available',):
            logging.debug("...waiting for volume to be ready")
            time.sleep(5)
            retries += 1
            if retries > 30:
                raise RuntimeError('Volume creation %s is stuck' % volume.id)

        return volume.id
    else:
        logging.debug("no root volume defined")
        return ""
CSC-IT-Center-for-Science/pouta-blueprints
[ 7, 6, 7, 71, 1418128274 ]
def execute(self, display_name, data_volume_size, data_volume_type, config):
    if data_volume_size:
        logging.debug("creating a data volume for instance %s, %d" % (display_name, data_volume_size))
        nc = get_openstack_cinder_client(config)
        volume_name = '%s-data' % display_name
        volume = nc.volumes.create(
            size=data_volume_size,
            name=volume_name,
            volume_type=data_volume_type,
        )
        self.volume_id = volume.id
        retries = 0
        while nc.volumes.get(volume.id).status not in ('available',):
            logging.debug("...waiting for volume to be ready")
            time.sleep(5)
            retries += 1
            if retries > 30:
                raise RuntimeError('Volume creation %s is stuck' % volume.id)

        return volume.id
    else:
        logging.debug("no data volume defined")
        return None
CSC-IT-Center-for-Science/pouta-blueprints
[ 7, 6, 7, 71, 1418128274 ]
def execute(self, display_name, image, flavor, security_group, extra_sec_groups, root_volume_id, nics, userdata, config): logging.debug("provisioning instance %s" % display_name) nc = get_openstack_nova_client(config) sgs = [security_group] if extra_sec_groups: sgs.extend(extra_sec_groups) try: if len(root_volume_id): bdm = {'vda': '%s:::1' % (root_volume_id)} else: bdm = None instance = nc.servers.create( display_name, image.id, flavor.id, key_name=display_name, security_groups=sgs, block_device_mapping=bdm, nics=_format_nics(nics), userdata=userdata,) except Exception as e: logging.error("error provisioning instance: %s" % e) raise e self.instance_id = instance.id logging.debug("instance provisioning successful") return instance.id
CSC-IT-Center-for-Science/pouta-blueprints
[ 7, 6, 7, 71, 1418128274 ]
def execute(self, server_id, config): logging.debug("deprovisioning instance %s" % server_id) nc = get_openstack_nova_client(config) try: server = nc.servers.get(server_id) except NotFound: logging.warn("Server %s not found" % server_id) return if hasattr(server, "security_groups"): for sg in server.security_groups: try: server.remove_security_group(sg['name']) except: logging.warn("Unable to remove security group from server (%s)" % sg) else: logging.warn("no security groups on server!") try: nc.servers.delete(server_id) wait_for_delete(nc.servers, server_id) except Exception as e: logging.warn("Unable to deprovision server %s" % e) return server.name
CSC-IT-Center-for-Science/pouta-blueprints
[ 7, 6, 7, 71, 1418128274 ]
def execute(self, server_id, allocate_public_ip, config):
    logging.info("Allocate IP for server %s" % server_id)
    novaclient = get_openstack_nova_client(config)
    neutronclient = get_openstack_neutron_client(config)

    retries = 0
    while novaclient.servers.get(server_id).status == "BUILDING" or not novaclient.servers.get(server_id).networks:
        logging.debug("...waiting for server to be ready")
        time.sleep(5)
        retries += 1
        if retries > 30:
            raise RuntimeError('Server %s is stuck in building' % server_id)

    server = novaclient.servers.get(server_id)

    if allocate_public_ip:
        ips = neutronclient.list_floatingips()
        allocated_from_pool = False
        free_ips = [ip for ip in ips["floatingips"] if ip["status"] != "ACTIVE"]
        if not free_ips:
            logging.debug("No allocated free IPs left, trying to allocate one")
            try:
                # for backwards compatibility reasons we assume the
                # network is called "public"
                network_id = neutronclient.find_resource("network", "public")
                ip = neutronclient.create_floatingip({
                    "floating_network_id": network_id})
                allocated_from_pool = True
            except neutronclient.exceptions.ClientException as e:
                logging.warning("Cannot allocate IP, quota exceeded?")
                raise e
        else:
            ip = free_ips[0]["floating_ip_address"]
        logging.info("IP assigned is %s" % ip)
        try:
            server.add_floating_ip(ip)
        except Exception as e:
            logging.error(e)
        address_data = {
            'public_ip': ip,
            'allocated_from_pool': allocated_from_pool,
            'private_ip': list(server.networks.values())[0][0],
        }
    else:
        address_data = {
            'public_ip': None,
            'allocated_from_pool': False,
            'private_ip': list(server.networks.values())[0][0],
        }

    return address_data
CSC-IT-Center-for-Science/pouta-blueprints
[ 7, 6, 7, 71, 1418128274 ]
def execute(self, server_id, config): nc = get_openstack_nova_client(config) return nc.volumes.get_server_volumes(server_id)
CSC-IT-Center-for-Science/pouta-blueprints
[ 7, 6, 7, 71, 1418128274 ]
def execute(self, server_id, data_volume_id, config):
    logging.debug("Attach data volume for server %s" % server_id)
    if data_volume_id:
        nc = get_openstack_nova_client(config)
        retries = 0
        while nc.servers.get(server_id).status == "BUILDING" or not nc.servers.get(server_id).networks:
            logging.debug("...waiting for server to be ready")
            time.sleep(5)
            retries += 1
            if retries > 30:
                raise RuntimeError('Server %s is stuck in building' % server_id)

        nc.volumes.create_server_volume(server_id, data_volume_id, '/dev/vdc')
CSC-IT-Center-for-Science/pouta-blueprints
[ 7, 6, 7, 71, 1418128274 ]
def execute(self, display_name, public_key, config): logging.debug("adding user public key") nc = get_openstack_nova_client(config) self.keypair_added = False nc.keypairs.create(display_name, public_key) self.keypair_added = True
CSC-IT-Center-for-Science/pouta-blueprints
[ 7, 6, 7, 71, 1418128274 ]
def execute(self, display_name, config): logging.debug("removing user public key") nc = get_openstack_nova_client(config) try: nc.keypairs.find(name=display_name).delete() except: pass
CSC-IT-Center-for-Science/pouta-blueprints
[ 7, 6, 7, 71, 1418128274 ]
def execute(self, server, config): logging.debug("delete security group") nc = get_openstack_neutron_client(config) security_group = nc.find_resource("security_group", server.name) try: if security_group: nc.delete_security_group(security_group["id"]) except Exception as e: logging.warn("Could not delete security group: %s" % e)
CSC-IT-Center-for-Science/pouta-blueprints
[ 7, 6, 7, 71, 1418128274 ]
def execute(self, server, config): nova = get_openstack_nova_client(config) cinder = get_openstack_cinder_client(config) for volume in nova.volumes.get_server_volumes(server.id): retries = 0 while cinder.volumes.get(volume.id).status not in \ ('available', 'error'): logging.debug("...waiting for volume to be ready") time.sleep(5) retries += 1 if retries > 30: raise RuntimeError('Volume %s is stuck' % volume.id) try: cinder.volumes.delete(volume.id) except NotFound: pass
CSC-IT-Center-for-Science/pouta-blueprints
[ 7, 6, 7, 71, 1418128274 ]
def get_provision_flow(): """ Provisioning flow consisting of three graph flows, each consisting of set of tasks that can execute in parallel. Returns tuple consisting of the whole flow and a dictionary including references to three graph flows for pre-execution customisations. """ pre_flow = gf.Flow('PreBootInstance').add( AddUserPublicKey('add_user_public_key'), GetImage('get_image', provides='image'), GetFlavor('get_flavor', provides='flavor'), CreateRootVolume('create_root_volume', provides='root_volume_id') ) main_flow = gf.Flow('BootInstance').add( CreateSecurityGroup('create_security_group', provides='security_group'), CreateDataVolume('create_data_volume', provides='data_volume_id'), ProvisionInstance('provision_instance', provides='server_id') ) post_flow = gf.Flow('PostBootInstance').add( AllocateIPForInstance('allocate_ip_for_instance', provides='address_data'), AttachDataVolume('attach_data_volume'), RemoveUserPublicKey('remove_user_public_key') ) return (lf.Flow('ProvisionInstance').add(pre_flow, main_flow, post_flow), {'pre': pre_flow, 'main': main_flow, 'post': post_flow})
CSC-IT-Center-for-Science/pouta-blueprints
[ 7, 6, 7, 71, 1418128274 ]
def get_upload_key_flow(): return lf.Flow('UploadKey').add( AddUserPublicKey('upload_key') )
CSC-IT-Center-for-Science/pouta-blueprints
[ 7, 6, 7, 71, 1418128274 ]
def __init__(self, config=None): self._config = config
CSC-IT-Center-for-Science/pouta-blueprints
[ 7, 6, 7, 71, 1418128274 ]
def deprovision_instance(self, server_id, display_name=None, delete_attached_volumes=False): flow, subflows = get_deprovision_flow() if delete_attached_volumes: subflows['main'].add(DeleteVolumes()) try: return taskflow.engines.run(flow, engine='parallel', store=dict( server_id=server_id, config=self._config)) except Exception as e: logging.error(e) return {'error': 'flow failed due to: %s' % (e)}
CSC-IT-Center-for-Science/pouta-blueprints
[ 7, 6, 7, 71, 1418128274 ]
def get_instance_networks(self, instance_id): nc = get_openstack_nova_client(self._config) return nc.servers.get(instance_id).networks
CSC-IT-Center-for-Science/pouta-blueprints
[ 7, 6, 7, 71, 1418128274 ]
def list_flavors(self): nc = get_openstack_nova_client(self._config) return nc.flavors.list()
CSC-IT-Center-for-Science/pouta-blueprints
[ 7, 6, 7, 71, 1418128274 ]
def delete_key(self, key_name): logging.debug('Deleting key: %s' % key_name) nc = get_openstack_nova_client(self._config) try: key = nc.keypairs.find(name=key_name) key.delete() except: logging.warning('Key not found: %s' % key_name)
CSC-IT-Center-for-Science/pouta-blueprints
[ 7, 6, 7, 71, 1418128274 ]
def create_security_group(self, security_group_name, security_group_description): nc = get_openstack_neutron_client(self._config) nc.security_groups.create( security_group_name, "Security group generated by Pebbles")
CSC-IT-Center-for-Science/pouta-blueprints
[ 7, 6, 7, 71, 1418128274 ]
def __init__(self, machine) -> None: """Initialise OPP platform.""" super().__init__(machine) self.opp_connection = {} # type: Dict[str, OPPSerialCommunicator] self.serial_connections = set() # type: Set[OPPSerialCommunicator] self.opp_incands = dict() # type: Dict[str, OPPIncandCard] self.opp_solenoid = [] # type: List[OPPSolenoidCard] self.sol_dict = dict() # type: Dict[str, OPPSolenoid] self.opp_inputs = [] # type: List[Union[OPPInputCard, OPPMatrixCard]] self.inp_dict = dict() # type: Dict[str, OPPSwitch] self.inp_addr_dict = dict() # type: Dict[str, OPPInputCard] self.matrix_inp_addr_dict = dict() # type: Dict[str, OPPMatrixCard] self.read_input_msg = {} # type: Dict[str, bytes] self.neo_card_dict = dict() # type: Dict[str, OPPNeopixelCard] self.matrix_light_cards = dict() # type: Dict[str, OPPModernMatrixLightsCard] self.num_gen2_brd = 0 self.gen2_addr_arr = {} # type: Dict[str, Dict[int, Optional[int]]] self.bad_crc = defaultdict(lambda: 0) self.min_version = defaultdict(lambda: 0xffffffff) # type: Dict[str, int] self._poll_task = {} # type: Dict[str, asyncio.Task] self._incand_task = None # type: Optional[asyncio.Task] self._light_system = None # type: Optional[PlatformBatchLightSystem] self.features['tickless'] = True self.config = self.machine.config_validator.validate_config("opp", self.machine.config.get('opp', {})) self._configure_device_logging_and_debug("OPP", self.config) self._poll_response_received = {} # type: Dict[str, asyncio.Event] assert self.log is not None if self.config['driverboards']: self.machine_type = self.config['driverboards'] else: self.machine_type = self.machine.config['hardware']['driverboards'].lower() if self.machine_type == 'gen1': raise AssertionError("Original OPP boards not currently supported.") if self.machine_type == 'gen2': self.debug_log("Configuring the OPP Gen2 boards") else: self.raise_config_error('Invalid driverboards type: {}'.format(self.machine_type), 15) # Only including responses that should be received self.opp_commands = { ord(OppRs232Intf.INV_CMD): self.inv_resp, ord(OppRs232Intf.EOM_CMD): self.eom_resp, ord(OppRs232Intf.GET_GEN2_CFG): self.get_gen2_cfg_resp, ord(OppRs232Intf.READ_GEN2_INP_CMD): self.read_gen2_inp_resp_initial, ord(OppRs232Intf.GET_VERS_CMD): self.vers_resp, ord(OppRs232Intf.READ_MATRIX_INP): self.read_matrix_inp_resp_initial, }
missionpinball/mpf
[ 176, 127, 176, 108, 1403853986 ]
def stop(self): """Stop hardware and close connections.""" if self._light_system: self._light_system.stop() for task in self._poll_task.values(): task.cancel() self._poll_task = {} if self._incand_task: self._incand_task.cancel() self._incand_task = None for connections in self.serial_connections: connections.stop() self.serial_connections = []
missionpinball/mpf
[ 176, 127, 176, 108, 1403853986 ]
def process_received_message(self, chain_serial, msg): """Send an incoming message from the OPP hardware to the proper method for servicing. Args: ---- chain_serial: Serial of the chain which received the message. msg: Message to parse. """ if len(msg) >= 1: # Verify valid Gen2 address if (msg[0] & 0xe0) == 0x20: if len(msg) >= 2: cmd = msg[1] else: cmd = OppRs232Intf.ILLEGAL_CMD # Look for EOM or INV commands elif msg[0] == ord(OppRs232Intf.INV_CMD) or msg[0] == ord(OppRs232Intf.EOM_CMD): cmd = msg[0] else: cmd = OppRs232Intf.ILLEGAL_CMD else: # No messages received, fake an EOM cmd = OppRs232Intf.EOM_CMD # Can't use try since it swallows too many errors for now if cmd in self.opp_commands: self.opp_commands[cmd](chain_serial, msg) else: self.log.warning("Received unknown serial command?%s. (This is " "very worrisome.)", "".join(HEX_FORMAT % b for b in msg)) # TODO: This means synchronization is lost. Send EOM characters # until they come back self.opp_connection[chain_serial].lost_synch()
missionpinball/mpf
[ 176, 127, 176, 108, 1403853986 ]
def _get_numbers(mask): number = 0 ref = 1 result = [] while mask > ref: if mask & ref: result.append(number) number += 1 ref = ref << 1 return result
missionpinball/mpf
[ 176, 127, 176, 108, 1403853986 ]
def register_processor_connection(self, serial_number, communicator): """Register the processors to the platform. Args: ---- serial_number: Serial number of chain. communicator: Instance of OPPSerialCommunicator """ self.opp_connection[serial_number] = communicator
missionpinball/mpf
[ 176, 127, 176, 108, 1403853986 ]
def update_incand(self):
    """Update all the incandescents connected to OPP hardware.

    This is done once per game loop if changes have been made.

    It is currently assumed that the UART oversampling will guarantee proper
    communication with the boards. If this does not end up being the case,
    this will be changed to update all the incandescents each loop.

    This is used for boards with firmware < 2.1.0
    """
    for incand in self.opp_incands.values():
        if self.min_version[incand.chain_serial] >= 0x02010000:
            continue
        whole_msg = bytearray()
        # Check if any changes have been made
        if incand.old_state is None or (incand.old_state ^ incand.new_state) != 0:
            # Update card
            incand.old_state = incand.new_state
            msg = bytearray()
            msg.append(incand.addr)
            msg.extend(OppRs232Intf.INCAND_CMD)
            msg.extend(OppRs232Intf.INCAND_SET_ON_OFF)
            msg.append((incand.new_state >> 24) & 0xff)
            msg.append((incand.new_state >> 16) & 0xff)
            msg.append((incand.new_state >> 8) & 0xff)
            msg.append(incand.new_state & 0xff)
            msg.extend(OppRs232Intf.calc_crc8_whole_msg(msg))
            whole_msg.extend(msg)

        if whole_msg:
            # Note: No need to send EOM at end of cmds
            send_cmd = bytes(whole_msg)

            if self.debug:
                self.debug_log("Update incand on %s cmd:%s", incand.chain_serial,
                               "".join(HEX_FORMAT % b for b in send_cmd))
            self.send_to_processor(incand.chain_serial, send_cmd)
missionpinball/mpf
[ 176, 127, 176, 108, 1403853986 ]
def get_coil_config_section(cls): """Return coil config section.""" return "opp_coils"
missionpinball/mpf
[ 176, 127, 176, 108, 1403853986 ]
def inv_resp(self, chain_serial, msg): """Parse inventory response. Args: ---- chain_serial: Serial of the chain which received the message. msg: Message to parse. """ self.debug_log("Received Inventory Response: %s for %s", "".join(HEX_FORMAT % b for b in msg), chain_serial) index = 1 self.gen2_addr_arr[chain_serial] = {} while msg[index] != ord(OppRs232Intf.EOM_CMD): if (msg[index] & ord(OppRs232Intf.CARD_ID_TYPE_MASK)) == ord(OppRs232Intf.CARD_ID_GEN2_CARD): self.num_gen2_brd += 1 self.gen2_addr_arr[chain_serial][msg[index]] = None else: self.log.warning("Invalid inventory response %s for %s.", msg[index], chain_serial) index += 1 self.debug_log("Found %d Gen2 OPP boards on %s.", self.num_gen2_brd, chain_serial)
missionpinball/mpf
[ 176, 127, 176, 108, 1403853986 ]
def eom_resp(chain_serial, msg): """Process an EOM. Args: ---- chain_serial: Serial of the chain which received the message. msg: Message to parse. """ # An EOM command can be used to resynchronize communications if message synch is lost
missionpinball/mpf
[ 176, 127, 176, 108, 1403853986 ]
def _bad_crc(self, chain_serial, msg):
    """Show warning and increase counter."""
    self.bad_crc[chain_serial] += 1
    self.log.warning("Chain: %s, msg contains bad CRC: %s.", chain_serial,
                     "".join(HEX_FORMAT % b for b in msg))
missionpinball/mpf
[ 176, 127, 176, 108, 1403853986 ]
def vers_resp(self, chain_serial, msg): """Process version response. Args: ---- chain_serial: Serial of the chain which received the message. msg: Message to parse. """ # Multiple get version responses can be received at once self.debug_log("Received Version Response (Chain: %s): %s", chain_serial, "".join(HEX_FORMAT % b for b in msg)) curr_index = 0 while True: # check that message is long enough, must include crc8 if len(msg) < curr_index + 7: self.log.warning("Msg is too short (Chain: %s): %s.", chain_serial, "".join(HEX_FORMAT % b for b in msg)) self.opp_connection[chain_serial].lost_synch() break # Verify the CRC8 is correct crc8 = OppRs232Intf.calc_crc8_part_msg(msg, curr_index, 6) if msg[curr_index + 6] != ord(crc8): self._bad_crc(chain_serial, msg) break version = (msg[curr_index + 2] << 24) | \ (msg[curr_index + 3] << 16) | \ (msg[curr_index + 4] << 8) | \ msg[curr_index + 5] self.debug_log("Firmware version of board 0x%02x (Chain: %s): %d.%d.%d.%d", msg[curr_index], chain_serial, msg[curr_index + 2], msg[curr_index + 3], msg[curr_index + 4], msg[curr_index + 5]) if msg[curr_index] not in self.gen2_addr_arr[chain_serial]: self.log.warning("Got firmware response for %s but not in inventory at %s", msg[curr_index], chain_serial) else: self.gen2_addr_arr[chain_serial][msg[curr_index]] = version if version < self.min_version[chain_serial]: self.min_version[chain_serial] = version if version == BAD_FW_VERSION: raise AssertionError("Original firmware sent only to Brian before adding " "real version numbers. The firmware must be updated before " "MPF will work.") if (len(msg) > curr_index + 7) and (msg[curr_index + 7] == ord(OppRs232Intf.EOM_CMD)): break if (len(msg) > curr_index + 8) and (msg[curr_index + 8] == ord(OppRs232Intf.GET_VERS_CMD)): curr_index += 7 else: self.log.warning("Malformed GET_VERS_CMD response (Chain %s): %s.", chain_serial, "".join(HEX_FORMAT % b for b in msg)) self.opp_connection[chain_serial].lost_synch() break
missionpinball/mpf
[ 176, 127, 176, 108, 1403853986 ]
def read_gen2_inp_resp(self, chain_serial, msg): """Read switch changes. Args: ---- chain_serial: Serial of the chain which received the message. msg: Message to parse. """ # Single read gen2 input response. Receive function breaks them down # Verify the CRC8 is correct if len(msg) < 7: self.log.warning("Msg too short: %s.", "".join(HEX_FORMAT % b for b in msg)) self.opp_connection[chain_serial].lost_synch() return crc8 = OppRs232Intf.calc_crc8_part_msg(msg, 0, 6) if msg[6] != ord(crc8): self._bad_crc(chain_serial, msg) else: if chain_serial + '-' + str(msg[0]) not in self.inp_addr_dict: self.log.warning("Got input response for invalid card: %s. Msg: %s.", msg[0], "".join(HEX_FORMAT % b for b in msg)) return opp_inp = self.inp_addr_dict[chain_serial + '-' + str(msg[0])] new_state = (msg[2] << 24) | \ (msg[3] << 16) | \ (msg[4] << 8) | \ msg[5] # Update the state which holds inputs that are active changes = opp_inp.old_state ^ new_state if changes != 0: curr_bit = 1 for index in range(0, 32): if (curr_bit & changes) != 0: if (curr_bit & new_state) == 0: self.machine.switch_controller.process_switch_by_num( state=1, num=opp_inp.chain_serial + '-' + opp_inp.card_num + '-' + str(index), platform=self) else: self.machine.switch_controller.process_switch_by_num( state=0, num=opp_inp.chain_serial + '-' + opp_inp.card_num + '-' + str(index), platform=self) curr_bit <<= 1 opp_inp.old_state = new_state # we can continue to poll self._poll_response_received[chain_serial].set()
missionpinball/mpf
[ 176, 127, 176, 108, 1403853986 ]
def read_matrix_inp_resp(self, chain_serial, msg): """Read matrix switch changes. Args: ---- chain_serial: Serial of the chain which received the message. msg: Message to parse. """ # Single read gen2 input response. Receive function breaks them down # Verify the CRC8 is correct if len(msg) < 11: self.log.warning("Msg too short: %s.", "".join(HEX_FORMAT % b for b in msg)) self.opp_connection[chain_serial].lost_synch() return crc8 = OppRs232Intf.calc_crc8_part_msg(msg, 0, 10) if msg[10] != ord(crc8): self._bad_crc(chain_serial, msg) else: if chain_serial + '-' + str(msg[0]) not in self.matrix_inp_addr_dict: self.log.warning("Got input response for invalid matrix card: %s. Msg: %s.", msg[0], "".join(HEX_FORMAT % b for b in msg)) return opp_inp = self.matrix_inp_addr_dict[chain_serial + '-' + str(msg[0])] new_state = ((msg[2] << 56) | (msg[3] << 48) | (msg[4] << 40) | (msg[5] << 32) | (msg[6] << 24) | (msg[7] << 16) | (msg[8] << 8) | msg[9]) changes = opp_inp.old_state ^ new_state if changes != 0: curr_bit = 1 for index in range(32, 96): if (curr_bit & changes) != 0: if (curr_bit & new_state) == 0: self.machine.switch_controller.process_switch_by_num( state=1, num=opp_inp.chain_serial + '-' + opp_inp.card_num + '-' + str(index), platform=self) else: self.machine.switch_controller.process_switch_by_num( state=0, num=opp_inp.chain_serial + '-' + opp_inp.card_num + '-' + str(index), platform=self) curr_bit <<= 1 opp_inp.old_state = new_state # we can continue to poll self._poll_response_received[chain_serial].set()
missionpinball/mpf
[ 176, 127, 176, 108, 1403853986 ]
def configure_driver(self, config: DriverConfig, number: str, platform_settings: dict): """Configure a driver. Args: ---- config: Config dict. number: Number of this driver. platform_settings: Platform specific settings. """ if not self.opp_connection: self.raise_config_error("A request was made to configure an OPP solenoid, " "but no OPP connection is available", 4) number = self._get_dict_index(number) if number not in self.sol_dict: self.raise_config_error("A request was made to configure an OPP solenoid " "with number {} which doesn't exist".format(number), 5) # Use new update individual solenoid command opp_sol = self.sol_dict[number] opp_sol.config = config opp_sol.platform_settings = platform_settings if self.debug: self.debug_log("Configure driver %s", number) default_pulse = PulseSettings(config.default_pulse_power, config.default_pulse_ms) default_hold = HoldSettings(config.default_hold_power) opp_sol.reconfigure_driver(default_pulse, default_hold) # Removing the default input is not necessary since the # CFG_SOL_USE_SWITCH is not being set return opp_sol
missionpinball/mpf
[ 176, 127, 176, 108, 1403853986 ]
def parse_light_number_to_channels(self, number: str, subtype: str): """Parse number and subtype to channel.""" if subtype in ("matrix", "incand"): return [ { "number": self._get_dict_index(number) } ] if not subtype or subtype == "led": full_index = self._get_dict_index(number) chain_serial, card, index = full_index.split('-') number_format = "{}-{}-{}" return [ { "number": number_format.format(chain_serial, card, int(index) * 3) }, { "number": number_format.format(chain_serial, card, int(index) * 3 + 1) }, { "number": number_format.format(chain_serial, card, int(index) * 3 + 2) }, ] self.raise_config_error("Unknown subtype {}".format(subtype), 8) return []
missionpinball/mpf
[ 176, 127, 176, 108, 1403853986 ]
def _verify_coil_and_switch_fit(self, switch, coil): chain_serial, card, solenoid = coil.hw_driver.number.split('-') sw_chain_serial, sw_card, sw_num = switch.hw_switch.number.split('-') if self.min_version[chain_serial] >= 0x20000: if chain_serial != sw_chain_serial or card != sw_card: self.raise_config_error('Invalid switch being configured for driver. Driver = {} ' 'Switch = {}. Driver and switch have to be on the same ' 'board.'.format(coil.hw_driver.number, switch.hw_switch.number), 13) else: matching_sw = ((int(solenoid) & 0x0c) << 1) | (int(solenoid) & 0x03) if chain_serial != sw_chain_serial or card != sw_card or matching_sw != int(sw_num): self.raise_config_error('Invalid switch being configured for driver. Driver = {} ' 'Switch = {}. For Firmware < 0.2.0 they have to be on the same board and ' 'have the same number'.format(coil.hw_driver.number, switch.hw_switch.number), 14)
missionpinball/mpf
[ 176, 127, 176, 108, 1403853986 ]
def set_delayed_pulse_on_hit_rule(self, enable_switch: SwitchSettings, coil: DriverSettings, delay_ms: int): """Set pulse on hit and release rule to driver. When a switch is hit and a certain delay passed it pulses a driver. When the switch is released the pulse continues. Typically used for kickbacks. """ if delay_ms <= 0: raise AssertionError("set_delayed_pulse_on_hit_rule should be used with a positive delay " "not {}".format(delay_ms)) if delay_ms > 255: raise AssertionError("set_delayed_pulse_on_hit_rule is limited to max 255ms " "(was {})".format(delay_ms)) self._write_hw_rule(enable_switch, coil, use_hold=False, can_cancel=False, delay_ms=int(delay_ms))
missionpinball/mpf
[ 176, 127, 176, 108, 1403853986 ]
def set_pulse_on_hit_and_enable_and_release_rule(self, enable_switch: SwitchSettings, coil: DriverSettings):
    """Set pulse on hit and enable and release rule on driver.

    Pulses a driver when a switch is hit. Then enables the driver (may be with pwm).
    When the switch is released the pulse is canceled and the driver gets disabled.
    Typically used for single coil flippers.
    """
    self._write_hw_rule(enable_switch, coil, use_hold=True, can_cancel=True)
missionpinball/mpf
[ 176, 127, 176, 108, 1403853986 ]
def set_pulse_on_hit_and_enable_and_release_and_disable_rule(self, enable_switch: SwitchSettings, eos_switch: SwitchSettings, coil: DriverSettings, repulse_settings: Optional[RepulseSettings]): """Set pulse on hit and enable and release and disable rule on driver. Pulses a driver when a switch is hit. Then enables the driver (may be with pwm). When the switch is released the pulse is canceled and the driver becomes disabled. When the eos_switch is hit the pulse is canceled and the driver becomes enabled (likely with PWM). Typically used on the coil for single-wound coil flippers with eos switch. """ raise AssertionError("Not implemented in OPP currently")
missionpinball/mpf
[ 176, 127, 176, 108, 1403853986 ]
def _write_hw_rule(self, switch_obj: SwitchSettings, driver_obj: DriverSettings, use_hold, can_cancel, delay_ms=None): if switch_obj.invert: raise AssertionError("Cannot handle inverted switches") if driver_obj.hold_settings and not use_hold: raise AssertionError("Invalid call") self._verify_coil_and_switch_fit(switch_obj, driver_obj) self.debug_log("Setting HW Rule. Driver: %s", driver_obj.hw_driver.number) driver_obj.hw_driver.switches.append(switch_obj.hw_switch.number) driver_obj.hw_driver.set_switch_rule(driver_obj.pulse_settings, driver_obj.hold_settings, driver_obj.recycle, can_cancel, delay_ms) _, _, switch_num = switch_obj.hw_switch.number.split("-") switch_num = int(switch_num) self._add_switch_coil_mapping(switch_num, driver_obj.hw_driver)
missionpinball/mpf
[ 176, 127, 176, 108, 1403853986 ]
def _add_switch_coil_mapping(self, switch_num, driver: "OPPSolenoid"): """Add mapping between switch and coil.""" if self.min_version[driver.sol_card.chain_serial] < 0x20000: return _, _, coil_num = driver.number.split('-') # mirror switch matrix columns to handle the fact that OPP matrix is in reverse column order if switch_num >= 32: switch_num = 8 * (15 - (switch_num // 8)) + switch_num % 8 msg = bytearray() msg.append(driver.sol_card.addr) msg.extend(OppRs232Intf.SET_SOL_INP_CMD) msg.append(int(switch_num)) msg.append(int(coil_num)) msg.extend(OppRs232Intf.calc_crc8_whole_msg(msg)) msg.extend(OppRs232Intf.EOM_CMD) final_cmd = bytes(msg) if self.debug: self.debug_log("Mapping input %s and coil %s on %s", switch_num, coil_num, driver.sol_card.chain_serial) self.send_to_processor(driver.sol_card.chain_serial, final_cmd)
missionpinball/mpf
[ 176, 127, 176, 108, 1403853986 ]
def lenient_json(v): if isinstance(v, (str, bytes)): try: return json.loads(v) except (ValueError, TypeError): pass return v
tutorcruncher/morpheus
[ 17, 3, 17, 7, 1495054960 ]
def __init__(self, method, url, status, response_text): self.method = method self.url = url self.status = status self.body = response_text
tutorcruncher/morpheus
[ 17, 3, 17, 7, 1495054960 ]
def __init__(self, root_url, settings: Settings): self.settings = settings self.root = root_url.rstrip('/') + '/'
tutorcruncher/morpheus
[ 17, 3, 17, 7, 1495054960 ]
def _modify_request(self, method, url, data): return method, url, data
tutorcruncher/morpheus
[ 17, 3, 17, 7, 1495054960 ]
def __init__(self, settings): super().__init__(settings.mandrill_url, settings)
tutorcruncher/morpheus
[ 17, 3, 17, 7, 1495054960 ]
def __init__(self, settings): super().__init__(settings.messagebird_url, settings)
tutorcruncher/morpheus
[ 17, 3, 17, 7, 1495054960 ]
def setup_args(parser=None): if parser is None: parser = ParlaiParser(True, True, 'Check tasks for common errors') # Get command line arguments parser.add_argument('-ltim', '--log-every-n-secs', type=float, default=2) parser.add_argument('-d', '--display-examples', type='bool', default=False) parser.set_defaults(datatype='train:stream:ordered') return parser
facebookresearch/ParlAI
[ 9846, 2003, 9846, 72, 1493053844 ]
def warn(txt, act, opt): if opt.get('display_examples'): print(txt + ":\n" + str(act)) else: warn_once(txt)
facebookresearch/ParlAI
[ 9846, 2003, 9846, 72, 1493053844 ]
def verify_data(opt): counts = verify(opt) print(counts) return counts
facebookresearch/ParlAI
[ 9846, 2003, 9846, 72, 1493053844 ]
def setup_args(cls): return setup_args()
facebookresearch/ParlAI
[ 9846, 2003, 9846, 72, 1493053844 ]
def __init__(self, integration_id=None, created=None, local_vars_configuration=None): # noqa: E501 """IntegrationEntity - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._integration_id = None self._created = None self.discriminator = None self.integration_id = integration_id self.created = created
talon-one/talon_one.py
[ 1, 3, 1, 1, 1484929786 ]
def integration_id(self): """Gets the integration_id of this IntegrationEntity. # noqa: E501 The integration ID for this entity sent to and used in the Talon.One system. # noqa: E501 :return: The integration_id of this IntegrationEntity. # noqa: E501 :rtype: str """ return self._integration_id
talon-one/talon_one.py
[ 1, 3, 1, 1, 1484929786 ]
def integration_id(self, integration_id): """Sets the integration_id of this IntegrationEntity. The integration ID for this entity sent to and used in the Talon.One system. # noqa: E501 :param integration_id: The integration_id of this IntegrationEntity. # noqa: E501 :type: str """ if self.local_vars_configuration.client_side_validation and integration_id is None: # noqa: E501 raise ValueError("Invalid value for `integration_id`, must not be `None`") # noqa: E501 self._integration_id = integration_id
talon-one/talon_one.py
[ 1, 3, 1, 1, 1484929786 ]
def created(self): """Gets the created of this IntegrationEntity. # noqa: E501 The exact moment this entity was created. # noqa: E501 :return: The created of this IntegrationEntity. # noqa: E501 :rtype: datetime """ return self._created
talon-one/talon_one.py
[ 1, 3, 1, 1, 1484929786 ]
def created(self, created): """Sets the created of this IntegrationEntity. The exact moment this entity was created. # noqa: E501 :param created: The created of this IntegrationEntity. # noqa: E501 :type: datetime """ if self.local_vars_configuration.client_side_validation and created is None: # noqa: E501 raise ValueError("Invalid value for `created`, must not be `None`") # noqa: E501 self._created = created
talon-one/talon_one.py
[ 1, 3, 1, 1, 1484929786 ]
def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict())
talon-one/talon_one.py
[ 1, 3, 1, 1, 1484929786 ]
def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, IntegrationEntity): return False return self.to_dict() == other.to_dict()
talon-one/talon_one.py
[ 1, 3, 1, 1, 1484929786 ]
def _PUT(self, *param, **params): (host_id, guest_id) = self.chk_guestby1(param) if guest_id is None: return web.notfound() if is_param(self.input, 'id') is False \ or is_int(self.input.id) is False: return web.badrequest("Request data is invalid.") snapshot_id = str(self.input.id) snapshot = s_findbyname_guestby1(self.orm, snapshot_id, guest_id) if snapshot is None: pass # ignore snapshots that is not in database. #return web.badrequest("Request data is invalid.") model = findbyguest1(self.orm, guest_id) kvs = KaresansuiVirtSnapshot(readonly=False) snapshot_list = [] try: domname = kvs.kvc.uuid_to_domname(model.uniq_key) if not domname: return web.notfound() self.view.is_creatable = kvs.isSupportedDomain(domname) try: snapshot_list = kvs.listNames(domname)[domname] except: pass finally: kvs.finish() if not snapshot_id in snapshot_list: self.logger.debug(_("The specified snapshot does not exist in database. - %s") % snapshot_id) # ignore snapshots that is not in database. #return web.notfound() action_cmd = dict2command( "%s/%s" % (karesansui.config['application.bin.dir'], VIRT_COMMAND_APPLY_SNAPSHOT), {"name" : domname, "id" : snapshot_id}) cmdname = 'Apply Snapshot' _jobgroup = JobGroup(cmdname, karesansui.sheconf['env.uniqkey']) _job = Job('%s command' % cmdname, 0, action_cmd) _jobgroup.jobs.append(_job) _machine2jobgroup = m2j_new(machine=model, jobgroup_id=-1, uniq_key=karesansui.sheconf['env.uniqkey'], created_user=self.me, modified_user=self.me, )
karesansui/karesansui
[ 106, 29, 106, 6, 1333588008 ]
def setUp(self): super(_CommonSVNTestCase, self).setUp() self._old_backend_setting = settings.SVNTOOL_BACKENDS settings.SVNTOOL_BACKENDS = [self.backend] recompute_svn_backend() self.svn_repo_path = os.path.abspath( os.path.join(os.path.dirname(__file__), '..', 'testdata', 'svn_repo')) self.svn_ssh_path = ('svn+ssh://localhost%s' % self.svn_repo_path.replace('\\', '/')) self.repository = Repository.objects.create( name='Subversion SVN', path='file://%s' % self.svn_repo_path, tool=Tool.objects.get(name='Subversion')) try: self.tool = self.repository.get_scmtool() except ImportError: raise unittest.SkipTest('The %s backend could not be used. A ' 'dependency may be missing.' % self.backend) assert self.tool.client.__class__.__module__ == self.backend
reviewboard/reviewboard
[ 1464, 419, 1464, 1, 1250977189 ]
def shortDescription(self): desc = super(_CommonSVNTestCase, self).shortDescription() desc = desc.replace('<backend>', self.backend_name) return desc
reviewboard/reviewboard
[ 1464, 419, 1464, 1, 1250977189 ]
def test_ssh(self): """Testing SVN (<backend>) with a SSH-backed Subversion repository""" self._test_ssh(self.svn_ssh_path, 'trunk/doc/misc-docs/Makefile')
reviewboard/reviewboard
[ 1464, 419, 1464, 1, 1250977189 ]
def test_get_file(self): """Testing SVN (<backend>) get_file""" tool = self.tool expected = (b'include ../tools/Makefile.base-vars\n' b'NAME = misc-docs\n' b'OUTNAME = svn-misc-docs\n' b'INSTALL_DIR = $(DESTDIR)/usr/share/doc/subversion\n' b'include ../tools/Makefile.base-rules\n') # There are 3 versions of this test in order to get 100% coverage of # the svn module. rev = Revision('2') filename = 'trunk/doc/misc-docs/Makefile' value = tool.get_file(filename, rev) self.assertIsInstance(value, bytes) self.assertEqual(value, expected) value = tool.get_file('/%s' % filename, rev) self.assertIsInstance(value, bytes) self.assertEqual(value, expected) value = tool.get_file('%s/%s' % (self.repository.path, filename), rev) self.assertIsInstance(value, bytes) self.assertEqual(value, expected) with self.assertRaises(FileNotFoundError): tool.get_file('')
reviewboard/reviewboard
[ 1464, 419, 1464, 1, 1250977189 ]
def test_get_file_with_special_url_chars(self): """Testing SVN (<backend>) get_file with filename containing characters that are special in URLs and repository path as a URI """ value = self.tool.get_file('trunk/crazy& ?#.txt', Revision('12')) self.assertTrue(isinstance(value, bytes)) self.assertEqual(value, b'Lots of characters in this one.\n')
reviewboard/reviewboard
[ 1464, 419, 1464, 1, 1250977189 ]
def test_normalize_path_with_special_chars_and_remote_url(self):
    """Testing SVN (<backend>) normalize_path with special characters
    and remote URL
    """
    client = self.tool.client

    client.repopath = 'svn+ssh://example.com/svn'
    path = client.normalize_path(''.join(
        chr(c) for c in range(128)
    ))

    # This URL was generated based on modified code that directly used
    # Subversion's lookup table explicitly, ensuring we're getting the
    # results we want from urllib.quote() and our list of safe characters.
    self.assertEqual(
        path,
        "svn+ssh://example.com/svn/%00%01%02%03%04%05%06%07%08%09%0A"
        "%0B%0C%0D%0E%0F%10%11%12%13%14%15%16%17%18%19%1A%1B%1C%1D%1E"
        "%1F%20!%22%23$%25&'()*+,-./0123456789:%3B%3C=%3E%3F@ABCDEFGH"
        "IJKLMNOPQRSTUVWXYZ%5B%5C%5D%5E_%60abcdefghijklmnopqrstuvwxyz"
        "%7B%7C%7D~%7F")
reviewboard/reviewboard
[ 1464, 419, 1464, 1, 1250977189 ]
def test_normalize_path_with_absolute_repo_path(self): """Testing SVN (<backend>) normalize_path with absolute path""" client = self.tool.client client.repopath = '/var/lib/svn' path = '/var/lib/svn/foo/bar' self.assertEqual(client.normalize_path(path), path) client.repopath = 'svn+ssh://example.com/svn/' path = 'svn+ssh://example.com/svn/foo/bar' self.assertEqual(client.normalize_path(path), path)
reviewboard/reviewboard
[ 1464, 419, 1464, 1, 1250977189 ]
def test_revision_parsing(self): """Testing SVN (<backend>) revision number parsing""" self.assertEqual( self.tool.parse_diff_revision(filename=b'', revision=b'(working copy)'), (b'', HEAD)) self.assertEqual( self.tool.parse_diff_revision(filename=b'', revision=b' (revision 0)'), (b'', PRE_CREATION)) self.assertEqual( self.tool.parse_diff_revision(filename=b'', revision=b'(revision 1)'), (b'', b'1')) self.assertEqual( self.tool.parse_diff_revision(filename=b'', revision=b'(revision 23)'), (b'', b'23')) # Fix for bug 2176 self.assertEqual( self.tool.parse_diff_revision(filename=b'', revision=b'\t(revision 4)'), (b'', b'4')) self.assertEqual( self.tool.parse_diff_revision( filename=b'', revision=b'2007-06-06 15:32:23 UTC (rev 10958)'), (b'', b'10958')) # Fix for bug 2632 self.assertEqual( self.tool.parse_diff_revision(filename=b'', revision=b'(revision )'), (b'', PRE_CREATION)) with self.assertRaises(SCMError): self.tool.parse_diff_revision(filename=b'', revision=b'hello') # Verify that 'svn diff' localized revision strings parse correctly. self.assertEqual( self.tool.parse_diff_revision( filename=b'', revision='(revisión: 5)'.encode('utf-8')), (b'', b'5')) self.assertEqual( self.tool.parse_diff_revision( filename=b'', revision='(リビジョン 6)'.encode('utf-8')), (b'', b'6')) self.assertEqual( self.tool.parse_diff_revision( filename=b'', revision='(版本 7)'.encode('utf-8')), (b'', b'7'))
reviewboard/reviewboard
[ 1464, 419, 1464, 1, 1250977189 ]
def test_revision_parsing_with_nonexistent_and_branches(self): """Testing SVN (<backend>) revision parsing with relocation information and nonexistent revision specifier """ self.assertEqual( self.tool.parse_diff_revision( filename=b'', revision=b'(.../trunk) (nonexistent)'), (b'trunk/', PRE_CREATION)) self.assertEqual( self.tool.parse_diff_revision( filename=b'', revision=b'(.../branches/branch-1.0) (nicht existent)'), (b'branches/branch-1.0/', PRE_CREATION)) self.assertEqual( self.tool.parse_diff_revision( filename=b'', revision=' (.../trunk) (不存在的)'.encode('utf-8')), (b'trunk/', PRE_CREATION))
reviewboard/reviewboard
[ 1464, 419, 1464, 1, 1250977189 ]