text (string, lengths 78–104k) | score (float64, 0–0.18) |
---|---|
def get_comments_of_delivery_note_per_page(self, delivery_note_id, per_page=1000, page=1):
"""
Get comments of delivery note per page
:param delivery_note_id: the delivery note
:param per_page: How many objects per page. Default: 1000
:param page: Which page. Default: 1
:return: list
"""
return self._get_resource_per_page(
resource=DELIVERY_NOTE_COMMENTS,
per_page=per_page,
page=page,
params={'delivery_note_id': delivery_note_id},
) | 0.005386 |
def setMood(self, mood):
"""
Update the activity message for the current user.
Args:
mood (str): new mood message
"""
self.conn("POST", "{0}/users/{1}/profile/partial".format(SkypeConnection.API_USER, self.userId),
auth=SkypeConnection.Auth.SkypeToken, json={"payload": {"mood": mood or ""}})
self.user.mood = SkypeUser.Mood(plain=mood) if mood else None | 0.009238 |
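A hedged usage sketch; the SkypeConnection/SkypeUser references suggest an SkPy-style client, so the assumption here is a `Skype` object built from credentials that exposes `setMood`:
from skpy import Skype   # assumption: the method above lives on SkPy's Skype client

sk = Skype("user@example.com", "password")   # placeholder credentials
sk.setMood("Out to lunch")                   # posts the new activity message
sk.setMood(None)                             # clears it (empty payload, user.mood reset to None)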
def run_job_flow(Name=None, LogUri=None, AdditionalInfo=None, AmiVersion=None, ReleaseLabel=None, Instances=None, Steps=None, BootstrapActions=None, SupportedProducts=None, NewSupportedProducts=None, Applications=None, Configurations=None, VisibleToAllUsers=None, JobFlowRole=None, ServiceRole=None, Tags=None, SecurityConfiguration=None, AutoScalingRole=None, ScaleDownBehavior=None):
"""
RunJobFlow creates and starts running a new cluster (job flow). The cluster runs the steps specified. After the steps complete, the cluster stops and the HDFS partition is lost. To prevent loss of data, configure the last step of the job flow to store results in Amazon S3. If the JobFlowInstancesConfig KeepJobFlowAliveWhenNoSteps parameter is set to TRUE , the cluster transitions to the WAITING state rather than shutting down after the steps have completed.
For additional protection, you can set the JobFlowInstancesConfig TerminationProtected parameter to TRUE to lock the cluster and prevent it from being terminated by API call, user intervention, or in the event of a job flow error.
A maximum of 256 steps are allowed in each job flow.
If your cluster is long-running (such as a Hive data warehouse) or complex, you may require more than 256 steps to process your data. You can bypass the 256-step limitation in various ways, including using the SSH shell to connect to the master node and submitting queries directly to the software running on the master node, such as Hive and Hadoop. For more information on how to do this, see Add More than 256 Steps to a Cluster in the Amazon EMR Management Guide .
For long running clusters, we recommend that you periodically store your results.
See also: AWS API Documentation
:example: response = client.run_job_flow(
Name='string',
LogUri='string',
AdditionalInfo='string',
AmiVersion='string',
ReleaseLabel='string',
Instances={
'MasterInstanceType': 'string',
'SlaveInstanceType': 'string',
'InstanceCount': 123,
'InstanceGroups': [
{
'Name': 'string',
'Market': 'ON_DEMAND'|'SPOT',
'InstanceRole': 'MASTER'|'CORE'|'TASK',
'BidPrice': 'string',
'InstanceType': 'string',
'InstanceCount': 123,
'Configurations': [
{
'Classification': 'string',
'Configurations': {'... recursive ...'},
'Properties': {
'string': 'string'
}
},
],
'EbsConfiguration': {
'EbsBlockDeviceConfigs': [
{
'VolumeSpecification': {
'VolumeType': 'string',
'Iops': 123,
'SizeInGB': 123
},
'VolumesPerInstance': 123
},
],
'EbsOptimized': True|False
},
'AutoScalingPolicy': {
'Constraints': {
'MinCapacity': 123,
'MaxCapacity': 123
},
'Rules': [
{
'Name': 'string',
'Description': 'string',
'Action': {
'Market': 'ON_DEMAND'|'SPOT',
'SimpleScalingPolicyConfiguration': {
'AdjustmentType': 'CHANGE_IN_CAPACITY'|'PERCENT_CHANGE_IN_CAPACITY'|'EXACT_CAPACITY',
'ScalingAdjustment': 123,
'CoolDown': 123
}
},
'Trigger': {
'CloudWatchAlarmDefinition': {
'ComparisonOperator': 'GREATER_THAN_OR_EQUAL'|'GREATER_THAN'|'LESS_THAN'|'LESS_THAN_OR_EQUAL',
'EvaluationPeriods': 123,
'MetricName': 'string',
'Namespace': 'string',
'Period': 123,
'Statistic': 'SAMPLE_COUNT'|'AVERAGE'|'SUM'|'MINIMUM'|'MAXIMUM',
'Threshold': 123.0,
'Unit': 'NONE'|'SECONDS'|'MICRO_SECONDS'|'MILLI_SECONDS'|'BYTES'|'KILO_BYTES'|'MEGA_BYTES'|'GIGA_BYTES'|'TERA_BYTES'|'BITS'|'KILO_BITS'|'MEGA_BITS'|'GIGA_BITS'|'TERA_BITS'|'PERCENT'|'COUNT'|'BYTES_PER_SECOND'|'KILO_BYTES_PER_SECOND'|'MEGA_BYTES_PER_SECOND'|'GIGA_BYTES_PER_SECOND'|'TERA_BYTES_PER_SECOND'|'BITS_PER_SECOND'|'KILO_BITS_PER_SECOND'|'MEGA_BITS_PER_SECOND'|'GIGA_BITS_PER_SECOND'|'TERA_BITS_PER_SECOND'|'COUNT_PER_SECOND',
'Dimensions': [
{
'Key': 'string',
'Value': 'string'
},
]
}
}
},
]
}
},
],
'InstanceFleets': [
{
'Name': 'string',
'InstanceFleetType': 'MASTER'|'CORE'|'TASK',
'TargetOnDemandCapacity': 123,
'TargetSpotCapacity': 123,
'InstanceTypeConfigs': [
{
'InstanceType': 'string',
'WeightedCapacity': 123,
'BidPrice': 'string',
'BidPriceAsPercentageOfOnDemandPrice': 123.0,
'EbsConfiguration': {
'EbsBlockDeviceConfigs': [
{
'VolumeSpecification': {
'VolumeType': 'string',
'Iops': 123,
'SizeInGB': 123
},
'VolumesPerInstance': 123
},
],
'EbsOptimized': True|False
},
'Configurations': [
{
'Classification': 'string',
'Configurations': {'... recursive ...'},
'Properties': {
'string': 'string'
}
},
]
},
],
'LaunchSpecifications': {
'SpotSpecification': {
'TimeoutDurationMinutes': 123,
'TimeoutAction': 'SWITCH_TO_ON_DEMAND'|'TERMINATE_CLUSTER',
'BlockDurationMinutes': 123
}
}
},
],
'Ec2KeyName': 'string',
'Placement': {
'AvailabilityZone': 'string',
'AvailabilityZones': [
'string',
]
},
'KeepJobFlowAliveWhenNoSteps': True|False,
'TerminationProtected': True|False,
'HadoopVersion': 'string',
'Ec2SubnetId': 'string',
'Ec2SubnetIds': [
'string',
],
'EmrManagedMasterSecurityGroup': 'string',
'EmrManagedSlaveSecurityGroup': 'string',
'ServiceAccessSecurityGroup': 'string',
'AdditionalMasterSecurityGroups': [
'string',
],
'AdditionalSlaveSecurityGroups': [
'string',
]
},
Steps=[
{
'Name': 'string',
'ActionOnFailure': 'TERMINATE_JOB_FLOW'|'TERMINATE_CLUSTER'|'CANCEL_AND_WAIT'|'CONTINUE',
'HadoopJarStep': {
'Properties': [
{
'Key': 'string',
'Value': 'string'
},
],
'Jar': 'string',
'MainClass': 'string',
'Args': [
'string',
]
}
},
],
BootstrapActions=[
{
'Name': 'string',
'ScriptBootstrapAction': {
'Path': 'string',
'Args': [
'string',
]
}
},
],
SupportedProducts=[
'string',
],
NewSupportedProducts=[
{
'Name': 'string',
'Args': [
'string',
]
},
],
Applications=[
{
'Name': 'string',
'Version': 'string',
'Args': [
'string',
],
'AdditionalInfo': {
'string': 'string'
}
},
],
Configurations=[
{
'Classification': 'string',
'Configurations': {'... recursive ...'},
'Properties': {
'string': 'string'
}
},
],
VisibleToAllUsers=True|False,
JobFlowRole='string',
ServiceRole='string',
Tags=[
{
'Key': 'string',
'Value': 'string'
},
],
SecurityConfiguration='string',
AutoScalingRole='string',
ScaleDownBehavior='TERMINATE_AT_INSTANCE_HOUR'|'TERMINATE_AT_TASK_COMPLETION'
)
:type Name: string
:param Name: [REQUIRED]
The name of the job flow.
:type LogUri: string
:param LogUri: The location in Amazon S3 to write the log files of the job flow. If a value is not provided, logs are not created.
:type AdditionalInfo: string
:param AdditionalInfo: A JSON string for selecting additional features.
:type AmiVersion: string
:param AmiVersion:
Note
For Amazon EMR releases 3.x and 2.x. For Amazon EMR releases 4.x and greater, use ReleaseLabel.
The version of the Amazon Machine Image (AMI) to use when launching Amazon EC2 instances in the job flow. The following values are valid:
The version number of the AMI to use, for example, '2.0.'
If the AMI supports multiple versions of Hadoop (for example, AMI 1.0 supports both Hadoop 0.18 and 0.20) you can use the JobFlowInstancesConfig HadoopVersion parameter to modify the version of Hadoop from the defaults shown above.
For details about the AMI versions currently supported by Amazon Elastic MapReduce, see AMI Versions Supported in Elastic MapReduce in the Amazon Elastic MapReduce Developer Guide.
Note
Previously, the EMR AMI version API parameter options allowed you to use latest for the latest AMI version rather than specify a numerical value. Some regions no longer support this deprecated option as they only have a newer release label version of EMR, which requires you to specify an EMR release label release (EMR 4.x or later).
:type ReleaseLabel: string
:param ReleaseLabel:
Note
Amazon EMR releases 4.x or later.
The release label for the Amazon EMR release. For Amazon EMR 3.x and 2.x AMIs, use AmiVersion instead of ReleaseLabel.
:type Instances: dict
:param Instances: [REQUIRED]
A specification of the number and type of Amazon EC2 instances.
MasterInstanceType (string) --The EC2 instance type of the master node.
SlaveInstanceType (string) --The EC2 instance type of the slave nodes.
InstanceCount (integer) --The number of EC2 instances in the cluster.
InstanceGroups (list) --Configuration for the instance groups in a cluster.
(dict) --Configuration defining a new instance group.
Name (string) --Friendly name given to the instance group.
Market (string) --Market type of the EC2 instances used to create a cluster node.
InstanceRole (string) -- [REQUIRED]The role of the instance group in the cluster.
BidPrice (string) --Bid price for each EC2 instance in the instance group when launching nodes as Spot Instances, expressed in USD.
InstanceType (string) -- [REQUIRED]The EC2 instance type for all instances in the instance group.
InstanceCount (integer) -- [REQUIRED]Target number of instances for the instance group.
Configurations (list) --
Note
Amazon EMR releases 4.x or later.
The list of configurations supplied for an EMR cluster instance group. You can specify a separate configuration for each instance group (master, core, and task).
(dict) --
Note
Amazon EMR releases 4.x or later.
An optional configuration specification to be used when provisioning cluster instances, which can include configurations for applications and software bundled with Amazon EMR. A configuration consists of a classification, properties, and optional nested configurations. A classification refers to an application-specific configuration file. Properties are the settings you want to change in that file. For more information, see Configuring Applications .
Classification (string) --The classification within a configuration.
Configurations (list) --A list of additional configurations to apply within a configuration object.
Properties (dict) --A set of properties specified within a configuration classification.
(string) --
(string) --
EbsConfiguration (dict) --EBS configurations that will be attached to each EC2 instance in the instance group.
EbsBlockDeviceConfigs (list) --An array of Amazon EBS volume specifications attached to a cluster instance.
(dict) --Configuration of requested EBS block device associated with the instance group with count of volumes that will be associated to every instance.
VolumeSpecification (dict) -- [REQUIRED]EBS volume specifications such as volume type, IOPS, and size (GiB) that will be requested for the EBS volume attached to an EC2 instance in the cluster.
VolumeType (string) -- [REQUIRED]The volume type. Volume types supported are gp2, io1, standard.
Iops (integer) --The number of I/O operations per second (IOPS) that the volume supports.
SizeInGB (integer) -- [REQUIRED]The volume size, in gibibytes (GiB). This can be a number from 1 - 1024. If the volume type is EBS-optimized, the minimum value is 10.
VolumesPerInstance (integer) --Number of EBS volumes with a specific volume configuration that will be associated with every instance in the instance group
EbsOptimized (boolean) --Indicates whether an Amazon EBS volume is EBS-optimized.
AutoScalingPolicy (dict) --An automatic scaling policy for a core instance group or task instance group in an Amazon EMR cluster. The automatic scaling policy defines how an instance group dynamically adds and terminates EC2 instances in response to the value of a CloudWatch metric. See PutAutoScalingPolicy .
Constraints (dict) -- [REQUIRED]The upper and lower EC2 instance limits for an automatic scaling policy. Automatic scaling activity will not cause an instance group to grow above or below these limits.
MinCapacity (integer) -- [REQUIRED]The lower boundary of EC2 instances in an instance group below which scaling activities are not allowed to shrink. Scale-in activities will not terminate instances below this boundary.
MaxCapacity (integer) -- [REQUIRED]The upper boundary of EC2 instances in an instance group beyond which scaling activities are not allowed to grow. Scale-out activities will not add instances beyond this boundary.
Rules (list) -- [REQUIRED]The scale-in and scale-out rules that comprise the automatic scaling policy.
(dict) --A scale-in or scale-out rule that defines scaling activity, including the CloudWatch metric alarm that triggers activity, how EC2 instances are added or removed, and the periodicity of adjustments. The automatic scaling policy for an instance group can comprise one or more automatic scaling rules.
Name (string) -- [REQUIRED]The name used to identify an automatic scaling rule. Rule names must be unique within a scaling policy.
Description (string) --A friendly, more verbose description of the automatic scaling rule.
Action (dict) -- [REQUIRED]The conditions that trigger an automatic scaling activity.
Market (string) --Not available for instance groups. Instance groups use the market type specified for the group.
SimpleScalingPolicyConfiguration (dict) -- [REQUIRED]The type of adjustment the automatic scaling activity makes when triggered, and the periodicity of the adjustment.
AdjustmentType (string) --The way in which EC2 instances are added (if ScalingAdjustment is a positive number) or terminated (if ScalingAdjustment is a negative number) each time the scaling activity is triggered. CHANGE_IN_CAPACITY is the default. CHANGE_IN_CAPACITY indicates that the EC2 instance count increments or decrements by ScalingAdjustment , which should be expressed as an integer. PERCENT_CHANGE_IN_CAPACITY indicates the instance count increments or decrements by the percentage specified by ScalingAdjustment , which should be expressed as a decimal. For example, 0.20 indicates an increase in 20% increments of cluster capacity. EXACT_CAPACITY indicates the scaling activity results in an instance group with the number of EC2 instances specified by ScalingAdjustment , which should be expressed as a positive integer.
ScalingAdjustment (integer) -- [REQUIRED]The amount by which to scale in or scale out, based on the specified AdjustmentType . A positive value adds to the instance group's EC2 instance count while a negative number removes instances. If AdjustmentType is set to EXACT_CAPACITY , the number should only be a positive integer. If AdjustmentType is set to PERCENT_CHANGE_IN_CAPACITY , the value should express the percentage as a decimal. For example, -0.20 indicates a decrease in 20% increments of cluster capacity.
CoolDown (integer) --The amount of time, in seconds, after a scaling activity completes before any further trigger-related scaling activities can start. The default value is 0.
Trigger (dict) -- [REQUIRED]The CloudWatch alarm definition that determines when automatic scaling activity is triggered.
CloudWatchAlarmDefinition (dict) -- [REQUIRED]The definition of a CloudWatch metric alarm. When the defined alarm conditions are met along with other trigger parameters, scaling activity begins.
ComparisonOperator (string) -- [REQUIRED]Determines how the metric specified by MetricName is compared to the value specified by Threshold .
EvaluationPeriods (integer) --The number of periods, expressed in seconds using Period , during which the alarm condition must exist before the alarm triggers automatic scaling activity. The default value is 1 .
MetricName (string) -- [REQUIRED]The name of the CloudWatch metric that is watched to determine an alarm condition.
Namespace (string) --The namespace for the CloudWatch metric. The default is AWS/ElasticMapReduce .
Period (integer) -- [REQUIRED]The period, in seconds, over which the statistic is applied. EMR CloudWatch metrics are emitted every five minutes (300 seconds), so if an EMR CloudWatch metric is specified, specify 300 .
Statistic (string) --The statistic to apply to the metric associated with the alarm. The default is AVERAGE .
Threshold (float) -- [REQUIRED]The value against which the specified statistic is compared.
Unit (string) --The unit of measure associated with the CloudWatch metric being watched. The value specified for Unit must correspond to the units specified in the CloudWatch metric.
Dimensions (list) --A CloudWatch metric dimension.
(dict) --A CloudWatch dimension, which is specified using a Key (known as a Name in CloudWatch), Value pair. By default, Amazon EMR uses one dimension whose Key is JobFlowID and Value is a variable representing the cluster ID, which is ${emr.clusterId} . This enables the rule to bootstrap when the cluster ID becomes available.
Key (string) --The dimension name.
Value (string) --The dimension value.
InstanceFleets (list) --
Note
The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.
Describes the EC2 instances and instance configurations for clusters that use the instance fleet configuration.
(dict) --The configuration that defines an instance fleet.
Note
The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.
Name (string) --The friendly name of the instance fleet.
InstanceFleetType (string) -- [REQUIRED]The node type that the instance fleet hosts. Valid values are MASTER,CORE,and TASK.
TargetOnDemandCapacity (integer) --The target capacity of On-Demand units for the instance fleet, which determines how many On-Demand instances to provision. When the instance fleet launches, Amazon EMR tries to provision On-Demand instances as specified by InstanceTypeConfig . Each instance configuration has a specified WeightedCapacity . When an On-Demand instance is provisioned, the WeightedCapacity units count toward the target capacity. Amazon EMR provisions instances until the target capacity is totally fulfilled, even if this results in an overage. For example, if there are 2 units remaining to fulfill capacity, and Amazon EMR can only provision an instance with a WeightedCapacity of 5 units, the instance is provisioned, and the target capacity is exceeded by 3 units.
Note
If not specified or set to 0, only Spot instances are provisioned for the instance fleet using TargetSpotCapacity . At least one of TargetSpotCapacity and TargetOnDemandCapacity should be greater than 0. For a master instance fleet, only one of TargetSpotCapacity and TargetOnDemandCapacity can be specified, and its value must be 1.
TargetSpotCapacity (integer) --The target capacity of Spot units for the instance fleet, which determines how many Spot instances to provision. When the instance fleet launches, Amazon EMR tries to provision Spot instances as specified by InstanceTypeConfig . Each instance configuration has a specified WeightedCapacity . When a Spot instance is provisioned, the WeightedCapacity units count toward the target capacity. Amazon EMR provisions instances until the target capacity is totally fulfilled, even if this results in an overage. For example, if there are 2 units remaining to fulfill capacity, and Amazon EMR can only provision an instance with a WeightedCapacity of 5 units, the instance is provisioned, and the target capacity is exceeded by 3 units.
Note
If not specified or set to 0, only On-Demand instances are provisioned for the instance fleet. At least one of TargetSpotCapacity and TargetOnDemandCapacity should be greater than 0. For a master instance fleet, only one of TargetSpotCapacity and TargetOnDemandCapacity can be specified, and its value must be 1.
InstanceTypeConfigs (list) --The instance type configurations that define the EC2 instances in the instance fleet.
(dict) --An instance type configuration for each instance type in an instance fleet, which determines the EC2 instances Amazon EMR attempts to provision to fulfill On-Demand and Spot target capacities. There can be a maximum of 5 instance type configurations in a fleet.
Note
The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.
InstanceType (string) -- [REQUIRED]An EC2 instance type, such as m3.xlarge .
WeightedCapacity (integer) --The number of units that a provisioned instance of this type provides toward fulfilling the target capacities defined in InstanceFleetConfig . This value is 1 for a master instance fleet, and must be greater than 0 for core and task instance fleets.
BidPrice (string) --The bid price for each EC2 Spot instance type as defined by InstanceType . Expressed in USD. If neither BidPrice nor BidPriceAsPercentageOfOnDemandPrice is provided, BidPriceAsPercentageOfOnDemandPrice defaults to 100%.
BidPriceAsPercentageOfOnDemandPrice (float) --The bid price, as a percentage of On-Demand price, for each EC2 Spot instance as defined by InstanceType . Expressed as a number between 0 and 1000 (for example, 20 specifies 20%). If neither BidPrice nor BidPriceAsPercentageOfOnDemandPrice is provided, BidPriceAsPercentageOfOnDemandPrice defaults to 100%.
EbsConfiguration (dict) --The configuration of Amazon Elastic Block Storage (EBS) attached to each instance as defined by InstanceType .
EbsBlockDeviceConfigs (list) --An array of Amazon EBS volume specifications attached to a cluster instance.
(dict) --Configuration of requested EBS block device associated with the instance group with count of volumes that will be associated to every instance.
VolumeSpecification (dict) -- [REQUIRED]EBS volume specifications such as volume type, IOPS, and size (GiB) that will be requested for the EBS volume attached to an EC2 instance in the cluster.
VolumeType (string) -- [REQUIRED]The volume type. Volume types supported are gp2, io1, standard.
Iops (integer) --The number of I/O operations per second (IOPS) that the volume supports.
SizeInGB (integer) -- [REQUIRED]The volume size, in gibibytes (GiB). This can be a number from 1 - 1024. If the volume type is EBS-optimized, the minimum value is 10.
VolumesPerInstance (integer) --Number of EBS volumes with a specific volume configuration that will be associated with every instance in the instance group
EbsOptimized (boolean) --Indicates whether an Amazon EBS volume is EBS-optimized.
Configurations (list) --A configuration classification that applies when provisioning cluster instances, which can include configurations for applications and software that run on the cluster.
(dict) --
Note
Amazon EMR releases 4.x or later.
An optional configuration specification to be used when provisioning cluster instances, which can include configurations for applications and software bundled with Amazon EMR. A configuration consists of a classification, properties, and optional nested configurations. A classification refers to an application-specific configuration file. Properties are the settings you want to change in that file. For more information, see Configuring Applications .
Classification (string) --The classification within a configuration.
Configurations (list) --A list of additional configurations to apply within a configuration object.
Properties (dict) --A set of properties specified within a configuration classification.
(string) --
(string) --
LaunchSpecifications (dict) --The launch specification for the instance fleet.
SpotSpecification (dict) -- [REQUIRED]The launch specification for Spot instances in the fleet, which determines the defined duration and provisioning timeout behavior.
TimeoutDurationMinutes (integer) -- [REQUIRED]The spot provisioning timeout period in minutes. If Spot instances are not provisioned within this time period, the TimeOutAction is taken. Minimum value is 5 and maximum value is 1440. The timeout applies only during initial provisioning, when the cluster is first created.
TimeoutAction (string) -- [REQUIRED]The action to take when TargetSpotCapacity has not been fulfilled when the TimeoutDurationMinutes has expired; that is, when Spot instances could not be provisioned within the Spot provisioning timeout. Valid values are TERMINATE_CLUSTER and SWITCH_TO_ON_DEMAND, which provisions On-Demand instances to fulfill the remaining capacity.
BlockDurationMinutes (integer) --The defined duration for Spot instances (also known as Spot blocks) in minutes. When specified, the Spot instance does not terminate before the defined duration expires, and defined duration pricing for Spot instances applies. Valid values are 60, 120, 180, 240, 300, or 360. The duration period starts as soon as a Spot instance receives its instance ID. At the end of the duration, Amazon EC2 marks the Spot instance for termination and provides a Spot instance termination notice, which gives the instance a two-minute warning before it terminates.
Ec2KeyName (string) --The name of the EC2 key pair that can be used to ssh to the master node as the user called 'hadoop.'
Placement (dict) --The Availability Zone in which the cluster runs.
AvailabilityZone (string) --The Amazon EC2 Availability Zone for the cluster. AvailabilityZone is used for uniform instance groups, while AvailabilityZones (plural) is used for instance fleets.
AvailabilityZones (list) --When multiple Availability Zones are specified, Amazon EMR evaluates them and launches instances in the optimal Availability Zone. AvailabilityZones is used for instance fleets, while AvailabilityZone (singular) is used for uniform instance groups.
Note
The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.
(string) --
KeepJobFlowAliveWhenNoSteps (boolean) --Specifies whether the cluster should remain available after completing all steps.
TerminationProtected (boolean) --Specifies whether to lock the cluster to prevent the Amazon EC2 instances from being terminated by API call, user intervention, or in the event of a job-flow error.
HadoopVersion (string) --The Hadoop version for the cluster. Valid inputs are '0.18' (deprecated), '0.20' (deprecated), '0.20.205' (deprecated), '1.0.3', '2.2.0', or '2.4.0'. If you do not set this value, the default of 0.18 is used, unless the AmiVersion parameter is set in the RunJobFlow call, in which case the default version of Hadoop for that AMI version is used.
Ec2SubnetId (string) --Applies to clusters that use the uniform instance group configuration. To launch the cluster in Amazon Virtual Private Cloud (Amazon VPC), set this parameter to the identifier of the Amazon VPC subnet where you want the cluster to launch. If you do not specify this value, the cluster launches in the normal Amazon Web Services cloud, outside of an Amazon VPC, if the account launching the cluster supports EC2 Classic networks in the region where the cluster launches.
Amazon VPC currently does not support cluster compute quadruple extra large (cc1.4xlarge) instances. Thus you cannot specify the cc1.4xlarge instance type for clusters launched in an Amazon VPC.
Ec2SubnetIds (list) --Applies to clusters that use the instance fleet configuration. When multiple EC2 subnet IDs are specified, Amazon EMR evaluates them and launches instances in the optimal subnet.
Note
The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.
(string) --
EmrManagedMasterSecurityGroup (string) --The identifier of the Amazon EC2 security group for the master node.
EmrManagedSlaveSecurityGroup (string) --The identifier of the Amazon EC2 security group for the slave nodes.
ServiceAccessSecurityGroup (string) --The identifier of the Amazon EC2 security group for the Amazon EMR service to access clusters in VPC private subnets.
AdditionalMasterSecurityGroups (list) --A list of additional Amazon EC2 security group IDs for the master node.
(string) --
AdditionalSlaveSecurityGroups (list) --A list of additional Amazon EC2 security group IDs for the slave nodes.
(string) --
:type Steps: list
:param Steps: A list of steps to run.
(dict) --Specification of a cluster (job flow) step.
Name (string) -- [REQUIRED]The name of the step.
ActionOnFailure (string) --The action to take if the step fails.
HadoopJarStep (dict) -- [REQUIRED]The JAR file used for the step.
Properties (list) --A list of Java properties that are set when the step runs. You can use these properties to pass key value pairs to your main function.
(dict) --A key value pair.
Key (string) --The unique identifier of a key value pair.
Value (string) --The value part of the identified key.
Jar (string) -- [REQUIRED]A path to a JAR file run during the step.
MainClass (string) --The name of the main class in the specified Java file. If not specified, the JAR file should specify a Main-Class in its manifest file.
Args (list) --A list of command line arguments passed to the JAR file's main function when executed.
(string) --
:type BootstrapActions: list
:param BootstrapActions: A list of bootstrap actions to run before Hadoop starts on the cluster nodes.
(dict) --Configuration of a bootstrap action.
Name (string) -- [REQUIRED]The name of the bootstrap action.
ScriptBootstrapAction (dict) -- [REQUIRED]The script run by the bootstrap action.
Path (string) -- [REQUIRED]Location of the script to run during a bootstrap action. Can be either a location in Amazon S3 or on a local file system.
Args (list) --A list of command line arguments to pass to the bootstrap action script.
(string) --
:type SupportedProducts: list
:param SupportedProducts:
Note
For Amazon EMR releases 3.x and 2.x. For Amazon EMR releases 4.x and greater, use Applications.
A list of strings that indicates third-party software to use. For more information, see Use Third Party Applications with Amazon EMR . Currently supported values are:
'mapr-m3' - launch the job flow using MapR M3 Edition.
'mapr-m5' - launch the job flow using MapR M5 Edition.
(string) --
:type NewSupportedProducts: list
:param NewSupportedProducts:
Note
For Amazon EMR releases 3.x and 2.x. For Amazon EMR releases 4.x and greater, use Applications.
A list of strings that indicates third-party software to use with the job flow that accepts a user argument list. EMR accepts and forwards the argument list to the corresponding installation script as bootstrap action arguments. For more information, see 'Launch a Job Flow on the MapR Distribution for Hadoop' in the Amazon EMR Developer Guide . Supported values are:
'mapr-m3' - launch the cluster using MapR M3 Edition.
'mapr-m5' - launch the cluster using MapR M5 Edition.
'mapr' with the user arguments specifying '--edition,m3' or '--edition,m5' - launch the job flow using MapR M3 or M5 Edition respectively.
'mapr-m7' - launch the cluster using MapR M7 Edition.
'hunk' - launch the cluster with the Hunk Big Data Analytics Platform.
'hue'- launch the cluster with Hue installed.
'spark' - launch the cluster with Apache Spark installed.
'ganglia' - launch the cluster with the Ganglia Monitoring System installed.
(dict) --The list of supported product configurations which allow user-supplied arguments. EMR accepts these arguments and forwards them to the corresponding installation script as bootstrap action arguments.
Name (string) --The name of the product configuration.
Args (list) --The list of user-supplied arguments.
(string) --
:type Applications: list
:param Applications:
Note
Amazon EMR releases 4.x or later.
A list of applications for the cluster. Valid values are: 'Hadoop', 'Hive', 'Mahout', 'Pig', and 'Spark.' They are case insensitive.
(dict) --An application is any Amazon or third-party software that you can add to the cluster. This structure contains a list of strings that indicates the software to use with the cluster and accepts a user argument list. Amazon EMR accepts and forwards the argument list to the corresponding installation script as bootstrap action argument. For more information, see Using the MapR Distribution for Hadoop . Currently supported values are:
'mapr-m3' - launch the cluster using MapR M3 Edition.
'mapr-m5' - launch the cluster using MapR M5 Edition.
'mapr' with the user arguments specifying '--edition,m3' or '--edition,m5' - launch the cluster using MapR M3 or M5 Edition, respectively.
Note
In Amazon EMR releases 4.0 and greater, the only accepted parameter is the application name. To pass arguments to applications, you supply a configuration for each application.
Name (string) --The name of the application.
Version (string) --The version of the application.
Args (list) --Arguments for Amazon EMR to pass to the application.
(string) --
AdditionalInfo (dict) --This option is for advanced users only. This is meta information about third-party applications that third-party vendors use for testing purposes.
(string) --
(string) --
:type Configurations: list
:param Configurations:
Note
Amazon EMR releases 4.x or later.
The list of configurations supplied for the EMR cluster you are creating.
(dict) --
Note
Amazon EMR releases 4.x or later.
An optional configuration specification to be used when provisioning cluster instances, which can include configurations for applications and software bundled with Amazon EMR. A configuration consists of a classification, properties, and optional nested configurations. A classification refers to an application-specific configuration file. Properties are the settings you want to change in that file. For more information, see Configuring Applications .
Classification (string) --The classification within a configuration.
Configurations (list) --A list of additional configurations to apply within a configuration object.
Properties (dict) --A set of properties specified within a configuration classification.
(string) --
(string) --
:type VisibleToAllUsers: boolean
:param VisibleToAllUsers: Whether the cluster is visible to all IAM users of the AWS account associated with the cluster. If this value is set to true , all IAM users of that AWS account can view and (if they have the proper policy permissions set) manage the cluster. If it is set to false , only the IAM user that created the cluster can view and manage it.
:type JobFlowRole: string
:param JobFlowRole: Also called instance profile and EC2 role. An IAM role for an EMR cluster. The EC2 instances of the cluster assume this role. The default role is EMR_EC2_DefaultRole . In order to use the default role, you must have already created it using the CLI or console.
:type ServiceRole: string
:param ServiceRole: The IAM role that will be assumed by the Amazon EMR service to access AWS resources on your behalf.
:type Tags: list
:param Tags: A list of tags to associate with a cluster and propagate to Amazon EC2 instances.
(dict) --A key/value pair containing user-defined metadata that you can associate with an Amazon EMR resource. Tags make it easier to associate clusters in various ways, such as grouping clusters to track your Amazon EMR resource allocation costs. For more information, see Tagging Amazon EMR Resources .
Key (string) --A user-defined key, which is the minimum required information for a valid tag. For more information, see Tagging Amazon EMR Resources .
Value (string) --A user-defined value, which is optional in a tag. For more information, see Tagging Amazon EMR Resources .
:type SecurityConfiguration: string
:param SecurityConfiguration: The name of a security configuration to apply to the cluster.
:type AutoScalingRole: string
:param AutoScalingRole: An IAM role for automatic scaling policies. The default role is EMR_AutoScaling_DefaultRole . The IAM role provides permissions that the automatic scaling feature requires to launch and terminate EC2 instances in an instance group.
:type ScaleDownBehavior: string
:param ScaleDownBehavior: Specifies the way that individual Amazon EC2 instances terminate when an automatic scale-in activity occurs or an instance group is resized. TERMINATE_AT_INSTANCE_HOUR indicates that Amazon EMR terminates nodes at the instance-hour boundary, regardless of when the request to terminate the instance was submitted. This option is only available with Amazon EMR 5.1.0 and later and is the default for clusters created using that version. TERMINATE_AT_TASK_COMPLETION indicates that Amazon EMR blacklists and drains tasks from nodes before terminating the Amazon EC2 instances, regardless of the instance-hour boundary. With either behavior, Amazon EMR removes the least active nodes first and blocks instance termination if it could lead to HDFS corruption. TERMINATE_AT_TASK_COMPLETION available only in Amazon EMR version 4.1.0 and later, and is the default for versions of Amazon EMR earlier than 5.1.0.
:rtype: dict
:return: {
'JobFlowId': 'string'
}
"""
pass | 0.00447 |
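A minimal sketch of calling this operation through boto3, assuming a release-label (4.x+) cluster and that the default EMR roles already exist in the account; the region, release label, and instance settings are placeholders:
import boto3

emr = boto3.client("emr", region_name="us-east-1")
response = emr.run_job_flow(
    Name="example-cluster",
    ReleaseLabel="emr-5.20.0",
    Instances={
        "MasterInstanceType": "m4.large",
        "SlaveInstanceType": "m4.large",
        "InstanceCount": 3,
        "KeepJobFlowAliveWhenNoSteps": False,
        "TerminationProtected": False,
    },
    Applications=[{"Name": "Hadoop"}, {"Name": "Spark"}],
    JobFlowRole="EMR_EC2_DefaultRole",
    ServiceRole="EMR_DefaultRole",
    VisibleToAllUsers=True,
)
print(response["JobFlowId"])   # the new cluster (job flow) id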
def get_ordered_tokens_from_vocab(vocab: Vocab) -> List[str]:
"""
Returns the list of tokens in a vocabulary, ordered by increasing vocabulary id.
:param vocab: Input vocabulary.
:return: List of tokens.
"""
return [token for token, token_id in sorted(vocab.items(), key=lambda i: i[1])] | 0.009615 |
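A quick check of the ordering behaviour, using a plain dict in place of a real `Vocab` (the function only needs a token-to-id mapping with `.items()`):
vocab = {"<pad>": 0, "<unk>": 1, "the": 2, "cat": 3}
print(get_ordered_tokens_from_vocab(vocab))
# ['<pad>', '<unk>', 'the', 'cat']  -- tokens sorted by increasing id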
def _estimate_expenses(self, num_workers, reward):
''' Returns a tuple describing expenses:
amount paid to workers,
amount paid to Amazon (fee), and the total of the two'''
# fee structure changed 07.22.15:
# 20% for HITS with < 10 assignments
# 40% for HITS with >= 10 assignments
commission = 0.2
if float(num_workers) >= 10:
commission = 0.4
work = float(num_workers) * float(reward)
fee = work * commission
return (work, fee, work+fee) | 0.00396 |
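A worked example of the fee schedule above: 15 workers at $0.50 per assignment crosses the 10-assignment threshold, so the 40% commission applies (`recruiter` below is a hypothetical instance of the surrounding class).
work, fee, total = recruiter._estimate_expenses(num_workers=15, reward=0.50)
# work  = 15 * 0.50   = 7.50   (paid to workers)
# fee   = 7.50 * 0.40 = 3.00   (paid to Amazon)
# total = 7.50 + 3.00 = 10.50
print(work, fee, total)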
def refresh_file_dependent_actions(self):
"""Enable/disable file dependent actions
(only if dockwidget is visible)"""
if self.dockwidget and self.dockwidget.isVisible():
enable = self.get_current_editor() is not None
for action in self.file_dependent_actions:
action.setEnabled(enable) | 0.005634 |
def genl_register(ops):
"""Register Generic Netlink family backed cache.
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/genl/mngt.c#L241
Same as genl_register_family() but additionally registers the specified cache operations using
nl_cache_mngt_register() and associates it with the Generic Netlink family.
Positional arguments:
ops -- cache operations definition (nl_cache_ops class instance).
Returns:
0 on success or a negative error code.
"""
if ops.co_protocol != NETLINK_GENERIC:
return -NLE_PROTO_MISMATCH
if ops.co_hdrsize < GENL_HDRSIZE(0):
return -NLE_INVAL
if ops.co_genl is None:
return -NLE_INVAL
ops.co_genl.o_cache_ops = ops
ops.co_genl.o_hdrsize = ops.co_hdrsize - GENL_HDRLEN
ops.co_genl.o_name = ops.co_msgtypes[0].mt_name
ops.co_genl.o_id = ops.co_msgtypes[0].mt_id
ops.co_msg_parser = genl_msg_parser
err = genl_register_family(ops.co_genl)
if err < 0:
return err
return nl_cache_mngt_register(ops) | 0.001916 |
def __ensure_provisioning_alarm(table_name, table_key, gsi_name, gsi_key):
""" Ensure that provisioning alarm threshold is not exceeded
:type table_name: str
:param table_name: Name of the DynamoDB table
:type table_key: str
:param table_key: Table configuration option key name
:type gsi_name: str
:param gsi_name: Name of the GSI
:type gsi_key: str
:param gsi_key: Configuration option key name
"""
lookback_window_start = get_gsi_option(
table_key, gsi_key, 'lookback_window_start')
lookback_period = get_gsi_option(
table_key, gsi_key, 'lookback_period')
consumed_read_units_percent = gsi_stats.get_consumed_read_units_percent(
table_name, gsi_name, lookback_window_start, lookback_period)
consumed_write_units_percent = gsi_stats.get_consumed_write_units_percent(
table_name, gsi_name, lookback_window_start, lookback_period)
reads_upper_alarm_threshold = \
get_gsi_option(table_key, gsi_key, 'reads-upper-alarm-threshold')
reads_lower_alarm_threshold = \
get_gsi_option(table_key, gsi_key, 'reads-lower-alarm-threshold')
writes_upper_alarm_threshold = \
get_gsi_option(table_key, gsi_key, 'writes-upper-alarm-threshold')
writes_lower_alarm_threshold = \
get_gsi_option(table_key, gsi_key, 'writes-lower-alarm-threshold')
# Check upper alarm thresholds
upper_alert_triggered = False
upper_alert_message = []
if 0 < reads_upper_alarm_threshold <= consumed_read_units_percent:
upper_alert_triggered = True
upper_alert_message.append(
'{0} - GSI: {1} - Consumed Read Capacity {2:f}% '
'was greater than or equal to the upper alarm '
'threshold {3:f}%\n'.format(
table_name,
gsi_name,
consumed_read_units_percent,
reads_upper_alarm_threshold))
if 0 < writes_upper_alarm_threshold <= consumed_write_units_percent:
upper_alert_triggered = True
upper_alert_message.append(
'{0} - GSI: {1} - Consumed Write Capacity {2:f}% '
'was greater than or equal to the upper alarm '
'threshold {3:f}%\n'.format(
table_name,
gsi_name,
consumed_write_units_percent,
writes_upper_alarm_threshold))
# Check lower alarm thresholds
lower_alert_triggered = False
lower_alert_message = []
if (reads_lower_alarm_threshold > 0 and
consumed_read_units_percent < reads_lower_alarm_threshold):
lower_alert_triggered = True
lower_alert_message.append(
'{0} - GSI: {1} - Consumed Read Capacity {2:f}% '
'was below the lower alarm threshold {3:f}%\n'.format(
table_name,
gsi_name,
consumed_read_units_percent,
reads_lower_alarm_threshold))
if (writes_lower_alarm_threshold > 0 and
consumed_write_units_percent < writes_lower_alarm_threshold):
lower_alert_triggered = True
lower_alert_message.append(
'{0} - GSI: {1} - Consumed Write Capacity {2:f}% '
'was below the lower alarm threshold {3:f}%\n'.format(
table_name,
gsi_name,
consumed_write_units_percent,
writes_lower_alarm_threshold))
# Send alert if needed
if upper_alert_triggered:
logger.info(
'{0} - GSI: {1} - Will send high provisioning alert'.format(
table_name, gsi_name))
sns.publish_gsi_notification(
table_key,
gsi_key,
''.join(upper_alert_message),
['high-throughput-alarm'],
subject='ALARM: High Throughput for Table {0} - GSI: {1}'.format(
table_name, gsi_name))
elif lower_alert_triggered:
logger.info(
'{0} - GSI: {1} - Will send low provisioning alert'.format(
table_name, gsi_name))
sns.publish_gsi_notification(
table_key,
gsi_key,
''.join(lower_alert_message),
['low-throughput-alarm'],
subject='ALARM: Low Throughput for Table {0} - GSI: {1}'.format(
table_name, gsi_name))
else:
logger.debug(
'{0} - GSI: {1} - Throughput alarm thresholds not crossed'.format(
table_name, gsi_name)) | 0.000224 |
def geojson_handler(geojson, hType='map'):
"""Restructure a GeoJSON object in preparation to be added directly by add_map_data or add_data_set methods.
The geojson will be broken down to fit a specific Highcharts (highmaps) type, either map, mapline or mappoint.
Meta data in GeoJSON's properties object will be copied directly over to object['properties']
1. geojson is the map data (GeoJSON) to be converted
2. hType is the type of highmap types. "map" will return GeoJSON polygons and multipolygons.
"mapline" will return GeoJSON linestrings and multilinestrings.
"mappoint" will return GeoJSON points and multipoints.
default: "map"
"""
hType_dict = {
'map': ['polygon', 'multipolygon'],
'mapline': ['linestring', 'multilinestring'],
'mappoint': ['point', 'multipoint'],
}
oldlist = [x for x in geojson['features'] if x['geometry']['type'].lower() in hType_dict[hType]]
newlist = []
for each_dict in oldlist:
geojson_type = each_dict['geometry']['type'].lower()
if hType == 'mapline':
newlist.append(
{'name': each_dict['properties'].get('name', None),
'path': _coordinates_to_path(each_dict['geometry']['coordinates'], hType, geojson_type),
'properties': each_dict['properties'],
}
)
elif hType == 'map':
newlist.append(
{'name': each_dict['properties']['name'],
'path': _coordinates_to_path(each_dict['geometry']['coordinates'], hType, geojson_type),
'properties': each_dict['properties'],
}
)
elif hType == 'mappoint':
newlist.append(
{'name': each_dict['properties']['name'],
'x': each_dict['geometry']['coordinates'][0],
'y': -each_dict['geometry']['coordinates'][1],
'properties': each_dict['properties'],
}
)
return newlist | 0.011928 |
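A small usage sketch; the assumption is that this runs inside its python-highcharts module (the helper relies on the module-private `_coordinates_to_path`). The input is an ordinary GeoJSON FeatureCollection, and only features whose geometry matches the requested `hType` survive the filter:
geojson = {
    "type": "FeatureCollection",
    "features": [
        {
            "type": "Feature",
            "properties": {"name": "Example Area"},
            "geometry": {
                "type": "Polygon",
                "coordinates": [[[0, 0], [0, 1], [1, 1], [0, 0]]],
            },
        }
    ],
}
map_data = geojson_handler(geojson, hType="map")   # keeps polygons/multipolygons only
# -> [{'name': 'Example Area', 'path': <converted path>, 'properties': {...}}]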
def p_elseif_list(p):
'''elseif_list : empty
| elseif_list ELSEIF LPAREN expr RPAREN statement'''
if len(p) == 2:
p[0] = []
else:
p[0] = p[1] + [ast.ElseIf(p[4], p[6], lineno=p.lineno(2))] | 0.004255 |
def identify_factory(*extensions):
"""Factory function to create I/O identifiers for a set of extensions
The returned function is designed for use in the unified I/O registry
via the `astropy.io.registry.register_identifier` hook.
Parameters
----------
extensions : `str`
one or more file extension strings
Returns
-------
identifier : `callable`
an identifier function that tests whether an incoming file path
carries any of the given file extensions (using `str.endswith`)
"""
def identify(origin, filepath, fileobj, *args, **kwargs):
"""Identify the given extensions in a file object/path
"""
# pylint: disable=unused-argument
if (isinstance(filepath, string_types) and
filepath.endswith(extensions)):
return True
return False
return identify | 0.001126 |
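A hedged registration sketch using the astropy unified I/O registry mentioned in the docstring; the format name, container class, and extensions are placeholders:
from astropy.io import registry

class TimeSeriesTable(object):   # hypothetical container class
    pass

# Register an identifier so read()/write() can auto-detect files by extension.
registry.register_identifier("mytable", TimeSeriesTable,
                             identify_factory(".mytab", ".mytab.gz"))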
def remove_ifcfg_file(device_index='0'):
"""Removes the ifcfg file at the specified device index
and restarts the network service
:param device_index: (str) Device index, as a string (e.g. '0')
:return: None
:raises CommandError
"""
log = logging.getLogger(mod_logger + '.remove_ifcfg_file')
if not isinstance(device_index, basestring):
msg = 'device_index argument must be a string'
log.error(msg)
raise CommandError(msg)
network_script = '/etc/sysconfig/network-scripts/ifcfg-eth{d}'.format(d=device_index)
if not os.path.isfile(network_script):
log.info('File does not exist, nothing will be removed: {n}'.format(n=network_script))
return
# Remove the network config script
log.info('Attempting to remove file: {n}'.format(n=network_script))
try:
os.remove(network_script)
except(IOError, OSError):
_, ex, trace = sys.exc_info()
msg = 'There was a problem removing network script file: {n}\n{e}'.format(n=network_script, e=str(ex))
log.error(msg)
raise OSError, msg, trace
else:
log.info('Successfully removed file: {n}'.format(n=network_script))
# Restart the network service
log.info('Restarting the network service...')
try:
service_network_restart()
except CommandError:
_, ex, trace = sys.exc_info()
msg = 'There was a problem restarting the network service\n{e}'.format(e=str(ex))
log.error(msg)
raise OSError, msg, trace
else:
log.info('Successfully restarted the network service') | 0.003153 |
def keys(self, namespace, prefix=None, limit=None, offset=None):
"""Get keys from a namespace"""
params = [namespace]
query = 'SELECT key FROM gauged_keys WHERE namespace = %s'
if prefix is not None:
query += ' AND key LIKE %s'
params.append(prefix + '%')
if limit is not None:
query += ' LIMIT %s'
params.append(limit)
if offset is not None:
query += ' OFFSET %s'
params.append(offset)
cursor = self.cursor
cursor.execute(query, params)
return [key for key, in cursor] | 0.003263 |
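For reference, a sketch of the query this method builds for a prefixed, limited lookup (`store` is a hypothetical backend instance holding a cursor):
keys = store.keys('metrics', prefix='api.', limit=10)
# Executes: SELECT key FROM gauged_keys WHERE namespace = %s AND key LIKE %s LIMIT %s
# with params: ['metrics', 'api.%', 10]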
def on_user_status(
self=None,
filters=None,
group: int = 0
) -> callable:
"""Use this decorator to automatically register a function for handling user status updates.
This does the same thing as :meth:`add_handler` using the :class:`UserStatusHandler`.
Args:
filters (:obj:`Filters <pyrogram.Filters>`):
Pass one or more filters to allow only a subset of UserStatus updates to be passed to your function.
group (``int``, *optional*):
The group identifier, defaults to 0.
"""
def decorator(func: callable) -> Tuple[Handler, int]:
if isinstance(func, tuple):
func = func[0].callback
handler = pyrogram.UserStatusHandler(func, filters)
if isinstance(self, Filter):
return pyrogram.UserStatusHandler(func, self), group if filters is None else filters
if self is not None:
self.add_handler(handler, group)
return handler, group
return decorator | 0.006434 |
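A usage sketch following Pyrogram's decorator pattern, assuming an `app` client instance (the session name is a placeholder) and no filter:
import pyrogram

app = pyrogram.Client("my_account")

@app.on_user_status()
def status_changed(client, user_status):
    # Called whenever a contact's online status updates.
    print(user_status)

app.run()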
def xi2_from_mass1_mass2_spin2x_spin2y(mass1, mass2, spin2x, spin2y):
"""Returns the effective precession spin argument for the smaller mass.
This function assumes it's given spins of the secondary mass.
"""
q = q_from_mass1_mass2(mass1, mass2)
a1 = 2 + 3 * q / 2
a2 = 2 + 3 / (2 * q)
return a1 / (q**2 * a2) * chi_perp_from_spinx_spiny(spin2x, spin2y) | 0.002632 |
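Written out, the quantity returned above is the following (a sketch of the implemented formula, assuming `q_from_mass1_mass2` returns the mass ratio q = m1/m2 and `chi_perp_from_spinx_spiny` returns the in-plane spin magnitude):
\xi_2 = \frac{a_1}{q^2 a_2}\,\chi_{2\perp},
\qquad a_1 = 2 + \frac{3q}{2},
\qquad a_2 = 2 + \frac{3}{2q},
\qquad \chi_{2\perp} = \sqrt{\chi_{2x}^2 + \chi_{2y}^2}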
def busy(self):
"""Return if the connection is currently executing a query or is locked
by a session that still exists.
:rtype: bool
"""
if self.handle.isexecuting():
return True
elif self.used_by is None:
return False
return not self.used_by() is None | 0.005988 |
def set_geometry(im, width_height):
"""Rescale the image to the new geometry.
"""
width, height = width_height
if not width and not height:
return im
im_width, im_height = im.size
# Geometry match the current size?
if (width is None) or (im_width == width):
if (height is None) or (im_height == height):
return im
ratio = float(im_width) / im_height
if width and height:
new_width = width
new_height = int(ceil(width / ratio))
if new_height < height:
new_height = height
new_width = int(ceil(height * ratio))
elif height:
new_width = int(ceil(height * ratio))
new_height = height
else:
new_width = width
new_height = int(ceil(width / ratio))
im.resize(new_width, new_height)
box = get_box(new_width, new_height, width, height)
im.crop(*box, reset_coords=True)
return im | 0.00106 |
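A worked pass through the arithmetic above, for an 800x400 source filling a 200x200 box (sizes chosen so the ratios are exact):
thumb = set_geometry(im, (200, 200))   # `im` is an 800x400 image object with resize/crop methods
# ratio = 800 / 400 = 2.0
# width branch:  new_width = 200, new_height = ceil(200 / 2.0) = 100  -> shorter than 200
# so switch:     new_height = 200, new_width  = ceil(200 * 2.0) = 400
# resize to 400x200, then get_box(...) crops the horizontal overflow back to 200x200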
def H_iso(x,params):
""" Isochrone Hamiltonian = -GM/(b+sqrt(b**2+(r-r0)**2))"""
#r = (np.sqrt(np.sum(x[:3]**2))-params[2])**2
r = np.sum(x[:3]**2)
return 0.5*np.sum(x[3:]**2)-Grav*params[0]/(params[1]+np.sqrt(params[1]**2+r)) | 0.016529 |
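In symbols, the Hamiltonian evaluated by the active line of code (the shifted-radius variant is commented out) is, with r^2 = \sum_i x_i^2 and params = (M, b, ...):
H_{\mathrm{iso}}(\mathbf{x}, \mathbf{v}) = \frac{1}{2}\,|\mathbf{v}|^2 \;-\; \frac{G M}{b + \sqrt{b^2 + r^2}}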
def export_compact(self, filename, optimize=True, toco_compatible=False):
"""Create a self-contained inference-only graph and write final graph (in pb format) to disk.
Args:
filename (str): path to the output graph
optimize (bool): whether to use TensorFlow's `optimize_for_inference`
to prune and optimize the graph. This does not work on all types of graphs.
toco_compatible (bool): See TensorFlow's
`optimize_for_inference
<https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/tools/optimize_for_inference.py>`_
for details. Only available after TF 1.8.
"""
if toco_compatible:
assert optimize, "toco_compatible is only effective when optimize=True!"
self.graph = self.config._maybe_create_graph()
with self.graph.as_default():
input = PlaceholderInput()
input.setup(self.config.input_signature)
with PredictTowerContext(''):
self.config.tower_func(*input.get_input_tensors())
input_tensors = get_tensors_by_names(self.config.input_names)
output_tensors = get_tensors_by_names(self.config.output_names)
self.config.session_init._setup_graph()
# we cannot use "self.config.session_creator.create_session()" here since it finalizes the graph
sess = tfv1.Session(config=tfv1.ConfigProto(allow_soft_placement=True))
self.config.session_init._run_init(sess)
dtypes = [n.dtype for n in input_tensors]
# freeze variables to constants
frozen_graph_def = graph_util.convert_variables_to_constants(
sess,
self.graph.as_graph_def(),
[n.name[:-2] for n in output_tensors],
variable_names_whitelist=None,
variable_names_blacklist=None)
# prune unused nodes from graph
if optimize:
toco_args = () if get_tf_version_tuple() < (1, 8) else (toco_compatible, )
frozen_graph_def = optimize_for_inference_lib.optimize_for_inference(
frozen_graph_def,
[n.name[:-2] for n in input_tensors],
[n.name[:-2] for n in output_tensors],
[dtype.as_datatype_enum for dtype in dtypes],
*toco_args)
with gfile.FastGFile(filename, "wb") as f:
f.write(frozen_graph_def.SerializeToString())
logger.info("Output graph written to {}.".format(filename)) | 0.003785 |
def distinct(self):
"""
Only return distinct rows.
Returns a new query set with the distinct flag set.
"""
new_query_set = self.clone()
new_query_set.query.distinct = True
return new_query_set | 0.012605 |
def failed_login_limit_reached(self):
""" A boolean method to check for failed login limit being reached"""
login_limit = 10
if self.failed_logins and self.failed_logins >= login_limit:
return True
else:
return False | 0.007353 |
def connect_options_namespaced_service_proxy(self, name, namespace, **kwargs):
"""
connect OPTIONS requests to proxy of Service
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.connect_options_namespaced_service_proxy(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ServiceProxyOptions (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str path: Path is the part of URLs that include service endpoints, suffixes, and parameters to use for the current proxy request to service. For example, the whole request URL is http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. Path is _search?q=user:kimchy.
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.connect_options_namespaced_service_proxy_with_http_info(name, namespace, **kwargs)
else:
(data) = self.connect_options_namespaced_service_proxy_with_http_info(name, namespace, **kwargs)
return data | 0.004958 |
def augmentation_transform(self, data, label): # pylint: disable=arguments-differ
"""Override Transforms input data with specified augmentations."""
for aug in self.auglist:
data, label = aug(data, label)
return (data, label) | 0.01145 |
def extract_options(name):
"""
Extracts comparison options from a filename.
For example, ``Binarizer-SkipDim1`` means
the option *SkipDim1* is enabled.
``(1, 2)`` and ``(2,)`` are considered equal.
Available options:
* `'SkipDim1'`: reshape arrays by skipping 1-dimension: ``(1, 2)`` --> ``(2,)``
* `'OneOff'`: inputs come in a list and predictions are computed with one call per input,
rather than a single batched call
* ...
See function *dump_data_and_model* to get the full list.
"""
opts = name.replace("\\", "/").split("/")[-1].split('.')[0].split('-')
if len(opts) == 1:
return {}
else:
res = {}
for opt in opts[1:]:
if opt in ("SkipDim1", "OneOff", "NoProb", "Dec4", "Dec3", 'Out0', 'Dec2', 'Reshape', 'Opp'):
res[opt] = True
else:
raise NameError("Unable to parse option '{}'".format(opts[1:]))
return res | 0.00523 |
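A few quick checks of the filename parsing above, derived directly from the splitting logic:
print(extract_options("tests/Binarizer-SkipDim1.onnx"))      # {'SkipDim1': True}
print(extract_options("tests/Binarizer.onnx"))               # {} -- no options encoded
print(extract_options("LinearRegression-OneOff-Dec4.pkl"))   # {'OneOff': True, 'Dec4': True}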
def get_bench_api(self):
"""
Extend bench functionality with these new commands
:return: Dictionary
"""
# Extend bench functionality with these new commands
ret_dict = dict()
ret_dict["assertTraceDoesNotContain"] = asserts.assertTraceDoesNotContain
ret_dict["assertTraceContains"] = asserts.assertTraceContains
ret_dict["assertDutTraceDoesNotContain"] = self.assert_dut_trace_not_contains
ret_dict["assertDutTraceContains"] = self.assert_dut_trace_contains
ret_dict["assertTrue"] = asserts.assertTrue
ret_dict["assertFalse"] = asserts.assertFalse
ret_dict["assertNone"] = asserts.assertNone
ret_dict["assertNotNone"] = asserts.assertNotNone
ret_dict["assertEqual"] = asserts.assertEqual
ret_dict["assertNotEqual"] = asserts.assertNotEqual
ret_dict["assertJsonContains"] = asserts.assertJsonContains
return ret_dict | 0.004175 |
def index(self, prefix):
"""
Return the model index for a prefix.
"""
# Any web domain will be handled by the standard URLField.
if self.is_external_url_prefix(prefix):
prefix = 'http'
for i, urltype in enumerate(self._url_types):
if urltype.prefix == prefix:
return i
return None | 0.005305 |
def rewrite_elife_funding_awards(json_content, doi):
""" rewrite elife funding awards """
# remove a funding award
if doi == "10.7554/eLife.00801":
for i, award in enumerate(json_content):
if "id" in award and award["id"] == "par-2":
del json_content[i]
# add funding award recipient
if doi == "10.7554/eLife.04250":
recipients_for_04250 = [{"type": "person", "name": {"preferred": "Eric Jonas", "index": "Jonas, Eric"}}]
for i, award in enumerate(json_content):
if "id" in award and award["id"] in ["par-2", "par-3", "par-4"]:
if "recipients" not in award:
json_content[i]["recipients"] = recipients_for_04250
# add funding award recipient
if doi == "10.7554/eLife.06412":
recipients_for_06412 = [{"type": "person", "name": {"preferred": "Adam J Granger", "index": "Granger, Adam J"}}]
for i, award in enumerate(json_content):
if "id" in award and award["id"] == "par-1":
if "recipients" not in award:
json_content[i]["recipients"] = recipients_for_06412
return json_content | 0.002553 |
def prepare(self, context, stream_id):
"""Invoke prepare() of this custom grouping"""
self.grouping.prepare(context, self.source_comp_name, stream_id, self.task_ids) | 0.011561 |
def morph_cost(self) -> Optional["Cost"]:
""" This returns 150 minerals for OrbitalCommand instead of 550 """
# Fix for BARRACKSREACTOR which has tech alias [REACTOR] which has (0, 0) cost
if self.tech_alias is None or self.tech_alias[0] in {UnitTypeId.TECHLAB, UnitTypeId.REACTOR}:
return None
# Morphing a HIVE would have HATCHERY and LAIR in the tech alias - now subtract HIVE cost from LAIR cost instead of from HATCHERY cost
tech_alias_cost_minerals = max([self._game_data.units[tech_alias.value].cost.minerals for tech_alias in self.tech_alias])
tech_alias_cost_vespene = max([self._game_data.units[tech_alias.value].cost.vespene for tech_alias in self.tech_alias])
return Cost(
self._proto.mineral_cost - tech_alias_cost_minerals,
self._proto.vespene_cost - tech_alias_cost_vespene,
self._proto.build_time
) | 0.007431 |
async def auth_crypt(wallet_handle: int,
sender_vk: str,
recipient_vk: str,
msg: bytes) -> bytes:
"""
**** THIS FUNCTION WILL BE DEPRECATED USE pack_message INSTEAD ****
Encrypt a message by authenticated-encryption scheme.
Sender can encrypt a confidential message specifically for Recipient, using Sender's public key.
Using Recipient's public key, Sender can compute a shared secret key.
Using Sender's public key and his secret key, Recipient can compute the exact same shared secret key.
That shared secret key can be used to verify that the encrypted message was not tampered with,
before eventually decrypting it.
Note to use DID keys with this function you can call indy_key_for_did to get key id (verkey)
for specific DID.
:param wallet_handle: wallet handler (created by open_wallet).
:param sender_vk: id (verkey) of my key. The key must be created by calling indy_create_key or
indy_create_and_store_my_did
:param recipient_vk: id (verkey) of their key
    :param msg: a message to be encrypted
:return: encrypted message as an array of bytes
"""
logger = logging.getLogger(__name__)
logger.debug("auth_crypt: >>> wallet_handle: %r,sender_vk: %r, recipient_vk: %r, msg: %r",
wallet_handle,
sender_vk,
recipient_vk,
msg)
def transform_cb(arr_ptr: POINTER(c_uint8), arr_len: c_uint32):
return bytes(arr_ptr[:arr_len]),
if not hasattr(auth_crypt, "cb"):
logger.debug("auth_crypt: Creating callback")
auth_crypt.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, POINTER(c_uint8), c_uint32), transform_cb)
c_wallet_handle = c_int32(wallet_handle)
c_sender_vk = c_char_p(sender_vk.encode('utf-8'))
c_recipient_vk = c_char_p(recipient_vk.encode('utf-8'))
c_msg_len = c_uint32(len(msg))
res = await do_call('indy_crypto_auth_crypt',
c_wallet_handle,
c_sender_vk,
c_recipient_vk,
msg,
c_msg_len,
auth_crypt.cb)
logger.debug("auth_crypt: <<< res: %r", res)
return res | 0.003506 |
def multiscale_permutation_entropy(time_series, m, delay, scale):
"""Calculate the Multiscale Permutation Entropy
Args:
time_series: Time series for analysis
m: Order of permutation entropy
delay: Time delay
scale: Scale factor
Returns:
Vector containing Multiscale Permutation Entropy
Reference:
[1] Francesco Carlo Morabito et al. Multivariate Multi-Scale Permutation Entropy for
Complexity Analysis of Alzheimer’s Disease EEG. www.mdpi.com/1099-4300/14/7/1186
[2] http://www.mathworks.com/matlabcentral/fileexchange/37288-multiscale-permutation-entropy-mpe/content/MPerm.m
"""
mspe = []
for i in range(scale):
coarse_time_series = util_granulate_time_series(time_series, i + 1)
pe = permutation_entropy(coarse_time_series, order=m, delay=delay)
mspe.append(pe)
return mspe | 0.004435 |
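A usage sketch on a synthetic signal (assumes numpy and that the module's own permutation_entropy and util_granulate_time_series helpers are importable):

import numpy as np
signal = np.sin(np.linspace(0, 20 * np.pi, 2000)) + 0.1 * np.random.randn(2000)
mspe = multiscale_permutation_entropy(signal, m=3, delay=1, scale=5)
# mspe is a list of 5 values, one permutation entropy per coarse-graining scale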
def from_jd(jd: float, fmt: str = 'jd') -> datetime:
"""
Converts a Julian Date to a datetime object.
Algorithm is from Fliegel and van Flandern (1968)
Parameters
----------
jd: float
Julian Date as type specified in the string fmt
    fmt: str
        Format in which the Julian Date is given (defaults to 'jd')
    Returns
-------
dt: datetime
"""
jd, jdf = __from_format(jd, fmt)
l = jd+68569
n = 4*l//146097
l = l-(146097*n+3)//4
i = 4000*(l+1)//1461001
l = l-1461*i//4+31
j = 80*l//2447
k = l-2447*j//80
l = j//11
j = j+2-12*l
i = 100*(n-49)+i+l
year = int(i)
month = int(j)
day = int(k)
# in microseconds
frac_component = int(jdf * (1e6*24*3600))
hours = int(frac_component // (1e6*3600))
frac_component -= hours * 1e6*3600
minutes = int(frac_component // (1e6*60))
frac_component -= minutes * 1e6*60
seconds = int(frac_component // 1e6)
frac_component -= seconds*1e6
frac_component = int(frac_component)
dt = datetime(year=year, month=month, day=day,
hour=hours, minute=minutes, second=seconds, microsecond=frac_component)
return dt | 0.00524 |
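A quick sanity check against a well-known epoch (JD 2451545.0 is J2000.0, i.e. 2000-01-01 12:00 UT):

dt = from_jd(2451545.0, fmt='jd')
# (dt.year, dt.month, dt.day) == (2000, 1, 1); the hour component depends on how
# __from_format splits the fractional day, so it is not asserted here.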
def eval_pth(filename, sitedir, dest=None, imports=None):
'''
Evaluates a `.pth` file (including support for `import` statements), and
appends the result to the list *dest*. If *dest* is #None, it will fall
back to `sys.path`.
  If *imports* is specified, it must be a list. `import` statements will not
  be executed but will instead be appended to that list as tuples of
  (*filename*, *line*, *stmt*).
  Returns the *dest* list.
'''
if dest is None:
dest = sys.path
if not os.path.isfile(filename):
return
with open(filename, 'r') as fp:
for index, line in enumerate(fp):
if line.startswith('import'):
if imports is None:
exec_pth_import(filename, index+1, line)
else:
imports.append((filename, index+1, line))
else:
index = line.find('#')
        if index >= 0: line = line[:index]
line = line.strip()
if not os.path.isabs(line):
line = os.path.join(os.path.dirname(filename), line)
line = os.path.normpath(line)
if line and line not in dest:
dest.insert(0, line)
return dest | 0.011566 |
def reset_sequence(self, topic):
"""Reset the expected sequence number for a topic
If the topic is unknown, this does nothing. This behaviour is
useful when you have wildcard topics that only create queues
once they receive the first message matching the topic.
Args:
topic (string): The topic to reset the packet queue on
"""
if topic in self.queues:
self.queues[topic].reset() | 0.004329 |
def _publish_response(self, slug, message):
"""Publish a response message for a device
Args:
slug (string): The device slug that we are publishing on behalf of
message (dict): A set of key value pairs that are used to create the message
that is sent.
"""
resp_topic = self.topics.gateway_topic(slug, 'data/response')
self._logger.debug("Publishing response message: (topic=%s) (message=%s)", resp_topic, message)
self.client.publish(resp_topic, message) | 0.007366 |
def recarray(self):
"""Return a recarray from the (parsed) string."""
if self.records is None:
self.parse()
try:
# simple (should this also be subjected to convert.to_int64() ?)
return numpy.rec.fromrecords(self.records, names=self.names)
except ValueError:
# complicated because fromrecords cannot deal with records of lists
# Quick hack: use objects for lists etc (instead of building the proper
# data types (see docs for numpy.dtype , eg dtype('coord', (float, 3)) )
D = numpy.empty(len(self.records[0]), dtype=object) # number of fields from first record
types = numpy.array([map(type, r) for r in self.records]) # types of all fields
for icol, isSame in enumerate([numpy.all(col) for col in types.T]):
if isSame:
D[icol] = types[0][icol]
else:
D[icol] = object
dtype = numpy.dtype(zip(self.names, D))
# from numpy.rec.records
# TODO: this is not working properly yet; for instance, text fields
# are reduced to length 0 (<U0) and the final convert.to_int64 dies
# with '<U0'*** TypeError: TypeError('data type not understood',)
retval = numpy.array(self.records, dtype=dtype)
res = retval.view(numpy.recarray)
## res.dtype = numpy.dtype((numpy.rec.record, res.dtype)) # fails -- ARGH, this makes it a recarray
return convert.to_int64(res) | 0.005092 |
def Zabransky_quasi_polynomial_integral_over_T(T, Tc, a1, a2, a3, a4, a5, a6):
r'''Calculates the integral of liquid heat capacity over T using the
quasi-polynomial model developed in [1]_.
Parameters
----------
T : float
        Temperature [K]
    Tc : float
        Critical temperature of the fluid [K]
    a1-a6 : float
        Coefficients
Returns
-------
S : float
Difference in entropy from 0 K, [J/mol/K]
Notes
-----
The analytical integral was derived with Sympy. It requires the
Polylog(2,x) function, which is unimplemented in SciPy. A very accurate
numerical approximation was implemented as :obj:`thermo.utils.polylog2`.
Relatively slow due to the use of that special function.
Examples
--------
>>> S2 = Zabransky_quasi_polynomial_integral_over_T(300, 591.79, -3.12743,
... 0.0857315, 13.7282, 1.28971, 6.42297, 4.10989)
>>> S1 = Zabransky_quasi_polynomial_integral_over_T(200, 591.79, -3.12743,
... 0.0857315, 13.7282, 1.28971, 6.42297, 4.10989)
>>> S2 - S1
59.16997291893654
References
----------
.. [1] Zabransky, M., V. Ruzicka Jr, V. Majer, and Eugene S. Domalski.
Heat Capacity of Liquids: Critical Review and Recommended Values.
2 Volume Set. Washington, D.C.: Amer Inst of Physics, 1996.
'''
term = T - Tc
logT = log(T)
Tc2 = Tc*Tc
Tc3 = Tc2*Tc
return R*(a3*logT -a1*polylog2(T/Tc) - a2*(-logT + 0.5*log(term*term))
+ T*(T*(T*a6/(3.*Tc3) + a5/(2.*Tc2)) + a4/Tc)) | 0.005337 |
def mbar_objective_and_gradient(u_kn, N_k, f_k):
"""Calculates both objective function and gradient for MBAR.
Parameters
----------
u_kn : np.ndarray, shape=(n_states, n_samples), dtype='float'
The reduced potential energies, i.e. -log unnormalized probabilities
N_k : np.ndarray, shape=(n_states), dtype='int'
The number of samples in each state
f_k : np.ndarray, shape=(n_states), dtype='float'
The reduced free energies of each state
Returns
-------
obj : float
Objective function
grad : np.ndarray, dtype=float, shape=(n_states)
Gradient of objective function
Notes
-----
This objective function is essentially a doubly-summed partition function and is
quite sensitive to precision loss from both overflow and underflow. For optimal
    results, u_kn can be preconditioned by subtracting out an `n`-dependent
    vector.
    For optimal precision, the objective function uses math.fsum for the
    outermost sum and logsumexp for the inner sum.
The gradient is equation C6 in the JCP MBAR paper; the objective
function is its integral.
"""
u_kn, N_k, f_k = validate_inputs(u_kn, N_k, f_k)
log_denominator_n = logsumexp(f_k - u_kn.T, b=N_k, axis=1)
log_numerator_k = logsumexp(-log_denominator_n - u_kn, axis=1)
grad = -1 * N_k * (1.0 - np.exp(f_k + log_numerator_k))
obj = math.fsum(log_denominator_n) - N_k.dot(f_k)
return obj, grad | 0.002699 |
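A minimal sketch with synthetic inputs (assumes numpy plus the module's validate_inputs and logsumexp helpers are available):

import numpy as np
n_states, n_samples = 3, 60
u_kn = np.random.rand(n_states, n_samples)   # reduced potential energies
N_k = np.array([30, 20, 10])                 # samples drawn from each state
f_k = np.zeros(n_states)                     # trial reduced free energies
obj, grad = mbar_objective_and_gradient(u_kn, N_k, f_k)
# grad.shape == (3,); at the self-consistent f_k the gradient vanishes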
def get_page_as_pdf(self, page_id):
"""
        Export a page as a PDF file using the standard PDF exporter
:param page_id: Page ID
:return: PDF File
"""
headers = self.form_token_headers
url = 'spaces/flyingpdf/pdfpageexport.action?pageId={pageId}'.format(pageId=page_id)
return self.get(url, headers=headers, not_json_response=True) | 0.008174 |
def search_onfail_requisites(sid, highstate):
'''
For a particular low chunk, search relevant onfail related states
'''
onfails = []
if '_|-' in sid:
st = salt.state.split_low_tag(sid)
else:
st = {'__id__': sid}
for fstate, fchunks in six.iteritems(highstate):
if fstate == st['__id__']:
continue
else:
for mod_, fchunk in six.iteritems(fchunks):
if (
not isinstance(mod_, six.string_types) or
mod_.startswith('__')
):
continue
else:
if not isinstance(fchunk, list):
continue
else:
                        # by default onfail will fail, but you can
                        # set onfail_stop: False to prevent the highstate
                        # from stopping if you handle it
onfail_handled = False
for fdata in fchunk:
if not isinstance(fdata, dict):
continue
onfail_handled = (fdata.get('onfail_stop', True)
is False)
if onfail_handled:
break
if not onfail_handled:
continue
for fdata in fchunk:
if not isinstance(fdata, dict):
continue
for knob, fvalue in six.iteritems(fdata):
if knob != 'onfail':
continue
for freqs in fvalue:
for fmod, fid in six.iteritems(freqs):
if not (
fid == st['__id__'] and
fmod == st.get('state', fmod)
):
continue
onfails.append((fstate, mod_, fchunk))
return onfails | 0.000444 |
def qualify(workers, qualification, value, by_name, notify, sandbox):
"""Assign a qualification to 1 or more workers"""
if not (workers and qualification and value):
raise click.BadParameter(
"Must specify a qualification ID, value/score, and at least one worker ID"
)
mturk = _mturk_service_from_config(sandbox)
if by_name:
result = mturk.get_qualification_type_by_name(qualification)
if result is None:
raise click.BadParameter(
'No qualification with name "{}" exists.'.format(qualification)
)
qid = result["id"]
else:
qid = qualification
click.echo(
"Assigning qualification {} with value {} to {} worker{}...".format(
qid, value, len(workers), "s" if len(workers) > 1 else ""
)
)
for worker in workers:
if mturk.set_qualification_score(qid, worker, int(value), notify=notify):
click.echo("{} OK".format(worker))
# print out the current set of workers with the qualification
results = list(mturk.get_workers_with_qualification(qid))
click.echo("{} workers with qualification {}:".format(len(results), qid))
for score, count in Counter([r["score"] for r in results]).items():
click.echo("{} with value {}".format(count, score)) | 0.00224 |
def transformer_tpu_range(rhp):
"""Small range of hyperparameters."""
# After starting from base, set intervals for some parameters.
rhp.set_float("learning_rate", 0.3, 3.0, scale=rhp.LOG_SCALE)
rhp.set_discrete("learning_rate_warmup_steps",
[1000, 2000, 4000, 8000, 16000])
rhp.set_float("initializer_gain", 0.5, 2.0)
rhp.set_float("optimizer_adam_beta1", 0.85, 0.95)
rhp.set_float("optimizer_adam_beta2", 0.97, 0.99)
rhp.set_float("weight_decay", 0.0, 2.0) | 0.018256 |
def long_list_to_word(val_list, big_endian=True):
"""Long list (32 bits int) to word list (16 bits int)
By default long_list_to_word() use big endian order. For use little endian, set
big_endian param to False.
:param val_list: list of 32 bits int value
:type val_list: list
:param big_endian: True for big endian/False for little (optional)
:type big_endian: bool
:returns: list of 16 bits int value
:rtype: list
"""
    # allocate list for the 16-bit words
    word_list = list()
    # split each 32-bit value into two 16-bit words
    for item in val_list:
        if big_endian:
            word_list.append(item >> 16)
            word_list.append(item & 0xffff)
        else:
            word_list.append(item & 0xffff)
            word_list.append(item >> 16)
    # return the word list
return word_list | 0.002193 |
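For instance, a single 32-bit value splits into two registers:

long_list_to_word([0x12345678])                    # -> [0x1234, 0x5678]
long_list_to_word([0x12345678], big_endian=False)  # -> [0x5678, 0x1234]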
def hooks(self, project):
""" Look up the urls we need to post to"""
return self.get_queryset().filter(
Q(project=None) |
Q(project=project)
).distinct('url') | 0.009662 |
def add_it(workbench, file_list, labels):
"""Add the given file_list to workbench as samples, also add them as nodes.
Args:
workbench: Instance of Workbench Client.
file_list: list of files.
labels: labels for the nodes.
Returns:
A list of md5s.
"""
md5s = []
for filename in file_list:
if filename != '.DS_Store':
with open(filename, 'rb') as pe_file:
base_name = os.path.basename(filename)
md5 = workbench.store_sample(pe_file.read(), base_name, 'exe')
workbench.add_node(md5, md5[:6], labels)
md5s.append(md5)
return md5s | 0.00149 |
def generate_threshold_mask(hist):
'''Masking array elements when equal 0.0 or greater than 10 times the median
Parameters
----------
hist : array_like
Input data.
Returns
-------
masked array
Returns copy of the array with masked elements.
'''
masked_array = np.ma.masked_values(hist, 0)
masked_array = np.ma.masked_greater(masked_array, 10 * np.ma.median(hist))
logging.info('Masking %d pixel(s)', np.ma.count_masked(masked_array))
return np.ma.getmaskarray(masked_array) | 0.003711 |
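A small illustration of the masking rule (values chosen arbitrarily):

import numpy as np
hist = np.array([0., 1., 2., 3., 100.])
mask = generate_threshold_mask(hist)
# the median is 2.0, so 0.0 and 100.0 (> 10 * 2.0) are masked:
# mask -> array([ True, False, False, False,  True])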
def _assert_gcs_files(files):
  Check that files start with gs://.
Args:
files: string to file path, or list of file paths.
"""
if sys.version_info.major > 2:
string_type = (str, bytes) # for python 3 compatibility
else:
string_type = basestring # noqa
if isinstance(files, string_type):
files = [files]
for f in files:
if f is not None and not f.startswith('gs://'):
raise ValueError('File %s is not a gcs path' % f) | 0.015217 |
def _parse_pool_options(options):
"""Parse connection pool options."""
max_pool_size = options.get('maxpoolsize', common.MAX_POOL_SIZE)
min_pool_size = options.get('minpoolsize', common.MIN_POOL_SIZE)
default_idle_seconds = common.validate_timeout_or_none(
'maxidletimems', common.MAX_IDLE_TIME_MS)
max_idle_time_seconds = options.get('maxidletimems', default_idle_seconds)
if max_pool_size is not None and min_pool_size > max_pool_size:
raise ValueError("minPoolSize must be smaller or equal to maxPoolSize")
connect_timeout = options.get('connecttimeoutms', common.CONNECT_TIMEOUT)
socket_keepalive = options.get('socketkeepalive', True)
socket_timeout = options.get('sockettimeoutms')
wait_queue_timeout = options.get('waitqueuetimeoutms')
wait_queue_multiple = options.get('waitqueuemultiple')
event_listeners = options.get('event_listeners')
appname = options.get('appname')
driver = options.get('driver')
compression_settings = CompressionSettings(
options.get('compressors', []),
options.get('zlibcompressionlevel', -1))
ssl_context, ssl_match_hostname = _parse_ssl_options(options)
return PoolOptions(max_pool_size,
min_pool_size,
max_idle_time_seconds,
connect_timeout, socket_timeout,
wait_queue_timeout, wait_queue_multiple,
ssl_context, ssl_match_hostname, socket_keepalive,
_EventListeners(event_listeners),
appname,
driver,
compression_settings) | 0.0006 |
def get_application_modules(self):
"""
Instantiate all application modules (i.e.
:class:`~admin_tools.dashboard.modules.AppList`,
:class:`~fluent_dashboard.modules.AppIconList` and
:class:`~fluent_dashboard.modules.CmsAppIconList`)
for use in the dashboard.
"""
modules = []
appgroups = get_application_groups()
for title, kwargs in appgroups:
AppListClass = get_class(kwargs.pop('module')) # e.g. CmsAppIconlist, AppIconlist, Applist
modules.append(AppListClass(title, **kwargs))
return modules | 0.004902 |
def map_indices_child2parent(child, child_indices):
"""Map child RTDCBase event indices to parent RTDCBase
Parameters
----------
child: RTDC_Hierarchy
hierarchy child with `child_indices`
child_indices: 1d ndarray
child indices to map
Returns
-------
parent_indices: 1d ndarray
hierarchy parent indices
"""
parent = child.hparent
# filters
pf = parent.filter.all
# indices corresponding to all child events
idx = np.where(pf)[0] # True means present in the child
# indices corresponding to selected child events
parent_indices = idx[child_indices]
return parent_indices | 0.001506 |
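The mapping itself is just fancy indexing on the parent filter; a standalone sketch:

import numpy as np
pf = np.array([True, False, True, True, False, False, False, True])  # parent filter
idx = np.where(pf)[0]             # events that made it into the child: [0, 2, 3, 7]
child_indices = np.array([1, 3])
idx[child_indices]                # -> array([2, 7]), i.e. the parent indices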
def get_resource_dirs(resource):
"""Returns a list of all known resource dirs for a given resource.
:param str resource:
Name of the resource (e.g. "themes")
:return:
A list of resource dirs
"""
dirs = [
os.path.join(dir, resource) for dir in
itertools.chain(GLib.get_system_data_dirs(), GUAKE_THEME_DIR, GLib.get_user_data_dir())
]
dirs += [os.path.join(os.path.expanduser("~"), ".{}".format(resource))]
return [Path(dir) for dir in dirs if os.path.isdir(dir)] | 0.003795 |
def instance(self, other):
'''Returns an instance Key, by appending a name to the namespace.'''
assert '/' not in str(other)
return Key(str(self) + ':' + str(other)) | 0.00565 |
def get_response_headers(self, *args, **kwargs):
"""
A convenience method for obtaining the headers that were sent from the
S3 server.
The AWS S3 API depends upon setting headers. This method is used by the
head_object API call for getting a S3 object's metadata.
"""
if self.response_headers:
return self._unpack_headers(self.response_headers) | 0.004843 |
def add_attribute_listener(self, attr_name, observer):
"""
Add an attribute listener callback.
The callback function (``observer``) is invoked differently depending on the *type of attribute*.
Attributes that represent sensor values or which are used to monitor connection status are updated
whenever a message is received from the vehicle. Attributes which reflect vehicle "state" are
only updated when their values change (for example :py:attr:`Vehicle.system_status`,
:py:attr:`Vehicle.armed`, and :py:attr:`Vehicle.mode`).
The callback can be removed using :py:func:`remove_attribute_listener`.
.. note::
The :py:func:`on_attribute` decorator performs the same operation as this method, but with
a more elegant syntax. Use ``add_attribute_listener`` by preference if you will need to remove
the observer.
The argument list for the callback is ``observer(object, attr_name, attribute_value)``:
* ``self`` - the associated :py:class:`Vehicle`. This may be compared to a global vehicle handle
to implement vehicle-specific callback handling (if needed).
* ``attr_name`` - the attribute name. This can be used to infer which attribute has triggered
if the same callback is used for watching several attributes.
* ``value`` - the attribute value (so you don't need to re-query the vehicle object).
The example below shows how to get callbacks for (global) location changes:
.. code:: python
#Callback to print the location in global frame
def location_callback(self, attr_name, msg):
print "Location (Global): ", msg
#Add observer for the vehicle's current location
vehicle.add_attribute_listener('global_frame', location_callback)
See :ref:`vehicle_state_observe_attributes` for more information.
:param String attr_name: The name of the attribute to watch (or '*' to watch all attributes).
:param observer: The callback to invoke when a change in the attribute is detected.
"""
listeners_for_attr = self._attribute_listeners.get(attr_name)
if listeners_for_attr is None:
listeners_for_attr = []
self._attribute_listeners[attr_name] = listeners_for_attr
if observer not in listeners_for_attr:
listeners_for_attr.append(observer) | 0.006066 |
def make_server(host, port, app=None, threaded=False, processes=1,
request_handler=None, passthrough_errors=False,
ssl_context=None):
"""Create a new server instance that is either threaded, or forks
or just processes one request after another.
"""
if threaded and processes > 1:
raise ValueError("cannot have a multithreaded and "
"multi process server.")
elif threaded:
return ThreadedWSGIServer(host, port, app, request_handler,
passthrough_errors, ssl_context)
elif processes > 1:
return ForkingWSGIServer(host, port, app, processes, request_handler,
passthrough_errors, ssl_context)
else:
return BaseWSGIServer(host, port, app, request_handler,
passthrough_errors, ssl_context) | 0.001116 |
def get_best(self):
"""Return best fitted distribution and its parameters
a dictionary with one key (the distribution name) and its parameters
"""
        # sort the fitted distributions by sum-of-squares error and take the first (best) one
name = self.df_errors.sort_values('sumsquare_error').iloc[0].name
params = self.fitted_param[name]
return {name: params} | 0.005063 |
def _process_executor_events(self, simple_dag_bag, session=None):
"""
Respond to executor events.
"""
# TODO: this shares quite a lot of code with _manage_executor_state
TI = models.TaskInstance
for key, state in list(self.executor.get_event_buffer(simple_dag_bag.dag_ids)
.items()):
dag_id, task_id, execution_date, try_number = key
self.log.info(
"Executor reports execution of %s.%s execution_date=%s "
"exited with status %s for try_number %s",
dag_id, task_id, execution_date, state, try_number
)
if state == State.FAILED or state == State.SUCCESS:
qry = session.query(TI).filter(TI.dag_id == dag_id,
TI.task_id == task_id,
TI.execution_date == execution_date)
ti = qry.first()
if not ti:
self.log.warning("TaskInstance %s went missing from the database", ti)
continue
# TODO: should we fail RUNNING as well, as we do in Backfills?
if ti.try_number == try_number and ti.state == State.QUEUED:
msg = ("Executor reports task instance {} finished ({}) "
"although the task says its {}. Was the task "
"killed externally?".format(ti, state, ti.state))
self.log.error(msg)
try:
simple_dag = simple_dag_bag.get_dag(dag_id)
dagbag = models.DagBag(simple_dag.full_filepath)
dag = dagbag.get_dag(dag_id)
ti.task = dag.get_task(task_id)
ti.handle_failure(msg)
except Exception:
self.log.error("Cannot load the dag bag to handle failure for %s"
". Setting task to FAILED without callbacks or "
"retries. Do you have enough resources?", ti)
ti.state = State.FAILED
session.merge(ti)
session.commit() | 0.003456 |
def write_image(self, filename="image.png", magnification=1,
image_format="png"):
"""
Save render window to an image.
Arguments:
filename:
filename to save to. Defaults to image.png.
magnification:
magnification. Use it to render high res images.
image_format:
choose between jpeg, png. Png is the default.
"""
render_large = vtk.vtkRenderLargeImage()
render_large.SetInput(self.ren)
if image_format == "jpeg":
writer = vtk.vtkJPEGWriter()
writer.SetQuality(80)
else:
writer = vtk.vtkPNGWriter()
render_large.SetMagnification(magnification)
writer.SetFileName(filename)
writer.SetInputConnection(render_large.GetOutputPort())
self.ren_win.Render()
writer.Write()
del render_large | 0.003209 |
def generate_project(args):
"""New project."""
# Project templates path
src = os.path.join(dirname(abspath(__file__)), 'project')
project_name = args.get('<project>')
if not project_name:
logger.warning('Project name cannot be empty.')
return
# Destination project path
dst = os.path.join(os.getcwd(), project_name)
if os.path.isdir(dst):
logger.warning('Project directory already exists.')
return
logger.info('Start generating project files.')
_mkdir_p(dst)
for src_dir, sub_dirs, filenames in os.walk(src):
# Build and create destination directory path
relative_path = src_dir.split(src)[1].lstrip(os.path.sep)
dst_dir = os.path.join(dst, relative_path)
if src != src_dir:
_mkdir_p(dst_dir)
# Copy, rewrite and move project files
for filename in filenames:
if filename in ['development.py', 'production.py']:
continue
src_file = os.path.join(src_dir, filename)
dst_file = os.path.join(dst_dir, filename)
if filename.endswith(REWRITE_FILE_EXTS):
_rewrite_and_copy(src_file, dst_file, project_name)
else:
shutil.copy(src_file, dst_file)
logger.info("New: %s" % dst_file)
if filename in ['development_sample.py', 'production_sample.py']:
dst_file = os.path.join(dst_dir, "%s.py" % filename.split('_')[0])
_rewrite_and_copy(src_file, dst_file, project_name)
logger.info("New: %s" % dst_file)
logger.info('Finish generating project files.') | 0.001198 |
def download_ncbi_associations(gene2go="gene2go", prt=sys.stdout, loading_bar=True):
"""Download associations from NCBI, if necessary"""
# Download: ftp://ftp.ncbi.nlm.nih.gov/gene/DATA/gene2go.gz
gzip_file = "{GENE2GO}.gz".format(GENE2GO=gene2go)
if not os.path.isfile(gene2go):
file_remote = "ftp://ftp.ncbi.nlm.nih.gov/gene/DATA/{GZ}".format(
GZ=os.path.basename(gzip_file))
dnld_file(file_remote, gene2go, prt, loading_bar)
else:
if prt is not None:
prt.write(" EXISTS: {FILE}\n".format(FILE=gene2go))
return gene2go | 0.003373 |
def _get_training_data(vrn_files):
"""Retrieve training data, returning an empty set of information if not available.
"""
out = {"SNP": [], "INDEL": []}
# SNPs
for name, train_info in [("train_hapmap", "known=false,training=true,truth=true,prior=15.0"),
("train_omni", "known=false,training=true,truth=true,prior=12.0"),
("train_1000g", "known=false,training=true,truth=false,prior=10.0"),
("dbsnp", "known=true,training=false,truth=false,prior=2.0")]:
if name not in vrn_files:
return {}
else:
out["SNP"].append((name.replace("train_", ""), train_info, vrn_files[name]))
# Indels
if "train_indels" in vrn_files:
out["INDEL"].append(("mills", "known=true,training=true,truth=true,prior=12.0",
vrn_files["train_indels"]))
else:
return {}
return out | 0.008377 |
def get_key_state(self, status, state_dict):
"""Returns the key associated with the dict. """
for key, val in state_dict.items():
if val == status:
return key | 0.009901 |
def extract_arguments(text):
"""
Returns the argument after the command.
Examples:
extract_arguments("/get name"): 'name'
extract_arguments("/get"): ''
extract_arguments("/get@botName name"): 'name'
:param text: String to extract the arguments from a command
:return: the arguments if `text` is a command (according to is_command), else None.
"""
    regexp = re.compile(r"/\w*(@\w*)*\s*([\s\S]*)", re.IGNORECASE)
result = regexp.match(text)
return result.group(2) if is_command(text) else None | 0.018315 |
def prt_error_summary(self, fout_err):
"""Print a summary about the GAF file that was read."""
# Get summary of error types and their counts
errcnts = []
if self.ignored:
errcnts.append(" {N:9,} IGNORED associations\n".format(N=len(self.ignored)))
if self.illegal_lines:
for err_name, errors in self.illegal_lines.items():
errcnts.append(" {N:9,} {ERROR}\n".format(N=len(errors), ERROR=err_name))
# Save error details into a log file
fout_log = self._wrlog_details_illegal_gaf(fout_err, errcnts)
sys.stdout.write(" WROTE GAF ERROR LOG: {LOG}:\n".format(LOG=fout_log))
for err_cnt in errcnts:
sys.stdout.write(err_cnt) | 0.00672 |
def multi_index_df_to_component_dfs(multi_index_df, rid="rid", cid="cid"):
""" Convert a multi-index df into 3 component dfs. """
# Id level of the multiindex will become the index
rids = list(multi_index_df.index.get_level_values(rid))
cids = list(multi_index_df.columns.get_level_values(cid))
    # It's possible that the index and/or columns of multi_index_df are not
    # actually MultiIndex; check for this, and also whether they have more than one level (python3)
if isinstance(multi_index_df.index, pd.MultiIndex):
# check if there are more than one levels in index (python3)
if len(multi_index_df.index.names) > 1:
# If so, drop rid because it won't go into the body of the metadata
mi_df_index = multi_index_df.index.droplevel(rid)
# Names of the multiindex levels become the headers
rhds = list(mi_df_index.names)
# Assemble metadata values
row_metadata = np.array([mi_df_index.get_level_values(level).values for level in list(rhds)]).T
# if there is one level in index (python3), then rhds and row metadata should be empty
else:
rhds = []
row_metadata = []
# If the index is not multi-index, then rhds and row metadata should be empty
else:
rhds = []
row_metadata = []
# Check if columns of multi_index_df are in fact multi-index
if isinstance(multi_index_df.columns, pd.MultiIndex):
# Check if there are more than one levels in columns(python3)
if len(multi_index_df.columns.names) > 1:
# If so, drop cid because it won't go into the body of the metadata
mi_df_columns = multi_index_df.columns.droplevel(cid)
# Names of the multiindex levels become the headers
chds = list(mi_df_columns.names)
# Assemble metadata values
col_metadata = np.array([mi_df_columns.get_level_values(level).values for level in list(chds)]).T
        # If there is one level in columns (python3), then chds and col metadata should be empty
else:
chds = []
col_metadata = []
    # If the columns are not multi-index, then chds and col metadata should be empty
else:
chds = []
col_metadata = []
# Create component dfs
row_metadata_df = pd.DataFrame.from_records(row_metadata, index=pd.Index(rids, name="rid"), columns=pd.Index(rhds, name="rhd"))
col_metadata_df = pd.DataFrame.from_records(col_metadata, index=pd.Index(cids, name="cid"), columns=pd.Index(chds, name="chd"))
data_df = pd.DataFrame(multi_index_df.values, index=pd.Index(rids, name="rid"), columns=pd.Index(cids, name="cid"))
return data_df, row_metadata_df, col_metadata_df | 0.003951 |
def update(ctx, migrate=False):
'''Perform a development update'''
msg = 'Update all dependencies'
if migrate:
msg += ' and migrate data'
header(msg)
info('Updating Python dependencies')
lrun('pip install -r requirements/develop.pip')
lrun('pip install -e .')
info('Updating JavaScript dependencies')
lrun('npm install')
if migrate:
info('Migrating database')
lrun('udata db migrate') | 0.002232 |
def dictionary(
element_name, # type: Text
children, # type: List[Processor]
required=True, # type: bool
alias=None, # type: Optional[Text]
hooks=None # type: Optional[Hooks]
):
# type: (...) -> RootProcessor
"""
Create a processor for dictionary values.
:param element_name: Name of the XML element containing the dictionary value. Can also be
specified using supported XPath syntax.
:param children: List of declxml processor objects for processing the children
contained within the dictionary.
:param required: Indicates whether the value is required when parsing and serializing.
:param alias: If specified, then this is used as the name of the value when read from
XML. If not specified, then the element_name is used as the name of the value.
:param hooks: A Hooks object.
:return: A declxml processor object.
"""
processor = _Dictionary(element_name, children, required, alias)
return _processor_wrap_if_hooks(processor, hooks) | 0.005698 |
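A hypothetical sketch of declaring a processor for a small <book> element, assuming companion leaf processors such as string() and integer() exist in the same module:

book = dictionary('book', [
    string('title'),      # <title>...</title>
    integer('pages'),     # <pages>...</pages>
])
# The resulting processor can then be handed to the library's parse/serialize entry points.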
def get_cache_data(request):
if 'init' in request.POST:
init = bool(float(request.POST['init']))
else:
init = False
active_variables = []
if 'variables[]' in request.POST:
active_variables = request.POST.getlist('variables[]')
"""
else:
active_variables = list(
GroupDisplayPermission.objects.filter(hmi_group__in=request.user.groups.iterator()).values_list(
'charts__variables', flat=True))
active_variables += list(
GroupDisplayPermission.objects.filter(hmi_group__in=request.user.groups.iterator()).values_list(
'xy_charts__variables', flat=True))
active_variables += list(
GroupDisplayPermission.objects.filter(hmi_group__in=request.user.groups.iterator()).values_list(
'control_items__variable', flat=True))
active_variables += list(
GroupDisplayPermission.objects.filter(hmi_group__in=request.user.groups.iterator()).values_list(
'custom_html_panels__variables', flat=True))
active_variables = list(set(active_variables))
"""
active_variable_properties = []
if 'variable_properties[]' in request.POST:
active_variable_properties = request.POST.getlist('variable_properties[]')
timestamp_from = time.time()
if 'timestamp_from' in request.POST:
timestamp_from = float(request.POST['timestamp_from']) / 1000.0
timestamp_to = time.time()
if 'timestamp_to' in request.POST:
timestamp_to = min(timestamp_to, float(request.POST['timestamp_to']) / 1000.0)
if timestamp_to == 0:
timestamp_to = time.time()
if timestamp_from == 0:
        timestamp_from = time.time() - 60
if timestamp_to - timestamp_from > 120 * 60:
timestamp_from = timestamp_to - 120 * 60
#if not init:
#timestamp_to = min(timestamp_from + 30, timestamp_to)
if len(active_variables) > 0:
data = RecordedData.objects.db_data(
variable_ids=active_variables,
time_min=timestamp_from,
time_max=timestamp_to,
time_in_ms=True,
query_first_value=init)
else:
data = None
if data is None:
data = {}
data['variable_properties'] = {}
for item in VariableProperty.objects.filter(pk__in=active_variable_properties):
data['variable_properties'][item.pk] = item.value()
data["server_time"] = time.time() * 1000
return HttpResponse(json.dumps(data), content_type='application/json') | 0.002346 |
def get(self):
"""
Retrieves all properties again for the collection and
sets the attributes.
"""
data = self.resource(self.name).properties.get()
self.set_data(**data)
return data | 0.008097 |
def sub_menu_pressed(self, widget, event):
"""
        Collects the full assistant path and the relevant
        information from the GUI
"""
for index, data in enumerate(self.dev_assistant_path):
index += 1
if settings.SUBASSISTANT_N_STRING.format(index) in self.kwargs:
del self.kwargs[settings.SUBASSISTANT_N_STRING.format(index)]
self.kwargs[settings.SUBASSISTANT_N_STRING.format(index)] = data
self.kwargs['subassistant_0'] = self.get_current_main_assistant().name
self._open_path_window() | 0.00335 |
def recv_msg(self):
'''message receive routine'''
if self._index >= self._count:
return None
m = self._msgs[self._index]
self._index += 1
self.percent = (100.0 * self._index) / self._count
self.messages[m.get_type()] = m
return m | 0.006734 |
def _execute_after_prepare(self, host, connection, pool, response):
"""
Handle the response to our attempt to prepare a statement.
If it succeeded, run the original query again against the same host.
"""
if pool:
pool.return_connection(connection)
if self._final_exception:
return
if isinstance(response, ResultMessage):
if response.kind == RESULT_KIND_PREPARED:
if self.prepared_statement:
# result metadata is the only thing that could have
# changed from an alter
(_, _, _,
self.prepared_statement.result_metadata,
new_metadata_id) = response.results
if new_metadata_id is not None:
self.prepared_statement.result_metadata_id = new_metadata_id
# use self._query to re-use the same host and
# at the same time properly borrow the connection
request_id = self._query(host)
if request_id is None:
# this host errored out, move on to the next
self.send_request()
else:
self._set_final_exception(ConnectionException(
"Got unexpected response when preparing statement "
"on host %s: %s" % (host, response)))
elif isinstance(response, ErrorMessage):
if hasattr(response, 'to_exception'):
self._set_final_exception(response.to_exception())
else:
self._set_final_exception(response)
elif isinstance(response, ConnectionException):
log.debug("Connection error when preparing statement on host %s: %s",
host, response)
# try again on a different host, preparing again if necessary
self._errors[host] = response
self.send_request()
else:
self._set_final_exception(ConnectionException(
"Got unexpected response type when preparing "
"statement on host %s: %s" % (host, response))) | 0.001824 |
def add_noise_to_program(prog, T1=30e-6, T2=30e-6, gate_time_1q=50e-9, gate_time_2q=150e-09,
ro_fidelity=0.95):
"""
Add generic damping and dephasing noise to a program.
.. warning::
This function is deprecated. Please use :py:func:`add_decoherence_noise` instead.
:param prog: A pyquil program consisting of I, RZ, CZ, and RX(+-pi/2) instructions
:param Union[Dict[int,float],float] T1: The T1 amplitude damping time either globally or in a
dictionary indexed by qubit id. By default, this is 30 us.
:param Union[Dict[int,float],float] T2: The T2 dephasing time either globally or in a
dictionary indexed by qubit id. By default, this is also 30 us.
:param float gate_time_1q: The duration of the one-qubit gates, namely RX(+pi/2) and RX(-pi/2).
By default, this is 50 ns.
:param float gate_time_2q: The duration of the two-qubit gates, namely CZ.
By default, this is 150 ns.
:param Union[Dict[int,float],float] ro_fidelity: The readout assignment fidelity
:math:`F = (p(0|0) + p(1|1))/2` either globally or in a dictionary indexed by qubit id.
:return: A new program with noisy operators.
"""
warnings.warn("pyquil.kraus.add_noise_to_program is deprecated, please use "
"pyquil.noise.add_decoherence_noise instead.",
DeprecationWarning)
return add_decoherence_noise(prog, T1=T1, T2=T2, gate_time_1q=gate_time_1q,
gate_time_2q=gate_time_2q, ro_fidelity=ro_fidelity) | 0.007038 |
def mount_volume(volume, device='/dev/xvdf', mountpoint='/mnt/data', fstype='ext4'):
'''
Mount an EBS volume
Args:
volume (str): EBS volume ID
device (str): default /dev/xvdf
mountpoint (str): default /mnt/data
fstype (str): default ext4
'''
_ec2().attach_volume(volume, _host_node()['id'], device)
time.sleep(1)
sudo('mkdir -p "%s"' % mountpoint)
sudo('mount -t "%s" "%s" "%s"' % (fstype, device, mountpoint)) | 0.004211 |
def consume_payload(rlp, prefix, start, type_, length):
"""Read the payload of an item from an RLP string.
:param rlp: the rlp string to read from
:param type_: the type of the payload (``bytes`` or ``list``)
:param start: the position at which to start reading
:param length: the length of the payload in bytes
:returns: a tuple ``(item, per_item_rlp, end)``, where ``item`` is
the read item, per_item_rlp is a list containing the RLP
encoding of each item and ``end`` is the position of the
first unprocessed byte
"""
if type_ is bytes:
item = rlp[start: start + length]
return (item, [prefix + item], start + length)
elif type_ is list:
items = []
per_item_rlp = []
list_rlp = prefix
next_item_start = start
end = next_item_start + length
while next_item_start < end:
p, t, l, s = consume_length_prefix(rlp, next_item_start)
item, item_rlp, next_item_start = consume_payload(rlp, p, s, t, l)
per_item_rlp.append(item_rlp)
# When the item returned above is a single element, item_rlp will also contain a
# single element, but when it's a list, the first element will be the RLP of the
# whole List, which is what we want here.
list_rlp += item_rlp[0]
items.append(item)
per_item_rlp.insert(0, list_rlp)
if next_item_start > end:
raise DecodingError('List length prefix announced a too small '
'length', rlp)
return (items, per_item_rlp, next_item_start)
else:
raise TypeError('Type must be either list or bytes') | 0.001731 |
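A concrete sketch for a single byte-string item: b'\x83dog' is the RLP encoding of b'dog' (a one-byte prefix 0x83 followed by a three-byte payload):

item, per_item_rlp, end = consume_payload(b'\x83dog', b'\x83', 1, bytes, 3)
# item == b'dog', per_item_rlp == [b'\x83dog'], end == 4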
def __username(self, fname, lname): # pragma: no cover
"""Convert first name + last name into first.last style username."""
self.username = '.'.join([i.lower() for i in [fname, lname]]) | 0.009901 |
def readRaw8(self):
"""Read an 8-bit value on the bus (without register)."""
self._idle()
self._transaction_start()
self._i2c_start()
self._i2c_write_bytes([self._address_byte(False)])
self._i2c_stop()
self._i2c_idle()
self._i2c_start()
self._i2c_write_bytes([self._address_byte(True)])
self._i2c_read_bytes(1)
self._i2c_stop()
response = self._transaction_end()
self._verify_acks(response[:-1])
return response[-1] | 0.003795 |
def _gtu8(ins):
""" Compares & pops top 2 operands out of the stack, and checks
if the 1st operand > 2nd operand (top of the stack).
Pushes 0 if False, 1 if True.
8 bit unsigned version
"""
output = _8bit_oper(ins.quad[2], ins.quad[3], reversed_=True)
output.append('cp h')
output.append('sbc a, a')
output.append('push af')
return output | 0.002551 |
def _set_cam_share(self, v, load=False):
"""
Setter method for cam_share, mapped from YANG variable /hardware/profile/tcam/cam_share (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_cam_share is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_cam_share() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=cam_share.cam_share, is_container='container', presence=False, yang_name="cam-share", rest_name="cam-share", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable cam-sharing for features'}}, namespace='urn:brocade.com:mgmt:brocade-hardware', defining_module='brocade-hardware', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """cam_share must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=cam_share.cam_share, is_container='container', presence=False, yang_name="cam-share", rest_name="cam-share", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable cam-sharing for features'}}, namespace='urn:brocade.com:mgmt:brocade-hardware', defining_module='brocade-hardware', yang_type='container', is_config=True)""",
})
self.__cam_share = t
if hasattr(self, '_set'):
self._set() | 0.006127 |
def call_audit(func):
"""Print a detailed audit of all calls to this function."""
def audited_func(*args, **kwargs):
import traceback
stack = traceback.extract_stack()
r = func(*args, **kwargs)
func_name = func.__name__
print("@depth %d, trace %s -> %s(*%r, **%r) => %r" % (
len(stack),
" -> ".join("%s:%d:%s" % x[0:3] for x in stack[-5:-2]),
func_name,
args,
kwargs,
r))
return r
return audited_func | 0.001873 |
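Usage is a plain decorator; the exact trace text depends on the call site:

@call_audit
def add(a, b):
    return a + b

add(2, 3)
# prints something like:
# @depth 3, trace <module>:1:<module> -> add(*(2, 3), **{}) => 5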
def update_classification_annotations_and_summaries(
self,
updatePeakMagnitudes=True):
"""*update classification annotations and summaries*
**Key Arguments:**
- ``updatePeakMagnitudes`` -- update the peak magnitudes in the annotations to give absolute magnitudes. Default *True*
**Return:**
- None
**Usage:**
.. todo::
- add usage info
- create a sublime snippet for usage
- write a command-line tool for this method
- update package tutorial with command-line tool info if needed
.. code-block:: python
usage code
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check redendering of this docstring
"""
self.log.debug(
'starting the ``update_classification_annotations_and_summaries`` method')
# import time
# start_time = time.time()
# print "COLLECTING TRANSIENTS WITH NO ANNOTATIONS"
if updatePeakMagnitudes:
sqlQuery = u"""
                SELECT * from sherlock_crossmatches cm, sherlock_classifications cl where rank =1 and cl.transient_object_id=cm.transient_object_id and (cl.annotation is null or cl.dateLastModified is null or cl.dateLastModified > DATE_SUB(NOW(), INTERVAL 30 DAY)) order by cl.dateLastModified asc limit 100000
""" % locals()
else:
sqlQuery = u"""
SELECT * from sherlock_crossmatches cm, sherlock_classifications cl where rank =1 and cl.transient_object_id=cm.transient_object_id and cl.summary is null
""" % locals()
rows = readquery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.transientsDbConn,
quiet=False
)
# print "FINISHED COLLECTING TRANSIENTS WITH NO ANNOTATIONS/GENERATING ANNOTATIONS: %d" % (time.time() - start_time,)
# start_time = time.time()
from astrocalc.coords import unit_conversion
# ASTROCALC UNIT CONVERTER OBJECT
converter = unit_conversion(
log=self.log
)
updates = []
for row in rows:
catalogue = row["catalogue_table_name"]
objectId = row["catalogue_object_id"]
objectType = row["catalogue_object_type"]
objectSubtype = row["catalogue_object_subtype"]
catalogueString = catalogue
if "catalogue" not in catalogueString.lower():
catalogueString = catalogue + " catalogue"
if "/" in catalogueString:
catalogueString += "s"
if "ned" in catalogue.lower() and "/" not in catalogue:
objectId = '''<a href="https://ned.ipac.caltech.edu/cgi-bin/objsearch?objname=%(objectId)s&extend=no&hconst=73&omegam=0.27&omegav=0.73&corr_z=1&out_csys=Equatorial&out_equinox=J2000.0&obj_sort=RA+or+Longitude&of=pre_text&zv_breaker=30000.0&list_limit=5&img_stamp=YES">%(objectId)s</a>''' % locals()
elif "sdss" in catalogue.lower() and "/" not in catalogue:
objectId = "http://skyserver.sdss.org/dr12/en/tools/explore/Summary.aspx?id=%(objectId)s" % locals(
)
ra = converter.ra_decimal_to_sexegesimal(
ra=row["raDeg"],
delimiter=""
)
dec = converter.dec_decimal_to_sexegesimal(
dec=row["decDeg"],
delimiter=""
)
betterName = "SDSS J" + ra[0:9] + dec[0:9]
objectId = '''<a href="%(objectId)s">%(betterName)s</a>''' % locals()
elif "milliquas" in catalogue.lower() and "/" not in catalogue:
thisName = objectId
objectId = objectId.replace(" ", "+")
objectId = '''<a href="https://heasarc.gsfc.nasa.gov/db-perl/W3Browse/w3table.pl?popupFrom=Query+Results&tablehead=name%%3Dheasarc_milliquas%%26description%%3DMillion+Quasars+Catalog+%%28MILLIQUAS%%29%%2C+Version+4.8+%%2822+June+2016%%29%%26url%%3Dhttp%%3A%%2F%%2Fheasarc.gsfc.nasa.gov%%2FW3Browse%%2Fgalaxy-catalog%%2Fmilliquas.html%%26archive%%3DN%%26radius%%3D1%%26mission%%3DGALAXY+CATALOG%%26priority%%3D5%%26tabletype%%3DObject&dummy=Examples+of+query+constraints%%3A&varon=name&bparam_name=%%3D%%22%(objectId)s%%22&bparam_name%%3A%%3Aunit=+&bparam_name%%3A%%3Aformat=char25&varon=ra&bparam_ra=&bparam_ra%%3A%%3Aunit=degree&bparam_ra%%3A%%3Aformat=float8%%3A.5f&varon=dec&bparam_dec=&bparam_dec%%3A%%3Aunit=degree&bparam_dec%%3A%%3Aformat=float8%%3A.5f&varon=bmag&bparam_bmag=&bparam_bmag%%3A%%3Aunit=mag&bparam_bmag%%3A%%3Aformat=float8%%3A4.1f&varon=rmag&bparam_rmag=&bparam_rmag%%3A%%3Aunit=mag&bparam_rmag%%3A%%3Aformat=float8%%3A4.1f&varon=redshift&bparam_redshift=&bparam_redshift%%3A%%3Aunit=+&bparam_redshift%%3A%%3Aformat=float8%%3A6.3f&varon=radio_name&bparam_radio_name=&bparam_radio_name%%3A%%3Aunit=+&bparam_radio_name%%3A%%3Aformat=char22&varon=xray_name&bparam_xray_name=&bparam_xray_name%%3A%%3Aunit=+&bparam_xray_name%%3A%%3Aformat=char22&bparam_lii=&bparam_lii%%3A%%3Aunit=degree&bparam_lii%%3A%%3Aformat=float8%%3A.5f&bparam_bii=&bparam_bii%%3A%%3Aunit=degree&bparam_bii%%3A%%3Aformat=float8%%3A.5f&bparam_broad_type=&bparam_broad_type%%3A%%3Aunit=+&bparam_broad_type%%3A%%3Aformat=char4&bparam_optical_flag=&bparam_optical_flag%%3A%%3Aunit=+&bparam_optical_flag%%3A%%3Aformat=char3&bparam_red_psf_flag=&bparam_red_psf_flag%%3A%%3Aunit=+&bparam_red_psf_flag%%3A%%3Aformat=char1&bparam_blue_psf_flag=&bparam_blue_psf_flag%%3A%%3Aunit=+&bparam_blue_psf_flag%%3A%%3Aformat=char1&bparam_ref_name=&bparam_ref_name%%3A%%3Aunit=+&bparam_ref_name%%3A%%3Aformat=char6&bparam_ref_redshift=&bparam_ref_redshift%%3A%%3Aunit=+&bparam_ref_redshift%%3A%%3Aformat=char6&bparam_qso_prob=&bparam_qso_prob%%3A%%3Aunit=percent&bparam_qso_prob%%3A%%3Aformat=int2%%3A3d&bparam_alt_name_1=&bparam_alt_name_1%%3A%%3Aunit=+&bparam_alt_name_1%%3A%%3Aformat=char22&bparam_alt_name_2=&bparam_alt_name_2%%3A%%3Aunit=+&bparam_alt_name_2%%3A%%3Aformat=char22&Entry=&Coordinates=J2000&Radius=Default&Radius_unit=arcsec&NR=CheckCaches%%2FGRB%%2FSIMBAD%%2BSesame%%2FNED&Time=&ResultMax=1000&displaymode=Display&Action=Start+Search&table=heasarc_milliquas">%(thisName)s</a>''' % locals()
if objectSubtype and objectSubtype.lower() in ["uvs", "radios", "xray", "qso", "irs", 'uves', 'viss', 'hii', 'gclstr', 'ggroup', 'gpair', 'gtrpl']:
objectType = objectSubtype
if objectType == "star":
objectType = "stellar source"
elif objectType == "agn":
objectType = "AGN"
elif objectType == "cb":
objectType = "CV"
elif objectType == "unknown":
objectType = "unclassified source"
sep = row["separationArcsec"]
if row["classificationReliability"] == 1:
classificationReliability = "synonymous"
psep = row["physical_separation_kpc"]
if psep:
location = '%(sep)0.1f" (%(psep)0.1f Kpc) from the %(objectType)s core' % locals(
)
else:
location = '%(sep)0.1f" from the %(objectType)s core' % locals(
)
elif row["classificationReliability"] in (2, 3):
classificationReliability = "possibly associated"
n = row["northSeparationArcsec"]
if n > 0:
nd = "S"
else:
nd = "N"
e = row["eastSeparationArcsec"]
if e > 0:
ed = "W"
else:
ed = "E"
n = math.fabs(n)
e = math.fabs(e)
psep = row["physical_separation_kpc"]
if psep:
location = '%(n)0.2f" %(nd)s, %(e)0.2f" %(ed)s (%(psep)0.1f Kpc) from the %(objectType)s centre' % locals(
)
else:
location = '%(n)0.2f" %(nd)s, %(e)0.2f" %(ed)s from the %(objectType)s centre' % locals(
)
location = location.replace("unclassified", "object's")
best_mag = None
best_mag_error = None
best_mag_filter = None
filters = ["R", "V", "B", "I", "J", "G", "H", "K", "U",
"_r", "_g", "_i", "_g", "_z", "_y", "_u", "unkMag"]
for f in filters:
if row[f] and not best_mag:
best_mag = row[f]
best_mag_error = row[f + "Err"]
subfilter = f.replace(
"_", "").replace("Mag", "")
best_mag_filter = f.replace(
"_", "").replace("Mag", "") + "="
if "unk" in best_mag_filter:
best_mag_filter = ""
subfilter = ''
if not best_mag_filter:
if str(best_mag).lower() in ("8", "11", "18"):
best_mag_filter = "an "
else:
best_mag_filter = "a "
else:
if str(best_mag_filter)[0].lower() in ("r", "i", "h"):
best_mag_filter = "an " + best_mag_filter
else:
best_mag_filter = "a " + best_mag_filter
if not best_mag:
best_mag = "an unknown-"
best_mag_filter = ""
else:
best_mag = "%(best_mag)0.2f " % locals()
distance = None
if row["direct_distance"]:
d = row["direct_distance"]
distance = "distance of %(d)0.1f Mpc" % locals()
if row["z"]:
z = row["z"]
distance += "(z=%(z)0.3f)" % locals()
elif row["z"]:
z = row["z"]
distance = "z=%(z)0.3f" % locals()
elif row["photoZ"]:
z = row["photoZ"]
zErr = row["photoZErr"]
distance = "photoZ=%(z)0.3f (±%(zErr)0.3f)" % locals()
if distance:
distance = "%(distance)s" % locals()
distance_modulus = None
if row["direct_distance_modulus"]:
distance_modulus = row["direct_distance_modulus"]
elif row["distance_modulus"]:
distance_modulus = row["distance_modulus"]
if updatePeakMagnitudes:
if distance:
absMag = row["transientAbsMag"]
absMag = """ A host %(distance)s implies a transient <em>M =</em> %(absMag)s.""" % locals(
)
else:
absMag = ""
else:
if distance and distance_modulus:
absMag = "%(distance_modulus)0.2f" % locals()
absMag = """ A host %(distance)s implies a <em>m - M =</em> %(absMag)s.""" % locals(
)
else:
absMag = ""
annotation = "The transient is %(classificationReliability)s with <em>%(objectId)s</em>; %(best_mag_filter)s%(best_mag)smag %(objectType)s found in the %(catalogueString)s. It's located %(location)s.%(absMag)s" % locals()
summary = '%(sep)0.1f" from %(objectType)s in %(catalogue)s' % locals(
)
update = {
"transient_object_id": row["transient_object_id"],
"annotation": annotation,
"summary": summary,
"separationArcsec": sep
}
updates.append(update)
# print "FINISHED GENERATING ANNOTATIONS/ADDING ANNOTATIONS TO TRANSIENT DATABASE: %d" % (time.time() - start_time,)
# start_time = time.time()
insert_list_of_dictionaries_into_database_tables(
dbConn=self.transientsDbConn,
log=self.log,
dictList=updates,
dbTableName="sherlock_classifications",
dateModified=True,
batchSize=10000,
replace=True,
dbSettings=self.settings["database settings"]["transients"]
)
# print "FINISHED ADDING ANNOTATIONS TO TRANSIENT DATABASE/UPDATING ORPHAN ANNOTATIONS: %d" % (time.time() - start_time,)
# start_time = time.time()
sqlQuery = """update sherlock_classifications set annotation = "The transient location is not matched against any known catalogued source", summary = "No catalogued match" where classification = 'ORPHAN' and summary is null """ % locals()
writequery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.transientsDbConn,
)
# print "FINISHED UPDATING ORPHAN ANNOTATIONS: %d" % (time.time() - start_time,)
# start_time = time.time()
self.log.debug(
'completed the ``update_classification_annotations_and_summaries`` method')
return None | 0.001857 |
def get_queue_func(request):
"""Establish the connection to rabbitmq."""
def cleanup(request):
conn.close()
def queue_func(**kwargs):
return conn.channel().basic_publish(
exchange='', body=json.dumps(kwargs), routing_key=queue,
properties=pika.BasicProperties(delivery_mode=2))
server = request.registry.settings['queue_server']
queue = request.registry.settings['queue_verification']
conn = pika.BlockingConnection(pika.ConnectionParameters(host=server))
request.add_finished_callback(cleanup)
return queue_func | 0.001709 |
def set_net_configuration(self, ipv4_address=None, ipv4_configuration=None,
ipv4_gateway=None, channel=None):
"""Set network configuration data.
Apply desired network configuration data, leaving unspecified
parameters alone.
:param ipv4_address: CIDR notation for IP address and netmask
Example: '192.168.0.10/16'
:param ipv4_configuration: Method to use to configure the network.
'DHCP' or 'Static'.
:param ipv4_gateway: IP address of gateway to use.
:param channel: LAN channel to configure, defaults to autodetect
"""
if channel is None:
channel = self.get_network_channel()
if ipv4_configuration is not None:
cmddata = [channel, 4, 0]
if ipv4_configuration.lower() == 'dhcp':
cmddata[-1] = 2
elif ipv4_configuration.lower() == 'static':
cmddata[-1] = 1
else:
raise Exception('Unrecognized ipv4cfg parameter {0}'.format(
ipv4_configuration))
self.xraw_command(netfn=0xc, command=1, data=cmddata)
if ipv4_address is not None:
netmask = None
if '/' in ipv4_address:
ipv4_address, prefix = ipv4_address.split('/')
netmask = _cidr_to_mask(int(prefix))
cmddata = bytearray((channel, 3)) + socket.inet_aton(ipv4_address)
self.xraw_command(netfn=0xc, command=1, data=cmddata)
if netmask is not None:
cmddata = bytearray((channel, 6)) + netmask
self.xraw_command(netfn=0xc, command=1, data=cmddata)
if ipv4_gateway is not None:
cmddata = bytearray((channel, 12)) + socket.inet_aton(ipv4_gateway)
self.xraw_command(netfn=0xc, command=1, data=cmddata) | 0.001569 |
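A hypothetical call on an IPMI command/session object (the variable name is assumed):

# configure a static address with a /24 netmask and a gateway on the detected LAN channel
cmd.set_net_configuration(ipv4_address='192.168.0.10/24',
                          ipv4_configuration='Static',
                          ipv4_gateway='192.168.0.1')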
def _updateVariantAnnotationSets(self, variantFile, dataUrl):
"""
Updates the variant annotation set associated with this variant using
information in the specified pysam variantFile.
"""
# TODO check the consistency of this between VCF files.
if not self.isAnnotated():
annotationType = None
for record in variantFile.header.records:
if record.type == "GENERIC":
if record.key == "SnpEffVersion":
annotationType = ANNOTATIONS_SNPEFF
elif record.key == "VEP":
version = record.value.split()[0]
# TODO we need _much_ more sophisticated processing
# of VEP versions here. When do they become
# incompatible?
if version == "v82":
annotationType = ANNOTATIONS_VEP_V82
elif version == "v77":
annotationType = ANNOTATIONS_VEP_V77
else:
# TODO raise a proper typed exception there with
# the file name as an argument.
raise ValueError(
"Unsupported VEP version {} in '{}'".format(
version, dataUrl))
if annotationType is None:
infoKeys = variantFile.header.info.keys()
if 'CSQ' in infoKeys or 'ANN' in infoKeys:
# TODO likewise, we want a properly typed exception that
# we can throw back to the repo manager UI and display
# as an import error.
raise ValueError(
"Unsupported annotations in '{}'".format(dataUrl))
if annotationType is not None:
vas = HtslibVariantAnnotationSet(self, self.getLocalId())
vas.populateFromFile(variantFile, annotationType)
self.addVariantAnnotationSet(vas) | 0.000945 |
def next(self) -> mx.io.DataBatch:
"""
Returns the next batch.
"""
if self.iter_next():
return self.next_batch
raise StopIteration | 0.010989 |
def wait(self, wait_time=0):
"""
Blocking call to check if the worker returns the result. One can use
job.result after this call returns ``True``.
:arg wait_time: Time in seconds to wait, default is infinite.
:return: `True` or `False`.
.. note::
            This is a blocking call; you can specify the wait_time argument for a timeout.
"""
if self.__result:
return True
data = self.rdb.brpop(self.urn, wait_time)
if data:
self.rdb.delete(self.urn)
data = json.loads(data[1])
self.__result = data
return True
else:
return False | 0.004367 |
def splitGenoSlidingWindow(pos,out_file,size=5e4,step=None):
"""
split into windows using a slide criterion
Args:
size: window size
step: moving step (default: 0.5*size)
Returns:
wnd_i: number of windows
nSnps: vector of per-window number of SNPs
"""
if step is None: step = 0.5*size
chroms = SP.unique(pos[:,0])
RV = []
wnd_i = 0
wnd_file = csv.writer(open(out_file,'w'),delimiter='\t')
nSnps = []
for chrom_i in chroms:
Ichrom = pos[:,0]==chrom_i
idx_chrom_start = SP.where(Ichrom)[0][0]
pos_chr = pos[Ichrom,1]
start = pos_chr.min()
pos_chr_max = pos_chr.max()
        while True:
            if start > pos_chr_max:
                break
end = start+size
Ir = (pos_chr>=start)*(pos_chr<end)
_nSnps = Ir.sum()
if _nSnps>0:
idx_wnd_start = idx_chrom_start+SP.where(Ir)[0][0]
nSnps.append(_nSnps)
line = SP.array([wnd_i,chrom_i,start,end,idx_wnd_start,_nSnps],dtype=int)
wnd_file.writerow(line)
wnd_i+=1
start += step
nSnps = SP.array(nSnps)
return wnd_i,nSnps | 0.021053 |
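# Hedged usage sketch for splitGenoSlidingWindow (file name and coordinates are made
# up): pos is an Nx2 array with chromosome in column 0 and base-pair position in
# column 1; the call writes one tab-delimited row per window to 'windows.txt' and
# returns the window count together with the per-window SNP counts.
import numpy as np

pos = np.array([[1, 1000], [1, 26000], [1, 51000], [2, 500], [2, 30000]])
n_windows, snps_per_window = splitGenoSlidingWindow(pos, 'windows.txt', size=5e4, step=2.5e4)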
def _expectation(p, kern, feat, mean, none, nghp=None):
"""
Compute the expectation:
expectation[n] = <K_{Z, x_n} x_{n+1}^T>_p(x_{n:n+1})
- K_{.,.} :: Linear kernel
- p :: MarkovGaussian distribution (p.cov 2x(N+1)xDxD)
:return: NxMxD
"""
Xmu, Xcov = p.mu, p.cov
with tf.control_dependencies([tf.assert_equal(
tf.shape(Xmu)[1], tf.constant(kern.input_dim, settings.int_type),
message="Currently cannot handle slicing in exKxz.")]):
Xmu = tf.identity(Xmu)
with params_as_tensors_for(kern, feat):
N = tf.shape(Xmu)[0] - 1
var_Z = kern.variance * feat.Z # MxD
tiled_Z = tf.tile(tf.expand_dims(var_Z, 0), (N, 1, 1)) # NxMxD
eXX = Xcov[1, :-1] + (Xmu[:-1][..., None] * Xmu[1:][:, None, :]) # NxDxD
return tf.matmul(tiled_Z, eXX) | 0.002339 |
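# A small NumPy sketch (added for illustration, not part of the library) of the
# identity the TensorFlow code above relies on for a single time step n:
#   E[x_n x_{n+1}^T] = Cov(x_n, x_{n+1}) + E[x_n] E[x_{n+1}]^T
# so, for a linear kernel, <K_{Z,x_n} x_{n+1}^T> = variance * Z @ E[x_n x_{n+1}^T].
import numpy as np

D, M = 3, 4
variance = 2.0
Z = np.random.randn(M, D)                 # inducing inputs, MxD
mu_n, mu_n1 = np.random.randn(D), np.random.randn(D)
cross_cov = 0.1 * np.random.randn(D, D)   # Cov(x_n, x_{n+1}) from the Markov Gaussian
eXX = cross_cov + np.outer(mu_n, mu_n1)   # E[x_n x_{n+1}^T], the dense analogue of eXX above
expectation_n = variance * Z @ eXX        # MxD, one slice of the NxMxD result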
def _txtinfo_to_jsoninfo(self, data):
"""
converts olsr 1 txtinfo format to jsoninfo
"""
# replace INFINITE with inf, which is convertible to float
data = data.replace('INFINITE', 'inf')
# find interesting section
lines = data.split('\n')
# process links in topology section
try:
start = lines.index('Table: Topology') + 2
end = lines[start:].index('') + start
except ValueError:
raise ParserError('Unrecognized format')
topology_lines = lines[start:end]
# convert topology section to jsoninfo format
topology = []
for line in topology_lines:
values = line.split('\t')
topology.append({
'destinationIP': values[0],
'lastHopIP': values[1],
'linkQuality': float(values[2]),
'neighborLinkQuality': float(values[3]),
'tcEdgeCost': float(values[4]) * 1024.0
})
# process alias (MID) section
try:
start = lines.index('Table: MID') + 2
end = lines[start:].index('') + start
except ValueError:
raise ParserError('Unrecognized format')
mid_lines = lines[start:end]
# convert mid section to jsoninfo format
mid = []
for line in mid_lines:
values = line.split('\t')
node = values[0]
aliases = values[1].split(';')
mid.append({
'ipAddress': node,
'aliases': [{'ipAddress': alias} for alias in aliases]
})
return {
'topology': topology,
'mid': mid
} | 0.001158 |
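# Hedged usage sketch for _txtinfo_to_jsoninfo: 'parser' stands for an instance of
# the class this method belongs to, and the addresses are made up. The input mirrors
# the txtinfo layout the parser expects: a header line after each 'Table:' marker,
# tab-separated values, and a blank line terminating each section.
data = (
    "Table: Topology\n"
    "Dest. IP\tLast hop IP\tLQ\tNLQ\tCost\n"
    "10.0.0.2\t10.0.0.1\t1.000\t1.000\t1.000\n"
    "\n"
    "Table: MID\n"
    "IP address\tAliases\n"
    "10.0.0.1\t192.168.1.1;192.168.2.1\n"
    "\n"
)
result = parser._txtinfo_to_jsoninfo(data)
# result['topology'][0]['tcEdgeCost'] -> 1024.0
# result['mid'][0]['aliases'] -> [{'ipAddress': '192.168.1.1'}, {'ipAddress': '192.168.2.1'}]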
def best_kmers(dt, response, sequence, k=6, consider_shift=True, n_cores=1,
seq_align="start", trim_seq_len=None):
"""
Find best k-mers for CONCISE initialization.
Args:
dt (pd.DataFrame): Table containing response variable and sequence.
        response (str): Name of the column used as the response variable.
sequence (str): Name of the column storing the DNA/RNA sequences.
k (int): Desired k-mer length.
n_cores (int): Number of cores to use for computation. It can use up to 3 cores.
consider_shift (boolean): When performing stepwise k-mer selection. Is TATTTA similar to ATTTAG?
seq_align (str): one of ``{"start", "end"}``. To which end should we align sequences?
trim_seq_len (int): Consider only first `trim_seq_len` bases of each sequence when generating the sequence design matrix. If :python:`None`, set :py:attr:`trim_seq_len` to the longest sequence length, hence whole sequences are considered.
Returns:
string list: Best set of motifs for this dataset sorted with respect to
        confidence (best candidate occurring first).
Details:
First a lasso model gets fitted to get a set of initial motifs. Next, the best
subset of unrelated motifs is selected by stepwise selection.
"""
y = dt[response]
seq = dt[sequence]
if trim_seq_len is not None:
seq = pad_sequences(seq, align=seq_align, maxlen=trim_seq_len)
seq = [s.replace("N", "") for s in seq]
dt_kmer = kmer_count(seq, k)
Xsp = csc_matrix(dt_kmer)
en = ElasticNet(alpha=1, standardize=False, n_splits=3)
en.fit(Xsp, y)
    # which coefficients are nonzero?
nonzero_kmers = dt_kmer.columns.values[en.coef_ != 0].tolist()
# perform stepwise selection
#
# TODO - how do we deal with the intercept?
# largest number of motifs where they don't differ by more than 1 k-mer
def find_next_best(dt_kmer, y, selected_kmers, to_be_selected_kmers, consider_shift=True):
"""
perform stepwise model selection while preventing to add a motif similar to the
already selected motifs.
"""
F, pval = f_regression(dt_kmer[to_be_selected_kmers], y)
kmer = to_be_selected_kmers.pop(pval.argmin())
selected_kmers.append(kmer)
def select_criterion(s1, s2, consider_shift=True):
if hamming_distance(s1, s2) <= 1:
return False
if consider_shift and hamming_distance(s1[1:], s2[:-1]) == 0:
return False
if consider_shift and hamming_distance(s1[:-1], s2[1:]) == 0:
return False
return True
to_be_selected_kmers = [ckmer for ckmer in to_be_selected_kmers
if select_criterion(ckmer, kmer, consider_shift)]
if len(to_be_selected_kmers) == 0:
return selected_kmers
else:
# regress out the new feature
lm = LinearRegression()
lm.fit(dt_kmer[selected_kmers], y)
y_new = y - lm.predict(dt_kmer[selected_kmers])
return find_next_best(dt_kmer, y_new, selected_kmers, to_be_selected_kmers, consider_shift)
selected_kmers = find_next_best(dt_kmer, y, [], nonzero_kmers, consider_shift)
return selected_kmers | 0.003283 |
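# Hedged usage sketch for best_kmers: the column names and sequences below are made
# up, and the call depends on the helpers imported by the surrounding module
# (kmer_count, pad_sequences, ElasticNet, ...); treat it as schematic rather than a test.
import pandas as pd

dt = pd.DataFrame({
    "binding": [0.1, 0.9, 0.3, 0.7, 0.2, 0.8, 0.4, 0.6],
    "seq": ["ACGTACGTAA", "TTATTTAGGC", "ACGTTTTTTT", "GGGTATTTAC",
            "CCCGATCGAT", "ATTTAGATTA", "ACGTGGGGGG", "TATTTACCCC"],
})
motifs = best_kmers(dt, response="binding", sequence="seq", k=6)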
def _subprocess_method(self, command):
"""Use the subprocess module to execute ipmitool commands
and and set status
"""
p = subprocess.Popen([self._ipmitool_path] + self.args + command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.output, self.error = p.communicate()
self.status = p.returncode | 0.008571 |
def paginate(self, skip, limit):
"""Paginate list of records"""
if not self.count() or not limit:
return
skip = skip or 0
pages = int(ceil(self.count() / float(limit)))
limits = {}
last = 0
for i in range(pages):
current = limit * i
limits[last] = current
last = current
# example with count == 62
# {0: 20, 20: 40, 40: 60, 60: 62}
if limit and limit < self.count():
limit = limits.get(skip, self.count())
self.cursordat = self.cursordat[skip: limit] | 0.003322 |
def _update_github(
self):
"""commit the changes and push them to github
"""
self.log.debug('starting the ``_update_github`` method')
from subprocess import Popen, PIPE, STDOUT
gdir = self.settings["sherlock wiki root"]
cmd = """cd %(gdir)s && git add --all && git commit -m "x" && git pull origin master && git push origin master""" % locals()
p = Popen(cmd, stdout=PIPE, stdin=PIPE, shell=True)
output = p.communicate()[0]
        print(output)
self.log.debug('output: %(output)s' % locals())
self.log.debug('completed the ``_update_github`` method')
return None | 0.004511 |
def isTransitionAllowed(instance, transition_id):
"""Checks if the object can perform the transition passed in.
:returns: True if transition can be performed
:rtype: bool
"""
wf_tool = getToolByName(instance, "portal_workflow")
for wf_id in wf_tool.getChainFor(instance):
wf = wf_tool.getWorkflowById(wf_id)
if wf and wf.isActionSupported(instance, transition_id):
return True
return False | 0.002242 |
def to_sky(self, wcs, mode='all'):
"""
Convert the aperture to a `SkyRectangularAnnulus` object
defined in celestial coordinates.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The world coordinate system (WCS) transformation to use.
mode : {'all', 'wcs'}, optional
Whether to do the transformation including distortions
            (``'all'``; default) or including only the core WCS
transformation (``'wcs'``).
Returns
-------
aperture : `SkyRectangularAnnulus` object
A `SkyRectangularAnnulus` object.
"""
sky_params = self._to_sky_params(wcs, mode=mode)
return SkyRectangularAnnulus(**sky_params) | 0.002628 |
def exists(package):
"""
Return True if package information is available.
If ``pkg-config`` not on path, raises ``EnvironmentError``.
"""
pkg_config_exe = os.environ.get('PKG_CONFIG', None) or 'pkg-config'
cmd = '{0} --exists {1}'.format(pkg_config_exe, package).split()
return call(cmd) == 0 | 0.003115 |
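# Hedged usage sketch for exists(): 'zlib' is just an illustrative package name.
if exists('zlib'):
    print('zlib development files found by pkg-config')
else:
    print('zlib not found; install the matching -dev/-devel package')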
def update_from(
self,
obj=None,
yaml_env=None,
yaml_file=None,
json_env=None,
json_file=None,
env_namespace=None,
):
"""
Update dict from several sources at once.
This is simply a convenience method that can be used as an alternative
to making several calls to the various
:meth:`~ConfigLoader.update_from_*` methods.
Updates will be applied in the order that the parameters are listed
below, with each source taking precedence over those before it.
:arg obj: Object or name of object, e.g. 'myapp.settings'.
:arg yaml_env: Name of an environment variable containing the path to
a YAML config file.
:arg yaml_file: Path to a YAML config file, or a file-like object.
:arg json_env: Name of an environment variable containing the path to
a JSON config file.
:arg json_file: Path to a JSON config file, or a file-like object.
:arg env_namespace: Common prefix of the environment variables
containing the desired config.
"""
if obj:
self.update_from_object(obj)
if yaml_env:
self.update_from_yaml_env(yaml_env)
if yaml_file:
self.update_from_yaml_file(yaml_file)
if json_env:
self.update_from_json_env(json_env)
if json_file:
self.update_from_json_file(json_file)
if env_namespace:
self.update_from_env_namespace(env_namespace) | 0.001257 |
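# Hedged usage sketch for update_from(): it assumes ConfigLoader can be instantiated
# directly, and the settings module, environment variable, and namespace names are
# illustrative, not part of the documented API.
config = ConfigLoader()
config.update_from(
    obj='myapp.settings',        # lowest precedence
    yaml_env='MYAPP_CONFIG',     # $MYAPP_CONFIG points at a YAML file
    env_namespace='MYAPP',       # MYAPP_* environment variables take precedence
)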
def available_modes_with_ids(self):
"""Return list of objects containing available mode name and id."""
if not self._available_mode_ids:
all_modes = FIXED_MODES.copy()
self._available_mode_ids = all_modes
modes = self.get_available_modes()
try:
if modes:
# pylint: disable=consider-using-dict-comprehension
simple_modes = dict(
[(m.get("type", m.get("name")), m.get("id"))
for m in modes]
)
all_modes.update(simple_modes)
self._available_mode_ids = all_modes
except TypeError:
_LOGGER.debug("Did not receive a valid response. Passing..")
return self._available_mode_ids | 0.002398 |
def update_file(self, id, hidden=None, lock_at=None, locked=None, name=None, on_duplicate=None, parent_folder_id=None, unlock_at=None):
"""
Update file.
Update some settings on the specified file
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
# OPTIONAL - name
"""The new display name of the file"""
if name is not None:
data["name"] = name
# OPTIONAL - parent_folder_id
"""The id of the folder to move this file into.
The new folder must be in the same context as the original parent folder.
If the file is in a context without folders this does not apply."""
if parent_folder_id is not None:
data["parent_folder_id"] = parent_folder_id
# OPTIONAL - on_duplicate
"""If the file is moved to a folder containing a file with the same name,
or renamed to a name matching an existing file, the API call will fail
unless this parameter is supplied.
"overwrite":: Replace the existing file with the same name
"rename":: Add a qualifier to make the new filename unique"""
if on_duplicate is not None:
self._validate_enum(on_duplicate, ["overwrite", "rename"])
data["on_duplicate"] = on_duplicate
# OPTIONAL - lock_at
"""The datetime to lock the file at"""
if lock_at is not None:
data["lock_at"] = lock_at
# OPTIONAL - unlock_at
"""The datetime to unlock the file at"""
if unlock_at is not None:
data["unlock_at"] = unlock_at
# OPTIONAL - locked
"""Flag the file as locked"""
if locked is not None:
data["locked"] = locked
# OPTIONAL - hidden
"""Flag the file as hidden"""
if hidden is not None:
data["hidden"] = hidden
self.logger.debug("PUT /api/v1/files/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("PUT", "/api/v1/files/{id}".format(**path), data=data, params=params, single_item=True) | 0.003501 |
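# Hedged usage sketch for update_file(): 'client' stands for whatever object exposes
# this wrapper method, and the file id and values are made up.
updated = client.update_file(
    id=123,
    name="final_report.pdf",
    on_duplicate="rename",   # avoid failing if a file with this name already exists
    locked=False,
)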
def islice(self, start, end):
"""
Returns a new DateTimeIndex, containing a subslice of the timestamps in this index,
as specified by the given integer start and end locations.
Parameters
----------
start : int
The location of the start of the range, inclusive.
end : int
The location of the end of the range, exclusive.
"""
jdt_index = self._jdt_index.islice(start, end)
return DateTimeIndex(jdt_index=jdt_index) | 0.005792 |