function (string, lengths 16–7.61k) | repo_name (string, lengths 9–46) | features (sequence) |
---|---|---|
def post_with_null_author():
return make_post(with_author=False) | marshmallow-code/marshmallow-jsonapi | [
218,
64,
218,
52,
1442088655
] |
def __init__(
self, plotly_name="bordercolor", parent_name="sankey.hoverlabel", **kwargs | plotly/plotly.py | [
13052,
2308,
13052,
1319,
1385013188
] |
def __init__(self, model, data):
# try and import pytorch
global torch
if torch is None:
import torch
if version.parse(torch.__version__) < version.parse("0.4"):
warnings.warn("Your PyTorch version is older than 0.4 and not supported.")
# check if we have multiple inputs
self.multi_input = False
if type(data) == list:
self.multi_input = True
if type(data) != list:
data = [data]
self.data = data
self.layer = None
self.input_handle = None
self.interim = False
self.interim_inputs_shape = None
self.expected_value = None # to keep the DeepExplainer base happy
if type(model) == tuple:
self.interim = True
model, layer = model
model = model.eval()
self.layer = layer
self.add_target_handle(self.layer)
# if we are taking an interim layer, the 'data' is going to be the input
# of the interim layer; we will capture this using a forward hook
with torch.no_grad():
_ = model(*data)
interim_inputs = self.layer.target_input
if type(interim_inputs) is tuple:
# this should always be true, but just to be safe
self.interim_inputs_shape = [i.shape for i in interim_inputs]
else:
self.interim_inputs_shape = [interim_inputs.shape]
self.target_handle.remove()
del self.layer.target_input
self.model = model.eval()
self.multi_output = False
self.num_outputs = 1
with torch.no_grad():
outputs = model(*data)
# also get the device everything is running on
self.device = outputs.device
if outputs.shape[1] > 1:
self.multi_output = True
self.num_outputs = outputs.shape[1]
self.expected_value = outputs.mean(0).cpu().numpy() | slundberg/shap | [
18731,
2825,
18731,
1626,
1479842228
] |
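The constructor above accepts either a bare module or a `(model, layer)` tuple, in which case attributions are computed for the inputs of that interim layer (captured via the forward hook). A minimal usage sketch, assuming this `__init__` backs the PyTorch path of shap's public `DeepExplainer` (the import and class name are assumptions, not shown in the snippet):

```python
# Hedged sketch: shap.DeepExplainer is assumed to dispatch to the constructor above.
import torch
import torch.nn as nn
import shap

model = nn.Sequential(nn.Linear(10, 8), nn.ReLU(), nn.Linear(8, 3))
background = torch.randn(50, 10)   # wrapped in a list internally if not already one

# Plain model: attributions on the model inputs.
explainer = shap.DeepExplainer(model, background)

# (model, layer) tuple: attributions on the inputs of an interim layer.
interim_explainer = shap.DeepExplainer((model, model[1]), background)
```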
def add_handles(self, model, forward_handle, backward_handle):
"""
Add handles to all non-container layers in the model,
recursing through the children of container layers.
"""
handles_list = []
model_children = list(model.children())
if model_children:
for child in model_children:
handles_list.extend(self.add_handles(child, forward_handle, backward_handle))
else: # leaves
handles_list.append(model.register_forward_hook(forward_handle))
handles_list.append(model.register_backward_hook(backward_handle))
return handles_list | slundberg/shap | [
18731,
2825,
18731,
1626,
1479842228
] |
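`add_handles` walks the module tree and registers hooks only on leaf modules, since containers such as `nn.Sequential` perform no computation of their own. A sketch of the registration/cleanup cycle, pairing it with the hooks defined in this section (the real caller may wire different handlers):

```python
# Hedged sketch: 'explainer' stands for an instance of the class defining add_handles.
handles = explainer.add_handles(model, get_target_input, deeplift_grad)
try:
    outputs = model(*data)      # forward pass fires the forward hooks
    outputs.sum().backward()    # backward pass fires the backward hooks
finally:
    for handle in handles:      # always detach the hooks afterwards
        handle.remove()
```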
def gradient(self, idx, inputs):
self.model.zero_grad()
X = [x.requires_grad_() for x in inputs]
outputs = self.model(*X)
selected = [val for val in outputs[:, idx]]
grads = []
if self.interim:
interim_inputs = self.layer.target_input
for idx, input in enumerate(interim_inputs):
grad = torch.autograd.grad(selected, input,
retain_graph=True if idx + 1 < len(interim_inputs) else None,
allow_unused=True)[0]
if grad is not None:
grad = grad.cpu().numpy()
else:
grad = torch.zeros_like(X[idx]).cpu().numpy()
grads.append(grad)
del self.layer.target_input
return grads, [i.detach().cpu().numpy() for i in interim_inputs]
else:
for idx, x in enumerate(X):
grad = torch.autograd.grad(selected, x,
retain_graph=True if idx + 1 < len(X) else None,
allow_unused=True)[0]
if grad is not None:
grad = grad.cpu().numpy()
else:
grad = torch.zeros_like(X[idx]).cpu().numpy()
grads.append(grad)
return grads | slundberg/shap | [
18731,
2825,
18731,
1626,
1479842228
] |
def deeplift_grad(module, grad_input, grad_output):
"""The backward hook which computes the deeplift
gradient for an nn.Module
"""
# first, get the module type
module_type = module.__class__.__name__
# then, check whether the module type is supported
if module_type in op_handler:
if op_handler[module_type].__name__ not in ['passthrough', 'linear_1d']:
return op_handler[module_type](module, grad_input, grad_output)
else:
print('Warning: unrecognized nn.Module: {}'.format(module_type))
return grad_input | slundberg/shap | [
18731,
2825,
18731,
1626,
1479842228
] |
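`deeplift_grad` dispatches on the module's class name through a module-level `op_handler` table that is not shown in this excerpt. Its shape can be inferred from the lookup above: keys are `nn.Module` class names, values are functions with the `(module, grad_input, grad_output)` hook signature. An assumed sketch:

```python
# Assumed structure of op_handler, inferred from how deeplift_grad indexes it;
# the concrete mapping lives elsewhere in the file.
op_handler = {
    'Dropout': passthrough,   # gradient flows through unchanged
    'Linear': linear_1d,      # elementwise-linear ops need no rescaling
    # 'ReLU': nonlinear_1d,   # nonlinear ops would get a rescaling handler (hypothetical)
}
```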
def get_target_input(module, input, output):
"""A forward hook which saves the tensor - attached to its graph.
Used if we want to explain the interim outputs of a model
"""
try:
del module.target_input
except AttributeError:
pass
setattr(module, 'target_input', input) | slundberg/shap | [
18731,
2825,
18731,
1626,
1479842228
] |
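`get_target_input` is meant to be registered on the interim layer so its inputs can be read back after a forward pass, which is exactly what the constructor earlier in this section does via `add_target_handle`. A minimal sketch, assuming `model`, `layer`, and `data` from that context:

```python
# Hedged sketch, mirroring the constructor above: register the hook, run a
# forward pass to populate layer.target_input, then clean up.
handle = layer.register_forward_hook(get_target_input)
with torch.no_grad():
    _ = model(*data)
interim_shapes = [t.shape for t in layer.target_input]  # tuple saved by the hook
handle.remove()
del layer.target_input
```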
def deeplift_tensor_grad(grad):
return_grad = complex_module_gradients[-1]
del complex_module_gradients[-1]
return return_grad | slundberg/shap | [
18731,
2825,
18731,
1626,
1479842228
] |
def passthrough(module, grad_input, grad_output):
"""No change made to gradients"""
return None | slundberg/shap | [
18731,
2825,
18731,
1626,
1479842228
] |
def linear_1d(module, grad_input, grad_output):
"""No change made to gradients."""
return None | slundberg/shap | [
18731,
2825,
18731,
1626,
1479842228
] |
def _coerce_iterator_output(self, expr, state=None):
import supriya.patterns
if not isinstance(expr, supriya.patterns.Event):
expr = supriya.patterns.NoteEvent(**expr)
if expr.get("uuid") is None:
expr = new(expr, uuid=uuid.uuid4())
return expr | Pulgama/supriya | [
208,
25,
208,
13,
1394072845
] |
def play(self, clock=None, server=None):
import supriya.patterns
import supriya.realtime
event_player = supriya.patterns.RealtimeEventPlayer(
self, clock=clock, server=server or supriya.realtime.Server.default()
)
event_player.start()
return event_player | Pulgama/supriya | [
208,
25,
208,
13,
1394072845
] |
def with_effect(self, synthdef, release_time=0.25, **settings):
import supriya.patterns
return supriya.patterns.Pfx(
self, synthdef=synthdef, release_time=release_time, **settings
) | Pulgama/supriya | [
208,
25,
208,
13,
1394072845
] |
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
raise
return result | F5Networks/f5-ansible-modules | [
357,
221,
357,
48,
1448045671
] |
def device_password(self):
if self._values['device_password'] is None:
return None
return self._values['device_password'] | F5Networks/f5-ansible-modules | [
357,
221,
357,
48,
1448045671
] |
def device_username(self):
if self._values['device_username'] is None:
return None
return self._values['device_username'] | F5Networks/f5-ansible-modules | [
357,
221,
357,
48,
1448045671
] |
def device_address(self):
if self.device_is_address:
return self._values['device'] | F5Networks/f5-ansible-modules | [
357,
221,
357,
48,
1448045671
] |
def device_port(self):
if self._values['device_port'] is None:
return None
return int(self._values['device_port']) | F5Networks/f5-ansible-modules | [
357,
221,
357,
48,
1448045671
] |
def device_is_address(self):
if is_valid_ip(self.device):
return True
return False | F5Networks/f5-ansible-modules | [
357,
221,
357,
48,
1448045671
] |
def device_is_id(self):
pattern = r'[A-Za-z0-9]{8}-[A-Za-z0-9]{4}-[A-Za-z0-9]{4}-[A-Za-z0-9]{4}-[A-Za-z0-9]{12}'
if re.match(pattern, self.device):
return True
return False | F5Networks/f5-ansible-modules | [
357,
221,
357,
48,
1448045671
] |
def device_is_name(self):
if not self.device_is_address and not self.device_is_id:
return True
return False | F5Networks/f5-ansible-modules | [
357,
221,
357,
48,
1448045671
] |
def device_reference(self):
if not self.managed:
return None
if self.device_is_address:
# This range lookup is how you do lookups for single IP addresses. Weird.
filter = "address+eq+'{0}...{0}'".format(self.device)
elif self.device_is_name:
filter = "hostname+eq+'{0}'".format(self.device)
elif self.device_is_id:
filter = "uuid+eq+'{0}'".format(self.device)
else:
raise F5ModuleError(
"Unknown device format '{0}'".format(self.device)
)
uri = "https://{0}:{1}/mgmt/shared/resolver/device-groups/cm-bigip-allBigIpDevices/devices/" \
"?$filter={2}&$top=1".format(self.client.provider['server'],
self.client.provider['server_port'], filter)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status == 200 and response['totalItems'] == 0:
raise F5ModuleError(
"No device with the specified address was found."
)
elif 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp._content)
id = response['items'][0]['uuid']
result = dict(
link='https://localhost/mgmt/shared/resolver/device-groups/cm-bigip-allBigIpDevices/devices/{0}'.format(id)
)
return result | F5Networks/f5-ansible-modules | [
357,
221,
357,
48,
1448045671
] |
def pool_id(self):
filter = "(name%20eq%20'{0}')".format(self.pool)
uri = 'https://{0}:{1}/mgmt/cm/device/licensing/pool/regkey/licenses?$filter={2}&$top=1'.format(
self.client.provider['server'],
self.client.provider['server_port'],
filter
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status == 200 and response['totalItems'] == 0:
raise F5ModuleError(
"No pool with the specified name was found."
)
elif 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp._content)
return response['items'][0]['id'] | F5Networks/f5-ansible-modules | [
357,
221,
357,
48,
1448045671
] |
def member_id(self):
if self.device_is_address:
# This range lookup is how you do lookups for single IP addresses. Weird.
filter = "deviceAddress+eq+'{0}...{0}'".format(self.device)
elif self.device_is_name:
filter = "deviceName+eq+'{0}'".format(self.device)
elif self.device_is_id:
filter = "deviceMachineId+eq+'{0}'".format(self.device)
else:
raise F5ModuleError(
"Unknown device format '{0}'".format(self.device)
)
uri = 'https://{0}:{1}/mgmt/cm/device/licensing/pool/regkey/licenses/{2}/offerings/{3}/members/' \
'?$filter={4}'.format(self.client.provider['server'], self.client.provider['server_port'],
self.pool_id, self.key, filter)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status == 200 and response['totalItems'] == 0:
return None
elif 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp._content)
result = response['items'][0]['id']
return result | F5Networks/f5-ansible-modules | [
357,
221,
357,
48,
1448045671
] |
def device_port(self):
if self._values['managed']:
return None
return self._values['device_port'] | F5Networks/f5-ansible-modules | [
357,
221,
357,
48,
1448045671
] |
def device_username(self):
if self._values['managed']:
return None
return self._values['device_username'] | F5Networks/f5-ansible-modules | [
357,
221,
357,
48,
1448045671
] |
def device_password(self):
if self._values['managed']:
return None
return self._values['device_password'] | F5Networks/f5-ansible-modules | [
357,
221,
357,
48,
1448045671
] |
def device_reference(self):
if not self._values['managed']:
return None
return self._values['device_reference'] | F5Networks/f5-ansible-modules | [
357,
221,
357,
48,
1448045671
] |
def device_address(self):
if self._values['managed']:
return None
return self._values['device_address'] | F5Networks/f5-ansible-modules | [
357,
221,
357,
48,
1448045671
] |
def managed(self):
return None | F5Networks/f5-ansible-modules | [
357,
221,
357,
48,
1448045671
] |
def __init__(self, want, have=None):
self.want = want
self.have = have | F5Networks/f5-ansible-modules | [
357,
221,
357,
48,
1448045671
] |
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1 | F5Networks/f5-ansible-modules | [
357,
221,
357,
48,
1448045671
] |
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = F5RestClient(**self.module.params)
self.want = ModuleParameters(params=self.module.params, client=self.client)
self.have = ApiParameters()
self.changes = UsableChanges() | F5Networks/f5-ansible-modules | [
357,
221,
357,
48,
1448045671
] |
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False | F5Networks/f5-ansible-modules | [
357,
221,
357,
48,
1448045671
] |
def exec_module(self):
start = datetime.now().isoformat()
version = bigiq_version(self.client)
changed = False
result = dict()
state = self.want.state
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
send_teem(start, self.module, version)
return result | F5Networks/f5-ansible-modules | [
357,
221,
357,
48,
1448045671
] |
def present(self):
if self.exists():
return False
return self.create() | F5Networks/f5-ansible-modules | [
357,
221,
357,
48,
1448045671
] |
def remove(self):
self._set_changed_options()
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the resource.")
# Artificial sleeping to wait for remote licensing (on BIG-IP) to complete
#
# This should be something that BIG-IQ can do natively in 6.1-ish time.
time.sleep(60)
return True | F5Networks/f5-ansible-modules | [
357,
221,
357,
48,
1448045671
] |
def create_on_device(self):
params = self.changes.api_params()
uri = 'https://{0}:{1}/mgmt/cm/device/licensing/pool/regkey/licenses/{2}/offerings/{3}/members/'.format(
self.client.provider['server'],
self.client.provider['server_port'],
self.want.pool_id,
self.want.key
)
if not self.want.managed:
params['username'] = self.want.device_username
params['password'] = self.want.device_password
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content) | F5Networks/f5-ansible-modules | [
357,
221,
357,
48,
1448045671
] |
def absent(self):
if self.exists():
return self.remove()
return False | F5Networks/f5-ansible-modules | [
357,
221,
357,
48,
1448045671
] |
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
pool=dict(required=True),
key=dict(required=True, no_log=True),
device=dict(required=True),
managed=dict(type='bool'),
device_port=dict(type='int', default=443),
device_username=dict(no_log=True),
device_password=dict(no_log=True),
state=dict(default='present', choices=['absent', 'present'])
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
self.required_if = [
['state', 'present', ['key', 'managed']],
['managed', False, ['device', 'device_username', 'device_password']],
['managed', True, ['device']]
] | F5Networks/f5-ansible-modules | [
357,
221,
357,
48,
1448045671
] |
def find_version(filename):
_version_re = re.compile(r"__version__ = '(.*)'")
for line in open(filename):
version_match = _version_re.match(line)
if version_match:
return version_match.group(1) | tgbugs/pyontutils | [
15,
123,
15,
42,
1455242132
] |
def __init__(self, pool_size=2, strides=None, padding='valid'):
if strides is None:
strides = pool_size
assert padding in {'valid', 'same'}, 'padding must be in {valid, same}'
self.pool_length = pool_size
self.stride = strides
self.border_mode = padding | SummaLabs/DLS | [
30,
19,
30,
4,
1501015843
] |
def __init__(self, pool_size=2, strides=None, padding='valid'):
super(LW_MaxPooling1D, self).__init__(pool_size, strides, padding) | SummaLabs/DLS | [
30,
19,
30,
4,
1501015843
] |
def __init__(self, pool_size=2, strides=None, padding='valid'):
super(LW_AveragePooling1D, self).__init__(pool_size, strides, padding) | SummaLabs/DLS | [
30,
19,
30,
4,
1501015843
] |
def __init__(self, pool_size=(2, 2), strides=None, padding='valid', data_format='default'):
if data_format == 'default':
data_format = default_data_format
assert data_format in {'channels_last', 'channels_first'}, 'data_format must be in {channels_last, channels_first}'
self.pool_size = tuple(pool_size)
if strides is None:
strides = self.pool_size
self.strides = tuple(strides)
assert padding in {'valid', 'same'}, 'padding must be in {valid, same}'
self.border_mode = padding
self.dim_ordering = data_format | SummaLabs/DLS | [
30,
19,
30,
4,
1501015843
] |
def __init__(self, pool_size=(2, 2), strides=None, padding='valid', data_format='default'):
super(LW_MaxPooling2D, self).__init__(pool_size, strides, padding, data_format) | SummaLabs/DLS | [
30,
19,
30,
4,
1501015843
] |
def __init__(self, pool_size=(2, 2), strides=None, padding='valid', data_format='default'):
super(LW_AveragePooling2D, self).__init__(pool_size, strides, padding, data_format) | SummaLabs/DLS | [
30,
19,
30,
4,
1501015843
] |
def __init__(self, pool_size=(2, 2, 2), strides=None, border_mode='valid', dim_ordering='default'):
if dim_ordering == 'default':
dim_ordering = default_data_format
assert dim_ordering in {'channels_last', 'channels_first'}, 'dim_ordering must be in {channels_last, channels_first}'
self.pool_size = tuple(pool_size)
if strides is None:
strides = self.pool_size
self.strides = tuple(strides)
assert border_mode in {'valid', 'same'}, 'border_mode must be in {valid, same}'
self.border_mode = border_mode
self.dim_ordering = dim_ordering | SummaLabs/DLS | [
30,
19,
30,
4,
1501015843
] |
def __init__(self, pool_size=(2, 2, 2), strides=None, border_mode='valid', dim_ordering='default'):
super(LW_MaxPooling3D, self).__init__(pool_size, strides, border_mode, dim_ordering) | SummaLabs/DLS | [
30,
19,
30,
4,
1501015843
] |
def __init__(self, pool_size=(2, 2, 2), strides=None, border_mode='valid', dim_ordering='default'):
super(LW_AveragePooling3D, self).__init__(pool_size, strides, border_mode, dim_ordering) | SummaLabs/DLS | [
30,
19,
30,
4,
1501015843
] |
def __init__(self):
pass | SummaLabs/DLS | [
30,
19,
30,
4,
1501015843
] |
def __init__(self, data_format='default'):
if data_format == 'default':
data_format = default_data_format
self.dim_ordering = data_format | SummaLabs/DLS | [
30,
19,
30,
4,
1501015843
] |
def __init__(self, data_format='default'):
if data_format == 'default':
data_format = default_data_format
self.dim_ordering = data_format | SummaLabs/DLS | [
30,
19,
30,
4,
1501015843
] |
def fetch_queue_lengths(queue_names):
"""Connect to Redis server and request queue lengths. | alphagov/govuk-puppet | [
124,
43,
124,
14,
1449848184
] |
def configure_callback(conf):
"""Receive configuration block"""
global REDIS_HOST, REDIS_PORT, VERBOSE_LOGGING, QUEUE_NAMES
for node in conf.children:
if node.key == 'Host':
REDIS_HOST = node.values[0]
elif node.key == 'Port':
REDIS_PORT = int(node.values[0])
elif node.key == 'Verbose':
VERBOSE_LOGGING = bool(node.values[0])
elif node.key == 'Queues':
QUEUE_NAMES = list(node.values)
else:
collectd.warning('redis_queues plugin: Unknown config key: %s.'
% node.key)
log_verbose('Configured with host=%s, port=%s' % (REDIS_HOST, REDIS_PORT))
for queue in QUEUE_NAMES:
log_verbose('Watching queue %s' % queue)
if not QUEUE_NAMES:
log_verbose('Not watching any queues') | alphagov/govuk-puppet | [
124,
43,
124,
14,
1449848184
] |
def log_verbose(msg):
if not VERBOSE_LOGGING:
return
collectd.info('redis plugin [verbose]: %s' % msg) | alphagov/govuk-puppet | [
124,
43,
124,
14,
1449848184
] |
def step_impl(context):
context.execute_steps(u'''
given I open History dialog
''')
history = context.browser.find_element_by_id("HistoryPopup")
entries = history.find_elements_by_xpath('.//li[not(@data-clone-template)]')
assert len(entries) > 0, "There are no entries in the history"
item = entries[0]
item.find_elements_by_xpath('.//*[@data-share-item]')[0].click() | jsargiot/restman | [
56,
12,
56,
4,
1399144829
] |
def step_impl(context, url):
# Wait for modal to appear
WebDriverWait(context.browser, 10).until(
expected_conditions.visibility_of_element_located(
(By.ID, 'ShareRequestForm')))
output = context.browser.execute_script("return restman.ui.editors.get('#ShareRequestEditor').getValue();")
snippet = json.loads(output)
assert url == snippet["url"], "URL: \"{}\" not in output.\nOutput: {}".format(url, output)
for row in context.table:
assert row['key'] in snippet['headers'], "Header {} is not in output".format(row['key'])
assert row['value'] == snippet['headers'][row['key']], "Header value is not correct. Expected: {}; Actual: {}".format(row['value'], snippet['headers'][row['key']]) | jsargiot/restman | [
56,
12,
56,
4,
1399144829
] |
def step_impl(context):
context.execute_steps(u'''
given I open History dialog
''')
# Click on import
context.browser.find_element_by_id('ImportHistory').click()
WebDriverWait(context.browser, 10).until(
expected_conditions.visibility_of_element_located(
(By.ID, 'ImportRequestForm'))) | jsargiot/restman | [
56,
12,
56,
4,
1399144829
] |
def step_impl(context, url):
req = json.dumps({
"method": "POST",
"url": url,
"headers": {
"Content-Type": "application/json",
"X-Test-Header": "shared_request"
},
"body": {
"type": "form",
"content": {
"SomeKey": "SomeValue11233",
"SomeOtherKey": "SomeOtherValue019",
}
}
})
context.browser.execute_script("return restman.ui.editors.setValue('#ImportRequestEditor', atob('{}'));".format(base64.b64encode(req))) | jsargiot/restman | [
56,
12,
56,
4,
1399144829
] |
def __init__(self):
self.events_count = 0
self.events_count_by_type = dict() | baroquehq/baroque | [
5,
2,
5,
1,
1490877723
] |
def count_all(self):
"""Tells how many events have been counted globally
Returns:
int
"""
return self.events_count | baroquehq/baroque | [
5,
2,
5,
1,
1490877723
] |
def __init__(self, uri=None, path=None, host=None):
"""
ContributorOrcid - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'uri': 'str',
'path': 'str',
'host': 'str'
}
self.attribute_map = {
'uri': 'uri',
'path': 'path',
'host': 'host'
}
self._uri = uri
self._path = path
self._host = host | Royal-Society-of-New-Zealand/NZ-ORCID-Hub | [
13,
7,
13,
28,
1486087622
] |
def uri(self):
"""
Gets the uri of this ContributorOrcid.
:return: The uri of this ContributorOrcid.
:rtype: str
"""
return self._uri | Royal-Society-of-New-Zealand/NZ-ORCID-Hub | [
13,
7,
13,
28,
1486087622
] |
def uri(self, uri):
"""
Sets the uri of this ContributorOrcid.
:param uri: The uri of this ContributorOrcid.
:type: str
"""
self._uri = uri | Royal-Society-of-New-Zealand/NZ-ORCID-Hub | [
13,
7,
13,
28,
1486087622
] |
def path(self):
"""
Gets the path of this ContributorOrcid.
:return: The path of this ContributorOrcid.
:rtype: str
"""
return self._path | Royal-Society-of-New-Zealand/NZ-ORCID-Hub | [
13,
7,
13,
28,
1486087622
] |
def path(self, path):
"""
Sets the path of this ContributorOrcid.
:param path: The path of this ContributorOrcid.
:type: str
"""
self._path = path | Royal-Society-of-New-Zealand/NZ-ORCID-Hub | [
13,
7,
13,
28,
1486087622
] |
def host(self):
"""
Gets the host of this ContributorOrcid.
:return: The host of this ContributorOrcid.
:rtype: str
"""
return self._host | Royal-Society-of-New-Zealand/NZ-ORCID-Hub | [
13,
7,
13,
28,
1486087622
] |
def host(self, host):
"""
Sets the host of this ContributorOrcid.
:param host: The host of this ContributorOrcid.
:type: str
"""
self._host = host | Royal-Society-of-New-Zealand/NZ-ORCID-Hub | [
13,
7,
13,
28,
1486087622
] |
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict()) | Royal-Society-of-New-Zealand/NZ-ORCID-Hub | [
13,
7,
13,
28,
1486087622
] |
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, ContributorOrcid):
return False
return self.__dict__ == other.__dict__ | Royal-Society-of-New-Zealand/NZ-ORCID-Hub | [
13,
7,
13,
28,
1486087622
] |
def test_correct_login(self):
# Ensure login behaves correctly with correct credentials.
with self.client:
response = self.client.post(
"/login",
data=dict(email="ad@min.com", password="admin_user"),
follow_redirects=True,
)
self.assertIn(b"Welcome", response.data)
self.assertIn(b"Logout", response.data)
self.assertIn(b"Members", response.data)
self.assertTrue(current_user.email == "ad@min.com")
self.assertTrue(current_user.is_active())
self.assertEqual(response.status_code, 200) | realpython/flask-skeleton | [
424,
136,
424,
2,
1421857705
] |
def test_logout_route_requires_login(self):
# Ensure logout route requires a logged in user.
response = self.client.get("/logout", follow_redirects=True)
self.assertIn(b"Please log in to access this page", response.data) | realpython/flask-skeleton | [
424,
136,
424,
2,
1421857705
] |
def test_validate_success_login_form(self):
# Ensure correct data validates.
form = LoginForm(email="ad@min.com", password="admin_user")
self.assertTrue(form.validate()) | realpython/flask-skeleton | [
424,
136,
424,
2,
1421857705
] |
def test_get_by_id(self):
# Ensure id is correct for the current/logged in user.
with self.client:
self.client.post(
"/login",
data=dict(email="ad@min.com", password="admin_user"),
follow_redirects=True,
)
self.assertTrue(current_user.id == 1) | realpython/flask-skeleton | [
424,
136,
424,
2,
1421857705
] |
def test_check_password(self):
# Ensure given password is correct after unhashing.
user = User.query.filter_by(email="ad@min.com").first()
self.assertTrue(
bcrypt.check_password_hash(user.password, "admin_user")
)
self.assertFalse(bcrypt.check_password_hash(user.password, "foobar")) | realpython/flask-skeleton | [
424,
136,
424,
2,
1421857705
] |
def test_register_route(self):
# Ensure register route behaves correctly.
response = self.client.get("/register", follow_redirects=True)
self.assertIn(b"<h1>Register</h1>\n", response.data) | realpython/flask-skeleton | [
424,
136,
424,
2,
1421857705
] |
def filename(self):
pass | lasote/conan | [
2,
3,
2,
2,
1448989002
] |
def test___doc__(self):
self.assertEqual(
ctds.Parameter.__doc__,
'''\ | zillow/ctds | [
79,
13,
79,
21,
1457558644
] |
def test_parameter(self):
param1 = ctds.Parameter(b'123', output=True)
self.assertEqual(param1.value, b'123')
self.assertTrue(isinstance(param1, ctds.Parameter))
param2 = ctds.Parameter(b'123')
self.assertEqual(param2.value, b'123')
self.assertEqual(type(param1), type(param2))
self.assertTrue(isinstance(param2, ctds.Parameter)) | zillow/ctds | [
79,
13,
79,
21,
1457558644
] |
def _test__cmp__(self, __cmp__, expected, oper):
cases = (
(ctds.Parameter(b'1234'), ctds.Parameter(b'123')),
(ctds.Parameter(b'123'), ctds.Parameter(b'123')),
(ctds.Parameter(b'123'), ctds.Parameter(b'123', output=True)),
(ctds.Parameter(b'123'), ctds.Parameter(b'1234')),
(ctds.Parameter(b'123'), b'123'),
(ctds.Parameter(b'123'), ctds.Parameter(123)),
(ctds.Parameter(b'123'), unicode_('123')),
(ctds.Parameter(b'123'), ctds.SqlBinary(None)),
(ctds.Parameter(b'123'), 123),
(ctds.Parameter(b'123'), None),
)
for index, args in enumerate(cases):
operation = '[{0}]: {1} {2} {3}'.format(index, repr(args[0]), oper, repr(args[1]))
if expected[index] == TypeError:
try:
__cmp__(*args)
except TypeError as ex:
regex = (
r"'{0}' not supported between instances of '[^']+' and '[^']+'".format(oper)
if not PY3 or PY36
else
r'unorderable types: \S+ {0} \S+'.format(oper)
)
self.assertTrue(re.match(regex, str(ex)), ex)
else:
self.fail('{0} did not fail as expected'.format(operation)) # pragma: nocover
else:
self.assertEqual(__cmp__(*args), expected[index], operation) | zillow/ctds | [
79,
13,
79,
21,
1457558644
] |
def test___cmp__ne(self):
self._test__cmp__(
lambda left, right: left != right,
(
True,
False,
False,
True,
False,
True,
PY3,
True,
True,
True,
),
'!='
) | zillow/ctds | [
79,
13,
79,
21,
1457558644
] |
def test___cmp__le(self):
self._test__cmp__(
lambda left, right: left <= right,
(
False,
True,
True,
True,
True,
TypeError if PY3 else False,
TypeError if PY3 else True,
TypeError if PY3 else False,
TypeError if PY3 else False,
TypeError if PY3 else False,
),
'<='
) | zillow/ctds | [
79,
13,
79,
21,
1457558644
] |
def test___cmp__ge(self):
self._test__cmp__(
lambda left, right: left >= right,
(
True,
True,
True,
False,
True,
TypeError if PY3 else True,
TypeError if PY3 else True,
TypeError if PY3 else True,
TypeError if PY3 else True,
TypeError if PY3 else True,
),
'>='
) | zillow/ctds | [
79,
13,
79,
21,
1457558644
] |
def __init__(self, c_data: _C_MultiHMatrix, **params):
# Users should use one of the two constructors below.
self.c_data = c_data
self.shape = (self.lib.multi_nbrows(c_data), self.lib.multi_nbcols(c_data))
self.size = self.lib.nbhmats(c_data)
self.lib.getHMatrix.restype=ctypes.POINTER(_C_HMatrix)
self.lib.getHMatrix.argtypes=[ctypes.POINTER(_C_MultiHMatrix), ctypes.c_int]
self.hmatrices = []
for l in range(0,self.size):
c_data_hmatrix = self.lib.getHMatrix(self.c_data,l)
self.hmatrices.append(HMatrix(c_data_hmatrix,**params))
self.params = params.copy() | PierreMarchand20/htool | [
18,
4,
18,
2,
1483540397
] |
def from_coefs(cls, getcoefs, nm, points_target, points_source=None, **params):
"""Construct an instance of the class from a evaluation function.
Parameters
----------
getcoefs: Callable
A function evaluating an array of matrices at given coordinates.
points_target: np.ndarray of shape (N, 3)
The coordinates of the target points. If points_source=None, also the coordinates of the target points
points_source: np.ndarray of shape (N, 3)
If not None; the coordinates of the source points.
epsilon: float, keyword-only, optional
Tolerance of the Adaptive Cross Approximation
eta: float, keyword-only, optional
Criterion to choose the blocks to compress
minclustersize: int, keyword-only, optional
Minimum shape of a block
maxblocksize: int, keyword-only, optional
Maximum number of coefficients in a block
Returns
-------
MultiHMatrix or ComplexMultiHMatrix
"""
# Set params.
cls._set_building_params(**params) | PierreMarchand20/htool | [
18,
4,
18,
2,
1483540397
] |
def from_submatrices(cls, getsubmatrix, nm, points_target, points_source=None, **params):
"""Construct an instance of the class from a evaluation function.
Parameters
----------
points: np.ndarray of shape (N, 3)
The coordinates of the points.
getsubmatrix: Callable
A function evaluating the matrix in a given range.
epsilon: float, keyword-only, optional
Tolerance of the Adaptive Cross Approximation
eta: float, keyword-only, optional
Criterion to choose the blocks to compress
minclustersize: int, keyword-only, optional
Minimum shape of a block
maxblocksize: int, keyword-only, optional
Maximum number of coefficients in a block
Returns
-------
HMatrix or ComplexHMatrix
"""
# Set params.
cls._set_building_params(**params)
# Boilerplate code for Python/C++ interface.
_getsubmatrix_func_type = ctypes.CFUNCTYPE(
None, ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_int),
ctypes.c_int, ctypes.c_int, ctypes.POINTER(ctypes.c_double)
)
if points_source is None:
cls.lib.MultiHMatrixCreatewithsubmatSym.restype = ctypes.POINTER(_C_MultiHMatrix)
cls.lib.MultiHMatrixCreatewithsubmatSym.argtypes = [
np.ctypeslib.ndpointer(dtype=np.float64, ndim=2, flags='C_CONTIGUOUS'),
ctypes.c_int,
_getsubmatrix_func_type,
ctypes.c_int
]
# Call the C++ backend.
c_data = cls.lib.MultiHMatrixCreatewithsubmatSym(points_target, points_target.shape[0], _getsubmatrix_func_type(getsubmatrix), nm)
else:
cls.lib.MultiHMatrixCreatewithsubmat.restype = ctypes.POINTER(_C_MultiHMatrix)
cls.lib.MultiHMatrixCreatewithsubmat.argtypes = [
np.ctypeslib.ndpointer(dtype=np.float64, ndim=2, flags='C_CONTIGUOUS'),
ctypes.c_int,
np.ctypeslib.ndpointer(dtype=np.float64, ndim=2, flags='C_CONTIGUOUS'),
ctypes.c_int,
_getsubmatrix_func_type,
ctypes.c_int
]
# Call the C++ backend.
c_data = cls.lib.MultiHMatrixCreatewithsubmat(points_target, points_target.shape[0], points_source, points_source.shape[0], _getsubmatrix_func_type(getsubmatrix), nm)
return cls(c_data, **params) | PierreMarchand20/htool | [
18,
4,
18,
2,
1483540397
] |
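Since `from_submatrices` hands the Python callback to the C++ backend through the `CFUNCTYPE` declared above, the callback receives raw row/column index pointers and a flat row-major output buffer. A usage sketch under those assumptions; the kernel itself is illustrative only:

```python
# Hedged sketch: the callback signature follows the CFUNCTYPE above:
# (rows_ptr, cols_ptr, nrows, ncols, out_ptr).
import numpy as np

def getsubmatrix(rows, cols, nrows, ncols, out):
    for i in range(nrows):
        for j in range(ncols):
            # write A[rows[i], cols[j]] into the flat row-major buffer
            out[i * ncols + j] = 1.0 / (1.0 + abs(rows[i] - cols[j]))

points = np.random.rand(500, 3)   # float64, C-contiguous, as the argtypes require
multi = MultiHMatrix.from_submatrices(getsubmatrix, 2, points,
                                      epsilon=1e-3, eta=10.0, minclustersize=10)
first = multi[0]   # __getitem__ below returns the cached HMatrix instances
```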
def _set_building_params(cls, *, eta=None, minclustersize=None, epsilon=None, maxblocksize=None):
"""Put the parameters in the C++ backend."""
if epsilon is not None:
cls.lib.setepsilon.restype = None
cls.lib.setepsilon.argtypes = [ ctypes.c_double ]
cls.lib.setepsilon(epsilon)
if eta is not None:
cls.lib.seteta.restype = None
cls.lib.seteta.argtypes = [ ctypes.c_double ]
cls.lib.seteta(eta)
if minclustersize is not None:
cls.lib.setminclustersize.restype = None
cls.lib.setminclustersize.argtypes = [ ctypes.c_int ]
cls.lib.setminclustersize(minclustersize)
if maxblocksize is not None:
cls.lib.setmaxblocksize.restype = None
cls.lib.setmaxblocksize.argtypes = [ ctypes.c_int ]
cls.lib.setmaxblocksize(maxblocksize) | PierreMarchand20/htool | [
18,
4,
18,
2,
1483540397
] |
def __getitem__(self, key):
# self.lib.getHMatrix.restype=ctypes.POINTER(_C_HMatrix)
# self.lib.getHMatrix.argtypes=[ctypes.POINTER(_C_MultiHMatrix), ctypes.c_int]
# c_data_hmatrix = self.lib.getHMatrix(self.c_data,key)
# return HMatrix(c_data_hmatrix,**self.params)
return self.hmatrices[key] | PierreMarchand20/htool | [
18,
4,
18,
2,
1483540397
] |
def f():
x = `1` # 4 str | kmod/icbd | [
12,
5,
12,
1,
1397488176
] |
def get_bug_info(self, repository, bug_id):
"""Get the information for the specified bug.
This should return a dictionary with 'summary', 'description', and
'status' keys.
This is cached for 60 seconds to reduce the number of queries to the
bug trackers and make things seem fast after the first infobox load,
but is still a short enough time to give relatively fresh data.
"""
return cache_memoize(self.make_bug_cache_key(repository, bug_id),
lambda: self.get_bug_info_uncached(repository,
bug_id),
expiration=60) | reviewboard/reviewboard | [
1464,
419,
1464,
1,
1250977189
] |
def getParentAndBase(path):
match = PREFIX.match(path)
if match is None:
if path.endswith('/'):
stripped_path = path[:-1]
else:
stripped_path = path
base = FNAME_MATCH.search(stripped_path)
if base is None:
raise ValueError('Invalid path')
parent = FNAME_MATCH.sub('', stripped_path)
return parent, base.group(1)
else:
prefix, leading_slash, uri = match.groups()
parts = uri.split('/')
parent_path = '/'.join(parts[:-1])
if leading_slash is not None:
parent_path = '{prefix}/{uri}'.format(prefix=prefix, uri='/'.join(parts[:-1]))
else:
parent_path = '{prefix}{uri}'.format(prefix=prefix, uri='/'.join(parts[:-1]))
return parent_path, parts[-1] | algorithmiaio/algorithmia-python | [
138,
40,
138,
9,
1436379917
] |
def md5_for_file(fname):
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return str(hash_md5.hexdigest()) | algorithmiaio/algorithmia-python | [
138,
40,
138,
9,
1436379917
] |
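`md5_for_file` streams the file in 4 KiB chunks, so memory use stays flat regardless of file size. For example (the path is hypothetical):

```python
digest = md5_for_file("downloads/archive.zip")   # hypothetical path
# An empty file always hashes to the MD5 of the empty string:
# 'd41d8cd98f00b204e9800998ecf8427e'
```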
def setUp(self):
self.latitude = 32.074322
self.longitude = 34.792081
self.radius_meters = 100
self.number_of_vertices = 36
self.polycircle = \
polycircles.Polycircle(latitude=self.latitude,
longitude=self.longitude,
radius=self.radius_meters,
number_of_vertices=self.number_of_vertices) | adamatan/polycircles | [
13,
7,
13,
8,
1397365713
] |
def test_lon_lat_output(self):
"""Asserts that the vertices in the lat-lon output are in the
right order (lat before long)."""
for vertex in self.polycircle.to_lon_lat():
assert_almost_equal(vertex[0], self.longitude, places=2)
assert_almost_equal(vertex[1], self.latitude, places=2) | adamatan/polycircles | [
13,
7,
13,
8,
1397365713
] |
def test_kml_equals_lon_lat(self):
"""Asserts that the return value of to_kml() property is identical to
the return value of to_lon_lat()."""
assert_equal(self.polycircle.to_kml(), self.polycircle.to_lon_lat()) | adamatan/polycircles | [
13,
7,
13,
8,
1397365713
] |
def naturaltime(seconds: float, future=False) -> str:
assert future
if seconds < 120:
return "in {} second{}".format(
int(seconds), "s" if seconds >= 2 else ""
)
minutes = seconds / 60
if minutes < 120:
return "in {} minute{}".format(
int(minutes), "s" if minutes >= 2 else ""
)
hours = minutes / 60
if hours < 48:
return "in {} hour{}".format(int(hours), "s" if hours >= 2 else "")
days = hours / 24
return "in {} day{}".format(int(days), "s" if days >= 2 else "") | gjcarneiro/yacron | [
400,
38,
400,
9,
1500314642
] |
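The thresholds above keep the unit readable: seconds below two minutes, minutes below two hours, hours below two days. A few values implied directly by the code:

```python
assert naturaltime(90, future=True) == "in 90 seconds"
assert naturaltime(3600, future=True) == "in 60 minutes"   # still under the 120-minute cutoff
assert naturaltime(3 * 86400, future=True) == "in 3 days"
```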
def next_sleep_interval() -> float:
now = get_now(datetime.timezone.utc)
target = now.replace(second=0) + WAKEUP_INTERVAL
return (target - now).total_seconds() | gjcarneiro/yacron | [
400,
38,
400,
9,
1500314642
] |
def web_site_from_url(runner: web.AppRunner, url: str) -> web.BaseSite:
parsed = urlparse(url)
if parsed.scheme == "http":
assert parsed.hostname is not None
assert parsed.port is not None
return web.TCPSite(runner, parsed.hostname, parsed.port)
elif parsed.scheme == "unix":
return web.UnixSite(runner, parsed.path)
else:
logger.warning(
"Ignoring web listen url %s: scheme %r not supported",
url,
parsed.scheme,
)
raise ValueError(url) | gjcarneiro/yacron | [
400,
38,
400,
9,
1500314642
] |
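`web_site_from_url` maps a listen URL onto the matching aiohttp site type: `http://` becomes a `TCPSite`, `unix://` a `UnixSite`, and anything else raises `ValueError`. A minimal sketch of wiring it into aiohttp's standard runner lifecycle:

```python
# Hedged sketch using aiohttp's documented AppRunner API.
import asyncio
from aiohttp import web

async def serve() -> None:
    runner = web.AppRunner(web.Application())
    await runner.setup()
    site = web_site_from_url(runner, "http://127.0.0.1:8080")
    await site.start()          # a unix socket would be "unix:///tmp/app.sock"
    await asyncio.sleep(3600)   # keep serving for a while

asyncio.run(serve())
```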
def __init__(
self, config_arg: Optional[str], *, config_yaml: Optional[str] = None | gjcarneiro/yacron | [
400,
38,
400,
9,
1500314642
] |