Column        Type       Min     Max
text          string     75      104k    (lengths of the string values)
code_tokens   sequence   -       -
avg_line_len  float64    7.91    980
score         float64    0       0.18
texts         sequence   -       -
scores        sequence   -       -
num_lines     int64      3       2.77k
avg_score     float64    0       0.37
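The schema pairs three raw fields (text, code_tokens, texts) with derived per-row statistics. The minimal sketch below uses the first row shown further down and treats a row as a plain Python dict (an assumption; no loading API is implied) to illustrate how num_lines and avg_score appear to be derived from texts and scores in the displayed rows.

```python
# Sketch only: a row is assumed to be a dict holding the columns listed above.
row = {
    "texts": [
        "def skip_cycles(self) -> int:\n",
        '    """The number of cycles dedicated to skips."""\n',
        "    return sum((int(re.sub(r'\\D', '', op)) for op in self.skip_tokens))",
    ],
    "scores": [0, 0.01818181818181818, 0.013333333333333334],
}

# num_lines matches the number of entries in `texts` for the rows shown here.
num_lines = len(row["texts"])                         # -> 3

# avg_score matches the arithmetic mean of the per-line `scores`.
avg_score = sum(row["scores"]) / len(row["scores"])   # -> 0.010505...

print(num_lines, round(avg_score, 6))
```

The other derived column, avg_line_len, is consistent with a mean character count over the lines in texts, though the exact counting convention (trailing newlines, indentation) is not stated in the dump.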
def skip_cycles(self) -> int: """The number of cycles dedicated to skips.""" return sum((int(re.sub(r'\D', '', op)) for op in self.skip_tokens))
[ "def", "skip_cycles", "(", "self", ")", "->", "int", ":", "return", "sum", "(", "(", "int", "(", "re", ".", "sub", "(", "r'\\D'", ",", "''", ",", "op", ")", ")", "for", "op", "in", "self", ".", "skip_tokens", ")", ")" ]
52.666667
0.0125
[ "def skip_cycles(self) -> int:\n", " \"\"\"The number of cycles dedicated to skips.\"\"\"\n", " return sum((int(re.sub(r'\\D', '', op)) for op in self.skip_tokens))" ]
[ 0, 0.01818181818181818, 0.013333333333333334 ]
3
0.010505
def block17(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None): """Builds the 17x17 resnet block.""" with tf.variable_scope(scope, 'Block17', [net], reuse=reuse): with tf.variable_scope('Branch_0'): tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1') with tf.variable_scope('Branch_1'): tower_conv1_0 = slim.conv2d(net, 128, 1, scope='Conv2d_0a_1x1') tower_conv1_1 = slim.conv2d(tower_conv1_0, 160, [1, 7], scope='Conv2d_0b_1x7') tower_conv1_2 = slim.conv2d(tower_conv1_1, 192, [7, 1], scope='Conv2d_0c_7x1') mixed = tf.concat(axis=3, values=[tower_conv, tower_conv1_2]) up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None, activation_fn=None, scope='Conv2d_1x1') net += scale * up if activation_fn: net = activation_fn(net) return net
[ "def", "block17", "(", "net", ",", "scale", "=", "1.0", ",", "activation_fn", "=", "tf", ".", "nn", ".", "relu", ",", "scope", "=", "None", ",", "reuse", "=", "None", ")", ":", "with", "tf", ".", "variable_scope", "(", "scope", ",", "'Block17'", ",", "[", "net", "]", ",", "reuse", "=", "reuse", ")", ":", "with", "tf", ".", "variable_scope", "(", "'Branch_0'", ")", ":", "tower_conv", "=", "slim", ".", "conv2d", "(", "net", ",", "192", ",", "1", ",", "scope", "=", "'Conv2d_1x1'", ")", "with", "tf", ".", "variable_scope", "(", "'Branch_1'", ")", ":", "tower_conv1_0", "=", "slim", ".", "conv2d", "(", "net", ",", "128", ",", "1", ",", "scope", "=", "'Conv2d_0a_1x1'", ")", "tower_conv1_1", "=", "slim", ".", "conv2d", "(", "tower_conv1_0", ",", "160", ",", "[", "1", ",", "7", "]", ",", "scope", "=", "'Conv2d_0b_1x7'", ")", "tower_conv1_2", "=", "slim", ".", "conv2d", "(", "tower_conv1_1", ",", "192", ",", "[", "7", ",", "1", "]", ",", "scope", "=", "'Conv2d_0c_7x1'", ")", "mixed", "=", "tf", ".", "concat", "(", "axis", "=", "3", ",", "values", "=", "[", "tower_conv", ",", "tower_conv1_2", "]", ")", "up", "=", "slim", ".", "conv2d", "(", "mixed", ",", "net", ".", "get_shape", "(", ")", "[", "3", "]", ",", "1", ",", "normalizer_fn", "=", "None", ",", "activation_fn", "=", "None", ",", "scope", "=", "'Conv2d_1x1'", ")", "net", "+=", "scale", "*", "up", "if", "activation_fn", ":", "net", "=", "activation_fn", "(", "net", ")", "return", "net" ]
50.111111
0.009793
[ "def block17(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):\n", " \"\"\"Builds the 17x17 resnet block.\"\"\"\n", " with tf.variable_scope(scope, 'Block17', [net], reuse=reuse):\n", " with tf.variable_scope('Branch_0'):\n", " tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1')\n", " with tf.variable_scope('Branch_1'):\n", " tower_conv1_0 = slim.conv2d(net, 128, 1, scope='Conv2d_0a_1x1')\n", " tower_conv1_1 = slim.conv2d(tower_conv1_0, 160, [1, 7],\n", " scope='Conv2d_0b_1x7')\n", " tower_conv1_2 = slim.conv2d(tower_conv1_1, 192, [7, 1],\n", " scope='Conv2d_0c_7x1')\n", " mixed = tf.concat(axis=3, values=[tower_conv, tower_conv1_2])\n", " up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,\n", " activation_fn=None, scope='Conv2d_1x1')\n", " net += scale * up\n", " if activation_fn:\n", " net = activation_fn(net)\n", " return net" ]
[ 0, 0.02564102564102564, 0.015625, 0, 0.015625, 0, 0.014285714285714285, 0.016129032258064516, 0, 0.016129032258064516, 0, 0, 0, 0, 0, 0, 0.03225806451612903, 0.16666666666666666 ]
18
0.016798
def lognormcdf(x, mu, tau): """Log-normal cumulative density function""" x = np.atleast_1d(x) return np.array( [0.5 * (1 - flib.derf(-(np.sqrt(tau / 2)) * (np.log(y) - mu))) for y in x])
[ "def", "lognormcdf", "(", "x", ",", "mu", ",", "tau", ")", ":", "x", "=", "np", ".", "atleast_1d", "(", "x", ")", "return", "np", ".", "array", "(", "[", "0.5", "*", "(", "1", "-", "flib", ".", "derf", "(", "-", "(", "np", ".", "sqrt", "(", "tau", "/", "2", ")", ")", "*", "(", "np", ".", "log", "(", "y", ")", "-", "mu", ")", ")", ")", "for", "y", "in", "x", "]", ")" ]
40.4
0.009709
[ "def lognormcdf(x, mu, tau):\n", " \"\"\"Log-normal cumulative density function\"\"\"\n", " x = np.atleast_1d(x)\n", " return np.array(\n", " [0.5 * (1 - flib.derf(-(np.sqrt(tau / 2)) * (np.log(y) - mu))) for y in x])" ]
[ 0, 0, 0, 0, 0.024096385542168676 ]
5
0.004819
def magfit(logfile): '''find best magnetometer offset fit to a log file''' print("Processing log %s" % filename) mlog = mavutil.mavlink_connection(filename, notimestamps=args.notimestamps) flying = False gps_heading = 0.0 data = [] # get the current mag offsets m = mlog.recv_match(type='SENSOR_OFFSETS',condition=args.condition) offsets = vec3(m.mag_ofs_x, m.mag_ofs_y, m.mag_ofs_z) attitude = mlog.recv_match(type='ATTITUDE',condition=args.condition) # now gather all the data while True: m = mlog.recv_match(condition=args.condition) if m is None: break if m.get_type() == "GPS_RAW": # flying if groundspeed more than 5 m/s flying = (m.v > args.minspeed and m.fix_type == 2) gps_heading = m.hdg if m.get_type() == "GPS_RAW_INT": # flying if groundspeed more than 5 m/s flying = (m.vel/100 > args.minspeed and m.fix_type == 3) gps_heading = m.cog/100 if m.get_type() == "ATTITUDE": attitude = m if m.get_type() == "SENSOR_OFFSETS": # update current offsets offsets = vec3(m.mag_ofs_x, m.mag_ofs_y, m.mag_ofs_z) if not flying: continue if m.get_type() == "RAW_IMU": data.append((m.xmag - offsets.x, m.ymag - offsets.y, m.zmag - offsets.z, attitude.roll, attitude.pitch, gps_heading)) print("Extracted %u data points" % len(data)) print("Current offsets: %s" % offsets) ofs2 = fit_data(data) print("Declination estimate: %.1f" % ofs2[-1]) new_offsets = vec3(ofs2[0], ofs2[1], ofs2[2]) a = [[ofs2[3], ofs2[4], ofs2[5]], [ofs2[6], ofs2[7], ofs2[8]], [ofs2[9], ofs2[10], ofs2[11]]] print(a) print("New offsets : %s" % new_offsets)
[ "def", "magfit", "(", "logfile", ")", ":", "print", "(", "\"Processing log %s\"", "%", "filename", ")", "mlog", "=", "mavutil", ".", "mavlink_connection", "(", "filename", ",", "notimestamps", "=", "args", ".", "notimestamps", ")", "flying", "=", "False", "gps_heading", "=", "0.0", "data", "=", "[", "]", "# get the current mag offsets", "m", "=", "mlog", ".", "recv_match", "(", "type", "=", "'SENSOR_OFFSETS'", ",", "condition", "=", "args", ".", "condition", ")", "offsets", "=", "vec3", "(", "m", ".", "mag_ofs_x", ",", "m", ".", "mag_ofs_y", ",", "m", ".", "mag_ofs_z", ")", "attitude", "=", "mlog", ".", "recv_match", "(", "type", "=", "'ATTITUDE'", ",", "condition", "=", "args", ".", "condition", ")", "# now gather all the data", "while", "True", ":", "m", "=", "mlog", ".", "recv_match", "(", "condition", "=", "args", ".", "condition", ")", "if", "m", "is", "None", ":", "break", "if", "m", ".", "get_type", "(", ")", "==", "\"GPS_RAW\"", ":", "# flying if groundspeed more than 5 m/s", "flying", "=", "(", "m", ".", "v", ">", "args", ".", "minspeed", "and", "m", ".", "fix_type", "==", "2", ")", "gps_heading", "=", "m", ".", "hdg", "if", "m", ".", "get_type", "(", ")", "==", "\"GPS_RAW_INT\"", ":", "# flying if groundspeed more than 5 m/s", "flying", "=", "(", "m", ".", "vel", "/", "100", ">", "args", ".", "minspeed", "and", "m", ".", "fix_type", "==", "3", ")", "gps_heading", "=", "m", ".", "cog", "/", "100", "if", "m", ".", "get_type", "(", ")", "==", "\"ATTITUDE\"", ":", "attitude", "=", "m", "if", "m", ".", "get_type", "(", ")", "==", "\"SENSOR_OFFSETS\"", ":", "# update current offsets", "offsets", "=", "vec3", "(", "m", ".", "mag_ofs_x", ",", "m", ".", "mag_ofs_y", ",", "m", ".", "mag_ofs_z", ")", "if", "not", "flying", ":", "continue", "if", "m", ".", "get_type", "(", ")", "==", "\"RAW_IMU\"", ":", "data", ".", "append", "(", "(", "m", ".", "xmag", "-", "offsets", ".", "x", ",", "m", ".", "ymag", "-", "offsets", ".", "y", ",", "m", ".", "zmag", "-", "offsets", ".", "z", ",", "attitude", ".", "roll", ",", "attitude", ".", "pitch", ",", "gps_heading", ")", ")", "print", "(", "\"Extracted %u data points\"", "%", "len", "(", "data", ")", ")", "print", "(", "\"Current offsets: %s\"", "%", "offsets", ")", "ofs2", "=", "fit_data", "(", "data", ")", "print", "(", "\"Declination estimate: %.1f\"", "%", "ofs2", "[", "-", "1", "]", ")", "new_offsets", "=", "vec3", "(", "ofs2", "[", "0", "]", ",", "ofs2", "[", "1", "]", ",", "ofs2", "[", "2", "]", ")", "a", "=", "[", "[", "ofs2", "[", "3", "]", ",", "ofs2", "[", "4", "]", ",", "ofs2", "[", "5", "]", "]", ",", "[", "ofs2", "[", "6", "]", ",", "ofs2", "[", "7", "]", ",", "ofs2", "[", "8", "]", "]", ",", "[", "ofs2", "[", "9", "]", ",", "ofs2", "[", "10", "]", ",", "ofs2", "[", "11", "]", "]", "]", "print", "(", "a", ")", "print", "(", "\"New offsets : %s\"", "%", "new_offsets", ")" ]
37.375
0.002173
[ "def magfit(logfile):\n", " '''find best magnetometer offset fit to a log file'''\n", " print(\"Processing log %s\" % filename)\n", " mlog = mavutil.mavlink_connection(filename, notimestamps=args.notimestamps)\n", "\n", " flying = False\n", " gps_heading = 0.0\n", "\n", " data = []\n", "\n", " # get the current mag offsets\n", " m = mlog.recv_match(type='SENSOR_OFFSETS',condition=args.condition)\n", " offsets = vec3(m.mag_ofs_x, m.mag_ofs_y, m.mag_ofs_z)\n", "\n", " attitude = mlog.recv_match(type='ATTITUDE',condition=args.condition)\n", "\n", " # now gather all the data\n", " while True:\n", " m = mlog.recv_match(condition=args.condition)\n", " if m is None:\n", " break\n", " if m.get_type() == \"GPS_RAW\":\n", " # flying if groundspeed more than 5 m/s\n", " flying = (m.v > args.minspeed and m.fix_type == 2)\n", " gps_heading = m.hdg\n", " if m.get_type() == \"GPS_RAW_INT\":\n", " # flying if groundspeed more than 5 m/s\n", " flying = (m.vel/100 > args.minspeed and m.fix_type == 3)\n", " gps_heading = m.cog/100\n", " if m.get_type() == \"ATTITUDE\":\n", " attitude = m\n", " if m.get_type() == \"SENSOR_OFFSETS\":\n", " # update current offsets\n", " offsets = vec3(m.mag_ofs_x, m.mag_ofs_y, m.mag_ofs_z)\n", " if not flying:\n", " continue\n", " if m.get_type() == \"RAW_IMU\":\n", " data.append((m.xmag - offsets.x, m.ymag - offsets.y, m.zmag - offsets.z, attitude.roll, attitude.pitch, gps_heading))\n", " print(\"Extracted %u data points\" % len(data))\n", " print(\"Current offsets: %s\" % offsets)\n", " ofs2 = fit_data(data)\n", " print(\"Declination estimate: %.1f\" % ofs2[-1])\n", " new_offsets = vec3(ofs2[0], ofs2[1], ofs2[2])\n", " a = [[ofs2[3], ofs2[4], ofs2[5]],\n", " [ofs2[6], ofs2[7], ofs2[8]],\n", " [ofs2[9], ofs2[10], ofs2[11]]]\n", " print(a)\n", " print(\"New offsets : %s\" % new_offsets)" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.013888888888888888, 0, 0, 0.0136986301369863, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.007692307692307693, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.021739130434782608 ]
48
0.001188
def get_metric(self, timestamp): """Get a metric including all current time series. Get a :class:`opencensus.metrics.export.metric.Metric` with one :class:`opencensus.metrics.export.time_series.TimeSeries` for each set of label values with a recorded measurement. Each `TimeSeries` has a single point that represents the last recorded value. :type timestamp: :class:`datetime.datetime` :param timestamp: Recording time to report, usually the current time. :rtype: :class:`opencensus.metrics.export.metric.Metric` or None :return: A converted metric for all current measurements. """ if not self.points: return None with self._points_lock: ts_list = get_timeseries_list(self.points, timestamp) return metric.Metric(self.descriptor, ts_list)
[ "def", "get_metric", "(", "self", ",", "timestamp", ")", ":", "if", "not", "self", ".", "points", ":", "return", "None", "with", "self", ".", "_points_lock", ":", "ts_list", "=", "get_timeseries_list", "(", "self", ".", "points", ",", "timestamp", ")", "return", "metric", ".", "Metric", "(", "self", ".", "descriptor", ",", "ts_list", ")" ]
42.6
0.002296
[ "def get_metric(self, timestamp):\n", " \"\"\"Get a metric including all current time series.\n", "\n", " Get a :class:`opencensus.metrics.export.metric.Metric` with one\n", " :class:`opencensus.metrics.export.time_series.TimeSeries` for each\n", " set of label values with a recorded measurement. Each `TimeSeries`\n", " has a single point that represents the last recorded value.\n", "\n", " :type timestamp: :class:`datetime.datetime`\n", " :param timestamp: Recording time to report, usually the current time.\n", "\n", " :rtype: :class:`opencensus.metrics.export.metric.Metric` or None\n", " :return: A converted metric for all current measurements.\n", " \"\"\"\n", " if not self.points:\n", " return None\n", "\n", " with self._points_lock:\n", " ts_list = get_timeseries_list(self.points, timestamp)\n", " return metric.Metric(self.descriptor, ts_list)" ]
[ 0, 0.01694915254237288, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.018518518518518517 ]
20
0.001773
def list_job(jid, ext_source=None, display_progress=False): ''' List a specific job given by its jid ext_source If provided, specifies which external job cache to use. display_progress : False If ``True``, fire progress events. .. versionadded:: 2015.8.8 CLI Example: .. code-block:: bash salt-run jobs.list_job 20130916125524463507 salt-run jobs.list_job 20130916125524463507 --out=pprint ''' ret = {'jid': jid} mminion = salt.minion.MasterMinion(__opts__) returner = _get_returner(( __opts__['ext_job_cache'], ext_source, __opts__['master_job_cache'] )) if display_progress: __jid_event__.fire_event( {'message': 'Querying returner: {0}'.format(returner)}, 'progress' ) job = mminion.returners['{0}.get_load'.format(returner)](jid) ret.update(_format_jid_instance(jid, job)) ret['Result'] = mminion.returners['{0}.get_jid'.format(returner)](jid) fstr = '{0}.get_endtime'.format(__opts__['master_job_cache']) if (__opts__.get('job_cache_store_endtime') and fstr in mminion.returners): endtime = mminion.returners[fstr](jid) if endtime: ret['EndTime'] = endtime return ret
[ "def", "list_job", "(", "jid", ",", "ext_source", "=", "None", ",", "display_progress", "=", "False", ")", ":", "ret", "=", "{", "'jid'", ":", "jid", "}", "mminion", "=", "salt", ".", "minion", ".", "MasterMinion", "(", "__opts__", ")", "returner", "=", "_get_returner", "(", "(", "__opts__", "[", "'ext_job_cache'", "]", ",", "ext_source", ",", "__opts__", "[", "'master_job_cache'", "]", ")", ")", "if", "display_progress", ":", "__jid_event__", ".", "fire_event", "(", "{", "'message'", ":", "'Querying returner: {0}'", ".", "format", "(", "returner", ")", "}", ",", "'progress'", ")", "job", "=", "mminion", ".", "returners", "[", "'{0}.get_load'", ".", "format", "(", "returner", ")", "]", "(", "jid", ")", "ret", ".", "update", "(", "_format_jid_instance", "(", "jid", ",", "job", ")", ")", "ret", "[", "'Result'", "]", "=", "mminion", ".", "returners", "[", "'{0}.get_jid'", ".", "format", "(", "returner", ")", "]", "(", "jid", ")", "fstr", "=", "'{0}.get_endtime'", ".", "format", "(", "__opts__", "[", "'master_job_cache'", "]", ")", "if", "(", "__opts__", ".", "get", "(", "'job_cache_store_endtime'", ")", "and", "fstr", "in", "mminion", ".", "returners", ")", ":", "endtime", "=", "mminion", ".", "returners", "[", "fstr", "]", "(", "jid", ")", "if", "endtime", ":", "ret", "[", "'EndTime'", "]", "=", "endtime", "return", "ret" ]
28.5
0.000771
[ "def list_job(jid, ext_source=None, display_progress=False):\n", " '''\n", " List a specific job given by its jid\n", "\n", " ext_source\n", " If provided, specifies which external job cache to use.\n", "\n", " display_progress : False\n", " If ``True``, fire progress events.\n", "\n", " .. versionadded:: 2015.8.8\n", "\n", " CLI Example:\n", "\n", " .. code-block:: bash\n", "\n", " salt-run jobs.list_job 20130916125524463507\n", " salt-run jobs.list_job 20130916125524463507 --out=pprint\n", " '''\n", " ret = {'jid': jid}\n", " mminion = salt.minion.MasterMinion(__opts__)\n", " returner = _get_returner((\n", " __opts__['ext_job_cache'],\n", " ext_source,\n", " __opts__['master_job_cache']\n", " ))\n", " if display_progress:\n", " __jid_event__.fire_event(\n", " {'message': 'Querying returner: {0}'.format(returner)},\n", " 'progress'\n", " )\n", "\n", " job = mminion.returners['{0}.get_load'.format(returner)](jid)\n", " ret.update(_format_jid_instance(jid, job))\n", " ret['Result'] = mminion.returners['{0}.get_jid'.format(returner)](jid)\n", "\n", " fstr = '{0}.get_endtime'.format(__opts__['master_job_cache'])\n", " if (__opts__.get('job_cache_store_endtime')\n", " and fstr in mminion.returners):\n", " endtime = mminion.returners[fstr](jid)\n", " if endtime:\n", " ret['EndTime'] = endtime\n", "\n", " return ret" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.07142857142857142 ]
44
0.001623
def unravel(txt, binding, msgtype="response"): """ Will unpack the received text. Depending on the context the original response may have been transformed before transmission. :param txt: :param binding: :param msgtype: :return: """ # logger.debug("unravel '%s'", txt) if binding not in [BINDING_HTTP_REDIRECT, BINDING_HTTP_POST, BINDING_SOAP, BINDING_URI, BINDING_HTTP_ARTIFACT, None]: raise UnknownBinding("Don't know how to handle '%s'" % binding) else: try: if binding == BINDING_HTTP_REDIRECT: xmlstr = decode_base64_and_inflate(txt) elif binding == BINDING_HTTP_POST: xmlstr = base64.b64decode(txt) elif binding == BINDING_SOAP: func = getattr(soap, "parse_soap_enveloped_saml_%s" % msgtype) xmlstr = func(txt) elif binding == BINDING_HTTP_ARTIFACT: xmlstr = base64.b64decode(txt) else: xmlstr = txt except Exception: raise UnravelError("Unravelling binding '%s' failed" % binding) return xmlstr
[ "def", "unravel", "(", "txt", ",", "binding", ",", "msgtype", "=", "\"response\"", ")", ":", "# logger.debug(\"unravel '%s'\", txt)", "if", "binding", "not", "in", "[", "BINDING_HTTP_REDIRECT", ",", "BINDING_HTTP_POST", ",", "BINDING_SOAP", ",", "BINDING_URI", ",", "BINDING_HTTP_ARTIFACT", ",", "None", "]", ":", "raise", "UnknownBinding", "(", "\"Don't know how to handle '%s'\"", "%", "binding", ")", "else", ":", "try", ":", "if", "binding", "==", "BINDING_HTTP_REDIRECT", ":", "xmlstr", "=", "decode_base64_and_inflate", "(", "txt", ")", "elif", "binding", "==", "BINDING_HTTP_POST", ":", "xmlstr", "=", "base64", ".", "b64decode", "(", "txt", ")", "elif", "binding", "==", "BINDING_SOAP", ":", "func", "=", "getattr", "(", "soap", ",", "\"parse_soap_enveloped_saml_%s\"", "%", "msgtype", ")", "xmlstr", "=", "func", "(", "txt", ")", "elif", "binding", "==", "BINDING_HTTP_ARTIFACT", ":", "xmlstr", "=", "base64", ".", "b64decode", "(", "txt", ")", "else", ":", "xmlstr", "=", "txt", "except", "Exception", ":", "raise", "UnravelError", "(", "\"Unravelling binding '%s' failed\"", "%", "binding", ")", "return", "xmlstr" ]
40.90625
0.001493
[ "def unravel(txt, binding, msgtype=\"response\"):\n", " \"\"\"\n", " Will unpack the received text. Depending on the context the original\n", " response may have been transformed before transmission.\n", " :param txt:\n", " :param binding:\n", " :param msgtype:\n", " :return:\n", " \"\"\"\n", " # logger.debug(\"unravel '%s'\", txt)\n", " if binding not in [BINDING_HTTP_REDIRECT, BINDING_HTTP_POST,\n", " BINDING_SOAP, BINDING_URI, BINDING_HTTP_ARTIFACT,\n", " None]:\n", " raise UnknownBinding(\"Don't know how to handle '%s'\" % binding)\n", " else:\n", " try:\n", " if binding == BINDING_HTTP_REDIRECT:\n", " xmlstr = decode_base64_and_inflate(txt)\n", " elif binding == BINDING_HTTP_POST:\n", " xmlstr = base64.b64decode(txt)\n", " elif binding == BINDING_SOAP:\n", " func = getattr(soap,\n", " \"parse_soap_enveloped_saml_%s\" % msgtype)\n", " xmlstr = func(txt)\n", " elif binding == BINDING_HTTP_ARTIFACT:\n", " xmlstr = base64.b64decode(txt)\n", " else:\n", " xmlstr = txt\n", " except Exception:\n", " raise UnravelError(\"Unravelling binding '%s' failed\" % binding)\n", "\n", " return xmlstr" ]
[ 0, 0.08333333333333333, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.047619047619047616 ]
32
0.004092
def get_upper_triangle(correlation_matrix): ''' Extract upper triangle from a square matrix. Negative values are set to 0. Args: correlation_matrix (pandas df): Correlations between all replicates Returns: upper_tri_df (pandas df): Upper triangle extracted from correlation_matrix; rid is the row index, cid is the column index, corr is the extracted correlation value ''' upper_triangle = correlation_matrix.where(np.triu(np.ones(correlation_matrix.shape), k=1).astype(np.bool)) # convert matrix into long form description upper_tri_df = upper_triangle.stack().reset_index(level=1) upper_tri_df.columns = ['rid', 'corr'] # Index at this point is cid, it now becomes a column upper_tri_df.reset_index(level=0, inplace=True) # Get rid of negative values upper_tri_df['corr'] = upper_tri_df['corr'].clip(lower=0) return upper_tri_df.round(rounding_precision)
[ "def", "get_upper_triangle", "(", "correlation_matrix", ")", ":", "upper_triangle", "=", "correlation_matrix", ".", "where", "(", "np", ".", "triu", "(", "np", ".", "ones", "(", "correlation_matrix", ".", "shape", ")", ",", "k", "=", "1", ")", ".", "astype", "(", "np", ".", "bool", ")", ")", "# convert matrix into long form description", "upper_tri_df", "=", "upper_triangle", ".", "stack", "(", ")", ".", "reset_index", "(", "level", "=", "1", ")", "upper_tri_df", ".", "columns", "=", "[", "'rid'", ",", "'corr'", "]", "# Index at this point is cid, it now becomes a column", "upper_tri_df", ".", "reset_index", "(", "level", "=", "0", ",", "inplace", "=", "True", ")", "# Get rid of negative values", "upper_tri_df", "[", "'corr'", "]", "=", "upper_tri_df", "[", "'corr'", "]", ".", "clip", "(", "lower", "=", "0", ")", "return", "upper_tri_df", ".", "round", "(", "rounding_precision", ")" ]
36.72
0.002123
[ "def get_upper_triangle(correlation_matrix):\n", " ''' Extract upper triangle from a square matrix. Negative values are\n", " set to 0.\n", "\n", " Args:\n", " correlation_matrix (pandas df): Correlations between all replicates\n", "\n", " Returns:\n", " upper_tri_df (pandas df): Upper triangle extracted from\n", " correlation_matrix; rid is the row index, cid is the column index,\n", " corr is the extracted correlation value\n", " '''\n", " upper_triangle = correlation_matrix.where(np.triu(np.ones(correlation_matrix.shape), k=1).astype(np.bool))\n", "\n", " # convert matrix into long form description\n", " upper_tri_df = upper_triangle.stack().reset_index(level=1)\n", " upper_tri_df.columns = ['rid', 'corr']\n", "\n", " # Index at this point is cid, it now becomes a column\n", " upper_tri_df.reset_index(level=0, inplace=True)\n", "\n", " # Get rid of negative values\n", " upper_tri_df['corr'] = upper_tri_df['corr'].clip(lower=0)\n", "\n", " return upper_tri_df.round(rounding_precision)" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.009009009009009009, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.02040816326530612 ]
25
0.001177
def popen_uci(cls, command: Union[str, List[str]], *, timeout: Optional[float] = 10.0, debug: bool = False, setpgrp: bool = False, **popen_args: Any) -> "SimpleEngine": """ Spawns and initializes an UCI engine. Returns a :class:`~chess.engine.SimpleEngine` instance. """ return cls.popen(UciProtocol, command, timeout=timeout, debug=debug, setpgrp=setpgrp, **popen_args)
[ "def", "popen_uci", "(", "cls", ",", "command", ":", "Union", "[", "str", ",", "List", "[", "str", "]", "]", ",", "*", ",", "timeout", ":", "Optional", "[", "float", "]", "=", "10.0", ",", "debug", ":", "bool", "=", "False", ",", "setpgrp", ":", "bool", "=", "False", ",", "*", "*", "popen_args", ":", "Any", ")", "->", "\"SimpleEngine\"", ":", "return", "cls", ".", "popen", "(", "UciProtocol", ",", "command", ",", "timeout", "=", "timeout", ",", "debug", "=", "debug", ",", "setpgrp", "=", "setpgrp", ",", "*", "*", "popen_args", ")" ]
67.5
0.009756
[ "def popen_uci(cls, command: Union[str, List[str]], *, timeout: Optional[float] = 10.0, debug: bool = False, setpgrp: bool = False, **popen_args: Any) -> \"SimpleEngine\":\n", " \"\"\"\n", " Spawns and initializes an UCI engine.\n", " Returns a :class:`~chess.engine.SimpleEngine` instance.\n", " \"\"\"\n", " return cls.popen(UciProtocol, command, timeout=timeout, debug=debug, setpgrp=setpgrp, **popen_args)" ]
[ 0.005917159763313609, 0.08333333333333333, 0, 0, 0, 0.018691588785046728 ]
6
0.01799
def get_ip_address_info(ip_address, cache=None, nameservers=None, timeout=2.0, parallel=False): """ Returns reverse DNS and country information for the given IP address Args: ip_address (str): The IP address to check cache (ExpiringDict): Cache storage nameservers (list): A list of one or more nameservers to use (Cloudflare's public DNS resolvers by default) timeout (float): Sets the DNS timeout in seconds parallel (bool): parallel processing Returns: OrderedDict: ``ip_address``, ``reverse_dns`` """ ip_address = ip_address.lower() if cache: info = cache.get(ip_address, None) if info: return info info = OrderedDict() info["ip_address"] = ip_address reverse_dns = get_reverse_dns(ip_address, nameservers=nameservers, timeout=timeout) country = get_ip_address_country(ip_address, parallel=parallel) info["country"] = country info["reverse_dns"] = reverse_dns info["base_domain"] = None if reverse_dns is not None: base_domain = get_base_domain(reverse_dns) info["base_domain"] = base_domain return info
[ "def", "get_ip_address_info", "(", "ip_address", ",", "cache", "=", "None", ",", "nameservers", "=", "None", ",", "timeout", "=", "2.0", ",", "parallel", "=", "False", ")", ":", "ip_address", "=", "ip_address", ".", "lower", "(", ")", "if", "cache", ":", "info", "=", "cache", ".", "get", "(", "ip_address", ",", "None", ")", "if", "info", ":", "return", "info", "info", "=", "OrderedDict", "(", ")", "info", "[", "\"ip_address\"", "]", "=", "ip_address", "reverse_dns", "=", "get_reverse_dns", "(", "ip_address", ",", "nameservers", "=", "nameservers", ",", "timeout", "=", "timeout", ")", "country", "=", "get_ip_address_country", "(", "ip_address", ",", "parallel", "=", "parallel", ")", "info", "[", "\"country\"", "]", "=", "country", "info", "[", "\"reverse_dns\"", "]", "=", "reverse_dns", "info", "[", "\"base_domain\"", "]", "=", "None", "if", "reverse_dns", "is", "not", "None", ":", "base_domain", "=", "get_base_domain", "(", "reverse_dns", ")", "info", "[", "\"base_domain\"", "]", "=", "base_domain", "return", "info" ]
34.222222
0.000789
[ "def get_ip_address_info(ip_address, cache=None, nameservers=None,\n", " timeout=2.0, parallel=False):\n", " \"\"\"\n", " Returns reverse DNS and country information for the given IP address\n", "\n", " Args:\n", " ip_address (str): The IP address to check\n", " cache (ExpiringDict): Cache storage\n", " nameservers (list): A list of one or more nameservers to use\n", " (Cloudflare's public DNS resolvers by default)\n", " timeout (float): Sets the DNS timeout in seconds\n", " parallel (bool): parallel processing\n", "\n", " Returns:\n", " OrderedDict: ``ip_address``, ``reverse_dns``\n", "\n", " \"\"\"\n", " ip_address = ip_address.lower()\n", " if cache:\n", " info = cache.get(ip_address, None)\n", " if info:\n", " return info\n", " info = OrderedDict()\n", " info[\"ip_address\"] = ip_address\n", " reverse_dns = get_reverse_dns(ip_address,\n", " nameservers=nameservers,\n", " timeout=timeout)\n", " country = get_ip_address_country(ip_address, parallel=parallel)\n", " info[\"country\"] = country\n", " info[\"reverse_dns\"] = reverse_dns\n", " info[\"base_domain\"] = None\n", " if reverse_dns is not None:\n", " base_domain = get_base_domain(reverse_dns)\n", " info[\"base_domain\"] = base_domain\n", "\n", " return info" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.06666666666666667 ]
36
0.001852
def zoomTo(self, bbox): 'set visible area to bbox, maintaining aspectRatio if applicable' self.fixPoint(self.plotviewBox.xymin, bbox.xymin) self.zoomlevel=max(bbox.w/self.canvasBox.w, bbox.h/self.canvasBox.h)
[ "def", "zoomTo", "(", "self", ",", "bbox", ")", ":", "self", ".", "fixPoint", "(", "self", ".", "plotviewBox", ".", "xymin", ",", "bbox", ".", "xymin", ")", "self", ".", "zoomlevel", "=", "max", "(", "bbox", ".", "w", "/", "self", ".", "canvasBox", ".", "w", ",", "bbox", ".", "h", "/", "self", ".", "canvasBox", ".", "h", ")" ]
57.25
0.012931
[ "def zoomTo(self, bbox):\n", " 'set visible area to bbox, maintaining aspectRatio if applicable'\n", " self.fixPoint(self.plotviewBox.xymin, bbox.xymin)\n", " self.zoomlevel=max(bbox.w/self.canvasBox.w, bbox.h/self.canvasBox.h)" ]
[ 0, 0.013513513513513514, 0, 0.02631578947368421 ]
4
0.009957
def transcribe(records, transcribe): """ Perform transcription or back-transcription. transcribe must be one of the following: dna2rna rna2dna """ logging.info('Applying _transcribe generator: ' 'operation to perform is ' + transcribe + '.') for record in records: sequence = str(record.seq) description = record.description name = record.id if transcribe == 'dna2rna': dna = Seq(sequence, IUPAC.ambiguous_dna) rna = dna.transcribe() yield SeqRecord(rna, id=name, description=description) elif transcribe == 'rna2dna': rna = Seq(sequence, IUPAC.ambiguous_rna) dna = rna.back_transcribe() yield SeqRecord(dna, id=name, description=description)
[ "def", "transcribe", "(", "records", ",", "transcribe", ")", ":", "logging", ".", "info", "(", "'Applying _transcribe generator: '", "'operation to perform is '", "+", "transcribe", "+", "'.'", ")", "for", "record", "in", "records", ":", "sequence", "=", "str", "(", "record", ".", "seq", ")", "description", "=", "record", ".", "description", "name", "=", "record", ".", "id", "if", "transcribe", "==", "'dna2rna'", ":", "dna", "=", "Seq", "(", "sequence", ",", "IUPAC", ".", "ambiguous_dna", ")", "rna", "=", "dna", ".", "transcribe", "(", ")", "yield", "SeqRecord", "(", "rna", ",", "id", "=", "name", ",", "description", "=", "description", ")", "elif", "transcribe", "==", "'rna2dna'", ":", "rna", "=", "Seq", "(", "sequence", ",", "IUPAC", ".", "ambiguous_rna", ")", "dna", "=", "rna", ".", "back_transcribe", "(", ")", "yield", "SeqRecord", "(", "dna", ",", "id", "=", "name", ",", "description", "=", "description", ")" ]
37.666667
0.001233
[ "def transcribe(records, transcribe):\n", " \"\"\"\n", " Perform transcription or back-transcription.\n", " transcribe must be one of the following:\n", " dna2rna\n", " rna2dna\n", " \"\"\"\n", " logging.info('Applying _transcribe generator: '\n", " 'operation to perform is ' + transcribe + '.')\n", " for record in records:\n", " sequence = str(record.seq)\n", " description = record.description\n", " name = record.id\n", " if transcribe == 'dna2rna':\n", " dna = Seq(sequence, IUPAC.ambiguous_dna)\n", " rna = dna.transcribe()\n", " yield SeqRecord(rna, id=name, description=description)\n", " elif transcribe == 'rna2dna':\n", " rna = Seq(sequence, IUPAC.ambiguous_rna)\n", " dna = rna.back_transcribe()\n", " yield SeqRecord(dna, id=name, description=description)" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.015151515151515152 ]
21
0.000722
def deleteSNPs(setName) : """deletes a set of polymorphisms""" con = conf.db try : SMaster = SNPMaster(setName = setName) con.beginTransaction() SNPType = SMaster.SNPType con.delete(SNPType, 'setName = ?', (setName,)) SMaster.delete() con.endTransaction() except KeyError : raise KeyError("Can't delete the setName %s because i can't find it in SNPMaster, maybe there's not set by that name" % setName) #~ printf("can't delete the setName %s because i can't find it in SNPMaster, maybe there's no set by that name" % setName) return False return True
[ "def", "deleteSNPs", "(", "setName", ")", ":", "con", "=", "conf", ".", "db", "try", ":", "SMaster", "=", "SNPMaster", "(", "setName", "=", "setName", ")", "con", ".", "beginTransaction", "(", ")", "SNPType", "=", "SMaster", ".", "SNPType", "con", ".", "delete", "(", "SNPType", ",", "'setName = ?'", ",", "(", "setName", ",", ")", ")", "SMaster", ".", "delete", "(", ")", "con", ".", "endTransaction", "(", ")", "except", "KeyError", ":", "raise", "KeyError", "(", "\"Can't delete the setName %s because i can't find it in SNPMaster, maybe there's not set by that name\"", "%", "setName", ")", "#~ printf(\"can't delete the setName %s because i can't find it in SNPMaster, maybe there's no set by that name\" % setName)", "return", "False", "return", "True" ]
37.266667
0.04014
[ "def deleteSNPs(setName) :\n", "\t\"\"\"deletes a set of polymorphisms\"\"\"\n", "\tcon = conf.db\n", "\ttry :\n", "\t\tSMaster = SNPMaster(setName = setName)\n", "\t\tcon.beginTransaction()\n", "\t\tSNPType = SMaster.SNPType\n", "\t\tcon.delete(SNPType, 'setName = ?', (setName,))\n", "\t\tSMaster.delete()\n", "\t\tcon.endTransaction()\n", "\texcept KeyError :\n", "\t\traise KeyError(\"Can't delete the setName %s because i can't find it in SNPMaster, maybe there's not set by that name\" % setName)\n", "\t\t#~ printf(\"can't delete the setName %s because i can't find it in SNPMaster, maybe there's no set by that name\" % setName)\n", "\t\treturn False\n", "\treturn True" ]
[ 0.038461538461538464, 0.02631578947368421, 0.06666666666666667, 0.2857142857142857, 0.07317073170731707, 0.04, 0.03571428571428571, 0.02040816326530612, 0.05263157894736842, 0.043478260869565216, 0.10526315789473684, 0.015267175572519083, 0.024, 0.06666666666666667, 0.16666666666666666 ]
15
0.070695
def parse_slab_stats(slab_stats): """Convert output from memcached's `stats slabs` into a Python dict. Newlines are returned by memcached along with carriage returns (i.e. '\r\n'). >>> parse_slab_stats( "STAT 1:chunk_size 96\r\nSTAT 1:chunks_per_page 10922\r\nSTAT " "active_slabs 1\r\nSTAT total_malloced 1048512\r\nEND\r\n") { 'slabs': { 1: { 'chunk_size': 96, 'chunks_per_page': 10922, # ... }, }, 'active_slabs': 1, 'total_malloced': 1048512, } """ stats_dict = {'slabs': defaultdict(lambda: {})} for line in slab_stats.splitlines(): if line == 'END': break # e.g.: "STAT 1:chunks_per_page 10922" cmd, key, value = line.split(' ') if cmd != 'STAT': continue # e.g.: "STAT active_slabs 1" if ":" not in key: stats_dict[key] = int(value) continue slab, key = key.split(':') stats_dict['slabs'][int(slab)][key] = int(value) return stats_dict
[ "def", "parse_slab_stats", "(", "slab_stats", ")", ":", "stats_dict", "=", "{", "'slabs'", ":", "defaultdict", "(", "lambda", ":", "{", "}", ")", "}", "for", "line", "in", "slab_stats", ".", "splitlines", "(", ")", ":", "if", "line", "==", "'END'", ":", "break", "# e.g.: \"STAT 1:chunks_per_page 10922\"", "cmd", ",", "key", ",", "value", "=", "line", ".", "split", "(", "' '", ")", "if", "cmd", "!=", "'STAT'", ":", "continue", "# e.g.: \"STAT active_slabs 1\"", "if", "\":\"", "not", "in", "key", ":", "stats_dict", "[", "key", "]", "=", "int", "(", "value", ")", "continue", "slab", ",", "key", "=", "key", ".", "split", "(", "':'", ")", "stats_dict", "[", "'slabs'", "]", "[", "int", "(", "slab", ")", "]", "[", "key", "]", "=", "int", "(", "value", ")", "return", "stats_dict" ]
28.657895
0.000888
[ "def parse_slab_stats(slab_stats):\n", " \"\"\"Convert output from memcached's `stats slabs` into a Python dict.\n", "\n", " Newlines are returned by memcached along with carriage returns\n", " (i.e. '\\r\\n').\n", "\n", " >>> parse_slab_stats(\n", " \"STAT 1:chunk_size 96\\r\\nSTAT 1:chunks_per_page 10922\\r\\nSTAT \"\n", " \"active_slabs 1\\r\\nSTAT total_malloced 1048512\\r\\nEND\\r\\n\")\n", " {\n", " 'slabs': {\n", " 1: {\n", " 'chunk_size': 96,\n", " 'chunks_per_page': 10922,\n", " # ...\n", " },\n", " },\n", " 'active_slabs': 1,\n", " 'total_malloced': 1048512,\n", " }\n", " \"\"\"\n", " stats_dict = {'slabs': defaultdict(lambda: {})}\n", "\n", " for line in slab_stats.splitlines():\n", " if line == 'END':\n", " break\n", " # e.g.: \"STAT 1:chunks_per_page 10922\"\n", " cmd, key, value = line.split(' ')\n", " if cmd != 'STAT':\n", " continue\n", " # e.g.: \"STAT active_slabs 1\"\n", " if \":\" not in key:\n", " stats_dict[key] = int(value)\n", " continue\n", " slab, key = key.split(':')\n", " stats_dict['slabs'][int(slab)][key] = int(value)\n", "\n", " return stats_dict" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.047619047619047616 ]
38
0.001253
def items(self): "Returns a list of (key, value) pairs as 2-tuples." return (list(self._pb.IntMap.items()) + list(self._pb.StringMap.items()) + list(self._pb.FloatMap.items()) + list(self._pb.BoolMap.items()))
[ "def", "items", "(", "self", ")", ":", "return", "(", "list", "(", "self", ".", "_pb", ".", "IntMap", ".", "items", "(", ")", ")", "+", "list", "(", "self", ".", "_pb", ".", "StringMap", ".", "items", "(", ")", ")", "+", "list", "(", "self", ".", "_pb", ".", "FloatMap", ".", "items", "(", ")", ")", "+", "list", "(", "self", ".", "_pb", ".", "BoolMap", ".", "items", "(", ")", ")", ")" ]
59.5
0.016598
[ "def items(self):\n", " \"Returns a list of (key, value) pairs as 2-tuples.\"\n", " return (list(self._pb.IntMap.items()) + list(self._pb.StringMap.items()) +\n", " list(self._pb.FloatMap.items()) + list(self._pb.BoolMap.items()))" ]
[ 0, 0.016666666666666666, 0.012048192771084338, 0.024691358024691357 ]
4
0.013352
def array_to_schema(arr, **options): """ Generate a JSON schema object with type annotation added for given object. :param arr: Array of mapping objects like dicts :param options: Other keyword options such as: - ac_schema_strict: True if more strict (precise) schema is needed - ac_schema_typemap: Type to JSON schema type mappings :return: Another mapping objects represents JSON schema of items """ (typemap, strict) = _process_options(**options) arr = list(arr) scm = dict(type=typemap[list], items=gen_schema(arr[0] if arr else "str", **options)) if strict: nitems = len(arr) scm["minItems"] = nitems scm["uniqueItems"] = len(set(arr)) == nitems return scm
[ "def", "array_to_schema", "(", "arr", ",", "*", "*", "options", ")", ":", "(", "typemap", ",", "strict", ")", "=", "_process_options", "(", "*", "*", "options", ")", "arr", "=", "list", "(", "arr", ")", "scm", "=", "dict", "(", "type", "=", "typemap", "[", "list", "]", ",", "items", "=", "gen_schema", "(", "arr", "[", "0", "]", "if", "arr", "else", "\"str\"", ",", "*", "*", "options", ")", ")", "if", "strict", ":", "nitems", "=", "len", "(", "arr", ")", "scm", "[", "\"minItems\"", "]", "=", "nitems", "scm", "[", "\"uniqueItems\"", "]", "=", "len", "(", "set", "(", "arr", ")", ")", "==", "nitems", "return", "scm" ]
32.304348
0.001307
[ "def array_to_schema(arr, **options):\n", " \"\"\"\n", " Generate a JSON schema object with type annotation added for given object.\n", "\n", " :param arr: Array of mapping objects like dicts\n", " :param options: Other keyword options such as:\n", "\n", " - ac_schema_strict: True if more strict (precise) schema is needed\n", " - ac_schema_typemap: Type to JSON schema type mappings\n", "\n", " :return: Another mapping objects represents JSON schema of items\n", " \"\"\"\n", " (typemap, strict) = _process_options(**options)\n", "\n", " arr = list(arr)\n", " scm = dict(type=typemap[list],\n", " items=gen_schema(arr[0] if arr else \"str\", **options))\n", " if strict:\n", " nitems = len(arr)\n", " scm[\"minItems\"] = nitems\n", " scm[\"uniqueItems\"] = len(set(arr)) == nitems\n", "\n", " return scm" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.07142857142857142 ]
23
0.003106
def _spin(coordinates, theta, around): """Rotate a set of coordinates in place around an arbitrary vector. Parameters ---------- coordinates : np.ndarray, shape=(n,3), dtype=float The coordinates being spun. theta : float The angle by which to spin the coordinates, in radians. around : np.ndarray, shape=(3,), dtype=float The axis about which to spin the coordinates. """ around = np.asarray(around).reshape(3) if np.array_equal(around, np.zeros(3)): raise ValueError('Cannot spin around a zero vector') center_pos = np.mean(coordinates, axis=0) coordinates -= center_pos coordinates = _rotate(coordinates, theta, around) coordinates += center_pos return coordinates
[ "def", "_spin", "(", "coordinates", ",", "theta", ",", "around", ")", ":", "around", "=", "np", ".", "asarray", "(", "around", ")", ".", "reshape", "(", "3", ")", "if", "np", ".", "array_equal", "(", "around", ",", "np", ".", "zeros", "(", "3", ")", ")", ":", "raise", "ValueError", "(", "'Cannot spin around a zero vector'", ")", "center_pos", "=", "np", ".", "mean", "(", "coordinates", ",", "axis", "=", "0", ")", "coordinates", "-=", "center_pos", "coordinates", "=", "_rotate", "(", "coordinates", ",", "theta", ",", "around", ")", "coordinates", "+=", "center_pos", "return", "coordinates" ]
35.095238
0.001321
[ "def _spin(coordinates, theta, around):\n", " \"\"\"Rotate a set of coordinates in place around an arbitrary vector.\n", "\n", " Parameters\n", " ----------\n", " coordinates : np.ndarray, shape=(n,3), dtype=float\n", " The coordinates being spun.\n", " theta : float\n", " The angle by which to spin the coordinates, in radians.\n", " around : np.ndarray, shape=(3,), dtype=float\n", " The axis about which to spin the coordinates.\n", "\n", " \"\"\"\n", " around = np.asarray(around).reshape(3)\n", " if np.array_equal(around, np.zeros(3)):\n", " raise ValueError('Cannot spin around a zero vector')\n", " center_pos = np.mean(coordinates, axis=0)\n", " coordinates -= center_pos\n", " coordinates = _rotate(coordinates, theta, around)\n", " coordinates += center_pos\n", " return coordinates" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.045454545454545456 ]
21
0.002165
def write_data(self, data, dstart=None, swap_axes=True): """Write ``data`` to `file`. Parameters ---------- data : `array-like` Data that should be written to `file`. dstart : non-negative int, optional Offset in bytes of the start position of the written data. If provided, reshaping and axis swapping of ``data`` is skipped. For ``None``, `header_size` is used. swap_axes : bool, optional If ``True``, use the ``'mapc', 'mapr', 'maps'`` header entries to swap the axes in the ``data`` before writing. Use ``False`` only if the data is already consistent with the final axis order. """ if dstart is None: shape = self.data_shape dstart = int(self.header_size) elif dstart < 0: raise ValueError('`dstart` must be non-negative, got {}' ''.format(dstart)) else: shape = -1 dstart = int(dstart) if dstart < self.header_size: raise ValueError('invalid `dstart`, resulting in absolute ' '`dstart` < `header_size` ({} < {})' ''.format(dstart, self.header_size)) data = np.asarray(data, dtype=self.data_dtype).reshape(shape) if swap_axes: # Need to argsort here since `data_axis_order` tells # "which axis comes from where", which is the inverse of what the # `transpose` function needs. data = np.transpose(data, axes=np.argsort(self.data_axis_order)) assert data.shape == self.data_storage_shape data = data.reshape(-1, order='F') self.file.seek(dstart) data.tofile(self.file)
[ "def", "write_data", "(", "self", ",", "data", ",", "dstart", "=", "None", ",", "swap_axes", "=", "True", ")", ":", "if", "dstart", "is", "None", ":", "shape", "=", "self", ".", "data_shape", "dstart", "=", "int", "(", "self", ".", "header_size", ")", "elif", "dstart", "<", "0", ":", "raise", "ValueError", "(", "'`dstart` must be non-negative, got {}'", "''", ".", "format", "(", "dstart", ")", ")", "else", ":", "shape", "=", "-", "1", "dstart", "=", "int", "(", "dstart", ")", "if", "dstart", "<", "self", ".", "header_size", ":", "raise", "ValueError", "(", "'invalid `dstart`, resulting in absolute '", "'`dstart` < `header_size` ({} < {})'", "''", ".", "format", "(", "dstart", ",", "self", ".", "header_size", ")", ")", "data", "=", "np", ".", "asarray", "(", "data", ",", "dtype", "=", "self", ".", "data_dtype", ")", ".", "reshape", "(", "shape", ")", "if", "swap_axes", ":", "# Need to argsort here since `data_axis_order` tells", "# \"which axis comes from where\", which is the inverse of what the", "# `transpose` function needs.", "data", "=", "np", ".", "transpose", "(", "data", ",", "axes", "=", "np", ".", "argsort", "(", "self", ".", "data_axis_order", ")", ")", "assert", "data", ".", "shape", "==", "self", ".", "data_storage_shape", "data", "=", "data", ".", "reshape", "(", "-", "1", ",", "order", "=", "'F'", ")", "self", ".", "file", ".", "seek", "(", "dstart", ")", "data", ".", "tofile", "(", "self", ".", "file", ")" ]
40.613636
0.001093
[ "def write_data(self, data, dstart=None, swap_axes=True):\n", " \"\"\"Write ``data`` to `file`.\n", "\n", " Parameters\n", " ----------\n", " data : `array-like`\n", " Data that should be written to `file`.\n", " dstart : non-negative int, optional\n", " Offset in bytes of the start position of the written data.\n", " If provided, reshaping and axis swapping of ``data`` is\n", " skipped.\n", " For ``None``, `header_size` is used.\n", " swap_axes : bool, optional\n", " If ``True``, use the ``'mapc', 'mapr', 'maps'`` header entries\n", " to swap the axes in the ``data`` before writing. Use ``False``\n", " only if the data is already consistent with the final axis\n", " order.\n", " \"\"\"\n", " if dstart is None:\n", " shape = self.data_shape\n", " dstart = int(self.header_size)\n", " elif dstart < 0:\n", " raise ValueError('`dstart` must be non-negative, got {}'\n", " ''.format(dstart))\n", " else:\n", " shape = -1\n", " dstart = int(dstart)\n", "\n", " if dstart < self.header_size:\n", " raise ValueError('invalid `dstart`, resulting in absolute '\n", " '`dstart` < `header_size` ({} < {})'\n", " ''.format(dstart, self.header_size))\n", "\n", " data = np.asarray(data, dtype=self.data_dtype).reshape(shape)\n", " if swap_axes:\n", " # Need to argsort here since `data_axis_order` tells\n", " # \"which axis comes from where\", which is the inverse of what the\n", " # `transpose` function needs.\n", " data = np.transpose(data, axes=np.argsort(self.data_axis_order))\n", " assert data.shape == self.data_storage_shape\n", "\n", " data = data.reshape(-1, order='F')\n", " self.file.seek(dstart)\n", " data.tofile(self.file)" ]
[ 0, 0.02702702702702703, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.03333333333333333 ]
44
0.001372
def num_available_breakpoints(self, arm=False, thumb=False, ram=False, flash=False, hw=False): """Returns the number of available breakpoints of the specified type. If ``arm`` is set, gets the number of available ARM breakpoint units. If ``thumb`` is set, gets the number of available THUMB breakpoint units. If ``ram`` is set, gets the number of available software RAM breakpoint units. If ``flash`` is set, gets the number of available software flash breakpoint units. If ``hw`` is set, gets the number of available hardware breakpoint units. If a combination of the flags is given, then ``num_available_breakpoints()`` returns the number of breakpoints specified by the given flags. If no flags are specified, then the count of available breakpoint units is returned. Args: self (JLink): the ``JLink`` instance arm (bool): Boolean indicating to get number of ARM breakpoints. thumb (bool): Boolean indicating to get number of THUMB breakpoints. ram (bool): Boolean indicating to get number of SW RAM breakpoints. flash (bool): Boolean indicating to get number of Flash breakpoints. hw (bool): Boolean indicating to get number of Hardware breakpoints. Returns: The number of available breakpoint units of the specified type. """ flags = [ enums.JLinkBreakpoint.ARM, enums.JLinkBreakpoint.THUMB, enums.JLinkBreakpoint.SW_RAM, enums.JLinkBreakpoint.SW_FLASH, enums.JLinkBreakpoint.HW ] set_flags = [ arm, thumb, ram, flash, hw ] if not any(set_flags): flags = enums.JLinkBreakpoint.ANY else: flags = list(f for i, f in enumerate(flags) if set_flags[i]) flags = functools.reduce(operator.__or__, flags, 0) return self._dll.JLINKARM_GetNumBPUnits(flags)
[ "def", "num_available_breakpoints", "(", "self", ",", "arm", "=", "False", ",", "thumb", "=", "False", ",", "ram", "=", "False", ",", "flash", "=", "False", ",", "hw", "=", "False", ")", ":", "flags", "=", "[", "enums", ".", "JLinkBreakpoint", ".", "ARM", ",", "enums", ".", "JLinkBreakpoint", ".", "THUMB", ",", "enums", ".", "JLinkBreakpoint", ".", "SW_RAM", ",", "enums", ".", "JLinkBreakpoint", ".", "SW_FLASH", ",", "enums", ".", "JLinkBreakpoint", ".", "HW", "]", "set_flags", "=", "[", "arm", ",", "thumb", ",", "ram", ",", "flash", ",", "hw", "]", "if", "not", "any", "(", "set_flags", ")", ":", "flags", "=", "enums", ".", "JLinkBreakpoint", ".", "ANY", "else", ":", "flags", "=", "list", "(", "f", "for", "i", ",", "f", "in", "enumerate", "(", "flags", ")", "if", "set_flags", "[", "i", "]", ")", "flags", "=", "functools", ".", "reduce", "(", "operator", ".", "__or__", ",", "flags", ",", "0", ")", "return", "self", ".", "_dll", ".", "JLINKARM_GetNumBPUnits", "(", "flags", ")" ]
40.979592
0.001459
[ "def num_available_breakpoints(self, arm=False, thumb=False, ram=False, flash=False, hw=False):\n", " \"\"\"Returns the number of available breakpoints of the specified type.\n", "\n", " If ``arm`` is set, gets the number of available ARM breakpoint units.\n", " If ``thumb`` is set, gets the number of available THUMB breakpoint\n", " units. If ``ram`` is set, gets the number of available software RAM\n", " breakpoint units. If ``flash`` is set, gets the number of available\n", " software flash breakpoint units. If ``hw`` is set, gets the number of\n", " available hardware breakpoint units.\n", "\n", " If a combination of the flags is given, then\n", " ``num_available_breakpoints()`` returns the number of breakpoints\n", " specified by the given flags. If no flags are specified, then the\n", " count of available breakpoint units is returned.\n", "\n", " Args:\n", " self (JLink): the ``JLink`` instance\n", " arm (bool): Boolean indicating to get number of ARM breakpoints.\n", " thumb (bool): Boolean indicating to get number of THUMB breakpoints.\n", " ram (bool): Boolean indicating to get number of SW RAM breakpoints.\n", " flash (bool): Boolean indicating to get number of Flash breakpoints.\n", " hw (bool): Boolean indicating to get number of Hardware breakpoints.\n", "\n", " Returns:\n", " The number of available breakpoint units of the specified type.\n", " \"\"\"\n", " flags = [\n", " enums.JLinkBreakpoint.ARM,\n", " enums.JLinkBreakpoint.THUMB,\n", " enums.JLinkBreakpoint.SW_RAM,\n", " enums.JLinkBreakpoint.SW_FLASH,\n", " enums.JLinkBreakpoint.HW\n", " ]\n", "\n", " set_flags = [\n", " arm,\n", " thumb,\n", " ram,\n", " flash,\n", " hw\n", " ]\n", "\n", " if not any(set_flags):\n", " flags = enums.JLinkBreakpoint.ANY\n", " else:\n", " flags = list(f for i, f in enumerate(flags) if set_flags[i])\n", " flags = functools.reduce(operator.__or__, flags, 0)\n", "\n", " return self._dll.JLINKARM_GetNumBPUnits(flags)" ]
[ 0.010526315789473684, 0.01282051282051282, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.018518518518518517 ]
49
0.000854
def convert(in_file, out_file, in_fmt="", out_fmt=""): """ Converts in_file to out_file, guessing datatype in the absence of in_fmt and out_fmt. Arguments: in_file: The name of the (existing) datafile to read out_file: The name of the file to create with converted data in_fmt: Optional. The format of incoming data, if not guessable out_fmt: Optional. The format of outgoing data, if not guessable Returns: String. Output filename """ # First verify that in_file exists and out_file doesn't. in_file = os.path.expanduser(in_file) out_file = os.path.expanduser(out_file) if not os.path.exists(in_file): raise IOError("Input file {0} does not exist, stopping..." .format(in_file)) # Get formats, either by explicitly naming them or by guessing. # TODO: It'd be neat to check here if an explicit fmt matches the guess. in_fmt = in_fmt.lower() or _guess_format_from_extension( in_file.split('.')[-1].lower()) out_fmt = out_fmt.lower() or _guess_format_from_extension( out_file.split('.')[-1].lower()) if not in_fmt or not out_fmt: raise ValueError("Cannot determine conversion formats.") return False if in_fmt is out_fmt: # This is the case when this module (intended for LONI) is used # indescriminately to 'funnel' data into one format. shutil.copyfileobj(in_file, out_file) return out_file # Import if in_fmt == 'hdf5': from . import hdf5 data = hdf5.load(in_file) elif in_fmt == 'tiff': from . import tiff data = tiff.load(in_file) elif in_fmt == 'png': from . import png data = png.load(in_file) else: return _fail_pair_conversion(in_fmt, out_fmt) # Export if out_fmt == 'hdf5': from . import hdf5 return hdf5.save(out_file, data) elif out_fmt == 'tiff': from . import tiff return tiff.save(out_file, data) elif out_fmt == 'png': from . import png return png.export_png(out_file, data) else: return _fail_pair_conversion(in_fmt, out_fmt) return _fail_pair_conversion(in_fmt, out_fmt)
[ "def", "convert", "(", "in_file", ",", "out_file", ",", "in_fmt", "=", "\"\"", ",", "out_fmt", "=", "\"\"", ")", ":", "# First verify that in_file exists and out_file doesn't.", "in_file", "=", "os", ".", "path", ".", "expanduser", "(", "in_file", ")", "out_file", "=", "os", ".", "path", ".", "expanduser", "(", "out_file", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "in_file", ")", ":", "raise", "IOError", "(", "\"Input file {0} does not exist, stopping...\"", ".", "format", "(", "in_file", ")", ")", "# Get formats, either by explicitly naming them or by guessing.", "# TODO: It'd be neat to check here if an explicit fmt matches the guess.", "in_fmt", "=", "in_fmt", ".", "lower", "(", ")", "or", "_guess_format_from_extension", "(", "in_file", ".", "split", "(", "'.'", ")", "[", "-", "1", "]", ".", "lower", "(", ")", ")", "out_fmt", "=", "out_fmt", ".", "lower", "(", ")", "or", "_guess_format_from_extension", "(", "out_file", ".", "split", "(", "'.'", ")", "[", "-", "1", "]", ".", "lower", "(", ")", ")", "if", "not", "in_fmt", "or", "not", "out_fmt", ":", "raise", "ValueError", "(", "\"Cannot determine conversion formats.\"", ")", "return", "False", "if", "in_fmt", "is", "out_fmt", ":", "# This is the case when this module (intended for LONI) is used", "# indescriminately to 'funnel' data into one format.", "shutil", ".", "copyfileobj", "(", "in_file", ",", "out_file", ")", "return", "out_file", "# Import", "if", "in_fmt", "==", "'hdf5'", ":", "from", ".", "import", "hdf5", "data", "=", "hdf5", ".", "load", "(", "in_file", ")", "elif", "in_fmt", "==", "'tiff'", ":", "from", ".", "import", "tiff", "data", "=", "tiff", ".", "load", "(", "in_file", ")", "elif", "in_fmt", "==", "'png'", ":", "from", ".", "import", "png", "data", "=", "png", ".", "load", "(", "in_file", ")", "else", ":", "return", "_fail_pair_conversion", "(", "in_fmt", ",", "out_fmt", ")", "# Export", "if", "out_fmt", "==", "'hdf5'", ":", "from", ".", "import", "hdf5", "return", "hdf5", ".", "save", "(", "out_file", ",", "data", ")", "elif", "out_fmt", "==", "'tiff'", ":", "from", ".", "import", "tiff", "return", "tiff", ".", "save", "(", "out_file", ",", "data", ")", "elif", "out_fmt", "==", "'png'", ":", "from", ".", "import", "png", "return", "png", ".", "export_png", "(", "out_file", ",", "data", ")", "else", ":", "return", "_fail_pair_conversion", "(", "in_fmt", ",", "out_fmt", ")", "return", "_fail_pair_conversion", "(", "in_fmt", ",", "out_fmt", ")" ]
33.272727
0.000442
[ "def convert(in_file, out_file, in_fmt=\"\", out_fmt=\"\"):\n", " \"\"\"\n", " Converts in_file to out_file, guessing datatype in the absence of\n", " in_fmt and out_fmt.\n", "\n", " Arguments:\n", " in_file: The name of the (existing) datafile to read\n", " out_file: The name of the file to create with converted data\n", " in_fmt: Optional. The format of incoming data, if not guessable\n", " out_fmt: Optional. The format of outgoing data, if not guessable\n", "\n", " Returns:\n", " String. Output filename\n", " \"\"\"\n", " # First verify that in_file exists and out_file doesn't.\n", " in_file = os.path.expanduser(in_file)\n", " out_file = os.path.expanduser(out_file)\n", "\n", " if not os.path.exists(in_file):\n", " raise IOError(\"Input file {0} does not exist, stopping...\"\n", " .format(in_file))\n", "\n", " # Get formats, either by explicitly naming them or by guessing.\n", " # TODO: It'd be neat to check here if an explicit fmt matches the guess.\n", " in_fmt = in_fmt.lower() or _guess_format_from_extension(\n", " in_file.split('.')[-1].lower())\n", " out_fmt = out_fmt.lower() or _guess_format_from_extension(\n", " out_file.split('.')[-1].lower())\n", "\n", " if not in_fmt or not out_fmt:\n", " raise ValueError(\"Cannot determine conversion formats.\")\n", " return False\n", "\n", " if in_fmt is out_fmt:\n", " # This is the case when this module (intended for LONI) is used\n", " # indescriminately to 'funnel' data into one format.\n", " shutil.copyfileobj(in_file, out_file)\n", " return out_file\n", "\n", " # Import\n", " if in_fmt == 'hdf5':\n", " from . import hdf5\n", " data = hdf5.load(in_file)\n", " elif in_fmt == 'tiff':\n", " from . import tiff\n", " data = tiff.load(in_file)\n", " elif in_fmt == 'png':\n", " from . import png\n", " data = png.load(in_file)\n", " else:\n", " return _fail_pair_conversion(in_fmt, out_fmt)\n", "\n", " # Export\n", " if out_fmt == 'hdf5':\n", " from . import hdf5\n", " return hdf5.save(out_file, data)\n", " elif out_fmt == 'tiff':\n", " from . import tiff\n", " return tiff.save(out_file, data)\n", " elif out_fmt == 'png':\n", " from . import png\n", " return png.export_png(out_file, data)\n", " else:\n", " return _fail_pair_conversion(in_fmt, out_fmt)\n", "\n", " return _fail_pair_conversion(in_fmt, out_fmt)" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.02040816326530612 ]
66
0.000309
def is_first(self, value): """The is_first property. Args: value (string). the property value. """ if value == self._defaults['ai.session.isFirst'] and 'ai.session.isFirst' in self._values: del self._values['ai.session.isFirst'] else: self._values['ai.session.isFirst'] = value
[ "def", "is_first", "(", "self", ",", "value", ")", ":", "if", "value", "==", "self", ".", "_defaults", "[", "'ai.session.isFirst'", "]", "and", "'ai.session.isFirst'", "in", "self", ".", "_values", ":", "del", "self", ".", "_values", "[", "'ai.session.isFirst'", "]", "else", ":", "self", ".", "_values", "[", "'ai.session.isFirst'", "]", "=", "value" ]
35.3
0.01105
[ "def is_first(self, value):\n", " \"\"\"The is_first property.\n", " \n", " Args:\n", " value (string). the property value.\n", " \"\"\"\n", " if value == self._defaults['ai.session.isFirst'] and 'ai.session.isFirst' in self._values:\n", " del self._values['ai.session.isFirst']\n", " else:\n", " self._values['ai.session.isFirst'] = value" ]
[ 0, 0.029411764705882353, 0.1111111111111111, 0, 0, 0, 0.010101010101010102, 0, 0, 0.018518518518518517 ]
10
0.016914
def value_nth_person(self, n, array, default = 0): """ Get the value of array for the person whose position in the entity is n. Note that this position is arbitrary, and that members are not sorted. If the nth person does not exist, return ``default`` instead. The result is a vector which dimension is the number of entities. """ self.members.check_array_compatible_with_entity(array) positions = self.members_position nb_persons_per_entity = self.nb_persons() members_map = self.ordered_members_map result = self.filled_array(default, dtype = array.dtype) # For households that have at least n persons, set the result as the value of criteria for the person for which the position is n. # The map is needed b/c the order of the nth persons of each household in the persons vector is not necessarily the same than the household order. result[nb_persons_per_entity > n] = array[members_map][positions[members_map] == n] return result
[ "def", "value_nth_person", "(", "self", ",", "n", ",", "array", ",", "default", "=", "0", ")", ":", "self", ".", "members", ".", "check_array_compatible_with_entity", "(", "array", ")", "positions", "=", "self", ".", "members_position", "nb_persons_per_entity", "=", "self", ".", "nb_persons", "(", ")", "members_map", "=", "self", ".", "ordered_members_map", "result", "=", "self", ".", "filled_array", "(", "default", ",", "dtype", "=", "array", ".", "dtype", ")", "# For households that have at least n persons, set the result as the value of criteria for the person for which the position is n.", "# The map is needed b/c the order of the nth persons of each household in the persons vector is not necessarily the same than the household order.", "result", "[", "nb_persons_per_entity", ">", "n", "]", "=", "array", "[", "members_map", "]", "[", "positions", "[", "members_map", "]", "==", "n", "]", "return", "result" ]
52.75
0.010242
[ "def value_nth_person(self, n, array, default = 0):\n", " \"\"\"\n", " Get the value of array for the person whose position in the entity is n.\n", "\n", " Note that this position is arbitrary, and that members are not sorted.\n", "\n", " If the nth person does not exist, return ``default`` instead.\n", "\n", " The result is a vector which dimension is the number of entities.\n", " \"\"\"\n", " self.members.check_array_compatible_with_entity(array)\n", " positions = self.members_position\n", " nb_persons_per_entity = self.nb_persons()\n", " members_map = self.ordered_members_map\n", " result = self.filled_array(default, dtype = array.dtype)\n", " # For households that have at least n persons, set the result as the value of criteria for the person for which the position is n.\n", " # The map is needed b/c the order of the nth persons of each household in the persons vector is not necessarily the same than the household order.\n", " result[nb_persons_per_entity > n] = array[members_map][positions[members_map] == n]\n", "\n", " return result" ]
[ 0.0392156862745098, 0.08333333333333333, 0.011764705882352941, 0, 0.012048192771084338, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.03076923076923077, 0.007194244604316547, 0.0064516129032258064, 0.010869565217391304, 0, 0.047619047619047616 ]
20
0.012463
def login_required(function=None, message=None, login_url=None): """ Decorator for views that checks that the user is logged in, redirecting to the log-in page if necessary. """ actual_decorator = user_passes_test( lambda u: u.is_authenticated(), message=message, login_url=login_url ) if function: return actual_decorator(function) return actual_decorator
[ "def", "login_required", "(", "function", "=", "None", ",", "message", "=", "None", ",", "login_url", "=", "None", ")", ":", "actual_decorator", "=", "user_passes_test", "(", "lambda", "u", ":", "u", ".", "is_authenticated", "(", ")", ",", "message", "=", "message", ",", "login_url", "=", "login_url", ")", "if", "function", ":", "return", "actual_decorator", "(", "function", ")", "return", "actual_decorator" ]
29.142857
0.002375
[ "def login_required(function=None, message=None, login_url=None):\n", " \"\"\"\n", " Decorator for views that checks that the user is logged in, redirecting\n", " to the log-in page if necessary.\n", " \"\"\"\n", " actual_decorator = user_passes_test(\n", " lambda u: u.is_authenticated(),\n", " message=message,\n", " login_url=login_url\n", " )\n", "\n", " if function:\n", " return actual_decorator(function)\n", " return actual_decorator" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.037037037037037035 ]
14
0.002646
def path_regex(self): """Return the regex for the path to the build folder.""" if self.locale_build: return self.build_list_regex return '%s/' % urljoin(self.build_list_regex, self.builds[self.build_index])
[ "def", "path_regex", "(", "self", ")", ":", "if", "self", ".", "locale_build", ":", "return", "self", ".", "build_list_regex", "return", "'%s/'", "%", "urljoin", "(", "self", ".", "build_list_regex", ",", "self", ".", "builds", "[", "self", ".", "build_index", "]", ")" ]
39.666667
0.012346
[ "def path_regex(self):\n", " \"\"\"Return the regex for the path to the build folder.\"\"\"\n", " if self.locale_build:\n", " return self.build_list_regex\n", "\n", " return '%s/' % urljoin(self.build_list_regex, self.builds[self.build_index])" ]
[ 0, 0.015384615384615385, 0, 0, 0, 0.023809523809523808 ]
6
0.006532
def dropzoneAt(self, point): """ Returns the dropzone at the inputed point. :param point | <QPoint> """ for dropzone in self._dropzones: rect = dropzone.rect() if ( rect.contains(point) ): return dropzone return None
[ "def", "dropzoneAt", "(", "self", ",", "point", ")", ":", "for", "dropzone", "in", "self", ".", "_dropzones", ":", "rect", "=", "dropzone", ".", "rect", "(", ")", "if", "(", "rect", ".", "contains", "(", "point", ")", ")", ":", "return", "dropzone", "return", "None" ]
28
0.015723
[ "def dropzoneAt(self, point):\n", " \"\"\"\n", " Returns the dropzone at the inputed point.\n", " \n", " :param point | <QPoint>\n", " \"\"\"\n", " for dropzone in self._dropzones:\n", " rect = dropzone.rect()\n", " if ( rect.contains(point) ):\n", " return dropzone\n", " return None" ]
[ 0, 0.08333333333333333, 0, 0.1111111111111111, 0, 0, 0, 0, 0.04878048780487805, 0, 0.05263157894736842 ]
11
0.026896
def on_click(self, button, **kwargs): """ Maps a click event with its associated callback. Currently implemented events are: ============ ================ ========= Event Callback setting Button ID ============ ================ ========= Left click on_leftclick 1 Middle click on_middleclick 2 Right click on_rightclick 3 Scroll up on_upscroll 4 Scroll down on_downscroll 5 Others on_otherclick > 5 ============ ================ ========= The action is determined by the nature (type and value) of the callback setting in the following order: 1. If null callback (``None``), no action is taken. 2. If it's a `python function`, call it and pass any additional arguments. 3. If it's name of a `member method` of current module (string), call it and pass any additional arguments. 4. If the name does not match with `member method` name execute program with such name. .. seealso:: :ref:`callbacks` for more information about callback settings and examples. :param button: The ID of button event received from i3bar. :param kwargs: Further information received from i3bar like the positions of the mouse where the click occured. :return: Returns ``True`` if a valid callback action was executed. ``False`` otherwise. """ actions = ['leftclick', 'middleclick', 'rightclick', 'upscroll', 'downscroll'] try: action = actions[button - 1] except (TypeError, IndexError): self.__log_button_event(button, None, None, "Other button") action = "otherclick" m_click = self.__multi_click with m_click.lock: double = m_click.check_double(button) double_action = 'double%s' % action if double: action = double_action # Get callback function cb = getattr(self, 'on_%s' % action, None) double_handler = getattr(self, 'on_%s' % double_action, None) delay_execution = (not double and double_handler) if delay_execution: m_click.set_timer(button, cb, **kwargs) else: self.__button_callback_handler(button, cb, **kwargs)
[ "def", "on_click", "(", "self", ",", "button", ",", "*", "*", "kwargs", ")", ":", "actions", "=", "[", "'leftclick'", ",", "'middleclick'", ",", "'rightclick'", ",", "'upscroll'", ",", "'downscroll'", "]", "try", ":", "action", "=", "actions", "[", "button", "-", "1", "]", "except", "(", "TypeError", ",", "IndexError", ")", ":", "self", ".", "__log_button_event", "(", "button", ",", "None", ",", "None", ",", "\"Other button\"", ")", "action", "=", "\"otherclick\"", "m_click", "=", "self", ".", "__multi_click", "with", "m_click", ".", "lock", ":", "double", "=", "m_click", ".", "check_double", "(", "button", ")", "double_action", "=", "'double%s'", "%", "action", "if", "double", ":", "action", "=", "double_action", "# Get callback function", "cb", "=", "getattr", "(", "self", ",", "'on_%s'", "%", "action", ",", "None", ")", "double_handler", "=", "getattr", "(", "self", ",", "'on_%s'", "%", "double_action", ",", "None", ")", "delay_execution", "=", "(", "not", "double", "and", "double_handler", ")", "if", "delay_execution", ":", "m_click", ".", "set_timer", "(", "button", ",", "cb", ",", "*", "*", "kwargs", ")", "else", ":", "self", ".", "__button_callback_handler", "(", "button", ",", "cb", ",", "*", "*", "kwargs", ")" ]
37.076923
0.000808
[ "def on_click(self, button, **kwargs):\n", " \"\"\"\n", " Maps a click event with its associated callback.\n", "\n", " Currently implemented events are:\n", "\n", " ============ ================ =========\n", " Event Callback setting Button ID\n", " ============ ================ =========\n", " Left click on_leftclick 1\n", " Middle click on_middleclick 2\n", " Right click on_rightclick 3\n", " Scroll up on_upscroll 4\n", " Scroll down on_downscroll 5\n", " Others on_otherclick > 5\n", " ============ ================ =========\n", "\n", " The action is determined by the nature (type and value) of the callback\n", " setting in the following order:\n", "\n", " 1. If null callback (``None``), no action is taken.\n", " 2. If it's a `python function`, call it and pass any additional\n", " arguments.\n", " 3. If it's name of a `member method` of current module (string), call\n", " it and pass any additional arguments.\n", " 4. If the name does not match with `member method` name execute program\n", " with such name.\n", "\n", " .. seealso:: :ref:`callbacks` for more information about\n", " callback settings and examples.\n", "\n", " :param button: The ID of button event received from i3bar.\n", " :param kwargs: Further information received from i3bar like the\n", " positions of the mouse where the click occured.\n", " :return: Returns ``True`` if a valid callback action was executed.\n", " ``False`` otherwise.\n", " \"\"\"\n", "\n", " actions = ['leftclick', 'middleclick', 'rightclick',\n", " 'upscroll', 'downscroll']\n", " try:\n", " action = actions[button - 1]\n", " except (TypeError, IndexError):\n", " self.__log_button_event(button, None, None, \"Other button\")\n", " action = \"otherclick\"\n", "\n", " m_click = self.__multi_click\n", "\n", " with m_click.lock:\n", " double = m_click.check_double(button)\n", " double_action = 'double%s' % action\n", "\n", " if double:\n", " action = double_action\n", "\n", " # Get callback function\n", " cb = getattr(self, 'on_%s' % action, None)\n", "\n", " double_handler = getattr(self, 'on_%s' % double_action, None)\n", " delay_execution = (not double and double_handler)\n", "\n", " if delay_execution:\n", " m_click.set_timer(button, cb, **kwargs)\n", " else:\n", " self.__button_callback_handler(button, cb, **kwargs)" ]
[ 0, 0.08333333333333333, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.014705882352941176 ]
65
0.001508
def variations(word): """Create variations of the word based on letter combinations like oo, sh, etc.""" if len(word) == 1: return [[word[0]]] elif word == 'aa': return [['A']] elif word == 'ee': return [['i']] elif word == 'ei': return [['ei']] elif word in ['oo', 'ou']: return [['u']] elif word == 'kha': return [['kha'], ['kh', 'a']] elif word in ['kh', 'gh', 'ch', 'sh', 'zh', 'ck']: return [[word]] elif word in ["'ee", "'ei"]: return [["'i"]] elif word in ["'oo", "'ou"]: return [["'u"]] elif word in ["a'", "e'", "o'", "i'", "u'", "A'"]: return [[word[0] + "'"]] elif word in ["'a", "'e", "'o", "'i", "'u", "'A"]: return [["'" + word[1]]] elif len(word) == 2 and word[0] == word[1]: return [[word[0]]] if word[:2] == 'aa': return [['A'] + i for i in variations(word[2:])] elif word[:2] == 'ee': return [['i'] + i for i in variations(word[2:])] elif word[:2] in ['oo', 'ou']: return [['u'] + i for i in variations(word[2:])] elif word[:3] == 'kha': return \ [['kha'] + i for i in variations(word[3:])] + \ [['kh', 'a'] + i for i in variations(word[3:])] + \ [['k', 'h', 'a'] + i for i in variations(word[3:])] elif word[:2] in ['kh', 'gh', 'ch', 'sh', 'zh', 'ck']: return \ [[word[:2]] + i for i in variations(word[2:])] + \ [[word[0]] + i for i in variations(word[1:])] elif word[:2] in ["a'", "e'", "o'", "i'", "u'", "A'"]: return [[word[:2]] + i for i in variations(word[2:])] elif word[:3] in ["'ee", "'ei"]: return [["'i"] + i for i in variations(word[3:])] elif word[:3] in ["'oo", "'ou"]: return [["'u"] + i for i in variations(word[3:])] elif word[:2] in ["'a", "'e", "'o", "'i", "'u", "'A"]: return [[word[:2]] + i for i in variations(word[2:])] elif len(word) >= 2 and word[0] == word[1]: return [[word[0]] + i for i in variations(word[2:])] else: return [[word[0]] + i for i in variations(word[1:])]
[ "def", "variations", "(", "word", ")", ":", "if", "len", "(", "word", ")", "==", "1", ":", "return", "[", "[", "word", "[", "0", "]", "]", "]", "elif", "word", "==", "'aa'", ":", "return", "[", "[", "'A'", "]", "]", "elif", "word", "==", "'ee'", ":", "return", "[", "[", "'i'", "]", "]", "elif", "word", "==", "'ei'", ":", "return", "[", "[", "'ei'", "]", "]", "elif", "word", "in", "[", "'oo'", ",", "'ou'", "]", ":", "return", "[", "[", "'u'", "]", "]", "elif", "word", "==", "'kha'", ":", "return", "[", "[", "'kha'", "]", ",", "[", "'kh'", ",", "'a'", "]", "]", "elif", "word", "in", "[", "'kh'", ",", "'gh'", ",", "'ch'", ",", "'sh'", ",", "'zh'", ",", "'ck'", "]", ":", "return", "[", "[", "word", "]", "]", "elif", "word", "in", "[", "\"'ee\"", ",", "\"'ei\"", "]", ":", "return", "[", "[", "\"'i\"", "]", "]", "elif", "word", "in", "[", "\"'oo\"", ",", "\"'ou\"", "]", ":", "return", "[", "[", "\"'u\"", "]", "]", "elif", "word", "in", "[", "\"a'\"", ",", "\"e'\"", ",", "\"o'\"", ",", "\"i'\"", ",", "\"u'\"", ",", "\"A'\"", "]", ":", "return", "[", "[", "word", "[", "0", "]", "+", "\"'\"", "]", "]", "elif", "word", "in", "[", "\"'a\"", ",", "\"'e\"", ",", "\"'o\"", ",", "\"'i\"", ",", "\"'u\"", ",", "\"'A\"", "]", ":", "return", "[", "[", "\"'\"", "+", "word", "[", "1", "]", "]", "]", "elif", "len", "(", "word", ")", "==", "2", "and", "word", "[", "0", "]", "==", "word", "[", "1", "]", ":", "return", "[", "[", "word", "[", "0", "]", "]", "]", "if", "word", "[", ":", "2", "]", "==", "'aa'", ":", "return", "[", "[", "'A'", "]", "+", "i", "for", "i", "in", "variations", "(", "word", "[", "2", ":", "]", ")", "]", "elif", "word", "[", ":", "2", "]", "==", "'ee'", ":", "return", "[", "[", "'i'", "]", "+", "i", "for", "i", "in", "variations", "(", "word", "[", "2", ":", "]", ")", "]", "elif", "word", "[", ":", "2", "]", "in", "[", "'oo'", ",", "'ou'", "]", ":", "return", "[", "[", "'u'", "]", "+", "i", "for", "i", "in", "variations", "(", "word", "[", "2", ":", "]", ")", "]", "elif", "word", "[", ":", "3", "]", "==", "'kha'", ":", "return", "[", "[", "'kha'", "]", "+", "i", "for", "i", "in", "variations", "(", "word", "[", "3", ":", "]", ")", "]", "+", "[", "[", "'kh'", ",", "'a'", "]", "+", "i", "for", "i", "in", "variations", "(", "word", "[", "3", ":", "]", ")", "]", "+", "[", "[", "'k'", ",", "'h'", ",", "'a'", "]", "+", "i", "for", "i", "in", "variations", "(", "word", "[", "3", ":", "]", ")", "]", "elif", "word", "[", ":", "2", "]", "in", "[", "'kh'", ",", "'gh'", ",", "'ch'", ",", "'sh'", ",", "'zh'", ",", "'ck'", "]", ":", "return", "[", "[", "word", "[", ":", "2", "]", "]", "+", "i", "for", "i", "in", "variations", "(", "word", "[", "2", ":", "]", ")", "]", "+", "[", "[", "word", "[", "0", "]", "]", "+", "i", "for", "i", "in", "variations", "(", "word", "[", "1", ":", "]", ")", "]", "elif", "word", "[", ":", "2", "]", "in", "[", "\"a'\"", ",", "\"e'\"", ",", "\"o'\"", ",", "\"i'\"", ",", "\"u'\"", ",", "\"A'\"", "]", ":", "return", "[", "[", "word", "[", ":", "2", "]", "]", "+", "i", "for", "i", "in", "variations", "(", "word", "[", "2", ":", "]", ")", "]", "elif", "word", "[", ":", "3", "]", "in", "[", "\"'ee\"", ",", "\"'ei\"", "]", ":", "return", "[", "[", "\"'i\"", "]", "+", "i", "for", "i", "in", "variations", "(", "word", "[", "3", ":", "]", ")", "]", "elif", "word", "[", ":", "3", "]", "in", "[", "\"'oo\"", ",", "\"'ou\"", "]", ":", "return", "[", "[", "\"'u\"", "]", "+", "i", "for", "i", "in", "variations", "(", "word", "[", "3", ":", "]", ")", "]", "elif", "word", "[", ":", "2", 
"]", "in", "[", "\"'a\"", ",", "\"'e\"", ",", "\"'o\"", ",", "\"'i\"", ",", "\"'u\"", ",", "\"'A\"", "]", ":", "return", "[", "[", "word", "[", ":", "2", "]", "]", "+", "i", "for", "i", "in", "variations", "(", "word", "[", "2", ":", "]", ")", "]", "elif", "len", "(", "word", ")", ">=", "2", "and", "word", "[", "0", "]", "==", "word", "[", "1", "]", ":", "return", "[", "[", "word", "[", "0", "]", "]", "+", "i", "for", "i", "in", "variations", "(", "word", "[", "2", ":", "]", ")", "]", "else", ":", "return", "[", "[", "word", "[", "0", "]", "]", "+", "i", "for", "i", "in", "variations", "(", "word", "[", "1", ":", "]", ")", "]" ]
37.571429
0.000463
[ "def variations(word):\n", " \"\"\"Create variations of the word based on letter combinations like oo,\n", "sh, etc.\"\"\"\n", "\n", " if len(word) == 1:\n", " return [[word[0]]]\n", " elif word == 'aa':\n", " return [['A']]\n", " elif word == 'ee':\n", " return [['i']]\n", " elif word == 'ei':\n", " return [['ei']]\n", " elif word in ['oo', 'ou']:\n", " return [['u']]\n", " elif word == 'kha':\n", " return [['kha'], ['kh', 'a']]\n", " elif word in ['kh', 'gh', 'ch', 'sh', 'zh', 'ck']:\n", " return [[word]]\n", " elif word in [\"'ee\", \"'ei\"]:\n", " return [[\"'i\"]]\n", " elif word in [\"'oo\", \"'ou\"]:\n", " return [[\"'u\"]]\n", " elif word in [\"a'\", \"e'\", \"o'\", \"i'\", \"u'\", \"A'\"]:\n", " return [[word[0] + \"'\"]]\n", " elif word in [\"'a\", \"'e\", \"'o\", \"'i\", \"'u\", \"'A\"]:\n", " return [[\"'\" + word[1]]]\n", " elif len(word) == 2 and word[0] == word[1]:\n", " return [[word[0]]]\n", "\n", " if word[:2] == 'aa':\n", " return [['A'] + i for i in variations(word[2:])]\n", " elif word[:2] == 'ee':\n", " return [['i'] + i for i in variations(word[2:])]\n", " elif word[:2] in ['oo', 'ou']:\n", " return [['u'] + i for i in variations(word[2:])]\n", " elif word[:3] == 'kha':\n", " return \\\n", " [['kha'] + i for i in variations(word[3:])] + \\\n", " [['kh', 'a'] + i for i in variations(word[3:])] + \\\n", " [['k', 'h', 'a'] + i for i in variations(word[3:])]\n", " elif word[:2] in ['kh', 'gh', 'ch', 'sh', 'zh', 'ck']:\n", " return \\\n", " [[word[:2]] + i for i in variations(word[2:])] + \\\n", " [[word[0]] + i for i in variations(word[1:])]\n", " elif word[:2] in [\"a'\", \"e'\", \"o'\", \"i'\", \"u'\", \"A'\"]:\n", " return [[word[:2]] + i for i in variations(word[2:])]\n", " elif word[:3] in [\"'ee\", \"'ei\"]:\n", " return [[\"'i\"] + i for i in variations(word[3:])]\n", " elif word[:3] in [\"'oo\", \"'ou\"]:\n", " return [[\"'u\"] + i for i in variations(word[3:])]\n", " elif word[:2] in [\"'a\", \"'e\", \"'o\", \"'i\", \"'u\", \"'A\"]:\n", " return [[word[:2]] + i for i in variations(word[2:])]\n", " elif len(word) >= 2 and word[0] == word[1]:\n", " return [[word[0]] + i for i in variations(word[2:])]\n", " else:\n", " return [[word[0]] + i for i in variations(word[1:])]" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.016666666666666666 ]
56
0.000298
def tokenizer(text): """A lexical analyzer for the `mwtab` formatted files. :param str text: `mwtab` formatted text. :return: Tuples of data. :rtype: py:class:`~collections.namedtuple` """ stream = deque(text.split("\n")) while len(stream) > 0: line = stream.popleft() if line.startswith("#METABOLOMICS WORKBENCH"): yield KeyValue("#METABOLOMICS WORKBENCH", "\n") yield KeyValue("HEADER", line) for identifier in line.split(" "): if ":" in identifier: key, value = identifier.split(":") yield KeyValue(key, value) elif line.startswith("#ANALYSIS TYPE"): yield KeyValue("HEADER", line) elif line.startswith("#SUBJECT_SAMPLE_FACTORS:"): yield KeyValue("#ENDSECTION", "\n") yield KeyValue("#SUBJECT_SAMPLE_FACTORS", "\n") elif line.startswith("#"): yield KeyValue("#ENDSECTION", "\n") yield KeyValue(line.strip(), "\n") elif line.startswith("SUBJECT_SAMPLE_FACTORS"): key, subject_type, local_sample_id, factors, additional_sample_data = line.split("\t") # factors = [dict([[i.strip() for i in f.split(":")]]) for f in factors.split("|")] yield SubjectSampleFactors(key.strip(), subject_type, local_sample_id, factors, additional_sample_data) elif line.endswith("_START"): yield KeyValue(line, "\n") while not line.endswith("_END"): line = stream.popleft() if line.endswith("_END"): yield KeyValue(line.strip(), "\n") else: data = line.split("\t") yield KeyValue(data[0], tuple(data)) else: if line: if line.startswith("MS:MS_RESULTS_FILE") or line.startswith("NM:NMR_RESULTS_FILE"): try: key, value, extra = line.split("\t") extra_key, extra_value = extra.strip().split(":") yield KeyValueExtra(key.strip()[3:], value, extra_key, extra_value) except ValueError: key, value = line.split("\t") yield KeyValue(key.strip()[3:], value) else: try: key, value = line.split("\t") if ":" in key: if key.startswith("MS_METABOLITE_DATA:UNITS"): yield KeyValue(key.strip(), value) else: yield KeyValue(key.strip()[3:], value) else: yield KeyValue(key.strip(), value) except ValueError: print("LINE WITH ERROR:\n\t", repr(line)) raise yield KeyValue("#ENDSECTION", "\n") yield KeyValue("!#ENDFILE", "\n")
[ "def", "tokenizer", "(", "text", ")", ":", "stream", "=", "deque", "(", "text", ".", "split", "(", "\"\\n\"", ")", ")", "while", "len", "(", "stream", ")", ">", "0", ":", "line", "=", "stream", ".", "popleft", "(", ")", "if", "line", ".", "startswith", "(", "\"#METABOLOMICS WORKBENCH\"", ")", ":", "yield", "KeyValue", "(", "\"#METABOLOMICS WORKBENCH\"", ",", "\"\\n\"", ")", "yield", "KeyValue", "(", "\"HEADER\"", ",", "line", ")", "for", "identifier", "in", "line", ".", "split", "(", "\" \"", ")", ":", "if", "\":\"", "in", "identifier", ":", "key", ",", "value", "=", "identifier", ".", "split", "(", "\":\"", ")", "yield", "KeyValue", "(", "key", ",", "value", ")", "elif", "line", ".", "startswith", "(", "\"#ANALYSIS TYPE\"", ")", ":", "yield", "KeyValue", "(", "\"HEADER\"", ",", "line", ")", "elif", "line", ".", "startswith", "(", "\"#SUBJECT_SAMPLE_FACTORS:\"", ")", ":", "yield", "KeyValue", "(", "\"#ENDSECTION\"", ",", "\"\\n\"", ")", "yield", "KeyValue", "(", "\"#SUBJECT_SAMPLE_FACTORS\"", ",", "\"\\n\"", ")", "elif", "line", ".", "startswith", "(", "\"#\"", ")", ":", "yield", "KeyValue", "(", "\"#ENDSECTION\"", ",", "\"\\n\"", ")", "yield", "KeyValue", "(", "line", ".", "strip", "(", ")", ",", "\"\\n\"", ")", "elif", "line", ".", "startswith", "(", "\"SUBJECT_SAMPLE_FACTORS\"", ")", ":", "key", ",", "subject_type", ",", "local_sample_id", ",", "factors", ",", "additional_sample_data", "=", "line", ".", "split", "(", "\"\\t\"", ")", "# factors = [dict([[i.strip() for i in f.split(\":\")]]) for f in factors.split(\"|\")]", "yield", "SubjectSampleFactors", "(", "key", ".", "strip", "(", ")", ",", "subject_type", ",", "local_sample_id", ",", "factors", ",", "additional_sample_data", ")", "elif", "line", ".", "endswith", "(", "\"_START\"", ")", ":", "yield", "KeyValue", "(", "line", ",", "\"\\n\"", ")", "while", "not", "line", ".", "endswith", "(", "\"_END\"", ")", ":", "line", "=", "stream", ".", "popleft", "(", ")", "if", "line", ".", "endswith", "(", "\"_END\"", ")", ":", "yield", "KeyValue", "(", "line", ".", "strip", "(", ")", ",", "\"\\n\"", ")", "else", ":", "data", "=", "line", ".", "split", "(", "\"\\t\"", ")", "yield", "KeyValue", "(", "data", "[", "0", "]", ",", "tuple", "(", "data", ")", ")", "else", ":", "if", "line", ":", "if", "line", ".", "startswith", "(", "\"MS:MS_RESULTS_FILE\"", ")", "or", "line", ".", "startswith", "(", "\"NM:NMR_RESULTS_FILE\"", ")", ":", "try", ":", "key", ",", "value", ",", "extra", "=", "line", ".", "split", "(", "\"\\t\"", ")", "extra_key", ",", "extra_value", "=", "extra", ".", "strip", "(", ")", ".", "split", "(", "\":\"", ")", "yield", "KeyValueExtra", "(", "key", ".", "strip", "(", ")", "[", "3", ":", "]", ",", "value", ",", "extra_key", ",", "extra_value", ")", "except", "ValueError", ":", "key", ",", "value", "=", "line", ".", "split", "(", "\"\\t\"", ")", "yield", "KeyValue", "(", "key", ".", "strip", "(", ")", "[", "3", ":", "]", ",", "value", ")", "else", ":", "try", ":", "key", ",", "value", "=", "line", ".", "split", "(", "\"\\t\"", ")", "if", "\":\"", "in", "key", ":", "if", "key", ".", "startswith", "(", "\"MS_METABOLITE_DATA:UNITS\"", ")", ":", "yield", "KeyValue", "(", "key", ".", "strip", "(", ")", ",", "value", ")", "else", ":", "yield", "KeyValue", "(", "key", ".", "strip", "(", ")", "[", "3", ":", "]", ",", "value", ")", "else", ":", "yield", "KeyValue", "(", "key", ".", "strip", "(", ")", ",", "value", ")", "except", "ValueError", ":", "print", "(", "\"LINE WITH ERROR:\\n\\t\"", ",", "repr", "(", "line", ")", ")", "raise", 
"yield", "KeyValue", "(", "\"#ENDSECTION\"", ",", "\"\\n\"", ")", "yield", "KeyValue", "(", "\"!#ENDFILE\"", ",", "\"\\n\"", ")" ]
39.466667
0.001978
[ "def tokenizer(text):\n", " \"\"\"A lexical analyzer for the `mwtab` formatted files.\n", "\n", " :param str text: `mwtab` formatted text.\n", " :return: Tuples of data.\n", " :rtype: py:class:`~collections.namedtuple`\n", " \"\"\"\n", "\n", " stream = deque(text.split(\"\\n\"))\n", "\n", " while len(stream) > 0:\n", " line = stream.popleft()\n", "\n", " if line.startswith(\"#METABOLOMICS WORKBENCH\"):\n", " yield KeyValue(\"#METABOLOMICS WORKBENCH\", \"\\n\")\n", " yield KeyValue(\"HEADER\", line)\n", "\n", " for identifier in line.split(\" \"):\n", " if \":\" in identifier:\n", " key, value = identifier.split(\":\")\n", " yield KeyValue(key, value)\n", "\n", " elif line.startswith(\"#ANALYSIS TYPE\"):\n", " yield KeyValue(\"HEADER\", line)\n", "\n", " elif line.startswith(\"#SUBJECT_SAMPLE_FACTORS:\"):\n", " yield KeyValue(\"#ENDSECTION\", \"\\n\")\n", " yield KeyValue(\"#SUBJECT_SAMPLE_FACTORS\", \"\\n\")\n", "\n", " elif line.startswith(\"#\"):\n", " yield KeyValue(\"#ENDSECTION\", \"\\n\")\n", " yield KeyValue(line.strip(), \"\\n\")\n", "\n", " elif line.startswith(\"SUBJECT_SAMPLE_FACTORS\"):\n", " key, subject_type, local_sample_id, factors, additional_sample_data = line.split(\"\\t\")\n", " # factors = [dict([[i.strip() for i in f.split(\":\")]]) for f in factors.split(\"|\")]\n", " yield SubjectSampleFactors(key.strip(), subject_type, local_sample_id, factors, additional_sample_data)\n", "\n", " elif line.endswith(\"_START\"):\n", " yield KeyValue(line, \"\\n\")\n", "\n", " while not line.endswith(\"_END\"):\n", " line = stream.popleft()\n", " if line.endswith(\"_END\"):\n", " yield KeyValue(line.strip(), \"\\n\")\n", " else:\n", " data = line.split(\"\\t\")\n", " yield KeyValue(data[0], tuple(data))\n", "\n", " else:\n", " if line:\n", " if line.startswith(\"MS:MS_RESULTS_FILE\") or line.startswith(\"NM:NMR_RESULTS_FILE\"):\n", " try:\n", " key, value, extra = line.split(\"\\t\")\n", " extra_key, extra_value = extra.strip().split(\":\")\n", " yield KeyValueExtra(key.strip()[3:], value, extra_key, extra_value)\n", " except ValueError:\n", " key, value = line.split(\"\\t\")\n", " yield KeyValue(key.strip()[3:], value)\n", " else:\n", " try:\n", " key, value = line.split(\"\\t\")\n", " if \":\" in key:\n", " if key.startswith(\"MS_METABOLITE_DATA:UNITS\"):\n", " yield KeyValue(key.strip(), value)\n", " else:\n", " yield KeyValue(key.strip()[3:], value)\n", " else:\n", " yield KeyValue(key.strip(), value)\n", " except ValueError:\n", " print(\"LINE WITH ERROR:\\n\\t\", repr(line))\n", " raise\n", "\n", " yield KeyValue(\"#ENDSECTION\", \"\\n\")\n", " yield KeyValue(\"!#ENDFILE\", \"\\n\")" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.010101010101010102, 0.010416666666666666, 0.008620689655172414, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.01, 0, 0, 0, 0.010869565217391304, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.02702702702702703 ]
75
0.001027
def getAttributeName(self, name): '''represents the aname ''' if self.func_aname is None: return name assert callable(self.func_aname), \ 'expecting callable method for attribute func_aname, not %s' %type(self.func_aname) f = self.func_aname return f(name)
[ "def", "getAttributeName", "(", "self", ",", "name", ")", ":", "if", "self", ".", "func_aname", "is", "None", ":", "return", "name", "assert", "callable", "(", "self", ".", "func_aname", ")", ",", "'expecting callable method for attribute func_aname, not %s'", "%", "type", "(", "self", ".", "func_aname", ")", "f", "=", "self", ".", "func_aname", "return", "f", "(", "name", ")" ]
35.555556
0.012195
[ "def getAttributeName(self, name):\n", " '''represents the aname\n", " '''\n", " if self.func_aname is None:\n", " return name\n", " assert callable(self.func_aname), \\\n", " 'expecting callable method for attribute func_aname, not %s' %type(self.func_aname)\n", " f = self.func_aname\n", " return f(name)" ]
[ 0, 0.03125, 0, 0, 0, 0, 0.020833333333333332, 0, 0.045454545454545456 ]
9
0.010838
def create_shell(console, manage_dict=None, extra_vars=None, exit_hooks=None): """Creates the shell""" manage_dict = manage_dict or MANAGE_DICT _vars = globals() _vars.update(locals()) auto_imported = import_objects(manage_dict) if extra_vars: auto_imported.update(extra_vars) _vars.update(auto_imported) msgs = [] if manage_dict['shell']['banner']['enabled']: msgs.append( manage_dict['shell']['banner']['message'].format(**manage_dict) ) if auto_imported and manage_dict['shell']['auto_import']['display']: auto_imported_names = [ key for key in auto_imported.keys() if key not in ['__builtins__', 'builtins'] ] msgs.append('\tAuto imported: {0}\n'.format(auto_imported_names)) banner_msg = u'\n'.join(msgs) exec_init(manage_dict, _vars) exec_init_script(manage_dict, _vars) atexit_functions = [ import_string(func_name) for func_name in manage_dict['shell'].get('exit_hooks', []) ] atexit_functions += exit_hooks or [] for atexit_function in atexit_functions: atexit.register(atexit_function) if console == 'ptpython': try: from ptpython.repl import embed embed({}, _vars) except ImportError: click.echo("ptpython is not installed!") return if console == 'bpython': try: from bpython import embed embed(locals_=_vars, banner=banner_msg) except ImportError: click.echo("bpython is not installed!") return try: if console == 'ipython': from IPython import start_ipython from traitlets.config import Config c = Config() c.TerminalInteractiveShell.banner2 = banner_msg c.InteractiveShellApp.extensions = [ extension for extension in manage_dict['shell'].get('ipython_extensions', []) ] c.InteractiveShellApp.exec_lines = [ exec_line for exec_line in manage_dict['shell'].get('ipython_exec_lines', []) ] if manage_dict['shell'].get('ipython_auto_reload', True) is True: c.InteractiveShellApp.extensions.append('autoreload') c.InteractiveShellApp.exec_lines.append('%autoreload 2') start_ipython(argv=[], user_ns=_vars, config=c) else: raise ImportError except ImportError: if manage_dict['shell']['readline_enabled']: import readline import rlcompleter readline.set_completer(rlcompleter.Completer(_vars).complete) readline.parse_and_bind('tab: complete') shell = code.InteractiveConsole(_vars) shell.interact(banner=banner_msg)
[ "def", "create_shell", "(", "console", ",", "manage_dict", "=", "None", ",", "extra_vars", "=", "None", ",", "exit_hooks", "=", "None", ")", ":", "manage_dict", "=", "manage_dict", "or", "MANAGE_DICT", "_vars", "=", "globals", "(", ")", "_vars", ".", "update", "(", "locals", "(", ")", ")", "auto_imported", "=", "import_objects", "(", "manage_dict", ")", "if", "extra_vars", ":", "auto_imported", ".", "update", "(", "extra_vars", ")", "_vars", ".", "update", "(", "auto_imported", ")", "msgs", "=", "[", "]", "if", "manage_dict", "[", "'shell'", "]", "[", "'banner'", "]", "[", "'enabled'", "]", ":", "msgs", ".", "append", "(", "manage_dict", "[", "'shell'", "]", "[", "'banner'", "]", "[", "'message'", "]", ".", "format", "(", "*", "*", "manage_dict", ")", ")", "if", "auto_imported", "and", "manage_dict", "[", "'shell'", "]", "[", "'auto_import'", "]", "[", "'display'", "]", ":", "auto_imported_names", "=", "[", "key", "for", "key", "in", "auto_imported", ".", "keys", "(", ")", "if", "key", "not", "in", "[", "'__builtins__'", ",", "'builtins'", "]", "]", "msgs", ".", "append", "(", "'\\tAuto imported: {0}\\n'", ".", "format", "(", "auto_imported_names", ")", ")", "banner_msg", "=", "u'\\n'", ".", "join", "(", "msgs", ")", "exec_init", "(", "manage_dict", ",", "_vars", ")", "exec_init_script", "(", "manage_dict", ",", "_vars", ")", "atexit_functions", "=", "[", "import_string", "(", "func_name", ")", "for", "func_name", "in", "manage_dict", "[", "'shell'", "]", ".", "get", "(", "'exit_hooks'", ",", "[", "]", ")", "]", "atexit_functions", "+=", "exit_hooks", "or", "[", "]", "for", "atexit_function", "in", "atexit_functions", ":", "atexit", ".", "register", "(", "atexit_function", ")", "if", "console", "==", "'ptpython'", ":", "try", ":", "from", "ptpython", ".", "repl", "import", "embed", "embed", "(", "{", "}", ",", "_vars", ")", "except", "ImportError", ":", "click", ".", "echo", "(", "\"ptpython is not installed!\"", ")", "return", "if", "console", "==", "'bpython'", ":", "try", ":", "from", "bpython", "import", "embed", "embed", "(", "locals_", "=", "_vars", ",", "banner", "=", "banner_msg", ")", "except", "ImportError", ":", "click", ".", "echo", "(", "\"bpython is not installed!\"", ")", "return", "try", ":", "if", "console", "==", "'ipython'", ":", "from", "IPython", "import", "start_ipython", "from", "traitlets", ".", "config", "import", "Config", "c", "=", "Config", "(", ")", "c", ".", "TerminalInteractiveShell", ".", "banner2", "=", "banner_msg", "c", ".", "InteractiveShellApp", ".", "extensions", "=", "[", "extension", "for", "extension", "in", "manage_dict", "[", "'shell'", "]", ".", "get", "(", "'ipython_extensions'", ",", "[", "]", ")", "]", "c", ".", "InteractiveShellApp", ".", "exec_lines", "=", "[", "exec_line", "for", "exec_line", "in", "manage_dict", "[", "'shell'", "]", ".", "get", "(", "'ipython_exec_lines'", ",", "[", "]", ")", "]", "if", "manage_dict", "[", "'shell'", "]", ".", "get", "(", "'ipython_auto_reload'", ",", "True", ")", "is", "True", ":", "c", ".", "InteractiveShellApp", ".", "extensions", ".", "append", "(", "'autoreload'", ")", "c", ".", "InteractiveShellApp", ".", "exec_lines", ".", "append", "(", "'%autoreload 2'", ")", "start_ipython", "(", "argv", "=", "[", "]", ",", "user_ns", "=", "_vars", ",", "config", "=", "c", ")", "else", ":", "raise", "ImportError", "except", "ImportError", ":", "if", "manage_dict", "[", "'shell'", "]", "[", "'readline_enabled'", "]", ":", "import", "readline", "import", "rlcompleter", "readline", ".", "set_completer", "(", 
"rlcompleter", ".", "Completer", "(", "_vars", ")", ".", "complete", ")", "readline", ".", "parse_and_bind", "(", "'tab: complete'", ")", "shell", "=", "code", ".", "InteractiveConsole", "(", "_vars", ")", "shell", ".", "interact", "(", "banner", "=", "banner_msg", ")" ]
35.679487
0.00035
[ "def create_shell(console, manage_dict=None, extra_vars=None, exit_hooks=None):\n", " \"\"\"Creates the shell\"\"\"\n", " manage_dict = manage_dict or MANAGE_DICT\n", " _vars = globals()\n", " _vars.update(locals())\n", " auto_imported = import_objects(manage_dict)\n", " if extra_vars:\n", " auto_imported.update(extra_vars)\n", " _vars.update(auto_imported)\n", " msgs = []\n", " if manage_dict['shell']['banner']['enabled']:\n", " msgs.append(\n", " manage_dict['shell']['banner']['message'].format(**manage_dict)\n", " )\n", " if auto_imported and manage_dict['shell']['auto_import']['display']:\n", " auto_imported_names = [\n", " key for key in auto_imported.keys()\n", " if key not in ['__builtins__', 'builtins']\n", " ]\n", " msgs.append('\\tAuto imported: {0}\\n'.format(auto_imported_names))\n", "\n", " banner_msg = u'\\n'.join(msgs)\n", "\n", " exec_init(manage_dict, _vars)\n", " exec_init_script(manage_dict, _vars)\n", "\n", " atexit_functions = [\n", " import_string(func_name) for func_name in\n", " manage_dict['shell'].get('exit_hooks', [])\n", " ]\n", " atexit_functions += exit_hooks or []\n", " for atexit_function in atexit_functions:\n", " atexit.register(atexit_function)\n", "\n", " if console == 'ptpython':\n", " try:\n", " from ptpython.repl import embed\n", " embed({}, _vars)\n", " except ImportError:\n", " click.echo(\"ptpython is not installed!\")\n", " return\n", "\n", " if console == 'bpython':\n", " try:\n", " from bpython import embed\n", " embed(locals_=_vars, banner=banner_msg)\n", " except ImportError:\n", " click.echo(\"bpython is not installed!\")\n", " return\n", "\n", " try:\n", " if console == 'ipython':\n", " from IPython import start_ipython\n", " from traitlets.config import Config\n", " c = Config()\n", " c.TerminalInteractiveShell.banner2 = banner_msg\n", " c.InteractiveShellApp.extensions = [\n", " extension for extension in\n", " manage_dict['shell'].get('ipython_extensions', [])\n", " ]\n", " c.InteractiveShellApp.exec_lines = [\n", " exec_line for exec_line in\n", " manage_dict['shell'].get('ipython_exec_lines', [])\n", " ]\n", " if manage_dict['shell'].get('ipython_auto_reload', True) is True:\n", " c.InteractiveShellApp.extensions.append('autoreload')\n", " c.InteractiveShellApp.exec_lines.append('%autoreload 2')\n", " start_ipython(argv=[], user_ns=_vars, config=c)\n", " else:\n", " raise ImportError\n", " except ImportError:\n", " if manage_dict['shell']['readline_enabled']:\n", " import readline\n", " import rlcompleter\n", " readline.set_completer(rlcompleter.Completer(_vars).complete)\n", " readline.parse_and_bind('tab: complete')\n", " shell = code.InteractiveConsole(_vars)\n", " shell.interact(banner=banner_msg)" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.024390243902439025 ]
78
0.000313
def pt2leaf(self, x): """ Get the leaf which domain contains x. """ if self.leafnode: return self else: if x[self.split_dim] < self.split_value: return self.lower.pt2leaf(x) else: return self.greater.pt2leaf(x)
[ "def", "pt2leaf", "(", "self", ",", "x", ")", ":", "if", "self", ".", "leafnode", ":", "return", "self", "else", ":", "if", "x", "[", "self", ".", "split_dim", "]", "<", "self", ".", "split_value", ":", "return", "self", ".", "lower", ".", "pt2leaf", "(", "x", ")", "else", ":", "return", "self", ".", "greater", ".", "pt2leaf", "(", "x", ")" ]
26.333333
0.009174
[ "def pt2leaf(self, x):\n", " \"\"\"\n", " Get the leaf which domain contains x.\n", " \n", " \"\"\"\n", " if self.leafnode:\n", " return self\n", " else:\n", " if x[self.split_dim] < self.split_value:\n", " return self.lower.pt2leaf(x)\n", " else:\n", " return self.greater.pt2leaf(x)" ]
[ 0, 0.08333333333333333, 0, 0.1111111111111111, 0, 0, 0, 0, 0, 0, 0, 0.021739130434782608 ]
12
0.018015
def cmd_init_pull_from_cloud(args): """Initiate the local catalog by downloading the cloud catalog""" (lcat, ccat) = (args.local_catalog, args.cloud_catalog) logging.info("[init-pull-from-cloud]: %s => %s"%(ccat, lcat)) if isfile(lcat): args.error("[init-pull-from-cloud] The local catalog already exist: %s"%lcat) if not isfile(ccat): args.error("[init-pull-from-cloud] The cloud catalog does not exist: %s"%ccat) (lmeta, cmeta) = ("%s.lrcloud"%lcat, "%s.lrcloud"%ccat) if isfile(lmeta): args.error("[init-pull-from-cloud] The local meta-data already exist: %s"%lmeta) if not isfile(cmeta): args.error("[init-pull-from-cloud] The cloud meta-data does not exist: %s"%cmeta) #Let's "lock" the local catalog logging.info("Locking local catalog: %s"%(lcat)) if not lock_file(lcat): raise RuntimeError("The catalog %s is locked!"%lcat) #Copy base from cloud to local util.copy(ccat, lcat) #Apply changesets cloudDAG = ChangesetDAG(ccat) path = cloudDAG.path(cloudDAG.root.hash, cloudDAG.leafs[0].hash) util.apply_changesets(args, path, lcat) # Write meta-data both to local and cloud mfile = MetaFile(lmeta) utcnow = datetime.utcnow().strftime(DATETIME_FORMAT)[:-4] mfile['catalog']['hash'] = hashsum(lcat) mfile['catalog']['modification_utc'] = utcnow mfile['catalog']['filename'] = lcat mfile['last_push']['filename'] = cloudDAG.leafs[0].mfile['changeset']['filename'] mfile['last_push']['hash'] = cloudDAG.leafs[0].mfile['changeset']['hash'] mfile['last_push']['modification_utc'] = cloudDAG.leafs[0].mfile['changeset']['modification_utc'] mfile.flush() #Let's copy Smart Previews if not args.no_smart_previews: copy_smart_previews(lcat, ccat, local2cloud=False) #Finally, let's unlock the catalog files logging.info("Unlocking local catalog: %s"%(lcat)) unlock_file(lcat) logging.info("[init-pull-from-cloud]: Success!")
[ "def", "cmd_init_pull_from_cloud", "(", "args", ")", ":", "(", "lcat", ",", "ccat", ")", "=", "(", "args", ".", "local_catalog", ",", "args", ".", "cloud_catalog", ")", "logging", ".", "info", "(", "\"[init-pull-from-cloud]: %s => %s\"", "%", "(", "ccat", ",", "lcat", ")", ")", "if", "isfile", "(", "lcat", ")", ":", "args", ".", "error", "(", "\"[init-pull-from-cloud] The local catalog already exist: %s\"", "%", "lcat", ")", "if", "not", "isfile", "(", "ccat", ")", ":", "args", ".", "error", "(", "\"[init-pull-from-cloud] The cloud catalog does not exist: %s\"", "%", "ccat", ")", "(", "lmeta", ",", "cmeta", ")", "=", "(", "\"%s.lrcloud\"", "%", "lcat", ",", "\"%s.lrcloud\"", "%", "ccat", ")", "if", "isfile", "(", "lmeta", ")", ":", "args", ".", "error", "(", "\"[init-pull-from-cloud] The local meta-data already exist: %s\"", "%", "lmeta", ")", "if", "not", "isfile", "(", "cmeta", ")", ":", "args", ".", "error", "(", "\"[init-pull-from-cloud] The cloud meta-data does not exist: %s\"", "%", "cmeta", ")", "#Let's \"lock\" the local catalog", "logging", ".", "info", "(", "\"Locking local catalog: %s\"", "%", "(", "lcat", ")", ")", "if", "not", "lock_file", "(", "lcat", ")", ":", "raise", "RuntimeError", "(", "\"The catalog %s is locked!\"", "%", "lcat", ")", "#Copy base from cloud to local", "util", ".", "copy", "(", "ccat", ",", "lcat", ")", "#Apply changesets", "cloudDAG", "=", "ChangesetDAG", "(", "ccat", ")", "path", "=", "cloudDAG", ".", "path", "(", "cloudDAG", ".", "root", ".", "hash", ",", "cloudDAG", ".", "leafs", "[", "0", "]", ".", "hash", ")", "util", ".", "apply_changesets", "(", "args", ",", "path", ",", "lcat", ")", "# Write meta-data both to local and cloud", "mfile", "=", "MetaFile", "(", "lmeta", ")", "utcnow", "=", "datetime", ".", "utcnow", "(", ")", ".", "strftime", "(", "DATETIME_FORMAT", ")", "[", ":", "-", "4", "]", "mfile", "[", "'catalog'", "]", "[", "'hash'", "]", "=", "hashsum", "(", "lcat", ")", "mfile", "[", "'catalog'", "]", "[", "'modification_utc'", "]", "=", "utcnow", "mfile", "[", "'catalog'", "]", "[", "'filename'", "]", "=", "lcat", "mfile", "[", "'last_push'", "]", "[", "'filename'", "]", "=", "cloudDAG", ".", "leafs", "[", "0", "]", ".", "mfile", "[", "'changeset'", "]", "[", "'filename'", "]", "mfile", "[", "'last_push'", "]", "[", "'hash'", "]", "=", "cloudDAG", ".", "leafs", "[", "0", "]", ".", "mfile", "[", "'changeset'", "]", "[", "'hash'", "]", "mfile", "[", "'last_push'", "]", "[", "'modification_utc'", "]", "=", "cloudDAG", ".", "leafs", "[", "0", "]", ".", "mfile", "[", "'changeset'", "]", "[", "'modification_utc'", "]", "mfile", ".", "flush", "(", ")", "#Let's copy Smart Previews", "if", "not", "args", ".", "no_smart_previews", ":", "copy_smart_previews", "(", "lcat", ",", "ccat", ",", "local2cloud", "=", "False", ")", "#Finally, let's unlock the catalog files", "logging", ".", "info", "(", "\"Unlocking local catalog: %s\"", "%", "(", "lcat", ")", ")", "unlock_file", "(", "lcat", ")", "logging", ".", "info", "(", "\"[init-pull-from-cloud]: Success!\"", ")" ]
39.22
0.010945
[ "def cmd_init_pull_from_cloud(args):\n", " \"\"\"Initiate the local catalog by downloading the cloud catalog\"\"\"\n", "\n", " (lcat, ccat) = (args.local_catalog, args.cloud_catalog)\n", " logging.info(\"[init-pull-from-cloud]: %s => %s\"%(ccat, lcat))\n", "\n", " if isfile(lcat):\n", " args.error(\"[init-pull-from-cloud] The local catalog already exist: %s\"%lcat)\n", " if not isfile(ccat):\n", " args.error(\"[init-pull-from-cloud] The cloud catalog does not exist: %s\"%ccat)\n", "\n", " (lmeta, cmeta) = (\"%s.lrcloud\"%lcat, \"%s.lrcloud\"%ccat)\n", " if isfile(lmeta):\n", " args.error(\"[init-pull-from-cloud] The local meta-data already exist: %s\"%lmeta)\n", " if not isfile(cmeta):\n", " args.error(\"[init-pull-from-cloud] The cloud meta-data does not exist: %s\"%cmeta)\n", "\n", " #Let's \"lock\" the local catalog\n", " logging.info(\"Locking local catalog: %s\"%(lcat))\n", " if not lock_file(lcat):\n", " raise RuntimeError(\"The catalog %s is locked!\"%lcat)\n", "\n", " #Copy base from cloud to local\n", " util.copy(ccat, lcat)\n", "\n", " #Apply changesets\n", " cloudDAG = ChangesetDAG(ccat)\n", " path = cloudDAG.path(cloudDAG.root.hash, cloudDAG.leafs[0].hash)\n", " util.apply_changesets(args, path, lcat)\n", "\n", " # Write meta-data both to local and cloud\n", " mfile = MetaFile(lmeta)\n", " utcnow = datetime.utcnow().strftime(DATETIME_FORMAT)[:-4]\n", " mfile['catalog']['hash'] = hashsum(lcat)\n", " mfile['catalog']['modification_utc'] = utcnow\n", " mfile['catalog']['filename'] = lcat\n", " mfile['last_push']['filename'] = cloudDAG.leafs[0].mfile['changeset']['filename']\n", " mfile['last_push']['hash'] = cloudDAG.leafs[0].mfile['changeset']['hash']\n", " mfile['last_push']['modification_utc'] = cloudDAG.leafs[0].mfile['changeset']['modification_utc']\n", " mfile.flush()\n", "\n", " #Let's copy Smart Previews\n", " if not args.no_smart_previews:\n", " copy_smart_previews(lcat, ccat, local2cloud=False)\n", "\n", " #Finally, let's unlock the catalog files\n", " logging.info(\"Unlocking local catalog: %s\"%(lcat))\n", " unlock_file(lcat)\n", "\n", " logging.info(\"[init-pull-from-cloud]: Success!\")" ]
[ 0, 0, 0, 0, 0.015151515151515152, 0, 0, 0.023255813953488372, 0, 0.022988505747126436, 0, 0.03333333333333333, 0, 0.02247191011235955, 0, 0.022222222222222223, 0, 0.027777777777777776, 0.018867924528301886, 0, 0.01639344262295082, 0, 0.02857142857142857, 0, 0, 0.045454545454545456, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011627906976744186, 0, 0.00980392156862745, 0, 0, 0.03225806451612903, 0, 0, 0, 0.022222222222222223, 0.01818181818181818, 0, 0, 0.019230769230769232 ]
50
0.007796
def avail_locations(call=None): ''' List all available locations ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_locations function must be called with ' '-f or --function, or with the --list-locations option' ) ret = {} conn = get_conn(service='SoftLayer_Product_Package') locations = conn.getLocations(id=50) for location in locations: ret[location['id']] = { 'id': location['id'], 'name': location['name'], 'location': location['longName'], } available = conn.getAvailableLocations(id=50) for location in available: if location.get('isAvailable', 0) is 0: continue ret[location['locationId']]['available'] = True return ret
[ "def", "avail_locations", "(", "call", "=", "None", ")", ":", "if", "call", "==", "'action'", ":", "raise", "SaltCloudSystemExit", "(", "'The avail_locations function must be called with '", "'-f or --function, or with the --list-locations option'", ")", "ret", "=", "{", "}", "conn", "=", "get_conn", "(", "service", "=", "'SoftLayer_Product_Package'", ")", "locations", "=", "conn", ".", "getLocations", "(", "id", "=", "50", ")", "for", "location", "in", "locations", ":", "ret", "[", "location", "[", "'id'", "]", "]", "=", "{", "'id'", ":", "location", "[", "'id'", "]", ",", "'name'", ":", "location", "[", "'name'", "]", ",", "'location'", ":", "location", "[", "'longName'", "]", ",", "}", "available", "=", "conn", ".", "getAvailableLocations", "(", "id", "=", "50", ")", "for", "location", "in", "available", ":", "if", "location", ".", "get", "(", "'isAvailable'", ",", "0", ")", "is", "0", ":", "continue", "ret", "[", "location", "[", "'locationId'", "]", "]", "[", "'available'", "]", "=", "True", "return", "ret" ]
27.928571
0.001236
[ "def avail_locations(call=None):\n", " '''\n", " List all available locations\n", " '''\n", " if call == 'action':\n", " raise SaltCloudSystemExit(\n", " 'The avail_locations function must be called with '\n", " '-f or --function, or with the --list-locations option'\n", " )\n", "\n", " ret = {}\n", " conn = get_conn(service='SoftLayer_Product_Package')\n", "\n", " locations = conn.getLocations(id=50)\n", " for location in locations:\n", " ret[location['id']] = {\n", " 'id': location['id'],\n", " 'name': location['name'],\n", " 'location': location['longName'],\n", " }\n", "\n", " available = conn.getAvailableLocations(id=50)\n", " for location in available:\n", " if location.get('isAvailable', 0) is 0:\n", " continue\n", " ret[location['locationId']]['available'] = True\n", "\n", " return ret" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.07142857142857142 ]
28
0.002551
def _CamelCaseToSnakeCase(path_name): """Converts a field name from camelCase to snake_case.""" result = [] for c in path_name: if c == '_': raise ParseError('Fail to parse FieldMask: Path name ' '{0} must not contain "_"s.'.format(path_name)) if c.isupper(): result += '_' result += c.lower() else: result += c return ''.join(result)
[ "def", "_CamelCaseToSnakeCase", "(", "path_name", ")", ":", "result", "=", "[", "]", "for", "c", "in", "path_name", ":", "if", "c", "==", "'_'", ":", "raise", "ParseError", "(", "'Fail to parse FieldMask: Path name '", "'{0} must not contain \"_\"s.'", ".", "format", "(", "path_name", ")", ")", "if", "c", ".", "isupper", "(", ")", ":", "result", "+=", "'_'", "result", "+=", "c", ".", "lower", "(", ")", "else", ":", "result", "+=", "c", "return", "''", ".", "join", "(", "result", ")" ]
29.923077
0.022444
[ "def _CamelCaseToSnakeCase(path_name):\n", " \"\"\"Converts a field name from camelCase to snake_case.\"\"\"\n", " result = []\n", " for c in path_name:\n", " if c == '_':\n", " raise ParseError('Fail to parse FieldMask: Path name '\n", " '{0} must not contain \"_\"s.'.format(path_name))\n", " if c.isupper():\n", " result += '_'\n", " result += c.lower()\n", " else:\n", " result += c\n", " return ''.join(result)" ]
[ 0, 0.016666666666666666, 0.07142857142857142, 0.045454545454545456, 0, 0.01639344262295082, 0, 0, 0.05, 0.038461538461538464, 0, 0.05555555555555555, 0.08333333333333333 ]
13
0.029023
def _is_valid_url(url): """ Helper function to validate that URLs are well formed, i.e that it contains a valid protocol and a valid domain. It does not actually check if the URL exists """ try: parsed = urlparse(url) mandatory_parts = [parsed.scheme, parsed.netloc] return all(mandatory_parts) except: return False
[ "def", "_is_valid_url", "(", "url", ")", ":", "try", ":", "parsed", "=", "urlparse", "(", "url", ")", "mandatory_parts", "=", "[", "parsed", ".", "scheme", ",", "parsed", ".", "netloc", "]", "return", "all", "(", "mandatory_parts", ")", "except", ":", "return", "False" ]
39.8
0.012285
[ "def _is_valid_url(url):\n", " \"\"\" Helper function to validate that URLs are well formed, i.e that it contains a valid\n", " protocol and a valid domain. It does not actually check if the URL exists\n", " \"\"\"\n", " try:\n", " parsed = urlparse(url)\n", " mandatory_parts = [parsed.scheme, parsed.netloc]\n", " return all(mandatory_parts)\n", " except:\n", " return False" ]
[ 0, 0.020833333333333332, 0.011627906976744186, 0, 0, 0, 0, 0, 0.0625, 0.041666666666666664 ]
10
0.013663
async def fetch(self, method, url, params=None, headers=None, data=None): """Make an HTTP request. Automatically uses configured HTTP proxy, and adds Google authorization header and cookies. Failures will be retried MAX_RETRIES times before raising NetworkError. Args: method (str): Request method. url (str): Request URL. params (dict): (optional) Request query string parameters. headers (dict): (optional) Request headers. data: (str): (optional) Request body data. Returns: FetchResponse: Response data. Raises: NetworkError: If the request fails. """ logger.debug('Sending request %s %s:\n%r', method, url, data) for retry_num in range(MAX_RETRIES): try: async with self.fetch_raw(method, url, params=params, headers=headers, data=data) as res: async with async_timeout.timeout(REQUEST_TIMEOUT): body = await res.read() logger.debug('Received response %d %s:\n%r', res.status, res.reason, body) except asyncio.TimeoutError: error_msg = 'Request timed out' except aiohttp.ServerDisconnectedError as err: error_msg = 'Server disconnected error: {}'.format(err) except (aiohttp.ClientError, ValueError) as err: error_msg = 'Request connection error: {}'.format(err) else: break logger.info('Request attempt %d failed: %s', retry_num, error_msg) else: logger.info('Request failed after %d attempts', MAX_RETRIES) raise exceptions.NetworkError(error_msg) if res.status != 200: logger.info('Request returned unexpected status: %d %s', res.status, res.reason) raise exceptions.NetworkError( 'Request return unexpected status: {}: {}' .format(res.status, res.reason) ) return FetchResponse(res.status, body)
[ "async", "def", "fetch", "(", "self", ",", "method", ",", "url", ",", "params", "=", "None", ",", "headers", "=", "None", ",", "data", "=", "None", ")", ":", "logger", ".", "debug", "(", "'Sending request %s %s:\\n%r'", ",", "method", ",", "url", ",", "data", ")", "for", "retry_num", "in", "range", "(", "MAX_RETRIES", ")", ":", "try", ":", "async", "with", "self", ".", "fetch_raw", "(", "method", ",", "url", ",", "params", "=", "params", ",", "headers", "=", "headers", ",", "data", "=", "data", ")", "as", "res", ":", "async", "with", "async_timeout", ".", "timeout", "(", "REQUEST_TIMEOUT", ")", ":", "body", "=", "await", "res", ".", "read", "(", ")", "logger", ".", "debug", "(", "'Received response %d %s:\\n%r'", ",", "res", ".", "status", ",", "res", ".", "reason", ",", "body", ")", "except", "asyncio", ".", "TimeoutError", ":", "error_msg", "=", "'Request timed out'", "except", "aiohttp", ".", "ServerDisconnectedError", "as", "err", ":", "error_msg", "=", "'Server disconnected error: {}'", ".", "format", "(", "err", ")", "except", "(", "aiohttp", ".", "ClientError", ",", "ValueError", ")", "as", "err", ":", "error_msg", "=", "'Request connection error: {}'", ".", "format", "(", "err", ")", "else", ":", "break", "logger", ".", "info", "(", "'Request attempt %d failed: %s'", ",", "retry_num", ",", "error_msg", ")", "else", ":", "logger", ".", "info", "(", "'Request failed after %d attempts'", ",", "MAX_RETRIES", ")", "raise", "exceptions", ".", "NetworkError", "(", "error_msg", ")", "if", "res", ".", "status", "!=", "200", ":", "logger", ".", "info", "(", "'Request returned unexpected status: %d %s'", ",", "res", ".", "status", ",", "res", ".", "reason", ")", "raise", "exceptions", ".", "NetworkError", "(", "'Request return unexpected status: {}: {}'", ".", "format", "(", "res", ".", "status", ",", "res", ".", "reason", ")", ")", "return", "FetchResponse", "(", "res", ".", "status", ",", "body", ")" ]
41.288462
0.00091
[ "async def fetch(self, method, url, params=None, headers=None, data=None):\n", " \"\"\"Make an HTTP request.\n", "\n", " Automatically uses configured HTTP proxy, and adds Google authorization\n", " header and cookies.\n", "\n", " Failures will be retried MAX_RETRIES times before raising NetworkError.\n", "\n", " Args:\n", " method (str): Request method.\n", " url (str): Request URL.\n", " params (dict): (optional) Request query string parameters.\n", " headers (dict): (optional) Request headers.\n", " data: (str): (optional) Request body data.\n", "\n", " Returns:\n", " FetchResponse: Response data.\n", "\n", " Raises:\n", " NetworkError: If the request fails.\n", " \"\"\"\n", " logger.debug('Sending request %s %s:\\n%r', method, url, data)\n", " for retry_num in range(MAX_RETRIES):\n", " try:\n", " async with self.fetch_raw(method, url, params=params,\n", " headers=headers, data=data) as res:\n", " async with async_timeout.timeout(REQUEST_TIMEOUT):\n", " body = await res.read()\n", " logger.debug('Received response %d %s:\\n%r',\n", " res.status, res.reason, body)\n", " except asyncio.TimeoutError:\n", " error_msg = 'Request timed out'\n", " except aiohttp.ServerDisconnectedError as err:\n", " error_msg = 'Server disconnected error: {}'.format(err)\n", " except (aiohttp.ClientError, ValueError) as err:\n", " error_msg = 'Request connection error: {}'.format(err)\n", " else:\n", " break\n", " logger.info('Request attempt %d failed: %s', retry_num, error_msg)\n", " else:\n", " logger.info('Request failed after %d attempts', MAX_RETRIES)\n", " raise exceptions.NetworkError(error_msg)\n", "\n", " if res.status != 200:\n", " logger.info('Request returned unexpected status: %d %s',\n", " res.status, res.reason)\n", " raise exceptions.NetworkError(\n", " 'Request return unexpected status: {}: {}'\n", " .format(res.status, res.reason)\n", " )\n", "\n", " return FetchResponse(res.status, body)" ]
[ 0, 0.030303030303030304, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.021739130434782608 ]
52
0.001001
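
The fetch record above leans on Python's for/else retry idiom: the else clause of the for loop runs only when all MAX_RETRIES attempts failed, i.e. the loop finished without reaching break. A minimal, self-contained sketch of that control flow follows; flaky_call, fetch_with_retries and the failure counter are invented stand-ins for the real aiohttp request, not part of the original code.

# Sketch of the retry loop in fetch(): the for-loop's 'else' branch runs only
# when every attempt failed, i.e. the loop finished without hitting 'break'.
MAX_RETRIES = 3
_attempts = {'n': 0}

def flaky_call():
    # Hypothetical stand-in for the real HTTP request: fails twice, then works.
    _attempts['n'] += 1
    if _attempts['n'] < 3:
        raise ConnectionError('simulated network error')
    return 'ok'

def fetch_with_retries():
    for retry_num in range(MAX_RETRIES):
        try:
            result = flaky_call()
        except ConnectionError as err:
            print('attempt %d failed: %s' % (retry_num, err))
        else:
            break                      # success: skip the for-else branch
    else:
        raise RuntimeError('request failed after %d attempts' % MAX_RETRIES)
    return result

print(fetch_with_retries())            # two failure lines, then 'ok'
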
def _get_config(**kwargs): ''' Return configuration ''' config = { 'filter_id_regex': ['.*!doc_skip'], 'filter_function_regex': [], 'replace_text_regex': {}, 'proccesser': 'highstate_doc.proccesser_markdown', 'max_render_file_size': 10000, 'note': None } if '__salt__' in globals(): config_key = '{0}.config'.format(__virtualname__) config.update(__salt__['config.get'](config_key, {})) # pylint: disable=C0201 for k in set(config.keys()) & set(kwargs.keys()): config[k] = kwargs[k] return config
[ "def", "_get_config", "(", "*", "*", "kwargs", ")", ":", "config", "=", "{", "'filter_id_regex'", ":", "[", "'.*!doc_skip'", "]", ",", "'filter_function_regex'", ":", "[", "]", ",", "'replace_text_regex'", ":", "{", "}", ",", "'proccesser'", ":", "'highstate_doc.proccesser_markdown'", ",", "'max_render_file_size'", ":", "10000", ",", "'note'", ":", "None", "}", "if", "'__salt__'", "in", "globals", "(", ")", ":", "config_key", "=", "'{0}.config'", ".", "format", "(", "__virtualname__", ")", "config", ".", "update", "(", "__salt__", "[", "'config.get'", "]", "(", "config_key", ",", "{", "}", ")", ")", "# pylint: disable=C0201", "for", "k", "in", "set", "(", "config", ".", "keys", "(", ")", ")", "&", "set", "(", "kwargs", ".", "keys", "(", ")", ")", ":", "config", "[", "k", "]", "=", "kwargs", "[", "k", "]", "return", "config" ]
30.842105
0.001656
[ "def _get_config(**kwargs):\n", " '''\n", " Return configuration\n", " '''\n", " config = {\n", " 'filter_id_regex': ['.*!doc_skip'],\n", " 'filter_function_regex': [],\n", " 'replace_text_regex': {},\n", " 'proccesser': 'highstate_doc.proccesser_markdown',\n", " 'max_render_file_size': 10000,\n", " 'note': None\n", " }\n", " if '__salt__' in globals():\n", " config_key = '{0}.config'.format(__virtualname__)\n", " config.update(__salt__['config.get'](config_key, {}))\n", " # pylint: disable=C0201\n", " for k in set(config.keys()) & set(kwargs.keys()):\n", " config[k] = kwargs[k]\n", " return config" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.058823529411764705 ]
19
0.003096
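
The _get_config record merges hard-coded defaults, Salt config data and caller kwargs, only letting kwargs override keys that already exist. A hedged standalone sketch of just the defaults-plus-kwargs step (the __salt__ lookup is omitted and the trimmed default keys are illustrative):

# Only keys already present in the defaults may be overridden, mirroring the
# set-intersection step in _get_config(); unknown kwargs are silently ignored.
def get_config(**kwargs):
    config = {
        'filter_id_regex': ['.*!doc_skip'],
        'max_render_file_size': 10000,
        'note': None,
    }
    for k in set(config) & set(kwargs):
        config[k] = kwargs[k]
    return config

print(get_config(note='example', unknown_key=1))   # unknown_key is dropped, note is set
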
def load_json(json_file, **kwargs): """ Open and load data from a JSON file .. code:: python reusables.load_json("example.json") # {u'key_1': u'val_1', u'key_for_dict': {u'sub_dict_key': 8}} :param json_file: Path to JSON file as string :param kwargs: Additional arguments for the json.load command :return: Dictionary """ with open(json_file) as f: return json.load(f, **kwargs)
[ "def", "load_json", "(", "json_file", ",", "*", "*", "kwargs", ")", ":", "with", "open", "(", "json_file", ")", "as", "f", ":", "return", "json", ".", "load", "(", "f", ",", "*", "*", "kwargs", ")" ]
28.266667
0.002283
[ "def load_json(json_file, **kwargs):\n", " \"\"\"\n", " Open and load data from a JSON file\n", "\n", " .. code:: python\n", "\n", " reusables.load_json(\"example.json\")\n", " # {u'key_1': u'val_1', u'key_for_dict': {u'sub_dict_key': 8}}\n", "\n", " :param json_file: Path to JSON file as string\n", " :param kwargs: Additional arguments for the json.load command\n", " :return: Dictionary\n", " \"\"\"\n", " with open(json_file) as f:\n", " return json.load(f, **kwargs)" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.02702702702702703 ]
15
0.001802
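
A small round-trip sketch for the load_json helper above; the temporary file and its payload are made up for illustration, and the helper body is repeated here only so the snippet runs on its own.

import json
import os
import tempfile

def load_json(json_file, **kwargs):
    with open(json_file) as f:
        return json.load(f, **kwargs)

payload = {'key_1': 'val_1', 'key_for_dict': {'sub_dict_key': 8}}
fd, path = tempfile.mkstemp(suffix='.json')
with os.fdopen(fd, 'w') as f:
    json.dump(payload, f)           # write a throwaway JSON file

assert load_json(path) == payload   # read it back with the helper
os.remove(path)
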
def generate_words(files): """ Transform list of files to list of words, removing new line character and replace name entity '<NE>...</NE>' and abbreviation '<AB>...</AB>' symbol """ repls = {'<NE>' : '','</NE>' : '','<AB>': '','</AB>': ''} words_all = [] for i, file in enumerate(files): lines = open(file, 'r') for line in lines: line = reduce(lambda a, kv: a.replace(*kv), repls.items(), line) words = [word for word in line.split("|") if word is not '\n'] words_all.extend(words) return words_all
[ "def", "generate_words", "(", "files", ")", ":", "repls", "=", "{", "'<NE>'", ":", "''", ",", "'</NE>'", ":", "''", ",", "'<AB>'", ":", "''", ",", "'</AB>'", ":", "''", "}", "words_all", "=", "[", "]", "for", "i", ",", "file", "in", "enumerate", "(", "files", ")", ":", "lines", "=", "open", "(", "file", ",", "'r'", ")", "for", "line", "in", "lines", ":", "line", "=", "reduce", "(", "lambda", "a", ",", "kv", ":", "a", ".", "replace", "(", "*", "kv", ")", ",", "repls", ".", "items", "(", ")", ",", "line", ")", "words", "=", "[", "word", "for", "word", "in", "line", ".", "split", "(", "\"|\"", ")", "if", "word", "is", "not", "'\\n'", "]", "words_all", ".", "extend", "(", "words", ")", "return", "words_all" ]
33.764706
0.011864
[ "def generate_words(files):\n", " \"\"\"\n", " Transform list of files to list of words,\n", " removing new line character\n", " and replace name entity '<NE>...</NE>' and abbreviation '<AB>...</AB>' symbol\n", " \"\"\"\n", "\n", " repls = {'<NE>' : '','</NE>' : '','<AB>': '','</AB>': ''}\n", "\n", " words_all = []\n", " for i, file in enumerate(files):\n", " lines = open(file, 'r')\n", " for line in lines:\n", " line = reduce(lambda a, kv: a.replace(*kv), repls.items(), line)\n", " words = [word for word in line.split(\"|\") if word is not '\\n']\n", " words_all.extend(words)\n", " return words_all" ]
[ 0, 0, 0, 0, 0.012195121951219513, 0, 0, 0.08064516129032258, 0, 0, 0, 0, 0, 0, 0, 0, 0.05 ]
17
0.008402
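
The generate_words record applies several string replacements in one pass with functools.reduce before splitting on '|'. A minimal sketch of that idiom on a made-up corpus line (note the equality comparison; the record itself uses 'is not', which relies on string interning):

from functools import reduce

# Strip the <NE>/<AB> markup from an invented line, then split on '|'.
repls = {'<NE>': '', '</NE>': '', '<AB>': '', '</AB>': ''}
line = '<NE>John</NE>|went|home|\n'
clean = reduce(lambda acc, kv: acc.replace(*kv), repls.items(), line)
words = [w for w in clean.split('|') if w != '\n']
print(words)   # ['John', 'went', 'home']
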
def message(self, message=None): """ Set response message """ if message is not None: self.response_model.message = message return self.response_model.message
[ "def", "message", "(", "self", ",", "message", "=", "None", ")", ":", "if", "message", "is", "not", "None", ":", "self", ".", "response_model", ".", "message", "=", "message", "return", "self", ".", "response_model", ".", "message" ]
38.8
0.010101
[ "def message(self, message=None):\r\n", " \"\"\" Set response message \"\"\"\r\n", " if message is not None:\r\n", " self.response_model.message = message\r\n", " return self.response_model.message" ]
[ 0, 0.02631578947368421, 0, 0, 0.023809523809523808 ]
5
0.010025
def getArguments(parser): "Provides additional validation of the arguments collected by argparse." args = parser.parse_args() if not '{}' in args.output: raise argparse.ArgumentError(args.output, 'The output argument string must contain the sequence "{}".') return args
[ "def", "getArguments", "(", "parser", ")", ":", "args", "=", "parser", ".", "parse_args", "(", ")", "if", "not", "'{}'", "in", "args", ".", "output", ":", "raise", "argparse", ".", "ArgumentError", "(", "args", ".", "output", ",", "'The output argument string must contain the sequence \"{}\".'", ")", "return", "args" ]
48
0.010239
[ "def getArguments(parser):\n", " \"Provides additional validation of the arguments collected by argparse.\"\n", " args = parser.parse_args()\n", " if not '{}' in args.output:\n", " raise argparse.ArgumentError(args.output, 'The output argument string must contain the sequence \"{}\".')\n", " return args" ]
[ 0, 0, 0, 0.03125, 0.008928571428571428, 0.06666666666666667 ]
6
0.017808
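
A hedged, self-contained sketch of the check getArguments performs. It uses parser.error instead of raising argparse.ArgumentError directly, because ArgumentError expects an argparse action as its first argument; the --output flag and its default value are invented for the demo.

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--output', default='slice_{}.nii',
                    help='Output template; must contain the sequence "{}".')
args = parser.parse_args([])       # empty argv so the sketch runs anywhere
if '{}' not in args.output:
    parser.error('The output argument string must contain the sequence "{}".')
print(args.output)
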
def srfrec(body, longitude, latitude): """ Convert planetocentric latitude and longitude of a surface point on a specified body to rectangular coordinates. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/srfrec_c.html :param body: NAIF integer code of an extended body. :type body: int :param longitude: Longitude of point in radians. :type longitude: float :param latitude: Latitude of point in radians. :type latitude: float :return: Rectangular coordinates of the point. :rtype: 3-Element Array of floats """ body = ctypes.c_int(body) longitude = ctypes.c_double(longitude) latitude = ctypes.c_double(latitude) rectan = stypes.emptyDoubleVector(3) libspice.srfrec_c(body, longitude, latitude, rectan) return stypes.cVectorToPython(rectan)
[ "def", "srfrec", "(", "body", ",", "longitude", ",", "latitude", ")", ":", "body", "=", "ctypes", ".", "c_int", "(", "body", ")", "longitude", "=", "ctypes", ".", "c_double", "(", "longitude", ")", "latitude", "=", "ctypes", ".", "c_double", "(", "latitude", ")", "rectan", "=", "stypes", ".", "emptyDoubleVector", "(", "3", ")", "libspice", ".", "srfrec_c", "(", "body", ",", "longitude", ",", "latitude", ",", "rectan", ")", "return", "stypes", ".", "cVectorToPython", "(", "rectan", ")" ]
36.636364
0.001209
[ "def srfrec(body, longitude, latitude):\n", " \"\"\"\n", " Convert planetocentric latitude and longitude of a surface\n", " point on a specified body to rectangular coordinates.\n", "\n", " http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/srfrec_c.html\n", "\n", " :param body: NAIF integer code of an extended body.\n", " :type body: int\n", " :param longitude: Longitude of point in radians.\n", " :type longitude: float\n", " :param latitude: Latitude of point in radians.\n", " :type latitude: float\n", " :return: Rectangular coordinates of the point.\n", " :rtype: 3-Element Array of floats\n", " \"\"\"\n", " body = ctypes.c_int(body)\n", " longitude = ctypes.c_double(longitude)\n", " latitude = ctypes.c_double(latitude)\n", " rectan = stypes.emptyDoubleVector(3)\n", " libspice.srfrec_c(body, longitude, latitude, rectan)\n", " return stypes.cVectorToPython(rectan)" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.024390243902439025 ]
22
0.001109
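
For intuition only: on a spherical body, the planetocentric latitude/longitude to rectangular conversion that srfrec performs reduces to the formulas below. The real CSPICE routine uses the body's triaxial radii from loaded kernels, so this fixed-radius sketch (body code ignored, mean Earth radius assumed) is an approximation, not a substitute.

import math

def srfrec_spherical(longitude, latitude, radius_km=6371.0):
    # x, y, z of a surface point from longitude/latitude given in radians.
    x = radius_km * math.cos(latitude) * math.cos(longitude)
    y = radius_km * math.cos(latitude) * math.sin(longitude)
    z = radius_km * math.sin(latitude)
    return [x, y, z]

print(srfrec_spherical(math.radians(100.0), math.radians(35.0)))
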
def connected_components(G): """ Check if G is connected and return list of sets. Every set contains all vertices in one connected component. """ result = [] vertices = set(G.vertices) while vertices: n = vertices.pop() group = {n} queue = Queue() queue.put(n) while not queue.empty(): n = queue.get() neighbors = set(G.vertices[n]) neighbors.difference_update(group) vertices.difference_update(neighbors) group.update(neighbors) for element in neighbors: queue.put(element) result.append(group) return result
[ "def", "connected_components", "(", "G", ")", ":", "result", "=", "[", "]", "vertices", "=", "set", "(", "G", ".", "vertices", ")", "while", "vertices", ":", "n", "=", "vertices", ".", "pop", "(", ")", "group", "=", "{", "n", "}", "queue", "=", "Queue", "(", ")", "queue", ".", "put", "(", "n", ")", "while", "not", "queue", ".", "empty", "(", ")", ":", "n", "=", "queue", ".", "get", "(", ")", "neighbors", "=", "set", "(", "G", ".", "vertices", "[", "n", "]", ")", "neighbors", ".", "difference_update", "(", "group", ")", "vertices", ".", "difference_update", "(", "neighbors", ")", "group", ".", "update", "(", "neighbors", ")", "for", "element", "in", "neighbors", ":", "queue", ".", "put", "(", "element", ")", "result", ".", "append", "(", "group", ")", "return", "result" ]
30.772727
0.001433
[ "def connected_components(G):\r\n", " \"\"\"\r\n", " Check if G is connected and return list of sets. Every\r\n", " set contains all vertices in one connected component.\r\n", " \"\"\"\r\n", " result = []\r\n", " vertices = set(G.vertices)\r\n", " while vertices:\r\n", " n = vertices.pop()\r\n", " group = {n}\r\n", " queue = Queue()\r\n", " queue.put(n)\r\n", " while not queue.empty():\r\n", " n = queue.get()\r\n", " neighbors = set(G.vertices[n])\r\n", " neighbors.difference_update(group)\r\n", " vertices.difference_update(neighbors)\r\n", " group.update(neighbors)\r\n", " for element in neighbors:\r\n", " queue.put(element)\r\n", " result.append(group)\r\n", " return result" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.058823529411764705 ]
22
0.002674
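
A hedged usage sketch for connected_components: the function only needs an object whose vertices attribute maps each vertex to an iterable of neighbours, so a SimpleNamespace stands in for the real graph class here (the function itself also needs Queue from the queue module in scope). The sample graph is invented.

from types import SimpleNamespace

# Two components: {a, b, c} and {d}.
G = SimpleNamespace(vertices={
    'a': ['b'],
    'b': ['a', 'c'],
    'c': ['b'],
    'd': [],
})

groups = connected_components(G)            # function from the record above
print(sorted(sorted(g) for g in groups))    # [['a', 'b', 'c'], ['d']]
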
def fuse_list( mafs ): """ Try to fuse a list of blocks by progressively fusing each adjacent pair. """ last = None for m in mafs: if last is None: last = m else: fused = fuse( last, m ) if fused: last = fused else: yield last last = m if last: yield last
[ "def", "fuse_list", "(", "mafs", ")", ":", "last", "=", "None", "for", "m", "in", "mafs", ":", "if", "last", "is", "None", ":", "last", "=", "m", "else", ":", "fused", "=", "fuse", "(", "last", ",", "m", ")", "if", "fused", ":", "last", "=", "fused", "else", ":", "yield", "last", "last", "=", "m", "if", "last", ":", "yield", "last" ]
22.529412
0.012531
[ "def fuse_list( mafs ):\n", " \"\"\"\n", " Try to fuse a list of blocks by progressively fusing each adjacent pair.\n", " \"\"\"\n", " last = None\n", " for m in mafs:\n", " if last is None:\n", " last = m\n", " else:\n", " fused = fuse( last, m )\n", " if fused:\n", " last = fused\n", " else:\n", " yield last\n", " last = m\n", " if last:\n", " yield last" ]
[ 0.08695652173913043, 0, 0, 0, 0, 0, 0, 0, 0, 0.05555555555555555, 0, 0, 0, 0, 0, 0, 0.05555555555555555 ]
17
0.011651
def replace(input, **params): """ Replaces field value :param input: :param params: :return: """ PARAM_REPLACE_LIST = 'replace' REPLACE_FIELD = 'field' REPLACE_FIND_VALUE = 'value.to_find' REPLACE_WITH_VALUE = 'value.replace_with' replace_list = params.get(PARAM_REPLACE_LIST) for row in input: for replace in replace_list: if row[replace[REPLACE_FIELD]] == replace[REPLACE_FIND_VALUE]: row[replace[REPLACE_FIELD]] = replace[REPLACE_WITH_VALUE] return input
[ "def", "replace", "(", "input", ",", "*", "*", "params", ")", ":", "PARAM_REPLACE_LIST", "=", "'replace'", "REPLACE_FIELD", "=", "'field'", "REPLACE_FIND_VALUE", "=", "'value.to_find'", "REPLACE_WITH_VALUE", "=", "'value.replace_with'", "replace_list", "=", "params", ".", "get", "(", "PARAM_REPLACE_LIST", ")", "for", "row", "in", "input", ":", "for", "replace", "in", "replace_list", ":", "if", "row", "[", "replace", "[", "REPLACE_FIELD", "]", "]", "==", "replace", "[", "REPLACE_FIND_VALUE", "]", ":", "row", "[", "replace", "[", "REPLACE_FIELD", "]", "]", "=", "replace", "[", "REPLACE_WITH_VALUE", "]", "return", "input" ]
29.388889
0.001832
[ "def replace(input, **params):\n", " \"\"\"\n", " Replaces field value\n", " :param input:\n", " :param params:\n", " :return:\n", " \"\"\"\n", " PARAM_REPLACE_LIST = 'replace'\n", " REPLACE_FIELD = 'field'\n", " REPLACE_FIND_VALUE = 'value.to_find'\n", " REPLACE_WITH_VALUE = 'value.replace_with'\n", "\n", " replace_list = params.get(PARAM_REPLACE_LIST)\n", " for row in input:\n", " for replace in replace_list:\n", " if row[replace[REPLACE_FIELD]] == replace[REPLACE_FIND_VALUE]:\n", " row[replace[REPLACE_FIELD]] = replace[REPLACE_WITH_VALUE]\n", " return input" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0625 ]
18
0.003472
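
A usage sketch for the replace transform above, with invented rows and a params dict matching the key names it expects ('replace', 'field', 'value.to_find', 'value.replace_with'):

rows = [
    {'country': 'UK', 'count': 3},
    {'country': 'United Kingdom', 'count': 5},
]
params = {
    'replace': [
        {'field': 'country',
         'value.to_find': 'UK',
         'value.replace_with': 'United Kingdom'},
    ],
}
print(replace(rows, **params))   # both rows now read 'United Kingdom'
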
def print_map(self): """Open impact report dialog used to tune report when printing.""" # Check if selected layer is valid impact_layer = self.iface.activeLayer() if impact_layer is None: # noinspection PyCallByClass,PyTypeChecker QMessageBox.warning( self, self.tr('InaSAFE'), self.tr('Please select a valid impact layer before ' 'trying to print.')) return # Get output path from datastore # Fetch report for pdfs report report_path = os.path.dirname(impact_layer.source()) # Get the hazard and exposure definition used in current IF hazard = definition( QgsExpressionContextUtils.projectScope( QgsProject.instance()).variable( 'hazard_keywords__hazard') ) exposure = definition( QgsExpressionContextUtils.projectScope( QgsProject.instance()).variable( 'exposure_keywords__exposure') ) # TODO: temporary hack until Impact Function becomes serializable # need to have impact report standard_impact_report_metadata = ReportMetadata( metadata_dict=standard_impact_report_metadata_pdf) standard_map_report_metadata = ReportMetadata( metadata_dict=update_template_component( component=map_report, hazard=hazard, exposure=exposure )) standard_infographic_report_metadata = ReportMetadata( metadata_dict=update_template_component(infographic_report)) standard_report_metadata = [ standard_impact_report_metadata, standard_map_report_metadata, standard_infographic_report_metadata ] def retrieve_components(tags): products = [] for report_metadata in standard_report_metadata: products += (report_metadata.component_by_tags(tags)) return products def retrieve_paths(products, suffix=None): paths = [] for c in products: path = ImpactReport.absolute_output_path( os.path.join(report_path, 'output'), products, c.key) if isinstance(path, list): for p in path: paths.append(p) elif isinstance(path, dict): for p in list(path.values()): paths.append(p) else: paths.append(path) if suffix: paths = [p for p in paths if p.endswith(suffix)] paths = [p for p in paths if os.path.exists(p)] return paths def wrap_output_paths(paths): """Make sure the file paths can wrap nicely.""" return [p.replace(os.sep, '<wbr>' + os.sep) for p in paths] pdf_products = retrieve_components( [final_product_tag, pdf_product_tag]) pdf_output_paths = retrieve_paths(pdf_products, '.pdf') html_products = retrieve_components( [final_product_tag, html_product_tag]) html_output_paths = retrieve_paths(html_products, '.html') qpt_products = retrieve_components( [final_product_tag, qpt_product_tag]) qpt_output_paths = retrieve_paths(qpt_products, '.qpt') # create message to user status = m.Message( m.Heading(self.tr('Map Creator'), **INFO_STYLE), m.Paragraph(self.tr( 'Your PDF was created....opening using the default PDF ' 'viewer on your system.')), m.ImportantText(self.tr( 'The generated pdfs were saved ' 'as:'))) for path in wrap_output_paths(pdf_output_paths): status.add(m.Paragraph(path)) status.add(m.Paragraph( m.ImportantText(self.tr('The generated htmls were saved as:')))) for path in wrap_output_paths(html_output_paths): status.add(m.Paragraph(path)) status.add(m.Paragraph( m.ImportantText(self.tr('The generated qpts were saved as:')))) for path in wrap_output_paths(qpt_output_paths): status.add(m.Paragraph(path)) send_static_message(self, status) for path in pdf_output_paths: # noinspection PyCallByClass,PyTypeChecker,PyTypeChecker QDesktopServices.openUrl(QUrl.fromLocalFile(path))
[ "def", "print_map", "(", "self", ")", ":", "# Check if selected layer is valid", "impact_layer", "=", "self", ".", "iface", ".", "activeLayer", "(", ")", "if", "impact_layer", "is", "None", ":", "# noinspection PyCallByClass,PyTypeChecker", "QMessageBox", ".", "warning", "(", "self", ",", "self", ".", "tr", "(", "'InaSAFE'", ")", ",", "self", ".", "tr", "(", "'Please select a valid impact layer before '", "'trying to print.'", ")", ")", "return", "# Get output path from datastore", "# Fetch report for pdfs report", "report_path", "=", "os", ".", "path", ".", "dirname", "(", "impact_layer", ".", "source", "(", ")", ")", "# Get the hazard and exposure definition used in current IF", "hazard", "=", "definition", "(", "QgsExpressionContextUtils", ".", "projectScope", "(", "QgsProject", ".", "instance", "(", ")", ")", ".", "variable", "(", "'hazard_keywords__hazard'", ")", ")", "exposure", "=", "definition", "(", "QgsExpressionContextUtils", ".", "projectScope", "(", "QgsProject", ".", "instance", "(", ")", ")", ".", "variable", "(", "'exposure_keywords__exposure'", ")", ")", "# TODO: temporary hack until Impact Function becomes serializable", "# need to have impact report", "standard_impact_report_metadata", "=", "ReportMetadata", "(", "metadata_dict", "=", "standard_impact_report_metadata_pdf", ")", "standard_map_report_metadata", "=", "ReportMetadata", "(", "metadata_dict", "=", "update_template_component", "(", "component", "=", "map_report", ",", "hazard", "=", "hazard", ",", "exposure", "=", "exposure", ")", ")", "standard_infographic_report_metadata", "=", "ReportMetadata", "(", "metadata_dict", "=", "update_template_component", "(", "infographic_report", ")", ")", "standard_report_metadata", "=", "[", "standard_impact_report_metadata", ",", "standard_map_report_metadata", ",", "standard_infographic_report_metadata", "]", "def", "retrieve_components", "(", "tags", ")", ":", "products", "=", "[", "]", "for", "report_metadata", "in", "standard_report_metadata", ":", "products", "+=", "(", "report_metadata", ".", "component_by_tags", "(", "tags", ")", ")", "return", "products", "def", "retrieve_paths", "(", "products", ",", "suffix", "=", "None", ")", ":", "paths", "=", "[", "]", "for", "c", "in", "products", ":", "path", "=", "ImpactReport", ".", "absolute_output_path", "(", "os", ".", "path", ".", "join", "(", "report_path", ",", "'output'", ")", ",", "products", ",", "c", ".", "key", ")", "if", "isinstance", "(", "path", ",", "list", ")", ":", "for", "p", "in", "path", ":", "paths", ".", "append", "(", "p", ")", "elif", "isinstance", "(", "path", ",", "dict", ")", ":", "for", "p", "in", "list", "(", "path", ".", "values", "(", ")", ")", ":", "paths", ".", "append", "(", "p", ")", "else", ":", "paths", ".", "append", "(", "path", ")", "if", "suffix", ":", "paths", "=", "[", "p", "for", "p", "in", "paths", "if", "p", ".", "endswith", "(", "suffix", ")", "]", "paths", "=", "[", "p", "for", "p", "in", "paths", "if", "os", ".", "path", ".", "exists", "(", "p", ")", "]", "return", "paths", "def", "wrap_output_paths", "(", "paths", ")", ":", "\"\"\"Make sure the file paths can wrap nicely.\"\"\"", "return", "[", "p", ".", "replace", "(", "os", ".", "sep", ",", "'<wbr>'", "+", "os", ".", "sep", ")", "for", "p", "in", "paths", "]", "pdf_products", "=", "retrieve_components", "(", "[", "final_product_tag", ",", "pdf_product_tag", "]", ")", "pdf_output_paths", "=", "retrieve_paths", "(", "pdf_products", ",", "'.pdf'", ")", "html_products", "=", "retrieve_components", "(", "[", 
"final_product_tag", ",", "html_product_tag", "]", ")", "html_output_paths", "=", "retrieve_paths", "(", "html_products", ",", "'.html'", ")", "qpt_products", "=", "retrieve_components", "(", "[", "final_product_tag", ",", "qpt_product_tag", "]", ")", "qpt_output_paths", "=", "retrieve_paths", "(", "qpt_products", ",", "'.qpt'", ")", "# create message to user", "status", "=", "m", ".", "Message", "(", "m", ".", "Heading", "(", "self", ".", "tr", "(", "'Map Creator'", ")", ",", "*", "*", "INFO_STYLE", ")", ",", "m", ".", "Paragraph", "(", "self", ".", "tr", "(", "'Your PDF was created....opening using the default PDF '", "'viewer on your system.'", ")", ")", ",", "m", ".", "ImportantText", "(", "self", ".", "tr", "(", "'The generated pdfs were saved '", "'as:'", ")", ")", ")", "for", "path", "in", "wrap_output_paths", "(", "pdf_output_paths", ")", ":", "status", ".", "add", "(", "m", ".", "Paragraph", "(", "path", ")", ")", "status", ".", "add", "(", "m", ".", "Paragraph", "(", "m", ".", "ImportantText", "(", "self", ".", "tr", "(", "'The generated htmls were saved as:'", ")", ")", ")", ")", "for", "path", "in", "wrap_output_paths", "(", "html_output_paths", ")", ":", "status", ".", "add", "(", "m", ".", "Paragraph", "(", "path", ")", ")", "status", ".", "add", "(", "m", ".", "Paragraph", "(", "m", ".", "ImportantText", "(", "self", ".", "tr", "(", "'The generated qpts were saved as:'", ")", ")", ")", ")", "for", "path", "in", "wrap_output_paths", "(", "qpt_output_paths", ")", ":", "status", ".", "add", "(", "m", ".", "Paragraph", "(", "path", ")", ")", "send_static_message", "(", "self", ",", "status", ")", "for", "path", "in", "pdf_output_paths", ":", "# noinspection PyCallByClass,PyTypeChecker,PyTypeChecker", "QDesktopServices", ".", "openUrl", "(", "QUrl", ".", "fromLocalFile", "(", "path", ")", ")" ]
37.239669
0.000432
[ "def print_map(self):\n", " \"\"\"Open impact report dialog used to tune report when printing.\"\"\"\n", " # Check if selected layer is valid\n", " impact_layer = self.iface.activeLayer()\n", " if impact_layer is None:\n", " # noinspection PyCallByClass,PyTypeChecker\n", " QMessageBox.warning(\n", " self,\n", " self.tr('InaSAFE'),\n", " self.tr('Please select a valid impact layer before '\n", " 'trying to print.'))\n", " return\n", "\n", " # Get output path from datastore\n", " # Fetch report for pdfs report\n", " report_path = os.path.dirname(impact_layer.source())\n", "\n", " # Get the hazard and exposure definition used in current IF\n", " hazard = definition(\n", " QgsExpressionContextUtils.projectScope(\n", " QgsProject.instance()).variable(\n", " 'hazard_keywords__hazard')\n", " )\n", " exposure = definition(\n", " QgsExpressionContextUtils.projectScope(\n", " QgsProject.instance()).variable(\n", " 'exposure_keywords__exposure')\n", " )\n", "\n", " # TODO: temporary hack until Impact Function becomes serializable\n", " # need to have impact report\n", " standard_impact_report_metadata = ReportMetadata(\n", " metadata_dict=standard_impact_report_metadata_pdf)\n", " standard_map_report_metadata = ReportMetadata(\n", " metadata_dict=update_template_component(\n", " component=map_report,\n", " hazard=hazard,\n", " exposure=exposure\n", " ))\n", " standard_infographic_report_metadata = ReportMetadata(\n", " metadata_dict=update_template_component(infographic_report))\n", "\n", " standard_report_metadata = [\n", " standard_impact_report_metadata,\n", " standard_map_report_metadata,\n", " standard_infographic_report_metadata\n", " ]\n", "\n", " def retrieve_components(tags):\n", " products = []\n", " for report_metadata in standard_report_metadata:\n", " products += (report_metadata.component_by_tags(tags))\n", " return products\n", "\n", " def retrieve_paths(products, suffix=None):\n", " paths = []\n", " for c in products:\n", " path = ImpactReport.absolute_output_path(\n", " os.path.join(report_path, 'output'),\n", " products,\n", " c.key)\n", " if isinstance(path, list):\n", " for p in path:\n", " paths.append(p)\n", " elif isinstance(path, dict):\n", " for p in list(path.values()):\n", " paths.append(p)\n", " else:\n", " paths.append(path)\n", " if suffix:\n", " paths = [p for p in paths if p.endswith(suffix)]\n", "\n", " paths = [p for p in paths if os.path.exists(p)]\n", " return paths\n", "\n", " def wrap_output_paths(paths):\n", " \"\"\"Make sure the file paths can wrap nicely.\"\"\"\n", " return [p.replace(os.sep, '<wbr>' + os.sep) for p in paths]\n", "\n", " pdf_products = retrieve_components(\n", " [final_product_tag, pdf_product_tag])\n", " pdf_output_paths = retrieve_paths(pdf_products, '.pdf')\n", "\n", " html_products = retrieve_components(\n", " [final_product_tag, html_product_tag])\n", " html_output_paths = retrieve_paths(html_products, '.html')\n", "\n", " qpt_products = retrieve_components(\n", " [final_product_tag, qpt_product_tag])\n", " qpt_output_paths = retrieve_paths(qpt_products, '.qpt')\n", "\n", " # create message to user\n", " status = m.Message(\n", " m.Heading(self.tr('Map Creator'), **INFO_STYLE),\n", " m.Paragraph(self.tr(\n", " 'Your PDF was created....opening using the default PDF '\n", " 'viewer on your system.')),\n", " m.ImportantText(self.tr(\n", " 'The generated pdfs were saved '\n", " 'as:')))\n", "\n", " for path in wrap_output_paths(pdf_output_paths):\n", " status.add(m.Paragraph(path))\n", "\n", " status.add(m.Paragraph(\n", " 
m.ImportantText(self.tr('The generated htmls were saved as:'))))\n", "\n", " for path in wrap_output_paths(html_output_paths):\n", " status.add(m.Paragraph(path))\n", "\n", " status.add(m.Paragraph(\n", " m.ImportantText(self.tr('The generated qpts were saved as:'))))\n", "\n", " for path in wrap_output_paths(qpt_output_paths):\n", " status.add(m.Paragraph(path))\n", "\n", " send_static_message(self, status)\n", "\n", " for path in pdf_output_paths:\n", " # noinspection PyCallByClass,PyTypeChecker,PyTypeChecker\n", " QDesktopServices.openUrl(QUrl.fromLocalFile(path))" ]
[ 0, 0.013333333333333334, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.016129032258064516 ]
121
0.000243
def deploy( config, name, bucket, timeout, memory, description, subnet_ids, security_group_ids ): """ Deploy/Update a function from a project directory """ # options should override config if it is there myname = name or config.name mybucket = bucket or config.bucket mytimeout = timeout or config.timeout mymemory = memory or config.memory mydescription = description or config.description mysubnet_ids = subnet_ids or config.subnet_ids mysecurity_group_ids = security_group_ids or config.security_group_ids vpc_config = {} if mysubnet_ids and mysecurity_group_ids: vpc_config = { 'SubnetIds': mysubnet_ids.split(','), 'SecurityGroupIds': mysecurity_group_ids.split(',') } click.echo('Deploying {} to {}'.format(myname, mybucket)) lambder.deploy_function( myname, mybucket, mytimeout, mymemory, mydescription, vpc_config )
[ "def", "deploy", "(", "config", ",", "name", ",", "bucket", ",", "timeout", ",", "memory", ",", "description", ",", "subnet_ids", ",", "security_group_ids", ")", ":", "# options should override config if it is there", "myname", "=", "name", "or", "config", ".", "name", "mybucket", "=", "bucket", "or", "config", ".", "bucket", "mytimeout", "=", "timeout", "or", "config", ".", "timeout", "mymemory", "=", "memory", "or", "config", ".", "memory", "mydescription", "=", "description", "or", "config", ".", "description", "mysubnet_ids", "=", "subnet_ids", "or", "config", ".", "subnet_ids", "mysecurity_group_ids", "=", "security_group_ids", "or", "config", ".", "security_group_ids", "vpc_config", "=", "{", "}", "if", "mysubnet_ids", "and", "mysecurity_group_ids", ":", "vpc_config", "=", "{", "'SubnetIds'", ":", "mysubnet_ids", ".", "split", "(", "','", ")", ",", "'SecurityGroupIds'", ":", "mysecurity_group_ids", ".", "split", "(", "','", ")", "}", "click", ".", "echo", "(", "'Deploying {} to {}'", ".", "format", "(", "myname", ",", "mybucket", ")", ")", "lambder", ".", "deploy_function", "(", "myname", ",", "mybucket", ",", "mytimeout", ",", "mymemory", ",", "mydescription", ",", "vpc_config", ")" ]
26.833333
0.000999
[ "def deploy(\n", " config,\n", " name,\n", " bucket,\n", " timeout,\n", " memory,\n", " description,\n", " subnet_ids,\n", " security_group_ids\n", "):\n", " \"\"\" Deploy/Update a function from a project directory \"\"\"\n", " # options should override config if it is there\n", " myname = name or config.name\n", " mybucket = bucket or config.bucket\n", " mytimeout = timeout or config.timeout\n", " mymemory = memory or config.memory\n", " mydescription = description or config.description\n", " mysubnet_ids = subnet_ids or config.subnet_ids\n", " mysecurity_group_ids = security_group_ids or config.security_group_ids\n", "\n", " vpc_config = {}\n", " if mysubnet_ids and mysecurity_group_ids:\n", " vpc_config = {\n", " 'SubnetIds': mysubnet_ids.split(','),\n", " 'SecurityGroupIds': mysecurity_group_ids.split(',')\n", " }\n", "\n", " click.echo('Deploying {} to {}'.format(myname, mybucket))\n", " lambder.deploy_function(\n", " myname,\n", " mybucket,\n", " mytimeout,\n", " mymemory,\n", " mydescription,\n", " vpc_config\n", " )" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.2 ]
36
0.005556
def cleanup_dataset(dataset, data_home=None, ext=".zip"): """ Removes the dataset directory and archive file from the data home directory. Parameters ---------- dataset : str The name of the dataset; should either be a folder in data home or specified in the yellowbrick.datasets.DATASETS variable. data_home : str, optional The path on disk where data is stored. If not passed in, it is looked up from YELLOWBRICK_DATA or the default returned by ``get_data_home``. ext : str, default: ".zip" The extension of the archive file. Returns ------- removed : int The number of objects removed from data_home. """ removed = 0 data_home = get_data_home(data_home) # Paths to remove datadir = os.path.join(data_home, dataset) archive = os.path.join(data_home, dataset+ext) # Remove directory and contents if os.path.exists(datadir): shutil.rmtree(datadir) removed += 1 # Remove the archive file if os.path.exists(archive): os.remove(archive) removed += 1 return removed
[ "def", "cleanup_dataset", "(", "dataset", ",", "data_home", "=", "None", ",", "ext", "=", "\".zip\"", ")", ":", "removed", "=", "0", "data_home", "=", "get_data_home", "(", "data_home", ")", "# Paths to remove", "datadir", "=", "os", ".", "path", ".", "join", "(", "data_home", ",", "dataset", ")", "archive", "=", "os", ".", "path", ".", "join", "(", "data_home", ",", "dataset", "+", "ext", ")", "# Remove directory and contents", "if", "os", ".", "path", ".", "exists", "(", "datadir", ")", ":", "shutil", ".", "rmtree", "(", "datadir", ")", "removed", "+=", "1", "# Remove the archive file", "if", "os", ".", "path", ".", "exists", "(", "archive", ")", ":", "os", ".", "remove", "(", "archive", ")", "removed", "+=", "1", "return", "removed" ]
27.35
0.001765
[ "def cleanup_dataset(dataset, data_home=None, ext=\".zip\"):\n", " \"\"\"\n", " Removes the dataset directory and archive file from the data home directory.\n", "\n", " Parameters\n", " ----------\n", " dataset : str\n", " The name of the dataset; should either be a folder in data home or\n", " specified in the yellowbrick.datasets.DATASETS variable.\n", "\n", " data_home : str, optional\n", " The path on disk where data is stored. If not passed in, it is looked\n", " up from YELLOWBRICK_DATA or the default returned by ``get_data_home``.\n", "\n", " ext : str, default: \".zip\"\n", " The extension of the archive file.\n", "\n", " Returns\n", " -------\n", " removed : int\n", " The number of objects removed from data_home.\n", " \"\"\"\n", " removed = 0\n", " data_home = get_data_home(data_home)\n", "\n", " # Paths to remove\n", " datadir = os.path.join(data_home, dataset)\n", " archive = os.path.join(data_home, dataset+ext)\n", "\n", " # Remove directory and contents\n", " if os.path.exists(datadir):\n", " shutil.rmtree(datadir)\n", " removed += 1\n", "\n", " # Remove the archive file\n", " if os.path.exists(archive):\n", " os.remove(archive)\n", " removed += 1\n", "\n", " return removed" ]
[ 0, 0, 0.012345679012345678, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.05555555555555555 ]
40
0.001698
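
A self-contained sketch of the on-disk effect of cleanup_dataset, using a temporary directory and an invented dataset name instead of the real get_data_home lookup:

import os
import shutil
import tempfile

data_home = tempfile.mkdtemp()
dataset = 'concrete'                                  # hypothetical dataset name

# Fake the layout cleanup_dataset expects: a folder plus a .zip archive.
os.makedirs(os.path.join(data_home, dataset))
open(os.path.join(data_home, dataset + '.zip'), 'w').close()

removed = 0
datadir = os.path.join(data_home, dataset)
archive = os.path.join(data_home, dataset + '.zip')
if os.path.exists(datadir):
    shutil.rmtree(datadir)                            # drop the extracted folder
    removed += 1
if os.path.exists(archive):
    os.remove(archive)                                # drop the archive file
    removed += 1
print(removed)                                        # 2
shutil.rmtree(data_home)
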
def submit(self, subreddit, title, text=None, url=None, captcha=None, save=None, send_replies=None, resubmit=None, **kwargs): """Submit a new link to the given subreddit. Accepts either a Subreddit object or a str containing the subreddit's display name. :param resubmit: If True, submit the link even if it has already been submitted. :param save: If True the new Submission will be saved after creation. :param send_replies: If True, inbox replies will be received when people comment on the submission. If set to None, the default of True for text posts and False for link posts will be used. :returns: The newly created Submission object if the reddit instance can access it. Otherwise, return the url to the submission. This function may result in a captcha challenge. PRAW will automatically prompt you for a response. See :ref:`handling-captchas` if you want to manually handle captchas. """ if isinstance(text, six.string_types) == bool(url): raise TypeError('One (and only one) of text or url is required!') data = {'sr': six.text_type(subreddit), 'title': title} if text or text == '': data['kind'] = 'self' data['text'] = text else: data['kind'] = 'link' data['url'] = url if captcha: data.update(captcha) if resubmit is not None: data['resubmit'] = resubmit if save is not None: data['save'] = save if send_replies is not None: data['sendreplies'] = send_replies result = self.request_json(self.config['submit'], data=data, retry_on_error=False) url = result['data']['url'] # Clear the OAuth setting when attempting to fetch the submission if self._use_oauth: self._use_oauth = False if url.startswith(self.config.oauth_url): url = self.config.api_url + url[len(self.config.oauth_url):] try: return self.get_submission(url) except errors.Forbidden: # While the user may be able to submit to a subreddit, # that does not guarantee they have read access. return url
[ "def", "submit", "(", "self", ",", "subreddit", ",", "title", ",", "text", "=", "None", ",", "url", "=", "None", ",", "captcha", "=", "None", ",", "save", "=", "None", ",", "send_replies", "=", "None", ",", "resubmit", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "text", ",", "six", ".", "string_types", ")", "==", "bool", "(", "url", ")", ":", "raise", "TypeError", "(", "'One (and only one) of text or url is required!'", ")", "data", "=", "{", "'sr'", ":", "six", ".", "text_type", "(", "subreddit", ")", ",", "'title'", ":", "title", "}", "if", "text", "or", "text", "==", "''", ":", "data", "[", "'kind'", "]", "=", "'self'", "data", "[", "'text'", "]", "=", "text", "else", ":", "data", "[", "'kind'", "]", "=", "'link'", "data", "[", "'url'", "]", "=", "url", "if", "captcha", ":", "data", ".", "update", "(", "captcha", ")", "if", "resubmit", "is", "not", "None", ":", "data", "[", "'resubmit'", "]", "=", "resubmit", "if", "save", "is", "not", "None", ":", "data", "[", "'save'", "]", "=", "save", "if", "send_replies", "is", "not", "None", ":", "data", "[", "'sendreplies'", "]", "=", "send_replies", "result", "=", "self", ".", "request_json", "(", "self", ".", "config", "[", "'submit'", "]", ",", "data", "=", "data", ",", "retry_on_error", "=", "False", ")", "url", "=", "result", "[", "'data'", "]", "[", "'url'", "]", "# Clear the OAuth setting when attempting to fetch the submission", "if", "self", ".", "_use_oauth", ":", "self", ".", "_use_oauth", "=", "False", "if", "url", ".", "startswith", "(", "self", ".", "config", ".", "oauth_url", ")", ":", "url", "=", "self", ".", "config", ".", "api_url", "+", "url", "[", "len", "(", "self", ".", "config", ".", "oauth_url", ")", ":", "]", "try", ":", "return", "self", ".", "get_submission", "(", "url", ")", "except", "errors", ".", "Forbidden", ":", "# While the user may be able to submit to a subreddit,", "# that does not guarantee they have read access.", "return", "url" ]
43.277778
0.001255
[ "def submit(self, subreddit, title, text=None, url=None, captcha=None,\n", " save=None, send_replies=None, resubmit=None, **kwargs):\n", " \"\"\"Submit a new link to the given subreddit.\n", "\n", " Accepts either a Subreddit object or a str containing the subreddit's\n", " display name.\n", "\n", " :param resubmit: If True, submit the link even if it has already been\n", " submitted.\n", " :param save: If True the new Submission will be saved after creation.\n", " :param send_replies: If True, inbox replies will be received when\n", " people comment on the submission. If set to None, the default of\n", " True for text posts and False for link posts will be used.\n", "\n", " :returns: The newly created Submission object if the reddit instance\n", " can access it. Otherwise, return the url to the submission.\n", "\n", " This function may result in a captcha challenge. PRAW will\n", " automatically prompt you for a response. See :ref:`handling-captchas`\n", " if you want to manually handle captchas.\n", "\n", " \"\"\"\n", " if isinstance(text, six.string_types) == bool(url):\n", " raise TypeError('One (and only one) of text or url is required!')\n", " data = {'sr': six.text_type(subreddit),\n", " 'title': title}\n", " if text or text == '':\n", " data['kind'] = 'self'\n", " data['text'] = text\n", " else:\n", " data['kind'] = 'link'\n", " data['url'] = url\n", " if captcha:\n", " data.update(captcha)\n", " if resubmit is not None:\n", " data['resubmit'] = resubmit\n", " if save is not None:\n", " data['save'] = save\n", " if send_replies is not None:\n", " data['sendreplies'] = send_replies\n", " result = self.request_json(self.config['submit'], data=data,\n", " retry_on_error=False)\n", " url = result['data']['url']\n", " # Clear the OAuth setting when attempting to fetch the submission\n", " if self._use_oauth:\n", " self._use_oauth = False\n", " if url.startswith(self.config.oauth_url):\n", " url = self.config.api_url + url[len(self.config.oauth_url):]\n", " try:\n", " return self.get_submission(url)\n", " except errors.Forbidden:\n", " # While the user may be able to submit to a subreddit,\n", " # that does not guarantee they have read access.\n", " return url" ]
[ 0, 0.014084507042253521, 0.018867924528301886, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.045454545454545456 ]
54
0.001452
def start(self): """ Find the first data entry and prepare to parse. """ while not self.is_start(self.current_tag): self.next() self.new_entry()
[ "def", "start", "(", "self", ")", ":", "while", "not", "self", ".", "is_start", "(", "self", ".", "current_tag", ")", ":", "self", ".", "next", "(", ")", "self", ".", "new_entry", "(", ")" ]
23.75
0.010152
[ "def start(self):\n", " \"\"\"\n", " Find the first data entry and prepare to parse.\n", " \"\"\"\n", "\n", " while not self.is_start(self.current_tag):\n", " self.next()\n", " self.new_entry()" ]
[ 0, 0.08333333333333333, 0, 0, 0, 0, 0, 0.041666666666666664 ]
8
0.015625
def all_options(self): """Returns the set of all options used in all export entries""" items = chain.from_iterable(hosts.values() for hosts in self.data.values()) return set(chain.from_iterable(items))
[ "def", "all_options", "(", "self", ")", ":", "items", "=", "chain", ".", "from_iterable", "(", "hosts", ".", "values", "(", ")", "for", "hosts", "in", "self", ".", "data", ".", "values", "(", ")", ")", "return", "set", "(", "chain", ".", "from_iterable", "(", "items", ")", ")" ]
55.5
0.013333
[ "def all_options(self):\n", " \"\"\"Returns the set of all options used in all export entries\"\"\"\n", " items = chain.from_iterable(hosts.values() for hosts in self.data.values())\n", " return set(chain.from_iterable(items))" ]
[ 0, 0.013888888888888888, 0.011904761904761904, 0.021739130434782608 ]
4
0.011883
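
The all_options record flattens a two-level mapping with itertools.chain.from_iterable, applied once per nesting level. A standalone sketch of that idiom on fabricated export data in the same shape ({export_path: {host: [options]}}):

from itertools import chain

data = {
    '/srv/share': {'10.0.0.0/24': ['rw', 'sync'], '*': ['ro']},
    '/srv/logs': {'backup-host': ['ro', 'no_root_squash']},
}
items = chain.from_iterable(hosts.values() for hosts in data.values())
print(set(chain.from_iterable(items)))
# {'rw', 'sync', 'ro', 'no_root_squash'} (set ordering varies)
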
def find_visible_elements(driver, selector, by=By.CSS_SELECTOR): """ Finds all WebElements that match a selector and are visible. Similar to webdriver.find_elements. @Params driver - the webdriver object (required) selector - the locator that is used to search the DOM (required) by - the method to search for the locator (Default: By.CSS_SELECTOR) """ elements = driver.find_elements(by=by, value=selector) return [element for element in elements if element.is_displayed()]
[ "def", "find_visible_elements", "(", "driver", ",", "selector", ",", "by", "=", "By", ".", "CSS_SELECTOR", ")", ":", "elements", "=", "driver", ".", "find_elements", "(", "by", "=", "by", ",", "value", "=", "selector", ")", "return", "[", "element", "for", "element", "in", "elements", "if", "element", ".", "is_displayed", "(", ")", "]" ]
45.818182
0.001946
[ "def find_visible_elements(driver, selector, by=By.CSS_SELECTOR):\n", " \"\"\"\n", " Finds all WebElements that match a selector and are visible.\n", " Similar to webdriver.find_elements.\n", " @Params\n", " driver - the webdriver object (required)\n", " selector - the locator that is used to search the DOM (required)\n", " by - the method to search for the locator (Default: By.CSS_SELECTOR)\n", " \"\"\"\n", " elements = driver.find_elements(by=by, value=selector)\n", " return [element for element in elements if element.is_displayed()]" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.014285714285714285 ]
11
0.001299
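
A hedged usage sketch for find_visible_elements. It assumes a local Chrome/chromedriver installation and an arbitrary example URL, neither of which comes from the record; any CSS selector works in place of 'a'.

from selenium import webdriver

driver = webdriver.Chrome()
driver.get('https://example.com')
visible = find_visible_elements(driver, 'a')   # helper from the record above
print(len(visible))
driver.quit()
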
def global_items(self): """Iterate over (key, value) pairs in the ``globals`` table.""" for (k, v) in self.sql('global_dump'): yield (self.unpack(k), self.unpack(v))
[ "def", "global_items", "(", "self", ")", ":", "for", "(", "k", ",", "v", ")", "in", "self", ".", "sql", "(", "'global_dump'", ")", ":", "yield", "(", "self", ".", "unpack", "(", "k", ")", ",", "self", ".", "unpack", "(", "v", ")", ")" ]
47.5
0.010363
[ "def global_items(self):\n", " \"\"\"Iterate over (key, value) pairs in the ``globals`` table.\"\"\"\n", " for (k, v) in self.sql('global_dump'):\n", " yield (self.unpack(k), self.unpack(v))" ]
[ 0, 0.013888888888888888, 0, 0.02 ]
4
0.008472
def parse_java_version(cls, version): """Parses the java version (given a string or Revision object). Handles java version-isms, converting things like '7' -> '1.7' appropriately. Truncates input versions down to just the major and minor numbers (eg, 1.6), ignoring extra versioning information after the second number. :param version: the input version, given as a string or Revision object. :return: the parsed and cleaned version, suitable as a javac -source or -target argument. :rtype: Revision """ conversion = {str(i): '1.{}'.format(i) for i in cls.SUPPORTED_CONVERSION_VERSIONS} if str(version) in conversion: return Revision.lenient(conversion[str(version)]) if not hasattr(version, 'components'): version = Revision.lenient(version) if len(version.components) <= 2: return version return Revision(*version.components[:2])
[ "def", "parse_java_version", "(", "cls", ",", "version", ")", ":", "conversion", "=", "{", "str", "(", "i", ")", ":", "'1.{}'", ".", "format", "(", "i", ")", "for", "i", "in", "cls", ".", "SUPPORTED_CONVERSION_VERSIONS", "}", "if", "str", "(", "version", ")", "in", "conversion", ":", "return", "Revision", ".", "lenient", "(", "conversion", "[", "str", "(", "version", ")", "]", ")", "if", "not", "hasattr", "(", "version", ",", "'components'", ")", ":", "version", "=", "Revision", ".", "lenient", "(", "version", ")", "if", "len", "(", "version", ".", "components", ")", "<=", "2", ":", "return", "version", "return", "Revision", "(", "*", "version", ".", "components", "[", ":", "2", "]", ")" ]
42.142857
0.00884
[ "def parse_java_version(cls, version):\n", " \"\"\"Parses the java version (given a string or Revision object).\n", "\n", " Handles java version-isms, converting things like '7' -> '1.7' appropriately.\n", "\n", " Truncates input versions down to just the major and minor numbers (eg, 1.6), ignoring extra\n", " versioning information after the second number.\n", "\n", " :param version: the input version, given as a string or Revision object.\n", " :return: the parsed and cleaned version, suitable as a javac -source or -target argument.\n", " :rtype: Revision\n", " \"\"\"\n", " conversion = {str(i): '1.{}'.format(i) for i in cls.SUPPORTED_CONVERSION_VERSIONS}\n", " if str(version) in conversion:\n", " return Revision.lenient(conversion[str(version)])\n", "\n", " if not hasattr(version, 'components'):\n", " version = Revision.lenient(version)\n", " if len(version.components) <= 2:\n", " return version\n", " return Revision(*version.components[:2])" ]
[ 0, 0, 0, 0.012195121951219513, 0, 0.010416666666666666, 0, 0, 0, 0.010638297872340425, 0, 0, 0.011494252873563218, 0, 0.017857142857142856, 0, 0, 0.023809523809523808, 0, 0.047619047619047616, 0.022727272727272728 ]
21
0.007465
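
A simplified, self-contained stand-in for parse_java_version showing the intended normalisation without the pants Revision class; the supported conversion range and the sample inputs are assumptions for illustration.

def normalize_java_version(version, supported=range(6, 12)):
    # '7' -> '1.7'; anything else is truncated to its first two components.
    conversion = {str(i): '1.{}'.format(i) for i in supported}
    if str(version) in conversion:
        return conversion[str(version)]
    return '.'.join(str(version).split('.')[:2])

print(normalize_java_version('7'))           # 1.7
print(normalize_java_version('1.8.0_131'))   # 1.8
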
def drop_table(self, table): """ Drop a table from the MyDB context. ## Arguments * `table` (str): The name of the table to drop. """ job_id = self.submit("DROP TABLE %s"%table, context="MYDB") status = self.monitor(job_id) if status[0] != 5: raise Exception("Couldn't drop table %s"%table)
[ "def", "drop_table", "(", "self", ",", "table", ")", ":", "job_id", "=", "self", ".", "submit", "(", "\"DROP TABLE %s\"", "%", "table", ",", "context", "=", "\"MYDB\"", ")", "status", "=", "self", ".", "monitor", "(", "job_id", ")", "if", "status", "[", "0", "]", "!=", "5", ":", "raise", "Exception", "(", "\"Couldn't drop table %s\"", "%", "table", ")" ]
27.461538
0.01084
[ "def drop_table(self, table):\n", " \"\"\"\n", " Drop a table from the MyDB context.\n", "\n", " ## Arguments\n", "\n", " * `table` (str): The name of the table to drop.\n", "\n", " \"\"\"\n", " job_id = self.submit(\"DROP TABLE %s\"%table, context=\"MYDB\")\n", " status = self.monitor(job_id)\n", " if status[0] != 5:\n", " raise Exception(\"Couldn't drop table %s\"%table)" ]
[ 0, 0.08333333333333333, 0, 0, 0, 0, 0, 0, 0, 0.014705882352941176, 0, 0, 0.03389830508474576 ]
13
0.010149
def plfit_lsq(x,y): """ Returns A and B in y=Ax^B http://mathworld.wolfram.com/LeastSquaresFittingPowerLaw.html """ n = len(x) btop = n * (log(x)*log(y)).sum() - (log(x)).sum()*(log(y)).sum() bbottom = n*(log(x)**2).sum() - (log(x).sum())**2 b = btop / bbottom a = ( log(y).sum() - b * log(x).sum() ) / n A = exp(a) return A,b
[ "def", "plfit_lsq", "(", "x", ",", "y", ")", ":", "n", "=", "len", "(", "x", ")", "btop", "=", "n", "*", "(", "log", "(", "x", ")", "*", "log", "(", "y", ")", ")", ".", "sum", "(", ")", "-", "(", "log", "(", "x", ")", ")", ".", "sum", "(", ")", "*", "(", "log", "(", "y", ")", ")", ".", "sum", "(", ")", "bbottom", "=", "n", "*", "(", "log", "(", "x", ")", "**", "2", ")", ".", "sum", "(", ")", "-", "(", "log", "(", "x", ")", ".", "sum", "(", ")", ")", "**", "2", "b", "=", "btop", "/", "bbottom", "a", "=", "(", "log", "(", "y", ")", ".", "sum", "(", ")", "-", "b", "*", "log", "(", "x", ")", ".", "sum", "(", ")", ")", "/", "n", "A", "=", "exp", "(", "a", ")", "return", "A", ",", "b" ]
27.615385
0.013477
[ "def plfit_lsq(x,y):\n", " \"\"\"\n", " Returns A and B in y=Ax^B\n", " http://mathworld.wolfram.com/LeastSquaresFittingPowerLaw.html\n", " \"\"\"\n", " n = len(x)\n", " btop = n * (log(x)*log(y)).sum() - (log(x)).sum()*(log(y)).sum()\n", " bbottom = n*(log(x)**2).sum() - (log(x).sum())**2\n", " b = btop / bbottom\n", " a = ( log(y).sum() - b * log(x).sum() ) / n\n", "\n", " A = exp(a)\n", " return A,b" ]
[ 0.05, 0, 0, 0, 0, 0, 0, 0, 0, 0.041666666666666664, 0, 0, 0.14285714285714285 ]
13
0.01804
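
A numerical check of the least-squares power-law formulas used in plfit_lsq: generate noiseless y = A*x**B data and recover A and B. numpy is assumed to supply the bare log/exp names used in the record, and the function body is repeated so the snippet runs on its own.

import numpy as np
from numpy import log, exp

def plfit_lsq(x, y):
    # y = A * x**B  =>  log(y) = log(A) + B*log(x); solve by least squares.
    n = len(x)
    b_top = n * (log(x) * log(y)).sum() - log(x).sum() * log(y).sum()
    b_bottom = n * (log(x) ** 2).sum() - log(x).sum() ** 2
    b = b_top / b_bottom
    a = (log(y).sum() - b * log(x).sum()) / n
    return exp(a), b

x = np.linspace(1.0, 50.0, 200)
y = 2.5 * x ** -1.7
A, B = plfit_lsq(x, y)
print(round(A, 3), round(B, 3))   # 2.5 -1.7
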
async def validate(state, holdout_glob): """Validate the trained model against holdout games. Args: state: the RL loop State instance. holdout_glob: a glob that matches holdout games. """ if not glob.glob(holdout_glob): print('Glob "{}" didn\'t match any files, skipping validation'.format( holdout_glob)) else: await run( 'python3', 'validate.py', holdout_glob, '--flagfile={}'.format(os.path.join(FLAGS.flags_dir, 'validate.flags')), '--work_dir={}'.format(fsdb.working_dir()))
[ "async", "def", "validate", "(", "state", ",", "holdout_glob", ")", ":", "if", "not", "glob", ".", "glob", "(", "holdout_glob", ")", ":", "print", "(", "'Glob \"{}\" didn\\'t match any files, skipping validation'", ".", "format", "(", "holdout_glob", ")", ")", "else", ":", "await", "run", "(", "'python3'", ",", "'validate.py'", ",", "holdout_glob", ",", "'--flagfile={}'", ".", "format", "(", "os", ".", "path", ".", "join", "(", "FLAGS", ".", "flags_dir", ",", "'validate.flags'", ")", ")", ",", "'--work_dir={}'", ".", "format", "(", "fsdb", ".", "working_dir", "(", ")", ")", ")" ]
32.875
0.009242
[ "async def validate(state, holdout_glob):\n", " \"\"\"Validate the trained model against holdout games.\n", "\n", " Args:\n", " state: the RL loop State instance.\n", " holdout_glob: a glob that matches holdout games.\n", " \"\"\"\n", "\n", " if not glob.glob(holdout_glob):\n", " print('Glob \"{}\" didn\\'t match any files, skipping validation'.format(\n", " holdout_glob))\n", " else:\n", " await run(\n", " 'python3', 'validate.py', holdout_glob,\n", " '--flagfile={}'.format(os.path.join(FLAGS.flags_dir, 'validate.flags')),\n", " '--work_dir={}'.format(fsdb.working_dir()))" ]
[ 0, 0.01818181818181818, 0, 0, 0, 0, 0, 0, 0.029411764705882353, 0, 0, 0.125, 0, 0, 0.012345679012345678, 0.0196078431372549 ]
16
0.012784
def create_pywbem_ssl_context(): """ Create an SSL context based on what is commonly accepted as the required limitations. This code attempts to create the same context for Python 2 and Python 3 except for the ciphers This list is based on what is currently defined in the Python SSL module create_default_context function This includes: * Disallow SSLV2 and SSLV3 * Allow TLSV1 TLSV1.1, TLSV1.2 * No compression * Single DH Use and Single ECDH use cacerts info is set independently so is not part of our context setter. """ if six.PY2: context = SSL.Context('sslv23') # Many of the flags are not in the M2Crypto source so they were taken # from OpenSSL SSL.h module as flags. SSL.context.set_options(SSL.SSL_OP_NO_SSLv2 | 0x02000000 | # OP_NO_SSLV3 0x00020000 | # OP_NO_COMPRESSION 0x00100000 | # OP_SINGLE_DH_USE 0x00400000 | # OP_CIPHER_SERVER_PREFERENCE 0x00080000) # OP_SINGLE_ECDH_USE else: # The choice for the Python SSL module is whether to use the # create_default directly and possibly have different limits depending # on which version of Python you use or to set the attributes # directly based on a currently used SSL context = SSL.create_default_context(purpose=SSL.Purpose.CLIENT_AUTH) # Variable settings per SSL create_default_context. These are what # the function above sets for Python 3.4 # context = SSLContext(PROTOCOL_SSLv23) # context.options |= OP_NO_SSLv2 # context.options |= OP_NO_SSLv3 # context.options |= getattr(SSL, "OP_NO_COMPRESSION", 0) # context.options |= getattr(SSL, "OP_CIPHER_SERVER_PREFERENCE", 0) # context.options |= getattr(SSL, "OP_SINGLE_DH_USE", 0) # context.options |= getattr(SSL, "OP_SINGLE_ECDH_USE", 0) # context.set_ciphers(_RESTRICTED_SERVER_CIPHERS) return context
[ "def", "create_pywbem_ssl_context", "(", ")", ":", "if", "six", ".", "PY2", ":", "context", "=", "SSL", ".", "Context", "(", "'sslv23'", ")", "# Many of the flags are not in the M2Crypto source so they were taken", "# from OpenSSL SSL.h module as flags.", "SSL", ".", "context", ".", "set_options", "(", "SSL", ".", "SSL_OP_NO_SSLv2", "|", "0x02000000", "|", "# OP_NO_SSLV3", "0x00020000", "|", "# OP_NO_COMPRESSION", "0x00100000", "|", "# OP_SINGLE_DH_USE", "0x00400000", "|", "# OP_CIPHER_SERVER_PREFERENCE", "0x00080000", ")", "# OP_SINGLE_ECDH_USE", "else", ":", "# The choice for the Python SSL module is whether to use the", "# create_default directly and possibly have different limits depending", "# on which version of Python you use or to set the attributes", "# directly based on a currently used SSL", "context", "=", "SSL", ".", "create_default_context", "(", "purpose", "=", "SSL", ".", "Purpose", ".", "CLIENT_AUTH", ")", "# Variable settings per SSL create_default_context. These are what", "# the function above sets for Python 3.4", "# context = SSLContext(PROTOCOL_SSLv23)", "# context.options |= OP_NO_SSLv2", "# context.options |= OP_NO_SSLv3", "# context.options |= getattr(SSL, \"OP_NO_COMPRESSION\", 0)", "# context.options |= getattr(SSL, \"OP_CIPHER_SERVER_PREFERENCE\", 0)", "# context.options |= getattr(SSL, \"OP_SINGLE_DH_USE\", 0)", "# context.options |= getattr(SSL, \"OP_SINGLE_ECDH_USE\", 0)", "# context.set_ciphers(_RESTRICTED_SERVER_CIPHERS)", "return", "context" ]
48.113636
0.000463
[ "def create_pywbem_ssl_context():\n", " \"\"\" Create an SSL context based on what is commonly accepted as the\n", " required limitations. This code attempts to create the same context for\n", " Python 2 and Python 3 except for the ciphers\n", " This list is based on what is currently defined in the Python SSL\n", " module create_default_context function\n", " This includes:\n", "\n", " * Disallow SSLV2 and SSLV3\n", " * Allow TLSV1 TLSV1.1, TLSV1.2\n", " * No compression\n", " * Single DH Use and Single ECDH use\n", " cacerts info is set independently so is not part of our context setter.\n", " \"\"\"\n", "\n", " if six.PY2:\n", " context = SSL.Context('sslv23')\n", " # Many of the flags are not in the M2Crypto source so they were taken\n", " # from OpenSSL SSL.h module as flags.\n", " SSL.context.set_options(SSL.SSL_OP_NO_SSLv2 |\n", " 0x02000000 | # OP_NO_SSLV3\n", " 0x00020000 | # OP_NO_COMPRESSION\n", " 0x00100000 | # OP_SINGLE_DH_USE\n", " 0x00400000 | # OP_CIPHER_SERVER_PREFERENCE\n", " 0x00080000) # OP_SINGLE_ECDH_USE\n", " else:\n", " # The choice for the Python SSL module is whether to use the\n", " # create_default directly and possibly have different limits depending\n", " # on which version of Python you use or to set the attributes\n", " # directly based on a currently used SSL\n", " context = SSL.create_default_context(purpose=SSL.Purpose.CLIENT_AUTH)\n", "\n", " # Variable settings per SSL create_default_context. These are what\n", " # the function above sets for Python 3.4\n", " # context = SSLContext(PROTOCOL_SSLv23)\n", " # context.options |= OP_NO_SSLv2\n", " # context.options |= OP_NO_SSLv3\n", " # context.options |= getattr(SSL, \"OP_NO_COMPRESSION\", 0)\n", " # context.options |= getattr(SSL, \"OP_CIPHER_SERVER_PREFERENCE\", 0)\n", " # context.options |= getattr(SSL, \"OP_SINGLE_DH_USE\", 0)\n", " # context.options |= getattr(SSL, \"OP_SINGLE_ECDH_USE\", 0)\n", " # context.set_ciphers(_RESTRICTED_SERVER_CIPHERS)\n", "\n", " return context" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.05555555555555555 ]
44
0.001263
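The function in the record above builds a hardened SSL context for both Python 2 (M2Crypto) and Python 3. For comparison only, a minimal sketch of the same restrictions using nothing but the Python 3 standard-library ssl module is shown below; the option constants are the standard ssl ones, and cacerts handling is still left to the caller as the record's docstring notes.

import ssl

def make_restricted_context():
    # Start from Python's own hardened defaults. Purpose.CLIENT_AUTH means
    # the context is meant for a server that authenticates clients, matching
    # the purpose used in the record above.
    ctx = ssl.create_default_context(purpose=ssl.Purpose.CLIENT_AUTH)
    # Explicitly rule out the legacy protocols and compression; getattr()
    # keeps the sketch working on interpreters that lack a given flag.
    ctx.options |= ssl.OP_NO_SSLv2
    ctx.options |= ssl.OP_NO_SSLv3
    ctx.options |= getattr(ssl, "OP_NO_COMPRESSION", 0)
    ctx.options |= getattr(ssl, "OP_CIPHER_SERVER_PREFERENCE", 0)
    ctx.options |= getattr(ssl, "OP_SINGLE_DH_USE", 0)
    ctx.options |= getattr(ssl, "OP_SINGLE_ECDH_USE", 0)
    return ctx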
def indexer_receiver(sender, json=None, record=None, index=None, **dummy_kwargs): """Connect to before_record_index signal to transform record for ES.""" if index and index.startswith('grants-'): # Generate suggest field suggestions = [ json.get('code'), json.get('acronym'), json.get('title') ] json['suggest'] = { 'input': [s for s in suggestions if s], 'output': json['title'], 'context': { 'funder': [json['funder']['doi']] }, 'payload': { 'id': json['internal_id'], 'legacy_id': (json['code'] if json.get('program') == 'FP7' else json['internal_id']), 'code': json['code'], 'title': json['title'], 'acronym': json.get('acronym'), 'program': json.get('program'), }, } elif index and index.startswith('funders-'): # Generate suggest field suggestions = json.get('acronyms', []) + [json.get('name')] json['suggest'] = { 'input': [s for s in suggestions if s], 'output': json['name'], 'payload': { 'id': json['doi'] }, }
[ "def", "indexer_receiver", "(", "sender", ",", "json", "=", "None", ",", "record", "=", "None", ",", "index", "=", "None", ",", "*", "*", "dummy_kwargs", ")", ":", "if", "index", "and", "index", ".", "startswith", "(", "'grants-'", ")", ":", "# Generate suggest field", "suggestions", "=", "[", "json", ".", "get", "(", "'code'", ")", ",", "json", ".", "get", "(", "'acronym'", ")", ",", "json", ".", "get", "(", "'title'", ")", "]", "json", "[", "'suggest'", "]", "=", "{", "'input'", ":", "[", "s", "for", "s", "in", "suggestions", "if", "s", "]", ",", "'output'", ":", "json", "[", "'title'", "]", ",", "'context'", ":", "{", "'funder'", ":", "[", "json", "[", "'funder'", "]", "[", "'doi'", "]", "]", "}", ",", "'payload'", ":", "{", "'id'", ":", "json", "[", "'internal_id'", "]", ",", "'legacy_id'", ":", "(", "json", "[", "'code'", "]", "if", "json", ".", "get", "(", "'program'", ")", "==", "'FP7'", "else", "json", "[", "'internal_id'", "]", ")", ",", "'code'", ":", "json", "[", "'code'", "]", ",", "'title'", ":", "json", "[", "'title'", "]", ",", "'acronym'", ":", "json", ".", "get", "(", "'acronym'", ")", ",", "'program'", ":", "json", ".", "get", "(", "'program'", ")", ",", "}", ",", "}", "elif", "index", "and", "index", ".", "startswith", "(", "'funders-'", ")", ":", "# Generate suggest field", "suggestions", "=", "json", ".", "get", "(", "'acronyms'", ",", "[", "]", ")", "+", "[", "json", ".", "get", "(", "'name'", ")", "]", "json", "[", "'suggest'", "]", "=", "{", "'input'", ":", "[", "s", "for", "s", "in", "suggestions", "if", "s", "]", ",", "'output'", ":", "json", "[", "'name'", "]", ",", "'payload'", ":", "{", "'id'", ":", "json", "[", "'doi'", "]", "}", ",", "}" ]
36.25
0.000746
[ "def indexer_receiver(sender, json=None, record=None, index=None,\n", " **dummy_kwargs):\n", " \"\"\"Connect to before_record_index signal to transform record for ES.\"\"\"\n", " if index and index.startswith('grants-'):\n", " # Generate suggest field\n", " suggestions = [\n", " json.get('code'),\n", " json.get('acronym'),\n", " json.get('title')\n", " ]\n", " json['suggest'] = {\n", " 'input': [s for s in suggestions if s],\n", " 'output': json['title'],\n", " 'context': {\n", " 'funder': [json['funder']['doi']]\n", " },\n", " 'payload': {\n", " 'id': json['internal_id'],\n", " 'legacy_id': (json['code'] if json.get('program') == 'FP7'\n", " else json['internal_id']),\n", " 'code': json['code'],\n", " 'title': json['title'],\n", " 'acronym': json.get('acronym'),\n", " 'program': json.get('program'),\n", " },\n", " }\n", " elif index and index.startswith('funders-'):\n", " # Generate suggest field\n", " suggestions = json.get('acronyms', []) + [json.get('name')]\n", " json['suggest'] = {\n", " 'input': [s for s in suggestions if s],\n", " 'output': json['name'],\n", " 'payload': {\n", " 'id': json['doi']\n", " },\n", " }" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1111111111111111 ]
36
0.003086
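The receiver in the record above rewrites the JSON payload in place before it is indexed in Elasticsearch. A hedged invocation sketch follows; the grant dictionary and the index name are made up for illustration and are not taken from the record.

grant = {
    'code': '283595',
    'acronym': 'OpenAIRE+',
    'title': '2nd-Generation Open Access Infrastructure',
    'internal_id': '10.13039/501100000780::283595',
    'program': 'FP7',
    'funder': {'doi': '10.13039/501100000780'},
}
indexer_receiver(sender=None, json=grant, index='grants-grant-v1.0.0')
# grant['suggest'] now contains:
#   'input':   ['283595', 'OpenAIRE+', '2nd-Generation Open Access Infrastructure']
#   'output':  the grant title
#   'context': {'funder': ['10.13039/501100000780']}
#   'payload': id/legacy_id/code/title/acronym/program; legacy_id is the code
#              here because program == 'FP7'.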
def guess_filename(filename): """Guess filename""" if osp.isfile(filename): return filename if not filename.endswith('.py'): filename += '.py' for path in [getcwd_or_home()] + sys.path: fname = osp.join(path, filename) if osp.isfile(fname): return fname elif osp.isfile(fname+'.py'): return fname+'.py' elif osp.isfile(fname+'.pyw'): return fname+'.pyw' return filename
[ "def", "guess_filename", "(", "filename", ")", ":", "if", "osp", ".", "isfile", "(", "filename", ")", ":", "return", "filename", "if", "not", "filename", ".", "endswith", "(", "'.py'", ")", ":", "filename", "+=", "'.py'", "for", "path", "in", "[", "getcwd_or_home", "(", ")", "]", "+", "sys", ".", "path", ":", "fname", "=", "osp", ".", "join", "(", "path", ",", "filename", ")", "if", "osp", ".", "isfile", "(", "fname", ")", ":", "return", "fname", "elif", "osp", ".", "isfile", "(", "fname", "+", "'.py'", ")", ":", "return", "fname", "+", "'.py'", "elif", "osp", ".", "isfile", "(", "fname", "+", "'.pyw'", ")", ":", "return", "fname", "+", "'.pyw'", "return", "filename" ]
31.533333
0.002053
[ "def guess_filename(filename):\r\n", " \"\"\"Guess filename\"\"\"\r\n", " if osp.isfile(filename):\r\n", " return filename\r\n", " if not filename.endswith('.py'):\r\n", " filename += '.py'\r\n", " for path in [getcwd_or_home()] + sys.path:\r\n", " fname = osp.join(path, filename)\r\n", " if osp.isfile(fname):\r\n", " return fname\r\n", " elif osp.isfile(fname+'.py'):\r\n", " return fname+'.py'\r\n", " elif osp.isfile(fname+'.pyw'):\r\n", " return fname+'.pyw'\r\n", " return filename" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.05263157894736842 ]
15
0.003509
def segmenttable_get_by_name(xmldoc, name): """ Retrieve the segmentlists whose name equals name. The result is a segmentlistdict indexed by instrument. The output of this function is not coalesced, each segmentlist contains the segments as found in the segment table. NOTE: this is a light-weight version of the .get_by_name() method of the LigolwSegments class intended for use when the full machinery of that class is not required. Considerably less document validation and error checking is performed by this version. Consider using that method instead if your application will be interfacing with the document via that class anyway. """ # # find required tables # def_table = lsctables.SegmentDefTable.get_table(xmldoc) seg_table = lsctables.SegmentTable.get_table(xmldoc) # # segment_def_id --> instrument names mapping but only for # segment_definer entries bearing the requested name # instrument_index = dict((row.segment_def_id, row.instruments) for row in def_table if row.name == name) # # populate result segmentlistdict object from segment_def_map table # and index # instruments = set(instrument for instruments in instrument_index.values() for instrument in instruments) result = segments.segmentlistdict((instrument, segments.segmentlist()) for instrument in instruments) for row in seg_table: if row.segment_def_id in instrument_index: seg = row.segment for instrument in instrument_index[row.segment_def_id]: result[instrument].append(seg) # # done # return result
[ "def", "segmenttable_get_by_name", "(", "xmldoc", ",", "name", ")", ":", "#", "# find required tables", "#", "def_table", "=", "lsctables", ".", "SegmentDefTable", ".", "get_table", "(", "xmldoc", ")", "seg_table", "=", "lsctables", ".", "SegmentTable", ".", "get_table", "(", "xmldoc", ")", "#", "# segment_def_id --> instrument names mapping but only for", "# segment_definer entries bearing the requested name", "#", "instrument_index", "=", "dict", "(", "(", "row", ".", "segment_def_id", ",", "row", ".", "instruments", ")", "for", "row", "in", "def_table", "if", "row", ".", "name", "==", "name", ")", "#", "# populate result segmentlistdict object from segment_def_map table", "# and index", "#", "instruments", "=", "set", "(", "instrument", "for", "instruments", "in", "instrument_index", ".", "values", "(", ")", "for", "instrument", "in", "instruments", ")", "result", "=", "segments", ".", "segmentlistdict", "(", "(", "instrument", ",", "segments", ".", "segmentlist", "(", ")", ")", "for", "instrument", "in", "instruments", ")", "for", "row", "in", "seg_table", ":", "if", "row", ".", "segment_def_id", "in", "instrument_index", ":", "seg", "=", "row", ".", "segment", "for", "instrument", "in", "instrument_index", "[", "row", ".", "segment_def_id", "]", ":", "result", "[", "instrument", "]", ".", "append", "(", "seg", ")", "#", "# done", "#", "return", "result" ]
31.145833
0.026589
[ "def segmenttable_get_by_name(xmldoc, name):\n", "\t\"\"\"\n", "\tRetrieve the segmentlists whose name equals name. The result is a\n", "\tsegmentlistdict indexed by instrument.\n", "\n", "\tThe output of this function is not coalesced, each segmentlist\n", "\tcontains the segments as found in the segment table.\n", "\n", "\tNOTE: this is a light-weight version of the .get_by_name() method\n", "\tof the LigolwSegments class intended for use when the full\n", "\tmachinery of that class is not required. Considerably less\n", "\tdocument validation and error checking is performed by this\n", "\tversion. Consider using that method instead if your application\n", "\twill be interfacing with the document via that class anyway.\n", "\t\"\"\"\n", "\t#\n", "\t# find required tables\n", "\t#\n", "\n", "\tdef_table = lsctables.SegmentDefTable.get_table(xmldoc)\n", "\tseg_table = lsctables.SegmentTable.get_table(xmldoc)\n", "\n", "\t#\n", "\t# segment_def_id --> instrument names mapping but only for\n", "\t# segment_definer entries bearing the requested name\n", "\t#\n", "\n", "\tinstrument_index = dict((row.segment_def_id, row.instruments) for row in def_table if row.name == name)\n", "\n", "\t#\n", "\t# populate result segmentlistdict object from segment_def_map table\n", "\t# and index\n", "\t#\n", "\n", "\tinstruments = set(instrument for instruments in instrument_index.values() for instrument in instruments)\n", "\tresult = segments.segmentlistdict((instrument, segments.segmentlist()) for instrument in instruments)\n", "\n", "\tfor row in seg_table:\n", "\t\tif row.segment_def_id in instrument_index:\n", "\t\t\tseg = row.segment\n", "\t\t\tfor instrument in instrument_index[row.segment_def_id]:\n", "\t\t\t\tresult[instrument].append(seg)\n", "\n", "\t#\n", "\t# done\n", "\t#\n", "\n", "\treturn result" ]
[ 0, 0.2, 0.014705882352941176, 0.025, 0, 0.015625, 0.018518518518518517, 0, 0.014705882352941176, 0.016666666666666666, 0.01639344262295082, 0.01639344262295082, 0.015151515151515152, 0.016129032258064516, 0.2, 0.3333333333333333, 0.041666666666666664, 0.3333333333333333, 0, 0.017543859649122806, 0.018518518518518517, 0, 0.3333333333333333, 0.016666666666666666, 0.018518518518518517, 0.3333333333333333, 0, 0.01904761904761905, 0, 0.3333333333333333, 0.014492753623188406, 0.07692307692307693, 0.3333333333333333, 0, 0.018867924528301886, 0.019417475728155338, 0, 0.043478260869565216, 0.022222222222222223, 0.047619047619047616, 0.01694915254237288, 0.02857142857142857, 0, 0.3333333333333333, 0.125, 0.3333333333333333, 0, 0.14285714285714285 ]
48
0.081757
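The function in the record above deliberately returns an un-coalesced segmentlistdict keyed by instrument. A short usage sketch under stated assumptions: xmldoc is an already-loaded LIGO_LW document containing segment_definer and segment tables, the segment name "VETO_CAT1" is illustrative, and the coalesce()/abs() calls are the usual glue/ligo.lw segments API.

# xmldoc: an already-loaded LIGO_LW document with segment tables.
veto_segs = segmenttable_get_by_name(xmldoc, "VETO_CAT1")
veto_segs.coalesce()              # the function itself does not coalesce
for instrument, seglist in sorted(veto_segs.items()):
    # abs() of a segmentlist is its total duration (livetime) in seconds.
    print(instrument, abs(seglist))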
def get_aggregate_by_id(self, account_id: str) -> AccountAggregate: """ Returns the aggregate for the given id """ account = self.get_by_id(account_id) return self.get_account_aggregate(account)
[ "def", "get_aggregate_by_id", "(", "self", ",", "account_id", ":", "str", ")", "->", "AccountAggregate", ":", "account", "=", "self", ".", "get_by_id", "(", "account_id", ")", "return", "self", ".", "get_account_aggregate", "(", "account", ")" ]
53.75
0.009174
[ "def get_aggregate_by_id(self, account_id: str) -> AccountAggregate:\n", " \"\"\" Returns the aggregate for the given id \"\"\"\n", " account = self.get_by_id(account_id)\n", " return self.get_account_aggregate(account)" ]
[ 0, 0.01818181818181818, 0, 0.02 ]
4
0.009545
def get_genetic_profiles(study_id, profile_filter=None): """Return all the genetic profiles (data sets) for a given study. Genetic profiles are different types of data for a given study. For instance the study 'cellline_ccle_broad' has profiles such as 'cellline_ccle_broad_mutations' for mutations, 'cellline_ccle_broad_CNA' for copy number alterations, etc. Parameters ---------- study_id : str The ID of the cBio study. Example: 'paad_icgc' profile_filter : Optional[str] A string used to filter the profiles to return. Will be one of: - MUTATION - MUTATION_EXTENDED - COPY_NUMBER_ALTERATION - MRNA_EXPRESSION - METHYLATION The genetic profiles can include "mutation", "CNA", "rppa", "methylation", etc. Returns ------- genetic_profiles : list[str] A list of genetic profiles available for the given study. """ data = {'cmd': 'getGeneticProfiles', 'cancer_study_id': study_id} df = send_request(**data) res = _filter_data_frame(df, ['genetic_profile_id'], 'genetic_alteration_type', profile_filter) genetic_profiles = list(res['genetic_profile_id'].values()) return genetic_profiles
[ "def", "get_genetic_profiles", "(", "study_id", ",", "profile_filter", "=", "None", ")", ":", "data", "=", "{", "'cmd'", ":", "'getGeneticProfiles'", ",", "'cancer_study_id'", ":", "study_id", "}", "df", "=", "send_request", "(", "*", "*", "data", ")", "res", "=", "_filter_data_frame", "(", "df", ",", "[", "'genetic_profile_id'", "]", ",", "'genetic_alteration_type'", ",", "profile_filter", ")", "genetic_profiles", "=", "list", "(", "res", "[", "'genetic_profile_id'", "]", ".", "values", "(", ")", ")", "return", "genetic_profiles" ]
35.083333
0.00077
[ "def get_genetic_profiles(study_id, profile_filter=None):\n", " \"\"\"Return all the genetic profiles (data sets) for a given study.\n", "\n", " Genetic profiles are different types of data for a given study. For\n", " instance the study 'cellline_ccle_broad' has profiles such as\n", " 'cellline_ccle_broad_mutations' for mutations, 'cellline_ccle_broad_CNA'\n", " for copy number alterations, etc.\n", "\n", " Parameters\n", " ----------\n", " study_id : str\n", " The ID of the cBio study.\n", " Example: 'paad_icgc'\n", " profile_filter : Optional[str]\n", " A string used to filter the profiles to return.\n", " Will be one of:\n", " - MUTATION\n", " - MUTATION_EXTENDED\n", " - COPY_NUMBER_ALTERATION\n", " - MRNA_EXPRESSION\n", " - METHYLATION\n", " The genetic profiles can include \"mutation\", \"CNA\", \"rppa\",\n", " \"methylation\", etc.\n", "\n", " Returns\n", " -------\n", " genetic_profiles : list[str]\n", " A list of genetic profiles available for the given study.\n", " \"\"\"\n", " data = {'cmd': 'getGeneticProfiles',\n", " 'cancer_study_id': study_id}\n", " df = send_request(**data)\n", " res = _filter_data_frame(df, ['genetic_profile_id'],\n", " 'genetic_alteration_type', profile_filter)\n", " genetic_profiles = list(res['genetic_profile_id'].values())\n", " return genetic_profiles" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.037037037037037035 ]
36
0.001029
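A brief usage sketch of the function in the record above; the study id is the docstring's own example, and the returned profile ids are whatever the cBioPortal web service reports at the time of the call.

# Fetch only the mutation profiles for the 'paad_icgc' study.
mutation_profiles = get_genetic_profiles('paad_icgc', profile_filter='MUTATION')
for profile_id in mutation_profiles:
    print(profile_id)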
def retry_request(method, url, headers=None, payload=None, auth=None, tries=10, initial_interval=5, callback=None): """Retry an HTTP request with linear backoff. Returns the response if the status code is < 400 or waits (try * initial_interval) seconds and retries (up to tries times) if it is not. Parameters ---------- method: `str` Method: `GET`, `PUT`, or `POST` url: `str` URL of HTTP request headers: `dict` HTTP headers to supply. payload: `dict` Payload for request; passed as parameters to `GET`, JSON message body for `PUT`/`POST`. auth: `tuple` Authentication tuple for Basic/Digest/Custom HTTP Auth. tries: `int` Number of attempts to make. Defaults to `10`. initial_interval: `int` Interval between first and second try, and amount of time added before each successive attempt is made. Defaults to `5`. callback : callable A callable (function) object that is called each time a retry is needed. The callable has a keyword argument signature: - ``n``: number of tries completed (integer). - ``remaining``: number of tries remaining (integer). - ``status``: HTTP status of the previous call. - ``content``: body content of the previous call. Returns ------- :class:`requests.Response` The final HTTP Response received. Raises ------ :class:`apikit.BackendError` The `status_code` will be `500`, and the reason `Internal Server Error`. Its `content` will be diagnostic of the last response received. """ method = method.lower() attempt = 1 while True: if method == "get": resp = requests.get(url, headers=headers, params=payload, auth=auth) elif method == "put" or method == "post": resp = requests.put(url, headers=headers, json=payload, auth=auth) else: raise_ise("Bad method %s: must be 'get', 'put', or 'post" % method) if resp.status_code < 400: break delay = initial_interval * attempt if attempt >= tries: raise_ise("Failed to '%s' %s after %d attempts." % (method, url, tries) + " Last response was '%d %s' [%s]" % (resp.status_code, resp.reason, resp.text.strip())) if callback is not None: callback(n=attempt, remaining=tries - attempt, status=resp.status_code, content=resp.text.strip()) time.sleep(delay) attempt += 1 return resp
[ "def", "retry_request", "(", "method", ",", "url", ",", "headers", "=", "None", ",", "payload", "=", "None", ",", "auth", "=", "None", ",", "tries", "=", "10", ",", "initial_interval", "=", "5", ",", "callback", "=", "None", ")", ":", "method", "=", "method", ".", "lower", "(", ")", "attempt", "=", "1", "while", "True", ":", "if", "method", "==", "\"get\"", ":", "resp", "=", "requests", ".", "get", "(", "url", ",", "headers", "=", "headers", ",", "params", "=", "payload", ",", "auth", "=", "auth", ")", "elif", "method", "==", "\"put\"", "or", "method", "==", "\"post\"", ":", "resp", "=", "requests", ".", "put", "(", "url", ",", "headers", "=", "headers", ",", "json", "=", "payload", ",", "auth", "=", "auth", ")", "else", ":", "raise_ise", "(", "\"Bad method %s: must be 'get', 'put', or 'post\"", "%", "method", ")", "if", "resp", ".", "status_code", "<", "400", ":", "break", "delay", "=", "initial_interval", "*", "attempt", "if", "attempt", ">=", "tries", ":", "raise_ise", "(", "\"Failed to '%s' %s after %d attempts.\"", "%", "(", "method", ",", "url", ",", "tries", ")", "+", "\" Last response was '%d %s' [%s]\"", "%", "(", "resp", ".", "status_code", ",", "resp", ".", "reason", ",", "resp", ".", "text", ".", "strip", "(", ")", ")", ")", "if", "callback", "is", "not", "None", ":", "callback", "(", "n", "=", "attempt", ",", "remaining", "=", "tries", "-", "attempt", ",", "status", "=", "resp", ".", "status_code", ",", "content", "=", "resp", ".", "text", ".", "strip", "(", ")", ")", "time", ".", "sleep", "(", "delay", ")", "attempt", "+=", "1", "return", "resp" ]
37.267606
0.000368
[ "def retry_request(method, url, headers=None, payload=None, auth=None,\n", " tries=10, initial_interval=5, callback=None):\n", " \"\"\"Retry an HTTP request with linear backoff. Returns the response if\n", " the status code is < 400 or waits (try * initial_interval) seconds and\n", " retries (up to tries times) if it\n", " is not.\n", "\n", " Parameters\n", " ----------\n", " method: `str`\n", " Method: `GET`, `PUT`, or `POST`\n", " url: `str`\n", " URL of HTTP request\n", " headers: `dict`\n", " HTTP headers to supply.\n", " payload: `dict`\n", " Payload for request; passed as parameters to `GET`, JSON message\n", " body for `PUT`/`POST`.\n", " auth: `tuple`\n", " Authentication tuple for Basic/Digest/Custom HTTP Auth.\n", " tries: `int`\n", " Number of attempts to make. Defaults to `10`.\n", " initial_interval: `int`\n", " Interval between first and second try, and amount of time added\n", " before each successive attempt is made. Defaults to `5`.\n", " callback : callable\n", " A callable (function) object that is called each time a retry is\n", " needed. The callable has a keyword argument signature:\n", "\n", " - ``n``: number of tries completed (integer).\n", " - ``remaining``: number of tries remaining (integer).\n", " - ``status``: HTTP status of the previous call.\n", " - ``content``: body content of the previous call.\n", "\n", " Returns\n", " -------\n", " :class:`requests.Response`\n", " The final HTTP Response received.\n", "\n", " Raises\n", " ------\n", " :class:`apikit.BackendError`\n", " The `status_code` will be `500`, and the reason `Internal Server\n", " Error`. Its `content` will be diagnostic of the last response\n", " received.\n", " \"\"\"\n", " method = method.lower()\n", " attempt = 1\n", " while True:\n", " if method == \"get\":\n", " resp = requests.get(url, headers=headers, params=payload,\n", " auth=auth)\n", " elif method == \"put\" or method == \"post\":\n", " resp = requests.put(url, headers=headers, json=payload, auth=auth)\n", " else:\n", " raise_ise(\"Bad method %s: must be 'get', 'put', or 'post\" %\n", " method)\n", " if resp.status_code < 400:\n", " break\n", " delay = initial_interval * attempt\n", " if attempt >= tries:\n", " raise_ise(\"Failed to '%s' %s after %d attempts.\" %\n", " (method, url, tries) +\n", " \" Last response was '%d %s' [%s]\" %\n", " (resp.status_code, resp.reason, resp.text.strip()))\n", " if callback is not None:\n", " callback(n=attempt, remaining=tries - attempt,\n", " status=resp.status_code, content=resp.text.strip())\n", " time.sleep(delay)\n", " attempt += 1\n", " return resp" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.06666666666666667 ]
71
0.000939
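A hedged usage sketch of the retry helper in the record above. The URL is made up, and the callback just logs each failed attempt so the linear backoff (5 s after the first failure, then 10 s, then 15 s, ...) is visible; if every attempt fails the helper raises via raise_ise as described in the docstring.

def log_retry(n=None, remaining=None, status=None, content=None):
    # Matches the keyword-argument signature the docstring promises.
    print("attempt %d got HTTP %d; %d tries remaining" % (n, status, remaining))

resp = retry_request(
    "get",
    "https://example.invalid/api/status",   # illustrative URL only
    payload={"verbose": "true"},
    tries=4,
    initial_interval=5,
    callback=log_retry,
)
print(resp.status_code)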
def find_offsets(data, ofs): '''find mag offsets by applying Bills "offsets revisited" algorithm on the data This is an implementation of the algorithm from: http://gentlenav.googlecode.com/files/MagnetometerOffsetNullingRevisited.pdf ''' # a limit on the maximum change in each step max_change = args.max_change # the gain factor for the algorithm gain = args.gain data2 = [] for d in data: d = d.copy() + noise() d.x = float(int(d.x + 0.5)) d.y = float(int(d.y + 0.5)) d.z = float(int(d.z + 0.5)) data2.append(d) data = data2 history_idx = 0 mag_history = data[0:args.history] for i in range(args.history, len(data)): B1 = mag_history[history_idx] + ofs B2 = data[i] + ofs diff = B2 - B1 diff_length = diff.length() if diff_length <= args.min_diff: # the mag vector hasn't changed enough - we don't get any # information from this history_idx = (history_idx+1) % args.history continue mag_history[history_idx] = data[i] history_idx = (history_idx+1) % args.history # equation 6 of Bills paper delta = diff * (gain * (B2.length() - B1.length()) / diff_length) # limit the change from any one reading. This is to prevent # single crazy readings from throwing off the offsets for a long # time delta_length = delta.length() if max_change != 0 and delta_length > max_change: delta *= max_change / delta_length # set the new offsets ofs = ofs - delta if args.verbose: print(ofs) return ofs
[ "def", "find_offsets", "(", "data", ",", "ofs", ")", ":", "# a limit on the maximum change in each step", "max_change", "=", "args", ".", "max_change", "# the gain factor for the algorithm", "gain", "=", "args", ".", "gain", "data2", "=", "[", "]", "for", "d", "in", "data", ":", "d", "=", "d", ".", "copy", "(", ")", "+", "noise", "(", ")", "d", ".", "x", "=", "float", "(", "int", "(", "d", ".", "x", "+", "0.5", ")", ")", "d", ".", "y", "=", "float", "(", "int", "(", "d", ".", "y", "+", "0.5", ")", ")", "d", ".", "z", "=", "float", "(", "int", "(", "d", ".", "z", "+", "0.5", ")", ")", "data2", ".", "append", "(", "d", ")", "data", "=", "data2", "history_idx", "=", "0", "mag_history", "=", "data", "[", "0", ":", "args", ".", "history", "]", "for", "i", "in", "range", "(", "args", ".", "history", ",", "len", "(", "data", ")", ")", ":", "B1", "=", "mag_history", "[", "history_idx", "]", "+", "ofs", "B2", "=", "data", "[", "i", "]", "+", "ofs", "diff", "=", "B2", "-", "B1", "diff_length", "=", "diff", ".", "length", "(", ")", "if", "diff_length", "<=", "args", ".", "min_diff", ":", "# the mag vector hasn't changed enough - we don't get any", "# information from this", "history_idx", "=", "(", "history_idx", "+", "1", ")", "%", "args", ".", "history", "continue", "mag_history", "[", "history_idx", "]", "=", "data", "[", "i", "]", "history_idx", "=", "(", "history_idx", "+", "1", ")", "%", "args", ".", "history", "# equation 6 of Bills paper", "delta", "=", "diff", "*", "(", "gain", "*", "(", "B2", ".", "length", "(", ")", "-", "B1", ".", "length", "(", ")", ")", "/", "diff_length", ")", "# limit the change from any one reading. This is to prevent", "# single crazy readings from throwing off the offsets for a long", "# time", "delta_length", "=", "delta", ".", "length", "(", ")", "if", "max_change", "!=", "0", "and", "delta_length", ">", "max_change", ":", "delta", "*=", "max_change", "/", "delta_length", "# set the new offsets", "ofs", "=", "ofs", "-", "delta", "if", "args", ".", "verbose", ":", "print", "(", "ofs", ")", "return", "ofs" ]
29.280702
0.00058
[ "def find_offsets(data, ofs):\n", " '''find mag offsets by applying Bills \"offsets revisited\" algorithm\n", " on the data\n", "\n", " This is an implementation of the algorithm from:\n", " http://gentlenav.googlecode.com/files/MagnetometerOffsetNullingRevisited.pdf\n", " '''\n", "\n", " # a limit on the maximum change in each step\n", " max_change = args.max_change\n", "\n", " # the gain factor for the algorithm\n", " gain = args.gain\n", "\n", " data2 = []\n", " for d in data:\n", " d = d.copy() + noise()\n", " d.x = float(int(d.x + 0.5))\n", " d.y = float(int(d.y + 0.5))\n", " d.z = float(int(d.z + 0.5))\n", " data2.append(d)\n", " data = data2\n", "\n", " history_idx = 0\n", " mag_history = data[0:args.history]\n", "\n", " for i in range(args.history, len(data)):\n", " B1 = mag_history[history_idx] + ofs\n", " B2 = data[i] + ofs\n", "\n", " diff = B2 - B1\n", " diff_length = diff.length()\n", " if diff_length <= args.min_diff:\n", " # the mag vector hasn't changed enough - we don't get any\n", " # information from this\n", " history_idx = (history_idx+1) % args.history\n", " continue\n", "\n", " mag_history[history_idx] = data[i]\n", " history_idx = (history_idx+1) % args.history\n", "\n", " # equation 6 of Bills paper\n", " delta = diff * (gain * (B2.length() - B1.length()) / diff_length)\n", "\n", " # limit the change from any one reading. This is to prevent\n", " # single crazy readings from throwing off the offsets for a long\n", " # time\n", " delta_length = delta.length()\n", " if max_change != 0 and delta_length > max_change:\n", " delta *= max_change / delta_length\n", "\n", " # set the new offsets\n", " ofs = ofs - delta\n", "\n", " if args.verbose:\n", " print(ofs)\n", " return ofs" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.07142857142857142 ]
57
0.001253
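The heart of the record above is the "offsets revisited" update (equation 6 of the paper cited in the docstring): each pair of sufficiently different readings nudges the offset by delta = gain * (|B2| - |B1|) / |B2 - B1| * (B2 - B1), because a well-calibrated sensor should report a constant field magnitude. A minimal, self-contained sketch of that single update step, using plain (x, y, z) tuples instead of the Vector3 type the record relies on:

import math

def _length(v):
    return math.sqrt(sum(c * c for c in v))

def offset_update(B1, B2, ofs, gain=0.01):
    # B1, B2: consecutive raw magnetometer readings; ofs: current offset estimate.
    b1 = tuple(b + o for b, o in zip(B1, ofs))
    b2 = tuple(b + o for b, o in zip(B2, ofs))
    diff = tuple(x - y for x, y in zip(b2, b1))
    diff_length = _length(diff)
    if diff_length == 0.0:
        return ofs                     # identical readings carry no information
    scale = gain * (_length(b2) - _length(b1)) / diff_length
    delta = tuple(d * scale for d in diff)
    # Subtracting delta shrinks the difference between |B1 + ofs| and |B2 + ofs|.
    return tuple(o - d for o, d in zip(ofs, delta))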
def _ReraiseTypeErrorWithFieldName(message_name, field_name): """Re-raise the currently-handled TypeError with the field name added.""" exc = sys.exc_info()[1] if len(exc.args) == 1 and type(exc) is TypeError: # simple TypeError; add field name to exception message exc = TypeError('%s for field %s.%s' % (str(exc), message_name, field_name)) # re-raise possibly-amended exception with original traceback: six.reraise(type(exc), exc, sys.exc_info()[2])
[ "def", "_ReraiseTypeErrorWithFieldName", "(", "message_name", ",", "field_name", ")", ":", "exc", "=", "sys", ".", "exc_info", "(", ")", "[", "1", "]", "if", "len", "(", "exc", ".", "args", ")", "==", "1", "and", "type", "(", "exc", ")", "is", "TypeError", ":", "# simple TypeError; add field name to exception message", "exc", "=", "TypeError", "(", "'%s for field %s.%s'", "%", "(", "str", "(", "exc", ")", ",", "message_name", ",", "field_name", ")", ")", "# re-raise possibly-amended exception with original traceback:", "six", ".", "reraise", "(", "type", "(", "exc", ")", ",", "exc", ",", "sys", ".", "exc_info", "(", ")", "[", "2", "]", ")" ]
51.444444
0.014862
[ "def _ReraiseTypeErrorWithFieldName(message_name, field_name):\n", " \"\"\"Re-raise the currently-handled TypeError with the field name added.\"\"\"\n", " exc = sys.exc_info()[1]\n", " if len(exc.args) == 1 and type(exc) is TypeError:\n", " # simple TypeError; add field name to exception message\n", " exc = TypeError('%s for field %s.%s' % (str(exc), message_name, field_name))\n", "\n", " # re-raise possibly-amended exception with original traceback:\n", " six.reraise(type(exc), exc, sys.exc_info()[2])" ]
[ 0, 0.013157894736842105, 0.038461538461538464, 0.019230769230769232, 0, 0.012345679012345678, 0, 0.015384615384615385, 0.041666666666666664 ]
9
0.015583
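The helper in the record above shows the six.reraise idiom for annotating an in-flight TypeError without losing its traceback. A self-contained sketch of the same pattern follows; the function name and message text are illustrative, not taken from the record's codebase.

import sys
import six

def reraise_with_context(context):
    exc = sys.exc_info()[1]
    if len(exc.args) == 1 and type(exc) is TypeError:
        exc = TypeError('%s (while handling %s)' % (exc, context))
    # six.reraise re-raises the (possibly amended) exception with the
    # original traceback attached, on both Python 2 and Python 3.
    six.reraise(type(exc), exc, sys.exc_info()[2])

try:
    int(None)                          # provokes a simple TypeError
except TypeError:
    reraise_with_context('field spam.eggs')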
def ensure_running(self): '''Make sure that semaphore tracker process is running. This can be run from any process. Usually a child process will use the semaphore created by its parent.''' with self._lock: if self._fd is not None: # semaphore tracker was launched before, is it still running? if self._check_alive(): # => still alive return # => dead, launch it again os.close(self._fd) try: # Clean-up to avoid dangling processes. os.waitpid(self._pid, 0) except OSError: # The process was terminated or is a child from an ancestor # of the current process. pass self._fd = None self._pid = None warnings.warn('semaphore_tracker: process died unexpectedly, ' 'relaunching. Some semaphores might leak.') fds_to_pass = [] try: fds_to_pass.append(sys.stderr.fileno()) except Exception: pass r, w = os.pipe() cmd = 'from {} import main; main({}, {})'.format( main.__module__, r, VERBOSE) try: fds_to_pass.append(r) # process will out live us, so no need to wait on pid exe = spawn.get_executable() args = [exe] + util._args_from_interpreter_flags() # In python 3.3, there is a bug which put `-RRRRR..` instead of # `-R` in args. Replace it to get the correct flags. # See https://github.com/python/cpython/blob/3.3/Lib/subprocess.py#L488 if sys.version_info[:2] <= (3, 3): import re for i in range(1, len(args)): args[i] = re.sub("-R+", "-R", args[i]) args += ['-c', cmd] util.debug("launching Semaphore tracker: {}".format(args)) # bpo-33613: Register a signal mask that will block the # signals. This signal mask will be inherited by the child # that is going to be spawned and will protect the child from a # race condition that can make the child die before it # registers signal handlers for SIGINT and SIGTERM. The mask is # unregistered after spawning the child. try: if _HAVE_SIGMASK: signal.pthread_sigmask(signal.SIG_BLOCK, _IGNORED_SIGNALS) pid = spawnv_passfds(exe, args, fds_to_pass) finally: if _HAVE_SIGMASK: signal.pthread_sigmask(signal.SIG_UNBLOCK, _IGNORED_SIGNALS) except BaseException: os.close(w) raise else: self._fd = w self._pid = pid finally: os.close(r)
[ "def", "ensure_running", "(", "self", ")", ":", "with", "self", ".", "_lock", ":", "if", "self", ".", "_fd", "is", "not", "None", ":", "# semaphore tracker was launched before, is it still running?", "if", "self", ".", "_check_alive", "(", ")", ":", "# => still alive", "return", "# => dead, launch it again", "os", ".", "close", "(", "self", ".", "_fd", ")", "try", ":", "# Clean-up to avoid dangling processes.", "os", ".", "waitpid", "(", "self", ".", "_pid", ",", "0", ")", "except", "OSError", ":", "# The process was terminated or is a child from an ancestor", "# of the current process.", "pass", "self", ".", "_fd", "=", "None", "self", ".", "_pid", "=", "None", "warnings", ".", "warn", "(", "'semaphore_tracker: process died unexpectedly, '", "'relaunching. Some semaphores might leak.'", ")", "fds_to_pass", "=", "[", "]", "try", ":", "fds_to_pass", ".", "append", "(", "sys", ".", "stderr", ".", "fileno", "(", ")", ")", "except", "Exception", ":", "pass", "r", ",", "w", "=", "os", ".", "pipe", "(", ")", "cmd", "=", "'from {} import main; main({}, {})'", ".", "format", "(", "main", ".", "__module__", ",", "r", ",", "VERBOSE", ")", "try", ":", "fds_to_pass", ".", "append", "(", "r", ")", "# process will out live us, so no need to wait on pid", "exe", "=", "spawn", ".", "get_executable", "(", ")", "args", "=", "[", "exe", "]", "+", "util", ".", "_args_from_interpreter_flags", "(", ")", "# In python 3.3, there is a bug which put `-RRRRR..` instead of", "# `-R` in args. Replace it to get the correct flags.", "# See https://github.com/python/cpython/blob/3.3/Lib/subprocess.py#L488", "if", "sys", ".", "version_info", "[", ":", "2", "]", "<=", "(", "3", ",", "3", ")", ":", "import", "re", "for", "i", "in", "range", "(", "1", ",", "len", "(", "args", ")", ")", ":", "args", "[", "i", "]", "=", "re", ".", "sub", "(", "\"-R+\"", ",", "\"-R\"", ",", "args", "[", "i", "]", ")", "args", "+=", "[", "'-c'", ",", "cmd", "]", "util", ".", "debug", "(", "\"launching Semaphore tracker: {}\"", ".", "format", "(", "args", ")", ")", "# bpo-33613: Register a signal mask that will block the", "# signals. This signal mask will be inherited by the child", "# that is going to be spawned and will protect the child from a", "# race condition that can make the child die before it", "# registers signal handlers for SIGINT and SIGTERM. The mask is", "# unregistered after spawning the child.", "try", ":", "if", "_HAVE_SIGMASK", ":", "signal", ".", "pthread_sigmask", "(", "signal", ".", "SIG_BLOCK", ",", "_IGNORED_SIGNALS", ")", "pid", "=", "spawnv_passfds", "(", "exe", ",", "args", ",", "fds_to_pass", ")", "finally", ":", "if", "_HAVE_SIGMASK", ":", "signal", ".", "pthread_sigmask", "(", "signal", ".", "SIG_UNBLOCK", ",", "_IGNORED_SIGNALS", ")", "except", "BaseException", ":", "os", ".", "close", "(", "w", ")", "raise", "else", ":", "self", ".", "_fd", "=", "w", "self", ".", "_pid", "=", "pid", "finally", ":", "os", ".", "close", "(", "r", ")" ]
43.902778
0.000928
[ "def ensure_running(self):\n", " '''Make sure that semaphore tracker process is running.\n", "\n", " This can be run from any process. Usually a child process will use\n", " the semaphore created by its parent.'''\n", " with self._lock:\n", " if self._fd is not None:\n", " # semaphore tracker was launched before, is it still running?\n", " if self._check_alive():\n", " # => still alive\n", " return\n", " # => dead, launch it again\n", " os.close(self._fd)\n", " try:\n", " # Clean-up to avoid dangling processes.\n", " os.waitpid(self._pid, 0)\n", " except OSError:\n", " # The process was terminated or is a child from an ancestor\n", " # of the current process.\n", " pass\n", " self._fd = None\n", " self._pid = None\n", "\n", " warnings.warn('semaphore_tracker: process died unexpectedly, '\n", " 'relaunching. Some semaphores might leak.')\n", "\n", " fds_to_pass = []\n", " try:\n", " fds_to_pass.append(sys.stderr.fileno())\n", " except Exception:\n", " pass\n", "\n", " r, w = os.pipe()\n", " cmd = 'from {} import main; main({}, {})'.format(\n", " main.__module__, r, VERBOSE)\n", " try:\n", " fds_to_pass.append(r)\n", " # process will out live us, so no need to wait on pid\n", " exe = spawn.get_executable()\n", " args = [exe] + util._args_from_interpreter_flags()\n", " # In python 3.3, there is a bug which put `-RRRRR..` instead of\n", " # `-R` in args. Replace it to get the correct flags.\n", " # See https://github.com/python/cpython/blob/3.3/Lib/subprocess.py#L488\n", " if sys.version_info[:2] <= (3, 3):\n", " import re\n", " for i in range(1, len(args)):\n", " args[i] = re.sub(\"-R+\", \"-R\", args[i])\n", " args += ['-c', cmd]\n", " util.debug(\"launching Semaphore tracker: {}\".format(args))\n", " # bpo-33613: Register a signal mask that will block the\n", " # signals. This signal mask will be inherited by the child\n", " # that is going to be spawned and will protect the child from a\n", " # race condition that can make the child die before it\n", " # registers signal handlers for SIGINT and SIGTERM. The mask is\n", " # unregistered after spawning the child.\n", " try:\n", " if _HAVE_SIGMASK:\n", " signal.pthread_sigmask(signal.SIG_BLOCK,\n", " _IGNORED_SIGNALS)\n", " pid = spawnv_passfds(exe, args, fds_to_pass)\n", " finally:\n", " if _HAVE_SIGMASK:\n", " signal.pthread_sigmask(signal.SIG_UNBLOCK,\n", " _IGNORED_SIGNALS)\n", " except BaseException:\n", " os.close(w)\n", " raise\n", " else:\n", " self._fd = w\n", " self._pid = pid\n", " finally:\n", " os.close(r)" ]
[ 0, 0.015625, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011363636363636364, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.037037037037037035 ]
72
0.000889
def client_detect(self, client, starttime, endtime, threshold, threshold_type, trig_int, plotvar, min_gap=None, daylong=False, parallel_process=True, xcorr_func=None, concurrency=None, cores=None, ignore_length=False, group_size=None, debug=0, return_stream=False, full_peaks=False, save_progress=False, process_cores=None, retries=3, **kwargs): """ Detect using a Tribe of templates within a continuous stream. :type client: `obspy.clients.*.Client` :param client: Any obspy client with a dataselect service. :type starttime: :class:`obspy.core.UTCDateTime` :param starttime: Start-time for detections. :type endtime: :class:`obspy.core.UTCDateTime` :param endtime: End-time for detections :type threshold: float :param threshold: Threshold level, if using `threshold_type='MAD'` then this will be the multiple of the median absolute deviation. :type threshold_type: str :param threshold_type: The type of threshold to be used, can be MAD, absolute or av_chan_corr. See Note on thresholding below. :type trig_int: float :param trig_int: Minimum gap between detections in seconds. If multiple detections occur within trig_int of one-another, the one with the highest cross-correlation sum will be selected. :type plotvar: bool :param plotvar: Turn plotting on or off, see warning about plotting below :type min_gap: float :param min_gap: Minimum gap allowed in data - use to remove traces with known issues :type daylong: bool :param daylong: Set to True to use the :func:`eqcorrscan.utils.pre_processing.dayproc` routine, which preforms additional checks and is more efficient for day-long data over other methods. :type parallel_process: bool :param parallel_process: :type xcorr_func: str or callable :param xcorr_func: A str of a registered xcorr function or a callable for implementing a custom xcorr function. For more information see: :func:`eqcorrscan.utils.correlate.register_array_xcorr` :type concurrency: str :param concurrency: The type of concurrency to apply to the xcorr function. Options are 'multithread', 'multiprocess', 'concurrent'. For more details see :func:`eqcorrscan.utils.correlate.get_stream_xcorr` :type cores: int :param cores: Number of workers for processing and detection. :type ignore_length: bool :param ignore_length: If using daylong=True, then dayproc will try check that the data are there for at least 80% of the day, if you don't want this check (which will raise an error if too much data are missing) then set ignore_length=True. This is not recommended! :type group_size: int :param group_size: Maximum number of templates to run at once, use to reduce memory consumption, if unset will use all templates. :type full_peaks: bool :param full_peaks: See `eqcorrscan.utils.findpeaks.find_peaks2_short` :type save_progress: bool :param save_progress: Whether to save the resulting party at every data step or not. Useful for long-running processes. :type process_cores: int :param process_cores: Number of processes to use for pre-processing (if different to `cores`). :type debug: int :param debug: Debug level from 0-5 where five is more output, for debug levels 4 and 5, detections will not be computed in parallel. :type return_stream: bool :param return_stream: Whether to also output the stream downloaded, useful if you plan to use the stream for something else, e.g. lag_calc. :type retries: int :param retries: Number of attempts allowed for downloading - allows for transient server issues. :return: :class:`eqcorrscan.core.match_filter.Party` of Families of detections. .. 
Note:: Detections are not corrected for `pre-pick`, the detection.detect_time corresponds to the beginning of the earliest template channel at detection. .. warning:: Picks included in the output Party.get_catalog() will not be corrected for pre-picks in the template. .. Note:: Ensures that data overlap between loops, which will lead to no missed detections at data start-stop points (see note for :meth:`eqcorrscan.core.match_filter.Tribe.detect` method). This will result in end-time not being strictly honoured, so detections may occur after the end-time set. This is because data must be run in the correct process-length. .. warning:: Plotting within the match-filter routine uses the Agg backend with interactive plotting turned off. This is because the function is designed to work in bulk. If you wish to turn interactive plotting on you must import matplotlib in your script first, when you then import match_filter you will get the warning that this call to matplotlib has no effect, which will mean that match_filter has not changed the plotting behaviour. .. note:: **Thresholding:** **MAD** threshold is calculated as the: .. math:: threshold {\\times} (median(abs(cccsum))) where :math:`cccsum` is the cross-correlation sum for a given template. **absolute** threshold is a true absolute threshold based on the cccsum value. **av_chan_corr** is based on the mean values of single-channel cross-correlations assuming all data are present as required for the template, e.g: .. math:: av\_chan\_corr\_thresh=threshold \\times (cccsum / len(template)) where :math:`template` is a single template from the input and the length is the number of channels within this template. """ party = Party() buff = 300 # Apply a buffer, often data downloaded is not the correct length data_length = max([t.process_length for t in self.templates]) pad = 0 for template in self.templates: max_delay = (template.st.sort(['starttime'])[-1].stats.starttime - template.st.sort(['starttime'])[0].stats.starttime) if max_delay > pad: pad = max_delay download_groups = int(endtime - starttime) / data_length template_channel_ids = [] for template in self.templates: for tr in template.st: if tr.stats.network not in [None, '']: chan_id = (tr.stats.network,) else: chan_id = ('*',) if tr.stats.station not in [None, '']: chan_id += (tr.stats.station,) else: chan_id += ('*',) if tr.stats.location not in [None, '']: chan_id += (tr.stats.location,) else: chan_id += ('*',) if tr.stats.channel not in [None, '']: if len(tr.stats.channel) == 2: chan_id += (tr.stats.channel[0] + '?' 
+ tr.stats.channel[-1],) else: chan_id += (tr.stats.channel,) else: chan_id += ('*',) template_channel_ids.append(chan_id) template_channel_ids = list(set(template_channel_ids)) if return_stream: stream = Stream() if int(download_groups) < download_groups: download_groups = int(download_groups) + 1 else: download_groups = int(download_groups) for i in range(download_groups): bulk_info = [] for chan_id in template_channel_ids: bulk_info.append(( chan_id[0], chan_id[1], chan_id[2], chan_id[3], starttime + (i * data_length) - (pad + buff), starttime + ((i + 1) * data_length) + (pad + buff))) for retry_attempt in range(retries): try: st = client.get_waveforms_bulk(bulk_info) break except Exception as e: print(e) continue else: raise MatchFilterError( "Could not download data after {0} attempts".format( retries)) # Get gaps and remove traces as necessary if min_gap: gaps = st.get_gaps(min_gap=min_gap) if len(gaps) > 0: print("Large gaps in downloaded data") st.merge() gappy_channels = list( set([(gap[0], gap[1], gap[2], gap[3]) for gap in gaps])) _st = Stream() for tr in st: tr_stats = (tr.stats.network, tr.stats.station, tr.stats.location, tr.stats.channel) if tr_stats in gappy_channels: print("Removing gappy channel: %s" % str(tr)) else: _st += tr st = _st st.split() st.merge() st.trim(starttime=starttime + (i * data_length) - pad, endtime=starttime + ((i + 1) * data_length) + pad) for tr in st: if not _check_daylong(tr): st.remove(tr) print("{0} contains more zeros than non-zero, " "removed".format(tr.id)) for tr in st: if tr.stats.endtime - tr.stats.starttime < \ 0.8 * data_length: st.remove(tr) print("{0} is less than 80% of the required length" ", removed".format(tr.id)) if return_stream: stream += st try: party += self.detect( stream=st, threshold=threshold, threshold_type=threshold_type, trig_int=trig_int, plotvar=plotvar, daylong=daylong, parallel_process=parallel_process, xcorr_func=xcorr_func, concurrency=concurrency, cores=cores, ignore_length=ignore_length, group_size=group_size, overlap=None, debug=debug, full_peaks=full_peaks, process_cores=process_cores, **kwargs) if save_progress: party.write("eqcorrscan_temporary_party") except Exception as e: print('Error, routine incomplete, returning incomplete Party') print('Error: %s' % str(e)) if return_stream: return party, stream else: return party for family in party: if family is not None: family.detections = family._uniq().detections if return_stream: return party, stream else: return party
[ "def", "client_detect", "(", "self", ",", "client", ",", "starttime", ",", "endtime", ",", "threshold", ",", "threshold_type", ",", "trig_int", ",", "plotvar", ",", "min_gap", "=", "None", ",", "daylong", "=", "False", ",", "parallel_process", "=", "True", ",", "xcorr_func", "=", "None", ",", "concurrency", "=", "None", ",", "cores", "=", "None", ",", "ignore_length", "=", "False", ",", "group_size", "=", "None", ",", "debug", "=", "0", ",", "return_stream", "=", "False", ",", "full_peaks", "=", "False", ",", "save_progress", "=", "False", ",", "process_cores", "=", "None", ",", "retries", "=", "3", ",", "*", "*", "kwargs", ")", ":", "party", "=", "Party", "(", ")", "buff", "=", "300", "# Apply a buffer, often data downloaded is not the correct length", "data_length", "=", "max", "(", "[", "t", ".", "process_length", "for", "t", "in", "self", ".", "templates", "]", ")", "pad", "=", "0", "for", "template", "in", "self", ".", "templates", ":", "max_delay", "=", "(", "template", ".", "st", ".", "sort", "(", "[", "'starttime'", "]", ")", "[", "-", "1", "]", ".", "stats", ".", "starttime", "-", "template", ".", "st", ".", "sort", "(", "[", "'starttime'", "]", ")", "[", "0", "]", ".", "stats", ".", "starttime", ")", "if", "max_delay", ">", "pad", ":", "pad", "=", "max_delay", "download_groups", "=", "int", "(", "endtime", "-", "starttime", ")", "/", "data_length", "template_channel_ids", "=", "[", "]", "for", "template", "in", "self", ".", "templates", ":", "for", "tr", "in", "template", ".", "st", ":", "if", "tr", ".", "stats", ".", "network", "not", "in", "[", "None", ",", "''", "]", ":", "chan_id", "=", "(", "tr", ".", "stats", ".", "network", ",", ")", "else", ":", "chan_id", "=", "(", "'*'", ",", ")", "if", "tr", ".", "stats", ".", "station", "not", "in", "[", "None", ",", "''", "]", ":", "chan_id", "+=", "(", "tr", ".", "stats", ".", "station", ",", ")", "else", ":", "chan_id", "+=", "(", "'*'", ",", ")", "if", "tr", ".", "stats", ".", "location", "not", "in", "[", "None", ",", "''", "]", ":", "chan_id", "+=", "(", "tr", ".", "stats", ".", "location", ",", ")", "else", ":", "chan_id", "+=", "(", "'*'", ",", ")", "if", "tr", ".", "stats", ".", "channel", "not", "in", "[", "None", ",", "''", "]", ":", "if", "len", "(", "tr", ".", "stats", ".", "channel", ")", "==", "2", ":", "chan_id", "+=", "(", "tr", ".", "stats", ".", "channel", "[", "0", "]", "+", "'?'", "+", "tr", ".", "stats", ".", "channel", "[", "-", "1", "]", ",", ")", "else", ":", "chan_id", "+=", "(", "tr", ".", "stats", ".", "channel", ",", ")", "else", ":", "chan_id", "+=", "(", "'*'", ",", ")", "template_channel_ids", ".", "append", "(", "chan_id", ")", "template_channel_ids", "=", "list", "(", "set", "(", "template_channel_ids", ")", ")", "if", "return_stream", ":", "stream", "=", "Stream", "(", ")", "if", "int", "(", "download_groups", ")", "<", "download_groups", ":", "download_groups", "=", "int", "(", "download_groups", ")", "+", "1", "else", ":", "download_groups", "=", "int", "(", "download_groups", ")", "for", "i", "in", "range", "(", "download_groups", ")", ":", "bulk_info", "=", "[", "]", "for", "chan_id", "in", "template_channel_ids", ":", "bulk_info", ".", "append", "(", "(", "chan_id", "[", "0", "]", ",", "chan_id", "[", "1", "]", ",", "chan_id", "[", "2", "]", ",", "chan_id", "[", "3", "]", ",", "starttime", "+", "(", "i", "*", "data_length", ")", "-", "(", "pad", "+", "buff", ")", ",", "starttime", "+", "(", "(", "i", "+", "1", ")", "*", "data_length", ")", "+", "(", "pad", "+", "buff", ")", ")", ")", "for", 
"retry_attempt", "in", "range", "(", "retries", ")", ":", "try", ":", "st", "=", "client", ".", "get_waveforms_bulk", "(", "bulk_info", ")", "break", "except", "Exception", "as", "e", ":", "print", "(", "e", ")", "continue", "else", ":", "raise", "MatchFilterError", "(", "\"Could not download data after {0} attempts\"", ".", "format", "(", "retries", ")", ")", "# Get gaps and remove traces as necessary", "if", "min_gap", ":", "gaps", "=", "st", ".", "get_gaps", "(", "min_gap", "=", "min_gap", ")", "if", "len", "(", "gaps", ")", ">", "0", ":", "print", "(", "\"Large gaps in downloaded data\"", ")", "st", ".", "merge", "(", ")", "gappy_channels", "=", "list", "(", "set", "(", "[", "(", "gap", "[", "0", "]", ",", "gap", "[", "1", "]", ",", "gap", "[", "2", "]", ",", "gap", "[", "3", "]", ")", "for", "gap", "in", "gaps", "]", ")", ")", "_st", "=", "Stream", "(", ")", "for", "tr", "in", "st", ":", "tr_stats", "=", "(", "tr", ".", "stats", ".", "network", ",", "tr", ".", "stats", ".", "station", ",", "tr", ".", "stats", ".", "location", ",", "tr", ".", "stats", ".", "channel", ")", "if", "tr_stats", "in", "gappy_channels", ":", "print", "(", "\"Removing gappy channel: %s\"", "%", "str", "(", "tr", ")", ")", "else", ":", "_st", "+=", "tr", "st", "=", "_st", "st", ".", "split", "(", ")", "st", ".", "merge", "(", ")", "st", ".", "trim", "(", "starttime", "=", "starttime", "+", "(", "i", "*", "data_length", ")", "-", "pad", ",", "endtime", "=", "starttime", "+", "(", "(", "i", "+", "1", ")", "*", "data_length", ")", "+", "pad", ")", "for", "tr", "in", "st", ":", "if", "not", "_check_daylong", "(", "tr", ")", ":", "st", ".", "remove", "(", "tr", ")", "print", "(", "\"{0} contains more zeros than non-zero, \"", "\"removed\"", ".", "format", "(", "tr", ".", "id", ")", ")", "for", "tr", "in", "st", ":", "if", "tr", ".", "stats", ".", "endtime", "-", "tr", ".", "stats", ".", "starttime", "<", "0.8", "*", "data_length", ":", "st", ".", "remove", "(", "tr", ")", "print", "(", "\"{0} is less than 80% of the required length\"", "\", removed\"", ".", "format", "(", "tr", ".", "id", ")", ")", "if", "return_stream", ":", "stream", "+=", "st", "try", ":", "party", "+=", "self", ".", "detect", "(", "stream", "=", "st", ",", "threshold", "=", "threshold", ",", "threshold_type", "=", "threshold_type", ",", "trig_int", "=", "trig_int", ",", "plotvar", "=", "plotvar", ",", "daylong", "=", "daylong", ",", "parallel_process", "=", "parallel_process", ",", "xcorr_func", "=", "xcorr_func", ",", "concurrency", "=", "concurrency", ",", "cores", "=", "cores", ",", "ignore_length", "=", "ignore_length", ",", "group_size", "=", "group_size", ",", "overlap", "=", "None", ",", "debug", "=", "debug", ",", "full_peaks", "=", "full_peaks", ",", "process_cores", "=", "process_cores", ",", "*", "*", "kwargs", ")", "if", "save_progress", ":", "party", ".", "write", "(", "\"eqcorrscan_temporary_party\"", ")", "except", "Exception", "as", "e", ":", "print", "(", "'Error, routine incomplete, returning incomplete Party'", ")", "print", "(", "'Error: %s'", "%", "str", "(", "e", ")", ")", "if", "return_stream", ":", "return", "party", ",", "stream", "else", ":", "return", "party", "for", "family", "in", "party", ":", "if", "family", "is", "not", "None", ":", "family", ".", "detections", "=", "family", ".", "_uniq", "(", ")", ".", "detections", "if", "return_stream", ":", "return", "party", ",", "stream", "else", ":", "return", "party" ]
44.161049
0.000912
[ "def client_detect(self, client, starttime, endtime, threshold,\n", " threshold_type, trig_int, plotvar, min_gap=None,\n", " daylong=False, parallel_process=True, xcorr_func=None,\n", " concurrency=None, cores=None, ignore_length=False,\n", " group_size=None, debug=0, return_stream=False,\n", " full_peaks=False, save_progress=False,\n", " process_cores=None, retries=3, **kwargs):\n", " \"\"\"\n", " Detect using a Tribe of templates within a continuous stream.\n", "\n", " :type client: `obspy.clients.*.Client`\n", " :param client: Any obspy client with a dataselect service.\n", " :type starttime: :class:`obspy.core.UTCDateTime`\n", " :param starttime: Start-time for detections.\n", " :type endtime: :class:`obspy.core.UTCDateTime`\n", " :param endtime: End-time for detections\n", " :type threshold: float\n", " :param threshold:\n", " Threshold level, if using `threshold_type='MAD'` then this will be\n", " the multiple of the median absolute deviation.\n", " :type threshold_type: str\n", " :param threshold_type:\n", " The type of threshold to be used, can be MAD, absolute or\n", " av_chan_corr. See Note on thresholding below.\n", " :type trig_int: float\n", " :param trig_int:\n", " Minimum gap between detections in seconds. If multiple detections\n", " occur within trig_int of one-another, the one with the highest\n", " cross-correlation sum will be selected.\n", " :type plotvar: bool\n", " :param plotvar:\n", " Turn plotting on or off, see warning about plotting below\n", " :type min_gap: float\n", " :param min_gap:\n", " Minimum gap allowed in data - use to remove traces with known\n", " issues\n", " :type daylong: bool\n", " :param daylong:\n", " Set to True to use the\n", " :func:`eqcorrscan.utils.pre_processing.dayproc` routine, which\n", " preforms additional checks and is more efficient for day-long data\n", " over other methods.\n", " :type parallel_process: bool\n", " :param parallel_process:\n", " :type xcorr_func: str or callable\n", " :param xcorr_func:\n", " A str of a registered xcorr function or a callable for implementing\n", " a custom xcorr function. For more information see:\n", " :func:`eqcorrscan.utils.correlate.register_array_xcorr`\n", " :type concurrency: str\n", " :param concurrency:\n", " The type of concurrency to apply to the xcorr function. Options are\n", " 'multithread', 'multiprocess', 'concurrent'. For more details see\n", " :func:`eqcorrscan.utils.correlate.get_stream_xcorr`\n", " :type cores: int\n", " :param cores: Number of workers for processing and detection.\n", " :type ignore_length: bool\n", " :param ignore_length:\n", " If using daylong=True, then dayproc will try check that the data\n", " are there for at least 80% of the day, if you don't want this check\n", " (which will raise an error if too much data are missing) then set\n", " ignore_length=True. 
This is not recommended!\n", " :type group_size: int\n", " :param group_size:\n", " Maximum number of templates to run at once, use to reduce memory\n", " consumption, if unset will use all templates.\n", " :type full_peaks: bool\n", " :param full_peaks: See `eqcorrscan.utils.findpeaks.find_peaks2_short`\n", " :type save_progress: bool\n", " :param save_progress:\n", " Whether to save the resulting party at every data step or not.\n", " Useful for long-running processes.\n", " :type process_cores: int\n", " :param process_cores:\n", " Number of processes to use for pre-processing (if different to\n", " `cores`).\n", " :type debug: int\n", " :param debug:\n", " Debug level from 0-5 where five is more output, for debug levels\n", " 4 and 5, detections will not be computed in parallel.\n", " :type return_stream: bool\n", " :param return_stream:\n", " Whether to also output the stream downloaded, useful if you plan\n", " to use the stream for something else, e.g. lag_calc.\n", " :type retries: int\n", " :param retries:\n", " Number of attempts allowed for downloading - allows for transient\n", " server issues.\n", "\n", " :return:\n", " :class:`eqcorrscan.core.match_filter.Party` of Families of\n", " detections.\n", "\n", " .. Note::\n", " Detections are not corrected for `pre-pick`, the\n", " detection.detect_time corresponds to the beginning of the earliest\n", " template channel at detection.\n", "\n", " .. warning::\n", " Picks included in the output Party.get_catalog() will not be\n", " corrected for pre-picks in the template.\n", "\n", " .. Note::\n", " Ensures that data overlap between loops, which will lead to no\n", " missed detections at data start-stop points (see note for\n", " :meth:`eqcorrscan.core.match_filter.Tribe.detect` method).\n", " This will result in end-time not being strictly\n", " honoured, so detections may occur after the end-time set. This is\n", " because data must be run in the correct process-length.\n", "\n", " .. warning::\n", " Plotting within the match-filter routine uses the Agg backend\n", " with interactive plotting turned off. This is because the function\n", " is designed to work in bulk. If you wish to turn interactive\n", " plotting on you must import matplotlib in your script first,\n", " when you then import match_filter you will get the warning that\n", " this call to matplotlib has no effect, which will mean that\n", " match_filter has not changed the plotting behaviour.\n", "\n", " .. note::\n", " **Thresholding:**\n", "\n", " **MAD** threshold is calculated as the:\n", "\n", " .. math::\n", "\n", " threshold {\\\\times} (median(abs(cccsum)))\n", "\n", " where :math:`cccsum` is the cross-correlation sum for a given\n", " template.\n", "\n", " **absolute** threshold is a true absolute threshold based on the\n", " cccsum value.\n", "\n", " **av_chan_corr** is based on the mean values of single-channel\n", " cross-correlations assuming all data are present as required for\n", " the template, e.g:\n", "\n", " .. 
math::\n", "\n", " av\\_chan\\_corr\\_thresh=threshold \\\\times (cccsum /\n", " len(template))\n", "\n", " where :math:`template` is a single template from the input and the\n", " length is the number of channels within this template.\n", " \"\"\"\n", " party = Party()\n", " buff = 300\n", " # Apply a buffer, often data downloaded is not the correct length\n", " data_length = max([t.process_length for t in self.templates])\n", " pad = 0\n", " for template in self.templates:\n", " max_delay = (template.st.sort(['starttime'])[-1].stats.starttime -\n", " template.st.sort(['starttime'])[0].stats.starttime)\n", " if max_delay > pad:\n", " pad = max_delay\n", " download_groups = int(endtime - starttime) / data_length\n", " template_channel_ids = []\n", " for template in self.templates:\n", " for tr in template.st:\n", " if tr.stats.network not in [None, '']:\n", " chan_id = (tr.stats.network,)\n", " else:\n", " chan_id = ('*',)\n", " if tr.stats.station not in [None, '']:\n", " chan_id += (tr.stats.station,)\n", " else:\n", " chan_id += ('*',)\n", " if tr.stats.location not in [None, '']:\n", " chan_id += (tr.stats.location,)\n", " else:\n", " chan_id += ('*',)\n", " if tr.stats.channel not in [None, '']:\n", " if len(tr.stats.channel) == 2:\n", " chan_id += (tr.stats.channel[0] + '?' +\n", " tr.stats.channel[-1],)\n", " else:\n", " chan_id += (tr.stats.channel,)\n", " else:\n", " chan_id += ('*',)\n", " template_channel_ids.append(chan_id)\n", " template_channel_ids = list(set(template_channel_ids))\n", " if return_stream:\n", " stream = Stream()\n", " if int(download_groups) < download_groups:\n", " download_groups = int(download_groups) + 1\n", " else:\n", " download_groups = int(download_groups)\n", " for i in range(download_groups):\n", " bulk_info = []\n", " for chan_id in template_channel_ids:\n", " bulk_info.append((\n", " chan_id[0], chan_id[1], chan_id[2], chan_id[3],\n", " starttime + (i * data_length) - (pad + buff),\n", " starttime + ((i + 1) * data_length) + (pad + buff)))\n", " for retry_attempt in range(retries):\n", " try:\n", " st = client.get_waveforms_bulk(bulk_info)\n", " break\n", " except Exception as e:\n", " print(e)\n", " continue\n", " else:\n", " raise MatchFilterError(\n", " \"Could not download data after {0} attempts\".format(\n", " retries))\n", " # Get gaps and remove traces as necessary\n", " if min_gap:\n", " gaps = st.get_gaps(min_gap=min_gap)\n", " if len(gaps) > 0:\n", " print(\"Large gaps in downloaded data\")\n", " st.merge()\n", " gappy_channels = list(\n", " set([(gap[0], gap[1], gap[2], gap[3])\n", " for gap in gaps]))\n", " _st = Stream()\n", " for tr in st:\n", " tr_stats = (tr.stats.network, tr.stats.station,\n", " tr.stats.location, tr.stats.channel)\n", " if tr_stats in gappy_channels:\n", " print(\"Removing gappy channel: %s\" % str(tr))\n", " else:\n", " _st += tr\n", " st = _st\n", " st.split()\n", " st.merge()\n", " st.trim(starttime=starttime + (i * data_length) - pad,\n", " endtime=starttime + ((i + 1) * data_length) + pad)\n", " for tr in st:\n", " if not _check_daylong(tr):\n", " st.remove(tr)\n", " print(\"{0} contains more zeros than non-zero, \"\n", " \"removed\".format(tr.id))\n", " for tr in st:\n", " if tr.stats.endtime - tr.stats.starttime < \\\n", " 0.8 * data_length:\n", " st.remove(tr)\n", " print(\"{0} is less than 80% of the required length\"\n", " \", removed\".format(tr.id))\n", " if return_stream:\n", " stream += st\n", " try:\n", " party += self.detect(\n", " stream=st, threshold=threshold,\n", " 
threshold_type=threshold_type, trig_int=trig_int,\n", " plotvar=plotvar, daylong=daylong,\n", " parallel_process=parallel_process, xcorr_func=xcorr_func,\n", " concurrency=concurrency, cores=cores,\n", " ignore_length=ignore_length, group_size=group_size,\n", " overlap=None, debug=debug, full_peaks=full_peaks,\n", " process_cores=process_cores, **kwargs)\n", " if save_progress:\n", " party.write(\"eqcorrscan_temporary_party\")\n", " except Exception as e:\n", " print('Error, routine incomplete, returning incomplete Party')\n", " print('Error: %s' % str(e))\n", " if return_stream:\n", " return party, stream\n", " else:\n", " return party\n", " for family in party:\n", " if family is not None:\n", " family.detections = family._uniq().detections\n", " if return_stream:\n", " return party, stream\n", " else:\n", " return party" ]
[ 0, 0.014084507042253521, 0.012987012987012988, 0.0136986301369863, 0.014492753623188406, 0.01639344262295082, 0.015625, 0.08333333333333333, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.04477611940298507, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.041666666666666664 ]
267
0.000963
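The client_detect record above defines its MAD threshold as threshold × median(abs(cccsum)). A minimal sketch of that computation, assuming numpy and a purely illustrative cross-correlation sum array (neither is part of the original record):

import numpy as np

cccsum = np.array([0.02, -0.01, 0.35, 0.03, -0.02, 0.01])  # toy cross-correlation sums
threshold = 8.0                                            # multiple of the MAD, as in the docstring
mad_threshold = threshold * np.median(np.abs(cccsum))      # 8.0 * 0.02 = 0.16
detections = np.where(np.abs(cccsum) >= mad_threshold)[0]  # only index 2 (0.35) exceeds it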
def has_split(self, split_name): """ Checks whether or not the split with the given name exists. Parameters ---------- split_name : str name of the split """ if os.path.exists(os.path.join(self.split_dir, split_name)): return True return False
[ "def", "has_split", "(", "self", ",", "split_name", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "self", ".", "split_dir", ",", "split_name", ")", ")", ":", "return", "True", "return", "False" ]
29.272727
0.009036
[ "def has_split(self, split_name):\n", " \"\"\" Checks whether or not the split with the given name exists.\n", " \n", " Parameters\n", " ----------\n", " split_name : str\n", " name of the split\n", " \"\"\"\n", " if os.path.exists(os.path.join(self.split_dir, split_name)):\n", " return True\n", " return False" ]
[ 0, 0.013888888888888888, 0.1111111111111111, 0, 0, 0, 0, 0, 0, 0, 0.05 ]
11
0.015909
def register_view(self, view): """Creates treeview columns, and connects missing signals""" # if stand-alone, connects the window delete event to # kill the loop if self.view.is_stand_alone(): import gtk self.view.get_top_widget().connect('delete-event', lambda w,e: gtk.main_quit()) pass return
[ "def", "register_view", "(", "self", ",", "view", ")", ":", "# if stand-alone, connects the window delete event to", "# kill the loop", "if", "self", ".", "view", ".", "is_stand_alone", "(", ")", ":", "import", "gtk", "self", ".", "view", ".", "get_top_widget", "(", ")", ".", "connect", "(", "'delete-event'", ",", "lambda", "w", ",", "e", ":", "gtk", ".", "main_quit", "(", ")", ")", "pass", "return" ]
32.166667
0.012594
[ "def register_view(self, view):\n", " \"\"\"Creates treeview columns, and connects missing signals\"\"\"\n", "\n", " # if stand-alone, connects the window delete event to\n", " # kill the loop\n", " if self.view.is_stand_alone():\n", " import gtk\n", " self.view.get_top_widget().connect('delete-event',\n", " lambda w,e: gtk.main_quit())\n", " pass\n", " \n", " return" ]
[ 0, 0.014705882352941176, 0, 0, 0, 0, 0, 0, 0.043478260869565216, 0, 0.1111111111111111, 0.07142857142857142 ]
12
0.02006
def setCentralWidget(self, widget): """ Sets the central widget for this button. :param widget | <QWidget> """ self.setEnabled(widget is not None) self._popupWidget.setCentralWidget(widget)
[ "def", "setCentralWidget", "(", "self", ",", "widget", ")", ":", "self", ".", "setEnabled", "(", "widget", "is", "not", "None", ")", "self", ".", "_popupWidget", ".", "setCentralWidget", "(", "widget", ")" ]
31.375
0.011628
[ "def setCentralWidget(self, widget):\r\n", " \"\"\"\r\n", " Sets the central widget for this button.\r\n", " \r\n", " :param widget | <QWidget>\r\n", " \"\"\"\r\n", " self.setEnabled(widget is not None)\r\n", " self._popupWidget.setCentralWidget(widget)" ]
[ 0, 0.07692307692307693, 0, 0.1, 0, 0, 0, 0.02 ]
8
0.024615
def generator(self) -> Iterator[str]: """ Create a generator that iterates the whole content of the file or string. :return: An iterator iterating the lines of the text stream, separated by ``'\\n'`` or ``'\\r'``. """ stream = self.stream # In case that ``self.stream`` is changed. stream.seek(0) for line in stream: yield line
[ "def", "generator", "(", "self", ")", "->", "Iterator", "[", "str", "]", ":", "stream", "=", "self", ".", "stream", "# In case that ``self.stream`` is changed.", "stream", ".", "seek", "(", "0", ")", "for", "line", "in", "stream", ":", "yield", "line" ]
38.7
0.010101
[ "def generator(self) -> Iterator[str]:\n", " \"\"\"\n", " Create a generator that iterates the whole content of the file or string.\n", "\n", " :return: An iterator iterating the lines of the text stream, separated by ``'\\\\n'`` or ``'\\\\r'``.\n", " \"\"\"\n", " stream = self.stream # In case that ``self.stream`` is changed.\n", " stream.seek(0)\n", " for line in stream:\n", " yield line" ]
[ 0, 0.08333333333333333, 0.012345679012345678, 0, 0.009433962264150943, 0, 0, 0, 0, 0.045454545454545456 ]
10
0.015057
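A small usage sketch of the pattern in the generator record above (rewind, then iterate line by line), using io.StringIO as a stand-in for the wrapped text stream:

import io

stream = io.StringIO("first\nsecond\nthird\n")
stream.read()                        # pretend something already consumed the stream
stream.seek(0)                       # rewinding lets the generator yield every line again
lines = [line for line in stream]    # ['first\n', 'second\n', 'third\n']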
def _make_section_node(self, template, tag_type, tag_key, parsed_section, section_start_index, section_end_index): """ Create and return a section node for the parse tree. """ if tag_type == '#': return _SectionNode(tag_key, parsed_section, self._delimiters, template, section_start_index, section_end_index) if tag_type == '^': return _InvertedNode(tag_key, parsed_section) raise Exception("Invalid symbol for section tag: %s" % repr(tag_type))
[ "def", "_make_section_node", "(", "self", ",", "template", ",", "tag_type", ",", "tag_key", ",", "parsed_section", ",", "section_start_index", ",", "section_end_index", ")", ":", "if", "tag_type", "==", "'#'", ":", "return", "_SectionNode", "(", "tag_key", ",", "parsed_section", ",", "self", ".", "_delimiters", ",", "template", ",", "section_start_index", ",", "section_end_index", ")", "if", "tag_type", "==", "'^'", ":", "return", "_InvertedNode", "(", "tag_key", ",", "parsed_section", ")", "raise", "Exception", "(", "\"Invalid symbol for section tag: %s\"", "%", "repr", "(", "tag_type", ")", ")" ]
40.357143
0.008651
[ "def _make_section_node(self, template, tag_type, tag_key, parsed_section,\n", " section_start_index, section_end_index):\n", " \"\"\"\n", " Create and return a section node for the parse tree.\n", "\n", " \"\"\"\n", " if tag_type == '#':\n", " return _SectionNode(tag_key, parsed_section, self._delimiters,\n", " template, section_start_index, section_end_index)\n", "\n", " if tag_type == '^':\n", " return _InvertedNode(tag_key, parsed_section)\n", "\n", " raise Exception(\"Invalid symbol for section tag: %s\" % repr(tag_type))" ]
[ 0, 0.014705882352941176, 0.08333333333333333, 0, 0, 0, 0, 0, 0.024691358024691357, 0, 0, 0, 0, 0.01282051282051282 ]
14
0.009682
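In Mustache templates, '#' opens an ordinary section and '^' an inverted one, which is exactly the tag_type dispatch in the record above. A hedged illustration through the public pystache API (assuming pystache is installed; the template and context are made up):

import pystache

template = "{{#items}}<li>{{name}}</li>{{/items}}{{^items}}(no items){{/items}}"
print(pystache.render(template, {"items": [{"name": "a"}, {"name": "b"}]}))  # <li>a</li><li>b</li>
print(pystache.render(template, {"items": []}))                              # (no items)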
def configs_for_writer(writer=None, ppp_config_dir=None): """Generator of writer configuration files for one or more writers Args: writer (Optional[str]): Yield configs only for this writer ppp_config_dir (Optional[str]): Additional configuration directory to search for writer configuration files. Returns: Generator of lists of configuration files """ search_paths = (ppp_config_dir,) if ppp_config_dir else tuple() if writer is not None: if not isinstance(writer, (list, tuple)): writer = [writer] # given a config filename or writer name config_files = [w if w.endswith('.yaml') else w + '.yaml' for w in writer] else: writer_configs = glob_config(os.path.join('writers', '*.yaml'), *search_paths) config_files = set(writer_configs) for config_file in config_files: config_basename = os.path.basename(config_file) writer_configs = config_search_paths( os.path.join("writers", config_basename), *search_paths) if not writer_configs: LOG.warning("No writer configs found for '%s'", writer) continue yield writer_configs
[ "def", "configs_for_writer", "(", "writer", "=", "None", ",", "ppp_config_dir", "=", "None", ")", ":", "search_paths", "=", "(", "ppp_config_dir", ",", ")", "if", "ppp_config_dir", "else", "tuple", "(", ")", "if", "writer", "is", "not", "None", ":", "if", "not", "isinstance", "(", "writer", ",", "(", "list", ",", "tuple", ")", ")", ":", "writer", "=", "[", "writer", "]", "# given a config filename or writer name", "config_files", "=", "[", "w", "if", "w", ".", "endswith", "(", "'.yaml'", ")", "else", "w", "+", "'.yaml'", "for", "w", "in", "writer", "]", "else", ":", "writer_configs", "=", "glob_config", "(", "os", ".", "path", ".", "join", "(", "'writers'", ",", "'*.yaml'", ")", ",", "*", "search_paths", ")", "config_files", "=", "set", "(", "writer_configs", ")", "for", "config_file", "in", "config_files", ":", "config_basename", "=", "os", ".", "path", ".", "basename", "(", "config_file", ")", "writer_configs", "=", "config_search_paths", "(", "os", ".", "path", ".", "join", "(", "\"writers\"", ",", "config_basename", ")", ",", "*", "search_paths", ")", "if", "not", "writer_configs", ":", "LOG", ".", "warning", "(", "\"No writer configs found for '%s'\"", ",", "writer", ")", "continue", "yield", "writer_configs" ]
37.90625
0.001608
[ "def configs_for_writer(writer=None, ppp_config_dir=None):\n", " \"\"\"Generator of writer configuration files for one or more writers\n", "\n", " Args:\n", " writer (Optional[str]): Yield configs only for this writer\n", " ppp_config_dir (Optional[str]): Additional configuration directory\n", " to search for writer configuration files.\n", "\n", " Returns: Generator of lists of configuration files\n", "\n", " \"\"\"\n", " search_paths = (ppp_config_dir,) if ppp_config_dir else tuple()\n", " if writer is not None:\n", " if not isinstance(writer, (list, tuple)):\n", " writer = [writer]\n", " # given a config filename or writer name\n", " config_files = [w if w.endswith('.yaml') else w + '.yaml' for w in writer]\n", " else:\n", " writer_configs = glob_config(os.path.join('writers', '*.yaml'),\n", " *search_paths)\n", " config_files = set(writer_configs)\n", "\n", " for config_file in config_files:\n", " config_basename = os.path.basename(config_file)\n", " writer_configs = config_search_paths(\n", " os.path.join(\"writers\", config_basename), *search_paths)\n", "\n", " if not writer_configs:\n", " LOG.warning(\"No writer configs found for '%s'\", writer)\n", " continue\n", "\n", " yield writer_configs" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012048192771084338, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.03571428571428571 ]
32
0.001493
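A hedged usage sketch for configs_for_writer above, assuming a Satpy-style setup where a 'geotiff' writer YAML actually exists on the search path:

# Each yielded item is a list of configuration file paths for one writer.
for config_files in configs_for_writer(writer='geotiff'):
    print(config_files)

# Without the writer argument the generator walks every writer YAML it can find.
all_writer_configs = list(configs_for_writer())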
def default_links_factory_with_additional(additional_links): """Generate a links generation factory with the specified additional links. :param additional_links: A dict of link names to links to be added to the returned object. :returns: A link generation factory. """ def factory(pid, **kwargs): links = default_links_factory(pid) for link in additional_links: links[link] = additional_links[link].format(pid=pid, scheme=request.scheme, host=request.host) return links return factory
[ "def", "default_links_factory_with_additional", "(", "additional_links", ")", ":", "def", "factory", "(", "pid", ",", "*", "*", "kwargs", ")", ":", "links", "=", "default_links_factory", "(", "pid", ")", "for", "link", "in", "additional_links", ":", "links", "[", "link", "]", "=", "additional_links", "[", "link", "]", ".", "format", "(", "pid", "=", "pid", ",", "scheme", "=", "request", ".", "scheme", ",", "host", "=", "request", ".", "host", ")", "return", "links", "return", "factory" ]
40.875
0.001495
[ "def default_links_factory_with_additional(additional_links):\n", " \"\"\"Generate a links generation factory with the specified additional links.\n", "\n", " :param additional_links: A dict of link names to links to be added to the\n", " returned object.\n", " :returns: A link generation factory.\n", " \"\"\"\n", " def factory(pid, **kwargs):\n", " links = default_links_factory(pid)\n", " for link in additional_links:\n", " links[link] = additional_links[link].format(pid=pid,\n", " scheme=request.scheme,\n", " host=request.host)\n", " return links\n", "\n", " return factory" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.05555555555555555 ]
16
0.003472
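A usage sketch for the links factory above; the link name, URL template and pid attribute are illustrative only, while pid, scheme and host are the keys the format call actually supplies:

links_factory = default_links_factory_with_additional(
    {"files": "{scheme}://{host}/api/records/{pid.pid_value}/files"}  # hypothetical template
)
# Inside a Flask request context the factory resolves scheme and host, e.g.:
# links_factory(pid)["files"] -> "https://example.org/api/records/1234/files"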
def col (loc,strg): """Returns current column within a string, counting newlines as line separators. The first column is number 1. Note: the default parsing behavior is to expand tabs in the input string before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information on parsing strings containing C{<TAB>}s, and suggested methods to maintain a consistent view of the parsed string, the parse location, and line and column positions within the parsed string. """ s = strg return 1 if loc<len(s) and s[loc] == '\n' else loc - s.rfind("\n", 0, loc)
[ "def", "col", "(", "loc", ",", "strg", ")", ":", "s", "=", "strg", "return", "1", "if", "loc", "<", "len", "(", "s", ")", "and", "s", "[", "loc", "]", "==", "'\\n'", "else", "loc", "-", "s", ".", "rfind", "(", "\"\\n\"", ",", "0", ",", "loc", ")" ]
52.25
0.010972
[ "def col (loc,strg):\n", " \"\"\"Returns current column within a string, counting newlines as line separators.\n", " The first column is number 1.\n", "\n", " Note: the default parsing behavior is to expand tabs in the input string\n", " before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information\n", " on parsing strings containing C{<TAB>}s, and suggested methods to maintain a\n", " consistent view of the parsed string, the parse location, and line and column\n", " positions within the parsed string.\n", " \"\"\"\n", " s = strg\n", " return 1 if loc<len(s) and s[loc] == '\\n' else loc - s.rfind(\"\\n\", 0, loc)" ]
[ 0.1, 0.011764705882352941, 0, 0, 0, 0.008, 0, 0.012345679012345678, 0, 0, 0, 0.02564102564102564 ]
12
0.013146
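A worked example of the col helper above on a two-line string (column numbering starts at 1):

s = "abc\ndef"
col(0, s)   # 1 -> 'a' is the first column of line 1
col(3, s)   # 1 -> the newline character itself is reported as column 1
col(5, s)   # 2 -> 'e' is the second column of line 2 (5 - rfind('\n') == 5 - 3)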
def find_all(soup, name=None, attrs=None, recursive=True, text=None, limit=None, **kwargs): """The `find` and `find_all` methods of `BeautifulSoup` don't handle the `text` parameter combined with other parameters. This is necessary for e.g. finding links containing a string or pattern. This method first searches by text content, and then by the standard BeautifulSoup arguments. """ if text is None: return soup.find_all( name, attrs or {}, recursive, text, limit, **kwargs ) if isinstance(text, string_types): text = re.compile(re.escape(text), re.I) tags = soup.find_all( name, attrs or {}, recursive, **kwargs ) rv = [] for tag in tags: if match_text(text, tag): rv.append(tag) if limit is not None and len(rv) >= limit: break return rv
[ "def", "find_all", "(", "soup", ",", "name", "=", "None", ",", "attrs", "=", "None", ",", "recursive", "=", "True", ",", "text", "=", "None", ",", "limit", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "text", "is", "None", ":", "return", "soup", ".", "find_all", "(", "name", ",", "attrs", "or", "{", "}", ",", "recursive", ",", "text", ",", "limit", ",", "*", "*", "kwargs", ")", "if", "isinstance", "(", "text", ",", "string_types", ")", ":", "text", "=", "re", ".", "compile", "(", "re", ".", "escape", "(", "text", ")", ",", "re", ".", "I", ")", "tags", "=", "soup", ".", "find_all", "(", "name", ",", "attrs", "or", "{", "}", ",", "recursive", ",", "*", "*", "kwargs", ")", "rv", "=", "[", "]", "for", "tag", "in", "tags", ":", "if", "match_text", "(", "text", ",", "tag", ")", ":", "rv", ".", "append", "(", "tag", ")", "if", "limit", "is", "not", "None", "and", "len", "(", "rv", ")", ">=", "limit", ":", "break", "return", "rv" ]
36.041667
0.002252
[ "def find_all(soup, name=None, attrs=None, recursive=True, text=None,\n", " limit=None, **kwargs):\n", " \"\"\"The `find` and `find_all` methods of `BeautifulSoup` don't handle the\n", " `text` parameter combined with other parameters. This is necessary for\n", " e.g. finding links containing a string or pattern. This method first\n", " searches by text content, and then by the standard BeautifulSoup arguments.\n", "\n", " \"\"\"\n", " if text is None:\n", " return soup.find_all(\n", " name, attrs or {}, recursive, text, limit, **kwargs\n", " )\n", " if isinstance(text, string_types):\n", " text = re.compile(re.escape(text), re.I)\n", " tags = soup.find_all(\n", " name, attrs or {}, recursive, **kwargs\n", " )\n", " rv = []\n", " for tag in tags:\n", " if match_text(text, tag):\n", " rv.append(tag)\n", " if limit is not None and len(rv) >= limit:\n", " break\n", " return rv" ]
[ 0, 0.02702702702702703, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.07692307692307693 ]
24
0.004331
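A hedged usage sketch of the find_all helper above with a tiny BeautifulSoup document; the markup is made up, and match_text is assumed to live in the same module as the helper:

from bs4 import BeautifulSoup

soup = BeautifulSoup('<a href="/p2">Next page</a><a href="/p1">Previous</a>', 'html.parser')
# A plain string is compiled to a case-insensitive pattern, so this is a substring match.
next_links = find_all(soup, name="a", text="next")   # expected: only the "/p2" anchor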
def move_selection(reverse=False): """ Goes through the list of gunicorns, setting the selected as the one after the currently selected. """ global selected_pid if selected_pid not in gunicorns: selected_pid = None found = False pids = sorted(gunicorns.keys(), reverse=reverse) # Iterate items twice to enable wrapping. for pid in pids + pids: if selected_pid is None or found: selected_pid = pid return found = pid == selected_pid
[ "def", "move_selection", "(", "reverse", "=", "False", ")", ":", "global", "selected_pid", "if", "selected_pid", "not", "in", "gunicorns", ":", "selected_pid", "=", "None", "found", "=", "False", "pids", "=", "sorted", "(", "gunicorns", ".", "keys", "(", ")", ",", "reverse", "=", "reverse", ")", "# Iterate items twice to enable wrapping.", "for", "pid", "in", "pids", "+", "pids", ":", "if", "selected_pid", "is", "None", "or", "found", ":", "selected_pid", "=", "pid", "return", "found", "=", "pid", "==", "selected_pid" ]
31.5
0.001927
[ "def move_selection(reverse=False):\n", " \"\"\"\n", " Goes through the list of gunicorns, setting the selected as the one after\n", " the currently selected.\n", " \"\"\"\n", " global selected_pid\n", " if selected_pid not in gunicorns:\n", " selected_pid = None\n", " found = False\n", " pids = sorted(gunicorns.keys(), reverse=reverse)\n", " # Iterate items twice to enable wrapping.\n", " for pid in pids + pids:\n", " if selected_pid is None or found:\n", " selected_pid = pid\n", " return\n", " found = pid == selected_pid" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.02857142857142857 ]
16
0.001786
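The doubled list in move_selection above (pids + pids) is what lets the selection wrap past the end; the same idiom in isolation, with illustrative PIDs:

pids = [101, 205, 333]          # sorted PIDs, purely illustrative
selected = 333                  # currently selected item
found = False
for pid in pids + pids:         # walking the list twice allows wrap-around
    if found:
        selected = pid          # after the last item, this wraps to 101
        break
    found = (pid == selected)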
def _sign(private_key, data, hash_algorithm, rsa_pss_padding=False): """ Generates an RSA, DSA or ECDSA signature :param private_key: The PrivateKey to generate the signature with :param data: A byte string of the data the signature is for :param hash_algorithm: A unicode string of "md5", "sha1", "sha256", "sha384", "sha512" or "raw" :param rsa_pss_padding: If PSS padding should be used for RSA keys :raises: ValueError - when any of the parameters contain an invalid value TypeError - when any of the parameters are of the wrong type OSError - when an error is returned by the OS crypto library :return: A byte string of the signature """ if not isinstance(private_key, PrivateKey): raise TypeError(pretty_message( ''' private_key must be an instance of PrivateKey, not %s ''', type_name(private_key) )) if not isinstance(data, byte_cls): raise TypeError(pretty_message( ''' data must be a byte string, not %s ''', type_name(data) )) valid_hash_algorithms = set(['md5', 'sha1', 'sha256', 'sha384', 'sha512']) if private_key.algorithm == 'rsa' and not rsa_pss_padding: valid_hash_algorithms |= set(['raw']) if hash_algorithm not in valid_hash_algorithms: valid_hash_algorithms_error = '"md5", "sha1", "sha256", "sha384", "sha512"' if private_key.algorithm == 'rsa' and not rsa_pss_padding: valid_hash_algorithms_error += ', "raw"' raise ValueError(pretty_message( ''' hash_algorithm must be one of %s, not %s ''', valid_hash_algorithms_error, repr(hash_algorithm) )) if private_key.algorithm != 'rsa' and rsa_pss_padding is not False: raise ValueError(pretty_message( ''' PSS padding may only be used with RSA keys - signing via a %s key was requested ''', private_key.algorithm.upper() )) if hash_algorithm == 'raw': if len(data) > private_key.byte_size - 11: raise ValueError(pretty_message( ''' data must be 11 bytes shorter than the key size when hash_algorithm is "raw" - key size is %s bytes, but data is %s bytes long ''', private_key.byte_size, len(data) )) if _backend == 'winlegacy': if private_key.algorithm == 'ec': return _pure_python_ecdsa_sign(private_key, data, hash_algorithm) return _advapi32_sign(private_key, data, hash_algorithm, rsa_pss_padding) return _bcrypt_sign(private_key, data, hash_algorithm, rsa_pss_padding)
[ "def", "_sign", "(", "private_key", ",", "data", ",", "hash_algorithm", ",", "rsa_pss_padding", "=", "False", ")", ":", "if", "not", "isinstance", "(", "private_key", ",", "PrivateKey", ")", ":", "raise", "TypeError", "(", "pretty_message", "(", "'''\n private_key must be an instance of PrivateKey, not %s\n '''", ",", "type_name", "(", "private_key", ")", ")", ")", "if", "not", "isinstance", "(", "data", ",", "byte_cls", ")", ":", "raise", "TypeError", "(", "pretty_message", "(", "'''\n data must be a byte string, not %s\n '''", ",", "type_name", "(", "data", ")", ")", ")", "valid_hash_algorithms", "=", "set", "(", "[", "'md5'", ",", "'sha1'", ",", "'sha256'", ",", "'sha384'", ",", "'sha512'", "]", ")", "if", "private_key", ".", "algorithm", "==", "'rsa'", "and", "not", "rsa_pss_padding", ":", "valid_hash_algorithms", "|=", "set", "(", "[", "'raw'", "]", ")", "if", "hash_algorithm", "not", "in", "valid_hash_algorithms", ":", "valid_hash_algorithms_error", "=", "'\"md5\", \"sha1\", \"sha256\", \"sha384\", \"sha512\"'", "if", "private_key", ".", "algorithm", "==", "'rsa'", "and", "not", "rsa_pss_padding", ":", "valid_hash_algorithms_error", "+=", "', \"raw\"'", "raise", "ValueError", "(", "pretty_message", "(", "'''\n hash_algorithm must be one of %s, not %s\n '''", ",", "valid_hash_algorithms_error", ",", "repr", "(", "hash_algorithm", ")", ")", ")", "if", "private_key", ".", "algorithm", "!=", "'rsa'", "and", "rsa_pss_padding", "is", "not", "False", ":", "raise", "ValueError", "(", "pretty_message", "(", "'''\n PSS padding may only be used with RSA keys - signing via a %s key\n was requested\n '''", ",", "private_key", ".", "algorithm", ".", "upper", "(", ")", ")", ")", "if", "hash_algorithm", "==", "'raw'", ":", "if", "len", "(", "data", ")", ">", "private_key", ".", "byte_size", "-", "11", ":", "raise", "ValueError", "(", "pretty_message", "(", "'''\n data must be 11 bytes shorter than the key size when\n hash_algorithm is \"raw\" - key size is %s bytes, but data\n is %s bytes long\n '''", ",", "private_key", ".", "byte_size", ",", "len", "(", "data", ")", ")", ")", "if", "_backend", "==", "'winlegacy'", ":", "if", "private_key", ".", "algorithm", "==", "'ec'", ":", "return", "_pure_python_ecdsa_sign", "(", "private_key", ",", "data", ",", "hash_algorithm", ")", "return", "_advapi32_sign", "(", "private_key", ",", "data", ",", "hash_algorithm", ",", "rsa_pss_padding", ")", "return", "_bcrypt_sign", "(", "private_key", ",", "data", ",", "hash_algorithm", ",", "rsa_pss_padding", ")" ]
33.698795
0.001389
[ "def _sign(private_key, data, hash_algorithm, rsa_pss_padding=False):\n", " \"\"\"\n", " Generates an RSA, DSA or ECDSA signature\n", "\n", " :param private_key:\n", " The PrivateKey to generate the signature with\n", "\n", " :param data:\n", " A byte string of the data the signature is for\n", "\n", " :param hash_algorithm:\n", " A unicode string of \"md5\", \"sha1\", \"sha256\", \"sha384\", \"sha512\" or \"raw\"\n", "\n", " :param rsa_pss_padding:\n", " If PSS padding should be used for RSA keys\n", "\n", " :raises:\n", " ValueError - when any of the parameters contain an invalid value\n", " TypeError - when any of the parameters are of the wrong type\n", " OSError - when an error is returned by the OS crypto library\n", "\n", " :return:\n", " A byte string of the signature\n", " \"\"\"\n", "\n", " if not isinstance(private_key, PrivateKey):\n", " raise TypeError(pretty_message(\n", " '''\n", " private_key must be an instance of PrivateKey, not %s\n", " ''',\n", " type_name(private_key)\n", " ))\n", "\n", " if not isinstance(data, byte_cls):\n", " raise TypeError(pretty_message(\n", " '''\n", " data must be a byte string, not %s\n", " ''',\n", " type_name(data)\n", " ))\n", "\n", " valid_hash_algorithms = set(['md5', 'sha1', 'sha256', 'sha384', 'sha512'])\n", " if private_key.algorithm == 'rsa' and not rsa_pss_padding:\n", " valid_hash_algorithms |= set(['raw'])\n", "\n", " if hash_algorithm not in valid_hash_algorithms:\n", " valid_hash_algorithms_error = '\"md5\", \"sha1\", \"sha256\", \"sha384\", \"sha512\"'\n", " if private_key.algorithm == 'rsa' and not rsa_pss_padding:\n", " valid_hash_algorithms_error += ', \"raw\"'\n", " raise ValueError(pretty_message(\n", " '''\n", " hash_algorithm must be one of %s, not %s\n", " ''',\n", " valid_hash_algorithms_error,\n", " repr(hash_algorithm)\n", " ))\n", "\n", " if private_key.algorithm != 'rsa' and rsa_pss_padding is not False:\n", " raise ValueError(pretty_message(\n", " '''\n", " PSS padding may only be used with RSA keys - signing via a %s key\n", " was requested\n", " ''',\n", " private_key.algorithm.upper()\n", " ))\n", "\n", " if hash_algorithm == 'raw':\n", " if len(data) > private_key.byte_size - 11:\n", " raise ValueError(pretty_message(\n", " '''\n", " data must be 11 bytes shorter than the key size when\n", " hash_algorithm is \"raw\" - key size is %s bytes, but data\n", " is %s bytes long\n", " ''',\n", " private_key.byte_size,\n", " len(data)\n", " ))\n", "\n", " if _backend == 'winlegacy':\n", " if private_key.algorithm == 'ec':\n", " return _pure_python_ecdsa_sign(private_key, data, hash_algorithm)\n", " return _advapi32_sign(private_key, data, hash_algorithm, rsa_pss_padding)\n", " return _bcrypt_sign(private_key, data, hash_algorithm, rsa_pss_padding)" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012345679012345678, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011904761904761904, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012195121951219513, 0.013333333333333334 ]
83
0.0006
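The 'raw' branch in _sign above requires data to be at least 11 bytes shorter than the key, which lines up with the minimum PKCS#1 v1.5 padding overhead; a quick arithmetic check with an illustrative key size:

key_byte_size = 256                               # a 2048-bit RSA key
padding_overhead = 11                             # minimum PKCS#1 v1.5 padding bytes
max_raw_data = key_byte_size - padding_overhead   # at most 245 bytes of raw data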
def unregister(self, measurement_class, callback): """Stop notifying ``callback`` of new values of ``measurement_class``. If the callback wasn't previously registered, this method will have no effect. """ self.callbacks[Measurement.name_from_class(measurement_class) ].remove(callback)
[ "def", "unregister", "(", "self", ",", "measurement_class", ",", "callback", ")", ":", "self", ".", "callbacks", "[", "Measurement", ".", "name_from_class", "(", "measurement_class", ")", "]", ".", "remove", "(", "callback", ")" ]
41.875
0.008772
[ "def unregister(self, measurement_class, callback):\n", " \"\"\"Stop notifying ``callback`` of new values of ``measurement_class``.\n", "\n", " If the callback wasn't previously registered, this method will have no\n", " effect.\n", " \"\"\"\n", " self.callbacks[Measurement.name_from_class(measurement_class)\n", " ].remove(callback)" ]
[ 0, 0.012658227848101266, 0, 0, 0, 0, 0, 0.058823529411764705 ]
8
0.008935
def owned_ecs(self): '''A list of the execution contexts owned by this component.''' with self._mutex: if not self._owned_ecs: self._owned_ecs = [ExecutionContext(ec, self._obj.get_context_handle(ec)) \ for ec in self._obj.get_owned_contexts()] return self._owned_ecs
[ "def", "owned_ecs", "(", "self", ")", ":", "with", "self", ".", "_mutex", ":", "if", "not", "self", ".", "_owned_ecs", ":", "self", ".", "_owned_ecs", "=", "[", "ExecutionContext", "(", "ec", ",", "self", ".", "_obj", ".", "get_context_handle", "(", "ec", ")", ")", "for", "ec", "in", "self", ".", "_obj", ".", "get_owned_contexts", "(", ")", "]", "return", "self", ".", "_owned_ecs" ]
44
0.011142
[ "def owned_ecs(self):\n", " '''A list of the execution contexts owned by this component.'''\n", " with self._mutex:\n", " if not self._owned_ecs:\n", " self._owned_ecs = [ExecutionContext(ec,\n", " self._obj.get_context_handle(ec)) \\\n", " for ec in self._obj.get_owned_contexts()]\n", " return self._owned_ecs" ]
[ 0, 0.013888888888888888, 0, 0, 0, 0.03571428571428571, 0, 0.03333333333333333 ]
8
0.010367
def listDatasets(self, dataset="", parent_dataset="", is_dataset_valid=1, release_version="", pset_hash="", app_name="", output_module_label="", global_tag="", processing_version=0, acquisition_era_name="", run_num=-1, physics_group_name="", logical_file_name="", primary_ds_name="", primary_ds_type="", processed_ds_name='', data_tier_name="", dataset_access_type="VALID", prep_id='', create_by="", last_modified_by="", min_cdate='0', max_cdate='0', min_ldate='0', max_ldate='0', cdate='0', ldate='0', detail=False, dataset_id=-1): """ API to list dataset(s) in DBS * You can use ANY combination of these parameters in this API * In absence of parameters, all valid datasets known to the DBS instance will be returned :param dataset: Full dataset (path) of the dataset. :type dataset: str :param parent_dataset: Full dataset (path) of the dataset :type parent_dataset: str :param release_version: cmssw version :type release_version: str :param pset_hash: pset hash :type pset_hash: str :param app_name: Application name (generally it is cmsRun) :type app_name: str :param output_module_label: output_module_label :type output_module_label: str :param global_tag: global_tag :type global_tag: str :param processing_version: Processing Version :type processing_version: str :param acquisition_era_name: Acquisition Era :type acquisition_era_name: str :param run_num: Specify a specific run number or range. Possible format are: run_num, 'run_min-run_max' or ['run_min-run_max', run1, run2, ...]. run_num=1 is not allowed. :type run_num: int,list,str :param physics_group_name: List only dataset having physics_group_name attribute :type physics_group_name: str :param logical_file_name: List dataset containing the logical_file_name :type logical_file_name: str :param primary_ds_name: Primary Dataset Name :type primary_ds_name: str :param primary_ds_type: Primary Dataset Type (Type of data, MC/DATA) :type primary_ds_type: str :param processed_ds_name: List datasets having this processed dataset name :type processed_ds_name: str :param data_tier_name: Data Tier :type data_tier_name: str :param dataset_access_type: Dataset Access Type ( PRODUCTION, DEPRECATED etc.) :type dataset_access_type: str :param prep_id: prep_id :type prep_id: str :param create_by: Creator of the dataset :type create_by: str :param last_modified_by: Last modifier of the dataset :type last_modified_by: str :param min_cdate: Lower limit for the creation date (unixtime) (Optional) :type min_cdate: int, str :param max_cdate: Upper limit for the creation date (unixtime) (Optional) :type max_cdate: int, str :param min_ldate: Lower limit for the last modification date (unixtime) (Optional) :type min_ldate: int, str :param max_ldate: Upper limit for the last modification date (unixtime) (Optional) :type max_ldate: int, str :param cdate: creation date (unixtime) (Optional) :type cdate: int, str :param ldate: last modification date (unixtime) (Optional) :type ldate: int, str :param detail: List all details of a dataset :type detail: bool :param dataset_id: dataset table primary key used by CMS Computing Analytics. :type dataset_id: int, long, str :returns: List of dictionaries containing the following keys (dataset). If the detail option is used. 
The dictionary contain the following keys (primary_ds_name, physics_group_name, acquisition_era_name, create_by, dataset_access_type, data_tier_name, last_modified_by, creation_date, processing_version, processed_ds_name, xtcrosssection, last_modification_date, dataset_id, dataset, prep_id, primary_ds_type) :rtype: list of dicts """ dataset = dataset.replace("*", "%") parent_dataset = parent_dataset.replace("*", "%") release_version = release_version.replace("*", "%") pset_hash = pset_hash.replace("*", "%") app_name = app_name.replace("*", "%") output_module_label = output_module_label.replace("*", "%") global_tag = global_tag.replace("*", "%") logical_file_name = logical_file_name.replace("*", "%") physics_group_name = physics_group_name.replace("*", "%") primary_ds_name = primary_ds_name.replace("*", "%") primary_ds_type = primary_ds_type.replace("*", "%") data_tier_name = data_tier_name.replace("*", "%") dataset_access_type = dataset_access_type.replace("*", "%") processed_ds_name = processed_ds_name.replace("*", "%") acquisition_era_name = acquisition_era_name.replace("*", "%") #processing_version = processing_version.replace("*", "%") #create_by and last_modified_by have be full spelled, no wildcard will allowed. #We got them from request head so they can be either HN account name or DN. #This is depended on how an user's account is set up. # # In the next release we will require dataset has no wildcard in it. # DBS will reject wildcard search with dataset name with listDatasets call. # One should seperate the dataset into primary , process and datatier if any wildcard. # YG Oct 26, 2016 # Some of users were overwhiled by the API change. So we split the wildcarded dataset in the server instead of by the client. # YG Dec. 9 2016 # # run_num=1 caused full table scan and CERN DBS reported some of the queries ran more than 50 hours # We will disbale all the run_num=1 calls in DBS. Run_num=1 will be OK when logical_file_name is given. # YG Jan. 
15 2019 # if (run_num != -1 and logical_file_name ==''): for r in parseRunRange(run_num): if isinstance(r, basestring) or isinstance(r, int) or isinstance(r, long): if r == 1 or r == '1': dbsExceptionHandler("dbsException-invalid-input", "Run_num=1 is not a valid input.", self.logger.exception) elif isinstance(r, run_tuple): if r[0] == r[1]: dbsExceptionHandler('dbsException-invalid-input', "DBS run range must be apart at least by 1.", self.logger.exception) elif r[0] <= 1 <= r[1]: dbsExceptionHandler("dbsException-invalid-input", "Run_num=1 is not a valid input.", self.logger.exception) if( dataset and ( dataset == "/%/%/%" or dataset== "/%" or dataset == "/%/%" ) ): dataset='' elif( dataset and ( dataset.find('%') != -1 ) ) : junk, primary_ds_name, processed_ds_name, data_tier_name = dataset.split('/') dataset = '' if ( primary_ds_name == '%' ): primary_ds_name = '' if( processed_ds_name == '%' ): processed_ds_name = '' if ( data_tier_name == '%' ): data_tier_name = '' try: dataset_id = int(dataset_id) except: dbsExceptionHandler("dbsException-invalid-input2", "Invalid Input for dataset_id that has to be an int.", self.logger.exception, 'dataset_id has to be an int.') if create_by.find('*')!=-1 or create_by.find('%')!=-1 or last_modified_by.find('*')!=-1\ or last_modified_by.find('%')!=-1: dbsExceptionHandler("dbsException-invalid-input2", "Invalid Input for create_by or last_modified_by.\ No wildcard allowed.", self.logger.exception, 'No wildcards allowed for create_by or last_modified_by') try: if isinstance(min_cdate, basestring) and ('*' in min_cdate or '%' in min_cdate): min_cdate = 0 else: try: min_cdate = int(min_cdate) except: dbsExceptionHandler("dbsException-invalid-input", "invalid input for min_cdate") if isinstance(max_cdate, basestring) and ('*' in max_cdate or '%' in max_cdate): max_cdate = 0 else: try: max_cdate = int(max_cdate) except: dbsExceptionHandler("dbsException-invalid-input", "invalid input for max_cdate") if isinstance(min_ldate, basestring) and ('*' in min_ldate or '%' in min_ldate): min_ldate = 0 else: try: min_ldate = int(min_ldate) except: dbsExceptionHandler("dbsException-invalid-input", "invalid input for min_ldate") if isinstance(max_ldate, basestring) and ('*' in max_ldate or '%' in max_ldate): max_ldate = 0 else: try: max_ldate = int(max_ldate) except: dbsExceptionHandler("dbsException-invalid-input", "invalid input for max_ldate") if isinstance(cdate, basestring) and ('*' in cdate or '%' in cdate): cdate = 0 else: try: cdate = int(cdate) except: dbsExceptionHandler("dbsException-invalid-input", "invalid input for cdate") if isinstance(ldate, basestring) and ('*' in ldate or '%' in ldate): ldate = 0 else: try: ldate = int(ldate) except: dbsExceptionHandler("dbsException-invalid-input", "invalid input for ldate") except dbsException as de: dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError) except Exception as ex: sError = "DBSReaderModel/listDatasets. %s \n. 
Exception trace: \n %s" \ % (ex, traceback.format_exc()) dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError) detail = detail in (True, 1, "True", "1", 'true') try: return self.dbsDataset.listDatasets(dataset, parent_dataset, is_dataset_valid, release_version, pset_hash, app_name, output_module_label, global_tag, processing_version, acquisition_era_name, run_num, physics_group_name, logical_file_name, primary_ds_name, primary_ds_type, processed_ds_name, data_tier_name, dataset_access_type, prep_id, create_by, last_modified_by, min_cdate, max_cdate, min_ldate, max_ldate, cdate, ldate, detail, dataset_id) except dbsException as de: dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError) except Exception as ex: sError = "DBSReaderModel/listdatasets. %s.\n Exception trace: \n %s" % (ex, traceback.format_exc()) dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError)
[ "def", "listDatasets", "(", "self", ",", "dataset", "=", "\"\"", ",", "parent_dataset", "=", "\"\"", ",", "is_dataset_valid", "=", "1", ",", "release_version", "=", "\"\"", ",", "pset_hash", "=", "\"\"", ",", "app_name", "=", "\"\"", ",", "output_module_label", "=", "\"\"", ",", "global_tag", "=", "\"\"", ",", "processing_version", "=", "0", ",", "acquisition_era_name", "=", "\"\"", ",", "run_num", "=", "-", "1", ",", "physics_group_name", "=", "\"\"", ",", "logical_file_name", "=", "\"\"", ",", "primary_ds_name", "=", "\"\"", ",", "primary_ds_type", "=", "\"\"", ",", "processed_ds_name", "=", "''", ",", "data_tier_name", "=", "\"\"", ",", "dataset_access_type", "=", "\"VALID\"", ",", "prep_id", "=", "''", ",", "create_by", "=", "\"\"", ",", "last_modified_by", "=", "\"\"", ",", "min_cdate", "=", "'0'", ",", "max_cdate", "=", "'0'", ",", "min_ldate", "=", "'0'", ",", "max_ldate", "=", "'0'", ",", "cdate", "=", "'0'", ",", "ldate", "=", "'0'", ",", "detail", "=", "False", ",", "dataset_id", "=", "-", "1", ")", ":", "dataset", "=", "dataset", ".", "replace", "(", "\"*\"", ",", "\"%\"", ")", "parent_dataset", "=", "parent_dataset", ".", "replace", "(", "\"*\"", ",", "\"%\"", ")", "release_version", "=", "release_version", ".", "replace", "(", "\"*\"", ",", "\"%\"", ")", "pset_hash", "=", "pset_hash", ".", "replace", "(", "\"*\"", ",", "\"%\"", ")", "app_name", "=", "app_name", ".", "replace", "(", "\"*\"", ",", "\"%\"", ")", "output_module_label", "=", "output_module_label", ".", "replace", "(", "\"*\"", ",", "\"%\"", ")", "global_tag", "=", "global_tag", ".", "replace", "(", "\"*\"", ",", "\"%\"", ")", "logical_file_name", "=", "logical_file_name", ".", "replace", "(", "\"*\"", ",", "\"%\"", ")", "physics_group_name", "=", "physics_group_name", ".", "replace", "(", "\"*\"", ",", "\"%\"", ")", "primary_ds_name", "=", "primary_ds_name", ".", "replace", "(", "\"*\"", ",", "\"%\"", ")", "primary_ds_type", "=", "primary_ds_type", ".", "replace", "(", "\"*\"", ",", "\"%\"", ")", "data_tier_name", "=", "data_tier_name", ".", "replace", "(", "\"*\"", ",", "\"%\"", ")", "dataset_access_type", "=", "dataset_access_type", ".", "replace", "(", "\"*\"", ",", "\"%\"", ")", "processed_ds_name", "=", "processed_ds_name", ".", "replace", "(", "\"*\"", ",", "\"%\"", ")", "acquisition_era_name", "=", "acquisition_era_name", ".", "replace", "(", "\"*\"", ",", "\"%\"", ")", "#processing_version = processing_version.replace(\"*\", \"%\")", "#create_by and last_modified_by have be full spelled, no wildcard will allowed.", "#We got them from request head so they can be either HN account name or DN.", "#This is depended on how an user's account is set up.", "#", "# In the next release we will require dataset has no wildcard in it. ", "# DBS will reject wildcard search with dataset name with listDatasets call. ", "# One should seperate the dataset into primary , process and datatier if any wildcard.", "# YG Oct 26, 2016", "# Some of users were overwhiled by the API change. So we split the wildcarded dataset in the server instead of by the client.", "# YG Dec. 9 2016", "#", "# run_num=1 caused full table scan and CERN DBS reported some of the queries ran more than 50 hours", "# We will disbale all the run_num=1 calls in DBS. Run_num=1 will be OK when logical_file_name is given.", "# YG Jan. 
15 2019", "#", "if", "(", "run_num", "!=", "-", "1", "and", "logical_file_name", "==", "''", ")", ":", "for", "r", "in", "parseRunRange", "(", "run_num", ")", ":", "if", "isinstance", "(", "r", ",", "basestring", ")", "or", "isinstance", "(", "r", ",", "int", ")", "or", "isinstance", "(", "r", ",", "long", ")", ":", "if", "r", "==", "1", "or", "r", "==", "'1'", ":", "dbsExceptionHandler", "(", "\"dbsException-invalid-input\"", ",", "\"Run_num=1 is not a valid input.\"", ",", "self", ".", "logger", ".", "exception", ")", "elif", "isinstance", "(", "r", ",", "run_tuple", ")", ":", "if", "r", "[", "0", "]", "==", "r", "[", "1", "]", ":", "dbsExceptionHandler", "(", "'dbsException-invalid-input'", ",", "\"DBS run range must be apart at least by 1.\"", ",", "self", ".", "logger", ".", "exception", ")", "elif", "r", "[", "0", "]", "<=", "1", "<=", "r", "[", "1", "]", ":", "dbsExceptionHandler", "(", "\"dbsException-invalid-input\"", ",", "\"Run_num=1 is not a valid input.\"", ",", "self", ".", "logger", ".", "exception", ")", "if", "(", "dataset", "and", "(", "dataset", "==", "\"/%/%/%\"", "or", "dataset", "==", "\"/%\"", "or", "dataset", "==", "\"/%/%\"", ")", ")", ":", "dataset", "=", "''", "elif", "(", "dataset", "and", "(", "dataset", ".", "find", "(", "'%'", ")", "!=", "-", "1", ")", ")", ":", "junk", ",", "primary_ds_name", ",", "processed_ds_name", ",", "data_tier_name", "=", "dataset", ".", "split", "(", "'/'", ")", "dataset", "=", "''", "if", "(", "primary_ds_name", "==", "'%'", ")", ":", "primary_ds_name", "=", "''", "if", "(", "processed_ds_name", "==", "'%'", ")", ":", "processed_ds_name", "=", "''", "if", "(", "data_tier_name", "==", "'%'", ")", ":", "data_tier_name", "=", "''", "try", ":", "dataset_id", "=", "int", "(", "dataset_id", ")", "except", ":", "dbsExceptionHandler", "(", "\"dbsException-invalid-input2\"", ",", "\"Invalid Input for dataset_id that has to be an int.\"", ",", "self", ".", "logger", ".", "exception", ",", "'dataset_id has to be an int.'", ")", "if", "create_by", ".", "find", "(", "'*'", ")", "!=", "-", "1", "or", "create_by", ".", "find", "(", "'%'", ")", "!=", "-", "1", "or", "last_modified_by", ".", "find", "(", "'*'", ")", "!=", "-", "1", "or", "last_modified_by", ".", "find", "(", "'%'", ")", "!=", "-", "1", ":", "dbsExceptionHandler", "(", "\"dbsException-invalid-input2\"", ",", "\"Invalid Input for create_by or last_modified_by.\\\n No wildcard allowed.\"", ",", "self", ".", "logger", ".", "exception", ",", "'No wildcards allowed for create_by or last_modified_by'", ")", "try", ":", "if", "isinstance", "(", "min_cdate", ",", "basestring", ")", "and", "(", "'*'", "in", "min_cdate", "or", "'%'", "in", "min_cdate", ")", ":", "min_cdate", "=", "0", "else", ":", "try", ":", "min_cdate", "=", "int", "(", "min_cdate", ")", "except", ":", "dbsExceptionHandler", "(", "\"dbsException-invalid-input\"", ",", "\"invalid input for min_cdate\"", ")", "if", "isinstance", "(", "max_cdate", ",", "basestring", ")", "and", "(", "'*'", "in", "max_cdate", "or", "'%'", "in", "max_cdate", ")", ":", "max_cdate", "=", "0", "else", ":", "try", ":", "max_cdate", "=", "int", "(", "max_cdate", ")", "except", ":", "dbsExceptionHandler", "(", "\"dbsException-invalid-input\"", ",", "\"invalid input for max_cdate\"", ")", "if", "isinstance", "(", "min_ldate", ",", "basestring", ")", "and", "(", "'*'", "in", "min_ldate", "or", "'%'", "in", "min_ldate", ")", ":", "min_ldate", "=", "0", "else", ":", "try", ":", "min_ldate", "=", "int", "(", "min_ldate", ")", "except", ":", 
"dbsExceptionHandler", "(", "\"dbsException-invalid-input\"", ",", "\"invalid input for min_ldate\"", ")", "if", "isinstance", "(", "max_ldate", ",", "basestring", ")", "and", "(", "'*'", "in", "max_ldate", "or", "'%'", "in", "max_ldate", ")", ":", "max_ldate", "=", "0", "else", ":", "try", ":", "max_ldate", "=", "int", "(", "max_ldate", ")", "except", ":", "dbsExceptionHandler", "(", "\"dbsException-invalid-input\"", ",", "\"invalid input for max_ldate\"", ")", "if", "isinstance", "(", "cdate", ",", "basestring", ")", "and", "(", "'*'", "in", "cdate", "or", "'%'", "in", "cdate", ")", ":", "cdate", "=", "0", "else", ":", "try", ":", "cdate", "=", "int", "(", "cdate", ")", "except", ":", "dbsExceptionHandler", "(", "\"dbsException-invalid-input\"", ",", "\"invalid input for cdate\"", ")", "if", "isinstance", "(", "ldate", ",", "basestring", ")", "and", "(", "'*'", "in", "ldate", "or", "'%'", "in", "ldate", ")", ":", "ldate", "=", "0", "else", ":", "try", ":", "ldate", "=", "int", "(", "ldate", ")", "except", ":", "dbsExceptionHandler", "(", "\"dbsException-invalid-input\"", ",", "\"invalid input for ldate\"", ")", "except", "dbsException", "as", "de", ":", "dbsExceptionHandler", "(", "de", ".", "eCode", ",", "de", ".", "message", ",", "self", ".", "logger", ".", "exception", ",", "de", ".", "serverError", ")", "except", "Exception", "as", "ex", ":", "sError", "=", "\"DBSReaderModel/listDatasets. %s \\n. Exception trace: \\n %s\"", "%", "(", "ex", ",", "traceback", ".", "format_exc", "(", ")", ")", "dbsExceptionHandler", "(", "'dbsException-server-error'", ",", "dbsExceptionCode", "[", "'dbsException-server-error'", "]", ",", "self", ".", "logger", ".", "exception", ",", "sError", ")", "detail", "=", "detail", "in", "(", "True", ",", "1", ",", "\"True\"", ",", "\"1\"", ",", "'true'", ")", "try", ":", "return", "self", ".", "dbsDataset", ".", "listDatasets", "(", "dataset", ",", "parent_dataset", ",", "is_dataset_valid", ",", "release_version", ",", "pset_hash", ",", "app_name", ",", "output_module_label", ",", "global_tag", ",", "processing_version", ",", "acquisition_era_name", ",", "run_num", ",", "physics_group_name", ",", "logical_file_name", ",", "primary_ds_name", ",", "primary_ds_type", ",", "processed_ds_name", ",", "data_tier_name", ",", "dataset_access_type", ",", "prep_id", ",", "create_by", ",", "last_modified_by", ",", "min_cdate", ",", "max_cdate", ",", "min_ldate", ",", "max_ldate", ",", "cdate", ",", "ldate", ",", "detail", ",", "dataset_id", ")", "except", "dbsException", "as", "de", ":", "dbsExceptionHandler", "(", "de", ".", "eCode", ",", "de", ".", "message", ",", "self", ".", "logger", ".", "exception", ",", "de", ".", "serverError", ")", "except", "Exception", "as", "ex", ":", "sError", "=", "\"DBSReaderModel/listdatasets. %s.\\n Exception trace: \\n %s\"", "%", "(", "ex", ",", "traceback", ".", "format_exc", "(", ")", ")", "dbsExceptionHandler", "(", "'dbsException-server-error'", ",", "dbsExceptionCode", "[", "'dbsException-server-error'", "]", ",", "self", ".", "logger", ".", "exception", ",", "sError", ")" ]
55.219512
0.009979
[ "def listDatasets(self, dataset=\"\", parent_dataset=\"\", is_dataset_valid=1,\n", " release_version=\"\", pset_hash=\"\", app_name=\"\", output_module_label=\"\", global_tag=\"\",\n", " processing_version=0, acquisition_era_name=\"\", run_num=-1,\n", " physics_group_name=\"\", logical_file_name=\"\", primary_ds_name=\"\", primary_ds_type=\"\",\n", " processed_ds_name='', data_tier_name=\"\", dataset_access_type=\"VALID\", prep_id='', create_by=\"\", last_modified_by=\"\",\n", " min_cdate='0', max_cdate='0', min_ldate='0', max_ldate='0', cdate='0',\n", " ldate='0', detail=False, dataset_id=-1):\n", " \"\"\"\n", " API to list dataset(s) in DBS\n", " * You can use ANY combination of these parameters in this API\n", " * In absence of parameters, all valid datasets known to the DBS instance will be returned\n", "\n", " :param dataset: Full dataset (path) of the dataset.\n", " :type dataset: str\n", " :param parent_dataset: Full dataset (path) of the dataset\n", " :type parent_dataset: str\n", " :param release_version: cmssw version\n", " :type release_version: str\n", " :param pset_hash: pset hash\n", " :type pset_hash: str\n", " :param app_name: Application name (generally it is cmsRun)\n", " :type app_name: str\n", " :param output_module_label: output_module_label\n", " :type output_module_label: str\n", " :param global_tag: global_tag\n", " :type global_tag: str\n", " :param processing_version: Processing Version\n", " :type processing_version: str\n", " :param acquisition_era_name: Acquisition Era\n", " :type acquisition_era_name: str\n", " :param run_num: Specify a specific run number or range. Possible format are: run_num, 'run_min-run_max' or ['run_min-run_max', run1, run2, ...]. run_num=1 is not allowed.\n", " :type run_num: int,list,str\n", " :param physics_group_name: List only dataset having physics_group_name attribute\n", " :type physics_group_name: str\n", " :param logical_file_name: List dataset containing the logical_file_name\n", " :type logical_file_name: str\n", " :param primary_ds_name: Primary Dataset Name\n", " :type primary_ds_name: str\n", " :param primary_ds_type: Primary Dataset Type (Type of data, MC/DATA)\n", " :type primary_ds_type: str\n", " :param processed_ds_name: List datasets having this processed dataset name\n", " :type processed_ds_name: str\n", " :param data_tier_name: Data Tier\n", " :type data_tier_name: str\n", " :param dataset_access_type: Dataset Access Type ( PRODUCTION, DEPRECATED etc.)\n", " :type dataset_access_type: str\n", " :param prep_id: prep_id\n", " :type prep_id: str\n", " :param create_by: Creator of the dataset\n", " :type create_by: str\n", " :param last_modified_by: Last modifier of the dataset\n", " :type last_modified_by: str\n", " :param min_cdate: Lower limit for the creation date (unixtime) (Optional)\n", " :type min_cdate: int, str\n", " :param max_cdate: Upper limit for the creation date (unixtime) (Optional)\n", " :type max_cdate: int, str\n", " :param min_ldate: Lower limit for the last modification date (unixtime) (Optional)\n", " :type min_ldate: int, str\n", " :param max_ldate: Upper limit for the last modification date (unixtime) (Optional)\n", " :type max_ldate: int, str\n", " :param cdate: creation date (unixtime) (Optional)\n", " :type cdate: int, str\n", " :param ldate: last modification date (unixtime) (Optional)\n", " :type ldate: int, str\n", " :param detail: List all details of a dataset\n", " :type detail: bool\n", " :param dataset_id: dataset table primary key used by CMS Computing Analytics.\n", " :type 
dataset_id: int, long, str\n", " :returns: List of dictionaries containing the following keys (dataset). If the detail option is used. The dictionary contain the following keys (primary_ds_name, physics_group_name, acquisition_era_name, create_by, dataset_access_type, data_tier_name, last_modified_by, creation_date, processing_version, processed_ds_name, xtcrosssection, last_modification_date, dataset_id, dataset, prep_id, primary_ds_type)\n", " :rtype: list of dicts\n", "\n", " \"\"\"\n", " dataset = dataset.replace(\"*\", \"%\")\n", " parent_dataset = parent_dataset.replace(\"*\", \"%\")\n", " release_version = release_version.replace(\"*\", \"%\")\n", " pset_hash = pset_hash.replace(\"*\", \"%\")\n", " app_name = app_name.replace(\"*\", \"%\")\n", " output_module_label = output_module_label.replace(\"*\", \"%\")\n", " global_tag = global_tag.replace(\"*\", \"%\")\n", " logical_file_name = logical_file_name.replace(\"*\", \"%\")\n", " physics_group_name = physics_group_name.replace(\"*\", \"%\")\n", " primary_ds_name = primary_ds_name.replace(\"*\", \"%\")\n", " primary_ds_type = primary_ds_type.replace(\"*\", \"%\")\n", " data_tier_name = data_tier_name.replace(\"*\", \"%\")\n", " dataset_access_type = dataset_access_type.replace(\"*\", \"%\")\n", " processed_ds_name = processed_ds_name.replace(\"*\", \"%\")\n", " acquisition_era_name = acquisition_era_name.replace(\"*\", \"%\")\n", " #processing_version = processing_version.replace(\"*\", \"%\")\n", " #create_by and last_modified_by have be full spelled, no wildcard will allowed.\n", " #We got them from request head so they can be either HN account name or DN.\n", " #This is depended on how an user's account is set up.\n", " #\n", " # In the next release we will require dataset has no wildcard in it. \n", " # DBS will reject wildcard search with dataset name with listDatasets call. \n", " # One should seperate the dataset into primary , process and datatier if any wildcard.\n", " # YG Oct 26, 2016\n", " # Some of users were overwhiled by the API change. So we split the wildcarded dataset in the server instead of by the client.\n", " # YG Dec. 9 2016\n", " #\n", " # run_num=1 caused full table scan and CERN DBS reported some of the queries ran more than 50 hours\n", " # We will disbale all the run_num=1 calls in DBS. Run_num=1 will be OK when logical_file_name is given.\n", " # YG Jan. 
15 2019\n", " #\n", " if (run_num != -1 and logical_file_name ==''):\n", " for r in parseRunRange(run_num):\n", " if isinstance(r, basestring) or isinstance(r, int) or isinstance(r, long): \n", " if r == 1 or r == '1':\n", " dbsExceptionHandler(\"dbsException-invalid-input\", \"Run_num=1 is not a valid input.\",\n", " self.logger.exception)\n", " elif isinstance(r, run_tuple):\n", " if r[0] == r[1]:\n", " dbsExceptionHandler('dbsException-invalid-input', \"DBS run range must be apart at least by 1.\", \n", " self.logger.exception)\n", " elif r[0] <= 1 <= r[1]:\n", " dbsExceptionHandler(\"dbsException-invalid-input\", \"Run_num=1 is not a valid input.\",\n", " self.logger.exception) \n", "\n", " if( dataset and ( dataset == \"/%/%/%\" or dataset== \"/%\" or dataset == \"/%/%\" ) ):\n", " dataset=''\n", " elif( dataset and ( dataset.find('%') != -1 ) ) :\n", " junk, primary_ds_name, processed_ds_name, data_tier_name = dataset.split('/')\n", " dataset = ''\n", " if ( primary_ds_name == '%' ):\n", " primary_ds_name = ''\n", " if( processed_ds_name == '%' ):\n", " processed_ds_name = ''\n", " if ( data_tier_name == '%' ):\n", " data_tier_name = ''\n", "\n", " try:\n", " dataset_id = int(dataset_id)\n", " except:\n", " dbsExceptionHandler(\"dbsException-invalid-input2\", \"Invalid Input for dataset_id that has to be an int.\",\n", " self.logger.exception, 'dataset_id has to be an int.')\n", " if create_by.find('*')!=-1 or create_by.find('%')!=-1 or last_modified_by.find('*')!=-1\\\n", " or last_modified_by.find('%')!=-1:\n", " dbsExceptionHandler(\"dbsException-invalid-input2\", \"Invalid Input for create_by or last_modified_by.\\\n", " No wildcard allowed.\", self.logger.exception, 'No wildcards allowed for create_by or last_modified_by')\n", " try:\n", " if isinstance(min_cdate, basestring) and ('*' in min_cdate or '%' in min_cdate):\n", " min_cdate = 0\n", " else:\n", " try:\n", " min_cdate = int(min_cdate)\n", " except:\n", " dbsExceptionHandler(\"dbsException-invalid-input\", \"invalid input for min_cdate\")\n", " \n", " if isinstance(max_cdate, basestring) and ('*' in max_cdate or '%' in max_cdate):\n", " max_cdate = 0\n", " else:\n", " try:\n", " max_cdate = int(max_cdate)\n", " except:\n", " dbsExceptionHandler(\"dbsException-invalid-input\", \"invalid input for max_cdate\")\n", " \n", " if isinstance(min_ldate, basestring) and ('*' in min_ldate or '%' in min_ldate):\n", " min_ldate = 0\n", " else:\n", " try:\n", " min_ldate = int(min_ldate)\n", " except:\n", " dbsExceptionHandler(\"dbsException-invalid-input\", \"invalid input for min_ldate\")\n", " \n", " if isinstance(max_ldate, basestring) and ('*' in max_ldate or '%' in max_ldate):\n", " max_ldate = 0\n", " else:\n", " try:\n", " max_ldate = int(max_ldate)\n", " except:\n", " dbsExceptionHandler(\"dbsException-invalid-input\", \"invalid input for max_ldate\")\n", " \n", " if isinstance(cdate, basestring) and ('*' in cdate or '%' in cdate):\n", " cdate = 0\n", " else:\n", " try:\n", " cdate = int(cdate)\n", " except:\n", " dbsExceptionHandler(\"dbsException-invalid-input\", \"invalid input for cdate\")\n", " \n", " if isinstance(ldate, basestring) and ('*' in ldate or '%' in ldate):\n", " ldate = 0\n", " else:\n", " try:\n", " ldate = int(ldate)\n", " except:\n", " dbsExceptionHandler(\"dbsException-invalid-input\", \"invalid input for ldate\")\n", " except dbsException as de:\n", " dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError)\n", " except Exception as ex:\n", " sError = 
\"DBSReaderModel/listDatasets. %s \\n. Exception trace: \\n %s\" \\\n", " % (ex, traceback.format_exc())\n", " dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError)\n", "\n", " detail = detail in (True, 1, \"True\", \"1\", 'true')\n", " try: \n", " return self.dbsDataset.listDatasets(dataset, parent_dataset, is_dataset_valid, release_version, pset_hash,\n", " app_name, output_module_label, global_tag, processing_version, acquisition_era_name, \n", " run_num, physics_group_name, logical_file_name, primary_ds_name, primary_ds_type, processed_ds_name,\n", " data_tier_name, dataset_access_type, prep_id, create_by, last_modified_by,\n", " min_cdate, max_cdate, min_ldate, max_ldate, cdate, ldate, detail, dataset_id)\n", " except dbsException as de:\n", " dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError)\n", " except Exception as ex:\n", " sError = \"DBSReaderModel/listdatasets. %s.\\n Exception trace: \\n %s\" % (ex, traceback.format_exc())\n", " dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError)" ]
[ 0, 0.02127659574468085, 0.014925373134328358, 0.021505376344086023, 0.016, 0.012658227848101266, 0.02040816326530612, 0.08333333333333333, 0, 0, 0.01020408163265306, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.00558659217877095, 0, 0.011235955056179775, 0, 0, 0, 0, 0, 0, 0, 0.012048192771084338, 0, 0, 0, 0.011494252873563218, 0, 0, 0, 0, 0, 0, 0, 0.012195121951219513, 0, 0.012195121951219513, 0, 0.01098901098901099, 0, 0.01098901098901099, 0, 0, 0, 0, 0, 0, 0, 0.011627906976744186, 0, 0.002386634844868735, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.014705882352941176, 0.022727272727272728, 0.023809523809523808, 0.016129032258064516, 0, 0.01282051282051282, 0.023529411764705882, 0.010526315789473684, 0, 0.007462686567164179, 0, 0, 0.009259259259259259, 0.008928571428571428, 0, 0, 0.01818181818181818, 0, 0.021052631578947368, 0, 0.009174311926605505, 0.01818181818181818, 0, 0, 0.01652892561983471, 0.02040816326530612, 0, 0.009174311926605505, 0.03333333333333333, 0, 0.06666666666666667, 0.043478260869565216, 0.08620689655172414, 0.011111111111111112, 0, 0.05128205128205128, 0, 0.05, 0, 0.05263157894736842, 0, 0, 0, 0, 0.0625, 0.00847457627118644, 0.011494252873563218, 0.041237113402061855, 0.0196078431372549, 0.008771929824561403, 0.008547008547008548, 0, 0.010752688172043012, 0, 0, 0, 0, 0.041666666666666664, 0.009900990099009901, 0.07692307692307693, 0.010752688172043012, 0, 0, 0, 0, 0.041666666666666664, 0.009900990099009901, 0.07692307692307693, 0.010752688172043012, 0, 0, 0, 0, 0.041666666666666664, 0.009900990099009901, 0.07692307692307693, 0.010752688172043012, 0, 0, 0, 0, 0.041666666666666664, 0.009900990099009901, 0.07692307692307693, 0.012345679012345678, 0, 0, 0, 0, 0.041666666666666664, 0.010309278350515464, 0.07692307692307693, 0.012345679012345678, 0, 0, 0, 0, 0.041666666666666664, 0.010309278350515464, 0, 0.010752688172043012, 0, 0.011764705882352941, 0, 0.007194244604316547, 0, 0, 0.07142857142857142, 0.008403361344537815, 0.029411764705882353, 0.017094017094017096, 0.02197802197802198, 0.02127659574468085, 0, 0.010752688172043012, 0, 0.008928571428571428, 0.014492753623188406 ]
205
0.010317
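The listDatasets record above turns shell-style wildcards into SQL LIKE patterns and splits a wildcarded dataset path into its three components. A minimal hedged sketch of that translation follows; the to_sql_pattern helper is hypothetical and not part of the DBS API.

    # Hypothetical helper, illustrating the "*" -> "%" rewrite and the
    # /primary/processed/tier split performed by listDatasets above.
    def to_sql_pattern(pattern: str) -> str:
        return pattern.replace("*", "%")

    _, primary, processed, tier = to_sql_pattern("/QCD*/Summer12*/AODSIM").split("/")
    print(primary, processed, tier)   # QCD% Summer12% AODSIM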
def hist_calls_with_dims(**dims): """Decorator to check the distribution of return values of a function with dimensions. """ def hist_wrapper(fn): @functools.wraps(fn) def fn_wrapper(*args, **kwargs): _histogram = histogram( "%s_calls" % pyformance.registry.get_qualname(fn), **dims) rtn = fn(*args, **kwargs) if type(rtn) in (int, float): _histogram.add(rtn) return rtn return fn_wrapper return hist_wrapper
[ "def", "hist_calls_with_dims", "(", "*", "*", "dims", ")", ":", "def", "hist_wrapper", "(", "fn", ")", ":", "@", "functools", ".", "wraps", "(", "fn", ")", "def", "fn_wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "_histogram", "=", "histogram", "(", "\"%s_calls\"", "%", "pyformance", ".", "registry", ".", "get_qualname", "(", "fn", ")", ",", "*", "*", "dims", ")", "rtn", "=", "fn", "(", "*", "args", ",", "*", "*", "kwargs", ")", "if", "type", "(", "rtn", ")", "in", "(", "int", ",", "float", ")", ":", "_histogram", ".", "add", "(", "rtn", ")", "return", "rtn", "return", "fn_wrapper", "return", "hist_wrapper" ]
34.533333
0.00188
[ "def hist_calls_with_dims(**dims):\n", " \"\"\"Decorator to check the distribution of return values of a\n", " function with dimensions.\n", " \"\"\"\n", " def hist_wrapper(fn):\n", " @functools.wraps(fn)\n", " def fn_wrapper(*args, **kwargs):\n", " _histogram = histogram(\n", " \"%s_calls\" % pyformance.registry.get_qualname(fn), **dims)\n", " rtn = fn(*args, **kwargs)\n", " if type(rtn) in (int, float):\n", " _histogram.add(rtn)\n", " return rtn\n", " return fn_wrapper\n", " return hist_wrapper" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.043478260869565216 ]
15
0.002899
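A minimal usage sketch for the decorator in the record above, assuming the pyformance-based histogram helper from its module is importable; the function name and the region dimension are illustrative only.

    @hist_calls_with_dims(region="us-east-1")
    def fetch_latency():
        return 42.0

    # Each call whose return value is an int or float is added to the
    # "fetch_latency_calls" histogram, tagged with {"region": "us-east-1"}.
    fetch_latency()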
def get_referer(req, replace_ampersands=False): """ Return the referring page of a request. Referer (wikipedia): Referer is a common misspelling of the word "referrer"; so common, in fact, that it made it into the official specification of HTTP. When visiting a webpage, the referer or referring page is the URL of the previous webpage from which a link was followed. @param req: request @param replace_ampersands: if 1, replace & by &amp; in url (correct HTML cannot contain & characters alone) """ try: referer = req.headers_in['Referer'] if replace_ampersands == 1: return referer.replace('&', '&amp;') return referer except KeyError: return ''
[ "def", "get_referer", "(", "req", ",", "replace_ampersands", "=", "False", ")", ":", "try", ":", "referer", "=", "req", ".", "headers_in", "[", "'Referer'", "]", "if", "replace_ampersands", "==", "1", ":", "return", "referer", ".", "replace", "(", "'&'", ",", "'&amp;'", ")", "return", "referer", "except", "KeyError", ":", "return", "''" ]
41.611111
0.001305
[ "def get_referer(req, replace_ampersands=False):\n", " \"\"\" Return the referring page of a request.\n", " Referer (wikipedia): Referer is a common misspelling of the word\n", " \"referrer\"; so common, in fact, that it made it into the official\n", " specification of HTTP. When visiting a webpage, the referer or\n", " referring page is the URL of the previous webpage from which a link was\n", " followed.\n", " @param req: request\n", " @param replace_ampersands: if 1, replace & by &amp; in url\n", " (correct HTML cannot contain & characters alone)\n", " \"\"\"\n", " try:\n", " referer = req.headers_in['Referer']\n", " if replace_ampersands == 1:\n", " return referer.replace('&', '&amp;')\n", " return referer\n", " except KeyError:\n", " return ''" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.058823529411764705 ]
18
0.003268
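A hedged sketch of calling get_referer above with a stand-in request object; DummyReq is hypothetical, standing in for a mod_python-style request whose headers_in mapping carries the Referer header.

    class DummyReq:
        headers_in = {"Referer": "https://example.org/page?a=1&b=2"}

    print(get_referer(DummyReq()))                        # URL returned unchanged
    print(get_referer(DummyReq(), replace_ampersands=1))  # '&' replaced by '&amp;'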
def extractall(filename, directory, backend='auto', auto_create_dir=False): ''' :param backend: auto, patool or zipfile :param filename: path to archive file :param directory: directory to extract to :param auto_create_dir: auto create directory ''' Archive(filename, backend).extractall(directory, auto_create_dir=auto_create_dir)
[ "def", "extractall", "(", "filename", ",", "directory", ",", "backend", "=", "'auto'", ",", "auto_create_dir", "=", "False", ")", ":", "Archive", "(", "filename", ",", "backend", ")", ".", "extractall", "(", "directory", ",", "auto_create_dir", "=", "auto_create_dir", ")" ]
43.666667
0.002494
[ "def extractall(filename, directory, backend='auto', auto_create_dir=False):\n", " '''\n", " :param backend: auto, patool or zipfile\n", " :param filename: path to archive file\n", " :param directory: directory to extract to\n", " :param auto_create_dir: auto create directory\n", " '''\n", " Archive(filename, backend).extractall(directory,\n", " auto_create_dir=auto_create_dir)" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0.013513513513513514 ]
9
0.001502
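The extractall record above is a thin wrapper around the Archive class; a hedged one-line usage sketch with made-up paths.

    # Hypothetical paths; 'out/' is created automatically when requested.
    extractall("data.zip", "out/", backend="auto", auto_create_dir=True)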
def dispatch(self, *args, **kwargs): """This decorator sets this view to have restricted permissions.""" return super(AnimalYearArchive, self).dispatch(*args, **kwargs)
[ "def", "dispatch", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "super", "(", "AnimalYearArchive", ",", "self", ")", ".", "dispatch", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
60.666667
0.01087
[ "def dispatch(self, *args, **kwargs):\n", " \"\"\"This decorator sets this view to have restricted permissions.\"\"\"\n", " return super(AnimalYearArchive, self).dispatch(*args, **kwargs)" ]
[ 0, 0.013157894736842105, 0.014084507042253521 ]
3
0.009081
def setChatPhoto(self, chat_id, photo): """ See: https://core.telegram.org/bots/api#setchatphoto """ p = _strip(locals(), more=['photo']) return self._api_request_with_file('setChatPhoto', _rectify(p), 'photo', photo)
[ "def", "setChatPhoto", "(", "self", ",", "chat_id", ",", "photo", ")", ":", "p", "=", "_strip", "(", "locals", "(", ")", ",", "more", "=", "[", "'photo'", "]", ")", "return", "self", ".", "_api_request_with_file", "(", "'setChatPhoto'", ",", "_rectify", "(", "p", ")", ",", "'photo'", ",", "photo", ")" ]
59.5
0.012448
[ "def setChatPhoto(self, chat_id, photo):\n", " \"\"\" See: https://core.telegram.org/bots/api#setchatphoto \"\"\"\n", " p = _strip(locals(), more=['photo'])\n", " return self._api_request_with_file('setChatPhoto', _rectify(p), 'photo', photo)" ]
[ 0, 0.014492753623188406, 0, 0.022988505747126436 ]
4
0.00937
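A hedged usage sketch for the setChatPhoto method above, following the linked Telegram Bot API page; bot stands for an instance of the surrounding class, and the chat id and file name are made up.

    with open("group_photo.jpg", "rb") as photo:
        bot.setChatPhoto(chat_id=-1001234567890, photo=photo)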
def before_add_field(self, field): """ If extract_fields is set to True, then '*' fields will be removed and each individual field will read from the model meta data and added. """ if self.extract_fields and field.name == '*': field.ignore = True fields = [model_field.column for model_field in self.model._meta.fields] self.add_fields(fields)
[ "def", "before_add_field", "(", "self", ",", "field", ")", ":", "if", "self", ".", "extract_fields", "and", "field", ".", "name", "==", "'*'", ":", "field", ".", "ignore", "=", "True", "fields", "=", "[", "model_field", ".", "column", "for", "model_field", "in", "self", ".", "model", ".", "_meta", ".", "fields", "]", "self", ".", "add_fields", "(", "fields", ")" ]
45.666667
0.009547
[ "def before_add_field(self, field):\n", " \"\"\"\n", " If extract_fields is set to True, then '*' fields will be removed and each\n", " individual field will read from the model meta data and added.\n", " \"\"\"\n", " if self.extract_fields and field.name == '*':\n", " field.ignore = True\n", " fields = [model_field.column for model_field in self.model._meta.fields]\n", " self.add_fields(fields)" ]
[ 0, 0.08333333333333333, 0.012048192771084338, 0, 0, 0, 0, 0.011764705882352941, 0.02857142857142857 ]
9
0.01508
def decode(self, litmap): """Convert the DNF to an expression.""" return Or(*[And(*[litmap[idx] for idx in clause]) for clause in self.clauses])
[ "def", "decode", "(", "self", ",", "litmap", ")", ":", "return", "Or", "(", "*", "[", "And", "(", "*", "[", "litmap", "[", "idx", "]", "for", "idx", "in", "clause", "]", ")", "for", "clause", "in", "self", ".", "clauses", "]", ")" ]
44.25
0.011111
[ "def decode(self, litmap):\n", " \"\"\"Convert the DNF to an expression.\"\"\"\n", " return Or(*[And(*[litmap[idx] for idx in clause])\n", " for clause in self.clauses])" ]
[ 0, 0.020833333333333332, 0, 0.020833333333333332 ]
4
0.010417
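The decode record above rebuilds Or(And(...)) terms from clause indices via a literal map. A hedged illustration of the input shapes it expects, with string stand-ins for the literal expressions the real litmap would hold.

    litmap = {1: "a", 2: "b", -1: "~a", 3: "c"}
    clauses = [(1, 2), (-1, 3)]
    dnf = [[litmap[idx] for idx in clause] for clause in clauses]
    print(dnf)   # [['a', 'b'], ['~a', 'c']]  ->  Or(And(a, b), And(~a, c))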
def date_to_long_form_string(dt, locale_ = 'en_US.utf8'): '''dt should be a datetime.date object.''' if locale_: old_locale = locale.getlocale() locale.setlocale(locale.LC_ALL, locale_) v = dt.strftime("%A %B %d %Y") if locale_: locale.setlocale(locale.LC_ALL, old_locale) return v
[ "def", "date_to_long_form_string", "(", "dt", ",", "locale_", "=", "'en_US.utf8'", ")", ":", "if", "locale_", ":", "old_locale", "=", "locale", ".", "getlocale", "(", ")", "locale", ".", "setlocale", "(", "locale", ".", "LC_ALL", ",", "locale_", ")", "v", "=", "dt", ".", "strftime", "(", "\"%A %B %d %Y\"", ")", "if", "locale_", ":", "locale", ".", "setlocale", "(", "locale", ".", "LC_ALL", ",", "old_locale", ")", "return", "v" ]
35.222222
0.009231
[ "def date_to_long_form_string(dt, locale_ = 'en_US.utf8'):\n", " '''dt should be a datetime.date object.'''\n", " if locale_:\n", " old_locale = locale.getlocale()\n", " locale.setlocale(locale.LC_ALL, locale_)\n", " v = dt.strftime(\"%A %B %d %Y\")\n", " if locale_:\n", " locale.setlocale(locale.LC_ALL, old_locale)\n", " return v" ]
[ 0.034482758620689655, 0, 0, 0, 0, 0, 0, 0, 0.08333333333333333 ]
9
0.013091
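A quick hedged example of the formatter above; passing locale_=None skips the setlocale calls, so the output shown assumes an English locale.

    import datetime

    # 2020-03-01 fell on a Sunday, so this prints "Sunday March 01 2020".
    print(date_to_long_form_string(datetime.date(2020, 3, 1), locale_=None))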
def first_up(ofile, Rec, file_type): """ writes the header for a MagIC template file """ keylist = [] pmag_out = open(ofile, 'a') outstring = "tab \t" + file_type + "\n" pmag_out.write(outstring) keystring = "" for key in list(Rec.keys()): keystring = keystring + '\t' + key keylist.append(key) keystring = keystring + '\n' pmag_out.write(keystring[1:]) pmag_out.close() return keylist
[ "def", "first_up", "(", "ofile", ",", "Rec", ",", "file_type", ")", ":", "keylist", "=", "[", "]", "pmag_out", "=", "open", "(", "ofile", ",", "'a'", ")", "outstring", "=", "\"tab \\t\"", "+", "file_type", "+", "\"\\n\"", "pmag_out", ".", "write", "(", "outstring", ")", "keystring", "=", "\"\"", "for", "key", "in", "list", "(", "Rec", ".", "keys", "(", ")", ")", ":", "keystring", "=", "keystring", "+", "'\\t'", "+", "key", "keylist", ".", "append", "(", "key", ")", "keystring", "=", "keystring", "+", "'\\n'", "pmag_out", ".", "write", "(", "keystring", "[", "1", ":", "]", ")", "pmag_out", ".", "close", "(", ")", "return", "keylist" ]
27.375
0.002208
[ "def first_up(ofile, Rec, file_type):\n", " \"\"\"\n", " writes the header for a MagIC template file\n", " \"\"\"\n", " keylist = []\n", " pmag_out = open(ofile, 'a')\n", " outstring = \"tab \\t\" + file_type + \"\\n\"\n", " pmag_out.write(outstring)\n", " keystring = \"\"\n", " for key in list(Rec.keys()):\n", " keystring = keystring + '\\t' + key\n", " keylist.append(key)\n", " keystring = keystring + '\\n'\n", " pmag_out.write(keystring[1:])\n", " pmag_out.close()\n", " return keylist" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.05555555555555555 ]
16
0.003472
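A hedged sketch of the MagIC header writer above; the record dict and file name are made up, and the key order follows ordinary dict iteration order.

    rec = {"er_location_name": "Site1", "er_site_name": "s1"}
    keys = first_up("locations.txt", rec, "er_locations")
    # locations.txt now ends with two tab-delimited header lines:
    #   tab \ter_locations
    #   er_location_name\ter_site_name
    print(keys)   # ['er_location_name', 'er_site_name']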
def color(requestContext, seriesList, theColor): """ Assigns the given color to the seriesList Example:: &target=color(collectd.hostname.cpu.0.user, 'green') &target=color(collectd.hostname.cpu.0.system, 'ff0000') &target=color(collectd.hostname.cpu.0.idle, 'gray') &target=color(collectd.hostname.cpu.0.idle, '6464ffaa') """ for series in seriesList: series.color = theColor return seriesList
[ "def", "color", "(", "requestContext", ",", "seriesList", ",", "theColor", ")", ":", "for", "series", "in", "seriesList", ":", "series", ".", "color", "=", "theColor", "return", "seriesList" ]
29.733333
0.002174
[ "def color(requestContext, seriesList, theColor):\n", " \"\"\"\n", " Assigns the given color to the seriesList\n", "\n", " Example::\n", "\n", " &target=color(collectd.hostname.cpu.0.user, 'green')\n", " &target=color(collectd.hostname.cpu.0.system, 'ff0000')\n", " &target=color(collectd.hostname.cpu.0.idle, 'gray')\n", " &target=color(collectd.hostname.cpu.0.idle, '6464ffaa')\n", "\n", " \"\"\"\n", " for series in seriesList:\n", " series.color = theColor\n", " return seriesList" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.047619047619047616 ]
15
0.003175
def dumps(o, preserve=False): """Stringifies input dict as toml Args: o: Object to dump into toml preserve: Boolean parameter. If true, preserve inline tables. Returns: String containing the toml corresponding to dict """ retval = "" addtoretval, sections = _dump_sections(o, "") retval += addtoretval while sections != {}: newsections = {} for section in sections: addtoretval, addtosections = _dump_sections(sections[section], section, preserve) if addtoretval or (not addtoretval and not addtosections): if retval and retval[-2:] != "\n\n": retval += "\n" retval += "[" + section + "]\n" if addtoretval: retval += addtoretval for s in addtosections: newsections[section + "." + s] = addtosections[s] sections = newsections return retval
[ "def", "dumps", "(", "o", ",", "preserve", "=", "False", ")", ":", "retval", "=", "\"\"", "addtoretval", ",", "sections", "=", "_dump_sections", "(", "o", ",", "\"\"", ")", "retval", "+=", "addtoretval", "while", "sections", "!=", "{", "}", ":", "newsections", "=", "{", "}", "for", "section", "in", "sections", ":", "addtoretval", ",", "addtosections", "=", "_dump_sections", "(", "sections", "[", "section", "]", ",", "section", ",", "preserve", ")", "if", "addtoretval", "or", "(", "not", "addtoretval", "and", "not", "addtosections", ")", ":", "if", "retval", "and", "retval", "[", "-", "2", ":", "]", "!=", "\"\\n\\n\"", ":", "retval", "+=", "\"\\n\"", "retval", "+=", "\"[\"", "+", "section", "+", "\"]\\n\"", "if", "addtoretval", ":", "retval", "+=", "addtoretval", "for", "s", "in", "addtosections", ":", "newsections", "[", "section", "+", "\".\"", "+", "s", "]", "=", "addtosections", "[", "s", "]", "sections", "=", "newsections", "return", "retval" ]
33.133333
0.000978
[ "def dumps(o, preserve=False):\n", " \"\"\"Stringifies input dict as toml\n", "\n", " Args:\n", " o: Object to dump into toml\n", "\n", " preserve: Boolean parameter. If true, preserve inline tables.\n", "\n", " Returns:\n", " String containing the toml corresponding to dict\n", " \"\"\"\n", "\n", " retval = \"\"\n", " addtoretval, sections = _dump_sections(o, \"\")\n", " retval += addtoretval\n", " while sections != {}:\n", " newsections = {}\n", " for section in sections:\n", " addtoretval, addtosections = _dump_sections(sections[section],\n", " section, preserve)\n", " if addtoretval or (not addtoretval and not addtosections):\n", " if retval and retval[-2:] != \"\\n\\n\":\n", " retval += \"\\n\"\n", " retval += \"[\" + section + \"]\\n\"\n", " if addtoretval:\n", " retval += addtoretval\n", " for s in addtosections:\n", " newsections[section + \".\" + s] = addtosections[s]\n", " sections = newsections\n", " return retval" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.058823529411764705 ]
30
0.001961
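A hedged example of the TOML serializer above for a single nested table; the exact spacing is whatever _dump_sections produces, shown here with the library's usual key = value form.

    print(dumps({"server": {"host": "127.0.0.1", "port": 8080}}))
    # Expected shape of the output:
    # [server]
    # host = "127.0.0.1"
    # port = 8080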
def abs_energy(self, x): """ As in tsfresh `abs_energy <https://github.com/blue-yonder/tsfresh/blob/master/tsfresh/feature_extraction/\ feature_calculators.py#L390>`_ \ Returns the absolute energy of the time series which is the sum over the squared values\ .. math:: E=\\sum_{i=1,\ldots, n}x_i^2 :param x: the time series to calculate the feature of :type x: pandas.Series :return: the value of this feature :rtype: float """ _energy = feature_calculators.abs_energy(x) logging.debug("abs energy by tsfresh calculated") return _energy
[ "def", "abs_energy", "(", "self", ",", "x", ")", ":", "_energy", "=", "feature_calculators", ".", "abs_energy", "(", "x", ")", "logging", ".", "debug", "(", "\"abs energy by tsfresh calculated\"", ")", "return", "_energy" ]
33.5
0.014514
[ "def abs_energy(self, x):\n", " \"\"\"\n", " As in tsfresh `abs_energy <https://github.com/blue-yonder/tsfresh/blob/master/tsfresh/feature_extraction/\\\n", " feature_calculators.py#L390>`_ \\\n", " Returns the absolute energy of the time series which is the sum over the squared values\\\n", " \n", "\n", " .. math:: \n", " \n", " E=\\\\sum_{i=1,\\ldots, n}x_i^2\n", " \n", " \n", " :param x: the time series to calculate the feature of\n", " :type x: pandas.Series\n", " :return: the value of this feature\n", " :rtype: float\n", " \"\"\"\n", " _energy = feature_calculators.abs_energy(x)\n", " logging.debug(\"abs energy by tsfresh calculated\")\n", " return _energy" ]
[ 0, 0.08333333333333333, 0.008695652173913044, 0, 0.010309278350515464, 0.1111111111111111, 0, 0.05263157894736842, 0.1111111111111111, 0.024390243902439025, 0.1111111111111111, 0.1111111111111111, 0, 0, 0, 0, 0, 0, 0, 0.045454545454545456 ]
20
0.033463
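The abs_energy wrapper above delegates to tsfresh; the quantity is simply the sum of squared values, e.g. 1 + 4 + 9 = 14 for [1, 2, 3]. A hedged sketch assuming pandas is available, as the record's type hints imply.

    import pandas as pd

    x = pd.Series([1, 2, 3])
    # The wrapper would return 14.0 here, i.e. (x ** 2).sum().
    print((x ** 2).sum())   # 14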
def create_token_generator(input_list): """SQL Generator to select from list of values in Oracle""" ###Generator trick from http://betteratoracle.com/posts/20-how-do-i-bind-a-variable-in-list ###The maximum length of the comma separated list is 4000 characters, therefore we need to split the list ###ORA-01460: unimplemented or unreasonable conversion requested will thrown if list is larger oracle_limit = 4000 grp_list = [] if type(input_list[0]) == int : input_str = ','.join(map(str, input_list)) else: input_str = ','.join(input_list) if len(input_str) >= oracle_limit: index = 0 while True: begin, end = index, index+oracle_limit if end > len(input_str): end = len(input_str) grp_list.append(input_str[begin:end]) break else: index = input_str.rfind(',', begin, end) if index == -1: break grp_list.append(input_str[begin:index]) index += 1 #to remove the leading comma else: grp_list.append(input_str) token_generator = """ WITH TOKEN_GENERATOR AS ( """ binds = {} for index, chunk in enumerate(grp_list): if index: token_generator += """ UNION ALL """ bind = "token_%s" % index token_generator += """SELECT REGEXP_SUBSTR(:{bind}, '[^,]+', 1, LEVEL) token FROM DUAL CONNECT BY LEVEL <= LENGTH(:{bind}) - LENGTH(REPLACE(:{bind}, ',', '')) + 1 """.format(bind=bind) binds.update({bind: chunk}) token_generator += ")" return token_generator, binds
[ "def", "create_token_generator", "(", "input_list", ")", ":", "###Generator trick from http://betteratoracle.com/posts/20-how-do-i-bind-a-variable-in-list", "###The maximum length of the comma separated list is 4000 characters, therefore we need to split the list", "###ORA-01460: unimplemented or unreasonable conversion requested will thrown if list is larger", "oracle_limit", "=", "4000", "grp_list", "=", "[", "]", "if", "type", "(", "input_list", "[", "0", "]", ")", "==", "int", ":", "input_str", "=", "','", ".", "join", "(", "map", "(", "str", ",", "input_list", ")", ")", "else", ":", "input_str", "=", "','", ".", "join", "(", "input_list", ")", "if", "len", "(", "input_str", ")", ">=", "oracle_limit", ":", "index", "=", "0", "while", "True", ":", "begin", ",", "end", "=", "index", ",", "index", "+", "oracle_limit", "if", "end", ">", "len", "(", "input_str", ")", ":", "end", "=", "len", "(", "input_str", ")", "grp_list", ".", "append", "(", "input_str", "[", "begin", ":", "end", "]", ")", "break", "else", ":", "index", "=", "input_str", ".", "rfind", "(", "','", ",", "begin", ",", "end", ")", "if", "index", "==", "-", "1", ":", "break", "grp_list", ".", "append", "(", "input_str", "[", "begin", ":", "index", "]", ")", "index", "+=", "1", "#to remove the leading comma", "else", ":", "grp_list", ".", "append", "(", "input_str", ")", "token_generator", "=", "\"\"\"\n WITH TOKEN_GENERATOR AS (\n \"\"\"", "binds", "=", "{", "}", "for", "index", ",", "chunk", "in", "enumerate", "(", "grp_list", ")", ":", "if", "index", ":", "token_generator", "+=", "\"\"\"\n UNION ALL\n \"\"\"", "bind", "=", "\"token_%s\"", "%", "index", "token_generator", "+=", "\"\"\"SELECT REGEXP_SUBSTR(:{bind}, '[^,]+', 1, LEVEL) token\n FROM DUAL\n CONNECT BY LEVEL <= LENGTH(:{bind}) - LENGTH(REPLACE(:{bind}, ',', '')) + 1\n \"\"\"", ".", "format", "(", "bind", "=", "bind", ")", "binds", ".", "update", "(", "{", "bind", ":", "chunk", "}", ")", "token_generator", "+=", "\")\"", "return", "token_generator", ",", "binds" ]
33.87234
0.015263
[ "def create_token_generator(input_list):\n", " \"\"\"SQL Generator to select from list of values in Oracle\"\"\"\n", " ###Generator trick from http://betteratoracle.com/posts/20-how-do-i-bind-a-variable-in-list\n", " ###The maximum length of the comma separated list is 4000 characters, therefore we need to split the list\n", " ###ORA-01460: unimplemented or unreasonable conversion requested will thrown if list is larger\n", " oracle_limit = 4000\n", " grp_list = []\n", " if type(input_list[0]) == int :\n", " input_str = ','.join(map(str, input_list))\n", " else:\n", " input_str = ','.join(input_list) \n", "\n", " if len(input_str) >= oracle_limit:\n", " index = 0\n", " while True:\n", " begin, end = index, index+oracle_limit\n", "\t if end > len(input_str):\n", " end = len(input_str)\n", " grp_list.append(input_str[begin:end])\n", " break\n", "\t else: \t\n", "\t\tindex = input_str.rfind(',', begin, end)\n", "\t\tif index == -1:\n", "\t\t\tbreak\n", "\t\tgrp_list.append(input_str[begin:index])\n", "\t\tindex += 1 #to remove the leading comma\n", " else:\n", " grp_list.append(input_str)\n", "\n", " token_generator = \"\"\"\n", " WITH TOKEN_GENERATOR AS (\n", " \"\"\"\n", " binds = {}\n", " for index, chunk in enumerate(grp_list):\n", " if index:\n", " token_generator += \"\"\"\n", " UNION ALL\n", " \"\"\"\n", " bind = \"token_%s\" % index\n", " token_generator += \"\"\"SELECT REGEXP_SUBSTR(:{bind}, '[^,]+', 1, LEVEL) token\n", " FROM DUAL\n", " CONNECT BY LEVEL <= LENGTH(:{bind}) - LENGTH(REPLACE(:{bind}, ',', '')) + 1\n", " \"\"\".format(bind=bind)\n", " binds.update({bind: chunk})\n", " token_generator += \")\"\n", "\n", " return token_generator, binds" ]
[ 0, 0, 0.020833333333333332, 0.01818181818181818, 0.020202020202020204, 0, 0, 0.027777777777777776, 0, 0, 0.023809523809523808, 0, 0, 0, 0, 0, 0.06666666666666667, 0.02702702702702703, 0, 0, 0.23076923076923078, 0.023255813953488372, 0.05555555555555555, 0.1111111111111111, 0.023809523809523808, 0.07142857142857142, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011764705882352941, 0, 0.011904761904761904, 0, 0, 0, 0, 0.030303030303030304 ]
47
0.018604
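A hedged example of the Oracle token-generator helper above for a short integer list, where no 4000-character splitting kicks in; the bind name token_0 comes from the code in the record.

    sql, binds = create_token_generator([1, 2, 3])
    print(binds)   # {'token_0': '1,2,3'}
    # sql holds a "WITH TOKEN_GENERATOR AS (...)" block whose
    # REGEXP_SUBSTR / CONNECT BY query expands :token_0 into one row per value.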
def init_file(self, filename, lines, expected, line_offset): """Prepare storage for errors.""" super(_PycodestyleReport, self).init_file( filename, lines, expected, line_offset) self.errors = []
[ "def", "init_file", "(", "self", ",", "filename", ",", "lines", ",", "expected", ",", "line_offset", ")", ":", "super", "(", "_PycodestyleReport", ",", "self", ")", ".", "init_file", "(", "filename", ",", "lines", ",", "expected", ",", "line_offset", ")", "self", ".", "errors", "=", "[", "]" ]
45.2
0.008696
[ "def init_file(self, filename, lines, expected, line_offset):\n", " \"\"\"Prepare storage for errors.\"\"\"\n", " super(_PycodestyleReport, self).init_file(\n", " filename, lines, expected, line_offset)\n", " self.errors = []" ]
[ 0, 0.023809523809523808, 0, 0, 0.041666666666666664 ]
5
0.013095
def detectSonyMylo(self): """Return detection of a Sony Mylo device Detects if the current browser is a Sony Mylo device. """ return UAgentInfo.manuSony in self.__userAgent \ and (UAgentInfo.qtembedded in self.__userAgent or UAgentInfo.mylocom2 in self.__userAgent)
[ "def", "detectSonyMylo", "(", "self", ")", ":", "return", "UAgentInfo", ".", "manuSony", "in", "self", ".", "__userAgent", "and", "(", "UAgentInfo", ".", "qtembedded", "in", "self", ".", "__userAgent", "or", "UAgentInfo", ".", "mylocom2", "in", "self", ".", "__userAgent", ")" ]
39.875
0.009202
[ "def detectSonyMylo(self):\n", " \"\"\"Return detection of a Sony Mylo device\n", "\n", " Detects if the current browser is a Sony Mylo device.\n", " \"\"\"\n", " return UAgentInfo.manuSony in self.__userAgent \\\n", " and (UAgentInfo.qtembedded in self.__userAgent\n", " or UAgentInfo.mylocom2 in self.__userAgent)" ]
[ 0, 0.02, 0, 0, 0, 0, 0, 0.03389830508474576 ]
8
0.006737
def close(self): '''Clean up.''' for path in self._temp_filenames: if os.path.exists(path): os.remove(path)
[ "def", "close", "(", "self", ")", ":", "for", "path", "in", "self", ".", "_temp_filenames", ":", "if", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "os", ".", "remove", "(", "path", ")" ]
29.4
0.013245
[ "def close(self):\n", " '''Clean up.'''\n", " for path in self._temp_filenames:\n", " if os.path.exists(path):\n", " os.remove(path)" ]
[ 0, 0.041666666666666664, 0, 0, 0.03225806451612903 ]
5
0.014785
def index_queryset(self, using=None): """Used when the entire index for model is updated.""" return self.get_model().objects.filter( modified__lte=datetime.datetime.now(), status=STATUS.published )
[ "def", "index_queryset", "(", "self", ",", "using", "=", "None", ")", ":", "return", "self", ".", "get_model", "(", ")", ".", "objects", ".", "filter", "(", "modified__lte", "=", "datetime", ".", "datetime", ".", "now", "(", ")", ",", "status", "=", "STATUS", ".", "published", ")" ]
40
0.008163
[ "def index_queryset(self, using=None):\n", " \"\"\"Used when the entire index for model is updated.\"\"\"\n", " return self.get_model().objects.filter(\n", " modified__lte=datetime.datetime.now(),\n", " status=STATUS.published\n", " )" ]
[ 0, 0.015873015873015872, 0, 0, 0, 0.1111111111111111 ]
6
0.021164
def decode_timeseries(self, resp_ttb, tsobj, convert_timestamp=False): """ Fills an TsObject with the appropriate data and metadata from a TTB-encoded TsGetResp / TsQueryResp. :param resp_ttb: the decoded TTB data :type resp_ttb: TTB-encoded tsqueryrsp or tsgetresp :param tsobj: a TsObject :type tsobj: TsObject :param convert_timestamp: Convert timestamps to datetime objects :type tsobj: boolean """ if resp_ttb is None: return tsobj self.maybe_err_ttb(resp_ttb) # NB: some queries return a BARE 'tsqueryresp' atom # catch that here: if resp_ttb == tsqueryresp_a: return tsobj # The response atom is the first element in the response tuple resp_a = resp_ttb[0] if resp_a == tsputresp_a: return elif resp_a == tsgetresp_a or resp_a == tsqueryresp_a: resp_data = resp_ttb[1] if len(resp_data) == 0: return elif len(resp_data) == 3: resp_colnames = resp_data[0] resp_coltypes = resp_data[1] tsobj.columns = self.decode_timeseries_cols( resp_colnames, resp_coltypes) resp_rows = resp_data[2] tsobj.rows = [] for resp_row in resp_rows: tsobj.rows.append( self.decode_timeseries_row(resp_row, resp_coltypes, convert_timestamp)) else: raise RiakError( "Expected 3-tuple in response, got: {}".format(resp_data)) else: raise RiakError("Unknown TTB response type: {}".format(resp_a))
[ "def", "decode_timeseries", "(", "self", ",", "resp_ttb", ",", "tsobj", ",", "convert_timestamp", "=", "False", ")", ":", "if", "resp_ttb", "is", "None", ":", "return", "tsobj", "self", ".", "maybe_err_ttb", "(", "resp_ttb", ")", "# NB: some queries return a BARE 'tsqueryresp' atom", "# catch that here:", "if", "resp_ttb", "==", "tsqueryresp_a", ":", "return", "tsobj", "# The response atom is the first element in the response tuple", "resp_a", "=", "resp_ttb", "[", "0", "]", "if", "resp_a", "==", "tsputresp_a", ":", "return", "elif", "resp_a", "==", "tsgetresp_a", "or", "resp_a", "==", "tsqueryresp_a", ":", "resp_data", "=", "resp_ttb", "[", "1", "]", "if", "len", "(", "resp_data", ")", "==", "0", ":", "return", "elif", "len", "(", "resp_data", ")", "==", "3", ":", "resp_colnames", "=", "resp_data", "[", "0", "]", "resp_coltypes", "=", "resp_data", "[", "1", "]", "tsobj", ".", "columns", "=", "self", ".", "decode_timeseries_cols", "(", "resp_colnames", ",", "resp_coltypes", ")", "resp_rows", "=", "resp_data", "[", "2", "]", "tsobj", ".", "rows", "=", "[", "]", "for", "resp_row", "in", "resp_rows", ":", "tsobj", ".", "rows", ".", "append", "(", "self", ".", "decode_timeseries_row", "(", "resp_row", ",", "resp_coltypes", ",", "convert_timestamp", ")", ")", "else", ":", "raise", "RiakError", "(", "\"Expected 3-tuple in response, got: {}\"", ".", "format", "(", "resp_data", ")", ")", "else", ":", "raise", "RiakError", "(", "\"Unknown TTB response type: {}\"", ".", "format", "(", "resp_a", ")", ")" ]
37.93617
0.00164
[ "def decode_timeseries(self, resp_ttb, tsobj,\n", " convert_timestamp=False):\n", " \"\"\"\n", " Fills an TsObject with the appropriate data and\n", " metadata from a TTB-encoded TsGetResp / TsQueryResp.\n", "\n", " :param resp_ttb: the decoded TTB data\n", " :type resp_ttb: TTB-encoded tsqueryrsp or tsgetresp\n", " :param tsobj: a TsObject\n", " :type tsobj: TsObject\n", " :param convert_timestamp: Convert timestamps to datetime objects\n", " :type tsobj: boolean\n", " \"\"\"\n", " if resp_ttb is None:\n", " return tsobj\n", "\n", " self.maybe_err_ttb(resp_ttb)\n", "\n", " # NB: some queries return a BARE 'tsqueryresp' atom\n", " # catch that here:\n", " if resp_ttb == tsqueryresp_a:\n", " return tsobj\n", "\n", " # The response atom is the first element in the response tuple\n", " resp_a = resp_ttb[0]\n", " if resp_a == tsputresp_a:\n", " return\n", " elif resp_a == tsgetresp_a or resp_a == tsqueryresp_a:\n", " resp_data = resp_ttb[1]\n", " if len(resp_data) == 0:\n", " return\n", " elif len(resp_data) == 3:\n", " resp_colnames = resp_data[0]\n", " resp_coltypes = resp_data[1]\n", " tsobj.columns = self.decode_timeseries_cols(\n", " resp_colnames, resp_coltypes)\n", " resp_rows = resp_data[2]\n", " tsobj.rows = []\n", " for resp_row in resp_rows:\n", " tsobj.rows.append(\n", " self.decode_timeseries_row(resp_row, resp_coltypes,\n", " convert_timestamp))\n", " else:\n", " raise RiakError(\n", " \"Expected 3-tuple in response, got: {}\".format(resp_data))\n", " else:\n", " raise RiakError(\"Unknown TTB response type: {}\".format(resp_a))" ]
[ 0, 0.019230769230769232, 0.08333333333333333, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.013333333333333334 ]
47
0.002466
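A hedged illustration of the three-element payload the decode_timeseries record above handles for a get/query response; the atoms and codec objects come from the Riak TTB client, so only the tuple shape is sketched here.

    rows = ((1443806900000, 1.5), (1443806901000, 2.5))
    resp_data = (("time", "value"), ("timestamp", "double"), rows)
    # len(resp_data) == 3 selects the branch that decodes columns, then rows.
    colnames, coltypes, rows = resp_data
    print(colnames, coltypes, len(rows))   # ('time', 'value') ('timestamp', 'double') 2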