Column schema and value ranges:

| Column | Type | Min | Max |
|---|---|---|---|
| text | string | length 75 | length 104k |
| code_tokens | sequence | | |
| avg_line_len | float64 | 7.91 | 980 |
| score | float64 | 0 | 0.18 |
| texts | sequence | | |
| scores | sequence | | |
| num_lines | int64 | 3 | 2.77k |
| avg_score | float64 | 0 | 0.37 |
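The scalar columns appear to be derived from the per-line fields: in each preview row below, `num_lines` equals the length of `texts` and `avg_score` equals the mean of `scores`, while `avg_line_len` is close to the mean character length of the lines. The sketch below makes that relationship explicit; the exact definitions and the helper name `recompute_derived` are assumptions inferred from the preview values, not documented behavior, and the derivation of the `score` column is not evident from the preview, so it is left out.

```python
# Minimal sketch (assumed definitions, inferred from the preview values):
#   num_lines    == len(texts)
#   avg_score    == mean(scores)
#   avg_line_len ~= mean character length of the lines in `texts`
def recompute_derived(example: dict) -> dict:
    lines = example["texts"]    # per-line source strings
    scores = example["scores"]  # per-line scores
    n = len(lines)
    return {
        "num_lines": n,
        "avg_score": sum(scores) / len(scores) if scores else 0.0,
        "avg_line_len": sum(len(ln.rstrip("\n")) for ln in lines) / n if n else 0.0,
    }

# For the first preview row below this gives roughly
# {"num_lines": 23, "avg_score": 0.0023, "avg_line_len": 24}.
```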
text:

```python
def remove_binaries(package_dir=False):
    """Remove all binaries for the current platform

    Parameters
    ----------
    package_dir: bool
        If True, remove all binaries from the `resources`
        directory of the qpsphere package. If False,
        remove all binaries from the user's cache directory.
    """
    paths = []

    if package_dir:
        pdir = RESCR_PATH
    else:
        pdir = CACHE_PATH

    for pp in pdir.iterdir():
        if pp.name != "shipped_resources_go_here":
            paths.append(pp)

    for pp in paths:
        pp.unlink()
```
[ "def", "remove_binaries", "(", "package_dir", "=", "False", ")", ":", "paths", "=", "[", "]", "if", "package_dir", ":", "pdir", "=", "RESCR_PATH", "else", ":", "pdir", "=", "CACHE_PATH", "for", "pp", "in", "pdir", ".", "iterdir", "(", ")", ":", "if", "pp", ".", "name", "!=", "\"shipped_resources_go_here\"", ":", "paths", ".", "append", "(", "pp", ")", "for", "pp", "in", "paths", ":", "pp", ".", "unlink", "(", ")" ]
avg_line_len: 24.043478
score: 0.001739
[ "def remove_binaries(package_dir=False):\n", " \"\"\"Remove all binaries for the current platform\n", "\n", " Parameters\n", " ----------\n", " package_dir: bool\n", " If True, remove all binaries from the `resources`\n", " directory of the qpsphere package. If False,\n", " remove all binaries from the user's cache directory.\n", " \"\"\"\n", " paths = []\n", "\n", " if package_dir:\n", " pdir = RESCR_PATH\n", " else:\n", " pdir = CACHE_PATH\n", "\n", " for pp in pdir.iterdir():\n", " if pp.name != \"shipped_resources_go_here\":\n", " paths.append(pp)\n", "\n", " for pp in paths:\n", " pp.unlink()" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.05263157894736842 ]
num_lines: 23
avg_score: 0.002288
text:

```python
def _gather_pillar(pillarenv, pillar_override):
    '''
    Whenever a state run starts, gather the pillar data fresh
    '''
    pillar = salt.pillar.get_pillar(
        __opts__,
        __grains__,
        __opts__['id'],
        __opts__['saltenv'],
        pillar_override=pillar_override,
        pillarenv=pillarenv
    )
    ret = pillar.compile_pillar()
    if pillar_override and isinstance(pillar_override, dict):
        ret.update(pillar_override)
    return ret
```
[ "def", "_gather_pillar", "(", "pillarenv", ",", "pillar_override", ")", ":", "pillar", "=", "salt", ".", "pillar", ".", "get_pillar", "(", "__opts__", ",", "__grains__", ",", "__opts__", "[", "'id'", "]", ",", "__opts__", "[", "'saltenv'", "]", ",", "pillar_override", "=", "pillar_override", ",", "pillarenv", "=", "pillarenv", ")", "ret", "=", "pillar", ".", "compile_pillar", "(", ")", "if", "pillar_override", "and", "isinstance", "(", "pillar_override", ",", "dict", ")", ":", "ret", ".", "update", "(", "pillar_override", ")", "return", "ret" ]
avg_line_len: 28.75
score: 0.002105
[ "def _gather_pillar(pillarenv, pillar_override):\n", " '''\n", " Whenever a state run starts, gather the pillar data fresh\n", " '''\n", " pillar = salt.pillar.get_pillar(\n", " __opts__,\n", " __grains__,\n", " __opts__['id'],\n", " __opts__['saltenv'],\n", " pillar_override=pillar_override,\n", " pillarenv=pillarenv\n", " )\n", " ret = pillar.compile_pillar()\n", " if pillar_override and isinstance(pillar_override, dict):\n", " ret.update(pillar_override)\n", " return ret" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.07142857142857142 ]
num_lines: 16
avg_score: 0.004464
text:

```python
def set_result(self, rval: bool) -> None:
    """ Set the result of the evaluation. If the result is true, prune all of the children that didn't cut it

    :param rval: Result of evaluation
    """
    self.result = rval
    if self.result:
        self.nodes = [pn for pn in self.nodes if pn.result]
```
[ "def", "set_result", "(", "self", ",", "rval", ":", "bool", ")", "->", "None", ":", "self", ".", "result", "=", "rval", "if", "self", ".", "result", ":", "self", ".", "nodes", "=", "[", "pn", "for", "pn", "in", "self", ".", "nodes", "if", "pn", ".", "result", "]" ]
avg_line_len: 39.75
score: 0.009231
[ "def set_result(self, rval: bool) -> None:\n", " \"\"\" Set the result of the evaluation. If the result is true, prune all of the children that didn't cut it\n", "\n", " :param rval: Result of evaluation\n", " \"\"\"\n", " self.result = rval\n", " if self.result:\n", " self.nodes = [pn for pn in self.nodes if pn.result]" ]
[ 0, 0.017543859649122806, 0, 0, 0, 0, 0, 0.015873015873015872 ]
num_lines: 8
avg_score: 0.004177
text:

```python
def on_timer(self, event):
    """Timer event handler

    Parameters
    ----------
    event : instance of Event
        The event.
    """

    # Set relative speed and acceleration
    rel_speed = event.dt
    rel_acc = 0.1

    # Get what's forward
    pf, pr, pl, pu = self._get_directions()

    # Increase speed through acceleration
    # Note that self._speed is relative. We can balance rel_acc and
    # rel_speed to get a nice smooth or direct control
    self._speed += self._acc * rel_acc

    # Reduce speed. Simulate resistance. Using brakes slows down faster.
    # Note that the way that we reduce speed, allows for higher
    # speeds if keys ar bound to higher acc values (i.e. turbo)
    reduce = np.array([0.05, 0.05, 0.05, 0.1, 0.1, 0.1])
    reduce[self._brake > 0] = 0.2
    self._speed -= self._speed * reduce
    if np.abs(self._speed).max() < 0.05:
        self._speed *= 0.0

    # --- Determine new position from translation speed

    if self._speed[:3].any():

        # Create speed vectors, use scale_factor as a reference
        dv = np.array([1.0/d for d in self._flip_factors])
        #
        vf = pf * dv * rel_speed * self._scale_factor
        vr = pr * dv * rel_speed * self._scale_factor
        vu = pu * dv * rel_speed * self._scale_factor
        direction = vf, vr, vu

        # Set position
        center_loc = np.array(self._center, dtype='float32')
        center_loc += (self._speed[0] * direction[0] +
                       self._speed[1] * direction[1] +
                       self._speed[2] * direction[2])
        self._center = tuple(center_loc)

    # --- Determine new orientation from rotation speed

    roll_angle = 0

    # Calculate manual roll (from speed)
    if self._speed[3:].any():
        angleGain = np.array([1.0, 1.5, 1.0]) * 3 * math.pi / 180
        angles = self._speed[3:] * angleGain

        q1 = Quaternion.create_from_axis_angle(angles[0], -1, 0, 0)
        q2 = Quaternion.create_from_axis_angle(angles[1], 0, 1, 0)
        q3 = Quaternion.create_from_axis_angle(angles[2], 0, 0, -1)
        q = q1 * q2 * q3
        self._rotation1 = (q * self._rotation1).normalize()

    # Calculate auto-roll
    if self.auto_roll:
        up = {'x': (1, 0, 0), 'y': (0, 1, 0), 'z': (0, 0, 1)}[self.up[1]]
        up = np.array(up) * {'+': +1, '-': -1}[self.up[0]]

        def angle(p1, p2):
            return np.arccos(p1.dot(p2))
        #au = angle(pu, (0, 0, 1))
        ar = angle(pr, up)
        al = angle(pl, up)
        af = angle(pf, up)
        # Roll angle that's off from being leveled (in unit strength)
        roll_angle = math.sin(0.5*(al - ar))
        # Correct for pitch
        roll_angle *= abs(math.sin(af))  # abs(math.sin(au))
        if abs(roll_angle) < 0.05:
            roll_angle = 0
        if roll_angle:
            # Correct to soften the force at 90 degree angle
            roll_angle = np.sign(roll_angle) * np.abs(roll_angle)**0.5
            # Get correction for this iteration and apply
            angle_correction = 1.0 * roll_angle * math.pi / 180
            q = Quaternion.create_from_axis_angle(angle_correction,
                                                  0, 0, 1)
            self._rotation1 = (q * self._rotation1).normalize()

    # Update
    if self._speed.any() or roll_angle or self._update_from_mouse:
        self._update_from_mouse = False
        self.view_changed()
```
[ "def", "on_timer", "(", "self", ",", "event", ")", ":", "# Set relative speed and acceleration", "rel_speed", "=", "event", ".", "dt", "rel_acc", "=", "0.1", "# Get what's forward", "pf", ",", "pr", ",", "pl", ",", "pu", "=", "self", ".", "_get_directions", "(", ")", "# Increase speed through acceleration", "# Note that self._speed is relative. We can balance rel_acc and", "# rel_speed to get a nice smooth or direct control", "self", ".", "_speed", "+=", "self", ".", "_acc", "*", "rel_acc", "# Reduce speed. Simulate resistance. Using brakes slows down faster.", "# Note that the way that we reduce speed, allows for higher", "# speeds if keys ar bound to higher acc values (i.e. turbo)", "reduce", "=", "np", ".", "array", "(", "[", "0.05", ",", "0.05", ",", "0.05", ",", "0.1", ",", "0.1", ",", "0.1", "]", ")", "reduce", "[", "self", ".", "_brake", ">", "0", "]", "=", "0.2", "self", ".", "_speed", "-=", "self", ".", "_speed", "*", "reduce", "if", "np", ".", "abs", "(", "self", ".", "_speed", ")", ".", "max", "(", ")", "<", "0.05", ":", "self", ".", "_speed", "*=", "0.0", "# --- Determine new position from translation speed", "if", "self", ".", "_speed", "[", ":", "3", "]", ".", "any", "(", ")", ":", "# Create speed vectors, use scale_factor as a reference", "dv", "=", "np", ".", "array", "(", "[", "1.0", "/", "d", "for", "d", "in", "self", ".", "_flip_factors", "]", ")", "#", "vf", "=", "pf", "*", "dv", "*", "rel_speed", "*", "self", ".", "_scale_factor", "vr", "=", "pr", "*", "dv", "*", "rel_speed", "*", "self", ".", "_scale_factor", "vu", "=", "pu", "*", "dv", "*", "rel_speed", "*", "self", ".", "_scale_factor", "direction", "=", "vf", ",", "vr", ",", "vu", "# Set position", "center_loc", "=", "np", ".", "array", "(", "self", ".", "_center", ",", "dtype", "=", "'float32'", ")", "center_loc", "+=", "(", "self", ".", "_speed", "[", "0", "]", "*", "direction", "[", "0", "]", "+", "self", ".", "_speed", "[", "1", "]", "*", "direction", "[", "1", "]", "+", "self", ".", "_speed", "[", "2", "]", "*", "direction", "[", "2", "]", ")", "self", ".", "_center", "=", "tuple", "(", "center_loc", ")", "# --- Determine new orientation from rotation speed", "roll_angle", "=", "0", "# Calculate manual roll (from speed)", "if", "self", ".", "_speed", "[", "3", ":", "]", ".", "any", "(", ")", ":", "angleGain", "=", "np", ".", "array", "(", "[", "1.0", ",", "1.5", ",", "1.0", "]", ")", "*", "3", "*", "math", ".", "pi", "/", "180", "angles", "=", "self", ".", "_speed", "[", "3", ":", "]", "*", "angleGain", "q1", "=", "Quaternion", ".", "create_from_axis_angle", "(", "angles", "[", "0", "]", ",", "-", "1", ",", "0", ",", "0", ")", "q2", "=", "Quaternion", ".", "create_from_axis_angle", "(", "angles", "[", "1", "]", ",", "0", ",", "1", ",", "0", ")", "q3", "=", "Quaternion", ".", "create_from_axis_angle", "(", "angles", "[", "2", "]", ",", "0", ",", "0", ",", "-", "1", ")", "q", "=", "q1", "*", "q2", "*", "q3", "self", ".", "_rotation1", "=", "(", "q", "*", "self", ".", "_rotation1", ")", ".", "normalize", "(", ")", "# Calculate auto-roll", "if", "self", ".", "auto_roll", ":", "up", "=", "{", "'x'", ":", "(", "1", ",", "0", ",", "0", ")", ",", "'y'", ":", "(", "0", ",", "1", ",", "0", ")", ",", "'z'", ":", "(", "0", ",", "0", ",", "1", ")", "}", "[", "self", ".", "up", "[", "1", "]", "]", "up", "=", "np", ".", "array", "(", "up", ")", "*", "{", "'+'", ":", "+", "1", ",", "'-'", ":", "-", "1", "}", "[", "self", ".", "up", "[", "0", "]", "]", "def", "angle", "(", "p1", ",", "p2", ")", ":", "return", "np", ".", 
"arccos", "(", "p1", ".", "dot", "(", "p2", ")", ")", "#au = angle(pu, (0, 0, 1))", "ar", "=", "angle", "(", "pr", ",", "up", ")", "al", "=", "angle", "(", "pl", ",", "up", ")", "af", "=", "angle", "(", "pf", ",", "up", ")", "# Roll angle that's off from being leveled (in unit strength)", "roll_angle", "=", "math", ".", "sin", "(", "0.5", "*", "(", "al", "-", "ar", ")", ")", "# Correct for pitch", "roll_angle", "*=", "abs", "(", "math", ".", "sin", "(", "af", ")", ")", "# abs(math.sin(au))", "if", "abs", "(", "roll_angle", ")", "<", "0.05", ":", "roll_angle", "=", "0", "if", "roll_angle", ":", "# Correct to soften the force at 90 degree angle", "roll_angle", "=", "np", ".", "sign", "(", "roll_angle", ")", "*", "np", ".", "abs", "(", "roll_angle", ")", "**", "0.5", "# Get correction for this iteration and apply", "angle_correction", "=", "1.0", "*", "roll_angle", "*", "math", ".", "pi", "/", "180", "q", "=", "Quaternion", ".", "create_from_axis_angle", "(", "angle_correction", ",", "0", ",", "0", ",", "1", ")", "self", ".", "_rotation1", "=", "(", "q", "*", "self", ".", "_rotation1", ")", ".", "normalize", "(", ")", "# Update", "if", "self", ".", "_speed", ".", "any", "(", ")", "or", "roll_angle", "or", "self", ".", "_update_from_mouse", ":", "self", ".", "_update_from_mouse", "=", "False", "self", ".", "view_changed", "(", ")" ]
avg_line_len: 38.542553
score: 0.000807
[ "def on_timer(self, event):\n", " \"\"\"Timer event handler\n", "\n", " Parameters\n", " ----------\n", " event : instance of Event\n", " The event.\n", " \"\"\"\n", "\n", " # Set relative speed and acceleration\n", " rel_speed = event.dt\n", " rel_acc = 0.1\n", "\n", " # Get what's forward\n", " pf, pr, pl, pu = self._get_directions()\n", "\n", " # Increase speed through acceleration\n", " # Note that self._speed is relative. We can balance rel_acc and\n", " # rel_speed to get a nice smooth or direct control\n", " self._speed += self._acc * rel_acc\n", "\n", " # Reduce speed. Simulate resistance. Using brakes slows down faster.\n", " # Note that the way that we reduce speed, allows for higher\n", " # speeds if keys ar bound to higher acc values (i.e. turbo)\n", " reduce = np.array([0.05, 0.05, 0.05, 0.1, 0.1, 0.1])\n", " reduce[self._brake > 0] = 0.2\n", " self._speed -= self._speed * reduce\n", " if np.abs(self._speed).max() < 0.05:\n", " self._speed *= 0.0\n", "\n", " # --- Determine new position from translation speed\n", "\n", " if self._speed[:3].any():\n", "\n", " # Create speed vectors, use scale_factor as a reference\n", " dv = np.array([1.0/d for d in self._flip_factors])\n", " #\n", " vf = pf * dv * rel_speed * self._scale_factor\n", " vr = pr * dv * rel_speed * self._scale_factor\n", " vu = pu * dv * rel_speed * self._scale_factor\n", " direction = vf, vr, vu\n", "\n", " # Set position\n", " center_loc = np.array(self._center, dtype='float32')\n", " center_loc += (self._speed[0] * direction[0] +\n", " self._speed[1] * direction[1] +\n", " self._speed[2] * direction[2])\n", " self._center = tuple(center_loc)\n", "\n", " # --- Determine new orientation from rotation speed\n", "\n", " roll_angle = 0\n", "\n", " # Calculate manual roll (from speed)\n", " if self._speed[3:].any():\n", " angleGain = np.array([1.0, 1.5, 1.0]) * 3 * math.pi / 180\n", " angles = self._speed[3:] * angleGain\n", "\n", " q1 = Quaternion.create_from_axis_angle(angles[0], -1, 0, 0)\n", " q2 = Quaternion.create_from_axis_angle(angles[1], 0, 1, 0)\n", " q3 = Quaternion.create_from_axis_angle(angles[2], 0, 0, -1)\n", " q = q1 * q2 * q3\n", " self._rotation1 = (q * self._rotation1).normalize()\n", "\n", " # Calculate auto-roll\n", " if self.auto_roll:\n", " up = {'x': (1, 0, 0), 'y': (0, 1, 0), 'z': (0, 0, 1)}[self.up[1]]\n", " up = np.array(up) * {'+': +1, '-': -1}[self.up[0]]\n", "\n", " def angle(p1, p2):\n", " return np.arccos(p1.dot(p2))\n", " #au = angle(pu, (0, 0, 1))\n", " ar = angle(pr, up)\n", " al = angle(pl, up)\n", " af = angle(pf, up)\n", " # Roll angle that's off from being leveled (in unit strength)\n", " roll_angle = math.sin(0.5*(al - ar))\n", " # Correct for pitch\n", " roll_angle *= abs(math.sin(af)) # abs(math.sin(au))\n", " if abs(roll_angle) < 0.05:\n", " roll_angle = 0\n", " if roll_angle:\n", " # Correct to soften the force at 90 degree angle\n", " roll_angle = np.sign(roll_angle) * np.abs(roll_angle)**0.5\n", " # Get correction for this iteration and apply\n", " angle_correction = 1.0 * roll_angle * math.pi / 180\n", " q = Quaternion.create_from_axis_angle(angle_correction,\n", " 0, 0, 1)\n", " self._rotation1 = (q * self._rotation1).normalize()\n", "\n", " # Update\n", " if self._speed.any() or roll_angle or self._update_from_mouse:\n", " self._update_from_mouse = False\n", " self.view_changed()" ]
[ 0, 0.03225806451612903, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.02564102564102564, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.03225806451612903 ]
num_lines: 94
avg_score: 0.000959
text:

```python
def record_source(self, src, prg=''):
    """
    function to collect raw data from the web and hard drive
    Examples - new source file for ontologies, email contacts list, folder for xmas photos
    """
    self._log(self.logFileSource , force_to_string(src), prg)
```
[ "def", "record_source", "(", "self", ",", "src", ",", "prg", "=", "''", ")", ":", "self", ".", "_log", "(", "self", ".", "logFileSource", ",", "force_to_string", "(", "src", ")", ",", "prg", ")" ]
avg_line_len: 47
score: 0.013937
[ "def record_source(self, src, prg=''):\n", " \"\"\"\n", " function to collect raw data from the web and hard drive\n", " Examples - new source file for ontologies, email contacts list, folder for xmas photos\n", " \"\"\"\n", " self._log(self.logFileSource , force_to_string(src), prg)" ]
[ 0, 0.08333333333333333, 0, 0.010526315789473684, 0, 0.03076923076923077 ]
num_lines: 6
avg_score: 0.020771
text:

```python
def _dtype_to_default_stata_fmt(dtype, column, dta_version=114,
                                force_strl=False):
    """
    Map numpy dtype to stata's default format for this type. Not terribly
    important since users can change this in Stata. Semantics are

    object  -> "%DDs" where DD is the length of the string. If not a string,
               raise ValueError
    float64 -> "%10.0g"
    float32 -> "%9.0g"
    int64   -> "%9.0g"
    int32   -> "%12.0g"
    int16   -> "%8.0g"
    int8    -> "%8.0g"
    strl    -> "%9s"
    """
    # TODO: Refactor to combine type with format
    # TODO: expand this to handle a default datetime format?
    if dta_version < 117:
        max_str_len = 244
    else:
        max_str_len = 2045
    if force_strl:
        return '%9s'
    if dtype.type == np.object_:
        inferred_dtype = infer_dtype(column, skipna=True)
        if not (inferred_dtype in ('string', 'unicode') or
                len(column) == 0):
            raise ValueError('Column `{col}` cannot be exported.\n\nOnly '
                             'string-like object arrays containing all '
                             'strings or a mix of strings and None can be '
                             'exported. Object arrays containing only null '
                             'values are prohibited. Other object types'
                             'cannot be exported and must first be converted '
                             'to one of the supported '
                             'types.'.format(col=column.name))
        itemsize = max_len_string_array(ensure_object(column.values))
        if itemsize > max_str_len:
            if dta_version >= 117:
                return '%9s'
            else:
                raise ValueError(excessive_string_length_error % column.name)
        return "%" + str(max(itemsize, 1)) + "s"
    elif dtype == np.float64:
        return "%10.0g"
    elif dtype == np.float32:
        return "%9.0g"
    elif dtype == np.int32:
        return "%12.0g"
    elif dtype == np.int8 or dtype == np.int16:
        return "%8.0g"
    else:  # pragma : no cover
        raise NotImplementedError(
            "Data type {dtype} not supported.".format(dtype=dtype))
```
[ "def", "_dtype_to_default_stata_fmt", "(", "dtype", ",", "column", ",", "dta_version", "=", "114", ",", "force_strl", "=", "False", ")", ":", "# TODO: Refactor to combine type with format", "# TODO: expand this to handle a default datetime format?", "if", "dta_version", "<", "117", ":", "max_str_len", "=", "244", "else", ":", "max_str_len", "=", "2045", "if", "force_strl", ":", "return", "'%9s'", "if", "dtype", ".", "type", "==", "np", ".", "object_", ":", "inferred_dtype", "=", "infer_dtype", "(", "column", ",", "skipna", "=", "True", ")", "if", "not", "(", "inferred_dtype", "in", "(", "'string'", ",", "'unicode'", ")", "or", "len", "(", "column", ")", "==", "0", ")", ":", "raise", "ValueError", "(", "'Column `{col}` cannot be exported.\\n\\nOnly '", "'string-like object arrays containing all '", "'strings or a mix of strings and None can be '", "'exported. Object arrays containing only null '", "'values are prohibited. Other object types'", "'cannot be exported and must first be converted '", "'to one of the supported '", "'types.'", ".", "format", "(", "col", "=", "column", ".", "name", ")", ")", "itemsize", "=", "max_len_string_array", "(", "ensure_object", "(", "column", ".", "values", ")", ")", "if", "itemsize", ">", "max_str_len", ":", "if", "dta_version", ">=", "117", ":", "return", "'%9s'", "else", ":", "raise", "ValueError", "(", "excessive_string_length_error", "%", "column", ".", "name", ")", "return", "\"%\"", "+", "str", "(", "max", "(", "itemsize", ",", "1", ")", ")", "+", "\"s\"", "elif", "dtype", "==", "np", ".", "float64", ":", "return", "\"%10.0g\"", "elif", "dtype", "==", "np", ".", "float32", ":", "return", "\"%9.0g\"", "elif", "dtype", "==", "np", ".", "int32", ":", "return", "\"%12.0g\"", "elif", "dtype", "==", "np", ".", "int8", "or", "dtype", "==", "np", ".", "int16", ":", "return", "\"%8.0g\"", "else", ":", "# pragma : no cover", "raise", "NotImplementedError", "(", "\"Data type {dtype} not supported.\"", ".", "format", "(", "dtype", "=", "dtype", ")", ")" ]
avg_line_len: 40.222222
score: 0.000449
[ "def _dtype_to_default_stata_fmt(dtype, column, dta_version=114,\n", " force_strl=False):\n", " \"\"\"\n", " Map numpy dtype to stata's default format for this type. Not terribly\n", " important since users can change this in Stata. Semantics are\n", "\n", " object -> \"%DDs\" where DD is the length of the string. If not a string,\n", " raise ValueError\n", " float64 -> \"%10.0g\"\n", " float32 -> \"%9.0g\"\n", " int64 -> \"%9.0g\"\n", " int32 -> \"%12.0g\"\n", " int16 -> \"%8.0g\"\n", " int8 -> \"%8.0g\"\n", " strl -> \"%9s\"\n", " \"\"\"\n", " # TODO: Refactor to combine type with format\n", " # TODO: expand this to handle a default datetime format?\n", " if dta_version < 117:\n", " max_str_len = 244\n", " else:\n", " max_str_len = 2045\n", " if force_strl:\n", " return '%9s'\n", " if dtype.type == np.object_:\n", " inferred_dtype = infer_dtype(column, skipna=True)\n", " if not (inferred_dtype in ('string', 'unicode') or\n", " len(column) == 0):\n", " raise ValueError('Column `{col}` cannot be exported.\\n\\nOnly '\n", " 'string-like object arrays containing all '\n", " 'strings or a mix of strings and None can be '\n", " 'exported. Object arrays containing only null '\n", " 'values are prohibited. Other object types'\n", " 'cannot be exported and must first be converted '\n", " 'to one of the supported '\n", " 'types.'.format(col=column.name))\n", " itemsize = max_len_string_array(ensure_object(column.values))\n", " if itemsize > max_str_len:\n", " if dta_version >= 117:\n", " return '%9s'\n", " else:\n", " raise ValueError(excessive_string_length_error % column.name)\n", " return \"%\" + str(max(itemsize, 1)) + \"s\"\n", " elif dtype == np.float64:\n", " return \"%10.0g\"\n", " elif dtype == np.float32:\n", " return \"%9.0g\"\n", " elif dtype == np.int32:\n", " return \"%12.0g\"\n", " elif dtype == np.int8 or dtype == np.int16:\n", " return \"%8.0g\"\n", " else: # pragma : no cover\n", " raise NotImplementedError(\n", " \"Data type {dtype} not supported.\".format(dtype=dtype))" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.014925373134328358 ]
num_lines: 54
avg_score: 0.000276
text:

```python
def validate_proxies_config(cls, proxies):
    """
    Specific config validation method for the "proxies" portion of a
    config.

    Checks that each proxy defines a port and a list of `upstreams`,
    and that each upstream entry has a host and port defined.
    """
    for name, proxy in six.iteritems(proxies):
        if "port" not in proxy:
            raise ValueError("No port defined for proxy %s" % name)
        if "upstreams" not in proxy:
            raise ValueError(
                "No upstreams defined for proxy %s" % name
            )
        for upstream in proxy["upstreams"]:
            if "host" not in upstream:
                raise ValueError(
                    "No host defined for upstream in proxy %s" % name
                )
            if "port" not in upstream:
                raise ValueError(
                    "No port defined for upstream in proxy %s" % name
                )
```
[ "def", "validate_proxies_config", "(", "cls", ",", "proxies", ")", ":", "for", "name", ",", "proxy", "in", "six", ".", "iteritems", "(", "proxies", ")", ":", "if", "\"port\"", "not", "in", "proxy", ":", "raise", "ValueError", "(", "\"No port defined for proxy %s\"", "%", "name", ")", "if", "\"upstreams\"", "not", "in", "proxy", ":", "raise", "ValueError", "(", "\"No upstreams defined for proxy %s\"", "%", "name", ")", "for", "upstream", "in", "proxy", "[", "\"upstreams\"", "]", ":", "if", "\"host\"", "not", "in", "upstream", ":", "raise", "ValueError", "(", "\"No host defined for upstream in proxy %s\"", "%", "name", ")", "if", "\"port\"", "not", "in", "upstream", ":", "raise", "ValueError", "(", "\"No port defined for upstream in proxy %s\"", "%", "name", ")" ]
avg_line_len: 41.208333
score: 0.001976
[ "def validate_proxies_config(cls, proxies):\n", " \"\"\"\n", " Specific config validation method for the \"proxies\" portion of a\n", " config.\n", "\n", " Checks that each proxy defines a port and a list of `upstreams`,\n", " and that each upstream entry has a host and port defined.\n", " \"\"\"\n", " for name, proxy in six.iteritems(proxies):\n", " if \"port\" not in proxy:\n", " raise ValueError(\"No port defined for proxy %s\" % name)\n", " if \"upstreams\" not in proxy:\n", " raise ValueError(\n", " \"No upstreams defined for proxy %s\" % name\n", " )\n", " for upstream in proxy[\"upstreams\"]:\n", " if \"host\" not in upstream:\n", " raise ValueError(\n", " \"No host defined for upstream in proxy %s\" % name\n", " )\n", " if \"port\" not in upstream:\n", " raise ValueError(\n", " \"No port defined for upstream in proxy %s\" % name\n", " )" ]
[ 0, 0.08333333333333333, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.047619047619047616 ]
num_lines: 24
avg_score: 0.005456
text:

```python
def _gen_memd_wrappers(cls, factory):
    """Generates wrappers for all the memcached operations.
    :param factory: A function to be called to return the wrapped
        method. It will be called with two arguments; the first is
        the unbound method being wrapped, and the second is the name
        of such a method.

        The factory shall return a new unbound method

    :return: A dictionary of names mapping the API calls to the
        wrapped functions
    """
    d = {}
    for n in cls._MEMCACHED_OPERATIONS:
        for variant in (n, n + "_multi"):
            try:
                d[variant] = factory(getattr(cls, variant), variant)
            except AttributeError:
                if n in cls._MEMCACHED_NOMULTI:
                    continue
                raise
    return d
```
[ "def", "_gen_memd_wrappers", "(", "cls", ",", "factory", ")", ":", "d", "=", "{", "}", "for", "n", "in", "cls", ".", "_MEMCACHED_OPERATIONS", ":", "for", "variant", "in", "(", "n", ",", "n", "+", "\"_multi\"", ")", ":", "try", ":", "d", "[", "variant", "]", "=", "factory", "(", "getattr", "(", "cls", ",", "variant", ")", ",", "variant", ")", "except", "AttributeError", ":", "if", "n", "in", "cls", ".", "_MEMCACHED_NOMULTI", ":", "continue", "raise", "return", "d" ]
avg_line_len: 39
score: 0.002275
[ "def _gen_memd_wrappers(cls, factory):\n", " \"\"\"Generates wrappers for all the memcached operations.\n", " :param factory: A function to be called to return the wrapped\n", " method. It will be called with two arguments; the first is\n", " the unbound method being wrapped, and the second is the name\n", " of such a method.\n", "\n", " The factory shall return a new unbound method\n", "\n", " :return: A dictionary of names mapping the API calls to the\n", " wrapped functions\n", " \"\"\"\n", " d = {}\n", " for n in cls._MEMCACHED_OPERATIONS:\n", " for variant in (n, n + \"_multi\"):\n", " try:\n", " d[variant] = factory(getattr(cls, variant), variant)\n", " except AttributeError:\n", " if n in cls._MEMCACHED_NOMULTI:\n", " continue\n", " raise\n", " return d" ]
[ 0, 0.015625, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0625 ]
num_lines: 22
avg_score: 0.003551
def destripe_plus(inputfile, suffix='strp', stat='pmode1', maxiter=15, sigrej=2.0, lower=None, upper=None, binwidth=0.3, scimask1=None, scimask2=None, dqbits=None, rpt_clean=0, atol=0.01, cte_correct=True, clobber=False, verbose=True): r"""Calibrate post-SM4 ACS/WFC exposure(s) and use standalone :ref:`acsdestripe`. This takes a RAW image and generates a FLT file containing its calibrated and destriped counterpart. If CTE correction is performed, FLC will also be present. Parameters ---------- inputfile : str or list of str Input filenames in one of these formats: * a Python list of filenames * a partial filename with wildcards ('\*raw.fits') * filename of an ASN table ('j12345670_asn.fits') * an at-file (``@input``) suffix : str The string to use to add to each input file name to indicate an output product of ``acs_destripe``. This only affects the intermediate output file that will be automatically renamed to ``*blv_tmp.fits`` during the processing. stat : { 'pmode1', 'pmode2', 'mean', 'mode', 'median', 'midpt' } (Default = 'pmode1') Specifies the statistics to be used for computation of the background in image rows: * 'pmode1' - SEXTRACTOR-like mode estimate based on a modified `Pearson's rule <http://en.wikipedia.org/wiki/Nonparametric_skew#Pearson.27s_rule>`_: ``2.5*median-1.5*mean``; * 'pmode2' - mode estimate based on `Pearson's rule <http://en.wikipedia.org/wiki/Nonparametric_skew#Pearson.27s_rule>`_: ``3*median-2*mean``; * 'mean' - the mean of the distribution of the "good" pixels (after clipping, masking, etc.); * 'mode' - the mode of the distribution of the "good" pixels; * 'median' - the median of the distribution of the "good" pixels; * 'midpt' - estimate of the median of the distribution of the "good" pixels based on an algorithm similar to IRAF's `imagestats` task (``CDF(midpt)=1/2``). .. note:: The midpoint and mode are computed in two passes through the image. In the first pass the standard deviation of the pixels is calculated and used with the *binwidth* parameter to compute the resolution of the data histogram. The midpoint is estimated by integrating the histogram and computing by interpolation the data value at which exactly half the pixels are below that data value and half are above it. The mode is computed by locating the maximum of the data histogram and fitting the peak by parabolic interpolation. maxiter : int This parameter controls the maximum number of iterations to perform when computing the statistics used to compute the row-by-row corrections. sigrej : float This parameters sets the sigma level for the rejection applied during each iteration of statistics computations for the row-by-row corrections. lower : float, None (Default = None) Lower limit of usable pixel values for computing the background. This value should be specified in the units of the input image(s). upper : float, None (Default = None) Upper limit of usable pixel values for computing the background. This value should be specified in the units of the input image(s). binwidth : float (Default = 0.1) Histogram's bin width, in sigma units, used to sample the distribution of pixel brightness values in order to compute the background statistics. This parameter is aplicable *only* to *stat* parameter values of `'mode'` or `'midpt'`. clobber : bool Specify whether or not to 'clobber' (delete then replace) previously generated products with the same names. scimask1 : str or list of str Mask images for *calibrated* ``SCI,1``, one for each input file. 
Pixels with zero values will be masked out, in addition to clipping. scimask2 : str or list of str Mask images for *calibrated* ``SCI,2``, one for each input file. Pixels with zero values will be masked out, in addition to clipping. This is not used for subarrays. dqbits : int, str, None (Default = None) Integer sum of all the DQ bit values from the input image's DQ array that should be considered "good" when building masks for de-striping computations. For example, if pixels in the DQ array can be combinations of 1, 2, 4, and 8 flags and one wants to consider DQ "defects" having flags 2 and 4 as being acceptable for de-striping computations, then `dqbits` should be set to 2+4=6. Then a DQ pixel having values 2,4, or 6 will be considered a good pixel, while a DQ pixel with a value, e.g., 1+2=3, 4+8=12, etc. will be flagged as a "bad" pixel. Alternatively, one can enter a comma- or '+'-separated list of integer bit flags that should be added to obtain the final "good" bits. For example, both ``4,8`` and ``4+8`` are equivalent to setting `dqbits` to 12. | Set `dqbits` to 0 to make *all* non-zero pixels in the DQ mask to be considered "bad" pixels, and the corresponding image pixels not to be used for de-striping computations. | Default value (`None`) will turn off the use of image's DQ array for de-striping computations. | In order to reverse the meaning of the `dqbits` parameter from indicating values of the "good" DQ flags to indicating the "bad" DQ flags, prepend '~' to the string value. For example, in order not to use pixels with DQ flags 4 and 8 for sky computations and to consider as "good" all other pixels (regardless of their DQ flag), set `dqbits` to ``~4+8``, or ``~4,8``. To obtain the same effect with an `int` input value (except for 0), enter -(4+8+1)=-9. Following this convention, a `dqbits` string value of ``'~0'`` would be equivalent to setting ``dqbits=None``. .. note:: DQ masks (if used), *will be* combined with user masks specified in the `scimask1` and `scimask2` parameters (if any). rpt_clean : int An integer indicating how many *additional* times stripe cleaning should be performed on the input image. Default = 0. atol : float, None The threshold for maximum absolute value of bias stripe correction below which repeated cleanings can stop. When `atol` is `None` cleaning will be repeated `rpt_clean` number of times. Default = 0.01 [e]. cte_correct : bool Perform CTE correction. verbose : bool Print informational messages. Default = True. Raises ------ ImportError ``stsci.tools`` not found. IOError Input file does not exist. ValueError Invalid header values or CALACS version. 
""" # Optional package dependencies from stsci.tools import parseinput try: from stsci.tools.bitmask import interpret_bit_flags except ImportError: from stsci.tools.bitmask import ( interpret_bits_value as interpret_bit_flags ) # process input file(s) and if we have multiple input files - recursively # call acs_destripe_plus for each input image: flist = parseinput.parseinput(inputfile)[0] if isinstance(scimask1, str): mlist1 = parseinput.parseinput(scimask1)[0] elif isinstance(scimask1, np.ndarray): mlist1 = [scimask1.copy()] elif scimask1 is None: mlist1 = [] elif isinstance(scimask1, list): mlist1 = [] for m in scimask1: if isinstance(m, np.ndarray): mlist1.append(m.copy()) elif isinstance(m, str): mlist1 += parseinput.parseinput(m)[0] else: raise TypeError("'scimask1' must be a list of str or " "numpy.ndarray values.") else: raise TypeError("'scimask1' must be either a str, or a " "numpy.ndarray, or a list of the two type of " "values.") if isinstance(scimask2, str): mlist2 = parseinput.parseinput(scimask2)[0] elif isinstance(scimask2, np.ndarray): mlist2 = [scimask2.copy()] elif scimask2 is None: mlist2 = [] elif isinstance(scimask2, list): mlist2 = [] for m in scimask2: if isinstance(m, np.ndarray): mlist2.append(m.copy()) elif isinstance(m, str): mlist2 += parseinput.parseinput(m)[0] else: raise TypeError("'scimask2' must be a list of str or " "numpy.ndarray values.") else: raise TypeError("'scimask2' must be either a str, or a " "numpy.ndarray, or a list of the two type of " "values.") n_input = len(flist) n_mask1 = len(mlist1) n_mask2 = len(mlist2) if n_input == 0: raise ValueError( 'No input file(s) provided or the file(s) do not exist') if n_mask1 == 0: mlist1 = [None] * n_input elif n_mask1 != n_input: raise ValueError('Insufficient masks for [SCI,1]') if n_mask2 == 0: mlist2 = [None] * n_input elif n_mask2 != n_input: raise ValueError('Insufficient masks for [SCI,2]') if n_input > 1: for img, mf1, mf2 in zip(flist, mlist1, mlist2): destripe_plus( inputfile=img, suffix=suffix, stat=stat, lower=lower, upper=upper, binwidth=binwidth, maxiter=maxiter, sigrej=sigrej, scimask1=scimask1, scimask2=scimask2, dqbits=dqbits, cte_correct=cte_correct, clobber=clobber, verbose=verbose ) return inputfile = flist[0] scimask1 = mlist1[0] scimask2 = mlist2[0] # verify that the RAW image exists in cwd cwddir = os.getcwd() if not os.path.exists(os.path.join(cwddir, inputfile)): raise IOError("{0} does not exist.".format(inputfile)) # get image's primary header: header = fits.getheader(inputfile) # verify masks defined (or not) simultaneously: if (header['CCDAMP'] == 'ABCD' and ((scimask1 is not None and scimask2 is None) or (scimask1 is None and scimask2 is not None))): raise ValueError("Both 'scimask1' and 'scimask2' must be specified " "or not specified together.") calacs_str = subprocess.check_output(['calacs.e', '--version']).split()[0] calacs_ver = [int(x) for x in calacs_str.decode().split('.')] if calacs_ver < [8, 3, 1]: raise ValueError('CALACS {0} is incomptible. 
' 'Must be 8.3.1 or later.'.format(calacs_str)) # check date for post-SM4 and if supported subarray or full frame is_subarray = False ctecorr = header['PCTECORR'] aperture = header['APERTURE'] detector = header['DETECTOR'] date_obs = Time(header['DATE-OBS']) # intermediate filenames blvtmp_name = inputfile.replace('raw', 'blv_tmp') blctmp_name = inputfile.replace('raw', 'blc_tmp') # output filenames tra_name = inputfile.replace('_raw.fits', '.tra') flt_name = inputfile.replace('raw', 'flt') flc_name = inputfile.replace('raw', 'flc') if detector != 'WFC': raise ValueError("{0} is not a WFC image, please check the 'DETECTOR'" " keyword.".format(inputfile)) if date_obs < SM4_DATE: raise ValueError( "{0} is a pre-SM4 image.".format(inputfile)) if header['SUBARRAY'] and cte_correct: if aperture in SUBARRAY_LIST: is_subarray = True else: LOG.warning('Using non-supported subarray, ' 'turning CTE correction off') cte_correct = False # delete files from previous CALACS runs if clobber: for tmpfilename in [blvtmp_name, blctmp_name, flt_name, flc_name, tra_name]: if os.path.exists(tmpfilename): os.remove(tmpfilename) # run ACSCCD on RAW acsccd.acsccd(inputfile) # modify user mask with DQ masks if requested dqbits = interpret_bit_flags(dqbits) if dqbits is not None: # save 'tra' file in memory to trick the log file # not to save first acs2d log as this is done only # for the purpose of obtaining DQ masks. # WISH: it would have been nice is there was an easy way of obtaining # just the DQ masks as if data were calibrated but without # having to recalibrate them with acs2d. if os.path.isfile(tra_name): with open(tra_name) as fh: tra_lines = fh.readlines() else: tra_lines = None # apply flats, etc. acs2d.acs2d(blvtmp_name, verbose=False, quiet=True) # extract DQ arrays from the FLT image: dq1, dq2 = _read_DQ_arrays(flt_name) mask1 = _get_mask(scimask1, 1) scimask1 = acs_destripe._mergeUserMaskAndDQ(dq1, mask1, dqbits) mask2 = _get_mask(scimask2, 2) if dq2 is not None: scimask2 = acs_destripe._mergeUserMaskAndDQ(dq2, mask2, dqbits) elif mask2 is None: scimask2 = None # reconstruct trailer file: if tra_lines is not None: with open(tra_name, mode='w') as fh: fh.writelines(tra_lines) # delete temporary FLT image: if os.path.isfile(flt_name): os.remove(flt_name) # execute destriping (post-SM4 data only) acs_destripe.clean( blvtmp_name, suffix, stat=stat, maxiter=maxiter, sigrej=sigrej, lower=lower, upper=upper, binwidth=binwidth, mask1=scimask1, mask2=scimask2, dqbits=dqbits, rpt_clean=rpt_clean, atol=atol, clobber=clobber, verbose=verbose) blvtmpsfx = 'blv_tmp_{0}'.format(suffix) os.rename(inputfile.replace('raw', blvtmpsfx), blvtmp_name) # update subarray header if is_subarray and cte_correct: fits.setval(blvtmp_name, 'PCTECORR', value='PERFORM') ctecorr = 'PERFORM' # perform CTE correction on destriped image if cte_correct: if ctecorr == 'PERFORM': acscte.acscte(blvtmp_name) else: LOG.warning( "PCTECORR={0}, cannot run CTE correction".format(ctecorr)) cte_correct = False # run ACS2D to get FLT and FLC images acs2d.acs2d(blvtmp_name) if cte_correct: acs2d.acs2d(blctmp_name) # delete intermediate files os.remove(blvtmp_name) if cte_correct and os.path.isfile(blctmp_name): os.remove(blctmp_name) info_str = 'Done.\nFLT: {0}\n'.format(flt_name) if cte_correct: info_str += 'FLC: {0}\n'.format(flc_name) LOG.info(info_str)
[ "def", "destripe_plus", "(", "inputfile", ",", "suffix", "=", "'strp'", ",", "stat", "=", "'pmode1'", ",", "maxiter", "=", "15", ",", "sigrej", "=", "2.0", ",", "lower", "=", "None", ",", "upper", "=", "None", ",", "binwidth", "=", "0.3", ",", "scimask1", "=", "None", ",", "scimask2", "=", "None", ",", "dqbits", "=", "None", ",", "rpt_clean", "=", "0", ",", "atol", "=", "0.01", ",", "cte_correct", "=", "True", ",", "clobber", "=", "False", ",", "verbose", "=", "True", ")", ":", "# Optional package dependencies", "from", "stsci", ".", "tools", "import", "parseinput", "try", ":", "from", "stsci", ".", "tools", ".", "bitmask", "import", "interpret_bit_flags", "except", "ImportError", ":", "from", "stsci", ".", "tools", ".", "bitmask", "import", "(", "interpret_bits_value", "as", "interpret_bit_flags", ")", "# process input file(s) and if we have multiple input files - recursively", "# call acs_destripe_plus for each input image:", "flist", "=", "parseinput", ".", "parseinput", "(", "inputfile", ")", "[", "0", "]", "if", "isinstance", "(", "scimask1", ",", "str", ")", ":", "mlist1", "=", "parseinput", ".", "parseinput", "(", "scimask1", ")", "[", "0", "]", "elif", "isinstance", "(", "scimask1", ",", "np", ".", "ndarray", ")", ":", "mlist1", "=", "[", "scimask1", ".", "copy", "(", ")", "]", "elif", "scimask1", "is", "None", ":", "mlist1", "=", "[", "]", "elif", "isinstance", "(", "scimask1", ",", "list", ")", ":", "mlist1", "=", "[", "]", "for", "m", "in", "scimask1", ":", "if", "isinstance", "(", "m", ",", "np", ".", "ndarray", ")", ":", "mlist1", ".", "append", "(", "m", ".", "copy", "(", ")", ")", "elif", "isinstance", "(", "m", ",", "str", ")", ":", "mlist1", "+=", "parseinput", ".", "parseinput", "(", "m", ")", "[", "0", "]", "else", ":", "raise", "TypeError", "(", "\"'scimask1' must be a list of str or \"", "\"numpy.ndarray values.\"", ")", "else", ":", "raise", "TypeError", "(", "\"'scimask1' must be either a str, or a \"", "\"numpy.ndarray, or a list of the two type of \"", "\"values.\"", ")", "if", "isinstance", "(", "scimask2", ",", "str", ")", ":", "mlist2", "=", "parseinput", ".", "parseinput", "(", "scimask2", ")", "[", "0", "]", "elif", "isinstance", "(", "scimask2", ",", "np", ".", "ndarray", ")", ":", "mlist2", "=", "[", "scimask2", ".", "copy", "(", ")", "]", "elif", "scimask2", "is", "None", ":", "mlist2", "=", "[", "]", "elif", "isinstance", "(", "scimask2", ",", "list", ")", ":", "mlist2", "=", "[", "]", "for", "m", "in", "scimask2", ":", "if", "isinstance", "(", "m", ",", "np", ".", "ndarray", ")", ":", "mlist2", ".", "append", "(", "m", ".", "copy", "(", ")", ")", "elif", "isinstance", "(", "m", ",", "str", ")", ":", "mlist2", "+=", "parseinput", ".", "parseinput", "(", "m", ")", "[", "0", "]", "else", ":", "raise", "TypeError", "(", "\"'scimask2' must be a list of str or \"", "\"numpy.ndarray values.\"", ")", "else", ":", "raise", "TypeError", "(", "\"'scimask2' must be either a str, or a \"", "\"numpy.ndarray, or a list of the two type of \"", "\"values.\"", ")", "n_input", "=", "len", "(", "flist", ")", "n_mask1", "=", "len", "(", "mlist1", ")", "n_mask2", "=", "len", "(", "mlist2", ")", "if", "n_input", "==", "0", ":", "raise", "ValueError", "(", "'No input file(s) provided or the file(s) do not exist'", ")", "if", "n_mask1", "==", "0", ":", "mlist1", "=", "[", "None", "]", "*", "n_input", "elif", "n_mask1", "!=", "n_input", ":", "raise", "ValueError", "(", "'Insufficient masks for [SCI,1]'", ")", "if", "n_mask2", "==", "0", ":", "mlist2", "=", "[", "None", "]", "*", 
"n_input", "elif", "n_mask2", "!=", "n_input", ":", "raise", "ValueError", "(", "'Insufficient masks for [SCI,2]'", ")", "if", "n_input", ">", "1", ":", "for", "img", ",", "mf1", ",", "mf2", "in", "zip", "(", "flist", ",", "mlist1", ",", "mlist2", ")", ":", "destripe_plus", "(", "inputfile", "=", "img", ",", "suffix", "=", "suffix", ",", "stat", "=", "stat", ",", "lower", "=", "lower", ",", "upper", "=", "upper", ",", "binwidth", "=", "binwidth", ",", "maxiter", "=", "maxiter", ",", "sigrej", "=", "sigrej", ",", "scimask1", "=", "scimask1", ",", "scimask2", "=", "scimask2", ",", "dqbits", "=", "dqbits", ",", "cte_correct", "=", "cte_correct", ",", "clobber", "=", "clobber", ",", "verbose", "=", "verbose", ")", "return", "inputfile", "=", "flist", "[", "0", "]", "scimask1", "=", "mlist1", "[", "0", "]", "scimask2", "=", "mlist2", "[", "0", "]", "# verify that the RAW image exists in cwd", "cwddir", "=", "os", ".", "getcwd", "(", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "cwddir", ",", "inputfile", ")", ")", ":", "raise", "IOError", "(", "\"{0} does not exist.\"", ".", "format", "(", "inputfile", ")", ")", "# get image's primary header:", "header", "=", "fits", ".", "getheader", "(", "inputfile", ")", "# verify masks defined (or not) simultaneously:", "if", "(", "header", "[", "'CCDAMP'", "]", "==", "'ABCD'", "and", "(", "(", "scimask1", "is", "not", "None", "and", "scimask2", "is", "None", ")", "or", "(", "scimask1", "is", "None", "and", "scimask2", "is", "not", "None", ")", ")", ")", ":", "raise", "ValueError", "(", "\"Both 'scimask1' and 'scimask2' must be specified \"", "\"or not specified together.\"", ")", "calacs_str", "=", "subprocess", ".", "check_output", "(", "[", "'calacs.e'", ",", "'--version'", "]", ")", ".", "split", "(", ")", "[", "0", "]", "calacs_ver", "=", "[", "int", "(", "x", ")", "for", "x", "in", "calacs_str", ".", "decode", "(", ")", ".", "split", "(", "'.'", ")", "]", "if", "calacs_ver", "<", "[", "8", ",", "3", ",", "1", "]", ":", "raise", "ValueError", "(", "'CALACS {0} is incomptible. 
'", "'Must be 8.3.1 or later.'", ".", "format", "(", "calacs_str", ")", ")", "# check date for post-SM4 and if supported subarray or full frame", "is_subarray", "=", "False", "ctecorr", "=", "header", "[", "'PCTECORR'", "]", "aperture", "=", "header", "[", "'APERTURE'", "]", "detector", "=", "header", "[", "'DETECTOR'", "]", "date_obs", "=", "Time", "(", "header", "[", "'DATE-OBS'", "]", ")", "# intermediate filenames", "blvtmp_name", "=", "inputfile", ".", "replace", "(", "'raw'", ",", "'blv_tmp'", ")", "blctmp_name", "=", "inputfile", ".", "replace", "(", "'raw'", ",", "'blc_tmp'", ")", "# output filenames", "tra_name", "=", "inputfile", ".", "replace", "(", "'_raw.fits'", ",", "'.tra'", ")", "flt_name", "=", "inputfile", ".", "replace", "(", "'raw'", ",", "'flt'", ")", "flc_name", "=", "inputfile", ".", "replace", "(", "'raw'", ",", "'flc'", ")", "if", "detector", "!=", "'WFC'", ":", "raise", "ValueError", "(", "\"{0} is not a WFC image, please check the 'DETECTOR'\"", "\" keyword.\"", ".", "format", "(", "inputfile", ")", ")", "if", "date_obs", "<", "SM4_DATE", ":", "raise", "ValueError", "(", "\"{0} is a pre-SM4 image.\"", ".", "format", "(", "inputfile", ")", ")", "if", "header", "[", "'SUBARRAY'", "]", "and", "cte_correct", ":", "if", "aperture", "in", "SUBARRAY_LIST", ":", "is_subarray", "=", "True", "else", ":", "LOG", ".", "warning", "(", "'Using non-supported subarray, '", "'turning CTE correction off'", ")", "cte_correct", "=", "False", "# delete files from previous CALACS runs", "if", "clobber", ":", "for", "tmpfilename", "in", "[", "blvtmp_name", ",", "blctmp_name", ",", "flt_name", ",", "flc_name", ",", "tra_name", "]", ":", "if", "os", ".", "path", ".", "exists", "(", "tmpfilename", ")", ":", "os", ".", "remove", "(", "tmpfilename", ")", "# run ACSCCD on RAW", "acsccd", ".", "acsccd", "(", "inputfile", ")", "# modify user mask with DQ masks if requested", "dqbits", "=", "interpret_bit_flags", "(", "dqbits", ")", "if", "dqbits", "is", "not", "None", ":", "# save 'tra' file in memory to trick the log file", "# not to save first acs2d log as this is done only", "# for the purpose of obtaining DQ masks.", "# WISH: it would have been nice is there was an easy way of obtaining", "# just the DQ masks as if data were calibrated but without", "# having to recalibrate them with acs2d.", "if", "os", ".", "path", ".", "isfile", "(", "tra_name", ")", ":", "with", "open", "(", "tra_name", ")", "as", "fh", ":", "tra_lines", "=", "fh", ".", "readlines", "(", ")", "else", ":", "tra_lines", "=", "None", "# apply flats, etc.", "acs2d", ".", "acs2d", "(", "blvtmp_name", ",", "verbose", "=", "False", ",", "quiet", "=", "True", ")", "# extract DQ arrays from the FLT image:", "dq1", ",", "dq2", "=", "_read_DQ_arrays", "(", "flt_name", ")", "mask1", "=", "_get_mask", "(", "scimask1", ",", "1", ")", "scimask1", "=", "acs_destripe", ".", "_mergeUserMaskAndDQ", "(", "dq1", ",", "mask1", ",", "dqbits", ")", "mask2", "=", "_get_mask", "(", "scimask2", ",", "2", ")", "if", "dq2", "is", "not", "None", ":", "scimask2", "=", "acs_destripe", ".", "_mergeUserMaskAndDQ", "(", "dq2", ",", "mask2", ",", "dqbits", ")", "elif", "mask2", "is", "None", ":", "scimask2", "=", "None", "# reconstruct trailer file:", "if", "tra_lines", "is", "not", "None", ":", "with", "open", "(", "tra_name", ",", "mode", "=", "'w'", ")", "as", "fh", ":", "fh", ".", "writelines", "(", "tra_lines", ")", "# delete temporary FLT image:", "if", "os", ".", "path", ".", "isfile", "(", "flt_name", ")", ":", "os", ".", "remove", "(", 
"flt_name", ")", "# execute destriping (post-SM4 data only)", "acs_destripe", ".", "clean", "(", "blvtmp_name", ",", "suffix", ",", "stat", "=", "stat", ",", "maxiter", "=", "maxiter", ",", "sigrej", "=", "sigrej", ",", "lower", "=", "lower", ",", "upper", "=", "upper", ",", "binwidth", "=", "binwidth", ",", "mask1", "=", "scimask1", ",", "mask2", "=", "scimask2", ",", "dqbits", "=", "dqbits", ",", "rpt_clean", "=", "rpt_clean", ",", "atol", "=", "atol", ",", "clobber", "=", "clobber", ",", "verbose", "=", "verbose", ")", "blvtmpsfx", "=", "'blv_tmp_{0}'", ".", "format", "(", "suffix", ")", "os", ".", "rename", "(", "inputfile", ".", "replace", "(", "'raw'", ",", "blvtmpsfx", ")", ",", "blvtmp_name", ")", "# update subarray header", "if", "is_subarray", "and", "cte_correct", ":", "fits", ".", "setval", "(", "blvtmp_name", ",", "'PCTECORR'", ",", "value", "=", "'PERFORM'", ")", "ctecorr", "=", "'PERFORM'", "# perform CTE correction on destriped image", "if", "cte_correct", ":", "if", "ctecorr", "==", "'PERFORM'", ":", "acscte", ".", "acscte", "(", "blvtmp_name", ")", "else", ":", "LOG", ".", "warning", "(", "\"PCTECORR={0}, cannot run CTE correction\"", ".", "format", "(", "ctecorr", ")", ")", "cte_correct", "=", "False", "# run ACS2D to get FLT and FLC images", "acs2d", ".", "acs2d", "(", "blvtmp_name", ")", "if", "cte_correct", ":", "acs2d", ".", "acs2d", "(", "blctmp_name", ")", "# delete intermediate files", "os", ".", "remove", "(", "blvtmp_name", ")", "if", "cte_correct", "and", "os", ".", "path", ".", "isfile", "(", "blctmp_name", ")", ":", "os", ".", "remove", "(", "blctmp_name", ")", "info_str", "=", "'Done.\\nFLT: {0}\\n'", ".", "format", "(", "flt_name", ")", "if", "cte_correct", ":", "info_str", "+=", "'FLC: {0}\\n'", ".", "format", "(", "flc_name", ")", "LOG", ".", "info", "(", "info_str", ")" ]
avg_line_len: 38.492268
score: 0.000261
[ "def destripe_plus(inputfile, suffix='strp', stat='pmode1', maxiter=15,\n", " sigrej=2.0, lower=None, upper=None, binwidth=0.3,\n", " scimask1=None, scimask2=None,\n", " dqbits=None, rpt_clean=0, atol=0.01,\n", " cte_correct=True, clobber=False, verbose=True):\n", " r\"\"\"Calibrate post-SM4 ACS/WFC exposure(s) and use\n", " standalone :ref:`acsdestripe`.\n", "\n", " This takes a RAW image and generates a FLT file containing\n", " its calibrated and destriped counterpart.\n", " If CTE correction is performed, FLC will also be present.\n", "\n", " Parameters\n", " ----------\n", " inputfile : str or list of str\n", " Input filenames in one of these formats:\n", "\n", " * a Python list of filenames\n", " * a partial filename with wildcards ('\\*raw.fits')\n", " * filename of an ASN table ('j12345670_asn.fits')\n", " * an at-file (``@input``)\n", "\n", " suffix : str\n", " The string to use to add to each input file name to\n", " indicate an output product of ``acs_destripe``.\n", " This only affects the intermediate output file that will\n", " be automatically renamed to ``*blv_tmp.fits`` during the processing.\n", "\n", " stat : { 'pmode1', 'pmode2', 'mean', 'mode', 'median', 'midpt' } (Default = 'pmode1')\n", " Specifies the statistics to be used for computation of the\n", " background in image rows:\n", "\n", " * 'pmode1' - SEXTRACTOR-like mode estimate based on a\n", " modified `Pearson's rule <http://en.wikipedia.org/wiki/Nonparametric_skew#Pearson.27s_rule>`_:\n", " ``2.5*median-1.5*mean``;\n", " * 'pmode2' - mode estimate based on\n", " `Pearson's rule <http://en.wikipedia.org/wiki/Nonparametric_skew#Pearson.27s_rule>`_:\n", " ``3*median-2*mean``;\n", " * 'mean' - the mean of the distribution of the \"good\" pixels (after\n", " clipping, masking, etc.);\n", " * 'mode' - the mode of the distribution of the \"good\" pixels;\n", " * 'median' - the median of the distribution of the \"good\" pixels;\n", " * 'midpt' - estimate of the median of the distribution of the \"good\"\n", " pixels based on an algorithm similar to IRAF's `imagestats` task\n", " (``CDF(midpt)=1/2``).\n", "\n", " .. note::\n", " The midpoint and mode are computed in two passes through the\n", " image. In the first pass the standard deviation of the pixels\n", " is calculated and used with the *binwidth* parameter to compute\n", " the resolution of the data histogram. The midpoint is estimated\n", " by integrating the histogram and computing by interpolation\n", " the data value at which exactly half the pixels are below that\n", " data value and half are above it. 
The mode is computed by\n", " locating the maximum of the data histogram and fitting the peak\n", " by parabolic interpolation.\n", "\n", " maxiter : int\n", " This parameter controls the maximum number of iterations\n", " to perform when computing the statistics used to compute the\n", " row-by-row corrections.\n", "\n", " sigrej : float\n", " This parameters sets the sigma level for the rejection applied\n", " during each iteration of statistics computations for the\n", " row-by-row corrections.\n", "\n", " lower : float, None (Default = None)\n", " Lower limit of usable pixel values for computing the background.\n", " This value should be specified in the units of the input image(s).\n", "\n", " upper : float, None (Default = None)\n", " Upper limit of usable pixel values for computing the background.\n", " This value should be specified in the units of the input image(s).\n", "\n", " binwidth : float (Default = 0.1)\n", " Histogram's bin width, in sigma units, used to sample the\n", " distribution of pixel brightness values in order to compute the\n", " background statistics. This parameter is aplicable *only* to *stat*\n", " parameter values of `'mode'` or `'midpt'`.\n", "\n", " clobber : bool\n", " Specify whether or not to 'clobber' (delete then replace)\n", " previously generated products with the same names.\n", "\n", " scimask1 : str or list of str\n", " Mask images for *calibrated* ``SCI,1``, one for each input file.\n", " Pixels with zero values will be masked out, in addition to clipping.\n", "\n", " scimask2 : str or list of str\n", " Mask images for *calibrated* ``SCI,2``, one for each input file.\n", " Pixels with zero values will be masked out, in addition to clipping.\n", " This is not used for subarrays.\n", "\n", " dqbits : int, str, None (Default = None)\n", " Integer sum of all the DQ bit values from the input image's DQ array\n", " that should be considered \"good\" when building masks for de-striping\n", " computations. For example, if pixels in the DQ array can be\n", " combinations of 1, 2, 4, and 8 flags and one wants to consider\n", " DQ \"defects\" having flags 2 and 4 as being acceptable for de-striping\n", " computations, then `dqbits` should be set to 2+4=6. Then a DQ pixel\n", " having values 2,4, or 6 will be considered a good pixel, while a DQ\n", " pixel with a value, e.g., 1+2=3, 4+8=12, etc. will be flagged\n", " as a \"bad\" pixel.\n", "\n", " Alternatively, one can enter a comma- or '+'-separated list of\n", " integer bit flags that should be added to obtain the final\n", " \"good\" bits. For example, both ``4,8`` and ``4+8`` are equivalent to\n", " setting `dqbits` to 12.\n", "\n", " | Set `dqbits` to 0 to make *all* non-zero pixels in the DQ\n", " mask to be considered \"bad\" pixels, and the corresponding image\n", " pixels not to be used for de-striping computations.\n", "\n", " | Default value (`None`) will turn off the use of image's DQ array\n", " for de-striping computations.\n", "\n", " | In order to reverse the meaning of the `dqbits`\n", " parameter from indicating values of the \"good\" DQ flags\n", " to indicating the \"bad\" DQ flags, prepend '~' to the string\n", " value. For example, in order not to use pixels with\n", " DQ flags 4 and 8 for sky computations and to consider\n", " as \"good\" all other pixels (regardless of their DQ flag),\n", " set `dqbits` to ``~4+8``, or ``~4,8``. To obtain the\n", " same effect with an `int` input value (except for 0),\n", " enter -(4+8+1)=-9. 
Following this convention,\n", " a `dqbits` string value of ``'~0'`` would be equivalent to\n", " setting ``dqbits=None``.\n", "\n", " .. note::\n", " DQ masks (if used), *will be* combined with user masks specified\n", " in the `scimask1` and `scimask2` parameters (if any).\n", "\n", " rpt_clean : int\n", " An integer indicating how many *additional* times stripe cleaning\n", " should be performed on the input image. Default = 0.\n", "\n", " atol : float, None\n", " The threshold for maximum absolute value of bias stripe correction\n", " below which repeated cleanings can stop. When `atol` is `None`\n", " cleaning will be repeated `rpt_clean` number of times.\n", " Default = 0.01 [e].\n", "\n", " cte_correct : bool\n", " Perform CTE correction.\n", "\n", " verbose : bool\n", " Print informational messages. Default = True.\n", "\n", " Raises\n", " ------\n", " ImportError\n", " ``stsci.tools`` not found.\n", "\n", " IOError\n", " Input file does not exist.\n", "\n", " ValueError\n", " Invalid header values or CALACS version.\n", "\n", " \"\"\"\n", " # Optional package dependencies\n", " from stsci.tools import parseinput\n", " try:\n", " from stsci.tools.bitmask import interpret_bit_flags\n", " except ImportError:\n", " from stsci.tools.bitmask import (\n", " interpret_bits_value as interpret_bit_flags\n", " )\n", "\n", " # process input file(s) and if we have multiple input files - recursively\n", " # call acs_destripe_plus for each input image:\n", " flist = parseinput.parseinput(inputfile)[0]\n", "\n", " if isinstance(scimask1, str):\n", " mlist1 = parseinput.parseinput(scimask1)[0]\n", " elif isinstance(scimask1, np.ndarray):\n", " mlist1 = [scimask1.copy()]\n", " elif scimask1 is None:\n", " mlist1 = []\n", " elif isinstance(scimask1, list):\n", " mlist1 = []\n", " for m in scimask1:\n", " if isinstance(m, np.ndarray):\n", " mlist1.append(m.copy())\n", " elif isinstance(m, str):\n", " mlist1 += parseinput.parseinput(m)[0]\n", " else:\n", " raise TypeError(\"'scimask1' must be a list of str or \"\n", " \"numpy.ndarray values.\")\n", " else:\n", " raise TypeError(\"'scimask1' must be either a str, or a \"\n", " \"numpy.ndarray, or a list of the two type of \"\n", " \"values.\")\n", "\n", " if isinstance(scimask2, str):\n", " mlist2 = parseinput.parseinput(scimask2)[0]\n", " elif isinstance(scimask2, np.ndarray):\n", " mlist2 = [scimask2.copy()]\n", " elif scimask2 is None:\n", " mlist2 = []\n", " elif isinstance(scimask2, list):\n", " mlist2 = []\n", " for m in scimask2:\n", " if isinstance(m, np.ndarray):\n", " mlist2.append(m.copy())\n", " elif isinstance(m, str):\n", " mlist2 += parseinput.parseinput(m)[0]\n", " else:\n", " raise TypeError(\"'scimask2' must be a list of str or \"\n", " \"numpy.ndarray values.\")\n", " else:\n", " raise TypeError(\"'scimask2' must be either a str, or a \"\n", " \"numpy.ndarray, or a list of the two type of \"\n", " \"values.\")\n", "\n", " n_input = len(flist)\n", " n_mask1 = len(mlist1)\n", " n_mask2 = len(mlist2)\n", "\n", " if n_input == 0:\n", " raise ValueError(\n", " 'No input file(s) provided or the file(s) do not exist')\n", "\n", " if n_mask1 == 0:\n", " mlist1 = [None] * n_input\n", " elif n_mask1 != n_input:\n", " raise ValueError('Insufficient masks for [SCI,1]')\n", "\n", " if n_mask2 == 0:\n", " mlist2 = [None] * n_input\n", " elif n_mask2 != n_input:\n", " raise ValueError('Insufficient masks for [SCI,2]')\n", "\n", " if n_input > 1:\n", " for img, mf1, mf2 in zip(flist, mlist1, mlist2):\n", " destripe_plus(\n", " inputfile=img, 
suffix=suffix, stat=stat,\n", " lower=lower, upper=upper, binwidth=binwidth,\n", " maxiter=maxiter, sigrej=sigrej,\n", " scimask1=scimask1, scimask2=scimask2, dqbits=dqbits,\n", " cte_correct=cte_correct, clobber=clobber, verbose=verbose\n", " )\n", " return\n", "\n", " inputfile = flist[0]\n", " scimask1 = mlist1[0]\n", " scimask2 = mlist2[0]\n", "\n", " # verify that the RAW image exists in cwd\n", " cwddir = os.getcwd()\n", " if not os.path.exists(os.path.join(cwddir, inputfile)):\n", " raise IOError(\"{0} does not exist.\".format(inputfile))\n", "\n", " # get image's primary header:\n", " header = fits.getheader(inputfile)\n", "\n", " # verify masks defined (or not) simultaneously:\n", " if (header['CCDAMP'] == 'ABCD' and\n", " ((scimask1 is not None and scimask2 is None) or\n", " (scimask1 is None and scimask2 is not None))):\n", " raise ValueError(\"Both 'scimask1' and 'scimask2' must be specified \"\n", " \"or not specified together.\")\n", "\n", " calacs_str = subprocess.check_output(['calacs.e', '--version']).split()[0]\n", " calacs_ver = [int(x) for x in calacs_str.decode().split('.')]\n", " if calacs_ver < [8, 3, 1]:\n", " raise ValueError('CALACS {0} is incomptible. '\n", " 'Must be 8.3.1 or later.'.format(calacs_str))\n", "\n", " # check date for post-SM4 and if supported subarray or full frame\n", " is_subarray = False\n", " ctecorr = header['PCTECORR']\n", " aperture = header['APERTURE']\n", " detector = header['DETECTOR']\n", " date_obs = Time(header['DATE-OBS'])\n", "\n", " # intermediate filenames\n", " blvtmp_name = inputfile.replace('raw', 'blv_tmp')\n", " blctmp_name = inputfile.replace('raw', 'blc_tmp')\n", "\n", " # output filenames\n", " tra_name = inputfile.replace('_raw.fits', '.tra')\n", " flt_name = inputfile.replace('raw', 'flt')\n", " flc_name = inputfile.replace('raw', 'flc')\n", "\n", " if detector != 'WFC':\n", " raise ValueError(\"{0} is not a WFC image, please check the 'DETECTOR'\"\n", " \" keyword.\".format(inputfile))\n", "\n", " if date_obs < SM4_DATE:\n", " raise ValueError(\n", " \"{0} is a pre-SM4 image.\".format(inputfile))\n", "\n", " if header['SUBARRAY'] and cte_correct:\n", " if aperture in SUBARRAY_LIST:\n", " is_subarray = True\n", " else:\n", " LOG.warning('Using non-supported subarray, '\n", " 'turning CTE correction off')\n", " cte_correct = False\n", "\n", " # delete files from previous CALACS runs\n", " if clobber:\n", " for tmpfilename in [blvtmp_name, blctmp_name, flt_name, flc_name,\n", " tra_name]:\n", " if os.path.exists(tmpfilename):\n", " os.remove(tmpfilename)\n", "\n", " # run ACSCCD on RAW\n", " acsccd.acsccd(inputfile)\n", "\n", " # modify user mask with DQ masks if requested\n", " dqbits = interpret_bit_flags(dqbits)\n", " if dqbits is not None:\n", " # save 'tra' file in memory to trick the log file\n", " # not to save first acs2d log as this is done only\n", " # for the purpose of obtaining DQ masks.\n", " # WISH: it would have been nice is there was an easy way of obtaining\n", " # just the DQ masks as if data were calibrated but without\n", " # having to recalibrate them with acs2d.\n", " if os.path.isfile(tra_name):\n", " with open(tra_name) as fh:\n", " tra_lines = fh.readlines()\n", " else:\n", " tra_lines = None\n", "\n", " # apply flats, etc.\n", " acs2d.acs2d(blvtmp_name, verbose=False, quiet=True)\n", "\n", " # extract DQ arrays from the FLT image:\n", " dq1, dq2 = _read_DQ_arrays(flt_name)\n", "\n", " mask1 = _get_mask(scimask1, 1)\n", " scimask1 = acs_destripe._mergeUserMaskAndDQ(dq1, mask1, dqbits)\n", "\n", " 
mask2 = _get_mask(scimask2, 2)\n", " if dq2 is not None:\n", " scimask2 = acs_destripe._mergeUserMaskAndDQ(dq2, mask2, dqbits)\n", " elif mask2 is None:\n", " scimask2 = None\n", "\n", " # reconstruct trailer file:\n", " if tra_lines is not None:\n", " with open(tra_name, mode='w') as fh:\n", " fh.writelines(tra_lines)\n", "\n", " # delete temporary FLT image:\n", " if os.path.isfile(flt_name):\n", " os.remove(flt_name)\n", "\n", " # execute destriping (post-SM4 data only)\n", " acs_destripe.clean(\n", " blvtmp_name, suffix, stat=stat, maxiter=maxiter, sigrej=sigrej,\n", " lower=lower, upper=upper, binwidth=binwidth,\n", " mask1=scimask1, mask2=scimask2, dqbits=dqbits,\n", " rpt_clean=rpt_clean, atol=atol, clobber=clobber, verbose=verbose)\n", " blvtmpsfx = 'blv_tmp_{0}'.format(suffix)\n", " os.rename(inputfile.replace('raw', blvtmpsfx), blvtmp_name)\n", "\n", " # update subarray header\n", " if is_subarray and cte_correct:\n", " fits.setval(blvtmp_name, 'PCTECORR', value='PERFORM')\n", " ctecorr = 'PERFORM'\n", "\n", " # perform CTE correction on destriped image\n", " if cte_correct:\n", " if ctecorr == 'PERFORM':\n", " acscte.acscte(blvtmp_name)\n", " else:\n", " LOG.warning(\n", " \"PCTECORR={0}, cannot run CTE correction\".format(ctecorr))\n", " cte_correct = False\n", "\n", " # run ACS2D to get FLT and FLC images\n", " acs2d.acs2d(blvtmp_name)\n", " if cte_correct:\n", " acs2d.acs2d(blctmp_name)\n", "\n", " # delete intermediate files\n", " os.remove(blvtmp_name)\n", " if cte_correct and os.path.isfile(blctmp_name):\n", " os.remove(blctmp_name)\n", "\n", " info_str = 'Done.\\nFLT: {0}\\n'.format(flt_name)\n", " if cte_correct:\n", " info_str += 'FLC: {0}\\n'.format(flc_name)\n", " LOG.info(info_str)" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011111111111111112, 0, 0, 0, 0, 0.009523809523809525, 0, 0, 0.010416666666666666, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.045454545454545456 ]
388
0.000197
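The `dqbits` convention described in the `destripe_plus` docstring above (an integer sum of DQ flags that count as "good") can be illustrated with plain NumPy. This is only a sketch of the bit arithmetic, not the actual `stsci.tools`/`acs_destripe` implementation, and the DQ values below are made up.

```python
import numpy as np

# Hypothetical DQ array containing flags 0, 1, 2, 4, 8 and some combinations.
dq = np.array([[0, 2, 4, 6],
               [1, 3, 8, 12]])

# dqbits = 2 + 4 = 6: pixels whose DQ value is composed only of flags 2 and/or 4 are "good".
good_bits = 6

# A pixel is good if it carries no flags outside of `good_bits`.
good = (dq & ~good_bits) == 0
print(good)
# [[ True  True  True  True]
#  [False False False False]]
```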
def dummyListOfDicts(size=100): """ returns a list (of the given size) of dicts with fake data. some dictionary keys are missing for some of the items. """ titles="ahp,halfwidth,peak,expT,expI,sweep".split(",") ld=[] #list of dicts for i in range(size): d={} for t in titles: if int(np.random.random(1)*100)>5: #5% of values are missing d[t]=float(np.random.random(1)*100) #random number 0-100 if t=="sweep" and "sweep" in d.keys(): d[t]=int(d[t]) ld.append(d) return ld
[ "def", "dummyListOfDicts", "(", "size", "=", "100", ")", ":", "titles", "=", "\"ahp,halfwidth,peak,expT,expI,sweep\"", ".", "split", "(", "\",\"", ")", "ld", "=", "[", "]", "#list of dicts", "for", "i", "in", "range", "(", "size", ")", ":", "d", "=", "{", "}", "for", "t", "in", "titles", ":", "if", "int", "(", "np", ".", "random", ".", "random", "(", "1", ")", "*", "100", ")", ">", "5", ":", "#5% of values are missing", "d", "[", "t", "]", "=", "float", "(", "np", ".", "random", ".", "random", "(", "1", ")", "*", "100", ")", "#random number 0-100", "if", "t", "==", "\"sweep\"", "and", "\"sweep\"", "in", "d", ".", "keys", "(", ")", ":", "d", "[", "t", "]", "=", "int", "(", "d", "[", "t", "]", ")", "ld", ".", "append", "(", "d", ")", "return", "ld" ]
35.4375
0.024055
[ "def dummyListOfDicts(size=100):\n", " \"\"\"\n", " returns a list (of the given size) of dicts with fake data.\n", " some dictionary keys are missing for some of the items.\n", " \"\"\"\n", " titles=\"ahp,halfwidth,peak,expT,expI,sweep\".split(\",\")\n", " ld=[] #list of dicts\n", " for i in range(size):\n", " d={}\n", " for t in titles:\n", " if int(np.random.random(1)*100)>5: #5% of values are missing\n", " d[t]=float(np.random.random(1)*100) #random number 0-100\n", " if t==\"sweep\" and \"sweep\" in d.keys():\n", " d[t]=int(d[t])\n", " ld.append(d)\n", " return ld" ]
[ 0, 0, 0, 0, 0, 0.01694915254237288, 0.12, 0, 0.07692307692307693, 0, 0.0410958904109589, 0.0410958904109589, 0.0196078431372549, 0.03225806451612903, 0, 0.07692307692307693 ]
16
0.026553
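A minimal sketch of how such a list of dicts with randomly missing keys is typically consumed, assuming `numpy` and `pandas` are available: missing keys simply become NaN cells when the list is turned into a DataFrame. The data below is generated inline and does not call `dummyListOfDicts` itself.

```python
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
titles = "ahp,halfwidth,peak,expT,expI,sweep".split(",")

ld = []  # list of dicts; each key has a small chance of being absent
for _ in range(5):
    d = {t: float(rng.random() * 100) for t in titles if rng.random() > 0.05}
    if "sweep" in d:
        d["sweep"] = int(d["sweep"])
    ld.append(d)

df = pd.DataFrame(ld)  # absent keys show up as NaN
print(df)
```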
def dictlist_to_tsv(dictlist: List[Dict[str, Any]]) -> str: """ From a consistent list of dictionaries mapping fieldnames to values, make a TSV file. """ if not dictlist: return "" fieldnames = dictlist[0].keys() tsv = "\t".join([tsv_escape(f) for f in fieldnames]) + "\n" for d in dictlist: tsv += "\t".join([tsv_escape(v) for v in d.values()]) + "\n" return tsv
[ "def", "dictlist_to_tsv", "(", "dictlist", ":", "List", "[", "Dict", "[", "str", ",", "Any", "]", "]", ")", "->", "str", ":", "if", "not", "dictlist", ":", "return", "\"\"", "fieldnames", "=", "dictlist", "[", "0", "]", ".", "keys", "(", ")", "tsv", "=", "\"\\t\"", ".", "join", "(", "[", "tsv_escape", "(", "f", ")", "for", "f", "in", "fieldnames", "]", ")", "+", "\"\\n\"", "for", "d", "in", "dictlist", ":", "tsv", "+=", "\"\\t\"", ".", "join", "(", "[", "tsv_escape", "(", "v", ")", "for", "v", "in", "d", ".", "values", "(", ")", "]", ")", "+", "\"\\n\"", "return", "tsv" ]
33.666667
0.00241
[ "def dictlist_to_tsv(dictlist: List[Dict[str, Any]]) -> str:\n", " \"\"\"\n", " From a consistent list of dictionaries mapping fieldnames to values,\n", " make a TSV file.\n", " \"\"\"\n", " if not dictlist:\n", " return \"\"\n", " fieldnames = dictlist[0].keys()\n", " tsv = \"\\t\".join([tsv_escape(f) for f in fieldnames]) + \"\\n\"\n", " for d in dictlist:\n", " tsv += \"\\t\".join([tsv_escape(v) for v in d.values()]) + \"\\n\"\n", " return tsv" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.07142857142857142 ]
12
0.005952
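A short usage sketch for `dictlist_to_tsv`. The original depends on a `tsv_escape` helper that is not shown, so the stand-in below (stringify, escape tabs/newlines/backslashes, map None to an empty string) is only an assumption about its behaviour.

```python
from typing import Any, Dict, List


def tsv_escape(x: Any) -> str:
    # Placeholder for the real helper: stringify and escape TSV-hostile characters.
    if x is None:
        return ""
    return str(x).replace("\\", "\\\\").replace("\t", "\\t").replace("\n", "\\n")


def dictlist_to_tsv(dictlist: List[Dict[str, Any]]) -> str:
    if not dictlist:
        return ""
    fieldnames = dictlist[0].keys()
    tsv = "\t".join([tsv_escape(f) for f in fieldnames]) + "\n"
    for d in dictlist:
        tsv += "\t".join([tsv_escape(v) for v in d.values()]) + "\n"
    return tsv


rows = [{"id": 1, "name": "alpha"}, {"id": 2, "name": "beta"}]
print(dictlist_to_tsv(rows))
# id<TAB>name / 1<TAB>alpha / 2<TAB>beta, one row per line
```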
def get_stories(label_type): """ Returns a list of the stories in the Na corpus. """ prefixes = get_story_prefixes(label_type) texts = list(set([prefix.split(".")[0].split("/")[1] for prefix in prefixes])) return texts
[ "def", "get_stories", "(", "label_type", ")", ":", "prefixes", "=", "get_story_prefixes", "(", "label_type", ")", "texts", "=", "list", "(", "set", "(", "[", "prefix", ".", "split", "(", "\".\"", ")", "[", "0", "]", ".", "split", "(", "\"/\"", ")", "[", "1", "]", "for", "prefix", "in", "prefixes", "]", ")", ")", "return", "texts" ]
38.333333
0.008511
[ "def get_stories(label_type):\n", " \"\"\" Returns a list of the stories in the Na corpus. \"\"\"\n", "\n", " prefixes = get_story_prefixes(label_type)\n", " texts = list(set([prefix.split(\".\")[0].split(\"/\")[1] for prefix in prefixes]))\n", " return texts" ]
[ 0, 0, 0, 0, 0.012048192771084338, 0.0625 ]
6
0.012425
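The list comprehension in `get_stories` assumes prefixes of the form `<dir>/<story>.<suffix>`. A self-contained illustration with made-up prefix strings (the real ones come from `get_story_prefixes`, which is not shown):

```python
prefixes = [
    "preprocessed/story1.phonemes",
    "preprocessed/story1.tones",
    "preprocessed/story2.phonemes",
]

# Keep the part between the directory and the first dot, de-duplicated.
texts = list(set(prefix.split(".")[0].split("/")[1] for prefix in prefixes))
print(sorted(texts))  # ['story1', 'story2']
```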
def stringer(x): """ Takes an object and makes it stringy >>> print(stringer({'a': 1, 2: 3, 'b': [1, 'c', 2.5]})) {'b': ['1', 'c', '2.5'], 'a': '1', '2': '3'} """ if isinstance(x, string_types): return x if isinstance(x, (list, tuple)): return [stringer(y) for y in x] if isinstance(x, dict): return dict((stringer(a), stringer(b)) for a, b in x.items()) return text_type(x)
[ "def", "stringer", "(", "x", ")", ":", "if", "isinstance", "(", "x", ",", "string_types", ")", ":", "return", "x", "if", "isinstance", "(", "x", ",", "(", "list", ",", "tuple", ")", ")", ":", "return", "[", "stringer", "(", "y", ")", "for", "y", "in", "x", "]", "if", "isinstance", "(", "x", ",", "dict", ")", ":", "return", "dict", "(", "(", "stringer", "(", "a", ")", ",", "stringer", "(", "b", ")", ")", "for", "a", ",", "b", "in", "x", ".", "items", "(", ")", ")", "return", "text_type", "(", "x", ")" ]
32.461538
0.002304
[ "def stringer(x):\n", " \"\"\"\n", " Takes an object and makes it stringy\n", " >>> print(stringer({'a': 1, 2: 3, 'b': [1, 'c', 2.5]}))\n", " {'b': ['1', 'c', '2.5'], 'a': '1', '2': '3'}\n", " \"\"\"\n", " if isinstance(x, string_types):\n", " return x\n", " if isinstance(x, (list, tuple)):\n", " return [stringer(y) for y in x]\n", " if isinstance(x, dict):\n", " return dict((stringer(a), stringer(b)) for a, b in x.items())\n", " return text_type(x)" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.043478260869565216 ]
13
0.003344
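`string_types` and `text_type` are not defined in the `stringer` snippet; they read like the usual `six`-style compatibility aliases, so the sketch below assumes `str` for both (Python 3) and shows the recursive behaviour on a nested structure.

```python
string_types = (str,)  # assumed six-style aliases
text_type = str


def stringer(x):
    if isinstance(x, string_types):
        return x
    if isinstance(x, (list, tuple)):
        return [stringer(y) for y in x]
    if isinstance(x, dict):
        return dict((stringer(a), stringer(b)) for a, b in x.items())
    return text_type(x)


print(stringer({"a": 1, 2: 3, "b": [1, "c", 2.5]}))
# {'a': '1', '2': '3', 'b': ['1', 'c', '2.5']}
print(stringer([{"nested": (1, 2)}, None]))
# [{'nested': ['1', '2']}, 'None']
```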
def id_nameDAVID(df,GTF=None,name_id=None): """ Given a DAVIDenrich output it converts ensembl gene ids to genes names and adds this column to the output :param df: a dataframe output from DAVIDenrich :param GTF: a GTF dataframe from readGTF() :param name_id: instead of a gtf dataframe a dataframe with the columns 'gene_name' and 'gene_id' can be given as input :returns: a pandas dataframe with a gene name column added to it. """ if name_id is None: gene_name=retrieve_GTF_field('gene_name',GTF) gene_id=retrieve_GTF_field('gene_id', GTF) GTF=pd.concat([gene_name,gene_id],axis=1) else: GTF=name_id.copy() df['Gene_names']="genes" terms=df['termName'].tolist() enrichN=pd.DataFrame() for term in terms: tmp=df[df['termName']==term] tmp=tmp.reset_index(drop=True) ids=tmp.xs(0)['geneIds'] ids=pd.DataFrame(data=ids.split(", ")) ids.columns=['geneIds'] ids['geneIds']=ids['geneIds'].map(str.lower) GTF['gene_id']=GTF['gene_id'].astype(str) GTF['gene_id']=GTF['gene_id'].map(str.lower) ids=pd.merge(ids, GTF, how='left', left_on='geneIds', right_on='gene_id') names=ids['gene_name'].tolist() names= ', '.join(names) tmp["Gene_names"]=names #tmp=tmp.replace(to_replace=tmp.xs(0)['Gene_names'], value=names) enrichN=pd.concat([enrichN, tmp]) enrichN=enrichN.reset_index(drop=True) gene_names=enrichN[['Gene_names']] gpos=enrichN.columns.get_loc("geneIds") enrichN=enrichN.drop(['Gene_names'],axis=1) cols=enrichN.columns.tolist() enrichN=pd.concat([enrichN[cols[:gpos+1]],gene_names,enrichN[cols[gpos+1:]]],axis=1) return enrichN
[ "def", "id_nameDAVID", "(", "df", ",", "GTF", "=", "None", ",", "name_id", "=", "None", ")", ":", "if", "name_id", "is", "None", ":", "gene_name", "=", "retrieve_GTF_field", "(", "'gene_name'", ",", "GTF", ")", "gene_id", "=", "retrieve_GTF_field", "(", "'gene_id'", ",", "GTF", ")", "GTF", "=", "pd", ".", "concat", "(", "[", "gene_name", ",", "gene_id", "]", ",", "axis", "=", "1", ")", "else", ":", "GTF", "=", "name_id", ".", "copy", "(", ")", "df", "[", "'Gene_names'", "]", "=", "\"genes\"", "terms", "=", "df", "[", "'termName'", "]", ".", "tolist", "(", ")", "enrichN", "=", "pd", ".", "DataFrame", "(", ")", "for", "term", "in", "terms", ":", "tmp", "=", "df", "[", "df", "[", "'termName'", "]", "==", "term", "]", "tmp", "=", "tmp", ".", "reset_index", "(", "drop", "=", "True", ")", "ids", "=", "tmp", ".", "xs", "(", "0", ")", "[", "'geneIds'", "]", "ids", "=", "pd", ".", "DataFrame", "(", "data", "=", "ids", ".", "split", "(", "\", \"", ")", ")", "ids", ".", "columns", "=", "[", "'geneIds'", "]", "ids", "[", "'geneIds'", "]", "=", "ids", "[", "'geneIds'", "]", ".", "map", "(", "str", ".", "lower", ")", "GTF", "[", "'gene_id'", "]", "=", "GTF", "[", "'gene_id'", "]", ".", "astype", "(", "str", ")", "GTF", "[", "'gene_id'", "]", "=", "GTF", "[", "'gene_id'", "]", ".", "map", "(", "str", ".", "lower", ")", "ids", "=", "pd", ".", "merge", "(", "ids", ",", "GTF", ",", "how", "=", "'left'", ",", "left_on", "=", "'geneIds'", ",", "right_on", "=", "'gene_id'", ")", "names", "=", "ids", "[", "'gene_name'", "]", ".", "tolist", "(", ")", "names", "=", "', '", ".", "join", "(", "names", ")", "tmp", "[", "\"Gene_names\"", "]", "=", "names", "#tmp=tmp.replace(to_replace=tmp.xs(0)['Gene_names'], value=names)", "enrichN", "=", "pd", ".", "concat", "(", "[", "enrichN", ",", "tmp", "]", ")", "enrichN", "=", "enrichN", ".", "reset_index", "(", "drop", "=", "True", ")", "gene_names", "=", "enrichN", "[", "[", "'Gene_names'", "]", "]", "gpos", "=", "enrichN", ".", "columns", ".", "get_loc", "(", "\"geneIds\"", ")", "enrichN", "=", "enrichN", ".", "drop", "(", "[", "'Gene_names'", "]", ",", "axis", "=", "1", ")", "cols", "=", "enrichN", ".", "columns", ".", "tolist", "(", ")", "enrichN", "=", "pd", ".", "concat", "(", "[", "enrichN", "[", "cols", "[", ":", "gpos", "+", "1", "]", "]", ",", "gene_names", ",", "enrichN", "[", "cols", "[", "gpos", "+", "1", ":", "]", "]", "]", ",", "axis", "=", "1", ")", "return", "enrichN" ]
39.860465
0.023918
[ "def id_nameDAVID(df,GTF=None,name_id=None):\n", " \"\"\"\n", " Given a DAVIDenrich output it converts ensembl gene ids to genes names and adds this column to the output\n", "\n", " :param df: a dataframe output from DAVIDenrich\n", " :param GTF: a GTF dataframe from readGTF()\n", " :param name_id: instead of a gtf dataframe a dataframe with the columns 'gene_name' and 'gene_id' can be given as input\n", "\n", " :returns: a pandas dataframe with a gene name column added to it.\n", " \"\"\"\n", " if name_id is None:\n", " gene_name=retrieve_GTF_field('gene_name',GTF)\n", " gene_id=retrieve_GTF_field('gene_id', GTF)\n", " GTF=pd.concat([gene_name,gene_id],axis=1)\n", " else:\n", " GTF=name_id.copy()\n", " df['Gene_names']=\"genes\"\n", " terms=df['termName'].tolist()\n", " enrichN=pd.DataFrame()\n", " for term in terms:\n", " tmp=df[df['termName']==term]\n", " tmp=tmp.reset_index(drop=True)\n", " ids=tmp.xs(0)['geneIds']\n", " ids=pd.DataFrame(data=ids.split(\", \"))\n", " ids.columns=['geneIds']\n", " ids['geneIds']=ids['geneIds'].map(str.lower)\n", " GTF['gene_id']=GTF['gene_id'].astype(str)\n", " GTF['gene_id']=GTF['gene_id'].map(str.lower)\n", " ids=pd.merge(ids, GTF, how='left', left_on='geneIds', right_on='gene_id')\n", " names=ids['gene_name'].tolist()\n", " names= ', '.join(names)\n", " tmp[\"Gene_names\"]=names\n", " #tmp=tmp.replace(to_replace=tmp.xs(0)['Gene_names'], value=names)\n", " enrichN=pd.concat([enrichN, tmp])\n", " enrichN=enrichN.reset_index(drop=True)\n", "\n", " gene_names=enrichN[['Gene_names']]\n", " gpos=enrichN.columns.get_loc(\"geneIds\")\n", " enrichN=enrichN.drop(['Gene_names'],axis=1)\n", " cols=enrichN.columns.tolist()\n", " enrichN=pd.concat([enrichN[cols[:gpos+1]],gene_names,enrichN[cols[gpos+1:]]],axis=1)\n", "\n", " return enrichN" ]
[ 0.045454545454545456, 0, 0.00909090909090909, 0, 0, 0, 0.008064516129032258, 0, 0, 0, 0, 0.037037037037037035, 0.0196078431372549, 0.06, 0, 0.037037037037037035, 0.034482758620689655, 0.029411764705882353, 0.037037037037037035, 0, 0.05405405405405406, 0.02564102564102564, 0.030303030303030304, 0.02127659574468085, 0.03125, 0.018867924528301886, 0.02, 0.018867924528301886, 0.024390243902439025, 0.025, 0.03125, 0.03125, 0.013513513513513514, 0.023809523809523808, 0.023255813953488372, 0, 0.02564102564102564, 0.022727272727272728, 0.041666666666666664, 0.029411764705882353, 0.056179775280898875, 0, 0.05555555555555555 ]
43
0.021887
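The heart of `id_nameDAVID` is the per-term merge of a comma-separated `geneIds` string against a `gene_id`/`gene_name` lookup table. A toy version of just that step, using made-up IDs in place of a real GTF-derived table:

```python
import pandas as pd

# Made-up lookup table standing in for the GTF / name_id frame.
gtf = pd.DataFrame({
    "gene_id": ["ENSG000001", "ENSG000002", "ENSG000003"],
    "gene_name": ["TP53", "BRCA1", "MYC"],
})
gtf["gene_id"] = gtf["gene_id"].str.lower()

gene_ids = "ENSG000001, ENSG000003"  # one enrichment term's geneIds field
ids = pd.DataFrame({"geneIds": [g.lower() for g in gene_ids.split(", ")]})

merged = pd.merge(ids, gtf, how="left", left_on="geneIds", right_on="gene_id")
print(", ".join(merged["gene_name"].tolist()))  # TP53, MYC
```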
def execution_time(self, value): """ Force the execution_time to always be a datetime :param value: :return: """ if value: self._execution_time = parse(value) if isinstance(value, type_check) else value
[ "def", "execution_time", "(", "self", ",", "value", ")", ":", "if", "value", ":", "self", ".", "_execution_time", "=", "parse", "(", "value", ")", "if", "isinstance", "(", "value", ",", "type_check", ")", "else", "value" ]
31.875
0.01145
[ "def execution_time(self, value):\n", " \"\"\"\n", " Force the execution_time to always be a datetime\n", " :param value:\n", " :return:\n", " \"\"\"\n", " if value:\n", " self._execution_time = parse(value) if isinstance(value, type_check) else value" ]
[ 0, 0.08333333333333333, 0, 0, 0, 0, 0, 0.02197802197802198 ]
8
0.013164
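Neither `parse` nor `type_check` is defined in the `execution_time` snippet; they read like `dateutil.parser.parse` and a string-type check respectively, which is only a guess. Under that assumption, the coercion pattern looks like this:

```python
from datetime import datetime

from dateutil.parser import parse  # assumed source of `parse`

type_check = (str,)  # assumed: "is this a string that still needs parsing?"


def coerce_execution_time(value):
    # Strings are parsed into datetimes; datetimes (or falsy values) pass through.
    if value:
        return parse(value) if isinstance(value, type_check) else value
    return None


print(coerce_execution_time("2019-03-01 12:30:00"))  # 2019-03-01 12:30:00
print(coerce_execution_time(datetime(2019, 3, 1)))   # 2019-03-01 00:00:00
```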
def load_precision(filename): """ Load a CLASS precision file into a dictionary. Parameters ---------- filename : str the name of an existing file to load, or one in the files included as part of the CLASS source Returns ------- dict : the precision parameters loaded from file """ # also look in data dir path = _find_file(filename) r = dict() with open(path, 'r') as f: exec(f.read(), {}, r) return r
[ "def", "load_precision", "(", "filename", ")", ":", "# also look in data dir", "path", "=", "_find_file", "(", "filename", ")", "r", "=", "dict", "(", ")", "with", "open", "(", "path", ",", "'r'", ")", "as", "f", ":", "exec", "(", "f", ".", "read", "(", ")", ",", "{", "}", ",", "r", ")", "return", "r" ]
20.521739
0.002024
[ "def load_precision(filename):\n", " \"\"\"\n", " Load a CLASS precision file into a dictionary.\n", "\n", " Parameters\n", " ----------\n", " filename : str\n", " the name of an existing file to load, or one in the files included\n", " as part of the CLASS source\n", "\n", " Returns\n", " -------\n", " dict :\n", " the precision parameters loaded from file\n", " \"\"\"\n", " # also look in data dir\n", " path = _find_file(filename)\n", "\n", " r = dict()\n", " with open(path, 'r') as f:\n", " exec(f.read(), {}, r)\n", "\n", " return r" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.08333333333333333 ]
23
0.003623
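The loading step in `load_precision` is just `exec`-ing a Python-syntax parameter file into a fresh dict. A self-contained sketch of that mechanism, skipping the package's `_find_file` lookup and using a throwaway file with made-up parameter lines rather than a real CLASS precision file:

```python
import tempfile

# A made-up precision file: plain `name = value` Python assignments.
contents = "tol_ncdm_bg = 1.e-10\nrecfast_Nz0 = 100000\n"

with tempfile.NamedTemporaryFile("w", suffix=".pre", delete=False) as tmp:
    tmp.write(contents)
    path = tmp.name

r = {}
with open(path, "r") as f:
    exec(f.read(), {}, r)

print(r)  # {'tol_ncdm_bg': 1e-10, 'recfast_Nz0': 100000}
```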
def sed(self, name, **kwargs): """Generate a spectral energy distribution (SED) for a source. This function will fit the normalization of the source in each energy bin. By default the SED will be generated with the analysis energy bins but a custom binning can be defined with the ``loge_bins`` parameter. Parameters ---------- name : str Source name. prefix : str Optional string that will be prepended to all output files (FITS and rendered images). loge_bins : `~numpy.ndarray` Sequence of energies in log10(E/MeV) defining the edges of the energy bins. If this argument is None then the analysis energy bins will be used. The energies in this sequence must align with the bin edges of the underyling analysis instance. {options} optimizer : dict Dictionary that overrides the default optimizer settings. Returns ------- sed : dict Dictionary containing output of the SED analysis. """ timer = Timer.create(start=True) name = self.roi.get_source_by_name(name).name # Create schema for method configuration schema = ConfigSchema(self.defaults['sed'], optimizer=self.defaults['optimizer']) schema.add_option('prefix', '') schema.add_option('outfile', None, '', str) schema.add_option('loge_bins', None, '', list) config = utils.create_dict(self.config['sed'], optimizer=self.config['optimizer']) config = schema.create_config(config, **kwargs) self.logger.info('Computing SED for %s' % name) o = self._make_sed(name, **config) self.logger.info('Finished SED') outfile = config.get('outfile', None) if outfile is None: outfile = utils.format_filename(self.workdir, 'sed', prefix=[config['prefix'], name.lower().replace(' ', '_')]) else: outfile = os.path.join(self.workdir, os.path.splitext(outfile)[0]) o['file'] = None if config['write_fits']: o['file'] = os.path.basename(outfile) + '.fits' self._make_sed_fits(o, outfile + '.fits', **config) if config['write_npy']: np.save(outfile + '.npy', o) if config['make_plots']: self._plotter.make_sed_plots(o, **config) self.logger.info('Execution time: %.2f s', timer.elapsed_time) return o
[ "def", "sed", "(", "self", ",", "name", ",", "*", "*", "kwargs", ")", ":", "timer", "=", "Timer", ".", "create", "(", "start", "=", "True", ")", "name", "=", "self", ".", "roi", ".", "get_source_by_name", "(", "name", ")", ".", "name", "# Create schema for method configuration", "schema", "=", "ConfigSchema", "(", "self", ".", "defaults", "[", "'sed'", "]", ",", "optimizer", "=", "self", ".", "defaults", "[", "'optimizer'", "]", ")", "schema", ".", "add_option", "(", "'prefix'", ",", "''", ")", "schema", ".", "add_option", "(", "'outfile'", ",", "None", ",", "''", ",", "str", ")", "schema", ".", "add_option", "(", "'loge_bins'", ",", "None", ",", "''", ",", "list", ")", "config", "=", "utils", ".", "create_dict", "(", "self", ".", "config", "[", "'sed'", "]", ",", "optimizer", "=", "self", ".", "config", "[", "'optimizer'", "]", ")", "config", "=", "schema", ".", "create_config", "(", "config", ",", "*", "*", "kwargs", ")", "self", ".", "logger", ".", "info", "(", "'Computing SED for %s'", "%", "name", ")", "o", "=", "self", ".", "_make_sed", "(", "name", ",", "*", "*", "config", ")", "self", ".", "logger", ".", "info", "(", "'Finished SED'", ")", "outfile", "=", "config", ".", "get", "(", "'outfile'", ",", "None", ")", "if", "outfile", "is", "None", ":", "outfile", "=", "utils", ".", "format_filename", "(", "self", ".", "workdir", ",", "'sed'", ",", "prefix", "=", "[", "config", "[", "'prefix'", "]", ",", "name", ".", "lower", "(", ")", ".", "replace", "(", "' '", ",", "'_'", ")", "]", ")", "else", ":", "outfile", "=", "os", ".", "path", ".", "join", "(", "self", ".", "workdir", ",", "os", ".", "path", ".", "splitext", "(", "outfile", ")", "[", "0", "]", ")", "o", "[", "'file'", "]", "=", "None", "if", "config", "[", "'write_fits'", "]", ":", "o", "[", "'file'", "]", "=", "os", ".", "path", ".", "basename", "(", "outfile", ")", "+", "'.fits'", "self", ".", "_make_sed_fits", "(", "o", ",", "outfile", "+", "'.fits'", ",", "*", "*", "config", ")", "if", "config", "[", "'write_npy'", "]", ":", "np", ".", "save", "(", "outfile", "+", "'.npy'", ",", "o", ")", "if", "config", "[", "'make_plots'", "]", ":", "self", ".", "_plotter", ".", "make_sed_plots", "(", "o", ",", "*", "*", "config", ")", "self", ".", "logger", ".", "info", "(", "'Execution time: %.2f s'", ",", "timer", ".", "elapsed_time", ")", "return", "o" ]
35.56
0.001094
[ "def sed(self, name, **kwargs):\n", " \"\"\"Generate a spectral energy distribution (SED) for a source. This\n", " function will fit the normalization of the source in each\n", " energy bin. By default the SED will be generated with the\n", " analysis energy bins but a custom binning can be defined with\n", " the ``loge_bins`` parameter.\n", "\n", " Parameters\n", " ----------\n", " name : str\n", " Source name.\n", "\n", " prefix : str\n", " Optional string that will be prepended to all output files\n", " (FITS and rendered images).\n", "\n", " loge_bins : `~numpy.ndarray`\n", " Sequence of energies in log10(E/MeV) defining the edges of\n", " the energy bins. If this argument is None then the\n", " analysis energy bins will be used. The energies in this\n", " sequence must align with the bin edges of the underyling\n", " analysis instance.\n", "\n", " {options}\n", "\n", " optimizer : dict\n", " Dictionary that overrides the default optimizer settings.\n", "\n", " Returns\n", " -------\n", " sed : dict\n", " Dictionary containing output of the SED analysis.\n", "\n", " \"\"\"\n", " timer = Timer.create(start=True)\n", " name = self.roi.get_source_by_name(name).name\n", "\n", " # Create schema for method configuration\n", " schema = ConfigSchema(self.defaults['sed'],\n", " optimizer=self.defaults['optimizer'])\n", " schema.add_option('prefix', '')\n", " schema.add_option('outfile', None, '', str)\n", " schema.add_option('loge_bins', None, '', list)\n", " config = utils.create_dict(self.config['sed'],\n", " optimizer=self.config['optimizer'])\n", " config = schema.create_config(config, **kwargs)\n", "\n", " self.logger.info('Computing SED for %s' % name)\n", "\n", " o = self._make_sed(name, **config)\n", "\n", " self.logger.info('Finished SED')\n", "\n", " outfile = config.get('outfile', None)\n", " if outfile is None:\n", " outfile = utils.format_filename(self.workdir, 'sed',\n", " prefix=[config['prefix'],\n", " name.lower().replace(' ', '_')])\n", " else:\n", " outfile = os.path.join(self.workdir,\n", " os.path.splitext(outfile)[0])\n", "\n", " o['file'] = None\n", " if config['write_fits']:\n", " o['file'] = os.path.basename(outfile) + '.fits'\n", " self._make_sed_fits(o, outfile + '.fits', **config)\n", "\n", " if config['write_npy']:\n", " np.save(outfile + '.npy', o)\n", "\n", " if config['make_plots']:\n", " self._plotter.make_sed_plots(o, **config)\n", "\n", " self.logger.info('Execution time: %.2f s', timer.elapsed_time)\n", " return o" ]
[ 0, 0.012987012987012988, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011764705882352941, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0625 ]
75
0.001163
def http_request(self, verb, uri, data=None, headers=None, files=None, response_format=None, is_rdf = True, stream = False ): ''' Primary route for all HTTP requests to repository. Ability to set most parameters for requests library, with some additional convenience parameters as well. Args: verb (str): HTTP verb to use for request, e.g. PUT, POST, GET, HEAD, PATCH, etc. uri (rdflib.term.URIRef,str): input URI data (str,file): payload of data to send for request, may be overridden in preperation of request headers (dict): optional dictionary of headers passed directly to requests.request files (dict): optional dictionary of files passed directly to requests.request response_format (str): desired response format for resource's payload, e.g. 'application/rdf+xml', 'text/turtle', etc. is_rdf (bool): if True, set Accept header based on combination of response_format and headers stream (bool): passed directly to requests.request for stream parameter Returns: requests.models.Response ''' # set content negotiated response format for RDFSources if is_rdf: ''' Acceptable content negotiated response formats include: application/ld+json (discouraged, if not prohibited, as it drops prefixes used in repository) application/n-triples application/rdf+xml text/n3 (or text/rdf+n3) text/plain text/turtle (or application/x-turtle) ''' # set for GET requests only if verb == 'GET': # if no response_format has been requested to this point, use repository instance default if not response_format: response_format = self.repo.default_serialization # if headers present, append if headers and 'Accept' not in headers.keys(): headers['Accept'] = response_format # if headers are blank, init dictionary else: headers = {'Accept':response_format} # prepare uri for HTTP request if type(uri) == rdflib.term.URIRef: uri = uri.toPython() logger.debug("%s request for %s, format %s, headers %s" % (verb, uri, response_format, headers)) # manually prepare request session = requests.Session() request = requests.Request(verb, uri, auth=(self.repo.username, self.repo.password), data=data, headers=headers, files=files) prepped_request = session.prepare_request(request) response = session.send(prepped_request, stream=stream, ) return response
[ "def", "http_request", "(", "self", ",", "verb", ",", "uri", ",", "data", "=", "None", ",", "headers", "=", "None", ",", "files", "=", "None", ",", "response_format", "=", "None", ",", "is_rdf", "=", "True", ",", "stream", "=", "False", ")", ":", "# set content negotiated response format for RDFSources", "if", "is_rdf", ":", "'''\n\t\t\tAcceptable content negotiated response formats include:\n\t\t\t\tapplication/ld+json (discouraged, if not prohibited, as it drops prefixes used in repository)\n\t\t\t\tapplication/n-triples\n\t\t\t\tapplication/rdf+xml\n\t\t\t\ttext/n3 (or text/rdf+n3)\n\t\t\t\ttext/plain\n\t\t\t\ttext/turtle (or application/x-turtle)\n\t\t\t'''", "# set for GET requests only", "if", "verb", "==", "'GET'", ":", "# if no response_format has been requested to this point, use repository instance default", "if", "not", "response_format", ":", "response_format", "=", "self", ".", "repo", ".", "default_serialization", "# if headers present, append", "if", "headers", "and", "'Accept'", "not", "in", "headers", ".", "keys", "(", ")", ":", "headers", "[", "'Accept'", "]", "=", "response_format", "# if headers are blank, init dictionary", "else", ":", "headers", "=", "{", "'Accept'", ":", "response_format", "}", "# prepare uri for HTTP request", "if", "type", "(", "uri", ")", "==", "rdflib", ".", "term", ".", "URIRef", ":", "uri", "=", "uri", ".", "toPython", "(", ")", "logger", ".", "debug", "(", "\"%s request for %s, format %s, headers %s\"", "%", "(", "verb", ",", "uri", ",", "response_format", ",", "headers", ")", ")", "# manually prepare request", "session", "=", "requests", ".", "Session", "(", ")", "request", "=", "requests", ".", "Request", "(", "verb", ",", "uri", ",", "auth", "=", "(", "self", ".", "repo", ".", "username", ",", "self", ".", "repo", ".", "password", ")", ",", "data", "=", "data", ",", "headers", "=", "headers", ",", "files", "=", "files", ")", "prepped_request", "=", "session", ".", "prepare_request", "(", "request", ")", "response", "=", "session", ".", "send", "(", "prepped_request", ",", "stream", "=", "stream", ",", ")", "return", "response" ]
35.104478
0.036394
[ "def http_request(self,\n", "\t\t\tverb,\n", "\t\t\turi,\n", "\t\t\tdata=None,\n", "\t\t\theaders=None,\n", "\t\t\tfiles=None,\n", "\t\t\tresponse_format=None,\n", "\t\t\tis_rdf = True,\n", "\t\t\tstream = False\n", "\t\t):\n", "\n", "\t\t'''\n", "\t\tPrimary route for all HTTP requests to repository. Ability to set most parameters for requests library,\n", "\t\twith some additional convenience parameters as well.\n", "\n", "\t\tArgs:\n", "\t\t\tverb (str): HTTP verb to use for request, e.g. PUT, POST, GET, HEAD, PATCH, etc.\n", "\t\t\turi (rdflib.term.URIRef,str): input URI\n", "\t\t\tdata (str,file): payload of data to send for request, may be overridden in preperation of request\n", "\t\t\theaders (dict): optional dictionary of headers passed directly to requests.request\n", "\t\t\tfiles (dict): optional dictionary of files passed directly to requests.request\n", "\t\t\tresponse_format (str): desired response format for resource's payload, e.g. 'application/rdf+xml', 'text/turtle', etc.\n", "\t\t\tis_rdf (bool): if True, set Accept header based on combination of response_format and headers\n", "\t\t\tstream (bool): passed directly to requests.request for stream parameter\n", "\n", "\t\tReturns:\n", "\t\t\trequests.models.Response\n", "\t\t'''\n", "\n", "\t\t# set content negotiated response format for RDFSources\n", "\t\tif is_rdf:\n", "\t\t\t'''\n", "\t\t\tAcceptable content negotiated response formats include:\n", "\t\t\t\tapplication/ld+json (discouraged, if not prohibited, as it drops prefixes used in repository)\n", "\t\t\t\tapplication/n-triples\n", "\t\t\t\tapplication/rdf+xml\n", "\t\t\t\ttext/n3 (or text/rdf+n3)\n", "\t\t\t\ttext/plain\n", "\t\t\t\ttext/turtle (or application/x-turtle)\n", "\t\t\t'''\n", "\t\t\t# set for GET requests only\n", "\t\t\tif verb == 'GET':\n", "\t\t\t\t# if no response_format has been requested to this point, use repository instance default\n", "\t\t\t\tif not response_format:\n", "\t\t\t\t\tresponse_format = self.repo.default_serialization\n", "\t\t\t\t# if headers present, append\n", "\t\t\t\tif headers and 'Accept' not in headers.keys():\n", "\t\t\t\t\theaders['Accept'] = response_format\n", "\t\t\t\t# if headers are blank, init dictionary\n", "\t\t\t\telse:\n", "\t\t\t\t\theaders = {'Accept':response_format}\n", "\n", "\t\t# prepare uri for HTTP request\n", "\t\tif type(uri) == rdflib.term.URIRef:\n", "\t\t\turi = uri.toPython()\n", "\n", "\t\tlogger.debug(\"%s request for %s, format %s, headers %s\" %\n", "\t\t\t(verb, uri, response_format, headers))\n", "\n", "\t\t# manually prepare request\n", "\t\tsession = requests.Session()\n", "\t\trequest = requests.Request(verb, uri, auth=(self.repo.username, self.repo.password), data=data, headers=headers, files=files)\n", "\t\tprepped_request = session.prepare_request(request)\n", "\t\tresponse = session.send(prepped_request,\n", "\t\t\tstream=stream,\n", "\t\t)\n", "\t\treturn response" ]
[ 0, 0.2222222222222222, 0.25, 0.14285714285714285, 0.11764705882352941, 0.13333333333333333, 0.08, 0.2222222222222222, 0.2222222222222222, 0.4, 0, 0.3333333333333333, 0.018691588785046728, 0.01818181818181818, 0, 0.125, 0.023809523809523808, 0.023255813953488372, 0.019801980198019802, 0.023255813953488372, 0.024390243902439025, 0.01639344262295082, 0.020618556701030927, 0.013333333333333334, 0, 0.09090909090909091, 0.03571428571428571, 0.16666666666666666, 0, 0.017241379310344827, 0.07692307692307693, 0.14285714285714285, 0.01694915254237288, 0.02040816326530612, 0.038461538461538464, 0.041666666666666664, 0.034482758620689655, 0.06666666666666667, 0.023809523809523808, 0.14285714285714285, 0.03225806451612903, 0.047619047619047616, 0.02127659574468085, 0.03571428571428571, 0.01818181818181818, 0.030303030303030304, 0.0196078431372549, 0.024390243902439025, 0.022727272727272728, 0.1, 0.047619047619047616, 0, 0.030303030303030304, 0.02631578947368421, 0.041666666666666664, 0, 0.016666666666666666, 0.047619047619047616, 0, 0.034482758620689655, 0.03225806451612903, 0.015625, 0.018867924528301886, 0.023255813953488372, 0.1111111111111111, 0.5, 0.11764705882352941 ]
67
0.071369
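The content negotiation and the "manually prepare request" step in `http_request` can be exercised without a live repository: build a `requests.Request`, prepare it, and inspect what would be sent. The URI and serialization below are placeholders, not values from the original code.

```python
import requests

response_format = "text/turtle"  # placeholder default RDF serialization
headers = {}                     # caller supplied no Accept header
verb, uri = "GET", "http://localhost:8080/rest/example"  # placeholder URI

# Same negotiation rule as above: GET requests get an Accept header.
if verb == "GET" and "Accept" not in headers:
    headers["Accept"] = response_format

session = requests.Session()
request = requests.Request(verb, uri, headers=headers)
prepped = session.prepare_request(request)

print(prepped.method, prepped.url)
print(prepped.headers["Accept"])  # text/turtle
```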
def is_unit_or_unitstring(value): """must be an astropy.unit""" if is_unit(value)[0]: return True, value try: unit = units.Unit(value) except: return False, value else: return True, unit
[ "def", "is_unit_or_unitstring", "(", "value", ")", ":", "if", "is_unit", "(", "value", ")", "[", "0", "]", ":", "return", "True", ",", "value", "try", ":", "unit", "=", "units", ".", "Unit", "(", "value", ")", "except", ":", "return", "False", ",", "value", "else", ":", "return", "True", ",", "unit" ]
22.9
0.008403
[ "def is_unit_or_unitstring(value):\n", " \"\"\"must be an astropy.unit\"\"\"\n", " if is_unit(value)[0]:\n", " return True, value\n", " try:\n", " unit = units.Unit(value)\n", " except:\n", " return False, value\n", " else:\n", " return True, unit" ]
[ 0, 0, 0, 0, 0, 0, 0.08333333333333333, 0, 0, 0.04 ]
10
0.012333
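The try/except in `is_unit_or_unitstring` is there because `astropy.units.Unit` raises `ValueError` for strings it cannot parse. A small demonstration of that behaviour (requires `astropy`; the invalid string is arbitrary):

```python
from astropy import units

print(units.Unit("km / s"))  # km / s

try:
    units.Unit("definitely-not-a-unit")
except ValueError as exc:
    print("rejected:", type(exc).__name__)
```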
def get(src_hdfs_path, dest_path, **kwargs): """\ Copy the contents of ``src_hdfs_path`` to ``dest_path``. ``dest_path`` is forced to be interpreted as an ordinary local path (see :func:`~path.abspath`). The source file is opened for reading and the copy is opened for writing. Additional keyword arguments, if any, are handled like in :func:`open`. """ cp(src_hdfs_path, path.abspath(dest_path, local=True), **kwargs)
[ "def", "get", "(", "src_hdfs_path", ",", "dest_path", ",", "*", "*", "kwargs", ")", ":", "cp", "(", "src_hdfs_path", ",", "path", ".", "abspath", "(", "dest_path", ",", "local", "=", "True", ")", ",", "*", "*", "kwargs", ")" ]
44.2
0.002217
[ "def get(src_hdfs_path, dest_path, **kwargs):\n", " \"\"\"\\\n", " Copy the contents of ``src_hdfs_path`` to ``dest_path``.\n", "\n", " ``dest_path`` is forced to be interpreted as an ordinary local\n", " path (see :func:`~path.abspath`). The source file is opened for\n", " reading and the copy is opened for writing. Additional keyword\n", " arguments, if any, are handled like in :func:`open`.\n", " \"\"\"\n", " cp(src_hdfs_path, path.abspath(dest_path, local=True), **kwargs)" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.014705882352941176 ]
10
0.001471
def remove(self): """ Remove the directory. """ lib.gp_camera_folder_remove_dir( self._cam._cam, self.parent.path.encode(), self.name.encode(), self._cam._ctx)
[ "def", "remove", "(", "self", ")", ":", "lib", ".", "gp_camera_folder_remove_dir", "(", "self", ".", "_cam", ".", "_cam", ",", "self", ".", "parent", ".", "path", ".", "encode", "(", ")", ",", "self", ".", "name", ".", "encode", "(", ")", ",", "self", ".", "_cam", ".", "_ctx", ")" ]
39
0.01005
[ "def remove(self):\n", " \"\"\" Remove the directory. \"\"\"\n", " lib.gp_camera_folder_remove_dir(\n", " self._cam._cam, self.parent.path.encode(), self.name.encode(),\n", " self._cam._ctx)" ]
[ 0, 0.02631578947368421, 0, 0, 0.037037037037037035 ]
5
0.012671
def __bindings(self): """Binds events to handlers""" self.textctrl.Bind(wx.EVT_TEXT, self.OnText) self.fontbutton.Bind(wx.EVT_BUTTON, self.OnFont) self.Bind(csel.EVT_COLOURSELECT, self.OnColor)
[ "def", "__bindings", "(", "self", ")", ":", "self", ".", "textctrl", ".", "Bind", "(", "wx", ".", "EVT_TEXT", ",", "self", ".", "OnText", ")", "self", ".", "fontbutton", ".", "Bind", "(", "wx", ".", "EVT_BUTTON", ",", "self", ".", "OnFont", ")", "self", ".", "Bind", "(", "csel", ".", "EVT_COLOURSELECT", ",", "self", ".", "OnColor", ")" ]
36.833333
0.00885
[ "def __bindings(self):\n", " \"\"\"Binds events to handlers\"\"\"\n", "\n", " self.textctrl.Bind(wx.EVT_TEXT, self.OnText)\n", " self.fontbutton.Bind(wx.EVT_BUTTON, self.OnFont)\n", " self.Bind(csel.EVT_COLOURSELECT, self.OnColor)" ]
[ 0, 0.02564102564102564, 0, 0, 0, 0.018518518518518517 ]
6
0.00736
def StartAndWait(self): """Starts the task and waits until it is done.""" self.StartTask() self.WaitUntilTaskDone(pydaq.DAQmx_Val_WaitInfinitely) self.ClearTask()
[ "def", "StartAndWait", "(", "self", ")", ":", "self", ".", "StartTask", "(", ")", "self", ".", "WaitUntilTaskDone", "(", "pydaq", ".", "DAQmx_Val_WaitInfinitely", ")", "self", ".", "ClearTask", "(", ")" ]
38
0.010309
[ "def StartAndWait(self):\n", " \"\"\"Starts the task and waits until it is done.\"\"\"\n", " self.StartTask()\n", " self.WaitUntilTaskDone(pydaq.DAQmx_Val_WaitInfinitely)\n", " self.ClearTask()" ]
[ 0, 0.017241379310344827, 0, 0, 0.041666666666666664 ]
5
0.011782
def fit_sparse(model_matrix, response, model, model_coefficients_start, tolerance, l1_regularizer, l2_regularizer=None, maximum_iterations=None, maximum_full_sweeps_per_iteration=1, learning_rate=None, name=None): r"""Fits a GLM using coordinate-wise FIM-informed proximal gradient descent. This function uses a L1- and L2-regularized, second-order quasi-Newton method to find maximum-likelihood parameters for the given model and observed data. The second-order approximations use negative Fisher information in place of the Hessian, that is, ```none FisherInfo = E_Y[Hessian with respect to model_coefficients of -LogLikelihood( Y | model_matrix, current value of model_coefficients)] ``` For large, sparse data sets, `model_matrix` should be supplied as a `SparseTensor`. Args: model_matrix: (Batch of) matrix-shaped, `float` `Tensor` or `SparseTensor` where each row represents a sample's features. Has shape `[N, n]` where `N` is the number of data samples and `n` is the number of features per sample. response: (Batch of) vector-shaped `Tensor` with the same dtype as `model_matrix` where each element represents a sample's observed response (to the corresponding row of features). model: `tfp.glm.ExponentialFamily`-like instance, which specifies the link function and distribution of the GLM, and thus characterizes the negative log-likelihood which will be minimized. Must have sufficient statistic equal to the response, that is, `T(y) = y`. model_coefficients_start: (Batch of) vector-shaped, `float` `Tensor` with the same dtype as `model_matrix`, representing the initial values of the coefficients for the GLM regression. Has shape `[n]` where `model_matrix` has shape `[N, n]`. tolerance: scalar, `float` `Tensor` representing the tolerance for each optiization step; see the `tolerance` argument of `fit_sparse_one_step`. l1_regularizer: scalar, `float` `Tensor` representing the weight of the L1 regularization term. l2_regularizer: scalar, `float` `Tensor` representing the weight of the L2 regularization term. Default value: `None` (i.e., no L2 regularization). maximum_iterations: Python integer specifying maximum number of iterations of the outer loop of the optimizer (i.e., maximum number of calls to `fit_sparse_one_step`). After this many iterations of the outer loop, the algorithm will terminate even if the return value `model_coefficients` has not converged. Default value: `1`. maximum_full_sweeps_per_iteration: Python integer specifying the maximum number of coordinate descent sweeps allowed in each iteration. Default value: `1`. learning_rate: scalar, `float` `Tensor` representing a multiplicative factor used to dampen the proximal gradient descent steps. Default value: `None` (i.e., factor is conceptually `1`). name: Python string representing the name of the TensorFlow operation. The default name is `"fit_sparse"`. Returns: model_coefficients: (Batch of) `Tensor` of the same shape and dtype as `model_coefficients_start`, representing the computed model coefficients which minimize the regularized negative log-likelihood. is_converged: scalar, `bool` `Tensor` indicating whether the minimization procedure converged across all batches within the specified number of iterations. Here convergence means that an iteration of the inner loop (`fit_sparse_one_step`) returns `True` for its `is_converged` output value. 
iter: scalar, `int` `Tensor` indicating the actual number of iterations of the outer loop of the optimizer completed (i.e., number of calls to `fit_sparse_one_step` before achieving convergence). #### Example ```python from __future__ import print_function import numpy as np import tensorflow as tf import tensorflow_probability as tfp tfd = tfp.distributions def make_dataset(n, d, link, scale=1., dtype=np.float32): model_coefficients = tfd.Uniform( low=np.array(-1, dtype), high=np.array(1, dtype)).sample( d, seed=42) radius = np.sqrt(2.) model_coefficients *= radius / tf.linalg.norm(model_coefficients) mask = tf.random_shuffle(tf.range(d)) < tf.to_int32(0.5 * tf.to_float(d)) model_coefficients = tf.where(mask, model_coefficients, tf.zeros_like(model_coefficients)) model_matrix = tfd.Normal( loc=np.array(0, dtype), scale=np.array(1, dtype)).sample( [n, d], seed=43) scale = tf.convert_to_tensor(scale, dtype) linear_response = tf.matmul(model_matrix, model_coefficients[..., tf.newaxis])[..., 0] if link == 'linear': response = tfd.Normal(loc=linear_response, scale=scale).sample(seed=44) elif link == 'probit': response = tf.cast( tfd.Normal(loc=linear_response, scale=scale).sample(seed=44) > 0, dtype) elif link == 'logit': response = tfd.Bernoulli(logits=linear_response).sample(seed=44) else: raise ValueError('unrecognized true link: {}'.format(link)) return model_matrix, response, model_coefficients, mask with tf.Session() as sess: x_, y_, model_coefficients_true_, _ = sess.run(make_dataset( n=int(1e5), d=100, link='probit')) model = tfp.glm.Bernoulli() model_coefficients_start = tf.zeros(x_.shape[-1], np.float32) model_coefficients, is_converged, num_iter = tfp.glm.fit_sparse( model_matrix=tf.convert_to_tensor(x_), response=tf.convert_to_tensor(y_), model=model, model_coefficients_start=model_coefficients_start, l1_regularizer=800., l2_regularizer=None, maximum_iterations=10, maximum_full_sweeps_per_iteration=10, tolerance=1e-6, learning_rate=None) model_coefficients_, is_converged_, num_iter_ = sess.run([ model_coefficients, is_converged, num_iter]) print("is_converged:", is_converged_) print(" num_iter:", num_iter_) print("\nLearned / True") print(np.concatenate( [[model_coefficients_], [model_coefficients_true_]], axis=0).T) # ==> # is_converged: True # num_iter: 1 # # Learned / True # [[ 0. 0. ] # [ 0. 0. ] # [ 0. 0. ] # [ 0.11195257 0.12484948] # [ 0. 0. ] # [ 0.05191106 0.06394956] # [-0.15090358 -0.15325639] # [-0.18187316 -0.18825999] # [-0.06140942 -0.07994166] # [ 0. 0. ] # [ 0. 0. ] # [ 0. 0. ] # [ 0.14474444 0.15810856] # [ 0. 0. ] # [-0.25249591 -0.24260855] # [ 0. 0. ] # [ 0. 0. ] # [-0.03888761 -0.06755984] # [ 0. 0. ] # [ 0. 0. ] # [ 0. 0. ] # [-0.0192222 -0.04169233] # [ 0. 0. ] # [ 0. 0. ] # [ 0.01434913 0.03568212] # [-0.11336883 -0.12873614] # [ 0. 0. ] # [-0.24496339 -0.24048163] # [ 0. 0. ] # [ 0. 0. ] # [ 0.04088281 0.06565224] # [-0.12784363 -0.13359821] # [ 0.05618424 0.07396613] # [ 0. 0. ] # [ 0. 0. ] # [ 0. 0. ] # [ 0. -0.01719233] # [ 0. 0. ] # [ 0. 0. ] # [-0.00076072 -0.03607186] # [ 0.21801499 0.21146794] # [-0.02161094 -0.04031265] # [ 0.0918689 0.10487888] # [ 0.0106154 0.03233612] # [-0.07817317 -0.09725142] # [ 0. 0. ] # [ 0. 0. ] # [-0.23725343 -0.24194022] # [ 0. 0. ] # [-0.08725718 -0.1048776 ] # [ 0. 0. ] # [ 0. 0. ] # [-0.02114314 -0.04145789] # [ 0. 0. ] # [ 0. 0. ] # [-0.02710908 -0.04590397] # [ 0.15293184 0.15415154] # [ 0.2114463 0.2088728 ] # [-0.10969634 -0.12368613] # [ 0. 
-0.01505797] # [-0.01140458 -0.03234904] # [ 0.16051085 0.1680062 ] # [ 0.09816848 0.11094204] ``` #### References [1]: Jerome Friedman, Trevor Hastie and Rob Tibshirani. Regularization Paths for Generalized Linear Models via Coordinate Descent. _Journal of Statistical Software_, 33(1), 2010. https://www.jstatsoft.org/article/view/v033i01/v33i01.pdf [2]: Guo-Xun Yuan, Chia-Hua Ho and Chih-Jen Lin. An Improved GLMNET for L1-regularized Logistic Regression. _Journal of Machine Learning Research_, 13, 2012. http://www.jmlr.org/papers/volume13/yuan12a/yuan12a.pdf """ graph_deps = [ model_matrix, response, model_coefficients_start, l1_regularizer, l2_regularizer, maximum_iterations, maximum_full_sweeps_per_iteration, # TODO(b/111925792): Replace `tolerance` arg with something like # `convergence_criteria_fn`. tolerance, learning_rate, ] with tf.compat.v1.name_scope(name, 'fit_sparse', graph_deps): # TODO(b/111922388): Include dispersion and offset parameters. def _grad_neg_log_likelihood_and_fim_fn(x): predicted_linear_response = sparse_or_dense_matvecmul(model_matrix, x) g, h_middle = _grad_neg_log_likelihood_and_fim( model_matrix, predicted_linear_response, response, model) return g, model_matrix, h_middle return tfp.optimizer.proximal_hessian_sparse_minimize( _grad_neg_log_likelihood_and_fim_fn, x_start=model_coefficients_start, l1_regularizer=l1_regularizer, l2_regularizer=l2_regularizer, maximum_iterations=maximum_iterations, maximum_full_sweeps_per_iteration=maximum_full_sweeps_per_iteration, learning_rate=learning_rate, tolerance=tolerance, name=name)
[ "def", "fit_sparse", "(", "model_matrix", ",", "response", ",", "model", ",", "model_coefficients_start", ",", "tolerance", ",", "l1_regularizer", ",", "l2_regularizer", "=", "None", ",", "maximum_iterations", "=", "None", ",", "maximum_full_sweeps_per_iteration", "=", "1", ",", "learning_rate", "=", "None", ",", "name", "=", "None", ")", ":", "graph_deps", "=", "[", "model_matrix", ",", "response", ",", "model_coefficients_start", ",", "l1_regularizer", ",", "l2_regularizer", ",", "maximum_iterations", ",", "maximum_full_sweeps_per_iteration", ",", "# TODO(b/111925792): Replace `tolerance` arg with something like", "# `convergence_criteria_fn`.", "tolerance", ",", "learning_rate", ",", "]", "with", "tf", ".", "compat", ".", "v1", ".", "name_scope", "(", "name", ",", "'fit_sparse'", ",", "graph_deps", ")", ":", "# TODO(b/111922388): Include dispersion and offset parameters.", "def", "_grad_neg_log_likelihood_and_fim_fn", "(", "x", ")", ":", "predicted_linear_response", "=", "sparse_or_dense_matvecmul", "(", "model_matrix", ",", "x", ")", "g", ",", "h_middle", "=", "_grad_neg_log_likelihood_and_fim", "(", "model_matrix", ",", "predicted_linear_response", ",", "response", ",", "model", ")", "return", "g", ",", "model_matrix", ",", "h_middle", "return", "tfp", ".", "optimizer", ".", "proximal_hessian_sparse_minimize", "(", "_grad_neg_log_likelihood_and_fim_fn", ",", "x_start", "=", "model_coefficients_start", ",", "l1_regularizer", "=", "l1_regularizer", ",", "l2_regularizer", "=", "l2_regularizer", ",", "maximum_iterations", "=", "maximum_iterations", ",", "maximum_full_sweeps_per_iteration", "=", "maximum_full_sweeps_per_iteration", ",", "learning_rate", "=", "learning_rate", ",", "tolerance", "=", "tolerance", ",", "name", "=", "name", ")" ]
39.173228
0.001176
[ "def fit_sparse(model_matrix,\n", " response,\n", " model,\n", " model_coefficients_start,\n", " tolerance,\n", " l1_regularizer,\n", " l2_regularizer=None,\n", " maximum_iterations=None,\n", " maximum_full_sweeps_per_iteration=1,\n", " learning_rate=None,\n", " name=None):\n", " r\"\"\"Fits a GLM using coordinate-wise FIM-informed proximal gradient descent.\n", "\n", " This function uses a L1- and L2-regularized, second-order quasi-Newton method\n", " to find maximum-likelihood parameters for the given model and observed data.\n", " The second-order approximations use negative Fisher information in place of\n", " the Hessian, that is,\n", "\n", " ```none\n", " FisherInfo = E_Y[Hessian with respect to model_coefficients of -LogLikelihood(\n", " Y | model_matrix, current value of model_coefficients)]\n", " ```\n", "\n", " For large, sparse data sets, `model_matrix` should be supplied as a\n", " `SparseTensor`.\n", "\n", " Args:\n", " model_matrix: (Batch of) matrix-shaped, `float` `Tensor` or `SparseTensor`\n", " where each row represents a sample's features. Has shape `[N, n]` where\n", " `N` is the number of data samples and `n` is the number of features per\n", " sample.\n", " response: (Batch of) vector-shaped `Tensor` with the same dtype as\n", " `model_matrix` where each element represents a sample's observed response\n", " (to the corresponding row of features).\n", " model: `tfp.glm.ExponentialFamily`-like instance, which specifies the link\n", " function and distribution of the GLM, and thus characterizes the negative\n", " log-likelihood which will be minimized. Must have sufficient statistic\n", " equal to the response, that is, `T(y) = y`.\n", " model_coefficients_start: (Batch of) vector-shaped, `float` `Tensor` with\n", " the same dtype as `model_matrix`, representing the initial values of the\n", " coefficients for the GLM regression. Has shape `[n]` where `model_matrix`\n", " has shape `[N, n]`.\n", " tolerance: scalar, `float` `Tensor` representing the tolerance for each\n", " optiization step; see the `tolerance` argument of `fit_sparse_one_step`.\n", " l1_regularizer: scalar, `float` `Tensor` representing the weight of the L1\n", " regularization term.\n", " l2_regularizer: scalar, `float` `Tensor` representing the weight of the L2\n", " regularization term.\n", " Default value: `None` (i.e., no L2 regularization).\n", " maximum_iterations: Python integer specifying maximum number of iterations\n", " of the outer loop of the optimizer (i.e., maximum number of calls to\n", " `fit_sparse_one_step`). 
After this many iterations of the outer loop, the\n", " algorithm will terminate even if the return value `model_coefficients` has\n", " not converged.\n", " Default value: `1`.\n", " maximum_full_sweeps_per_iteration: Python integer specifying the maximum\n", " number of coordinate descent sweeps allowed in each iteration.\n", " Default value: `1`.\n", " learning_rate: scalar, `float` `Tensor` representing a multiplicative factor\n", " used to dampen the proximal gradient descent steps.\n", " Default value: `None` (i.e., factor is conceptually `1`).\n", " name: Python string representing the name of the TensorFlow operation.\n", " The default name is `\"fit_sparse\"`.\n", "\n", " Returns:\n", " model_coefficients: (Batch of) `Tensor` of the same shape and dtype as\n", " `model_coefficients_start`, representing the computed model coefficients\n", " which minimize the regularized negative log-likelihood.\n", " is_converged: scalar, `bool` `Tensor` indicating whether the minimization\n", " procedure converged across all batches within the specified number of\n", " iterations. Here convergence means that an iteration of the inner loop\n", " (`fit_sparse_one_step`) returns `True` for its `is_converged` output\n", " value.\n", " iter: scalar, `int` `Tensor` indicating the actual number of iterations of\n", " the outer loop of the optimizer completed (i.e., number of calls to\n", " `fit_sparse_one_step` before achieving convergence).\n", "\n", " #### Example\n", "\n", " ```python\n", " from __future__ import print_function\n", " import numpy as np\n", " import tensorflow as tf\n", " import tensorflow_probability as tfp\n", " tfd = tfp.distributions\n", "\n", " def make_dataset(n, d, link, scale=1., dtype=np.float32):\n", " model_coefficients = tfd.Uniform(\n", " low=np.array(-1, dtype), high=np.array(1, dtype)).sample(\n", " d, seed=42)\n", " radius = np.sqrt(2.)\n", " model_coefficients *= radius / tf.linalg.norm(model_coefficients)\n", " mask = tf.random_shuffle(tf.range(d)) < tf.to_int32(0.5 * tf.to_float(d))\n", " model_coefficients = tf.where(mask, model_coefficients,\n", " tf.zeros_like(model_coefficients))\n", " model_matrix = tfd.Normal(\n", " loc=np.array(0, dtype), scale=np.array(1, dtype)).sample(\n", " [n, d], seed=43)\n", " scale = tf.convert_to_tensor(scale, dtype)\n", " linear_response = tf.matmul(model_matrix,\n", " model_coefficients[..., tf.newaxis])[..., 0]\n", " if link == 'linear':\n", " response = tfd.Normal(loc=linear_response, scale=scale).sample(seed=44)\n", " elif link == 'probit':\n", " response = tf.cast(\n", " tfd.Normal(loc=linear_response, scale=scale).sample(seed=44) > 0,\n", " dtype)\n", " elif link == 'logit':\n", " response = tfd.Bernoulli(logits=linear_response).sample(seed=44)\n", " else:\n", " raise ValueError('unrecognized true link: {}'.format(link))\n", " return model_matrix, response, model_coefficients, mask\n", "\n", " with tf.Session() as sess:\n", " x_, y_, model_coefficients_true_, _ = sess.run(make_dataset(\n", " n=int(1e5), d=100, link='probit'))\n", "\n", " model = tfp.glm.Bernoulli()\n", " model_coefficients_start = tf.zeros(x_.shape[-1], np.float32)\n", "\n", " model_coefficients, is_converged, num_iter = tfp.glm.fit_sparse(\n", " model_matrix=tf.convert_to_tensor(x_),\n", " response=tf.convert_to_tensor(y_),\n", " model=model,\n", " model_coefficients_start=model_coefficients_start,\n", " l1_regularizer=800.,\n", " l2_regularizer=None,\n", " maximum_iterations=10,\n", " maximum_full_sweeps_per_iteration=10,\n", " tolerance=1e-6,\n", " 
learning_rate=None)\n", "\n", " model_coefficients_, is_converged_, num_iter_ = sess.run([\n", " model_coefficients, is_converged, num_iter])\n", "\n", " print(\"is_converged:\", is_converged_)\n", " print(\" num_iter:\", num_iter_)\n", " print(\"\\nLearned / True\")\n", " print(np.concatenate(\n", " [[model_coefficients_], [model_coefficients_true_]], axis=0).T)\n", "\n", " # ==>\n", " # is_converged: True\n", " # num_iter: 1\n", " #\n", " # Learned / True\n", " # [[ 0. 0. ]\n", " # [ 0. 0. ]\n", " # [ 0. 0. ]\n", " # [ 0.11195257 0.12484948]\n", " # [ 0. 0. ]\n", " # [ 0.05191106 0.06394956]\n", " # [-0.15090358 -0.15325639]\n", " # [-0.18187316 -0.18825999]\n", " # [-0.06140942 -0.07994166]\n", " # [ 0. 0. ]\n", " # [ 0. 0. ]\n", " # [ 0. 0. ]\n", " # [ 0.14474444 0.15810856]\n", " # [ 0. 0. ]\n", " # [-0.25249591 -0.24260855]\n", " # [ 0. 0. ]\n", " # [ 0. 0. ]\n", " # [-0.03888761 -0.06755984]\n", " # [ 0. 0. ]\n", " # [ 0. 0. ]\n", " # [ 0. 0. ]\n", " # [-0.0192222 -0.04169233]\n", " # [ 0. 0. ]\n", " # [ 0. 0. ]\n", " # [ 0.01434913 0.03568212]\n", " # [-0.11336883 -0.12873614]\n", " # [ 0. 0. ]\n", " # [-0.24496339 -0.24048163]\n", " # [ 0. 0. ]\n", " # [ 0. 0. ]\n", " # [ 0.04088281 0.06565224]\n", " # [-0.12784363 -0.13359821]\n", " # [ 0.05618424 0.07396613]\n", " # [ 0. 0. ]\n", " # [ 0. 0. ]\n", " # [ 0. 0. ]\n", " # [ 0. -0.01719233]\n", " # [ 0. 0. ]\n", " # [ 0. 0. ]\n", " # [-0.00076072 -0.03607186]\n", " # [ 0.21801499 0.21146794]\n", " # [-0.02161094 -0.04031265]\n", " # [ 0.0918689 0.10487888]\n", " # [ 0.0106154 0.03233612]\n", " # [-0.07817317 -0.09725142]\n", " # [ 0. 0. ]\n", " # [ 0. 0. ]\n", " # [-0.23725343 -0.24194022]\n", " # [ 0. 0. ]\n", " # [-0.08725718 -0.1048776 ]\n", " # [ 0. 0. ]\n", " # [ 0. 0. ]\n", " # [-0.02114314 -0.04145789]\n", " # [ 0. 0. ]\n", " # [ 0. 0. ]\n", " # [-0.02710908 -0.04590397]\n", " # [ 0.15293184 0.15415154]\n", " # [ 0.2114463 0.2088728 ]\n", " # [-0.10969634 -0.12368613]\n", " # [ 0. -0.01505797]\n", " # [-0.01140458 -0.03234904]\n", " # [ 0.16051085 0.1680062 ]\n", " # [ 0.09816848 0.11094204]\n", " ```\n", "\n", " #### References\n", "\n", " [1]: Jerome Friedman, Trevor Hastie and Rob Tibshirani. Regularization Paths\n", " for Generalized Linear Models via Coordinate Descent. _Journal of\n", " Statistical Software_, 33(1), 2010.\n", " https://www.jstatsoft.org/article/view/v033i01/v33i01.pdf\n", "\n", " [2]: Guo-Xun Yuan, Chia-Hua Ho and Chih-Jen Lin. An Improved GLMNET for\n", " L1-regularized Logistic Regression. 
_Journal of Machine Learning\n", " Research_, 13, 2012.\n", " http://www.jmlr.org/papers/volume13/yuan12a/yuan12a.pdf\n", " \"\"\"\n", " graph_deps = [\n", " model_matrix,\n", " response,\n", " model_coefficients_start,\n", " l1_regularizer,\n", " l2_regularizer,\n", " maximum_iterations,\n", " maximum_full_sweeps_per_iteration,\n", " # TODO(b/111925792): Replace `tolerance` arg with something like\n", " # `convergence_criteria_fn`.\n", " tolerance,\n", " learning_rate,\n", " ]\n", " with tf.compat.v1.name_scope(name, 'fit_sparse', graph_deps):\n", " # TODO(b/111922388): Include dispersion and offset parameters.\n", " def _grad_neg_log_likelihood_and_fim_fn(x):\n", " predicted_linear_response = sparse_or_dense_matvecmul(model_matrix, x)\n", " g, h_middle = _grad_neg_log_likelihood_and_fim(\n", " model_matrix, predicted_linear_response, response, model)\n", " return g, model_matrix, h_middle\n", "\n", " return tfp.optimizer.proximal_hessian_sparse_minimize(\n", " _grad_neg_log_likelihood_and_fim_fn,\n", " x_start=model_coefficients_start,\n", " l1_regularizer=l1_regularizer,\n", " l2_regularizer=l2_regularizer,\n", " maximum_iterations=maximum_iterations,\n", " maximum_full_sweeps_per_iteration=maximum_full_sweeps_per_iteration,\n", " learning_rate=learning_rate,\n", " tolerance=tolerance,\n", " name=name)" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012658227848101266, 0, 0, 0, 0, 0, 0, 0, 0.012345679012345678, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012345679012345678, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012345679012345678, 0.012345679012345678, 0, 0, 0, 0, 0, 0.012345679012345678, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.058823529411764705, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.015625, 0, 0, 0.012987012987012988, 0.018518518518518517, 0, 0.02564102564102564, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.05555555555555555 ]
254
0.00103
def load(self): """Return the current load. The load is represented as a float, where 1.0 represents having hit one of the flow control limits, and values between 0.0 and 1.0 represent how close we are to them. (0.5 means we have exactly half of what the flow control setting allows, for example.) There are (currently) two flow control settings; this property computes how close the manager is to each of them, and returns whichever value is higher. (It does not matter that we have lots of running room on setting A if setting B is over.) Returns: float: The load value. """ if self._leaser is None: return 0 return max( [ self._leaser.message_count / self._flow_control.max_messages, self._leaser.bytes / self._flow_control.max_bytes, ] )
[ "def", "load", "(", "self", ")", ":", "if", "self", ".", "_leaser", "is", "None", ":", "return", "0", "return", "max", "(", "[", "self", ".", "_leaser", ".", "message_count", "/", "self", ".", "_flow_control", ".", "max_messages", ",", "self", ".", "_leaser", ".", "bytes", "/", "self", ".", "_flow_control", ".", "max_bytes", ",", "]", ")" ]
36.52
0.002134
[ "def load(self):\n", " \"\"\"Return the current load.\n", "\n", " The load is represented as a float, where 1.0 represents having\n", " hit one of the flow control limits, and values between 0.0 and 1.0\n", " represent how close we are to them. (0.5 means we have exactly half\n", " of what the flow control setting allows, for example.)\n", "\n", " There are (currently) two flow control settings; this property\n", " computes how close the manager is to each of them, and returns\n", " whichever value is higher. (It does not matter that we have lots of\n", " running room on setting A if setting B is over.)\n", "\n", " Returns:\n", " float: The load value.\n", " \"\"\"\n", " if self._leaser is None:\n", " return 0\n", "\n", " return max(\n", " [\n", " self._leaser.message_count / self._flow_control.max_messages,\n", " self._leaser.bytes / self._flow_control.max_bytes,\n", " ]\n", " )" ]
[ 0, 0.027777777777777776, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1111111111111111 ]
25
0.005556
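Note: the `load` property above is just the larger of two utilization ratios. A minimal illustrative sketch of that arithmetic, with made-up limits and counts standing in for the real leaser and flow-control objects:

    # Illustrative sketch only; names and values are hypothetical.
    max_messages, max_bytes = 100, 1_000_000      # flow control limits
    message_count, held_bytes = 50, 900_000       # what the leaser currently holds

    load = max(message_count / max_messages, held_bytes / max_bytes)
    print(load)  # 0.9: the byte limit dominates even though messages are only at 0.5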
def decision_function(self, X=None): """Output the decision value of the prediction. if X is not equal to self.test_raw_data\\_, i.e. predict is not called, first generate the test_data after getting the test_data, get the decision value via self.clf. if X is None, test_data\\_ is ready to be used Parameters ---------- X: Optional[list of tuple (data1, data2)] data1 and data2 are numpy array in shape [num_TRs, num_voxels] to be computed for correlation. default None, meaning that the data to be predicted have been processed in the fit method. Otherwise, X contains the activity data filtered by ROIs and prepared for correlation computation. len(X) is the number of test samples. if len(X) > 1: normalization is done on all test samples. Within list, all data1s must have the same num_voxels value, all data2s must have the same num_voxels value. Returns ------- confidence: the predictions confidence values of X, in shape [len(X),] """ if X is not None and not self._is_equal_to_test_raw_data(X): for x in X: assert len(x) == 2, \ 'there must be two parts for each correlation computation' X1, X2 = zip(*X) num_voxels1 = X1[0].shape[1] num_voxels2 = X2[0].shape[1] assert len(X1) == len(X2), \ 'the list lengths do not match' # make sure X1 always has more voxels if num_voxels1 < num_voxels2: X1, X2 = X2, X1 num_voxels1, num_voxels2 = num_voxels2, num_voxels1 assert self.num_features_ == num_voxels1 * num_voxels2, \ 'the number of features does not match the model' num_test_samples = len(X1) self.test_raw_data_ = X # generate the test_data first # correlation computation corr_data = self._prepare_corerelation_data(X1, X2) # normalization normalized_corr_data = \ self._normalize_correlation_data(corr_data, num_test_samples) # test data generation self.test_data_ = self._prepare_test_data(normalized_corr_data) confidence = self.clf.decision_function(self.test_data_) return confidence
[ "def", "decision_function", "(", "self", ",", "X", "=", "None", ")", ":", "if", "X", "is", "not", "None", "and", "not", "self", ".", "_is_equal_to_test_raw_data", "(", "X", ")", ":", "for", "x", "in", "X", ":", "assert", "len", "(", "x", ")", "==", "2", ",", "'there must be two parts for each correlation computation'", "X1", ",", "X2", "=", "zip", "(", "*", "X", ")", "num_voxels1", "=", "X1", "[", "0", "]", ".", "shape", "[", "1", "]", "num_voxels2", "=", "X2", "[", "0", "]", ".", "shape", "[", "1", "]", "assert", "len", "(", "X1", ")", "==", "len", "(", "X2", ")", ",", "'the list lengths do not match'", "# make sure X1 always has more voxels", "if", "num_voxels1", "<", "num_voxels2", ":", "X1", ",", "X2", "=", "X2", ",", "X1", "num_voxels1", ",", "num_voxels2", "=", "num_voxels2", ",", "num_voxels1", "assert", "self", ".", "num_features_", "==", "num_voxels1", "*", "num_voxels2", ",", "'the number of features does not match the model'", "num_test_samples", "=", "len", "(", "X1", ")", "self", ".", "test_raw_data_", "=", "X", "# generate the test_data first", "# correlation computation", "corr_data", "=", "self", ".", "_prepare_corerelation_data", "(", "X1", ",", "X2", ")", "# normalization", "normalized_corr_data", "=", "self", ".", "_normalize_correlation_data", "(", "corr_data", ",", "num_test_samples", ")", "# test data generation", "self", ".", "test_data_", "=", "self", ".", "_prepare_test_data", "(", "normalized_corr_data", ")", "confidence", "=", "self", ".", "clf", ".", "decision_function", "(", "self", ".", "test_data_", ")", "return", "confidence" ]
45.574074
0.000796
[ "def decision_function(self, X=None):\n", " \"\"\"Output the decision value of the prediction.\n", "\n", " if X is not equal to self.test_raw_data\\\\_, i.e. predict is not called,\n", " first generate the test_data\n", " after getting the test_data, get the decision value via self.clf.\n", " if X is None, test_data\\\\_ is ready to be used\n", "\n", " Parameters\n", " ----------\n", " X: Optional[list of tuple (data1, data2)]\n", " data1 and data2 are numpy array in shape [num_TRs, num_voxels]\n", " to be computed for correlation.\n", " default None, meaning that the data to be predicted\n", " have been processed in the fit method.\n", " Otherwise, X contains the activity data filtered by ROIs\n", " and prepared for correlation computation.\n", " len(X) is the number of test samples.\n", " if len(X) > 1: normalization is done on all test samples.\n", " Within list, all data1s must have the same num_voxels value,\n", " all data2s must have the same num_voxels value.\n", "\n", " Returns\n", " -------\n", " confidence: the predictions confidence values of X, in shape [len(X),]\n", " \"\"\"\n", " if X is not None and not self._is_equal_to_test_raw_data(X):\n", " for x in X:\n", " assert len(x) == 2, \\\n", " 'there must be two parts for each correlation computation'\n", " X1, X2 = zip(*X)\n", " num_voxels1 = X1[0].shape[1]\n", " num_voxels2 = X2[0].shape[1]\n", " assert len(X1) == len(X2), \\\n", " 'the list lengths do not match'\n", " # make sure X1 always has more voxels\n", " if num_voxels1 < num_voxels2:\n", " X1, X2 = X2, X1\n", " num_voxels1, num_voxels2 = num_voxels2, num_voxels1\n", " assert self.num_features_ == num_voxels1 * num_voxels2, \\\n", " 'the number of features does not match the model'\n", " num_test_samples = len(X1)\n", " self.test_raw_data_ = X\n", " # generate the test_data first\n", " # correlation computation\n", " corr_data = self._prepare_corerelation_data(X1, X2)\n", " # normalization\n", " normalized_corr_data = \\\n", " self._normalize_correlation_data(corr_data,\n", " num_test_samples)\n", " # test data generation\n", " self.test_data_ = self._prepare_test_data(normalized_corr_data)\n", " confidence = self.clf.decision_function(self.test_data_)\n", " return confidence" ]
[ 0, 0.017857142857142856, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.04 ]
54
0.001071
def namedb_get_all_importing_namespace_hashes( self, current_block ): """ Get the list of all non-expired preordered and revealed namespace hashes. """ query = "SELECT preorder_hash FROM namespaces WHERE (op = ? AND reveal_block < ?) OR (op = ? AND block_number < ?);" args = (NAMESPACE_REVEAL, current_block + NAMESPACE_REVEAL_EXPIRE, NAMESPACE_PREORDER, current_block + NAMESPACE_PREORDER_EXPIRE ) namespace_rows = namedb_query_execute( cur, query, args ) ret = [] for namespace_row in namespace_rows: ret.append( namespace_row['preorder_hash'] ) return ret
[ "def", "namedb_get_all_importing_namespace_hashes", "(", "self", ",", "current_block", ")", ":", "query", "=", "\"SELECT preorder_hash FROM namespaces WHERE (op = ? AND reveal_block < ?) OR (op = ? AND block_number < ?);\"", "args", "=", "(", "NAMESPACE_REVEAL", ",", "current_block", "+", "NAMESPACE_REVEAL_EXPIRE", ",", "NAMESPACE_PREORDER", ",", "current_block", "+", "NAMESPACE_PREORDER_EXPIRE", ")", "namespace_rows", "=", "namedb_query_execute", "(", "cur", ",", "query", ",", "args", ")", "ret", "=", "[", "]", "for", "namespace_row", "in", "namespace_rows", ":", "ret", ".", "append", "(", "namespace_row", "[", "'preorder_hash'", "]", ")", "return", "ret" ]
42.357143
0.016502
[ "def namedb_get_all_importing_namespace_hashes( self, current_block ):\n", " \"\"\"\n", " Get the list of all non-expired preordered and revealed namespace hashes.\n", " \"\"\"\n", "\n", " query = \"SELECT preorder_hash FROM namespaces WHERE (op = ? AND reveal_block < ?) OR (op = ? AND block_number < ?);\"\n", " args = (NAMESPACE_REVEAL, current_block + NAMESPACE_REVEAL_EXPIRE, NAMESPACE_PREORDER, current_block + NAMESPACE_PREORDER_EXPIRE )\n", "\n", " namespace_rows = namedb_query_execute( cur, query, args )\n", " ret = []\n", " for namespace_row in namespace_rows:\n", " ret.append( namespace_row['preorder_hash'] )\n", "\n", " return ret" ]
[ 0.02857142857142857, 0, 0, 0, 0, 0.008264462809917356, 0.014814814814814815, 0, 0.03225806451612903, 0, 0, 0.03773584905660377, 0, 0.07142857142857142 ]
14
0.013791
def send_approve_mail(request, user):
 """
 Sends an email to staff listed in the setting
 ``ACCOUNTS_APPROVAL_EMAILS``, when a new user signs up and the
 ``ACCOUNTS_APPROVAL_REQUIRED`` setting is ``True``.
 """
 approval_emails = split_addresses(settings.ACCOUNTS_APPROVAL_EMAILS)
 if not approval_emails:
 return
 context = {
 "request": request,
 "user": user,
 "change_url": admin_url(user.__class__, "change", user.id),
 }
 subject = subject_template("email/account_approve_subject.txt", context)
 send_mail_template(subject, "email/account_approve",
 settings.DEFAULT_FROM_EMAIL, approval_emails,
 context=context)
[ "def", "send_approve_mail", "(", "request", ",", "user", ")", ":", "approval_emails", "=", "split_addresses", "(", "settings", ".", "ACCOUNTS_APPROVAL_EMAILS", ")", "if", "not", "approval_emails", ":", "return", "context", "=", "{", "\"request\"", ":", "request", ",", "\"user\"", ":", "user", ",", "\"change_url\"", ":", "admin_url", "(", "user", ".", "__class__", ",", "\"change\"", ",", "user", ".", "id", ")", ",", "}", "subject", "=", "subject_template", "(", "\"email/account_approve_subject.txt\"", ",", "context", ")", "send_mail_template", "(", "subject", ",", "\"email/account_approve\"", ",", "settings", ".", "DEFAULT_FROM_EMAIL", ",", "approval_emails", ",", "context", "=", "context", ")" ]
39.5
0.001374
[ "def send_approve_mail(request, user):\n", " \"\"\"\n", " Sends an email to staff in listed in the setting\n", " ``ACCOUNTS_APPROVAL_EMAILS``, when a new user signs up and the\n", " ``ACCOUNTS_APPROVAL_REQUIRED`` setting is ``True``.\n", " \"\"\"\n", " approval_emails = split_addresses(settings.ACCOUNTS_APPROVAL_EMAILS)\n", " if not approval_emails:\n", " return\n", " context = {\n", " \"request\": request,\n", " \"user\": user,\n", " \"change_url\": admin_url(user.__class__, \"change\", user.id),\n", " }\n", " subject = subject_template(\"email/account_approve_subject.txt\", context)\n", " send_mail_template(subject, \"email/account_approve\",\n", " settings.DEFAULT_FROM_EMAIL, approval_emails,\n", " context=context)" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.02564102564102564 ]
18
0.001425
def get_current_cmus(): """ Get the current song from cmus. """ result = subprocess.run('cmus-remote -Q'.split(' '), check=True, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL) info = {} for line in result.stdout.decode().split('\n'): line = line.split(' ') if line[0] != 'tag': continue key = line[1] if key in ['album', 'title', 'artist', 'albumartist'] and\ key not in info: info[key] = ' '.join(line[2:]) if 'albumartist' in info: info['artist'] = info['albumartist'] del info['albumartist'] return Song(**info)
[ "def", "get_current_cmus", "(", ")", ":", "result", "=", "subprocess", ".", "run", "(", "'cmus-remote -Q'", ".", "split", "(", "' '", ")", ",", "check", "=", "True", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "DEVNULL", ")", "info", "=", "{", "}", "for", "line", "in", "result", ".", "stdout", ".", "decode", "(", ")", ".", "split", "(", "'\\n'", ")", ":", "line", "=", "line", ".", "split", "(", "' '", ")", "if", "line", "[", "0", "]", "!=", "'tag'", ":", "continue", "key", "=", "line", "[", "1", "]", "if", "key", "in", "[", "'album'", ",", "'title'", ",", "'artist'", ",", "'albumartist'", "]", "and", "key", "not", "in", "info", ":", "info", "[", "key", "]", "=", "' '", ".", "join", "(", "line", "[", "2", ":", "]", ")", "if", "'albumartist'", "in", "info", ":", "info", "[", "'artist'", "]", "=", "info", "[", "'albumartist'", "]", "del", "info", "[", "'albumartist'", "]", "return", "Song", "(", "*", "*", "info", ")" ]
30.619048
0.001508
[ "def get_current_cmus():\n", " \"\"\"\n", " Get the current song from cmus.\n", " \"\"\"\n", " result = subprocess.run('cmus-remote -Q'.split(' '), check=True,\n", " stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)\n", " info = {}\n", " for line in result.stdout.decode().split('\\n'):\n", " line = line.split(' ')\n", " if line[0] != 'tag':\n", " continue\n", " key = line[1]\n", " if key in ['album', 'title', 'artist', 'albumartist'] and\\\n", " key not in info:\n", " info[key] = ' '.join(line[2:])\n", "\n", " if 'albumartist' in info:\n", " info['artist'] = info['albumartist']\n", " del info['albumartist']\n", "\n", " return Song(**info)" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.043478260869565216 ]
21
0.00207
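Note: the loop above only keys off lines of the form `tag <name> <value...>`. A rough sketch of the same extraction applied to a hand-written sample of `cmus-remote -Q`-style output (the sample lines are invented for illustration):

    # Hypothetical status lines; real cmus output carries more fields.
    sample = [
        "status playing",
        "tag artist Some Artist",
        "tag albumartist Various Artists",
        "tag album Some Album",
        "tag title Some Title",
    ]
    info = {}
    for line in sample:
        parts = line.split(' ')
        if parts[0] != 'tag':
            continue
        key = parts[1]
        if key in ['album', 'title', 'artist', 'albumartist'] and key not in info:
            info[key] = ' '.join(parts[2:])
    if 'albumartist' in info:
        info['artist'] = info['albumartist']
        del info['albumartist']
    print(info)  # artist ends up overridden by the album artist, as in the function above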
def build_import_pattern(mapping1, mapping2): u""" mapping1: A dict mapping py3k modules to all possible py2k replacements mapping2: A dict mapping py2k modules to the things they do This builds a HUGE pattern to match all ways that things can be imported """ # py3k: urllib.request, py2k: ('urllib2', 'urllib') yield from_import % (all_modules_subpattern()) for py3k, py2k in mapping1.items(): name, attr = py3k.split(u'.') s_name = simple_name % (name) s_attr = simple_attr % (attr) d_name = dotted_name % (s_name, s_attr) yield name_import % (d_name) yield power_twoname % (s_name, s_attr) if attr == u'__init__': yield name_import % (s_name) yield power_onename % (s_name) yield name_import_rename % (d_name) yield from_import_rename % (s_name, s_attr, s_attr, s_attr, s_attr)
[ "def", "build_import_pattern", "(", "mapping1", ",", "mapping2", ")", ":", "# py3k: urllib.request, py2k: ('urllib2', 'urllib')", "yield", "from_import", "%", "(", "all_modules_subpattern", "(", ")", ")", "for", "py3k", ",", "py2k", "in", "mapping1", ".", "items", "(", ")", ":", "name", ",", "attr", "=", "py3k", ".", "split", "(", "u'.'", ")", "s_name", "=", "simple_name", "%", "(", "name", ")", "s_attr", "=", "simple_attr", "%", "(", "attr", ")", "d_name", "=", "dotted_name", "%", "(", "s_name", ",", "s_attr", ")", "yield", "name_import", "%", "(", "d_name", ")", "yield", "power_twoname", "%", "(", "s_name", ",", "s_attr", ")", "if", "attr", "==", "u'__init__'", ":", "yield", "name_import", "%", "(", "s_name", ")", "yield", "power_onename", "%", "(", "s_name", ")", "yield", "name_import_rename", "%", "(", "d_name", ")", "yield", "from_import_rename", "%", "(", "s_name", ",", "s_attr", ",", "s_attr", ",", "s_attr", ",", "s_attr", ")" ]
44.45
0.001101
[ "def build_import_pattern(mapping1, mapping2):\n", " u\"\"\"\n", " mapping1: A dict mapping py3k modules to all possible py2k replacements\n", " mapping2: A dict mapping py2k modules to the things they do\n", " This builds a HUGE pattern to match all ways that things can be imported\n", " \"\"\"\n", " # py3k: urllib.request, py2k: ('urllib2', 'urllib')\n", " yield from_import % (all_modules_subpattern())\n", " for py3k, py2k in mapping1.items():\n", " name, attr = py3k.split(u'.')\n", " s_name = simple_name % (name)\n", " s_attr = simple_attr % (attr)\n", " d_name = dotted_name % (s_name, s_attr)\n", " yield name_import % (d_name)\n", " yield power_twoname % (s_name, s_attr)\n", " if attr == u'__init__':\n", " yield name_import % (s_name)\n", " yield power_onename % (s_name)\n", " yield name_import_rename % (d_name)\n", " yield from_import_rename % (s_name, s_attr, s_attr, s_attr, s_attr)" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.013333333333333334 ]
20
0.000667
def get_qtls_from_mapqtl_data(matrix, threshold, inputfile):
 """Extract the QTLs found by MapQTL reading its file.
 This assumes that there is only one QTL per linkage group.

 :arg matrix, the MapQTL file read in memory
 :arg threshold, threshold used to determine if a given LOD value is
 reflective of the presence of a QTL.
 :arg inputfile, name of the inputfile in which the QTLs have been
 found

 """
 trait_name = inputfile.split(')_', 1)[1].split('.mqo')[0]
 qtls = []
 qtl = None
 for entry in matrix[1:]:
 if qtl is None:
 qtl = entry
 if qtl[1] != entry[1]:
 if float(qtl[4]) > float(threshold):
 qtl[0] = trait_name
 qtls.append(qtl)
 qtl = entry
 if entry[4] == '': # pragma: no cover
 entry[4] = 0
 if qtl[4] == '': # pragma: no cover
 qtl[4] = 0
 if float(entry[4]) > float(qtl[4]):
 qtl = entry

 if float(qtl[4]) > float(threshold):
 qtl[0] = trait_name
 if qtl not in qtls:
 qtls.append(qtl)

 return qtls
[ "def", "get_qtls_from_mapqtl_data", "(", "matrix", ",", "threshold", ",", "inputfile", ")", ":", "trait_name", "=", "inputfile", ".", "split", "(", "')_'", ",", "1", ")", "[", "1", "]", ".", "split", "(", "'.mqo'", ")", "[", "0", "]", "qtls", "=", "[", "]", "qtl", "=", "None", "for", "entry", "in", "matrix", "[", "1", ":", "]", ":", "if", "qtl", "is", "None", ":", "qtl", "=", "entry", "if", "qtl", "[", "1", "]", "!=", "entry", "[", "1", "]", ":", "if", "float", "(", "qtl", "[", "4", "]", ")", ">", "float", "(", "threshold", ")", ":", "qtl", "[", "0", "]", "=", "trait_name", "qtls", ".", "append", "(", "qtl", ")", "qtl", "=", "entry", "if", "entry", "[", "4", "]", "==", "''", ":", "# pragma: no cover", "entry", "[", "4", "]", "=", "0", "if", "qtl", "[", "4", "]", "==", "''", ":", "# pragma: no cover", "qtl", "[", "4", "]", "=", "0", "if", "float", "(", "entry", "[", "4", "]", ")", ">", "float", "(", "qtl", "[", "4", "]", ")", ":", "qtl", "=", "entry", "if", "float", "(", "qtl", "[", "4", "]", ")", ">", "float", "(", "threshold", ")", ":", "qtl", "[", "0", "]", "=", "trait_name", "if", "qtl", "not", "in", "qtls", ":", "qtls", ".", "append", "(", "qtl", ")", "return", "qtls" ]
31.285714
0.000886
[ "def get_qtls_from_mapqtl_data(matrix, threshold, inputfile):\n", " \"\"\"Extract the QTLs found by MapQTL reading its file.\n", " This assume that there is only one QTL per linkage group.\n", "\n", " :arg matrix, the MapQTL file read in memory\n", " :arg threshold, threshold used to determine if a given LOD value is\n", " reflective the presence of a QTL.\n", " :arg inputfile, name of the inputfile in which the QTLs have been\n", " found\n", "\n", " \"\"\"\n", " trait_name = inputfile.split(')_', 1)[1].split('.mqo')[0]\n", " qtls = []\n", " qtl = None\n", " for entry in matrix[1:]:\n", " if qtl is None:\n", " qtl = entry\n", " if qtl[1] != entry[1]:\n", " if float(qtl[4]) > float(threshold):\n", " qtl[0] = trait_name\n", " qtls.append(qtl)\n", " qtl = entry\n", " if entry[4] == '': # pragma: no cover\n", " entry[4] = 0\n", " if qtl[4] == '': # pragma: no cover\n", " qtl[4] = 0\n", " if float(entry[4]) > float(qtl[4]):\n", " qtl = entry\n", "\n", " if float(qtl[4]) > float(threshold):\n", " qtl[0] = trait_name\n", " if qtl not in qtls:\n", " qtls.append(qtl)\n", "\n", " return qtls" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.06666666666666667 ]
35
0.001905
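Note: the function above only relies on the file name carrying the trait between `)_` and `.mqo`, on column 1 holding the linkage group, and on column 4 holding the LOD score. A tiny made-up matrix showing the expected behaviour, assuming the function is importable as defined above (the other column labels are guesses):

    matrix = [
        ['marker', 'group', 'position', 'locus', 'LOD'],  # header row, skipped by matrix[1:]
        ['m1', '1', '0.0', 'l1', '2.1'],
        ['m2', '1', '5.0', 'l2', '4.8'],   # strongest LOD on linkage group 1
        ['m3', '2', '3.0', 'l3', '1.0'],
    ]
    qtls = get_qtls_from_mapqtl_data(matrix, threshold=3, inputfile='IM)_height.mqo')
    print(qtls)  # [['height', '1', '5.0', 'l2', '4.8']] -- one QTL, labelled with the trait name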
def listen(cls, event, func): """Add a callback for a signal against the class""" signal(event).connect(func, sender=cls)
[ "def", "listen", "(", "cls", ",", "event", ",", "func", ")", ":", "signal", "(", "event", ")", ".", "connect", "(", "func", ",", "sender", "=", "cls", ")" ]
45
0.014599
[ "def listen(cls, event, func):\n", " \"\"\"Add a callback for a signal against the class\"\"\"\n", " signal(event).connect(func, sender=cls)" ]
[ 0, 0.016666666666666666, 0.02127659574468085 ]
3
0.012648
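Note: `signal(event).connect(func, sender=cls)` matches the blinker-style named-signal API. A rough usage sketch assuming blinker (or an equivalent `signal` helper) is what the module imports:

    from blinker import signal

    class Article:
        @classmethod
        def listen(cls, event, func):
            # same body as the record above
            signal(event).connect(func, sender=cls)

    def on_saved(sender, **kwargs):
        print('saved by', sender)

    Article.listen('saved', on_saved)
    signal('saved').send(Article)   # triggers on_saved with sender=Article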
def get_hostname(): ''' Determines the current hostname by probing ``uname -n``. Falls back to ``hostname`` in case of problems. |appteardown| if both failed (usually they don't but consider this if you are debugging weird problems..) :returns: The hostname as string. Domain parts will be split off ''' h = shell_run('uname -n', critical=False, verbose=False) if not h: h = shell_run('hostname', critical=False, verbose=False) if not h: shell_notify('could not retrieve hostname', state=True) return str(h.get('out')).split('.')[0]
[ "def", "get_hostname", "(", ")", ":", "h", "=", "shell_run", "(", "'uname -n'", ",", "critical", "=", "False", ",", "verbose", "=", "False", ")", "if", "not", "h", ":", "h", "=", "shell_run", "(", "'hostname'", ",", "critical", "=", "False", ",", "verbose", "=", "False", ")", "if", "not", "h", ":", "shell_notify", "(", "'could not retrieve hostname'", ",", "state", "=", "True", ")", "return", "str", "(", "h", ".", "get", "(", "'out'", ")", ")", ".", "split", "(", "'.'", ")", "[", "0", "]" ]
32.611111
0.001656
[ "def get_hostname():\n", " '''\n", " Determines the current hostname by probing ``uname -n``.\n", " Falls back to ``hostname`` in case of problems.\n", "\n", " |appteardown| if both failed (usually they don't but consider\n", " this if you are debugging weird problems..)\n", "\n", " :returns:\n", " The hostname as string. Domain parts will be split off\n", " '''\n", "\n", " h = shell_run('uname -n', critical=False, verbose=False)\n", " if not h:\n", " h = shell_run('hostname', critical=False, verbose=False)\n", " if not h:\n", " shell_notify('could not retrieve hostname', state=True)\n", " return str(h.get('out')).split('.')[0]" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.023809523809523808 ]
18
0.001323
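Note: `shell_run` and `shell_notify` are project helpers that are not shown here, so this is only a rough standard-library approximation of the same probe-and-fallback logic:

    import subprocess

    def _probe(cmd):
        # return stripped stdout, or '' if the command could not be run
        try:
            out = subprocess.run(cmd, shell=True, capture_output=True, text=True)
            return out.stdout.strip()
        except OSError:
            return ''

    name = _probe('uname -n') or _probe('hostname')
    if not name:
        raise RuntimeError('could not retrieve hostname')
    print(name.split('.')[0])   # hostname with any domain part removed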
def modifie(self, key: str, value: Any) -> None: """Store the modification. `value` should be dumped in DB compatible format.""" if key in self.FIELDS_OPTIONS: self.modifie_options(key, value) else: self.modifications[key] = value
[ "def", "modifie", "(", "self", ",", "key", ":", "str", ",", "value", ":", "Any", ")", "->", "None", ":", "if", "key", "in", "self", ".", "FIELDS_OPTIONS", ":", "self", ".", "modifie_options", "(", "key", ",", "value", ")", "else", ":", "self", ".", "modifications", "[", "key", "]", "=", "value" ]
45.5
0.010791
[ "def modifie(self, key: str, value: Any) -> None:\n", " \"\"\"Store the modification. `value` should be dumped in DB compatible format.\"\"\"\n", " if key in self.FIELDS_OPTIONS:\n", " self.modifie_options(key, value)\n", " else:\n", " self.modifications[key] = value" ]
[ 0, 0.022727272727272728, 0, 0, 0, 0.023255813953488372 ]
6
0.007664
def main(): """Main function.""" time_start = time.time() logging.info('loading vocab file from dataset: %s', args.vocab) vocab_obj = nlp.data.utils._load_pretrained_vocab(args.vocab) tokenizer = BERTTokenizer( vocab=vocab_obj, lower='uncased' in args.vocab) input_files = [] for input_pattern in args.input_file.split(','): input_files.extend(glob.glob(os.path.expanduser(input_pattern))) logging.info('*** Reading from %d input files ***', len(input_files)) for input_file in input_files: logging.info(' %s', input_file) num_outputs = min(args.num_outputs, len(input_files)) output_dir = os.path.expanduser(args.output_dir) if not os.path.exists(output_dir): os.makedirs(output_dir) rng = random.Random(args.random_seed) nworker = args.num_workers # calculate the number of splits file_splits = [] split_size = (len(input_files) + num_outputs - 1) // num_outputs for i in range(num_outputs - 1): file_splits.append(input_files[i*split_size:(i+1)*split_size]) file_splits.append(input_files[(num_outputs-1)*split_size:]) # prepare workload suffix = 'npz' if args.format == 'numpy' else 'rec' count = 0 map_args = [] pool_args = (tokenizer, args.max_seq_length, args.dupe_factor,\ args.short_seq_prob, args.masked_lm_prob, args.max_predictions_per_seq, rng) for i, file_split in enumerate(file_splits): out = os.path.join(output_dir, 'part-{}.{}'.format(str(i).zfill(3), suffix)) count += len(file_split) map_args.append((file_split, out) + pool_args) # sanity check assert count == len(input_files) # dispatch to workers if nworker > 1: pool = Pool(nworker) pool.map(create_training_instances, map_args) else: for map_arg in map_args: create_training_instances(map_arg) time_end = time.time() logging.info('Time cost=%.1f', time_end - time_start)
[ "def", "main", "(", ")", ":", "time_start", "=", "time", ".", "time", "(", ")", "logging", ".", "info", "(", "'loading vocab file from dataset: %s'", ",", "args", ".", "vocab", ")", "vocab_obj", "=", "nlp", ".", "data", ".", "utils", ".", "_load_pretrained_vocab", "(", "args", ".", "vocab", ")", "tokenizer", "=", "BERTTokenizer", "(", "vocab", "=", "vocab_obj", ",", "lower", "=", "'uncased'", "in", "args", ".", "vocab", ")", "input_files", "=", "[", "]", "for", "input_pattern", "in", "args", ".", "input_file", ".", "split", "(", "','", ")", ":", "input_files", ".", "extend", "(", "glob", ".", "glob", "(", "os", ".", "path", ".", "expanduser", "(", "input_pattern", ")", ")", ")", "logging", ".", "info", "(", "'*** Reading from %d input files ***'", ",", "len", "(", "input_files", ")", ")", "for", "input_file", "in", "input_files", ":", "logging", ".", "info", "(", "' %s'", ",", "input_file", ")", "num_outputs", "=", "min", "(", "args", ".", "num_outputs", ",", "len", "(", "input_files", ")", ")", "output_dir", "=", "os", ".", "path", ".", "expanduser", "(", "args", ".", "output_dir", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "output_dir", ")", ":", "os", ".", "makedirs", "(", "output_dir", ")", "rng", "=", "random", ".", "Random", "(", "args", ".", "random_seed", ")", "nworker", "=", "args", ".", "num_workers", "# calculate the number of splits", "file_splits", "=", "[", "]", "split_size", "=", "(", "len", "(", "input_files", ")", "+", "num_outputs", "-", "1", ")", "//", "num_outputs", "for", "i", "in", "range", "(", "num_outputs", "-", "1", ")", ":", "file_splits", ".", "append", "(", "input_files", "[", "i", "*", "split_size", ":", "(", "i", "+", "1", ")", "*", "split_size", "]", ")", "file_splits", ".", "append", "(", "input_files", "[", "(", "num_outputs", "-", "1", ")", "*", "split_size", ":", "]", ")", "# prepare workload", "suffix", "=", "'npz'", "if", "args", ".", "format", "==", "'numpy'", "else", "'rec'", "count", "=", "0", "map_args", "=", "[", "]", "pool_args", "=", "(", "tokenizer", ",", "args", ".", "max_seq_length", ",", "args", ".", "dupe_factor", ",", "args", ".", "short_seq_prob", ",", "args", ".", "masked_lm_prob", ",", "args", ".", "max_predictions_per_seq", ",", "rng", ")", "for", "i", ",", "file_split", "in", "enumerate", "(", "file_splits", ")", ":", "out", "=", "os", ".", "path", ".", "join", "(", "output_dir", ",", "'part-{}.{}'", ".", "format", "(", "str", "(", "i", ")", ".", "zfill", "(", "3", ")", ",", "suffix", ")", ")", "count", "+=", "len", "(", "file_split", ")", "map_args", ".", "append", "(", "(", "file_split", ",", "out", ")", "+", "pool_args", ")", "# sanity check", "assert", "count", "==", "len", "(", "input_files", ")", "# dispatch to workers", "if", "nworker", ">", "1", ":", "pool", "=", "Pool", "(", "nworker", ")", "pool", ".", "map", "(", "create_training_instances", ",", "map_args", ")", "else", ":", "for", "map_arg", "in", "map_args", ":", "create_training_instances", "(", "map_arg", ")", "time_end", "=", "time", ".", "time", "(", ")", "logging", ".", "info", "(", "'Time cost=%.1f'", ",", "time_end", "-", "time_start", ")" ]
34.45614
0.001485
[ "def main():\n", " \"\"\"Main function.\"\"\"\n", " time_start = time.time()\n", " logging.info('loading vocab file from dataset: %s', args.vocab)\n", " vocab_obj = nlp.data.utils._load_pretrained_vocab(args.vocab)\n", " tokenizer = BERTTokenizer(\n", " vocab=vocab_obj, lower='uncased' in args.vocab)\n", "\n", " input_files = []\n", " for input_pattern in args.input_file.split(','):\n", " input_files.extend(glob.glob(os.path.expanduser(input_pattern)))\n", "\n", " logging.info('*** Reading from %d input files ***', len(input_files))\n", " for input_file in input_files:\n", " logging.info(' %s', input_file)\n", "\n", " num_outputs = min(args.num_outputs, len(input_files))\n", "\n", " output_dir = os.path.expanduser(args.output_dir)\n", " if not os.path.exists(output_dir):\n", " os.makedirs(output_dir)\n", "\n", " rng = random.Random(args.random_seed)\n", " nworker = args.num_workers\n", "\n", " # calculate the number of splits\n", " file_splits = []\n", " split_size = (len(input_files) + num_outputs - 1) // num_outputs\n", " for i in range(num_outputs - 1):\n", " file_splits.append(input_files[i*split_size:(i+1)*split_size])\n", " file_splits.append(input_files[(num_outputs-1)*split_size:])\n", "\n", " # prepare workload\n", " suffix = 'npz' if args.format == 'numpy' else 'rec'\n", " count = 0\n", " map_args = []\n", " pool_args = (tokenizer, args.max_seq_length, args.dupe_factor,\\\n", " args.short_seq_prob, args.masked_lm_prob,\n", " args.max_predictions_per_seq, rng)\n", " for i, file_split in enumerate(file_splits):\n", " out = os.path.join(output_dir, 'part-{}.{}'.format(str(i).zfill(3), suffix))\n", " count += len(file_split)\n", " map_args.append((file_split, out) + pool_args)\n", "\n", " # sanity check\n", " assert count == len(input_files)\n", "\n", " # dispatch to workers\n", " if nworker > 1:\n", " pool = Pool(nworker)\n", " pool.map(create_training_instances, map_args)\n", " else:\n", " for map_arg in map_args:\n", " create_training_instances(map_arg)\n", "\n", " time_end = time.time()\n", " logging.info('Time cost=%.1f', time_end - time_start)" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.014705882352941176, 0, 0, 0, 0.011764705882352941, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.017543859649122806 ]
57
0.000772
def write_namespaces(self, namespaces): """write the module-level namespace-generating callable.""" self.printer.writelines( "def _mako_get_namespace(context, name):", "try:", "return context.namespaces[(__name__, name)]", "except KeyError:", "_mako_generate_namespaces(context)", "return context.namespaces[(__name__, name)]", None, None ) self.printer.writeline("def _mako_generate_namespaces(context):") for node in namespaces.values(): if 'import' in node.attributes: self.compiler.has_ns_imports = True self.printer.start_source(node.lineno) if len(node.nodes): self.printer.writeline("def make_namespace():") export = [] identifiers = self.compiler.identifiers.branch(node) self.in_def = True class NSDefVisitor(object): def visitDefTag(s, node): s.visitDefOrBase(node) def visitBlockTag(s, node): s.visitDefOrBase(node) def visitDefOrBase(s, node): if node.is_anonymous: raise exceptions.CompileException( "Can't put anonymous blocks inside " "<%namespace>", **node.exception_kwargs ) self.write_inline_def(node, identifiers, nested=False) export.append(node.funcname) vis = NSDefVisitor() for n in node.nodes: n.accept_visitor(vis) self.printer.writeline("return [%s]" % (','.join(export))) self.printer.writeline(None) self.in_def = False callable_name = "make_namespace()" else: callable_name = "None" if 'file' in node.parsed_attributes: self.printer.writeline( "ns = runtime.TemplateNamespace(%r," " context._clean_inheritance_tokens()," " templateuri=%s, callables=%s, " " calling_uri=_template_uri)" % ( node.name, node.parsed_attributes.get('file', 'None'), callable_name, ) ) elif 'module' in node.parsed_attributes: self.printer.writeline( "ns = runtime.ModuleNamespace(%r," " context._clean_inheritance_tokens()," " callables=%s, calling_uri=_template_uri," " module=%s)" % ( node.name, callable_name, node.parsed_attributes.get( 'module', 'None') ) ) else: self.printer.writeline( "ns = runtime.Namespace(%r," " context._clean_inheritance_tokens()," " callables=%s, calling_uri=_template_uri)" % ( node.name, callable_name, ) ) if eval(node.attributes.get('inheritable', "False")): self.printer.writeline("context['self'].%s = ns" % (node.name)) self.printer.writeline( "context.namespaces[(__name__, %s)] = ns" % repr(node.name)) self.printer.write_blanks(1) if not len(namespaces): self.printer.writeline("pass") self.printer.writeline(None)
[ "def", "write_namespaces", "(", "self", ",", "namespaces", ")", ":", "self", ".", "printer", ".", "writelines", "(", "\"def _mako_get_namespace(context, name):\"", ",", "\"try:\"", ",", "\"return context.namespaces[(__name__, name)]\"", ",", "\"except KeyError:\"", ",", "\"_mako_generate_namespaces(context)\"", ",", "\"return context.namespaces[(__name__, name)]\"", ",", "None", ",", "None", ")", "self", ".", "printer", ".", "writeline", "(", "\"def _mako_generate_namespaces(context):\"", ")", "for", "node", "in", "namespaces", ".", "values", "(", ")", ":", "if", "'import'", "in", "node", ".", "attributes", ":", "self", ".", "compiler", ".", "has_ns_imports", "=", "True", "self", ".", "printer", ".", "start_source", "(", "node", ".", "lineno", ")", "if", "len", "(", "node", ".", "nodes", ")", ":", "self", ".", "printer", ".", "writeline", "(", "\"def make_namespace():\"", ")", "export", "=", "[", "]", "identifiers", "=", "self", ".", "compiler", ".", "identifiers", ".", "branch", "(", "node", ")", "self", ".", "in_def", "=", "True", "class", "NSDefVisitor", "(", "object", ")", ":", "def", "visitDefTag", "(", "s", ",", "node", ")", ":", "s", ".", "visitDefOrBase", "(", "node", ")", "def", "visitBlockTag", "(", "s", ",", "node", ")", ":", "s", ".", "visitDefOrBase", "(", "node", ")", "def", "visitDefOrBase", "(", "s", ",", "node", ")", ":", "if", "node", ".", "is_anonymous", ":", "raise", "exceptions", ".", "CompileException", "(", "\"Can't put anonymous blocks inside \"", "\"<%namespace>\"", ",", "*", "*", "node", ".", "exception_kwargs", ")", "self", ".", "write_inline_def", "(", "node", ",", "identifiers", ",", "nested", "=", "False", ")", "export", ".", "append", "(", "node", ".", "funcname", ")", "vis", "=", "NSDefVisitor", "(", ")", "for", "n", "in", "node", ".", "nodes", ":", "n", ".", "accept_visitor", "(", "vis", ")", "self", ".", "printer", ".", "writeline", "(", "\"return [%s]\"", "%", "(", "','", ".", "join", "(", "export", ")", ")", ")", "self", ".", "printer", ".", "writeline", "(", "None", ")", "self", ".", "in_def", "=", "False", "callable_name", "=", "\"make_namespace()\"", "else", ":", "callable_name", "=", "\"None\"", "if", "'file'", "in", "node", ".", "parsed_attributes", ":", "self", ".", "printer", ".", "writeline", "(", "\"ns = runtime.TemplateNamespace(%r,\"", "\" context._clean_inheritance_tokens(),\"", "\" templateuri=%s, callables=%s, \"", "\" calling_uri=_template_uri)\"", "%", "(", "node", ".", "name", ",", "node", ".", "parsed_attributes", ".", "get", "(", "'file'", ",", "'None'", ")", ",", "callable_name", ",", ")", ")", "elif", "'module'", "in", "node", ".", "parsed_attributes", ":", "self", ".", "printer", ".", "writeline", "(", "\"ns = runtime.ModuleNamespace(%r,\"", "\" context._clean_inheritance_tokens(),\"", "\" callables=%s, calling_uri=_template_uri,\"", "\" module=%s)\"", "%", "(", "node", ".", "name", ",", "callable_name", ",", "node", ".", "parsed_attributes", ".", "get", "(", "'module'", ",", "'None'", ")", ")", ")", "else", ":", "self", ".", "printer", ".", "writeline", "(", "\"ns = runtime.Namespace(%r,\"", "\" context._clean_inheritance_tokens(),\"", "\" callables=%s, calling_uri=_template_uri)\"", "%", "(", "node", ".", "name", ",", "callable_name", ",", ")", ")", "if", "eval", "(", "node", ".", "attributes", ".", "get", "(", "'inheritable'", ",", "\"False\"", ")", ")", ":", "self", ".", "printer", ".", "writeline", "(", "\"context['self'].%s = ns\"", "%", "(", "node", ".", "name", ")", ")", "self", ".", "printer", ".", "writeline", "(", 
"\"context.namespaces[(__name__, %s)] = ns\"", "%", "repr", "(", "node", ".", "name", ")", ")", "self", ".", "printer", ".", "write_blanks", "(", "1", ")", "if", "not", "len", "(", "namespaces", ")", ":", "self", ".", "printer", ".", "writeline", "(", "\"pass\"", ")", "self", ".", "printer", ".", "writeline", "(", "None", ")" ]
44.946237
0.001404
[ "def write_namespaces(self, namespaces):\n", " \"\"\"write the module-level namespace-generating callable.\"\"\"\n", " self.printer.writelines(\n", " \"def _mako_get_namespace(context, name):\",\n", " \"try:\",\n", " \"return context.namespaces[(__name__, name)]\",\n", " \"except KeyError:\",\n", " \"_mako_generate_namespaces(context)\",\n", " \"return context.namespaces[(__name__, name)]\",\n", " None, None\n", " )\n", " self.printer.writeline(\"def _mako_generate_namespaces(context):\")\n", "\n", "\n", " for node in namespaces.values():\n", " if 'import' in node.attributes:\n", " self.compiler.has_ns_imports = True\n", " self.printer.start_source(node.lineno)\n", " if len(node.nodes):\n", " self.printer.writeline(\"def make_namespace():\")\n", " export = []\n", " identifiers = self.compiler.identifiers.branch(node)\n", " self.in_def = True\n", " class NSDefVisitor(object):\n", " def visitDefTag(s, node):\n", " s.visitDefOrBase(node)\n", "\n", " def visitBlockTag(s, node):\n", " s.visitDefOrBase(node)\n", "\n", " def visitDefOrBase(s, node):\n", " if node.is_anonymous:\n", " raise exceptions.CompileException(\n", " \"Can't put anonymous blocks inside \"\n", " \"<%namespace>\",\n", " **node.exception_kwargs\n", " )\n", " self.write_inline_def(node, identifiers, nested=False)\n", " export.append(node.funcname)\n", " vis = NSDefVisitor()\n", " for n in node.nodes:\n", " n.accept_visitor(vis)\n", " self.printer.writeline(\"return [%s]\" % (','.join(export)))\n", " self.printer.writeline(None)\n", " self.in_def = False\n", " callable_name = \"make_namespace()\"\n", " else:\n", " callable_name = \"None\"\n", "\n", " if 'file' in node.parsed_attributes:\n", " self.printer.writeline(\n", " \"ns = runtime.TemplateNamespace(%r,\"\n", " \" context._clean_inheritance_tokens(),\"\n", " \" templateuri=%s, callables=%s, \"\n", " \" calling_uri=_template_uri)\" %\n", " (\n", " node.name,\n", " node.parsed_attributes.get('file', 'None'),\n", " callable_name,\n", " )\n", " )\n", " elif 'module' in node.parsed_attributes:\n", " self.printer.writeline(\n", " \"ns = runtime.ModuleNamespace(%r,\"\n", " \" context._clean_inheritance_tokens(),\"\n", " \" callables=%s, calling_uri=_template_uri,\"\n", " \" module=%s)\" %\n", " (\n", " node.name,\n", " callable_name,\n", " node.parsed_attributes.get(\n", " 'module', 'None')\n", " )\n", " )\n", " else:\n", " self.printer.writeline(\n", " \"ns = runtime.Namespace(%r,\"\n", " \" context._clean_inheritance_tokens(),\"\n", " \" callables=%s, calling_uri=_template_uri)\" %\n", " (\n", " node.name,\n", " callable_name,\n", " )\n", " )\n", " if eval(node.attributes.get('inheritable', \"False\")):\n", " self.printer.writeline(\"context['self'].%s = ns\" % (node.name))\n", "\n", " self.printer.writeline(\n", " \"context.namespaces[(__name__, %s)] = ns\" % repr(node.name))\n", " self.printer.write_blanks(1)\n", " if not len(namespaces):\n", " self.printer.writeline(\"pass\")\n", " self.printer.writeline(None)" ]
[ 0, 0.014705882352941176, 0, 0, 0.041666666666666664, 0.014925373134328358, 0, 0, 0, 0, 0, 0, 0, 0, 0.024390243902439025, 0, 0, 0, 0, 0, 0, 0, 0, 0.022727272727272728, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.027777777777777776 ]
93
0.001572
def stem(self, word): """Return the stem of a word according to the Schinke stemmer. Parameters ---------- word : str The word to stem Returns ------- str Word stem Examples -------- >>> stmr = Schinke() >>> stmr.stem('atque') {'n': 'atque', 'v': 'atque'} >>> stmr.stem('census') {'n': 'cens', 'v': 'censu'} >>> stmr.stem('virum') {'n': 'uir', 'v': 'uiru'} >>> stmr.stem('populusque') {'n': 'popul', 'v': 'populu'} >>> stmr.stem('senatus') {'n': 'senat', 'v': 'senatu'} """ word = normalize('NFKD', text_type(word.lower())) word = ''.join( c for c in word if c in { 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', } ) # Rule 2 word = word.replace('j', 'i').replace('v', 'u') # Rule 3 if word[-3:] == 'que': # This diverges from the paper by also returning 'que' itself # unstemmed if word[:-3] in self._keep_que or word == 'que': return {'n': word, 'v': word} else: word = word[:-3] # Base case will mean returning the words as is noun = word verb = word # Rule 4 for endlen in range(4, 0, -1): if word[-endlen:] in self._n_endings[endlen]: if len(word) - 2 >= endlen: noun = word[:-endlen] else: noun = word break for endlen in range(6, 0, -1): if word[-endlen:] in self._v_endings_strip[endlen]: if len(word) - 2 >= endlen: verb = word[:-endlen] else: verb = word break if word[-endlen:] in self._v_endings_alter[endlen]: if word[-endlen:] in { 'iuntur', 'erunt', 'untur', 'iunt', 'unt', }: new_word = word[:-endlen] + 'i' addlen = 1 elif word[-endlen:] in {'beris', 'bor', 'bo'}: new_word = word[:-endlen] + 'bi' addlen = 2 else: new_word = word[:-endlen] + 'eri' addlen = 3 # Technically this diverges from the paper by considering the # length of the stem without the new suffix if len(new_word) >= 2 + addlen: verb = new_word else: verb = word break return {'n': noun, 'v': verb}
[ "def", "stem", "(", "self", ",", "word", ")", ":", "word", "=", "normalize", "(", "'NFKD'", ",", "text_type", "(", "word", ".", "lower", "(", ")", ")", ")", "word", "=", "''", ".", "join", "(", "c", "for", "c", "in", "word", "if", "c", "in", "{", "'a'", ",", "'b'", ",", "'c'", ",", "'d'", ",", "'e'", ",", "'f'", ",", "'g'", ",", "'h'", ",", "'i'", ",", "'j'", ",", "'k'", ",", "'l'", ",", "'m'", ",", "'n'", ",", "'o'", ",", "'p'", ",", "'q'", ",", "'r'", ",", "'s'", ",", "'t'", ",", "'u'", ",", "'v'", ",", "'w'", ",", "'x'", ",", "'y'", ",", "'z'", ",", "}", ")", "# Rule 2", "word", "=", "word", ".", "replace", "(", "'j'", ",", "'i'", ")", ".", "replace", "(", "'v'", ",", "'u'", ")", "# Rule 3", "if", "word", "[", "-", "3", ":", "]", "==", "'que'", ":", "# This diverges from the paper by also returning 'que' itself", "# unstemmed", "if", "word", "[", ":", "-", "3", "]", "in", "self", ".", "_keep_que", "or", "word", "==", "'que'", ":", "return", "{", "'n'", ":", "word", ",", "'v'", ":", "word", "}", "else", ":", "word", "=", "word", "[", ":", "-", "3", "]", "# Base case will mean returning the words as is", "noun", "=", "word", "verb", "=", "word", "# Rule 4", "for", "endlen", "in", "range", "(", "4", ",", "0", ",", "-", "1", ")", ":", "if", "word", "[", "-", "endlen", ":", "]", "in", "self", ".", "_n_endings", "[", "endlen", "]", ":", "if", "len", "(", "word", ")", "-", "2", ">=", "endlen", ":", "noun", "=", "word", "[", ":", "-", "endlen", "]", "else", ":", "noun", "=", "word", "break", "for", "endlen", "in", "range", "(", "6", ",", "0", ",", "-", "1", ")", ":", "if", "word", "[", "-", "endlen", ":", "]", "in", "self", ".", "_v_endings_strip", "[", "endlen", "]", ":", "if", "len", "(", "word", ")", "-", "2", ">=", "endlen", ":", "verb", "=", "word", "[", ":", "-", "endlen", "]", "else", ":", "verb", "=", "word", "break", "if", "word", "[", "-", "endlen", ":", "]", "in", "self", ".", "_v_endings_alter", "[", "endlen", "]", ":", "if", "word", "[", "-", "endlen", ":", "]", "in", "{", "'iuntur'", ",", "'erunt'", ",", "'untur'", ",", "'iunt'", ",", "'unt'", ",", "}", ":", "new_word", "=", "word", "[", ":", "-", "endlen", "]", "+", "'i'", "addlen", "=", "1", "elif", "word", "[", "-", "endlen", ":", "]", "in", "{", "'beris'", ",", "'bor'", ",", "'bo'", "}", ":", "new_word", "=", "word", "[", ":", "-", "endlen", "]", "+", "'bi'", "addlen", "=", "2", "else", ":", "new_word", "=", "word", "[", ":", "-", "endlen", "]", "+", "'eri'", "addlen", "=", "3", "# Technically this diverges from the paper by considering the", "# length of the stem without the new suffix", "if", "len", "(", "new_word", ")", ">=", "2", "+", "addlen", ":", "verb", "=", "new_word", "else", ":", "verb", "=", "word", "break", "return", "{", "'n'", ":", "noun", ",", "'v'", ":", "verb", "}" ]
26.735537
0.000596
[ "def stem(self, word):\n", " \"\"\"Return the stem of a word according to the Schinke stemmer.\n", "\n", " Parameters\n", " ----------\n", " word : str\n", " The word to stem\n", "\n", " Returns\n", " -------\n", " str\n", " Word stem\n", "\n", " Examples\n", " --------\n", " >>> stmr = Schinke()\n", " >>> stmr.stem('atque')\n", " {'n': 'atque', 'v': 'atque'}\n", " >>> stmr.stem('census')\n", " {'n': 'cens', 'v': 'censu'}\n", " >>> stmr.stem('virum')\n", " {'n': 'uir', 'v': 'uiru'}\n", " >>> stmr.stem('populusque')\n", " {'n': 'popul', 'v': 'populu'}\n", " >>> stmr.stem('senatus')\n", " {'n': 'senat', 'v': 'senatu'}\n", "\n", " \"\"\"\n", " word = normalize('NFKD', text_type(word.lower()))\n", " word = ''.join(\n", " c\n", " for c in word\n", " if c\n", " in {\n", " 'a',\n", " 'b',\n", " 'c',\n", " 'd',\n", " 'e',\n", " 'f',\n", " 'g',\n", " 'h',\n", " 'i',\n", " 'j',\n", " 'k',\n", " 'l',\n", " 'm',\n", " 'n',\n", " 'o',\n", " 'p',\n", " 'q',\n", " 'r',\n", " 's',\n", " 't',\n", " 'u',\n", " 'v',\n", " 'w',\n", " 'x',\n", " 'y',\n", " 'z',\n", " }\n", " )\n", "\n", " # Rule 2\n", " word = word.replace('j', 'i').replace('v', 'u')\n", "\n", " # Rule 3\n", " if word[-3:] == 'que':\n", " # This diverges from the paper by also returning 'que' itself\n", " # unstemmed\n", " if word[:-3] in self._keep_que or word == 'que':\n", " return {'n': word, 'v': word}\n", " else:\n", " word = word[:-3]\n", "\n", " # Base case will mean returning the words as is\n", " noun = word\n", " verb = word\n", "\n", " # Rule 4\n", " for endlen in range(4, 0, -1):\n", " if word[-endlen:] in self._n_endings[endlen]:\n", " if len(word) - 2 >= endlen:\n", " noun = word[:-endlen]\n", " else:\n", " noun = word\n", " break\n", "\n", " for endlen in range(6, 0, -1):\n", " if word[-endlen:] in self._v_endings_strip[endlen]:\n", " if len(word) - 2 >= endlen:\n", " verb = word[:-endlen]\n", " else:\n", " verb = word\n", " break\n", " if word[-endlen:] in self._v_endings_alter[endlen]:\n", " if word[-endlen:] in {\n", " 'iuntur',\n", " 'erunt',\n", " 'untur',\n", " 'iunt',\n", " 'unt',\n", " }:\n", " new_word = word[:-endlen] + 'i'\n", " addlen = 1\n", " elif word[-endlen:] in {'beris', 'bor', 'bo'}:\n", " new_word = word[:-endlen] + 'bi'\n", " addlen = 2\n", " else:\n", " new_word = word[:-endlen] + 'eri'\n", " addlen = 3\n", "\n", " # Technically this diverges from the paper by considering the\n", " # length of the stem without the new suffix\n", " if len(new_word) >= 2 + addlen:\n", " verb = new_word\n", " else:\n", " verb = word\n", " break\n", "\n", " return {'n': noun, 'v': verb}" ]
[ 0, 0.014084507042253521, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.02702702702702703 ]
121
0.00034
def adjacent(predicate, iterable, distance=1): """Return an iterable over `(bool, item)` tuples where the `item` is drawn from *iterable* and the `bool` indicates whether that item satisfies the *predicate* or is adjacent to an item that does. For example, to find whether items are adjacent to a ``3``:: >>> list(adjacent(lambda x: x == 3, range(6))) [(False, 0), (False, 1), (True, 2), (True, 3), (True, 4), (False, 5)] Set *distance* to change what counts as adjacent. For example, to find whether items are two places away from a ``3``: >>> list(adjacent(lambda x: x == 3, range(6), distance=2)) [(False, 0), (True, 1), (True, 2), (True, 3), (True, 4), (True, 5)] This is useful for contextualizing the results of a search function. For example, a code comparison tool might want to identify lines that have changed, but also surrounding lines to give the viewer of the diff context. The predicate function will only be called once for each item in the iterable. See also :func:`groupby_transform`, which can be used with this function to group ranges of items with the same `bool` value. """ # Allow distance=0 mainly for testing that it reproduces results with map() if distance < 0: raise ValueError('distance must be at least 0') i1, i2 = tee(iterable) padding = [False] * distance selected = chain(padding, map(predicate, i1), padding) adjacent_to_selected = map(any, windowed(selected, 2 * distance + 1)) return zip(adjacent_to_selected, i2)
[ "def", "adjacent", "(", "predicate", ",", "iterable", ",", "distance", "=", "1", ")", ":", "# Allow distance=0 mainly for testing that it reproduces results with map()", "if", "distance", "<", "0", ":", "raise", "ValueError", "(", "'distance must be at least 0'", ")", "i1", ",", "i2", "=", "tee", "(", "iterable", ")", "padding", "=", "[", "False", "]", "*", "distance", "selected", "=", "chain", "(", "padding", ",", "map", "(", "predicate", ",", "i1", ")", ",", "padding", ")", "adjacent_to_selected", "=", "map", "(", "any", ",", "windowed", "(", "selected", ",", "2", "*", "distance", "+", "1", ")", ")", "return", "zip", "(", "adjacent_to_selected", ",", "i2", ")" ]
41.945946
0.00063
[ "def adjacent(predicate, iterable, distance=1):\n", " \"\"\"Return an iterable over `(bool, item)` tuples where the `item` is\n", " drawn from *iterable* and the `bool` indicates whether\n", " that item satisfies the *predicate* or is adjacent to an item that does.\n", "\n", " For example, to find whether items are adjacent to a ``3``::\n", "\n", " >>> list(adjacent(lambda x: x == 3, range(6)))\n", " [(False, 0), (False, 1), (True, 2), (True, 3), (True, 4), (False, 5)]\n", "\n", " Set *distance* to change what counts as adjacent. For example, to find\n", " whether items are two places away from a ``3``:\n", "\n", " >>> list(adjacent(lambda x: x == 3, range(6), distance=2))\n", " [(False, 0), (True, 1), (True, 2), (True, 3), (True, 4), (True, 5)]\n", "\n", " This is useful for contextualizing the results of a search function.\n", " For example, a code comparison tool might want to identify lines that\n", " have changed, but also surrounding lines to give the viewer of the diff\n", " context.\n", "\n", " The predicate function will only be called once for each item in the\n", " iterable.\n", "\n", " See also :func:`groupby_transform`, which can be used with this function\n", " to group ranges of items with the same `bool` value.\n", "\n", " \"\"\"\n", " # Allow distance=0 mainly for testing that it reproduces results with map()\n", " if distance < 0:\n", " raise ValueError('distance must be at least 0')\n", "\n", " i1, i2 = tee(iterable)\n", " padding = [False] * distance\n", " selected = chain(padding, map(predicate, i1), padding)\n", " adjacent_to_selected = map(any, windowed(selected, 2 * distance + 1))\n", " return zip(adjacent_to_selected, i2)" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.025 ]
37
0.000676
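The `adjacent` helper above pairs naturally with grouping, as its docstring notes. A minimal sketch, assuming `adjacent` is importable from `more_itertools` (the library this function appears to come from) and using the standard-library `groupby` in place of `groupby_transform`:

from itertools import groupby
from operator import itemgetter

from more_itertools import adjacent  # assumption: the function above ships here

lines = ["a", "b", "MATCH", "c", "d", "e", "MATCH", "f"]
flagged = adjacent(lambda line: line == "MATCH", lines)

# Collapse consecutive items that share the same bool into context/skip runs.
for keep, run in groupby(flagged, key=itemgetter(0)):
    print("context" if keep else "skip", [item for _, item in run])
# skip ['a']
# context ['b', 'MATCH', 'c']
# skip ['d']
# context ['e', 'MATCH', 'f']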
def handle(cls, value, **kwargs): """Use a value from the environment or fall back to a default if the environment doesn't contain the variable. Format of value: <env_var>::<default value> For example: Groups: ${default app_security_groups::sg-12345,sg-67890} If `app_security_groups` is defined in the environment, its defined value will be returned. Otherwise, `sg-12345,sg-67890` will be the returned value. This allows defaults to be set at the config file level. """ try: env_var_name, default_val = value.split("::", 1) except ValueError: raise ValueError("Invalid value for default: %s. Must be in " "<env_var>::<default value> format." % value) if env_var_name in kwargs['context'].environment: return kwargs['context'].environment[env_var_name] else: return default_val
[ "def", "handle", "(", "cls", ",", "value", ",", "*", "*", "kwargs", ")", ":", "try", ":", "env_var_name", ",", "default_val", "=", "value", ".", "split", "(", "\"::\"", ",", "1", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "\"Invalid value for default: %s. Must be in \"", "\"<env_var>::<default value> format.\"", "%", "value", ")", "if", "env_var_name", "in", "kwargs", "[", "'context'", "]", ".", "environment", ":", "return", "kwargs", "[", "'context'", "]", ".", "environment", "[", "env_var_name", "]", "else", ":", "return", "default_val" ]
33.310345
0.002012
[ "def handle(cls, value, **kwargs):\n", " \"\"\"Use a value from the environment or fall back to a default if the\n", " environment doesn't contain the variable.\n", "\n", " Format of value:\n", "\n", " <env_var>::<default value>\n", "\n", " For example:\n", "\n", " Groups: ${default app_security_groups::sg-12345,sg-67890}\n", "\n", " If `app_security_groups` is defined in the environment, its defined\n", " value will be returned. Otherwise, `sg-12345,sg-67890` will be the\n", " returned value.\n", "\n", " This allows defaults to be set at the config file level.\n", " \"\"\"\n", "\n", " try:\n", " env_var_name, default_val = value.split(\"::\", 1)\n", " except ValueError:\n", " raise ValueError(\"Invalid value for default: %s. Must be in \"\n", " \"<env_var>::<default value> format.\" % value)\n", "\n", " if env_var_name in kwargs['context'].environment:\n", " return kwargs['context'].environment[env_var_name]\n", " else:\n", " return default_val" ]
[ 0, 0.012987012987012988, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.03333333333333333 ]
29
0.001597
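The `<env_var>::<default value>` convention described above can be checked in isolation. A small sketch with a made-up environment dict standing in for `kwargs['context'].environment`:

def resolve_default(value, environment):
    # Same parsing rule as the lookup above: split on the first "::".
    env_var_name, default_val = value.split("::", 1)
    return environment.get(env_var_name, default_val)

environment = {"app_security_groups": "sg-abc123"}  # hypothetical environment contents
print(resolve_default("app_security_groups::sg-12345,sg-67890", environment))  # sg-abc123
print(resolve_default("missing_key::sg-12345,sg-67890", environment))          # sg-12345,sg-67890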
def preScale(self, sx, sy): """Calculate pre scaling and replace current matrix.""" self.a *= sx self.b *= sx self.c *= sy self.d *= sy return self
[ "def", "preScale", "(", "self", ",", "sx", ",", "sy", ")", ":", "self", ".", "a", "*=", "sx", "self", ".", "b", "*=", "sx", "self", ".", "c", "*=", "sy", "self", ".", "d", "*=", "sy", "return", "self" ]
27
0.010256
[ "def preScale(self, sx, sy):\n", " \"\"\"Calculate pre scaling and replace current matrix.\"\"\"\n", " self.a *= sx\n", " self.b *= sx\n", " self.c *= sy\n", " self.d *= sy\n", " return self" ]
[ 0, 0.015625, 0, 0, 0, 0, 0.05263157894736842 ]
7
0.009751
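`preScale` multiplies only the linear components of the matrix: `a` and `b` by `sx`, `c` and `d` by `sy`. A tiny numeric illustration on plain floats, without the surrounding matrix class:

# Identity linear part (a, b, c, d) pre-scaled by sx=2, sy=3.
a, b, c, d = 1.0, 0.0, 0.0, 1.0
sx, sy = 2.0, 3.0
a, b, c, d = a * sx, b * sx, c * sy, d * sy
print(a, b, c, d)  # 2.0 0.0 0.0 3.0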
def find_amp_phase(angle, data, npeaks=3, min_amp=None, min_phase=None): """Estimate amplitude and phase of an approximately sinusoidal quantity using `scipy.optimize.curve_fit`. Phase is defined as the angle at which the cosine curve fit reaches its first peak. It is assumed that phase is positive. For example: data_fit = amp*np.cos(npeaks*(angle - phase)) + mean_data Parameters ---------- angle : numpy array Time series of angle values in radians data : numpy array Time series of data to be fit npeaks : int Number of peaks per revolution, or normalized frequency min_phase : float Minimum phase to allow for guess to least squares fit Returns ------- amp : float Amplitude of regressed cosine phase : float Angle of the first peak in radians """ # First subtract the mean of the data data = data - data.mean() # Make some guesses for parameters from a subset of data starting at an # integer multiple of periods if angle[0] != 0.0: angle1 = angle[0] + (2*np.pi/npeaks - (2*np.pi/npeaks) % angle[0]) else: angle1 = angle[0] angle1 += min_phase angle2 = angle1 + 2*np.pi/npeaks ind = np.logical_and(angle >= angle1, angle <= angle2) angle_sub = angle[ind] data_sub = data[ind] amp_guess = (data_sub.max() - data_sub.min())/2 phase_guess = angle[np.where(data_sub == data_sub.max())[0][0]] \ % (np.pi*2/npeaks) # Define the function we will try to fit to def func(angle, amp, phase, mean): return amp*np.cos(npeaks*(angle - phase)) + mean # Calculate fit p0 = amp_guess, phase_guess, 0.0 popt, pcov = curve_fit(func, angle, data, p0=p0) amp, phase, mean = popt return amp, phase
[ "def", "find_amp_phase", "(", "angle", ",", "data", ",", "npeaks", "=", "3", ",", "min_amp", "=", "None", ",", "min_phase", "=", "None", ")", ":", "# First subtract the mean of the data\r", "data", "=", "data", "-", "data", ".", "mean", "(", ")", "# Make some guesses for parameters from a subset of data starting at an\r", "# integer multiple of periods\r", "if", "angle", "[", "0", "]", "!=", "0.0", ":", "angle1", "=", "angle", "[", "0", "]", "+", "(", "2", "*", "np", ".", "pi", "/", "npeaks", "-", "(", "2", "*", "np", ".", "pi", "/", "npeaks", ")", "%", "angle", "[", "0", "]", ")", "else", ":", "angle1", "=", "angle", "[", "0", "]", "angle1", "+=", "min_phase", "angle2", "=", "angle1", "+", "2", "*", "np", ".", "pi", "/", "npeaks", "ind", "=", "np", ".", "logical_and", "(", "angle", ">=", "angle1", ",", "angle", "<=", "angle2", ")", "angle_sub", "=", "angle", "[", "ind", "]", "data_sub", "=", "data", "[", "ind", "]", "amp_guess", "=", "(", "data_sub", ".", "max", "(", ")", "-", "data_sub", ".", "min", "(", ")", ")", "/", "2", "phase_guess", "=", "angle", "[", "np", ".", "where", "(", "data_sub", "==", "data_sub", ".", "max", "(", ")", ")", "[", "0", "]", "[", "0", "]", "]", "%", "(", "np", ".", "pi", "*", "2", "/", "npeaks", ")", "# Define the function we will try to fit to\r", "def", "func", "(", "angle", ",", "amp", ",", "phase", ",", "mean", ")", ":", "return", "amp", "*", "np", ".", "cos", "(", "npeaks", "*", "(", "angle", "-", "phase", ")", ")", "+", "mean", "# Calculate fit\r", "p0", "=", "amp_guess", ",", "phase_guess", ",", "0.0", "popt", ",", "pcov", "=", "curve_fit", "(", "func", ",", "angle", ",", "data", ",", "p0", "=", "p0", ")", "amp", ",", "phase", ",", "mean", "=", "popt", "return", "amp", ",", "phase" ]
35.666667
0.001605
[ "def find_amp_phase(angle, data, npeaks=3, min_amp=None, min_phase=None):\r\n", " \"\"\"Estimate amplitude and phase of an approximately sinusoidal quantity\r\n", " using `scipy.optimize.curve_fit`.\r\n", "\r\n", " Phase is defined as the angle at which the cosine curve fit reaches its\r\n", " first peak. It is assumed that phase is positive. For example:\r\n", "\r\n", " data_fit = amp*np.cos(npeaks*(angle - phase)) + mean_data\r\n", "\r\n", " Parameters\r\n", " ----------\r\n", " angle : numpy array\r\n", " Time series of angle values in radians\r\n", " data : numpy array\r\n", " Time series of data to be fit\r\n", " npeaks : int\r\n", " Number of peaks per revolution, or normalized frequency\r\n", " min_phase : float\r\n", " Minimum phase to allow for guess to least squares fit\r\n", "\r\n", " Returns\r\n", " -------\r\n", " amp : float\r\n", " Amplitude of regressed cosine\r\n", " phase : float\r\n", " Angle of the first peak in radians\r\n", " \"\"\"\r\n", " # First subtract the mean of the data\r\n", " data = data - data.mean()\r\n", " # Make some guesses for parameters from a subset of data starting at an\r\n", " # integer multiple of periods\r\n", " if angle[0] != 0.0:\r\n", " angle1 = angle[0] + (2*np.pi/npeaks - (2*np.pi/npeaks) % angle[0])\r\n", " else:\r\n", " angle1 = angle[0]\r\n", " angle1 += min_phase\r\n", " angle2 = angle1 + 2*np.pi/npeaks\r\n", " ind = np.logical_and(angle >= angle1, angle <= angle2)\r\n", " angle_sub = angle[ind]\r\n", " data_sub = data[ind]\r\n", " amp_guess = (data_sub.max() - data_sub.min())/2\r\n", " phase_guess = angle[np.where(data_sub == data_sub.max())[0][0]] \\\r\n", " % (np.pi*2/npeaks)\r\n", " # Define the function we will try to fit to\r\n", " def func(angle, amp, phase, mean):\r\n", " return amp*np.cos(npeaks*(angle - phase)) + mean\r\n", " # Calculate fit\r\n", " p0 = amp_guess, phase_guess, 0.0\r\n", " popt, pcov = curve_fit(func, angle, data, p0=p0)\r\n", " amp, phase, mean = popt\r\n", " return amp, phase" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.03125, 0, 0.025, 0, 0, 0, 0, 0, 0.047619047619047616 ]
51
0.002037
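A quick way to sanity-check `find_amp_phase` is to fit data generated from the very model it assumes, `amp*np.cos(npeaks*(angle - phase)) + mean`. The sketch below assumes the function is importable from its module; `min_phase` is passed explicitly because the `None` default would fail at `angle1 += min_phase`:

import numpy as np

angle = np.linspace(0.0, 4 * np.pi, 2000)
true_amp, true_phase, npeaks = 0.8, 0.3, 3
data = true_amp * np.cos(npeaks * (angle - true_phase)) + 5.0  # nonzero mean

amp, phase = find_amp_phase(angle, data, npeaks=npeaks, min_phase=0.0)
print(round(amp, 3), round(phase, 3))  # expected to come out close to 0.8 and 0.3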
def get_ngroups(self, field=None): ''' Returns ngroups count if it was specified in the query, otherwise ValueError. If grouping on more than one field, provide the field argument to specify which count you are looking for. ''' field = field if field else self._determine_group_field(field) if 'ngroups' in self.data['grouped'][field]: return self.data['grouped'][field]['ngroups'] raise ValueError("ngroups not found in response. specify group.ngroups in the query.")
[ "def", "get_ngroups", "(", "self", ",", "field", "=", "None", ")", ":", "field", "=", "field", "if", "field", "else", "self", ".", "_determine_group_field", "(", "field", ")", "if", "'ngroups'", "in", "self", ".", "data", "[", "'grouped'", "]", "[", "field", "]", ":", "return", "self", ".", "data", "[", "'grouped'", "]", "[", "field", "]", "[", "'ngroups'", "]", "raise", "ValueError", "(", "\"ngroups not found in response. specify group.ngroups in the query.\"", ")" ]
52.8
0.009311
[ "def get_ngroups(self, field=None):\n", " '''\n", " Returns ngroups count if it was specified in the query, otherwise ValueError.\n", "\n", " If grouping on more than one field, provide the field argument to specify which count you are looking for.\n", " '''\n", " field = field if field else self._determine_group_field(field)\n", " if 'ngroups' in self.data['grouped'][field]:\n", " return self.data['grouped'][field]['ngroups']\n", " raise ValueError(\"ngroups not found in response. specify group.ngroups in the query.\")" ]
[ 0, 0.08333333333333333, 0.011627906976744186, 0, 0.008695652173913044, 0, 0, 0, 0, 0.02127659574468085 ]
10
0.012493
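`get_ngroups` only works when the Solr response actually contains an `ngroups` entry, i.e. when `group.ngroups=true` was requested. A toy payload showing the shape it reads, with a made-up group field name:

# Hypothetical grouped response for a query with group.field=category&group.ngroups=true
data = {
    "grouped": {
        "category": {"matches": 42, "ngroups": 7, "groups": []}
    }
}
field = "category"
print(data["grouped"][field]["ngroups"])  # 7 -- the value get_ngroups returns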
def get_next_iteration(self, iteration, iteration_kwargs={}): """ Returns a SH iteration with only evaluations on the biggest budget Parameters ---------- iteration: int the index of the iteration to be instantiated Returns ------- SuccessiveHalving: the SuccessiveHalving iteration with the corresponding number of configurations """ budgets = [self.max_budget] ns = [self.budget_per_iteration//self.max_budget] return(SuccessiveHalving(HPB_iter=iteration, num_configs=ns, budgets=budgets, config_sampler=self.config_generator.get_config, **iteration_kwargs))
[ "def", "get_next_iteration", "(", "self", ",", "iteration", ",", "iteration_kwargs", "=", "{", "}", ")", ":", "budgets", "=", "[", "self", ".", "max_budget", "]", "ns", "=", "[", "self", ".", "budget_per_iteration", "//", "self", ".", "max_budget", "]", "return", "(", "SuccessiveHalving", "(", "HPB_iter", "=", "iteration", ",", "num_configs", "=", "ns", ",", "budgets", "=", "budgets", ",", "config_sampler", "=", "self", ".", "config_generator", ".", "get_config", ",", "*", "*", "iteration_kwargs", ")", ")" ]
29.4
0.042834
[ "def get_next_iteration(self, iteration, iteration_kwargs={}):\n", "\t\t\"\"\"\n", "\t\tReturns a SH iteration with only evaluations on the biggest budget\n", "\t\t\n", "\t\tParameters\n", "\t\t----------\n", "\t\t\titeration: int\n", "\t\t\t\tthe index of the iteration to be instantiated\n", "\n", "\t\tReturns\n", "\t\t-------\n", "\t\t\tSuccessiveHalving: the SuccessiveHalving iteration with the\n", "\t\t\t\tcorresponding number of configurations\n", "\t\t\"\"\"\n", "\t\t\n", "\t\t\n", "\t\tbudgets = [self.max_budget]\n", "\t\tns = [self.budget_per_iteration//self.max_budget]\n", "\t\t\n", "\t\treturn(SuccessiveHalving(HPB_iter=iteration, num_configs=ns, budgets=budgets, config_sampler=self.config_generator.get_config, **iteration_kwargs))" ]
[ 0, 0.3333333333333333, 0.014492753623188406, 0.6666666666666666, 0.07692307692307693, 0.07692307692307693, 0.05555555555555555, 0.02, 0, 0.1, 0.1, 0.015873015873015872, 0.023255813953488372, 0.16666666666666666, 0.6666666666666666, 0.6666666666666666, 0.06666666666666667, 0.019230769230769232, 0.6666666666666666, 0.020134228187919462 ]
20
0.187786
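The arithmetic above puts every configuration on the largest budget: a single entry in `budgets`, and `budget_per_iteration // max_budget` configurations. A worked example with assumed numbers:

max_budget = 9.0
budget_per_iteration = 81.0

budgets = [max_budget]
ns = [int(budget_per_iteration // max_budget)]
print(budgets, ns)  # [9.0] [9] -> nine configurations, each evaluated once at budget 9.0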
def __EncodedAttribute_generic_encode_rgb24(self, rgb24, width=0, height=0, quality=0, format=_ImageFormat.RawImage): """Internal usage only""" if not is_seq(rgb24): raise TypeError("Expected sequence (str, numpy.ndarray, list, tuple " "or bytearray) as first argument") is_str = is_pure_str(rgb24) if is_str: if not width or not height: raise ValueError("When giving a string as data, you must also " "supply width and height") if np and isinstance(rgb24, np.ndarray): if rgb24.ndim != 3: if not width or not height: raise ValueError("When giving a non 2D numpy array, width and " "height must be supplied") if rgb24.nbytes / 3 != width * height: raise ValueError("numpy array size mismatch") else: if rgb24.itemsize != 1: raise TypeError("Expected numpy array with itemsize == 1") if not rgb24.flags.c_contiguous: raise TypeError("Currently, only contiguous, aligned numpy arrays " "are supported") if not rgb24.flags.aligned: raise TypeError("Currently, only contiguous, aligned numpy arrays " "are supported") if not is_str and (not width or not height): height = len(rgb24) if height < 1: raise IndexError("Expected sequence with at least one row") row0 = rgb24[0] if not is_seq(row0): raise IndexError("Expected sequence (str, numpy.ndarray, list, tuple or " "bytearray) inside a sequence") width = len(row0) if is_pure_str(row0) or type(row0) == bytearray: width /= 3 if format == _ImageFormat.RawImage: self._encode_rgb24(rgb24, width, height) elif format == _ImageFormat.JpegImage: self._encode_jpeg_rgb24(rgb24, width, height, quality)
[ "def", "__EncodedAttribute_generic_encode_rgb24", "(", "self", ",", "rgb24", ",", "width", "=", "0", ",", "height", "=", "0", ",", "quality", "=", "0", ",", "format", "=", "_ImageFormat", ".", "RawImage", ")", ":", "if", "not", "is_seq", "(", "rgb24", ")", ":", "raise", "TypeError", "(", "\"Expected sequence (str, numpy.ndarray, list, tuple \"", "\"or bytearray) as first argument\"", ")", "is_str", "=", "is_pure_str", "(", "rgb24", ")", "if", "is_str", ":", "if", "not", "width", "or", "not", "height", ":", "raise", "ValueError", "(", "\"When giving a string as data, you must also \"", "\"supply width and height\"", ")", "if", "np", "and", "isinstance", "(", "rgb24", ",", "np", ".", "ndarray", ")", ":", "if", "rgb24", ".", "ndim", "!=", "3", ":", "if", "not", "width", "or", "not", "height", ":", "raise", "ValueError", "(", "\"When giving a non 2D numpy array, width and \"", "\"height must be supplied\"", ")", "if", "rgb24", ".", "nbytes", "/", "3", "!=", "width", "*", "height", ":", "raise", "ValueError", "(", "\"numpy array size mismatch\"", ")", "else", ":", "if", "rgb24", ".", "itemsize", "!=", "1", ":", "raise", "TypeError", "(", "\"Expected numpy array with itemsize == 1\"", ")", "if", "not", "rgb24", ".", "flags", ".", "c_contiguous", ":", "raise", "TypeError", "(", "\"Currently, only contiguous, aligned numpy arrays \"", "\"are supported\"", ")", "if", "not", "rgb24", ".", "flags", ".", "aligned", ":", "raise", "TypeError", "(", "\"Currently, only contiguous, aligned numpy arrays \"", "\"are supported\"", ")", "if", "not", "is_str", "and", "(", "not", "width", "or", "not", "height", ")", ":", "height", "=", "len", "(", "rgb24", ")", "if", "height", "<", "1", ":", "raise", "IndexError", "(", "\"Expected sequence with at least one row\"", ")", "row0", "=", "rgb24", "[", "0", "]", "if", "not", "is_seq", "(", "row0", ")", ":", "raise", "IndexError", "(", "\"Expected sequence (str, numpy.ndarray, list, tuple or \"", "\"bytearray) inside a sequence\"", ")", "width", "=", "len", "(", "row0", ")", "if", "is_pure_str", "(", "row0", ")", "or", "type", "(", "row0", ")", "==", "bytearray", ":", "width", "/=", "3", "if", "format", "==", "_ImageFormat", ".", "RawImage", ":", "self", ".", "_encode_rgb24", "(", "rgb24", ",", "width", ",", "height", ")", "elif", "format", "==", "_ImageFormat", ".", "JpegImage", ":", "self", ".", "_encode_jpeg_rgb24", "(", "rgb24", ",", "width", ",", "height", ",", "quality", ")" ]
43.911111
0.001485
[ "def __EncodedAttribute_generic_encode_rgb24(self, rgb24, width=0, height=0, quality=0, format=_ImageFormat.RawImage):\n", " \"\"\"Internal usage only\"\"\"\n", " if not is_seq(rgb24):\n", " raise TypeError(\"Expected sequence (str, numpy.ndarray, list, tuple \"\n", " \"or bytearray) as first argument\")\n", "\n", " is_str = is_pure_str(rgb24)\n", " if is_str:\n", " if not width or not height:\n", " raise ValueError(\"When giving a string as data, you must also \"\n", " \"supply width and height\")\n", "\n", " if np and isinstance(rgb24, np.ndarray):\n", " if rgb24.ndim != 3:\n", " if not width or not height:\n", " raise ValueError(\"When giving a non 2D numpy array, width and \"\n", " \"height must be supplied\")\n", " if rgb24.nbytes / 3 != width * height:\n", " raise ValueError(\"numpy array size mismatch\")\n", " else:\n", " if rgb24.itemsize != 1:\n", " raise TypeError(\"Expected numpy array with itemsize == 1\")\n", " if not rgb24.flags.c_contiguous:\n", " raise TypeError(\"Currently, only contiguous, aligned numpy arrays \"\n", " \"are supported\")\n", " if not rgb24.flags.aligned:\n", " raise TypeError(\"Currently, only contiguous, aligned numpy arrays \"\n", " \"are supported\")\n", "\n", " if not is_str and (not width or not height):\n", " height = len(rgb24)\n", " if height < 1:\n", " raise IndexError(\"Expected sequence with at least one row\")\n", "\n", " row0 = rgb24[0]\n", " if not is_seq(row0):\n", " raise IndexError(\"Expected sequence (str, numpy.ndarray, list, tuple or \"\n", " \"bytearray) inside a sequence\")\n", " width = len(row0)\n", " if is_pure_str(row0) or type(row0) == bytearray:\n", " width /= 3\n", " if format == _ImageFormat.RawImage:\n", " self._encode_rgb24(rgb24, width, height)\n", " elif format == _ImageFormat.JpegImage:\n", " self._encode_jpeg_rgb24(rgb24, width, height, quality)" ]
[ 0.00847457627118644, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011627906976744186, 0, 0, 0, 0, 0, 0, 0, 0.016129032258064516 ]
45
0.000805
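The validation above accepts, among other inputs, a C-contiguous, aligned numpy array of bytes with three dimensions. A minimal sketch of an input that passes those checks (the encode call itself is library-internal and not shown):

import numpy as np

height, width = 4, 6
rgb24 = np.zeros((height, width, 3), dtype=np.uint8)  # one byte per channel

# The properties the checks above require of a 3-D array input.
assert rgb24.ndim == 3
assert rgb24.itemsize == 1
assert rgb24.flags.c_contiguous and rgb24.flags.aligned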
def disconnect_handler(remote, *args, **kwargs): """Handle unlinking of remote account. :param remote: The remote application. :returns: The HTML response. """ if not current_user.is_authenticated: return current_app.login_manager.unauthorized() remote_account = RemoteAccount.get(user_id=current_user.get_id(), client_id=remote.consumer_key) external_ids = [i.id for i in current_user.external_identifiers if i.method == GLOBUS_EXTERNAL_METHOD] if external_ids: oauth_unlink_external_id(dict(id=external_ids[0], method=GLOBUS_EXTERNAL_METHOD)) if remote_account: with db.session.begin_nested(): remote_account.delete() return redirect(url_for('invenio_oauthclient_settings.index'))
[ "def", "disconnect_handler", "(", "remote", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "current_user", ".", "is_authenticated", ":", "return", "current_app", ".", "login_manager", ".", "unauthorized", "(", ")", "remote_account", "=", "RemoteAccount", ".", "get", "(", "user_id", "=", "current_user", ".", "get_id", "(", ")", ",", "client_id", "=", "remote", ".", "consumer_key", ")", "external_ids", "=", "[", "i", ".", "id", "for", "i", "in", "current_user", ".", "external_identifiers", "if", "i", ".", "method", "==", "GLOBUS_EXTERNAL_METHOD", "]", "if", "external_ids", ":", "oauth_unlink_external_id", "(", "dict", "(", "id", "=", "external_ids", "[", "0", "]", ",", "method", "=", "GLOBUS_EXTERNAL_METHOD", ")", ")", "if", "remote_account", ":", "with", "db", ".", "session", ".", "begin_nested", "(", ")", ":", "remote_account", ".", "delete", "(", ")", "return", "redirect", "(", "url_for", "(", "'invenio_oauthclient_settings.index'", ")", ")" ]
36.434783
0.001163
[ "def disconnect_handler(remote, *args, **kwargs):\n", " \"\"\"Handle unlinking of remote account.\n", "\n", " :param remote: The remote application.\n", " :returns: The HTML response.\n", " \"\"\"\n", " if not current_user.is_authenticated:\n", " return current_app.login_manager.unauthorized()\n", "\n", " remote_account = RemoteAccount.get(user_id=current_user.get_id(),\n", " client_id=remote.consumer_key)\n", " external_ids = [i.id for i in current_user.external_identifiers\n", " if i.method == GLOBUS_EXTERNAL_METHOD]\n", "\n", " if external_ids:\n", " oauth_unlink_external_id(dict(id=external_ids[0],\n", " method=GLOBUS_EXTERNAL_METHOD))\n", "\n", " if remote_account:\n", " with db.session.begin_nested():\n", " remote_account.delete()\n", "\n", " return redirect(url_for('invenio_oauthclient_settings.index'))" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.015151515151515152 ]
23
0.000659
def updateMesh(self, polydata): """ Overwrite the polygonal mesh of the actor with a new one. """ self.poly = polydata self.mapper.SetInputData(polydata) self.mapper.Modified() return self
[ "def", "updateMesh", "(", "self", ",", "polydata", ")", ":", "self", ".", "poly", "=", "polydata", "self", ".", "mapper", ".", "SetInputData", "(", "polydata", ")", "self", ".", "mapper", ".", "Modified", "(", ")", "return", "self" ]
29.625
0.008197
[ "def updateMesh(self, polydata):\n", " \"\"\"\n", " Overwrite the polygonal mesh of the actor with a new one.\n", " \"\"\"\n", " self.poly = polydata\n", " self.mapper.SetInputData(polydata)\n", " self.mapper.Modified()\n", " return self" ]
[ 0, 0.08333333333333333, 0, 0, 0, 0, 0, 0.05263157894736842 ]
8
0.016996
def Y_ampl(self, new_y_scale): """Make scaling on Y axis using predefined values""" self.parent.value('y_scale', new_y_scale) self.parent.traces.display()
[ "def", "Y_ampl", "(", "self", ",", "new_y_scale", ")", ":", "self", ".", "parent", ".", "value", "(", "'y_scale'", ",", "new_y_scale", ")", "self", ".", "parent", ".", "traces", ".", "display", "(", ")" ]
43.75
0.011236
[ "def Y_ampl(self, new_y_scale):\n", " \"\"\"Make scaling on Y axis using predefined values\"\"\"\n", " self.parent.value('y_scale', new_y_scale)\n", " self.parent.traces.display()" ]
[ 0, 0.01639344262295082, 0, 0.027777777777777776 ]
4
0.011043
def Print(self): """Prints the hypotheses and their probabilities.""" for hypo, prob in sorted(self.Items()): print(hypo, prob)
[ "def", "Print", "(", "self", ")", ":", "for", "hypo", ",", "prob", "in", "sorted", "(", "self", ".", "Items", "(", ")", ")", ":", "print", "(", "hypo", ",", "prob", ")" ]
38
0.012903
[ "def Print(self):\n", " \"\"\"Prints the hypotheses and their probabilities.\"\"\"\n", " for hypo, prob in sorted(self.Items()):\n", " print(hypo, prob)" ]
[ 0, 0.01639344262295082, 0, 0.034482758620689655 ]
4
0.012719
def processFiles(args): """ Generates and error checks each file's information before the compilation actually starts """ to_process = [] for filename in args['filenames']: file = dict() if args['include']: file['include'] = INCLUDE_STRING + ''.join( ['-I' + item for item in args['include']]) else: file['include'] = INCLUDE_STRING file['file_path'] = getPath(filename) file['file_base_name'] = \ os.path.splitext(os.path.basename(file['file_path']))[0] file['no_extension'], file['extension'] = os.path.splitext( file['file_path']) if file['extension'] not in CYTHONIZABLE_FILE_EXTS: raise CytherError( "The file '{}' is not a designated cython file".format( file['file_path'])) base_path = os.path.dirname(file['file_path']) local_build = args['local'] if not local_build: cache_name = os.path.join(base_path, '__cythercache__') os.makedirs(cache_name, exist_ok=True) file['c_name'] = os.path.join(cache_name, file['file_base_name']) + '.c' else: file['c_name'] = file['no_extension'] + '.c' file['object_file_name'] = os.path.splitext(file['c_name'])[0] + '.o' output_name = args['output_name'] if args['watch']: file['output_name'] = file['no_extension']+DEFAULT_OUTPUT_EXTENSION elif output_name: if os.path.exists(output_name) and os.path.isfile(output_name): file['output_name'] = output_name else: dirname = os.path.dirname(output_name) if not dirname: dirname = os.getcwd() if os.path.exists(dirname): file['output_name'] = output_name else: raise CytherError('The directory specified to write' 'the output file in does not exist') else: file['output_name'] = file['no_extension']+DEFAULT_OUTPUT_EXTENSION file['stamp_if_error'] = 0 to_process.append(file) return to_process
[ "def", "processFiles", "(", "args", ")", ":", "to_process", "=", "[", "]", "for", "filename", "in", "args", "[", "'filenames'", "]", ":", "file", "=", "dict", "(", ")", "if", "args", "[", "'include'", "]", ":", "file", "[", "'include'", "]", "=", "INCLUDE_STRING", "+", "''", ".", "join", "(", "[", "'-I'", "+", "item", "for", "item", "in", "args", "[", "'include'", "]", "]", ")", "else", ":", "file", "[", "'include'", "]", "=", "INCLUDE_STRING", "file", "[", "'file_path'", "]", "=", "getPath", "(", "filename", ")", "file", "[", "'file_base_name'", "]", "=", "os", ".", "path", ".", "splitext", "(", "os", ".", "path", ".", "basename", "(", "file", "[", "'file_path'", "]", ")", ")", "[", "0", "]", "file", "[", "'no_extension'", "]", ",", "file", "[", "'extension'", "]", "=", "os", ".", "path", ".", "splitext", "(", "file", "[", "'file_path'", "]", ")", "if", "file", "[", "'extension'", "]", "not", "in", "CYTHONIZABLE_FILE_EXTS", ":", "raise", "CytherError", "(", "\"The file '{}' is not a designated cython file\"", ".", "format", "(", "file", "[", "'file_path'", "]", ")", ")", "base_path", "=", "os", ".", "path", ".", "dirname", "(", "file", "[", "'file_path'", "]", ")", "local_build", "=", "args", "[", "'local'", "]", "if", "not", "local_build", ":", "cache_name", "=", "os", ".", "path", ".", "join", "(", "base_path", ",", "'__cythercache__'", ")", "os", ".", "makedirs", "(", "cache_name", ",", "exist_ok", "=", "True", ")", "file", "[", "'c_name'", "]", "=", "os", ".", "path", ".", "join", "(", "cache_name", ",", "file", "[", "'file_base_name'", "]", ")", "+", "'.c'", "else", ":", "file", "[", "'c_name'", "]", "=", "file", "[", "'no_extension'", "]", "+", "'.c'", "file", "[", "'object_file_name'", "]", "=", "os", ".", "path", ".", "splitext", "(", "file", "[", "'c_name'", "]", ")", "[", "0", "]", "+", "'.o'", "output_name", "=", "args", "[", "'output_name'", "]", "if", "args", "[", "'watch'", "]", ":", "file", "[", "'output_name'", "]", "=", "file", "[", "'no_extension'", "]", "+", "DEFAULT_OUTPUT_EXTENSION", "elif", "output_name", ":", "if", "os", ".", "path", ".", "exists", "(", "output_name", ")", "and", "os", ".", "path", ".", "isfile", "(", "output_name", ")", ":", "file", "[", "'output_name'", "]", "=", "output_name", "else", ":", "dirname", "=", "os", ".", "path", ".", "dirname", "(", "output_name", ")", "if", "not", "dirname", ":", "dirname", "=", "os", ".", "getcwd", "(", ")", "if", "os", ".", "path", ".", "exists", "(", "dirname", ")", ":", "file", "[", "'output_name'", "]", "=", "output_name", "else", ":", "raise", "CytherError", "(", "'The directory specified to write'", "'the output file in does not exist'", ")", "else", ":", "file", "[", "'output_name'", "]", "=", "file", "[", "'no_extension'", "]", "+", "DEFAULT_OUTPUT_EXTENSION", "file", "[", "'stamp_if_error'", "]", "=", "0", "to_process", ".", "append", "(", "file", ")", "return", "to_process" ]
40.472727
0.001316
[ "def processFiles(args):\n", " \"\"\"\n", " Generates and error checks each file's information before the compilation actually starts\n", " \"\"\"\n", " to_process = []\n", "\n", " for filename in args['filenames']:\n", " file = dict()\n", "\n", " if args['include']:\n", " file['include'] = INCLUDE_STRING + ''.join(\n", " ['-I' + item for item in args['include']])\n", " else:\n", " file['include'] = INCLUDE_STRING\n", "\n", " file['file_path'] = getPath(filename)\n", " file['file_base_name'] = \\\n", " os.path.splitext(os.path.basename(file['file_path']))[0]\n", " file['no_extension'], file['extension'] = os.path.splitext(\n", " file['file_path'])\n", " if file['extension'] not in CYTHONIZABLE_FILE_EXTS:\n", " raise CytherError(\n", " \"The file '{}' is not a designated cython file\".format(\n", " file['file_path']))\n", " base_path = os.path.dirname(file['file_path'])\n", " local_build = args['local']\n", " if not local_build:\n", " cache_name = os.path.join(base_path, '__cythercache__')\n", " os.makedirs(cache_name, exist_ok=True)\n", " file['c_name'] = os.path.join(cache_name,\n", " file['file_base_name']) + '.c'\n", " else:\n", " file['c_name'] = file['no_extension'] + '.c'\n", " file['object_file_name'] = os.path.splitext(file['c_name'])[0] + '.o'\n", " output_name = args['output_name']\n", " if args['watch']:\n", " file['output_name'] = file['no_extension']+DEFAULT_OUTPUT_EXTENSION\n", " elif output_name:\n", " if os.path.exists(output_name) and os.path.isfile(output_name):\n", " file['output_name'] = output_name\n", " else:\n", " dirname = os.path.dirname(output_name)\n", " if not dirname:\n", " dirname = os.getcwd()\n", " if os.path.exists(dirname):\n", " file['output_name'] = output_name\n", " else:\n", " raise CytherError('The directory specified to write'\n", " 'the output file in does not exist')\n", " else:\n", " file['output_name'] = file['no_extension']+DEFAULT_OUTPUT_EXTENSION\n", "\n", " file['stamp_if_error'] = 0\n", " to_process.append(file)\n", " return to_process" ]
[ 0, 0, 0.010638297872340425, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.015384615384615385, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.047619047619047616 ]
55
0.001339
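For a non-local build, the function above derives the cached C file path next to the source file. The same derivation on an assumed input path:

import os

file_path = "/home/dev/project/spam.pyx"           # assumed input
base_path = os.path.dirname(file_path)
cache_name = os.path.join(base_path, "__cythercache__")
file_base_name = os.path.splitext(os.path.basename(file_path))[0]

c_name = os.path.join(cache_name, file_base_name) + ".c"
object_file_name = os.path.splitext(c_name)[0] + ".o"
print(c_name)             # /home/dev/project/__cythercache__/spam.c
print(object_file_name)   # /home/dev/project/__cythercache__/spam.o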
def rollback(self): """Implementation of NAPALM method rollback.""" commands = [] commands.append('configure replace flash:rollback-0') commands.append('write memory') self.device.run_commands(commands)
[ "def", "rollback", "(", "self", ")", ":", "commands", "=", "[", "]", "commands", ".", "append", "(", "'configure replace flash:rollback-0'", ")", "commands", ".", "append", "(", "'write memory'", ")", "self", ".", "device", ".", "run_commands", "(", "commands", ")" ]
39.5
0.008264
[ "def rollback(self):\n", " \"\"\"Implementation of NAPALM method rollback.\"\"\"\n", " commands = []\n", " commands.append('configure replace flash:rollback-0')\n", " commands.append('write memory')\n", " self.device.run_commands(commands)" ]
[ 0, 0.017857142857142856, 0, 0, 0, 0.023809523809523808 ]
6
0.006944
def start(): r"""Starts ec. """ processPendingModules() if not state.main_module_name in ModuleMembers: # don't start the core when main is not Ec-ed return MainModule = sys.modules[state.main_module_name] if not MainModule.__ec_member__.Members: # there was some error while loading script(s) return global BaseGroup BaseGroup = MainModule.__ec_member__ Argv = sys.argv[1:] global mode mode = 'd' if Argv else 's' # dispatch / shell mode if mode == 's': import shell shell.init() else: import dispatch dispatch.init(Argv) processExitHooks()
[ "def", "start", "(", ")", ":", "processPendingModules", "(", ")", "if", "not", "state", ".", "main_module_name", "in", "ModuleMembers", ":", "# don't start the core when main is not Ec-ed\r", "return", "MainModule", "=", "sys", ".", "modules", "[", "state", ".", "main_module_name", "]", "if", "not", "MainModule", ".", "__ec_member__", ".", "Members", ":", "# there was some error while loading script(s)\r", "return", "global", "BaseGroup", "BaseGroup", "=", "MainModule", ".", "__ec_member__", "Argv", "=", "sys", ".", "argv", "[", "1", ":", "]", "global", "mode", "mode", "=", "'d'", "if", "Argv", "else", "'s'", "# dispatch / shell mode\r", "if", "mode", "==", "'s'", ":", "import", "shell", "shell", ".", "init", "(", ")", "else", ":", "import", "dispatch", "dispatch", ".", "init", "(", "Argv", ")", "processExitHooks", "(", ")" ]
20.724138
0.031797
[ "def start():\r\n", " r\"\"\"Starts ec.\r\n", " \"\"\"\r\n", " processPendingModules()\r\n", "\r\n", " if not state.main_module_name in ModuleMembers: # don't start the core when main is not Ec-ed\r\n", " return\r\n", "\r\n", " MainModule = sys.modules[state.main_module_name]\r\n", "\r\n", " if not MainModule.__ec_member__.Members: # there was some error while loading script(s)\r\n", " return\r\n", "\r\n", " global BaseGroup\r\n", " BaseGroup = MainModule.__ec_member__\r\n", "\r\n", " Argv = sys.argv[1:]\r\n", " global mode\r\n", " mode = 'd' if Argv else 's' # dispatch / shell mode\r\n", "\r\n", " if mode == 's':\r\n", " import shell\r\n", " shell.init()\r\n", "\r\n", " else:\r\n", " import dispatch\r\n", " dispatch.init(Argv)\r\n", "\r\n", " processExitHooks()" ]
[ 0, 0.05555555555555555, 0, 0.037037037037037035, 0, 0.041237113402061855, 0, 0, 0.019230769230769232, 0, 0.03296703296703297, 0, 0, 0.05, 0.025, 0, 0.043478260869565216, 0.06666666666666667, 0.03636363636363636, 0, 0.05263157894736842, 0, 0, 0, 0.1111111111111111, 0, 0, 0, 0.1 ]
29
0.023148
def generate_oauth2_headers(self): """Generates header for oauth2 """ encoded_credentials = base64.b64encode(('{0}:{1}'.format(self.consumer_key,self.consumer_secret)).encode('utf-8')) headers={ 'Authorization':'Basic {0}'.format(encoded_credentials.decode('utf-8')), 'Content-Type': 'application/x-www-form-urlencoded' } return headers
[ "def", "generate_oauth2_headers", "(", "self", ")", ":", "encoded_credentials", "=", "base64", ".", "b64encode", "(", "(", "'{0}:{1}'", ".", "format", "(", "self", ".", "consumer_key", ",", "self", ".", "consumer_secret", ")", ")", ".", "encode", "(", "'utf-8'", ")", ")", "headers", "=", "{", "'Authorization'", ":", "'Basic {0}'", ".", "format", "(", "encoded_credentials", ".", "decode", "(", "'utf-8'", ")", ")", ",", "'Content-Type'", ":", "'application/x-www-form-urlencoded'", "}", "return", "headers" ]
40
0.017115
[ "def generate_oauth2_headers(self):\n", " \"\"\"Generates header for oauth2\n", " \"\"\"\n", " encoded_credentials = base64.b64encode(('{0}:{1}'.format(self.consumer_key,self.consumer_secret)).encode('utf-8'))\n", " headers={\n", " 'Authorization':'Basic {0}'.format(encoded_credentials.decode('utf-8')),\n", " 'Content-Type': 'application/x-www-form-urlencoded'\n", " }\n", "\n", " return headers" ]
[ 0, 0.02564102564102564, 0, 0.016260162601626018, 0.05555555555555555, 0.023529411764705882, 0, 0, 0, 0.045454545454545456 ]
10
0.016644
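The header above is standard HTTP Basic encoding of `consumer_key:consumer_secret`. The same construction with made-up credentials:

import base64

consumer_key, consumer_secret = "my-key", "my-secret"  # placeholder credentials
encoded = base64.b64encode("{0}:{1}".format(consumer_key, consumer_secret).encode("utf-8"))
headers = {
    "Authorization": "Basic {0}".format(encoded.decode("utf-8")),
    "Content-Type": "application/x-www-form-urlencoded",
}
print(headers["Authorization"])  # Basic bXkta2V5Om15LXNlY3JldA==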
def _add_snps( self, snps, discrepant_snp_positions_threshold, discrepant_genotypes_threshold, save_output, ): """ Add SNPs to this Individual. Parameters ---------- snps : SNPs SNPs to add discrepant_snp_positions_threshold : int see above discrepant_genotypes_threshold : int see above save_output see above Returns ------- discrepant_positions : pandas.DataFrame discrepant_genotypes : pandas.DataFrame """ discrepant_positions = pd.DataFrame() discrepant_genotypes = pd.DataFrame() if snps.snps is None: return discrepant_positions, discrepant_genotypes build = snps.build source = [s.strip() for s in snps.source.split(",")] if not snps.build_detected: print("build not detected, assuming build {}".format(snps.build)) if self._build is None: self._build = build elif self._build != build: print( "build / assembly mismatch between current build of SNPs and SNPs being loaded" ) # ensure there area always two X alleles snps = self._double_single_alleles(snps.snps, "X") if self._snps is None: self._source.extend(source) self._snps = snps else: common_snps = self._snps.join(snps, how="inner", rsuffix="_added") discrepant_positions = common_snps.loc[ (common_snps["chrom"] != common_snps["chrom_added"]) | (common_snps["pos"] != common_snps["pos_added"]) ] if 0 < len(discrepant_positions) < discrepant_snp_positions_threshold: print( str(len(discrepant_positions)) + " SNP positions were discrepant; " "keeping original positions" ) if save_output: self._discrepant_positions_file_count += 1 lineage.save_df_as_csv( discrepant_positions, self._output_dir, self.get_var_name() + "_discrepant_positions_" + str(self._discrepant_positions_file_count) + ".csv", ) elif len(discrepant_positions) >= discrepant_snp_positions_threshold: print( "too many SNPs differ in position; ensure same genome build is being used" ) return discrepant_positions, discrepant_genotypes # remove null genotypes common_snps = common_snps.loc[ ~common_snps["genotype"].isnull() & ~common_snps["genotype_added"].isnull() ] # discrepant genotypes are where alleles are not equivalent (i.e., alleles are not the # same and not swapped) discrepant_genotypes = common_snps.loc[ ( (common_snps["genotype"].str.len() == 1) & (common_snps["genotype_added"].str.len() == 1) & ~( common_snps["genotype"].str[0] == common_snps["genotype_added"].str[0] ) ) | ( (common_snps["genotype"].str.len() == 2) & (common_snps["genotype_added"].str.len() == 2) & ~( ( common_snps["genotype"].str[0] == common_snps["genotype_added"].str[0] ) & ( common_snps["genotype"].str[1] == common_snps["genotype_added"].str[1] ) ) & ~( ( common_snps["genotype"].str[0] == common_snps["genotype_added"].str[1] ) & ( common_snps["genotype"].str[1] == common_snps["genotype_added"].str[0] ) ) ) ] if 0 < len(discrepant_genotypes) < discrepant_genotypes_threshold: print( str(len(discrepant_genotypes)) + " SNP genotypes were discrepant; " "marking those as null" ) if save_output: self._discrepant_genotypes_file_count += 1 lineage.save_df_as_csv( discrepant_genotypes, self._output_dir, self.get_var_name() + "_discrepant_genotypes_" + str(self._discrepant_genotypes_file_count) + ".csv", ) elif len(discrepant_genotypes) >= discrepant_genotypes_threshold: print( "too many SNPs differ in their genotype; ensure file is for same " "individual" ) return discrepant_positions, discrepant_genotypes # add new SNPs self._source.extend(source) self._snps = self._snps.combine_first(snps) self._snps.loc[discrepant_genotypes.index, "genotype"] = np.nan # combine_first converts position to float64, so convert it 
back to int64 self._snps["pos"] = self._snps["pos"].astype(np.int64) self._snps = sort_snps(self._snps) return discrepant_positions, discrepant_genotypes
[ "def", "_add_snps", "(", "self", ",", "snps", ",", "discrepant_snp_positions_threshold", ",", "discrepant_genotypes_threshold", ",", "save_output", ",", ")", ":", "discrepant_positions", "=", "pd", ".", "DataFrame", "(", ")", "discrepant_genotypes", "=", "pd", ".", "DataFrame", "(", ")", "if", "snps", ".", "snps", "is", "None", ":", "return", "discrepant_positions", ",", "discrepant_genotypes", "build", "=", "snps", ".", "build", "source", "=", "[", "s", ".", "strip", "(", ")", "for", "s", "in", "snps", ".", "source", ".", "split", "(", "\",\"", ")", "]", "if", "not", "snps", ".", "build_detected", ":", "print", "(", "\"build not detected, assuming build {}\"", ".", "format", "(", "snps", ".", "build", ")", ")", "if", "self", ".", "_build", "is", "None", ":", "self", ".", "_build", "=", "build", "elif", "self", ".", "_build", "!=", "build", ":", "print", "(", "\"build / assembly mismatch between current build of SNPs and SNPs being loaded\"", ")", "# ensure there area always two X alleles", "snps", "=", "self", ".", "_double_single_alleles", "(", "snps", ".", "snps", ",", "\"X\"", ")", "if", "self", ".", "_snps", "is", "None", ":", "self", ".", "_source", ".", "extend", "(", "source", ")", "self", ".", "_snps", "=", "snps", "else", ":", "common_snps", "=", "self", ".", "_snps", ".", "join", "(", "snps", ",", "how", "=", "\"inner\"", ",", "rsuffix", "=", "\"_added\"", ")", "discrepant_positions", "=", "common_snps", ".", "loc", "[", "(", "common_snps", "[", "\"chrom\"", "]", "!=", "common_snps", "[", "\"chrom_added\"", "]", ")", "|", "(", "common_snps", "[", "\"pos\"", "]", "!=", "common_snps", "[", "\"pos_added\"", "]", ")", "]", "if", "0", "<", "len", "(", "discrepant_positions", ")", "<", "discrepant_snp_positions_threshold", ":", "print", "(", "str", "(", "len", "(", "discrepant_positions", ")", ")", "+", "\" SNP positions were discrepant; \"", "\"keeping original positions\"", ")", "if", "save_output", ":", "self", ".", "_discrepant_positions_file_count", "+=", "1", "lineage", ".", "save_df_as_csv", "(", "discrepant_positions", ",", "self", ".", "_output_dir", ",", "self", ".", "get_var_name", "(", ")", "+", "\"_discrepant_positions_\"", "+", "str", "(", "self", ".", "_discrepant_positions_file_count", ")", "+", "\".csv\"", ",", ")", "elif", "len", "(", "discrepant_positions", ")", ">=", "discrepant_snp_positions_threshold", ":", "print", "(", "\"too many SNPs differ in position; ensure same genome build is being used\"", ")", "return", "discrepant_positions", ",", "discrepant_genotypes", "# remove null genotypes", "common_snps", "=", "common_snps", ".", "loc", "[", "~", "common_snps", "[", "\"genotype\"", "]", ".", "isnull", "(", ")", "&", "~", "common_snps", "[", "\"genotype_added\"", "]", ".", "isnull", "(", ")", "]", "# discrepant genotypes are where alleles are not equivalent (i.e., alleles are not the", "# same and not swapped)", "discrepant_genotypes", "=", "common_snps", ".", "loc", "[", "(", "(", "common_snps", "[", "\"genotype\"", "]", ".", "str", ".", "len", "(", ")", "==", "1", ")", "&", "(", "common_snps", "[", "\"genotype_added\"", "]", ".", "str", ".", "len", "(", ")", "==", "1", ")", "&", "~", "(", "common_snps", "[", "\"genotype\"", "]", ".", "str", "[", "0", "]", "==", "common_snps", "[", "\"genotype_added\"", "]", ".", "str", "[", "0", "]", ")", ")", "|", "(", "(", "common_snps", "[", "\"genotype\"", "]", ".", "str", ".", "len", "(", ")", "==", "2", ")", "&", "(", "common_snps", "[", "\"genotype_added\"", "]", ".", "str", ".", "len", "(", ")", "==", "2", ")", "&", 
"~", "(", "(", "common_snps", "[", "\"genotype\"", "]", ".", "str", "[", "0", "]", "==", "common_snps", "[", "\"genotype_added\"", "]", ".", "str", "[", "0", "]", ")", "&", "(", "common_snps", "[", "\"genotype\"", "]", ".", "str", "[", "1", "]", "==", "common_snps", "[", "\"genotype_added\"", "]", ".", "str", "[", "1", "]", ")", ")", "&", "~", "(", "(", "common_snps", "[", "\"genotype\"", "]", ".", "str", "[", "0", "]", "==", "common_snps", "[", "\"genotype_added\"", "]", ".", "str", "[", "1", "]", ")", "&", "(", "common_snps", "[", "\"genotype\"", "]", ".", "str", "[", "1", "]", "==", "common_snps", "[", "\"genotype_added\"", "]", ".", "str", "[", "0", "]", ")", ")", ")", "]", "if", "0", "<", "len", "(", "discrepant_genotypes", ")", "<", "discrepant_genotypes_threshold", ":", "print", "(", "str", "(", "len", "(", "discrepant_genotypes", ")", ")", "+", "\" SNP genotypes were discrepant; \"", "\"marking those as null\"", ")", "if", "save_output", ":", "self", ".", "_discrepant_genotypes_file_count", "+=", "1", "lineage", ".", "save_df_as_csv", "(", "discrepant_genotypes", ",", "self", ".", "_output_dir", ",", "self", ".", "get_var_name", "(", ")", "+", "\"_discrepant_genotypes_\"", "+", "str", "(", "self", ".", "_discrepant_genotypes_file_count", ")", "+", "\".csv\"", ",", ")", "elif", "len", "(", "discrepant_genotypes", ")", ">=", "discrepant_genotypes_threshold", ":", "print", "(", "\"too many SNPs differ in their genotype; ensure file is for same \"", "\"individual\"", ")", "return", "discrepant_positions", ",", "discrepant_genotypes", "# add new SNPs", "self", ".", "_source", ".", "extend", "(", "source", ")", "self", ".", "_snps", "=", "self", ".", "_snps", ".", "combine_first", "(", "snps", ")", "self", ".", "_snps", ".", "loc", "[", "discrepant_genotypes", ".", "index", ",", "\"genotype\"", "]", "=", "np", ".", "nan", "# combine_first converts position to float64, so convert it back to int64", "self", ".", "_snps", "[", "\"pos\"", "]", "=", "self", ".", "_snps", "[", "\"pos\"", "]", ".", "astype", "(", "np", ".", "int64", ")", "self", ".", "_snps", "=", "sort_snps", "(", "self", ".", "_snps", ")", "return", "discrepant_positions", ",", "discrepant_genotypes" ]
36.579618
0.002034
[ "def _add_snps(\n", " self,\n", " snps,\n", " discrepant_snp_positions_threshold,\n", " discrepant_genotypes_threshold,\n", " save_output,\n", " ):\n", " \"\"\" Add SNPs to this Individual.\n", "\n", " Parameters\n", " ----------\n", " snps : SNPs\n", " SNPs to add\n", " discrepant_snp_positions_threshold : int\n", " see above\n", " discrepant_genotypes_threshold : int\n", " see above\n", " save_output\n", " see above\n", "\n", " Returns\n", " -------\n", " discrepant_positions : pandas.DataFrame\n", " discrepant_genotypes : pandas.DataFrame\n", " \"\"\"\n", " discrepant_positions = pd.DataFrame()\n", " discrepant_genotypes = pd.DataFrame()\n", "\n", " if snps.snps is None:\n", " return discrepant_positions, discrepant_genotypes\n", "\n", " build = snps.build\n", " source = [s.strip() for s in snps.source.split(\",\")]\n", "\n", " if not snps.build_detected:\n", " print(\"build not detected, assuming build {}\".format(snps.build))\n", "\n", " if self._build is None:\n", " self._build = build\n", " elif self._build != build:\n", " print(\n", " \"build / assembly mismatch between current build of SNPs and SNPs being loaded\"\n", " )\n", "\n", " # ensure there area always two X alleles\n", " snps = self._double_single_alleles(snps.snps, \"X\")\n", "\n", " if self._snps is None:\n", " self._source.extend(source)\n", " self._snps = snps\n", " else:\n", " common_snps = self._snps.join(snps, how=\"inner\", rsuffix=\"_added\")\n", "\n", " discrepant_positions = common_snps.loc[\n", " (common_snps[\"chrom\"] != common_snps[\"chrom_added\"])\n", " | (common_snps[\"pos\"] != common_snps[\"pos_added\"])\n", " ]\n", "\n", " if 0 < len(discrepant_positions) < discrepant_snp_positions_threshold:\n", " print(\n", " str(len(discrepant_positions)) + \" SNP positions were discrepant; \"\n", " \"keeping original positions\"\n", " )\n", "\n", " if save_output:\n", " self._discrepant_positions_file_count += 1\n", " lineage.save_df_as_csv(\n", " discrepant_positions,\n", " self._output_dir,\n", " self.get_var_name()\n", " + \"_discrepant_positions_\"\n", " + str(self._discrepant_positions_file_count)\n", " + \".csv\",\n", " )\n", " elif len(discrepant_positions) >= discrepant_snp_positions_threshold:\n", " print(\n", " \"too many SNPs differ in position; ensure same genome build is being used\"\n", " )\n", " return discrepant_positions, discrepant_genotypes\n", "\n", " # remove null genotypes\n", " common_snps = common_snps.loc[\n", " ~common_snps[\"genotype\"].isnull()\n", " & ~common_snps[\"genotype_added\"].isnull()\n", " ]\n", "\n", " # discrepant genotypes are where alleles are not equivalent (i.e., alleles are not the\n", " # same and not swapped)\n", " discrepant_genotypes = common_snps.loc[\n", " (\n", " (common_snps[\"genotype\"].str.len() == 1)\n", " & (common_snps[\"genotype_added\"].str.len() == 1)\n", " & ~(\n", " common_snps[\"genotype\"].str[0]\n", " == common_snps[\"genotype_added\"].str[0]\n", " )\n", " )\n", " | (\n", " (common_snps[\"genotype\"].str.len() == 2)\n", " & (common_snps[\"genotype_added\"].str.len() == 2)\n", " & ~(\n", " (\n", " common_snps[\"genotype\"].str[0]\n", " == common_snps[\"genotype_added\"].str[0]\n", " )\n", " & (\n", " common_snps[\"genotype\"].str[1]\n", " == common_snps[\"genotype_added\"].str[1]\n", " )\n", " )\n", " & ~(\n", " (\n", " common_snps[\"genotype\"].str[0]\n", " == common_snps[\"genotype_added\"].str[1]\n", " )\n", " & (\n", " common_snps[\"genotype\"].str[1]\n", " == common_snps[\"genotype_added\"].str[0]\n", " )\n", " )\n", " )\n", " ]\n", "\n", " 
if 0 < len(discrepant_genotypes) < discrepant_genotypes_threshold:\n", " print(\n", " str(len(discrepant_genotypes)) + \" SNP genotypes were discrepant; \"\n", " \"marking those as null\"\n", " )\n", "\n", " if save_output:\n", " self._discrepant_genotypes_file_count += 1\n", " lineage.save_df_as_csv(\n", " discrepant_genotypes,\n", " self._output_dir,\n", " self.get_var_name()\n", " + \"_discrepant_genotypes_\"\n", " + str(self._discrepant_genotypes_file_count)\n", " + \".csv\",\n", " )\n", " elif len(discrepant_genotypes) >= discrepant_genotypes_threshold:\n", " print(\n", " \"too many SNPs differ in their genotype; ensure file is for same \"\n", " \"individual\"\n", " )\n", " return discrepant_positions, discrepant_genotypes\n", "\n", " # add new SNPs\n", " self._source.extend(source)\n", " self._snps = self._snps.combine_first(snps)\n", " self._snps.loc[discrepant_genotypes.index, \"genotype\"] = np.nan\n", "\n", " # combine_first converts position to float64, so convert it back to int64\n", " self._snps[\"pos\"] = self._snps[\"pos\"].astype(np.int64)\n", "\n", " self._snps = sort_snps(self._snps)\n", "\n", " return discrepant_positions, discrepant_genotypes" ]
[ 0, 0, 0, 0, 0, 0, 0.14285714285714285, 0.024390243902439025, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.010416666666666666, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012048192771084338, 0, 0.011363636363636364, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012195121951219513, 0, 0.010526315789473684, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.010101010101010102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011363636363636364, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011494252873563218, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011627906976744186, 0, 0, 0, 0, 0.017543859649122806 ]
157
0.001821
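The discrepant-position check in `_add_snps` is an inner join followed by a column comparison. A toy reproduction of just that step with two tiny frames (column names as used above):

import pandas as pd

existing = pd.DataFrame({"chrom": ["1", "1"], "pos": [100, 200]}, index=["rs1", "rs2"])
added = pd.DataFrame({"chrom": ["1", "1"], "pos": [100, 250]}, index=["rs1", "rs2"])

common = existing.join(added, how="inner", rsuffix="_added")
discrepant = common.loc[
    (common["chrom"] != common["chrom_added"])
    | (common["pos"] != common["pos_added"])
]
print(list(discrepant.index))  # ['rs2'] -- only the SNP whose position moved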
def get_points(orig, dest, taillen): """Return a pair of lists of points for use making an arrow. The first list is the beginning and end point of the trunk of the arrow. The second list is the arrowhead. """ # Adjust the start and end points so they're on the first non-transparent pixel. # y = slope(x-ox) + oy # x = (y - oy) / slope + ox ox, oy = orig.center ow, oh = orig.size dx, dy = dest.center dw, dh = dest.size if ox < dx: leftx = ox rightx = dx xco = 1 elif ox > dx: leftx = ox * -1 rightx = dx * -1 xco = -1 else: # straight up and down arrow return up_and_down(orig, dest, taillen) if oy < dy: boty = oy topy = dy yco = 1 elif oy > dy: boty = oy * -1 topy = dy * -1 yco = -1 else: # straight left and right arrow return left_and_right(orig, dest, taillen) slope = (topy - boty) / (rightx - leftx) # start from the earliest point that intersects the bounding box. # work toward the center to find a non-transparent pixel # y - boty = ((topy-boty)/(rightx-leftx))*(x - leftx) if slope <= 1: for rightx in range( int(rightx - dw / 2), int(rightx)+1 ): topy = slope * (rightx - leftx) + boty if dest.collide_point(rightx * xco, topy * yco): rightx = float(rightx - 1) for pip in range(10): rightx += 0.1 * pip topy = slope * (rightx - leftx) + boty if dest.collide_point(rightx * xco, topy * yco): break break for leftx in range( int(leftx + ow / 2), int(leftx)-1, -1 ): boty = slope * (leftx - rightx) + topy if orig.collide_point(leftx * xco, boty * yco): leftx = float(leftx + 1) for pip in range(10): leftx -= 0.1 * pip boty = slope * (leftx - rightx) + topy if orig.collide_point(leftx * xco, boty * yco): break break else: # x = leftx + ((rightx-leftx)(y - boty))/(topy-boty) for topy in range( int(topy - dh / 2), int(topy) + 1 ): rightx = leftx + (topy - boty) / slope if dest.collide_point(rightx * xco, topy * yco): topy = float(topy - 1) for pip in range(10): topy += 0.1 * pip rightx = leftx + (topy - boty) / slope if dest.collide_point(rightx * xco, topy * yco): break break for boty in range( int(boty + oh / 2), int(boty) - 1, -1 ): leftx = (boty - topy) / slope + rightx if orig.collide_point(leftx * xco, boty * yco): boty = float(boty + 1) for pip in range(10): boty -= 0.1 * pip leftx = (boty - topy) / slope + rightx if orig.collide_point(leftx * xco, boty * yco): break break rise = topy - boty run = rightx - leftx try: start_theta = atan(rise/run) except ZeroDivisionError: return up_and_down(orig, dest, taillen) try: end_theta = atan(run/rise) except ZeroDivisionError: return left_and_right(orig, dest, taillen) # make the little wedge at the end so you can tell which way the # arrow's pointing, and flip it all back around to the way it was top_theta = start_theta - fortyfive bot_theta = pi - fortyfive - end_theta xoff1 = cos(top_theta) * taillen yoff1 = sin(top_theta) * taillen xoff2 = cos(bot_theta) * taillen yoff2 = sin(bot_theta) * taillen x1 = (rightx - xoff1) * xco x2 = (rightx - xoff2) * xco y1 = (topy - yoff1) * yco y2 = (topy - yoff2) * yco startx = leftx * xco starty = boty * yco endx = rightx * xco endy = topy * yco return ( [startx, starty, endx, endy], [x1, y1, endx, endy, x2, y2] )
[ "def", "get_points", "(", "orig", ",", "dest", ",", "taillen", ")", ":", "# Adjust the start and end points so they're on the first non-transparent pixel.", "# y = slope(x-ox) + oy", "# x = (y - oy) / slope + ox", "ox", ",", "oy", "=", "orig", ".", "center", "ow", ",", "oh", "=", "orig", ".", "size", "dx", ",", "dy", "=", "dest", ".", "center", "dw", ",", "dh", "=", "dest", ".", "size", "if", "ox", "<", "dx", ":", "leftx", "=", "ox", "rightx", "=", "dx", "xco", "=", "1", "elif", "ox", ">", "dx", ":", "leftx", "=", "ox", "*", "-", "1", "rightx", "=", "dx", "*", "-", "1", "xco", "=", "-", "1", "else", ":", "# straight up and down arrow", "return", "up_and_down", "(", "orig", ",", "dest", ",", "taillen", ")", "if", "oy", "<", "dy", ":", "boty", "=", "oy", "topy", "=", "dy", "yco", "=", "1", "elif", "oy", ">", "dy", ":", "boty", "=", "oy", "*", "-", "1", "topy", "=", "dy", "*", "-", "1", "yco", "=", "-", "1", "else", ":", "# straight left and right arrow", "return", "left_and_right", "(", "orig", ",", "dest", ",", "taillen", ")", "slope", "=", "(", "topy", "-", "boty", ")", "/", "(", "rightx", "-", "leftx", ")", "# start from the earliest point that intersects the bounding box.", "# work toward the center to find a non-transparent pixel", "# y - boty = ((topy-boty)/(rightx-leftx))*(x - leftx)", "if", "slope", "<=", "1", ":", "for", "rightx", "in", "range", "(", "int", "(", "rightx", "-", "dw", "/", "2", ")", ",", "int", "(", "rightx", ")", "+", "1", ")", ":", "topy", "=", "slope", "*", "(", "rightx", "-", "leftx", ")", "+", "boty", "if", "dest", ".", "collide_point", "(", "rightx", "*", "xco", ",", "topy", "*", "yco", ")", ":", "rightx", "=", "float", "(", "rightx", "-", "1", ")", "for", "pip", "in", "range", "(", "10", ")", ":", "rightx", "+=", "0.1", "*", "pip", "topy", "=", "slope", "*", "(", "rightx", "-", "leftx", ")", "+", "boty", "if", "dest", ".", "collide_point", "(", "rightx", "*", "xco", ",", "topy", "*", "yco", ")", ":", "break", "break", "for", "leftx", "in", "range", "(", "int", "(", "leftx", "+", "ow", "/", "2", ")", ",", "int", "(", "leftx", ")", "-", "1", ",", "-", "1", ")", ":", "boty", "=", "slope", "*", "(", "leftx", "-", "rightx", ")", "+", "topy", "if", "orig", ".", "collide_point", "(", "leftx", "*", "xco", ",", "boty", "*", "yco", ")", ":", "leftx", "=", "float", "(", "leftx", "+", "1", ")", "for", "pip", "in", "range", "(", "10", ")", ":", "leftx", "-=", "0.1", "*", "pip", "boty", "=", "slope", "*", "(", "leftx", "-", "rightx", ")", "+", "topy", "if", "orig", ".", "collide_point", "(", "leftx", "*", "xco", ",", "boty", "*", "yco", ")", ":", "break", "break", "else", ":", "# x = leftx + ((rightx-leftx)(y - boty))/(topy-boty)", "for", "topy", "in", "range", "(", "int", "(", "topy", "-", "dh", "/", "2", ")", ",", "int", "(", "topy", ")", "+", "1", ")", ":", "rightx", "=", "leftx", "+", "(", "topy", "-", "boty", ")", "/", "slope", "if", "dest", ".", "collide_point", "(", "rightx", "*", "xco", ",", "topy", "*", "yco", ")", ":", "topy", "=", "float", "(", "topy", "-", "1", ")", "for", "pip", "in", "range", "(", "10", ")", ":", "topy", "+=", "0.1", "*", "pip", "rightx", "=", "leftx", "+", "(", "topy", "-", "boty", ")", "/", "slope", "if", "dest", ".", "collide_point", "(", "rightx", "*", "xco", ",", "topy", "*", "yco", ")", ":", "break", "break", "for", "boty", "in", "range", "(", "int", "(", "boty", "+", "oh", "/", "2", ")", ",", "int", "(", "boty", ")", "-", "1", ",", "-", "1", ")", ":", "leftx", "=", "(", "boty", "-", "topy", ")", "/", "slope", "+", "rightx", "if", 
"orig", ".", "collide_point", "(", "leftx", "*", "xco", ",", "boty", "*", "yco", ")", ":", "boty", "=", "float", "(", "boty", "+", "1", ")", "for", "pip", "in", "range", "(", "10", ")", ":", "boty", "-=", "0.1", "*", "pip", "leftx", "=", "(", "boty", "-", "topy", ")", "/", "slope", "+", "rightx", "if", "orig", ".", "collide_point", "(", "leftx", "*", "xco", ",", "boty", "*", "yco", ")", ":", "break", "break", "rise", "=", "topy", "-", "boty", "run", "=", "rightx", "-", "leftx", "try", ":", "start_theta", "=", "atan", "(", "rise", "/", "run", ")", "except", "ZeroDivisionError", ":", "return", "up_and_down", "(", "orig", ",", "dest", ",", "taillen", ")", "try", ":", "end_theta", "=", "atan", "(", "run", "/", "rise", ")", "except", "ZeroDivisionError", ":", "return", "left_and_right", "(", "orig", ",", "dest", ",", "taillen", ")", "# make the little wedge at the end so you can tell which way the", "# arrow's pointing, and flip it all back around to the way it was", "top_theta", "=", "start_theta", "-", "fortyfive", "bot_theta", "=", "pi", "-", "fortyfive", "-", "end_theta", "xoff1", "=", "cos", "(", "top_theta", ")", "*", "taillen", "yoff1", "=", "sin", "(", "top_theta", ")", "*", "taillen", "xoff2", "=", "cos", "(", "bot_theta", ")", "*", "taillen", "yoff2", "=", "sin", "(", "bot_theta", ")", "*", "taillen", "x1", "=", "(", "rightx", "-", "xoff1", ")", "*", "xco", "x2", "=", "(", "rightx", "-", "xoff2", ")", "*", "xco", "y1", "=", "(", "topy", "-", "yoff1", ")", "*", "yco", "y2", "=", "(", "topy", "-", "yoff2", ")", "*", "yco", "startx", "=", "leftx", "*", "xco", "starty", "=", "boty", "*", "yco", "endx", "=", "rightx", "*", "xco", "endy", "=", "topy", "*", "yco", "return", "(", "[", "startx", ",", "starty", ",", "endx", ",", "endy", "]", ",", "[", "x1", ",", "y1", ",", "endx", ",", "endy", ",", "x2", ",", "y2", "]", ")" ]
32.198473
0.00046
[ "def get_points(orig, dest, taillen):\n", " \"\"\"Return a pair of lists of points for use making an arrow.\n", "\n", " The first list is the beginning and end point of the trunk of the arrow.\n", "\n", " The second list is the arrowhead.\n", "\n", " \"\"\"\n", " # Adjust the start and end points so they're on the first non-transparent pixel.\n", " # y = slope(x-ox) + oy\n", " # x = (y - oy) / slope + ox\n", " ox, oy = orig.center\n", " ow, oh = orig.size\n", " dx, dy = dest.center\n", " dw, dh = dest.size\n", " if ox < dx:\n", " leftx = ox\n", " rightx = dx\n", " xco = 1\n", " elif ox > dx:\n", " leftx = ox * -1\n", " rightx = dx * -1\n", " xco = -1\n", " else:\n", " # straight up and down arrow\n", " return up_and_down(orig, dest, taillen)\n", " if oy < dy:\n", " boty = oy\n", " topy = dy\n", " yco = 1\n", " elif oy > dy:\n", " boty = oy * -1\n", " topy = dy * -1\n", " yco = -1\n", " else:\n", " # straight left and right arrow\n", " return left_and_right(orig, dest, taillen)\n", " slope = (topy - boty) / (rightx - leftx)\n", " # start from the earliest point that intersects the bounding box.\n", " # work toward the center to find a non-transparent pixel\n", " # y - boty = ((topy-boty)/(rightx-leftx))*(x - leftx)\n", " if slope <= 1:\n", " for rightx in range(\n", " int(rightx - dw / 2),\n", " int(rightx)+1\n", " ):\n", " topy = slope * (rightx - leftx) + boty\n", " if dest.collide_point(rightx * xco, topy * yco):\n", " rightx = float(rightx - 1)\n", " for pip in range(10):\n", " rightx += 0.1 * pip\n", " topy = slope * (rightx - leftx) + boty\n", " if dest.collide_point(rightx * xco, topy * yco):\n", " break\n", " break\n", " for leftx in range(\n", " int(leftx + ow / 2),\n", " int(leftx)-1,\n", " -1\n", " ):\n", " boty = slope * (leftx - rightx) + topy\n", " if orig.collide_point(leftx * xco, boty * yco):\n", " leftx = float(leftx + 1)\n", " for pip in range(10):\n", " leftx -= 0.1 * pip\n", " boty = slope * (leftx - rightx) + topy\n", " if orig.collide_point(leftx * xco, boty * yco):\n", " break\n", " break\n", " else:\n", " # x = leftx + ((rightx-leftx)(y - boty))/(topy-boty)\n", " for topy in range(\n", " int(topy - dh / 2),\n", " int(topy) + 1\n", " ):\n", " rightx = leftx + (topy - boty) / slope\n", " if dest.collide_point(rightx * xco, topy * yco):\n", " topy = float(topy - 1)\n", " for pip in range(10):\n", " topy += 0.1 * pip\n", " rightx = leftx + (topy - boty) / slope\n", " if dest.collide_point(rightx * xco, topy * yco):\n", " break\n", " break\n", " for boty in range(\n", " int(boty + oh / 2),\n", " int(boty) - 1,\n", " -1\n", " ):\n", " leftx = (boty - topy) / slope + rightx\n", " if orig.collide_point(leftx * xco, boty * yco):\n", " boty = float(boty + 1)\n", " for pip in range(10):\n", " boty -= 0.1 * pip\n", " leftx = (boty - topy) / slope + rightx\n", " if orig.collide_point(leftx * xco, boty * yco):\n", " break\n", " break\n", "\n", " rise = topy - boty\n", " run = rightx - leftx\n", "\n", " try:\n", " start_theta = atan(rise/run)\n", " except ZeroDivisionError:\n", " return up_and_down(orig, dest, taillen)\n", " try:\n", " end_theta = atan(run/rise)\n", " except ZeroDivisionError:\n", " return left_and_right(orig, dest, taillen)\n", "\n", " # make the little wedge at the end so you can tell which way the\n", " # arrow's pointing, and flip it all back around to the way it was\n", " top_theta = start_theta - fortyfive\n", " bot_theta = pi - fortyfive - end_theta\n", " xoff1 = cos(top_theta) * taillen\n", " yoff1 = sin(top_theta) * taillen\n", " xoff2 = cos(bot_theta) 
* taillen\n", " yoff2 = sin(bot_theta) * taillen\n", " x1 = (rightx - xoff1) * xco\n", " x2 = (rightx - xoff2) * xco\n", " y1 = (topy - yoff1) * yco\n", " y2 = (topy - yoff2) * yco\n", " startx = leftx * xco\n", " starty = boty * yco\n", " endx = rightx * xco\n", " endy = topy * yco\n", " return (\n", " [startx, starty, endx, endy],\n", " [x1, y1, endx, endy, x2, y2]\n", " )" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0.011764705882352941, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.2 ]
131
0.001617
def depth_atleast(list_, depth): r""" Returns if depth of list is at least ``depth`` Args: list_ (list): depth (int): Returns: bool: True CommandLine: python -m utool.util_dict --exec-depth_atleast --show Example: >>> # DISABLE_DOCTEST >>> from utool.util_dict import * # NOQA >>> import utool as ut >>> list_ = [[[[0]]], [[0]]] >>> depth = 0 >>> result = [depth_atleast(list_, depth) for depth in range(0, 7)] >>> print(result) """ if depth == 0: return True else: try: return all([depth_atleast(item, depth - 1) for item in list_]) except TypeError: return False
[ "def", "depth_atleast", "(", "list_", ",", "depth", ")", ":", "if", "depth", "==", "0", ":", "return", "True", "else", ":", "try", ":", "return", "all", "(", "[", "depth_atleast", "(", "item", ",", "depth", "-", "1", ")", "for", "item", "in", "list_", "]", ")", "except", "TypeError", ":", "return", "False" ]
23.733333
0.00135
[ "def depth_atleast(list_, depth):\n", " r\"\"\"\n", " Returns if depth of list is at least ``depth``\n", "\n", " Args:\n", " list_ (list):\n", " depth (int):\n", "\n", " Returns:\n", " bool: True\n", "\n", " CommandLine:\n", " python -m utool.util_dict --exec-depth_atleast --show\n", "\n", " Example:\n", " >>> # DISABLE_DOCTEST\n", " >>> from utool.util_dict import * # NOQA\n", " >>> import utool as ut\n", " >>> list_ = [[[[0]]], [[0]]]\n", " >>> depth = 0\n", " >>> result = [depth_atleast(list_, depth) for depth in range(0, 7)]\n", " >>> print(result)\n", " \"\"\"\n", " if depth == 0:\n", " return True\n", " else:\n", " try:\n", " return all([depth_atleast(item, depth - 1) for item in list_])\n", " except TypeError:\n", " return False" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.041666666666666664 ]
30
0.001389
def nsum0(lx):
    """
    Accepts log-values as input, exponentiates them, sums down the rows
    (first dimension), normalizes and returns the result.
    Handles underflow by rescaling so that the largest value is exactly 1.0.
    """
    lx = numpy.asarray(lx)
    base = lx.max()
    x = numpy.exp(lx - base)
    ssum = x.sum(0)
    result = ssum / ssum.sum()

    conventional = (numpy.exp(lx).sum(0) / numpy.exp(lx).sum())
    assert similar(result, conventional)

    return result
[ "def", "nsum0", "(", "lx", ")", ":", "lx", "=", "numpy", ".", "asarray", "(", "lx", ")", "base", "=", "lx", ".", "max", "(", ")", "x", "=", "numpy", ".", "exp", "(", "lx", "-", "base", ")", "ssum", "=", "x", ".", "sum", "(", "0", ")", "result", "=", "ssum", "/", "ssum", ".", "sum", "(", ")", "conventional", "=", "(", "numpy", ".", "exp", "(", "lx", ")", ".", "sum", "(", "0", ")", "/", "numpy", ".", "exp", "(", "lx", ")", ".", "sum", "(", ")", ")", "assert", "similar", "(", "result", ",", "conventional", ")", "return", "result" ]
28.0625
0.021552
[ "def nsum0(lx):\n", " \"\"\"\n", " Accepts log-values as input, exponentiates them, sums down the rows\n", " (first dimension), normalizes and returns the result.\n", " Handles underflow by rescaling so that the largest values is exactly 1.0.\n", " \"\"\"\n", " lx = numpy.asarray(lx)\n", " base = lx.max()\n", " x = numpy.exp(lx - base)\n", " ssum = x.sum(0)\n", " result = ssum / ssum.sum()\n", "\n", " conventional = (numpy.exp(lx).sum(0) / numpy.exp(lx).sum())\n", " assert similar(result, conventional)\n", "\n", " return result" ]
[ 0, 0.16666666666666666, 0, 0, 0, 0, 0.04, 0.05555555555555555, 0.037037037037037035, 0.05555555555555555, 0.034482758620689655, 0, 0.016129032258064516, 0.02564102564102564, 0, 0.13333333333333333 ]
16
0.035275
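A small self-contained sketch of the same log-domain normalization trick (the sample values are illustrative, and numpy is imported explicitly so the snippet stands alone):

import numpy as np

lx = np.log(np.array([[1., 2.], [3., 4.]]))  # log-domain inputs
base = lx.max()                              # largest log-value
x = np.exp(lx - base)                        # largest entry becomes exactly 1.0
result = x.sum(0) / x.sum()                  # sum down the rows, then normalize
print(result)                                # approximately [0.4 0.6]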
def unblock_all(self): """ Unblock all emitters in this group. """ self.unblock() for em in self._emitters.values(): em.unblock()
[ "def", "unblock_all", "(", "self", ")", ":", "self", ".", "unblock", "(", ")", "for", "em", "in", "self", ".", "_emitters", ".", "values", "(", ")", ":", "em", ".", "unblock", "(", ")" ]
28
0.011561
[ "def unblock_all(self):\n", " \"\"\" Unblock all emitters in this group.\n", " \"\"\"\n", " self.unblock()\n", " for em in self._emitters.values():\n", " em.unblock()" ]
[ 0, 0.020833333333333332, 0, 0, 0, 0.041666666666666664 ]
6
0.010417
def version(self) -> Optional[str]:
        """
        Get the HTTP version.
        """
        if self._version is None:
            self._version = self._parser.get_http_version()
        return self._version
[ "def", "version", "(", "self", ")", "->", "Optional", "[", "str", "]", ":", "if", "self", ".", "_version", "is", "None", ":", "self", ".", "_version", "=", "self", ".", "_parser", ".", "get_http_version", "(", ")", "return", "self", ".", "_version" ]
27.857143
0.00995
[ "def version(self) -> Optional[str]:\n", " \"\"\"\n", " 获取 http 版本\n", " \"\"\"\n", " if self._version is None:\n", " self._version = self._parser.get_http_version()\n", " return self._version" ]
[ 0, 0.08333333333333333, 0, 0, 0, 0, 0.03571428571428571 ]
7
0.017007
def _2ndDerivInt(x,y,z,dens,densDeriv,b2,c2,i,j,glx=None,glw=None): """Integral that gives the 2nd derivative of the potential in x,y,z""" def integrand(s): t= 1/s**2.-1. m= numpy.sqrt(x**2./(1.+t)+y**2./(b2+t)+z**2./(c2+t)) return (densDeriv(m) *(x/(1.+t)*(i==0)+y/(b2+t)*(i==1)+z/(c2+t)*(i==2)) *(x/(1.+t)*(j==0)+y/(b2+t)*(j==1)+z/(c2+t)*(j==2))/m\ +dens(m)*(i==j)*((1./(1.+t)*(i==0)+1./(b2+t)*(i==1)+1./(c2+t)*(i==2))))\ /numpy.sqrt((1.+(b2-1.)*s**2.)*(1.+(c2-1.)*s**2.)) if glx is None: return integrate.quad(integrand,0.,1.)[0] else: return numpy.sum(glw*integrand(glx))
[ "def", "_2ndDerivInt", "(", "x", ",", "y", ",", "z", ",", "dens", ",", "densDeriv", ",", "b2", ",", "c2", ",", "i", ",", "j", ",", "glx", "=", "None", ",", "glw", "=", "None", ")", ":", "def", "integrand", "(", "s", ")", ":", "t", "=", "1", "/", "s", "**", "2.", "-", "1.", "m", "=", "numpy", ".", "sqrt", "(", "x", "**", "2.", "/", "(", "1.", "+", "t", ")", "+", "y", "**", "2.", "/", "(", "b2", "+", "t", ")", "+", "z", "**", "2.", "/", "(", "c2", "+", "t", ")", ")", "return", "(", "densDeriv", "(", "m", ")", "*", "(", "x", "/", "(", "1.", "+", "t", ")", "*", "(", "i", "==", "0", ")", "+", "y", "/", "(", "b2", "+", "t", ")", "*", "(", "i", "==", "1", ")", "+", "z", "/", "(", "c2", "+", "t", ")", "*", "(", "i", "==", "2", ")", ")", "*", "(", "x", "/", "(", "1.", "+", "t", ")", "*", "(", "j", "==", "0", ")", "+", "y", "/", "(", "b2", "+", "t", ")", "*", "(", "j", "==", "1", ")", "+", "z", "/", "(", "c2", "+", "t", ")", "*", "(", "j", "==", "2", ")", ")", "/", "m", "+", "dens", "(", "m", ")", "*", "(", "i", "==", "j", ")", "*", "(", "(", "1.", "/", "(", "1.", "+", "t", ")", "*", "(", "i", "==", "0", ")", "+", "1.", "/", "(", "b2", "+", "t", ")", "*", "(", "i", "==", "1", ")", "+", "1.", "/", "(", "c2", "+", "t", ")", "*", "(", "i", "==", "2", ")", ")", ")", ")", "/", "numpy", ".", "sqrt", "(", "(", "1.", "+", "(", "b2", "-", "1.", ")", "*", "s", "**", "2.", ")", "*", "(", "1.", "+", "(", "c2", "-", "1.", ")", "*", "s", "**", "2.", ")", ")", "if", "glx", "is", "None", ":", "return", "integrate", ".", "quad", "(", "integrand", ",", "0.", ",", "1.", ")", "[", "0", "]", "else", ":", "return", "numpy", ".", "sum", "(", "glw", "*", "integrand", "(", "glx", ")", ")" ]
49.285714
0.046942
[ "def _2ndDerivInt(x,y,z,dens,densDeriv,b2,c2,i,j,glx=None,glw=None):\n", " \"\"\"Integral that gives the 2nd derivative of the potential in x,y,z\"\"\"\n", " def integrand(s):\n", " t= 1/s**2.-1.\n", " m= numpy.sqrt(x**2./(1.+t)+y**2./(b2+t)+z**2./(c2+t))\n", " return (densDeriv(m)\n", " *(x/(1.+t)*(i==0)+y/(b2+t)*(i==1)+z/(c2+t)*(i==2))\n", " *(x/(1.+t)*(j==0)+y/(b2+t)*(j==1)+z/(c2+t)*(j==2))/m\\\n", " +dens(m)*(i==j)*((1./(1.+t)*(i==0)+1./(b2+t)*(i==1)+1./(c2+t)*(i==2))))\\\n", " /numpy.sqrt((1.+(b2-1.)*s**2.)*(1.+(c2-1.)*s**2.))\n", " if glx is None:\n", " return integrate.quad(integrand,0.,1.)[0]\n", " else:\n", " return numpy.sum(glw*integrand(glx))" ]
[ 0.14705882352941177, 0, 0, 0.045454545454545456, 0.016129032258064516, 0, 0.05970149253731343, 0.07142857142857142, 0.07526881720430108, 0.028169014084507043, 0, 0.04, 0, 0.022727272727272728 ]
14
0.036138
def set_marginal_histogram_title(ax, fmt, color, label=None, rotated=False): """ Sets the title of the marginal histograms. Parameters ---------- ax : Axes The `Axes` instance for the plot. fmt : str The string to add to the title. color : str The color of the text to add to the title. label : str If title does not exist, then include label at beginning of the string. rotated : bool If `True` then rotate the text 270 degrees for sideways title. """ # get rotation angle of the title rotation = 270 if rotated else 0 # get how much to displace title on axes xscale = 1.05 if rotated else 0.0 if rotated: yscale = 1.0 elif len(ax.get_figure().axes) > 1: yscale = 1.15 else: yscale = 1.05 # get class that packs text boxes vertical or horizonitally packer_class = offsetbox.VPacker if rotated else offsetbox.HPacker # if no title exists if not hasattr(ax, "title_boxes"): # create a text box title = "{} = {}".format(label, fmt) tbox1 = offsetbox.TextArea( title, textprops=dict(color=color, size=15, rotation=rotation, ha='left', va='bottom')) # save a list of text boxes as attribute for later ax.title_boxes = [tbox1] # pack text boxes ybox = packer_class(children=ax.title_boxes, align="bottom", pad=0, sep=5) # else append existing title else: # delete old title ax.title_anchor.remove() # add new text box to list tbox1 = offsetbox.TextArea( " {}".format(fmt), textprops=dict(color=color, size=15, rotation=rotation, ha='left', va='bottom')) ax.title_boxes = ax.title_boxes + [tbox1] # pack text boxes ybox = packer_class(children=ax.title_boxes, align="bottom", pad=0, sep=5) # add new title and keep reference to instance as an attribute anchored_ybox = offsetbox.AnchoredOffsetbox( loc=2, child=ybox, pad=0., frameon=False, bbox_to_anchor=(xscale, yscale), bbox_transform=ax.transAxes, borderpad=0.) ax.title_anchor = ax.add_artist(anchored_ybox)
[ "def", "set_marginal_histogram_title", "(", "ax", ",", "fmt", ",", "color", ",", "label", "=", "None", ",", "rotated", "=", "False", ")", ":", "# get rotation angle of the title", "rotation", "=", "270", "if", "rotated", "else", "0", "# get how much to displace title on axes", "xscale", "=", "1.05", "if", "rotated", "else", "0.0", "if", "rotated", ":", "yscale", "=", "1.0", "elif", "len", "(", "ax", ".", "get_figure", "(", ")", ".", "axes", ")", ">", "1", ":", "yscale", "=", "1.15", "else", ":", "yscale", "=", "1.05", "# get class that packs text boxes vertical or horizonitally", "packer_class", "=", "offsetbox", ".", "VPacker", "if", "rotated", "else", "offsetbox", ".", "HPacker", "# if no title exists", "if", "not", "hasattr", "(", "ax", ",", "\"title_boxes\"", ")", ":", "# create a text box", "title", "=", "\"{} = {}\"", ".", "format", "(", "label", ",", "fmt", ")", "tbox1", "=", "offsetbox", ".", "TextArea", "(", "title", ",", "textprops", "=", "dict", "(", "color", "=", "color", ",", "size", "=", "15", ",", "rotation", "=", "rotation", ",", "ha", "=", "'left'", ",", "va", "=", "'bottom'", ")", ")", "# save a list of text boxes as attribute for later", "ax", ".", "title_boxes", "=", "[", "tbox1", "]", "# pack text boxes", "ybox", "=", "packer_class", "(", "children", "=", "ax", ".", "title_boxes", ",", "align", "=", "\"bottom\"", ",", "pad", "=", "0", ",", "sep", "=", "5", ")", "# else append existing title", "else", ":", "# delete old title", "ax", ".", "title_anchor", ".", "remove", "(", ")", "# add new text box to list", "tbox1", "=", "offsetbox", ".", "TextArea", "(", "\" {}\"", ".", "format", "(", "fmt", ")", ",", "textprops", "=", "dict", "(", "color", "=", "color", ",", "size", "=", "15", ",", "rotation", "=", "rotation", ",", "ha", "=", "'left'", ",", "va", "=", "'bottom'", ")", ")", "ax", ".", "title_boxes", "=", "ax", ".", "title_boxes", "+", "[", "tbox1", "]", "# pack text boxes", "ybox", "=", "packer_class", "(", "children", "=", "ax", ".", "title_boxes", ",", "align", "=", "\"bottom\"", ",", "pad", "=", "0", ",", "sep", "=", "5", ")", "# add new title and keep reference to instance as an attribute", "anchored_ybox", "=", "offsetbox", ".", "AnchoredOffsetbox", "(", "loc", "=", "2", ",", "child", "=", "ybox", ",", "pad", "=", "0.", ",", "frameon", "=", "False", ",", "bbox_to_anchor", "=", "(", "xscale", ",", "yscale", ")", ",", "bbox_transform", "=", "ax", ".", "transAxes", ",", "borderpad", "=", "0.", ")", "ax", ".", "title_anchor", "=", "ax", ".", "add_artist", "(", "anchored_ybox", ")" ]
32.5
0.000415
[ "def set_marginal_histogram_title(ax, fmt, color, label=None, rotated=False):\n", " \"\"\" Sets the title of the marginal histograms.\n", "\n", " Parameters\n", " ----------\n", " ax : Axes\n", " The `Axes` instance for the plot.\n", " fmt : str\n", " The string to add to the title.\n", " color : str\n", " The color of the text to add to the title.\n", " label : str\n", " If title does not exist, then include label at beginning of the string.\n", " rotated : bool\n", " If `True` then rotate the text 270 degrees for sideways title.\n", " \"\"\"\n", "\n", " # get rotation angle of the title\n", " rotation = 270 if rotated else 0\n", "\n", " # get how much to displace title on axes\n", " xscale = 1.05 if rotated else 0.0\n", " if rotated:\n", " yscale = 1.0\n", " elif len(ax.get_figure().axes) > 1:\n", " yscale = 1.15\n", " else:\n", " yscale = 1.05\n", "\n", " # get class that packs text boxes vertical or horizonitally\n", " packer_class = offsetbox.VPacker if rotated else offsetbox.HPacker\n", "\n", " # if no title exists\n", " if not hasattr(ax, \"title_boxes\"):\n", "\n", " # create a text box\n", " title = \"{} = {}\".format(label, fmt)\n", " tbox1 = offsetbox.TextArea(\n", " title,\n", " textprops=dict(color=color, size=15, rotation=rotation,\n", " ha='left', va='bottom'))\n", "\n", " # save a list of text boxes as attribute for later\n", " ax.title_boxes = [tbox1]\n", "\n", " # pack text boxes\n", " ybox = packer_class(children=ax.title_boxes,\n", " align=\"bottom\", pad=0, sep=5)\n", "\n", " # else append existing title\n", " else:\n", "\n", " # delete old title\n", " ax.title_anchor.remove()\n", "\n", " # add new text box to list\n", " tbox1 = offsetbox.TextArea(\n", " \" {}\".format(fmt),\n", " textprops=dict(color=color, size=15, rotation=rotation,\n", " ha='left', va='bottom'))\n", " ax.title_boxes = ax.title_boxes + [tbox1]\n", "\n", " # pack text boxes\n", " ybox = packer_class(children=ax.title_boxes,\n", " align=\"bottom\", pad=0, sep=5)\n", "\n", " # add new title and keep reference to instance as an attribute\n", " anchored_ybox = offsetbox.AnchoredOffsetbox(\n", " loc=2, child=ybox, pad=0.,\n", " frameon=False, bbox_to_anchor=(xscale, yscale),\n", " bbox_transform=ax.transAxes, borderpad=0.)\n", " ax.title_anchor = ax.add_artist(anchored_ybox)" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.02 ]
72
0.000278
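A minimal usage sketch for the title helper above (it only assumes matplotlib; the values, colors and label are illustrative):

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.hist([1, 2, 2, 3, 3, 3], bins=3)
set_marginal_histogram_title(ax, "2.33", "blue", label="x")  # first call creates the title
set_marginal_histogram_title(ax, "2.17", "red")              # later calls append to it
fig.savefig("marginal.png")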
def to_str(string): """ Return the given string (either byte string or Unicode string) converted to native-str, that is, a byte string on Python 2, or a Unicode string on Python 3. Return ``None`` if ``string`` is ``None``. :param str string: the string to convert to native-str :rtype: native-str """ if string is None: return None if isinstance(string, str): return string if PY2: return string.encode("utf-8") return string.decode("utf-8")
[ "def", "to_str", "(", "string", ")", ":", "if", "string", "is", "None", ":", "return", "None", "if", "isinstance", "(", "string", ",", "str", ")", ":", "return", "string", "if", "PY2", ":", "return", "string", ".", "encode", "(", "\"utf-8\"", ")", "return", "string", ".", "decode", "(", "\"utf-8\"", ")" ]
27.722222
0.001938
[ "def to_str(string):\n", " \"\"\"\n", " Return the given string (either byte string or Unicode string)\n", " converted to native-str, that is,\n", " a byte string on Python 2, or a Unicode string on Python 3.\n", "\n", " Return ``None`` if ``string`` is ``None``.\n", "\n", " :param str string: the string to convert to native-str\n", " :rtype: native-str\n", " \"\"\"\n", " if string is None:\n", " return None\n", " if isinstance(string, str):\n", " return string\n", " if PY2:\n", " return string.encode(\"utf-8\")\n", " return string.decode(\"utf-8\")" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.030303030303030304 ]
18
0.001684
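A brief usage sketch of to_str on Python 3, where PY2 is falsy (the byte string below is just an illustration):

print(to_str(None))                         # None
print(to_str("caf\u00e9"))                  # café (already a str, returned unchanged)
print(to_str("caf\u00e9".encode("utf-8")))  # café (bytes decoded with UTF-8)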
def attach_team(context, id, team_id): """attach_team(context, id, team_id) Attach a team to a topic. >>> dcictl topic-attach-team [OPTIONS] :param string id: ID of the topic to attach to [required] :param string team_id: ID of the team to attach to this topic [required] """ team_id = team_id or identity.my_team_id(context) result = topic.attach_team(context, id=id, team_id=team_id) utils.format_output(result, context.format)
[ "def", "attach_team", "(", "context", ",", "id", ",", "team_id", ")", ":", "team_id", "=", "team_id", "or", "identity", ".", "my_team_id", "(", "context", ")", "result", "=", "topic", ".", "attach_team", "(", "context", ",", "id", "=", "id", ",", "team_id", "=", "team_id", ")", "utils", ".", "format_output", "(", "result", ",", "context", ".", "format", ")" ]
35.076923
0.002137
[ "def attach_team(context, id, team_id):\n", " \"\"\"attach_team(context, id, team_id)\n", "\n", " Attach a team to a topic.\n", "\n", " >>> dcictl topic-attach-team [OPTIONS]\n", "\n", " :param string id: ID of the topic to attach to [required]\n", " :param string team_id: ID of the team to attach to this topic [required]\n", " \"\"\"\n", " team_id = team_id or identity.my_team_id(context)\n", " result = topic.attach_team(context, id=id, team_id=team_id)\n", " utils.format_output(result, context.format)" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.02127659574468085 ]
13
0.001637
def visit_Div(self, node: AST, dfltChaining: bool = True) -> str: """Return division sign.""" return '/' if self.compact else ' / '
[ "def", "visit_Div", "(", "self", ",", "node", ":", "AST", ",", "dfltChaining", ":", "bool", "=", "True", ")", "->", "str", ":", "return", "'/'", "if", "self", ".", "compact", "else", "' / '" ]
48.333333
0.013605
[ "def visit_Div(self, node: AST, dfltChaining: bool = True) -> str:\n", " \"\"\"Return division sign.\"\"\"\n", " return '/' if self.compact else ' / '" ]
[ 0, 0.027777777777777776, 0.022222222222222223 ]
3
0.016667
def flush(self): """Forces a flush from the internal queue to the server""" queue = self.queue size = queue.qsize() queue.join() self.log.debug('successfully flushed %s items.', size)
[ "def", "flush", "(", "self", ")", ":", "queue", "=", "self", ".", "queue", "size", "=", "queue", ".", "qsize", "(", ")", "queue", ".", "join", "(", ")", "self", ".", "log", ".", "debug", "(", "'successfully flushed %s items.'", ",", "size", ")" ]
36.333333
0.008969
[ "def flush(self):\n", " \"\"\"Forces a flush from the internal queue to the server\"\"\"\n", " queue = self.queue\n", " size = queue.qsize()\n", " queue.join()\n", " self.log.debug('successfully flushed %s items.', size)" ]
[ 0, 0.014925373134328358, 0, 0, 0, 0.016129032258064516 ]
6
0.005176
def hugoniot_t_single(rho, rho0, c0, s, gamma0, q, theta0, n, mass,
                      three_r=3. * constants.R, t_ref=300., c_v=0.):
    """
    internal function to calculate temperature along Hugoniot

    :param rho: density in g/cm^3
    :param rho0: density at 1 bar in g/cm^3
    :param c0: velocity at 1 bar in km/s
    :param s: slope of the velocity change
    :param gamma0: Gruneisen parameter at 1 bar
    :param q: logarithmic derivative of Gruneisen parameter
    :param theta0: Debye temperature in K
    :param n: number of elements in a chemical formula
    :param mass: molar mass in gram
    :param three_r: 3 times gas constant.
        Jamieson modified this value to compensate for mismatches
    :param t_ref: reference temperature, 300 K
    :param c_v: heat capacity, see Jamieson 1983 for detail
    :return: temperature along Hugoniot
    """
    eta = 1. - rho0 / rho
    if eta == 0.0:
        return 300.
    threenk = three_r / mass * n  # [J/mol/K] / [g/mol] = [J/g/K]
    k = [rho0, c0, s, gamma0, q, theta0 / 1.e3]
    t_h = odeint(_dT_h_delta, t_ref / 1.e3, [0., eta],
                 args=(k, threenk, c_v), full_output=1)
    temp_h = np.squeeze(t_h[0][1])
    return temp_h * 1.e3
[ "def", "hugoniot_t_single", "(", "rho", ",", "rho0", ",", "c0", ",", "s", ",", "gamma0", ",", "q", ",", "theta0", ",", "n", ",", "mass", ",", "three_r", "=", "3.", "*", "constants", ".", "R", ",", "t_ref", "=", "300.", ",", "c_v", "=", "0.", ")", ":", "eta", "=", "1.", "-", "rho0", "/", "rho", "if", "eta", "==", "0.0", ":", "return", "300.", "threenk", "=", "three_r", "/", "mass", "*", "n", "# [J/mol/K] / [g/mol] = [J/g/K]", "k", "=", "[", "rho0", ",", "c0", ",", "s", ",", "gamma0", ",", "q", ",", "theta0", "/", "1.e3", "]", "t_h", "=", "odeint", "(", "_dT_h_delta", ",", "t_ref", "/", "1.e3", ",", "[", "0.", ",", "eta", "]", ",", "args", "=", "(", "k", ",", "threenk", ",", "c_v", ")", ",", "full_output", "=", "1", ")", "temp_h", "=", "np", ".", "squeeze", "(", "t_h", "[", "0", "]", "[", "1", "]", ")", "return", "temp_h", "*", "1.e3" ]
41.103448
0.00082
[ "def hugoniot_t_single(rho, rho0, c0, s, gamma0, q, theta0, n, mass,\n", " three_r=3. * constants.R, t_ref=300., c_v=0.):\n", " \"\"\"\n", " internal function to calculate pressure along Hugoniot\n", "\n", " :param rho: density in g/cm^3\n", " :param rho0: density at 1 bar in g/cm^3\n", " :param c0: velocity at 1 bar in km/s\n", " :param s: slope of the velocity change\n", " :param gamma0: Gruneisen parameter at 1 bar\n", " :param q: logarithmic derivative of Gruneisen parameter\n", " :param theta0: Debye temperature in K\n", " :param n: number of elements in a chemical formula\n", " :param mass: molar mass in gram\n", " :param three_r: 3 times gas constant.\n", " Jamieson modified this value to compensate for mismatches\n", " :param t_ref: reference temperature, 300 K\n", " :param c_v: heat capacity, see Jamieson 1983 for detail\n", " :return: temperature along hugoniot\n", " \"\"\"\n", " eta = 1. - rho0 / rho\n", " if eta == 0.0:\n", " return 300.\n", " threenk = three_r / mass * n # [J/mol/K] / [g/mol] = [J/g/K]\n", " k = [rho0, c0, s, gamma0, q, theta0 / 1.e3]\n", " t_h = odeint(_dT_h_delta, t_ref / 1.e3, [0., eta],\n", " args=(k, threenk, c_v), full_output=1)\n", " temp_h = np.squeeze(t_h[0][1])\n", " return temp_h * 1.e3" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.041666666666666664 ]
29
0.001437
def print_plugin_list(plugins: Dict[str, pkg_resources.EntryPoint]): """ Prints all registered plugins and checks if they can be loaded or not. :param plugins: plugins :type plugins: Dict[str, ~pkg_resources.EntryPoint] """ for trigger, entry_point in plugins.items(): try: plugin_class = entry_point.load() version = str(plugin_class._info.version) print( f"{trigger} (ok)\n" f" {version}" ) except Exception: print( f"{trigger} (failed)" )
[ "def", "print_plugin_list", "(", "plugins", ":", "Dict", "[", "str", ",", "pkg_resources", ".", "EntryPoint", "]", ")", ":", "for", "trigger", ",", "entry_point", "in", "plugins", ".", "items", "(", ")", ":", "try", ":", "plugin_class", "=", "entry_point", ".", "load", "(", ")", "version", "=", "str", "(", "plugin_class", ".", "_info", ".", "version", ")", "print", "(", "f\"{trigger} (ok)\\n\"", "f\" {version}\"", ")", "except", "Exception", ":", "print", "(", "f\"{trigger} (failed)\"", ")" ]
30.894737
0.001653
[ "def print_plugin_list(plugins: Dict[str, pkg_resources.EntryPoint]):\n", " \"\"\"\n", " Prints all registered plugins and checks if they can be loaded or not.\n", "\n", " :param plugins: plugins\n", " :type plugins: Dict[str, ~pkg_resources.EntryPoint]\n", " \"\"\"\n", " for trigger, entry_point in plugins.items():\n", " try:\n", " plugin_class = entry_point.load()\n", " version = str(plugin_class._info.version)\n", " print(\n", " f\"{trigger} (ok)\\n\"\n", " f\" {version}\"\n", " )\n", " except Exception:\n", " print(\n", " f\"{trigger} (failed)\"\n", " )" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.07692307692307693 ]
19
0.004049
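A usage sketch that builds the expected trigger-to-entry-point mapping from an entry-point group (the group name "example.plugins" is illustrative):

import pkg_resources

plugins = {
    ep.name: ep
    for ep in pkg_resources.iter_entry_points("example.plugins")
}
print_plugin_list(plugins)  # prints "<trigger> (ok)" plus a version, or "<trigger> (failed)"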
def scan(self, data, part): """Scan a string. Parameters ---------- data : `str` String to scan. part : `bool` True if data is partial. Returns ------- `generator` of (`str` or `markovchain.scanner.Scanner.END`) Token generator. """ if not self.end_chars: yield from data self.start = self.start or bool(data) self.end = False else: for char in data: if char in self.end_chars: if not self.start: continue self.end = True else: if self.end: yield self.END self.end = False self.start = True yield char if not part and self.start: if not self.end and self.default_end is not None: yield self.default_end yield self.END self.reset()
[ "def", "scan", "(", "self", ",", "data", ",", "part", ")", ":", "if", "not", "self", ".", "end_chars", ":", "yield", "from", "data", "self", ".", "start", "=", "self", ".", "start", "or", "bool", "(", "data", ")", "self", ".", "end", "=", "False", "else", ":", "for", "char", "in", "data", ":", "if", "char", "in", "self", ".", "end_chars", ":", "if", "not", "self", ".", "start", ":", "continue", "self", ".", "end", "=", "True", "else", ":", "if", "self", ".", "end", ":", "yield", "self", ".", "END", "self", ".", "end", "=", "False", "self", ".", "start", "=", "True", "yield", "char", "if", "not", "part", "and", "self", ".", "start", ":", "if", "not", "self", ".", "end", "and", "self", ".", "default_end", "is", "not", "None", ":", "yield", "self", ".", "default_end", "yield", "self", ".", "END", "self", ".", "reset", "(", ")" ]
27.810811
0.001878
[ "def scan(self, data, part):\n", " \"\"\"Scan a string.\n", "\n", " Parameters\n", " ----------\n", " data : `str`\n", " String to scan.\n", " part : `bool`\n", " True if data is partial.\n", "\n", " Returns\n", " -------\n", " `generator` of (`str` or `markovchain.scanner.Scanner.END`)\n", " Token generator.\n", " \"\"\"\n", " if not self.end_chars:\n", " yield from data\n", " self.start = self.start or bool(data)\n", " self.end = False\n", " else:\n", " for char in data:\n", " if char in self.end_chars:\n", " if not self.start:\n", " continue\n", " self.end = True\n", " else:\n", " if self.end:\n", " yield self.END\n", " self.end = False\n", " self.start = True\n", " yield char\n", "\n", " if not part and self.start:\n", " if not self.end and self.default_end is not None:\n", " yield self.default_end\n", " yield self.END\n", " self.reset()" ]
[ 0, 0.038461538461538464, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.041666666666666664 ]
37
0.002166
def is_app_folder(self, folder):
        """
        checks if a folder contains ``config.gradle_plugin`` in its build.gradle
        """
        with open('%s/%s/build.gradle' % (self.path, folder)) as f:
            for line in f.readlines():
                if config.gradle_plugin in line:
                    return True
        return False
[ "def", "is_app_folder", "(", "self", ",", "folder", ")", ":", "with", "open", "(", "'%s/%s/build.gradle'", "%", "(", "self", ".", "path", ",", "folder", ")", ")", "as", "f", ":", "for", "line", "in", "f", ".", "readlines", "(", ")", ":", "if", "config", ".", "gradle_plugin", "in", "line", ":", "return", "True", "return", "False" ]
26.777778
0.016064
[ "def is_app_folder(self, folder):\n", " \"\"\"\n", " checks if a folder \n", " \"\"\"\n", " with open('%s/%s/build.gradle' % (self.path, folder)) as f:\n", " for line in f.readlines():\n", " if config.gradle_plugin in line:\n", " return True\n", " return False" ]
[ 0, 0, 0.041666666666666664, 0, 0, 0.030303030303030304, 0, 0.045454545454545456, 0.0625 ]
9
0.019992
def cache_get(key):
    """
    Wrapper for ``cache.get``. The expiry time for the cache entry
    is stored with the entry. If the expiry time has passed, put the
    stale entry back into cache, and don't return it to trigger a
    fake cache miss.
    """
    packed = cache.get(_hashed_key(key))
    if packed is None:
        return None
    value, refresh_time, refreshed = packed
    if (time() > refresh_time) and not refreshed:
        cache_set(key, value, settings.CACHE_SET_DELAY_SECONDS, True)
        return None
    return value
[ "def", "cache_get", "(", "key", ")", ":", "packed", "=", "cache", ".", "get", "(", "_hashed_key", "(", "key", ")", ")", "if", "packed", "is", "None", ":", "return", "None", "value", ",", "refresh_time", ",", "refreshed", "=", "packed", "if", "(", "time", "(", ")", ">", "refresh_time", ")", "and", "not", "refreshed", ":", "cache_set", "(", "key", ",", "value", ",", "settings", ".", "CACHE_SET_DELAY_SECONDS", ",", "True", ")", "return", "None", "return", "value" ]
35.133333
0.001848
[ "def cache_get(key):\n", " \"\"\"\n", " Wrapper for ``cache.get``. The expiry time for the cache entry\n", " is stored with the entry. If the expiry time has past, put the\n", " stale entry back into cache, and don't return it to trigger a\n", " fake cache miss.\n", " \"\"\"\n", " packed = cache.get(_hashed_key(key))\n", " if packed is None:\n", " return None\n", " value, refresh_time, refreshed = packed\n", " if (time() > refresh_time) and not refreshed:\n", " cache_set(key, value, settings.CACHE_SET_DELAY_SECONDS, True)\n", " return None\n", " return value" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0625 ]
15
0.004167
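A self-contained sketch of the (value, refresh_time, refreshed) packing convention the wrapper relies on; the dict-backed cache and the fixed 60-second delay stand in for the real cache backend, key hashing and settings:

import time

_cache = {}  # stand-in for the real cache backend

def cache_set(key, value, timeout, refreshed=False):
    # pack the value together with its refresh deadline and a "refreshed" marker
    _cache[key] = (value, time.time() + timeout, refreshed)

def cache_get(key):
    packed = _cache.get(key)
    if packed is None:
        return None
    value, refresh_time, refreshed = packed
    if time.time() > refresh_time and not refreshed:
        cache_set(key, value, 60, True)  # put the stale value back once
        return None                      # report a miss so a caller refreshes it
    return value

cache_set("greeting", "hello", 300)
print(cache_get("greeting"))  # hello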
def _load_mnist_dataset(shape, path, name='mnist', url='http://yann.lecun.com/exdb/mnist/'): """A generic function to load mnist-like dataset. Parameters: ---------- shape : tuple The shape of digit images. path : str The path that the data is downloaded to. name : str The dataset name you want to use(the default is 'mnist'). url : str The url of dataset(the default is 'http://yann.lecun.com/exdb/mnist/'). """ path = os.path.join(path, name) # Define functions for loading mnist-like data's images and labels. # For convenience, they also download the requested files if needed. def load_mnist_images(path, filename): filepath = maybe_download_and_extract(filename, path, url) logging.info(filepath) # Read the inputs in Yann LeCun's binary format. with gzip.open(filepath, 'rb') as f: data = np.frombuffer(f.read(), np.uint8, offset=16) # The inputs are vectors now, we reshape them to monochrome 2D images, # following the shape convention: (examples, channels, rows, columns) data = data.reshape(shape) # The inputs come as bytes, we convert them to float32 in range [0,1]. # (Actually to range [0, 255/256], for compatibility to the version # provided at http://deeplearning.net/data/mnist/mnist.pkl.gz.) return data / np.float32(256) def load_mnist_labels(path, filename): filepath = maybe_download_and_extract(filename, path, url) # Read the labels in Yann LeCun's binary format. with gzip.open(filepath, 'rb') as f: data = np.frombuffer(f.read(), np.uint8, offset=8) # The labels are vectors of integers now, that's exactly what we want. return data # Download and read the training and test set images and labels. logging.info("Load or Download {0} > {1}".format(name.upper(), path)) X_train = load_mnist_images(path, 'train-images-idx3-ubyte.gz') y_train = load_mnist_labels(path, 'train-labels-idx1-ubyte.gz') X_test = load_mnist_images(path, 't10k-images-idx3-ubyte.gz') y_test = load_mnist_labels(path, 't10k-labels-idx1-ubyte.gz') # We reserve the last 10000 training examples for validation. X_train, X_val = X_train[:-10000], X_train[-10000:] y_train, y_val = y_train[:-10000], y_train[-10000:] # We just return all the arrays in order, as expected in main(). # (It doesn't matter how we do this as long as we can read them again.) X_train = np.asarray(X_train, dtype=np.float32) y_train = np.asarray(y_train, dtype=np.int32) X_val = np.asarray(X_val, dtype=np.float32) y_val = np.asarray(y_val, dtype=np.int32) X_test = np.asarray(X_test, dtype=np.float32) y_test = np.asarray(y_test, dtype=np.int32) return X_train, y_train, X_val, y_val, X_test, y_test
[ "def", "_load_mnist_dataset", "(", "shape", ",", "path", ",", "name", "=", "'mnist'", ",", "url", "=", "'http://yann.lecun.com/exdb/mnist/'", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "path", ",", "name", ")", "# Define functions for loading mnist-like data's images and labels.", "# For convenience, they also download the requested files if needed.", "def", "load_mnist_images", "(", "path", ",", "filename", ")", ":", "filepath", "=", "maybe_download_and_extract", "(", "filename", ",", "path", ",", "url", ")", "logging", ".", "info", "(", "filepath", ")", "# Read the inputs in Yann LeCun's binary format.", "with", "gzip", ".", "open", "(", "filepath", ",", "'rb'", ")", "as", "f", ":", "data", "=", "np", ".", "frombuffer", "(", "f", ".", "read", "(", ")", ",", "np", ".", "uint8", ",", "offset", "=", "16", ")", "# The inputs are vectors now, we reshape them to monochrome 2D images,", "# following the shape convention: (examples, channels, rows, columns)", "data", "=", "data", ".", "reshape", "(", "shape", ")", "# The inputs come as bytes, we convert them to float32 in range [0,1].", "# (Actually to range [0, 255/256], for compatibility to the version", "# provided at http://deeplearning.net/data/mnist/mnist.pkl.gz.)", "return", "data", "/", "np", ".", "float32", "(", "256", ")", "def", "load_mnist_labels", "(", "path", ",", "filename", ")", ":", "filepath", "=", "maybe_download_and_extract", "(", "filename", ",", "path", ",", "url", ")", "# Read the labels in Yann LeCun's binary format.", "with", "gzip", ".", "open", "(", "filepath", ",", "'rb'", ")", "as", "f", ":", "data", "=", "np", ".", "frombuffer", "(", "f", ".", "read", "(", ")", ",", "np", ".", "uint8", ",", "offset", "=", "8", ")", "# The labels are vectors of integers now, that's exactly what we want.", "return", "data", "# Download and read the training and test set images and labels.", "logging", ".", "info", "(", "\"Load or Download {0} > {1}\"", ".", "format", "(", "name", ".", "upper", "(", ")", ",", "path", ")", ")", "X_train", "=", "load_mnist_images", "(", "path", ",", "'train-images-idx3-ubyte.gz'", ")", "y_train", "=", "load_mnist_labels", "(", "path", ",", "'train-labels-idx1-ubyte.gz'", ")", "X_test", "=", "load_mnist_images", "(", "path", ",", "'t10k-images-idx3-ubyte.gz'", ")", "y_test", "=", "load_mnist_labels", "(", "path", ",", "'t10k-labels-idx1-ubyte.gz'", ")", "# We reserve the last 10000 training examples for validation.", "X_train", ",", "X_val", "=", "X_train", "[", ":", "-", "10000", "]", ",", "X_train", "[", "-", "10000", ":", "]", "y_train", ",", "y_val", "=", "y_train", "[", ":", "-", "10000", "]", ",", "y_train", "[", "-", "10000", ":", "]", "# We just return all the arrays in order, as expected in main().", "# (It doesn't matter how we do this as long as we can read them again.)", "X_train", "=", "np", ".", "asarray", "(", "X_train", ",", "dtype", "=", "np", ".", "float32", ")", "y_train", "=", "np", ".", "asarray", "(", "y_train", ",", "dtype", "=", "np", ".", "int32", ")", "X_val", "=", "np", ".", "asarray", "(", "X_val", ",", "dtype", "=", "np", ".", "float32", ")", "y_val", "=", "np", ".", "asarray", "(", "y_val", ",", "dtype", "=", "np", ".", "int32", ")", "X_test", "=", "np", ".", "asarray", "(", "X_test", ",", "dtype", "=", "np", ".", "float32", ")", "y_test", "=", "np", ".", "asarray", "(", "y_test", ",", "dtype", "=", "np", ".", "int32", ")", "return", "X_train", ",", "y_train", ",", "X_val", ",", "y_val", ",", "X_test", ",", "y_test" ]
46.377049
0.000692
[ "def _load_mnist_dataset(shape, path, name='mnist', url='http://yann.lecun.com/exdb/mnist/'):\n", " \"\"\"A generic function to load mnist-like dataset.\n", "\n", " Parameters:\n", " ----------\n", " shape : tuple\n", " The shape of digit images.\n", " path : str\n", " The path that the data is downloaded to.\n", " name : str\n", " The dataset name you want to use(the default is 'mnist').\n", " url : str\n", " The url of dataset(the default is 'http://yann.lecun.com/exdb/mnist/').\n", " \"\"\"\n", " path = os.path.join(path, name)\n", "\n", " # Define functions for loading mnist-like data's images and labels.\n", " # For convenience, they also download the requested files if needed.\n", " def load_mnist_images(path, filename):\n", " filepath = maybe_download_and_extract(filename, path, url)\n", "\n", " logging.info(filepath)\n", " # Read the inputs in Yann LeCun's binary format.\n", " with gzip.open(filepath, 'rb') as f:\n", " data = np.frombuffer(f.read(), np.uint8, offset=16)\n", " # The inputs are vectors now, we reshape them to monochrome 2D images,\n", " # following the shape convention: (examples, channels, rows, columns)\n", " data = data.reshape(shape)\n", " # The inputs come as bytes, we convert them to float32 in range [0,1].\n", " # (Actually to range [0, 255/256], for compatibility to the version\n", " # provided at http://deeplearning.net/data/mnist/mnist.pkl.gz.)\n", " return data / np.float32(256)\n", "\n", " def load_mnist_labels(path, filename):\n", " filepath = maybe_download_and_extract(filename, path, url)\n", " # Read the labels in Yann LeCun's binary format.\n", " with gzip.open(filepath, 'rb') as f:\n", " data = np.frombuffer(f.read(), np.uint8, offset=8)\n", " # The labels are vectors of integers now, that's exactly what we want.\n", " return data\n", "\n", " # Download and read the training and test set images and labels.\n", " logging.info(\"Load or Download {0} > {1}\".format(name.upper(), path))\n", " X_train = load_mnist_images(path, 'train-images-idx3-ubyte.gz')\n", " y_train = load_mnist_labels(path, 'train-labels-idx1-ubyte.gz')\n", " X_test = load_mnist_images(path, 't10k-images-idx3-ubyte.gz')\n", " y_test = load_mnist_labels(path, 't10k-labels-idx1-ubyte.gz')\n", "\n", " # We reserve the last 10000 training examples for validation.\n", " X_train, X_val = X_train[:-10000], X_train[-10000:]\n", " y_train, y_val = y_train[:-10000], y_train[-10000:]\n", "\n", " # We just return all the arrays in order, as expected in main().\n", " # (It doesn't matter how we do this as long as we can read them again.)\n", " X_train = np.asarray(X_train, dtype=np.float32)\n", " y_train = np.asarray(y_train, dtype=np.int32)\n", " X_val = np.asarray(X_val, dtype=np.float32)\n", " y_val = np.asarray(y_val, dtype=np.int32)\n", " X_test = np.asarray(X_test, dtype=np.float32)\n", " y_test = np.asarray(y_test, dtype=np.int32)\n", " return X_train, y_train, X_val, y_val, X_test, y_test" ]
[ 0.010752688172043012, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.017543859649122806 ]
61
0.000464
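An illustrative call; it downloads the four MNIST archives on first use, so it needs network access plus the module's maybe_download_and_extract helper, and the flat (-1, 784) shape is just one possible choice:

X_train, y_train, X_val, y_val, X_test, y_test = _load_mnist_dataset(
    shape=(-1, 784), path="data", name="mnist",
    url="http://yann.lecun.com/exdb/mnist/")
print(X_train.shape, y_train.shape)  # (50000, 784) (50000,)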
def add_method(function, klass, name=None): '''Add an existing function to a class as a method. Note: Consider using the extend decorator as a more readable alternative to using this function directly. Args: function: The function to be added to the class klass. klass: The class to which the new method will be added. name: An optional name for the new method. If omitted or None the original name of the function is used. Returns: The function argument unmodified. Raises: ValueError: If klass already has an attribute with the same name as the extension method. ''' # Should we be using functools.update_wrapper in here? if name is None: name = function_name(function) if hasattr(klass, name): raise ValueError("Cannot replace existing attribute with method " "'{name}'".format(name=name)) setattr(klass, name, function) return function
[ "def", "add_method", "(", "function", ",", "klass", ",", "name", "=", "None", ")", ":", "# Should we be using functools.update_wrapper in here?\r", "if", "name", "is", "None", ":", "name", "=", "function_name", "(", "function", ")", "if", "hasattr", "(", "klass", ",", "name", ")", ":", "raise", "ValueError", "(", "\"Cannot replace existing attribute with method \"", "\"'{name}'\"", ".", "format", "(", "name", "=", "name", ")", ")", "setattr", "(", "klass", ",", "name", ",", "function", ")", "return", "function" ]
34.586207
0.00097
[ "def add_method(function, klass, name=None):\r\n", " '''Add an existing function to a class as a method.\r\n", "\r\n", " Note: Consider using the extend decorator as a more readable alternative\r\n", " to using this function directly.\r\n", "\r\n", " Args:\r\n", " function: The function to be added to the class klass.\r\n", "\r\n", " klass: The class to which the new method will be added.\r\n", "\r\n", " name: An optional name for the new method. If omitted or None the\r\n", " original name of the function is used.\r\n", "\r\n", " Returns:\r\n", " The function argument unmodified.\r\n", "\r\n", " Raises:\r\n", " ValueError: If klass already has an attribute with the same name as the\r\n", " extension method.\r\n", " '''\r\n", " # Should we be using functools.update_wrapper in here?\r\n", " if name is None:\r\n", " name = function_name(function)\r\n", " if hasattr(klass, name):\r\n", " raise ValueError(\"Cannot replace existing attribute with method \"\r\n", " \"'{name}'\".format(name=name))\r\n", " setattr(klass, name, function)\r\n", " return function" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.05263157894736842 ]
29
0.001815
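A short usage sketch; the function_name stub below mirrors what the helper referenced above presumably does (reading __name__), so the snippet runs on its own next to add_method:

def function_name(f):
    return f.__name__  # stand-in for the helper used by add_method

class Greeter(object):
    pass

def greet(self, name):
    return "Hello, " + name

add_method(greet, Greeter)       # Greeter.greet is now available
print(Greeter().greet("world"))  # Hello, world
try:
    add_method(greet, Greeter)   # adding it twice hits the documented ValueError
except ValueError as exc:
    print(exc)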
def get_file(self, name, save_to, add_to_cache=True, force_refresh=False, _lock_exclusive=False): """Retrieves file identified by ``name``. The file is saved as ``save_to``. If ``add_to_cache`` is ``True``, the file is added to the local store. If ``force_refresh`` is ``True``, local cache is not examined if a remote store is configured. If a remote store is configured, but ``name`` does not contain a version, the local data store is not used, as we cannot guarantee that the version there is fresh. Local data store implemented in :class:`LocalDataStore` tries to not copy the entire file to ``save_to`` if possible, but instead uses hardlinking. Therefore you should not modify the file if you don't want to totally blow something. This method returns the full versioned name of the retrieved file. """ uname, version = split_name(name) lock = None if self.local_store: lock = self.lock_manager.lock_for(uname) if _lock_exclusive: lock.lock_exclusive() else: lock.lock_shared() else: add_to_cache = False t = time.time() logger.debug(' downloading %s', name) try: if not self.remote_store or (version is not None and not force_refresh): try: if self.local_store and self.local_store.exists(name): return self.local_store.get_file(name, save_to) except Exception: if self.remote_store: logger.warning("Error getting '%s' from local store", name, exc_info=True) else: raise if self.remote_store: if not _lock_exclusive and add_to_cache: if lock: lock.unlock() return self.get_file(name, save_to, add_to_cache, _lock_exclusive=True) vname = self.remote_store.get_file(name, save_to) if add_to_cache: self._add_to_cache(vname, save_to) return vname raise FiletrackerError("File not available: %s" % name) finally: if lock: lock.close() logger.debug(' processed %s in %.2fs', name, time.time() - t)
[ "def", "get_file", "(", "self", ",", "name", ",", "save_to", ",", "add_to_cache", "=", "True", ",", "force_refresh", "=", "False", ",", "_lock_exclusive", "=", "False", ")", ":", "uname", ",", "version", "=", "split_name", "(", "name", ")", "lock", "=", "None", "if", "self", ".", "local_store", ":", "lock", "=", "self", ".", "lock_manager", ".", "lock_for", "(", "uname", ")", "if", "_lock_exclusive", ":", "lock", ".", "lock_exclusive", "(", ")", "else", ":", "lock", ".", "lock_shared", "(", ")", "else", ":", "add_to_cache", "=", "False", "t", "=", "time", ".", "time", "(", ")", "logger", ".", "debug", "(", "' downloading %s'", ",", "name", ")", "try", ":", "if", "not", "self", ".", "remote_store", "or", "(", "version", "is", "not", "None", "and", "not", "force_refresh", ")", ":", "try", ":", "if", "self", ".", "local_store", "and", "self", ".", "local_store", ".", "exists", "(", "name", ")", ":", "return", "self", ".", "local_store", ".", "get_file", "(", "name", ",", "save_to", ")", "except", "Exception", ":", "if", "self", ".", "remote_store", ":", "logger", ".", "warning", "(", "\"Error getting '%s' from local store\"", ",", "name", ",", "exc_info", "=", "True", ")", "else", ":", "raise", "if", "self", ".", "remote_store", ":", "if", "not", "_lock_exclusive", "and", "add_to_cache", ":", "if", "lock", ":", "lock", ".", "unlock", "(", ")", "return", "self", ".", "get_file", "(", "name", ",", "save_to", ",", "add_to_cache", ",", "_lock_exclusive", "=", "True", ")", "vname", "=", "self", ".", "remote_store", ".", "get_file", "(", "name", ",", "save_to", ")", "if", "add_to_cache", ":", "self", ".", "_add_to_cache", "(", "vname", ",", "save_to", ")", "return", "vname", "raise", "FiletrackerError", "(", "\"File not available: %s\"", "%", "name", ")", "finally", ":", "if", "lock", ":", "lock", ".", "close", "(", ")", "logger", ".", "debug", "(", "' processed %s in %.2fs'", ",", "name", ",", "time", ".", "time", "(", ")", "-", "t", ")" ]
41.322581
0.001525
[ "def get_file(self, name, save_to, add_to_cache=True,\n", " force_refresh=False, _lock_exclusive=False):\n", " \"\"\"Retrieves file identified by ``name``.\n", "\n", " The file is saved as ``save_to``. If ``add_to_cache`` is ``True``,\n", " the file is added to the local store. If ``force_refresh`` is\n", " ``True``, local cache is not examined if a remote store is\n", " configured.\n", "\n", " If a remote store is configured, but ``name`` does not contain a\n", " version, the local data store is not used, as we cannot guarantee\n", " that the version there is fresh.\n", "\n", " Local data store implemented in :class:`LocalDataStore` tries to not\n", " copy the entire file to ``save_to`` if possible, but instead uses\n", " hardlinking. Therefore you should not modify the file if you don't\n", " want to totally blow something.\n", "\n", " This method returns the full versioned name of the retrieved file.\n", " \"\"\"\n", "\n", " uname, version = split_name(name)\n", "\n", " lock = None\n", " if self.local_store:\n", " lock = self.lock_manager.lock_for(uname)\n", " if _lock_exclusive:\n", " lock.lock_exclusive()\n", " else:\n", " lock.lock_shared()\n", " else:\n", " add_to_cache = False\n", "\n", " t = time.time()\n", " logger.debug(' downloading %s', name)\n", " try:\n", " if not self.remote_store or (version is not None\n", " and not force_refresh):\n", " try:\n", " if self.local_store and self.local_store.exists(name):\n", " return self.local_store.get_file(name, save_to)\n", " except Exception:\n", " if self.remote_store:\n", " logger.warning(\"Error getting '%s' from local store\",\n", " name, exc_info=True)\n", " else:\n", " raise\n", " if self.remote_store:\n", " if not _lock_exclusive and add_to_cache:\n", " if lock:\n", " lock.unlock()\n", " return self.get_file(name, save_to, add_to_cache,\n", " _lock_exclusive=True)\n", " vname = self.remote_store.get_file(name, save_to)\n", " if add_to_cache:\n", " self._add_to_cache(vname, save_to)\n", " return vname\n", " raise FiletrackerError(\"File not available: %s\" % name)\n", " finally:\n", " if lock:\n", " lock.close()\n", " logger.debug(' processed %s in %.2fs', name, time.time() - t)" ]
[ 0, 0.016129032258064516, 0.02, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.018867924528301886, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.013157894736842105 ]
62
0.001099
def usage(path): ''' Show in which disk the chunks are allocated. CLI Example: .. code-block:: bash salt '*' btrfs.usage /your/mountpoint ''' out = __salt__['cmd.run_all']("btrfs filesystem usage {0}".format(path)) salt.utils.fsutils._verify_run(out) ret = {} for section in out['stdout'].split("\n\n"): if section.startswith("Overall:\n"): ret['overall'] = _usage_overall(section) elif section.startswith("Unallocated:\n"): ret['unallocated'] = _usage_unallocated(section) else: ret.update(_usage_specific(section)) return ret
[ "def", "usage", "(", "path", ")", ":", "out", "=", "__salt__", "[", "'cmd.run_all'", "]", "(", "\"btrfs filesystem usage {0}\"", ".", "format", "(", "path", ")", ")", "salt", ".", "utils", ".", "fsutils", ".", "_verify_run", "(", "out", ")", "ret", "=", "{", "}", "for", "section", "in", "out", "[", "'stdout'", "]", ".", "split", "(", "\"\\n\\n\"", ")", ":", "if", "section", ".", "startswith", "(", "\"Overall:\\n\"", ")", ":", "ret", "[", "'overall'", "]", "=", "_usage_overall", "(", "section", ")", "elif", "section", ".", "startswith", "(", "\"Unallocated:\\n\"", ")", ":", "ret", "[", "'unallocated'", "]", "=", "_usage_unallocated", "(", "section", ")", "else", ":", "ret", ".", "update", "(", "_usage_specific", "(", "section", ")", ")", "return", "ret" ]
26.869565
0.001563
[ "def usage(path):\n", " '''\n", " Show in which disk the chunks are allocated.\n", "\n", " CLI Example:\n", "\n", " .. code-block:: bash\n", "\n", " salt '*' btrfs.usage /your/mountpoint\n", " '''\n", " out = __salt__['cmd.run_all'](\"btrfs filesystem usage {0}\".format(path))\n", " salt.utils.fsutils._verify_run(out)\n", "\n", " ret = {}\n", " for section in out['stdout'].split(\"\\n\\n\"):\n", " if section.startswith(\"Overall:\\n\"):\n", " ret['overall'] = _usage_overall(section)\n", " elif section.startswith(\"Unallocated:\\n\"):\n", " ret['unallocated'] = _usage_unallocated(section)\n", " else:\n", " ret.update(_usage_specific(section))\n", "\n", " return ret" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.07142857142857142 ]
23
0.003106
def ast2str(expr, level=0, names=None):
    """convert compiled ast to gene_reaction_rule str

    Parameters
    ----------
    expr : str
        string for a gene reaction rule, e.g. "a and b"
    level : int
        internal use only
    names : dict
        Dict where each element is a gene identifier and the value is the
        gene name. Use this to get a rule str which uses names instead. This
        should be done for display purposes only. All gene_reaction_rule
        strings which are computed with should use the id.

    Returns
    ------
    string
        The gene reaction rule
    """
    if isinstance(expr, Expression):
        return ast2str(expr.body, 0, names) \
            if hasattr(expr, "body") else ""
    elif isinstance(expr, Name):
        return names.get(expr.id, expr.id) if names else expr.id
    elif isinstance(expr, BoolOp):
        op = expr.op
        if isinstance(op, Or):
            str_exp = " or ".join(ast2str(i, level + 1, names)
                                  for i in expr.values)
        elif isinstance(op, And):
            str_exp = " and ".join(ast2str(i, level + 1, names)
                                   for i in expr.values)
        else:
            raise TypeError("unsupported operation " + op.__class__.__name__)
        return "(" + str_exp + ")" if level else str_exp
    elif expr is None:
        return ""
    else:
        raise TypeError("unsupported operation " + repr(expr))
[ "def", "ast2str", "(", "expr", ",", "level", "=", "0", ",", "names", "=", "None", ")", ":", "if", "isinstance", "(", "expr", ",", "Expression", ")", ":", "return", "ast2str", "(", "expr", ".", "body", ",", "0", ",", "names", ")", "if", "hasattr", "(", "expr", ",", "\"body\"", ")", "else", "\"\"", "elif", "isinstance", "(", "expr", ",", "Name", ")", ":", "return", "names", ".", "get", "(", "expr", ".", "id", ",", "expr", ".", "id", ")", "if", "names", "else", "expr", ".", "id", "elif", "isinstance", "(", "expr", ",", "BoolOp", ")", ":", "op", "=", "expr", ".", "op", "if", "isinstance", "(", "op", ",", "Or", ")", ":", "str_exp", "=", "\" or \"", ".", "join", "(", "ast2str", "(", "i", ",", "level", "+", "1", ",", "names", ")", "for", "i", "in", "expr", ".", "values", ")", "elif", "isinstance", "(", "op", ",", "And", ")", ":", "str_exp", "=", "\" and \"", ".", "join", "(", "ast2str", "(", "i", ",", "level", "+", "1", ",", "names", ")", "for", "i", "in", "expr", ".", "values", ")", "else", ":", "raise", "TypeError", "(", "\"unsupported operation \"", "+", "op", ".", "__class__", ".", "__name", ")", "return", "\"(\"", "+", "str_exp", "+", "\")\"", "if", "level", "else", "str_exp", "elif", "expr", "is", "None", ":", "return", "\"\"", "else", ":", "raise", "TypeError", "(", "\"unsupported operation \"", "+", "repr", "(", "expr", ")", ")" ]
35.5
0.000685
[ "def ast2str(expr, level=0, names=None):\n", " \"\"\"convert compiled ast to gene_reaction_rule str\n", "\n", " Parameters\n", " ----------\n", " expr : str\n", " string for a gene reaction rule, e.g \"a and b\"\n", " level : int\n", " internal use only\n", " names : dict\n", " Dict where each element id a gene identifier and the value is the\n", " gene name. Use this to get a rule str which uses names instead. This\n", " should be done for display purposes only. All gene_reaction_rule\n", " strings which are computed with should use the id.\n", "\n", " Returns\n", " ------\n", " string\n", " The gene reaction rule\n", " \"\"\"\n", " if isinstance(expr, Expression):\n", " return ast2str(expr.body, 0, names) \\\n", " if hasattr(expr, \"body\") else \"\"\n", " elif isinstance(expr, Name):\n", " return names.get(expr.id, expr.id) if names else expr.id\n", " elif isinstance(expr, BoolOp):\n", " op = expr.op\n", " if isinstance(op, Or):\n", " str_exp = \" or \".join(ast2str(i, level + 1, names)\n", " for i in expr.values)\n", " elif isinstance(op, And):\n", " str_exp = \" and \".join(ast2str(i, level + 1, names)\n", " for i in expr.values)\n", " else:\n", " raise TypeError(\"unsupported operation \" + op.__class__.__name)\n", " return \"(\" + str_exp + \")\" if level else str_exp\n", " elif expr is None:\n", " return \"\"\n", " else:\n", " raise TypeError(\"unsupported operation \" + repr(expr))" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.015873015873015872 ]
40
0.000397
def update_serial(self, new_serial): """Updates the serial number of a device. The "serial number" used with adb's `-s` arg is not necessarily the actual serial number. For remote devices, it could be a combination of host names and port numbers. This is used for when such identifier of remote devices changes during a test. For example, when a remote device reboots, it may come back with a different serial number. This is NOT meant for switching the object to represent another device. We intentionally did not make it a regular setter of the serial property so people don't accidentally call this without understanding the consequences. Args: new_serial: string, the new serial number for the same device. Raises: DeviceError: tries to update serial when any service is running. """ new_serial = str(new_serial) if self.has_active_service: raise DeviceError( self, 'Cannot change device serial number when there is service running.' ) if self._debug_tag == self.serial: self._debug_tag = new_serial self._serial = new_serial self.adb.serial = new_serial self.fastboot.serial = new_serial
[ "def", "update_serial", "(", "self", ",", "new_serial", ")", ":", "new_serial", "=", "str", "(", "new_serial", ")", "if", "self", ".", "has_active_service", ":", "raise", "DeviceError", "(", "self", ",", "'Cannot change device serial number when there is service running.'", ")", "if", "self", ".", "_debug_tag", "==", "self", ".", "serial", ":", "self", ".", "_debug_tag", "=", "new_serial", "self", ".", "_serial", "=", "new_serial", "self", ".", "adb", ".", "serial", "=", "new_serial", "self", ".", "fastboot", ".", "serial", "=", "new_serial" ]
38.735294
0.002222
[ "def update_serial(self, new_serial):\n", " \"\"\"Updates the serial number of a device.\n", "\n", " The \"serial number\" used with adb's `-s` arg is not necessarily the\n", " actual serial number. For remote devices, it could be a combination of\n", " host names and port numbers.\n", "\n", " This is used for when such identifier of remote devices changes during\n", " a test. For example, when a remote device reboots, it may come back\n", " with a different serial number.\n", "\n", " This is NOT meant for switching the object to represent another device.\n", "\n", " We intentionally did not make it a regular setter of the serial\n", " property so people don't accidentally call this without understanding\n", " the consequences.\n", "\n", " Args:\n", " new_serial: string, the new serial number for the same device.\n", "\n", " Raises:\n", " DeviceError: tries to update serial when any service is running.\n", " \"\"\"\n", " new_serial = str(new_serial)\n", " if self.has_active_service:\n", " raise DeviceError(\n", " self,\n", " 'Cannot change device serial number when there is service running.'\n", " )\n", " if self._debug_tag == self.serial:\n", " self._debug_tag = new_serial\n", " self._serial = new_serial\n", " self.adb.serial = new_serial\n", " self.fastboot.serial = new_serial" ]
[ 0, 0.02, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011904761904761904, 0, 0, 0, 0, 0, 0.024390243902439025 ]
34
0.001656
def find_store_dirs(cls): """ Returns the primary package directory and any additional ones from QUILT_PACKAGE_DIRS. """ store_dirs = [default_store_location()] extra_dirs_str = os.getenv('QUILT_PACKAGE_DIRS') if extra_dirs_str: store_dirs.extend(extra_dirs_str.split(':')) return store_dirs
[ "def", "find_store_dirs", "(", "cls", ")", ":", "store_dirs", "=", "[", "default_store_location", "(", ")", "]", "extra_dirs_str", "=", "os", ".", "getenv", "(", "'QUILT_PACKAGE_DIRS'", ")", "if", "extra_dirs_str", ":", "store_dirs", ".", "extend", "(", "extra_dirs_str", ".", "split", "(", "':'", ")", ")", "return", "store_dirs" ]
39
0.008357
[ "def find_store_dirs(cls):\n", " \"\"\"\n", " Returns the primary package directory and any additional ones from QUILT_PACKAGE_DIRS.\n", " \"\"\"\n", " store_dirs = [default_store_location()]\n", " extra_dirs_str = os.getenv('QUILT_PACKAGE_DIRS')\n", " if extra_dirs_str:\n", " store_dirs.extend(extra_dirs_str.split(':'))\n", " return store_dirs" ]
[ 0, 0.08333333333333333, 0.010526315789473684, 0, 0, 0, 0, 0, 0.04 ]
9
0.014873
def requests_admin(request, pk): """Table display of each request for a given product. Allows the given Page pk to refer to a direct parent of the ProductVariant model or be the ProductVariant model itself. This allows for the standard longclaw product modelling philosophy where ProductVariant refers to the actual product (in the case where there is only 1 variant) or to be variants of the product page. """ page = Page.objects.get(pk=pk).specific if hasattr(page, 'variants'): requests = ProductRequest.objects.filter( variant__in=page.variants.all() ) else: requests = ProductRequest.objects.filter(variant=page) return render( request, "productrequests/requests_admin.html", {'page': page, 'requests': requests} )
[ "def", "requests_admin", "(", "request", ",", "pk", ")", ":", "page", "=", "Page", ".", "objects", ".", "get", "(", "pk", "=", "pk", ")", ".", "specific", "if", "hasattr", "(", "page", ",", "'variants'", ")", ":", "requests", "=", "ProductRequest", ".", "objects", ".", "filter", "(", "variant__in", "=", "page", ".", "variants", ".", "all", "(", ")", ")", "else", ":", "requests", "=", "ProductRequest", ".", "objects", ".", "filter", "(", "variant", "=", "page", ")", "return", "render", "(", "request", ",", "\"productrequests/requests_admin.html\"", ",", "{", "'page'", ":", "page", ",", "'requests'", ":", "requests", "}", ")" ]
38.47619
0.001208
[ "def requests_admin(request, pk):\n", " \"\"\"Table display of each request for a given product.\n", "\n", " Allows the given Page pk to refer to a direct parent of\n", " the ProductVariant model or be the ProductVariant model itself.\n", " This allows for the standard longclaw product modelling philosophy where\n", " ProductVariant refers to the actual product (in the case where there is\n", " only 1 variant) or to be variants of the product page.\n", " \"\"\"\n", " page = Page.objects.get(pk=pk).specific\n", " if hasattr(page, 'variants'):\n", " requests = ProductRequest.objects.filter(\n", " variant__in=page.variants.all()\n", " )\n", " else:\n", " requests = ProductRequest.objects.filter(variant=page)\n", " return render(\n", " request,\n", " \"productrequests/requests_admin.html\",\n", " {'page': page, 'requests': requests}\n", " )" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.2 ]
21
0.009524
def get(self, master_id): """ Get a list of revisions by master ID :param master_id: :return: """ collection_name = self.request.headers.get("collection") self.client = BaseAsyncMotorDocument("%s_revisions" % collection_name) limit = self.get_query_argument("limit", 2) add_current_revision = self.get_arg_value_as_type("addCurrent", "false") show_history = self.get_arg_value_as_type("showHistory", "false") objects_processed = [] if isinstance(limit, unicode): limit = int(limit) objects = yield self.client.find({"master_id": master_id, "processed": False}, orderby="toa", order_by_direction=1, page=0, limit=20) # If this is a document that should have a revision and doesn't we # orchestratioin creation of the first one if len(objects) == 0: new_revision = yield self.__lazy_migration(master_id) if not new_revision: return if show_history: objects_processed = yield self.client.find({"master_id": master_id, "processed": True}, orderby="toa", order_by_direction=-1, page=0, limit=limit) elif add_current_revision: objects_processed = yield self.client.find({"master_id": master_id, "processed": True}, orderby="toa", order_by_direction=-1, page=0, limit=1) if len(objects_processed) > 0: objects_processed = objects_processed[::-1] objects_processed[-1]["current"] = True objects = objects_processed + objects self.write({ "count": len(objects), "results": objects })
[ "def", "get", "(", "self", ",", "master_id", ")", ":", "collection_name", "=", "self", ".", "request", ".", "headers", ".", "get", "(", "\"collection\"", ")", "self", ".", "client", "=", "BaseAsyncMotorDocument", "(", "\"%s_revisions\"", "%", "collection_name", ")", "limit", "=", "self", ".", "get_query_argument", "(", "\"limit\"", ",", "2", ")", "add_current_revision", "=", "self", ".", "get_arg_value_as_type", "(", "\"addCurrent\"", ",", "\"false\"", ")", "show_history", "=", "self", ".", "get_arg_value_as_type", "(", "\"showHistory\"", ",", "\"false\"", ")", "objects_processed", "=", "[", "]", "if", "isinstance", "(", "limit", ",", "unicode", ")", ":", "limit", "=", "int", "(", "limit", ")", "objects", "=", "yield", "self", ".", "client", ".", "find", "(", "{", "\"master_id\"", ":", "master_id", ",", "\"processed\"", ":", "False", "}", ",", "orderby", "=", "\"toa\"", ",", "order_by_direction", "=", "1", ",", "page", "=", "0", ",", "limit", "=", "20", ")", "# If this is a document that should have a revision and doesn't we", "# orchestratioin creation of the first one", "if", "len", "(", "objects", ")", "==", "0", ":", "new_revision", "=", "yield", "self", ".", "__lazy_migration", "(", "master_id", ")", "if", "not", "new_revision", ":", "return", "if", "show_history", ":", "objects_processed", "=", "yield", "self", ".", "client", ".", "find", "(", "{", "\"master_id\"", ":", "master_id", ",", "\"processed\"", ":", "True", "}", ",", "orderby", "=", "\"toa\"", ",", "order_by_direction", "=", "-", "1", ",", "page", "=", "0", ",", "limit", "=", "limit", ")", "elif", "add_current_revision", ":", "objects_processed", "=", "yield", "self", ".", "client", ".", "find", "(", "{", "\"master_id\"", ":", "master_id", ",", "\"processed\"", ":", "True", "}", ",", "orderby", "=", "\"toa\"", ",", "order_by_direction", "=", "-", "1", ",", "page", "=", "0", ",", "limit", "=", "1", ")", "if", "len", "(", "objects_processed", ")", ">", "0", ":", "objects_processed", "=", "objects_processed", "[", ":", ":", "-", "1", "]", "objects_processed", "[", "-", "1", "]", "[", "\"current\"", "]", "=", "True", "objects", "=", "objects_processed", "+", "objects", "self", ".", "write", "(", "{", "\"count\"", ":", "len", "(", "objects", ")", ",", "\"results\"", ":", "objects", "}", ")" ]
40.766667
0.000798
[ "def get(self, master_id):\n", " \"\"\"\n", " Get a list of revisions by master ID\n", "\n", " :param master_id:\n", " :return:\n", " \"\"\"\n", " collection_name = self.request.headers.get(\"collection\")\n", " self.client = BaseAsyncMotorDocument(\"%s_revisions\" % collection_name)\n", "\n", " limit = self.get_query_argument(\"limit\", 2)\n", " add_current_revision = self.get_arg_value_as_type(\"addCurrent\",\n", " \"false\")\n", " show_history = self.get_arg_value_as_type(\"showHistory\", \"false\")\n", "\n", " objects_processed = []\n", "\n", " if isinstance(limit, unicode):\n", " limit = int(limit)\n", "\n", " objects = yield self.client.find({\"master_id\": master_id,\n", " \"processed\": False},\n", " orderby=\"toa\",\n", " order_by_direction=1,\n", " page=0,\n", " limit=20)\n", "\n", " # If this is a document that should have a revision and doesn't we\n", " # orchestratioin creation of the first one\n", " if len(objects) == 0:\n", "\n", " new_revision = yield self.__lazy_migration(master_id)\n", " if not new_revision:\n", " return\n", "\n", " if show_history:\n", " objects_processed = yield self.client.find({\"master_id\": master_id,\n", " \"processed\": True},\n", " orderby=\"toa\",\n", " order_by_direction=-1,\n", " page=0,\n", " limit=limit)\n", "\n", " elif add_current_revision:\n", " objects_processed = yield self.client.find({\"master_id\": master_id,\n", " \"processed\": True},\n", " orderby=\"toa\",\n", " order_by_direction=-1,\n", " page=0,\n", " limit=1)\n", "\n", " if len(objects_processed) > 0:\n", " objects_processed = objects_processed[::-1]\n", " objects_processed[-1][\"current\"] = True\n", " objects = objects_processed + objects\n", "\n", " self.write({\n", " \"count\": len(objects),\n", " \"results\": objects\n", " })" ]
[ 0, 0.08333333333333333, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1 ]
60
0.003056
def now(years=0, days=0, hours=0, minutes=0, seconds=0): """ :param years: int delta of years from now :param days: int delta of days from now :param hours: int delta of hours from now :param minutes: int delta of minutes from now :param seconds: float delta of seconds from now :return: str of the now timestamp """ date_time = datetime.utcnow() date_time += timedelta(days=days + years * 365, hours=hours, minutes=minutes, seconds=seconds) return datetime_to_str(date_time)
[ "def", "now", "(", "years", "=", "0", ",", "days", "=", "0", ",", "hours", "=", "0", ",", "minutes", "=", "0", ",", "seconds", "=", "0", ")", ":", "date_time", "=", "datetime", ".", "utcnow", "(", ")", "date_time", "+=", "timedelta", "(", "days", "=", "days", "+", "years", "*", "365", ",", "hours", "=", "hours", ",", "minutes", "=", "minutes", ",", "seconds", "=", "seconds", ")", "return", "datetime_to_str", "(", "date_time", ")" ]
42.153846
0.001786
[ "def now(years=0, days=0, hours=0, minutes=0, seconds=0):\n", " \"\"\"\n", " :param years: int delta of years from now\n", " :param days: int delta of days from now\n", " :param hours: int delta of hours from now\n", " :param minutes: int delta of minutes from now\n", " :param seconds: float delta of seconds from now\n", " :return: str of the now timestamp\n", " \"\"\"\n", " date_time = datetime.utcnow()\n", " date_time += timedelta(days=days + years * 365, hours=hours,\n", " minutes=minutes, seconds=seconds)\n", " return datetime_to_str(date_time)" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.02702702702702703 ]
13
0.002079
def set_tile(self, codepoint: int, tile: np.array) -> None: """Upload a tile into this array. The tile can be in 32-bit color (height, width, rgba), or grey-scale (height, width). The tile should have a dtype of ``np.uint8``. This data may need to be sent to graphics card memory, this is a slow operation. """ tile = np.ascontiguousarray(tile, dtype=np.uint8) if tile.shape == self.tile_shape: full_tile = np.empty(self.tile_shape + (4,), dtype=np.uint8) full_tile[:, :, :3] = 255 full_tile[:, :, 3] = tile return self.set_tile(codepoint, full_tile) required = self.tile_shape + (4,) if tile.shape != required: raise ValueError( "Tile shape must be %r or %r, got %r." % (required, self.tile_shape, tile.shape) ) lib.TCOD_tileset_set_tile_( self._tileset_p, codepoint, ffi.cast("struct TCOD_ColorRGBA*", tile.ctypes.data), )
[ "def", "set_tile", "(", "self", ",", "codepoint", ":", "int", ",", "tile", ":", "np", ".", "array", ")", "->", "None", ":", "tile", "=", "np", ".", "ascontiguousarray", "(", "tile", ",", "dtype", "=", "np", ".", "uint8", ")", "if", "tile", ".", "shape", "==", "self", ".", "tile_shape", ":", "full_tile", "=", "np", ".", "empty", "(", "self", ".", "tile_shape", "+", "(", "4", ",", ")", ",", "dtype", "=", "np", ".", "uint8", ")", "full_tile", "[", ":", ",", ":", ",", ":", "3", "]", "=", "255", "full_tile", "[", ":", ",", ":", ",", "3", "]", "=", "tile", "return", "self", ".", "set_tile", "(", "codepoint", ",", "full_tile", ")", "required", "=", "self", ".", "tile_shape", "+", "(", "4", ",", ")", "if", "tile", ".", "shape", "!=", "required", ":", "raise", "ValueError", "(", "\"Tile shape must be %r or %r, got %r.\"", "%", "(", "required", ",", "self", ".", "tile_shape", ",", "tile", ".", "shape", ")", ")", "lib", ".", "TCOD_tileset_set_tile_", "(", "self", ".", "_tileset_p", ",", "codepoint", ",", "ffi", ".", "cast", "(", "\"struct TCOD_ColorRGBA*\"", ",", "tile", ".", "ctypes", ".", "data", ")", ",", ")" ]
39.923077
0.001881
[ "def set_tile(self, codepoint: int, tile: np.array) -> None:\n", " \"\"\"Upload a tile into this array.\n", "\n", " The tile can be in 32-bit color (height, width, rgba), or grey-scale\n", " (height, width). The tile should have a dtype of ``np.uint8``.\n", "\n", " This data may need to be sent to graphics card memory, this is a slow\n", " operation.\n", " \"\"\"\n", " tile = np.ascontiguousarray(tile, dtype=np.uint8)\n", " if tile.shape == self.tile_shape:\n", " full_tile = np.empty(self.tile_shape + (4,), dtype=np.uint8)\n", " full_tile[:, :, :3] = 255\n", " full_tile[:, :, 3] = tile\n", " return self.set_tile(codepoint, full_tile)\n", " required = self.tile_shape + (4,)\n", " if tile.shape != required:\n", " raise ValueError(\n", " \"Tile shape must be %r or %r, got %r.\"\n", " % (required, self.tile_shape, tile.shape)\n", " )\n", " lib.TCOD_tileset_set_tile_(\n", " self._tileset_p,\n", " codepoint,\n", " ffi.cast(\"struct TCOD_ColorRGBA*\", tile.ctypes.data),\n", " )" ]
[ 0, 0.023809523809523808, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1111111111111111 ]
26
0.005189
def _SMOTE(T, N, k, h = 1.0): """ Returns (N/100) * n_minority_samples synthetic minority samples. Parameters ---------- T : array-like, shape = [n_minority_samples, n_features] Holds the minority samples N : percetange of new synthetic samples: n_synthetic_samples = N/100 * n_minority_samples. Can be < 100. k : int. Number of nearest neighbours. Returns ------- S : Synthetic samples. array, shape = [(N/100) * n_minority_samples, n_features]. """ n_minority_samples, n_features = T.shape if N < 100: #create synthetic samples only for a subset of T. #TODO: select random minortiy samples N = 100 pass if (N % 100) != 0: raise ValueError("N must be < 100 or multiple of 100") N = N/100 n_synthetic_samples = N * n_minority_samples S = np.zeros(shape=(n_synthetic_samples, n_features)) #Learn nearest neighbours neigh = NearestNeighbors(n_neighbors = k) neigh.fit(T) #Calculate synthetic samples for i in range(n_minority_samples): nn = neigh.kneighbors(T[i], return_distance=False) for n in range(int(N)): nn_index = choice(nn[0]) #NOTE: nn includes T[i], we don't want to select it while nn_index == i: nn_index = choice(nn[0]) dif = T[nn_index] - T[i] gap = np.random.uniform(low = 0.0, high = h) S[n + i * N, :] = T[i,:] + gap * dif[:] return S
[ "def", "_SMOTE", "(", "T", ",", "N", ",", "k", ",", "h", "=", "1.0", ")", ":", "n_minority_samples", ",", "n_features", "=", "T", ".", "shape", "if", "N", "<", "100", ":", "#create synthetic samples only for a subset of T.", "#TODO: select random minortiy samples", "N", "=", "100", "pass", "if", "(", "N", "%", "100", ")", "!=", "0", ":", "raise", "ValueError", "(", "\"N must be < 100 or multiple of 100\"", ")", "N", "=", "N", "/", "100", "n_synthetic_samples", "=", "N", "*", "n_minority_samples", "S", "=", "np", ".", "zeros", "(", "shape", "=", "(", "n_synthetic_samples", ",", "n_features", ")", ")", "#Learn nearest neighbours", "neigh", "=", "NearestNeighbors", "(", "n_neighbors", "=", "k", ")", "neigh", ".", "fit", "(", "T", ")", "#Calculate synthetic samples", "for", "i", "in", "range", "(", "n_minority_samples", ")", ":", "nn", "=", "neigh", ".", "kneighbors", "(", "T", "[", "i", "]", ",", "return_distance", "=", "False", ")", "for", "n", "in", "range", "(", "int", "(", "N", ")", ")", ":", "nn_index", "=", "choice", "(", "nn", "[", "0", "]", ")", "#NOTE: nn includes T[i], we don't want to select it", "while", "nn_index", "==", "i", ":", "nn_index", "=", "choice", "(", "nn", "[", "0", "]", ")", "dif", "=", "T", "[", "nn_index", "]", "-", "T", "[", "i", "]", "gap", "=", "np", ".", "random", ".", "uniform", "(", "low", "=", "0.0", ",", "high", "=", "h", ")", "S", "[", "n", "+", "i", "*", "N", ",", ":", "]", "=", "T", "[", "i", ",", ":", "]", "+", "gap", "*", "dif", "[", ":", "]", "return", "S" ]
29.4
0.009875
[ "def _SMOTE(T, N, k, h = 1.0):\n", " \"\"\"\n", " Returns (N/100) * n_minority_samples synthetic minority samples.\n", "\n", " Parameters\n", " ----------\n", " T : array-like, shape = [n_minority_samples, n_features]\n", " Holds the minority samples\n", " N : percetange of new synthetic samples:\n", " n_synthetic_samples = N/100 * n_minority_samples. Can be < 100.\n", " k : int. Number of nearest neighbours.\n", "\n", " Returns\n", " -------\n", " S : Synthetic samples. array,\n", " shape = [(N/100) * n_minority_samples, n_features].\n", " \"\"\"\n", " n_minority_samples, n_features = T.shape\n", "\n", " if N < 100:\n", " #create synthetic samples only for a subset of T.\n", " #TODO: select random minortiy samples\n", " N = 100\n", " pass\n", "\n", " if (N % 100) != 0:\n", " raise ValueError(\"N must be < 100 or multiple of 100\")\n", "\n", " N = N/100\n", " n_synthetic_samples = N * n_minority_samples\n", " S = np.zeros(shape=(n_synthetic_samples, n_features))\n", "\n", " #Learn nearest neighbours\n", " neigh = NearestNeighbors(n_neighbors = k)\n", " neigh.fit(T)\n", "\n", " #Calculate synthetic samples\n", " for i in range(n_minority_samples):\n", " nn = neigh.kneighbors(T[i], return_distance=False)\n", " for n in range(int(N)):\n", " nn_index = choice(nn[0])\n", " #NOTE: nn includes T[i], we don't want to select it\n", " while nn_index == i:\n", " nn_index = choice(nn[0])\n", "\n", " dif = T[nn_index] - T[i]\n", " gap = np.random.uniform(low = 0.0, high = h)\n", " S[n + i * N, :] = T[i,:] + gap * dif[:]\n", "\n", " return S" ]
[ 0.06666666666666667, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.017241379310344827, 0.021739130434782608, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.03333333333333333, 0.043478260869565216, 0, 0, 0.030303030303030304, 0, 0, 0, 0, 0.015625, 0, 0, 0, 0, 0.07017543859649122, 0.019230769230769232, 0, 0.08333333333333333 ]
50
0.008023
def infographic_people_section_notes_extractor( impact_report, component_metadata): """Extracting notes for people section in the infographic. :param impact_report: the impact report that acts as a proxy to fetch all the data that extractor needed :type impact_report: safe.report.impact_report.ImpactReport :param component_metadata: the component metadata. Used to obtain information about the component we want to render :type component_metadata: safe.report.report_metadata. ReportComponentsMetadata :return: context for rendering phase :rtype: dict .. versionadded:: 4.2 """ extra_args = component_metadata.extra_args provenance = impact_report.impact_function.provenance hazard_keywords = provenance['hazard_keywords'] exposure_keywords = provenance['exposure_keywords'] context = {} context['notes'] = [] note = { 'title': None, 'description': resolve_from_dictionary(extra_args, 'extra_note'), 'citations': None } context['notes'].append(note) concept_keys = ['affected_people', 'displaced_people'] for key in concept_keys: note = { 'title': concepts[key].get('name'), 'description': concepts[key].get('description'), 'citations': concepts[key].get('citations')[0]['text'] } context['notes'].append(note) hazard_classification = definition( active_classification(hazard_keywords, exposure_keywords['exposure'])) # generate rate description displacement_rates_note_format = resolve_from_dictionary( extra_args, 'hazard_displacement_rates_note_format') displacement_rates_note = [] for hazard_class in hazard_classification['classes']: hazard_class['classification_unit'] = ( hazard_classification['classification_unit']) displacement_rates_note.append( displacement_rates_note_format.format(**hazard_class)) rate_description = ', '.join(displacement_rates_note) note = { 'title': concepts['displacement_rate'].get('name'), 'description': rate_description, 'citations': concepts['displacement_rate'].get('citations')[0]['text'] } context['notes'].append(note) return context
[ "def", "infographic_people_section_notes_extractor", "(", "impact_report", ",", "component_metadata", ")", ":", "extra_args", "=", "component_metadata", ".", "extra_args", "provenance", "=", "impact_report", ".", "impact_function", ".", "provenance", "hazard_keywords", "=", "provenance", "[", "'hazard_keywords'", "]", "exposure_keywords", "=", "provenance", "[", "'exposure_keywords'", "]", "context", "=", "{", "}", "context", "[", "'notes'", "]", "=", "[", "]", "note", "=", "{", "'title'", ":", "None", ",", "'description'", ":", "resolve_from_dictionary", "(", "extra_args", ",", "'extra_note'", ")", ",", "'citations'", ":", "None", "}", "context", "[", "'notes'", "]", ".", "append", "(", "note", ")", "concept_keys", "=", "[", "'affected_people'", ",", "'displaced_people'", "]", "for", "key", "in", "concept_keys", ":", "note", "=", "{", "'title'", ":", "concepts", "[", "key", "]", ".", "get", "(", "'name'", ")", ",", "'description'", ":", "concepts", "[", "key", "]", ".", "get", "(", "'description'", ")", ",", "'citations'", ":", "concepts", "[", "key", "]", ".", "get", "(", "'citations'", ")", "[", "0", "]", "[", "'text'", "]", "}", "context", "[", "'notes'", "]", ".", "append", "(", "note", ")", "hazard_classification", "=", "definition", "(", "active_classification", "(", "hazard_keywords", ",", "exposure_keywords", "[", "'exposure'", "]", ")", ")", "# generate rate description", "displacement_rates_note_format", "=", "resolve_from_dictionary", "(", "extra_args", ",", "'hazard_displacement_rates_note_format'", ")", "displacement_rates_note", "=", "[", "]", "for", "hazard_class", "in", "hazard_classification", "[", "'classes'", "]", ":", "hazard_class", "[", "'classification_unit'", "]", "=", "(", "hazard_classification", "[", "'classification_unit'", "]", ")", "displacement_rates_note", ".", "append", "(", "displacement_rates_note_format", ".", "format", "(", "*", "*", "hazard_class", ")", ")", "rate_description", "=", "', '", ".", "join", "(", "displacement_rates_note", ")", "note", "=", "{", "'title'", ":", "concepts", "[", "'displacement_rate'", "]", ".", "get", "(", "'name'", ")", ",", "'description'", ":", "rate_description", ",", "'citations'", ":", "concepts", "[", "'displacement_rate'", "]", ".", "get", "(", "'citations'", ")", "[", "0", "]", "[", "'text'", "]", "}", "context", "[", "'notes'", "]", ".", "append", "(", "note", ")", "return", "context" ]
34
0.000433
[ "def infographic_people_section_notes_extractor(\n", " impact_report, component_metadata):\n", " \"\"\"Extracting notes for people section in the infographic.\n", "\n", " :param impact_report: the impact report that acts as a proxy to fetch\n", " all the data that extractor needed\n", " :type impact_report: safe.report.impact_report.ImpactReport\n", "\n", " :param component_metadata: the component metadata. Used to obtain\n", " information about the component we want to render\n", " :type component_metadata: safe.report.report_metadata.\n", " ReportComponentsMetadata\n", "\n", " :return: context for rendering phase\n", " :rtype: dict\n", "\n", " .. versionadded:: 4.2\n", " \"\"\"\n", " extra_args = component_metadata.extra_args\n", " provenance = impact_report.impact_function.provenance\n", " hazard_keywords = provenance['hazard_keywords']\n", " exposure_keywords = provenance['exposure_keywords']\n", "\n", " context = {}\n", " context['notes'] = []\n", "\n", " note = {\n", " 'title': None,\n", " 'description': resolve_from_dictionary(extra_args, 'extra_note'),\n", " 'citations': None\n", " }\n", " context['notes'].append(note)\n", "\n", " concept_keys = ['affected_people', 'displaced_people']\n", " for key in concept_keys:\n", " note = {\n", " 'title': concepts[key].get('name'),\n", " 'description': concepts[key].get('description'),\n", " 'citations': concepts[key].get('citations')[0]['text']\n", " }\n", " context['notes'].append(note)\n", "\n", " hazard_classification = definition(\n", " active_classification(hazard_keywords, exposure_keywords['exposure']))\n", "\n", " # generate rate description\n", " displacement_rates_note_format = resolve_from_dictionary(\n", " extra_args, 'hazard_displacement_rates_note_format')\n", " displacement_rates_note = []\n", " for hazard_class in hazard_classification['classes']:\n", " hazard_class['classification_unit'] = (\n", " hazard_classification['classification_unit'])\n", " displacement_rates_note.append(\n", " displacement_rates_note_format.format(**hazard_class))\n", "\n", " rate_description = ', '.join(displacement_rates_note)\n", "\n", " note = {\n", " 'title': concepts['displacement_rate'].get('name'),\n", " 'description': rate_description,\n", " 'citations': concepts['displacement_rate'].get('citations')[0]['text']\n", " }\n", "\n", " context['notes'].append(note)\n", "\n", " return context" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.05555555555555555 ]
66
0.000842
def _filter(self, value): """ Predicate used to exclude, False, or include, True, a computed value. """ if self.ignores and value in self.ignores: return False return True
[ "def", "_filter", "(", "self", ",", "value", ")", ":", "if", "self", ".", "ignores", "and", "value", "in", "self", ".", "ignores", ":", "return", "False", "return", "True" ]
31
0.008969
[ "def _filter(self, value):\n", " \"\"\"\n", " Predicate used to exclude, False, or include, True, a computed value.\n", " \"\"\"\n", " if self.ignores and value in self.ignores:\n", " return False\n", " return True" ]
[ 0, 0.08333333333333333, 0, 0, 0, 0, 0.05263157894736842 ]
7
0.019424
def x_build_action( self, node ): ''' Given a build action log, process into the corresponding test log and specific test log sub-part. ''' action_node = node name = self.get_child(action_node,tag='name') if name: name = self.get_data(name) #~ Based on the action, we decide what sub-section the log #~ should go into. action_type = None if re.match('[^%]+%[^.]+[.](compile)',name): action_type = 'compile' elif re.match('[^%]+%[^.]+[.](link|archive)',name): action_type = 'link' elif re.match('[^%]+%testing[.](capture-output)',name): action_type = 'run' elif re.match('[^%]+%testing[.](expect-failure|expect-success)',name): action_type = 'result' else: # TODO: Enable to see what other actions can be included in the test results. # action_type = None action_type = 'other' #~ print "+ [%s] %s %s :: %s" %(action_type,name,'','') if action_type: #~ Get the corresponding test. (target,test) = self.get_test(action_node,type=action_type) #~ Skip action that have no corresponding test as they are #~ regular build actions and don't need to show up in the #~ regression results. if not test: ##print "??? [%s] %s %s :: %s" %(action_type,name,target,test) return None ##print "+++ [%s] %s %s :: %s" %(action_type,name,target,test) #~ Collect some basic info about the action. action = { 'command' : self.get_action_command(action_node,action_type), 'output' : self.get_action_output(action_node,action_type), 'info' : self.get_action_info(action_node,action_type) } #~ For the test result status we find the appropriate node #~ based on the type of test. Then adjust the result status #~ accordingly. This makes the result status reflect the #~ expectation as the result pages post processing does not #~ account for this inversion. action['type'] = action_type if action_type == 'result': if re.match(r'^compile',test['test-type']): action['type'] = 'compile' elif re.match(r'^link',test['test-type']): action['type'] = 'link' elif re.match(r'^run',test['test-type']): action['type'] = 'run' #~ The result sub-part we will add this result to. if action_node.getAttribute('status') == '0': action['result'] = 'succeed' else: action['result'] = 'fail' # Add the action to the test. test['actions'].append(action) # Set the test result if this is the result action for the test. if action_type == 'result': test['result'] = action['result'] return None
[ "def", "x_build_action", "(", "self", ",", "node", ")", ":", "action_node", "=", "node", "name", "=", "self", ".", "get_child", "(", "action_node", ",", "tag", "=", "'name'", ")", "if", "name", ":", "name", "=", "self", ".", "get_data", "(", "name", ")", "#~ Based on the action, we decide what sub-section the log", "#~ should go into.", "action_type", "=", "None", "if", "re", ".", "match", "(", "'[^%]+%[^.]+[.](compile)'", ",", "name", ")", ":", "action_type", "=", "'compile'", "elif", "re", ".", "match", "(", "'[^%]+%[^.]+[.](link|archive)'", ",", "name", ")", ":", "action_type", "=", "'link'", "elif", "re", ".", "match", "(", "'[^%]+%testing[.](capture-output)'", ",", "name", ")", ":", "action_type", "=", "'run'", "elif", "re", ".", "match", "(", "'[^%]+%testing[.](expect-failure|expect-success)'", ",", "name", ")", ":", "action_type", "=", "'result'", "else", ":", "# TODO: Enable to see what other actions can be included in the test results.", "# action_type = None", "action_type", "=", "'other'", "#~ print \"+ [%s] %s %s :: %s\" %(action_type,name,'','')", "if", "action_type", ":", "#~ Get the corresponding test.", "(", "target", ",", "test", ")", "=", "self", ".", "get_test", "(", "action_node", ",", "type", "=", "action_type", ")", "#~ Skip action that have no corresponding test as they are", "#~ regular build actions and don't need to show up in the", "#~ regression results.", "if", "not", "test", ":", "##print \"??? [%s] %s %s :: %s\" %(action_type,name,target,test)", "return", "None", "##print \"+++ [%s] %s %s :: %s\" %(action_type,name,target,test)", "#~ Collect some basic info about the action.", "action", "=", "{", "'command'", ":", "self", ".", "get_action_command", "(", "action_node", ",", "action_type", ")", ",", "'output'", ":", "self", ".", "get_action_output", "(", "action_node", ",", "action_type", ")", ",", "'info'", ":", "self", ".", "get_action_info", "(", "action_node", ",", "action_type", ")", "}", "#~ For the test result status we find the appropriate node", "#~ based on the type of test. Then adjust the result status", "#~ accordingly. This makes the result status reflect the", "#~ expectation as the result pages post processing does not", "#~ account for this inversion.", "action", "[", "'type'", "]", "=", "action_type", "if", "action_type", "==", "'result'", ":", "if", "re", ".", "match", "(", "r'^compile'", ",", "test", "[", "'test-type'", "]", ")", ":", "action", "[", "'type'", "]", "=", "'compile'", "elif", "re", ".", "match", "(", "r'^link'", ",", "test", "[", "'test-type'", "]", ")", ":", "action", "[", "'type'", "]", "=", "'link'", "elif", "re", ".", "match", "(", "r'^run'", ",", "test", "[", "'test-type'", "]", ")", ":", "action", "[", "'type'", "]", "=", "'run'", "#~ The result sub-part we will add this result to.", "if", "action_node", ".", "getAttribute", "(", "'status'", ")", "==", "'0'", ":", "action", "[", "'result'", "]", "=", "'succeed'", "else", ":", "action", "[", "'result'", "]", "=", "'fail'", "# Add the action to the test.", "test", "[", "'actions'", "]", ".", "append", "(", "action", ")", "# Set the test result if this is the result action for the test.", "if", "action_type", "==", "'result'", ":", "test", "[", "'result'", "]", "=", "action", "[", "'result'", "]", "return", "None" ]
50.4
0.012275
[ "def x_build_action( self, node ):\n", " '''\n", " Given a build action log, process into the corresponding test log and\n", " specific test log sub-part.\n", " '''\n", " action_node = node\n", " name = self.get_child(action_node,tag='name')\n", " if name:\n", " name = self.get_data(name)\n", " #~ Based on the action, we decide what sub-section the log\n", " #~ should go into.\n", " action_type = None\n", " if re.match('[^%]+%[^.]+[.](compile)',name):\n", " action_type = 'compile'\n", " elif re.match('[^%]+%[^.]+[.](link|archive)',name):\n", " action_type = 'link'\n", " elif re.match('[^%]+%testing[.](capture-output)',name):\n", " action_type = 'run'\n", " elif re.match('[^%]+%testing[.](expect-failure|expect-success)',name):\n", " action_type = 'result'\n", " else:\n", " # TODO: Enable to see what other actions can be included in the test results.\n", " # action_type = None\n", " action_type = 'other'\n", " #~ print \"+ [%s] %s %s :: %s\" %(action_type,name,'','')\n", " if action_type:\n", " #~ Get the corresponding test.\n", " (target,test) = self.get_test(action_node,type=action_type)\n", " #~ Skip action that have no corresponding test as they are\n", " #~ regular build actions and don't need to show up in the\n", " #~ regression results.\n", " if not test:\n", " ##print \"??? [%s] %s %s :: %s\" %(action_type,name,target,test)\n", " return None\n", " ##print \"+++ [%s] %s %s :: %s\" %(action_type,name,target,test)\n", " #~ Collect some basic info about the action.\n", " action = {\n", " 'command' : self.get_action_command(action_node,action_type),\n", " 'output' : self.get_action_output(action_node,action_type),\n", " 'info' : self.get_action_info(action_node,action_type)\n", " }\n", " #~ For the test result status we find the appropriate node\n", " #~ based on the type of test. Then adjust the result status\n", " #~ accordingly. This makes the result status reflect the\n", " #~ expectation as the result pages post processing does not\n", " #~ account for this inversion.\n", " action['type'] = action_type\n", " if action_type == 'result':\n", " if re.match(r'^compile',test['test-type']):\n", " action['type'] = 'compile'\n", " elif re.match(r'^link',test['test-type']):\n", " action['type'] = 'link'\n", " elif re.match(r'^run',test['test-type']):\n", " action['type'] = 'run'\n", " #~ The result sub-part we will add this result to.\n", " if action_node.getAttribute('status') == '0':\n", " action['result'] = 'succeed'\n", " else:\n", " action['result'] = 'fail'\n", " # Add the action to the test.\n", " test['actions'].append(action)\n", " # Set the test result if this is the result action for the test.\n", " if action_type == 'result':\n", " test['result'] = action['result']\n", " return None" ]
[ 0.058823529411764705, 0.08333333333333333, 0, 0, 0, 0, 0.018518518518518517, 0, 0, 0.014084507042253521, 0.03225806451612903, 0, 0.017543859649122806, 0, 0.015625, 0, 0.014705882352941176, 0, 0.024096385542168676, 0, 0, 0.010638297872340425, 0, 0, 0.014285714285714285, 0, 0.02127659574468085, 0.02631578947368421, 0.013333333333333334, 0.013513513513513514, 0.02564102564102564, 0, 0.024096385542168676, 0, 0.012658227848101266, 0.01639344262295082, 0, 0.036585365853658534, 0.025, 0.02666666666666667, 0, 0.013333333333333334, 0.013157894736842105, 0.0136986301369863, 0.013157894736842105, 0.02127659574468085, 0, 0, 0.015625, 0, 0.015873015873015872, 0, 0.016129032258064516, 0, 0.014925373134328358, 0, 0, 0, 0, 0, 0, 0.012345679012345678, 0, 0, 0.05263157894736842 ]
65
0.011501
def generateBatches(tasks, givens): """ A function to generate a batch of commands to run in a specific order as to meet all the dependencies for each command. For example, the commands with no dependencies are run first, and the commands with the most deep dependencies are run last """ _removeGivensFromTasks(tasks, givens) batches = [] while tasks: batch = set() for task, dependencies in tasks.items(): if not dependencies: batch.add(task) if not batch: _batchErrorProcessing(tasks) for task in batch: del tasks[task] for task, dependencies in tasks.items(): for item in batch: if item in dependencies: tasks[task].remove(item) batches.append(batch) return batches
[ "def", "generateBatches", "(", "tasks", ",", "givens", ")", ":", "_removeGivensFromTasks", "(", "tasks", ",", "givens", ")", "batches", "=", "[", "]", "while", "tasks", ":", "batch", "=", "set", "(", ")", "for", "task", ",", "dependencies", "in", "tasks", ".", "items", "(", ")", ":", "if", "not", "dependencies", ":", "batch", ".", "add", "(", "task", ")", "if", "not", "batch", ":", "_batchErrorProcessing", "(", "tasks", ")", "for", "task", "in", "batch", ":", "del", "tasks", "[", "task", "]", "for", "task", ",", "dependencies", "in", "tasks", ".", "items", "(", ")", ":", "for", "item", "in", "batch", ":", "if", "item", "in", "dependencies", ":", "tasks", "[", "task", "]", ".", "remove", "(", "item", ")", "batches", ".", "append", "(", "batch", ")", "return", "batches" ]
28.724138
0.001161
[ "def generateBatches(tasks, givens):\n", " \"\"\"\n", " A function to generate a batch of commands to run in a specific order as to\n", " meet all the dependencies for each command. For example, the commands with\n", " no dependencies are run first, and the commands with the most deep\n", " dependencies are run last\n", " \"\"\"\n", " _removeGivensFromTasks(tasks, givens)\n", "\n", " batches = []\n", " while tasks:\n", " batch = set()\n", " for task, dependencies in tasks.items():\n", " if not dependencies:\n", " batch.add(task)\n", "\n", " if not batch:\n", " _batchErrorProcessing(tasks)\n", "\n", " for task in batch:\n", " del tasks[task]\n", "\n", " for task, dependencies in tasks.items():\n", " for item in batch:\n", " if item in dependencies:\n", " tasks[task].remove(item)\n", "\n", " batches.append(batch)\n", " return batches" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.05555555555555555 ]
29
0.001916
def _extract_object_params(self, name): """ Extract object params, return as dict """ params = self.request.query_params.lists() params_map = {} prefix = name[:-1] offset = len(prefix) for name, value in params: if name.startswith(prefix): if name.endswith('}'): name = name[offset:-1] elif name.endswith('}[]'): # strip off trailing [] # this fixes an Ember queryparams issue name = name[offset:-3] else: # malformed argument like: # filter{foo=bar raise exceptions.ParseError( '"%s" is not a well-formed filter key.' % name ) else: continue params_map[name] = value return params_map
[ "def", "_extract_object_params", "(", "self", ",", "name", ")", ":", "params", "=", "self", ".", "request", ".", "query_params", ".", "lists", "(", ")", "params_map", "=", "{", "}", "prefix", "=", "name", "[", ":", "-", "1", "]", "offset", "=", "len", "(", "prefix", ")", "for", "name", ",", "value", "in", "params", ":", "if", "name", ".", "startswith", "(", "prefix", ")", ":", "if", "name", ".", "endswith", "(", "'}'", ")", ":", "name", "=", "name", "[", "offset", ":", "-", "1", "]", "elif", "name", ".", "endswith", "(", "'}[]'", ")", ":", "# strip off trailing []", "# this fixes an Ember queryparams issue", "name", "=", "name", "[", "offset", ":", "-", "3", "]", "else", ":", "# malformed argument like:", "# filter{foo=bar", "raise", "exceptions", ".", "ParseError", "(", "'\"%s\" is not a well-formed filter key.'", "%", "name", ")", "else", ":", "continue", "params_map", "[", "name", "]", "=", "value", "return", "params_map" ]
32.714286
0.002121
[ "def _extract_object_params(self, name):\n", " \"\"\"\n", " Extract object params, return as dict\n", " \"\"\"\n", "\n", " params = self.request.query_params.lists()\n", " params_map = {}\n", " prefix = name[:-1]\n", " offset = len(prefix)\n", " for name, value in params:\n", " if name.startswith(prefix):\n", " if name.endswith('}'):\n", " name = name[offset:-1]\n", " elif name.endswith('}[]'):\n", " # strip off trailing []\n", " # this fixes an Ember queryparams issue\n", " name = name[offset:-3]\n", " else:\n", " # malformed argument like:\n", " # filter{foo=bar\n", " raise exceptions.ParseError(\n", " '\"%s\" is not a well-formed filter key.' % name\n", " )\n", " else:\n", " continue\n", " params_map[name] = value\n", "\n", " return params_map" ]
[ 0, 0.08333333333333333, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.04 ]
28
0.004405
def load_config(self, conf_path): """ Load config from an ``andes.conf`` file. This function creates a ``configparser.ConfigParser`` object to read the specified conf file and calls the ``load_config`` function of the config instances of the system and the routines. Parameters ---------- conf_path : None or str Path to the Andes config file. If ``None``, the function body will not run. Returns ------- None """ if conf_path is None: return conf = configparser.ConfigParser() conf.read(conf_path) self.config.load_config(conf) for r in routines.__all__: self.__dict__[r.lower()].config.load_config(conf) logger.debug('Loaded config file from {}.'.format(conf_path))
[ "def", "load_config", "(", "self", ",", "conf_path", ")", ":", "if", "conf_path", "is", "None", ":", "return", "conf", "=", "configparser", ".", "ConfigParser", "(", ")", "conf", ".", "read", "(", "conf_path", ")", "self", ".", "config", ".", "load_config", "(", "conf", ")", "for", "r", "in", "routines", ".", "__all__", ":", "self", ".", "__dict__", "[", "r", ".", "lower", "(", ")", "]", ".", "config", ".", "load_config", "(", "conf", ")", "logger", ".", "debug", "(", "'Loaded config file from {}.'", ".", "format", "(", "conf_path", ")", ")" ]
28.862069
0.002312
[ "def load_config(self, conf_path):\n", " \"\"\"\n", " Load config from an ``andes.conf`` file.\n", "\n", " This function creates a ``configparser.ConfigParser`` object to read\n", " the specified conf file and calls the ``load_config`` function of the\n", " config instances of the system and the routines.\n", "\n", " Parameters\n", " ----------\n", " conf_path : None or str\n", " Path to the Andes config file. If ``None``, the function body\n", " will not run.\n", "\n", " Returns\n", " -------\n", " None\n", " \"\"\"\n", " if conf_path is None:\n", " return\n", "\n", " conf = configparser.ConfigParser()\n", " conf.read(conf_path)\n", "\n", " self.config.load_config(conf)\n", " for r in routines.__all__:\n", " self.__dict__[r.lower()].config.load_config(conf)\n", "\n", " logger.debug('Loaded config file from {}.'.format(conf_path))" ]
[ 0, 0.08333333333333333, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.014492753623188406 ]
29
0.003373
def format_currency(number, currency, format, locale=babel.numbers.LC_NUMERIC, force_frac=None, format_type='standard'): """Same as ``babel.numbers.format_currency``, but has ``force_frac`` argument instead of ``currency_digits``. If the ``force_frac`` argument is given, the argument is passed down to ``pattern.apply``. """ locale = babel.core.Locale.parse(locale) if format: pattern = babel.numbers.parse_pattern(format) else: try: pattern = locale.currency_formats[format_type] except KeyError: raise babel.numbers.UnknownCurrencyFormatError( "%r is not a known currency format type" % format_type) if force_frac is None: fractions = babel.core.get_global('currency_fractions') try: digits = fractions[currency][0] except KeyError: digits = fractions['DEFAULT'][0] frac = (digits, digits) else: frac = force_frac return pattern.apply(number, locale, currency=currency, force_frac=frac)
[ "def", "format_currency", "(", "number", ",", "currency", ",", "format", ",", "locale", "=", "babel", ".", "numbers", ".", "LC_NUMERIC", ",", "force_frac", "=", "None", ",", "format_type", "=", "'standard'", ")", ":", "locale", "=", "babel", ".", "core", ".", "Locale", ".", "parse", "(", "locale", ")", "if", "format", ":", "pattern", "=", "babel", ".", "numbers", ".", "parse_pattern", "(", "format", ")", "else", ":", "try", ":", "pattern", "=", "locale", ".", "currency_formats", "[", "format_type", "]", "except", "KeyError", ":", "raise", "babel", ".", "numbers", ".", "UnknownCurrencyFormatError", "(", "\"%r is not a known currency format type\"", "%", "format_type", ")", "if", "force_frac", "is", "None", ":", "fractions", "=", "babel", ".", "core", ".", "get_global", "(", "'currency_fractions'", ")", "try", ":", "digits", "=", "fractions", "[", "currency", "]", "[", "0", "]", "except", "KeyError", ":", "digits", "=", "fractions", "[", "'DEFAULT'", "]", "[", "0", "]", "frac", "=", "(", "digits", ",", "digits", ")", "else", ":", "frac", "=", "force_frac", "return", "pattern", ".", "apply", "(", "number", ",", "locale", ",", "currency", "=", "currency", ",", "force_frac", "=", "frac", ")" ]
39.111111
0.000924
[ "def format_currency(number, currency, format, locale=babel.numbers.LC_NUMERIC,\n", " force_frac=None, format_type='standard'):\n", " \"\"\"Same as ``babel.numbers.format_currency``, but has ``force_frac``\n", " argument instead of ``currency_digits``.\n", "\n", " If the ``force_frac`` argument is given, the argument is passed down to\n", " ``pattern.apply``.\n", " \"\"\"\n", " locale = babel.core.Locale.parse(locale)\n", " if format:\n", " pattern = babel.numbers.parse_pattern(format)\n", " else:\n", " try:\n", " pattern = locale.currency_formats[format_type]\n", " except KeyError:\n", " raise babel.numbers.UnknownCurrencyFormatError(\n", " \"%r is not a known currency format type\" % format_type)\n", " if force_frac is None:\n", " fractions = babel.core.get_global('currency_fractions')\n", " try:\n", " digits = fractions[currency][0]\n", " except KeyError:\n", " digits = fractions['DEFAULT'][0]\n", " frac = (digits, digits)\n", " else:\n", " frac = force_frac\n", " return pattern.apply(number, locale, currency=currency, force_frac=frac)" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.013157894736842105 ]
27
0.000487
def build_a(self): """Calculates the total absorption from water, phytoplankton and CDOM a = awater + acdom + aphi """ lg.info('Building total absorption') self.a = self.a_water + self.a_cdom + self.a_phi
[ "def", "build_a", "(", "self", ")", ":", "lg", ".", "info", "(", "'Building total absorption'", ")", "self", ".", "a", "=", "self", ".", "a_water", "+", "self", ".", "a_cdom", "+", "self", ".", "a_phi" ]
34.142857
0.008163
[ "def build_a(self):\n", " \"\"\"Calculates the total absorption from water, phytoplankton and CDOM\n", "\n", " a = awater + acdom + aphi\n", " \"\"\"\n", " lg.info('Building total absorption')\n", " self.a = self.a_water + self.a_cdom + self.a_phi" ]
[ 0, 0.01282051282051282, 0, 0, 0, 0, 0.017857142857142856 ]
7
0.004383
def load_signal(signal_handler, get_header=False): """ ----- Brief ----- Function that returns a dictionary with the data contained inside 'signal_name' file (stored in the biosignalsnotebooks signal samples directory). ----------- Description ----------- Biosignalsnotebooks library provides data samples in order to the users that are new to biosignals data handling to have a place to start without the need to acquire new data. This sample files are stored in the folder _signal_samples inside the library. This function returns the data from the selected sample. ---------- Parameters ---------- signal_name : file name or url Name that identifies the signal sample to be loaded or a url. Possible values: [ecg_4000_Hz] ================= ============== Signal Type ECG Acquisition Time 00:12.4 Sample Rate 4000 Hz Number of Channels 1 Conditions At Rest ================= ============== [ecg_5_min] ================= ============== Signal Type ECG Acquisition Time 05:00.0 Sample Rate 1000 Hz Number of Channels 1 Conditions At Rest ================= ============== [ecg_sample] ================= ============== Signal Type ECG Acquisition Time 00:11.9 Sample Rate 200 Hz Number of Channels 1 Conditions At Rest ================= ============== [ecg_20_sec_10_Hz] ================= ============== Signal Type ECG Acquisition Time 00:20.0 Sample Rate 10 Hz Number of Channels 1 Conditions At Rest using Lead II ================= ============== [ecg_20_sec_100_Hz] ================= ============== Signal Type ECG Acquisition Time 00:19.7 Sample Rate 100 Hz Number of Channels 1 Conditions At Rest using Lead II ================= ============== [ecg_20_sec_1000_Hz] ================= ============== Signal Type ECG Acquisition Time 00:20.4 Sample Rate 1000 Hz Number of Channels 1 Conditions At Rest using Lead II ================= ============== [emg_bursts] ================= ============== Signal Type EMG Muscle Biceps Brachii Acquisition Time 00:28.5 Sample Rate 1000 Hz Number of Channels 1 Conditions Cyclic Contraction ================= ============== [emg_fatigue] ================= ============== Signal Type EMG Muscle Biceps Brachii Acquisition Time 02:06.9 Sample Rate 1000 Hz Number of Channels 1 Conditions Cyclic Flexion and Extension for fatigue induction ================= ============== [temp_res_8_16] ================= ============== Signal Type Temperature Acquisition Time 03:53.1 Sample Rate 1000 Hz Number of Channels 2 Resolutions 8 and 16 bits Conditions Temperature increase and decrease ================= ============== [bvp_sample] ================= ============== Signal Type BVP Acquisition Time 00:27.3 Sample Rate 1000 Hz Number of Channels 1 Conditions At Rest ================= ============== get_header : boolean If True the file header will be returned as one of the function outputs. Returns ------- out : dict A dictionary with the data stored inside the file specified in the input 'signal_name'. header : dict Metadata of the acquisition file (includes sampling rate, resolution, used device...) """ available_signals = ["ecg_4000_Hz", "ecg_5_min", "ecg_sample", "ecg_20_sec_10_Hz", "ecg_20_sec_100_Hz", "ecg_20_sec_1000_Hz", "emg_bursts", "emg_fatigue", "temp_res_8_16", "bvp_sample"] # Check if signal_handler is a url. # [Statements to be executed if signal_handler is a url] if any(mark in signal_handler for mark in ["http://", "https://", "www.", ".pt", ".com", ".org", ".net"]): # Check if it is a Google Drive sharable link. 
if "drive.google" in signal_handler: signal_handler = _generate_download_google_link(signal_handler) # Load file. out, header = load(signal_handler, remote=True, get_header=True, signal_sample=True) # [Statements to be executed if signal_handler is an identifier of the signal] else: if signal_handler in available_signals: out, header = load(SIGNAL_PATH + signal_handler + FILE_EXTENSION, get_header=True, signal_sample=True) else: raise RuntimeError("The signal name defined as input does not correspond to any of the " "signal samples contained in the package.") if get_header is True: return out, header else: return out
[ "def", "load_signal", "(", "signal_handler", ",", "get_header", "=", "False", ")", ":", "available_signals", "=", "[", "\"ecg_4000_Hz\"", ",", "\"ecg_5_min\"", ",", "\"ecg_sample\"", ",", "\"ecg_20_sec_10_Hz\"", ",", "\"ecg_20_sec_100_Hz\"", ",", "\"ecg_20_sec_1000_Hz\"", ",", "\"emg_bursts\"", ",", "\"emg_fatigue\"", ",", "\"temp_res_8_16\"", ",", "\"bvp_sample\"", "]", "# Check if signal_handler is a url.", "# [Statements to be executed if signal_handler is a url]", "if", "any", "(", "mark", "in", "signal_handler", "for", "mark", "in", "[", "\"http://\"", ",", "\"https://\"", ",", "\"www.\"", ",", "\".pt\"", ",", "\".com\"", ",", "\".org\"", ",", "\".net\"", "]", ")", ":", "# Check if it is a Google Drive sharable link.", "if", "\"drive.google\"", "in", "signal_handler", ":", "signal_handler", "=", "_generate_download_google_link", "(", "signal_handler", ")", "# Load file.", "out", ",", "header", "=", "load", "(", "signal_handler", ",", "remote", "=", "True", ",", "get_header", "=", "True", ",", "signal_sample", "=", "True", ")", "# [Statements to be executed if signal_handler is an identifier of the signal]", "else", ":", "if", "signal_handler", "in", "available_signals", ":", "out", ",", "header", "=", "load", "(", "SIGNAL_PATH", "+", "signal_handler", "+", "FILE_EXTENSION", ",", "get_header", "=", "True", ",", "signal_sample", "=", "True", ")", "else", ":", "raise", "RuntimeError", "(", "\"The signal name defined as input does not correspond to any of the \"", "\"signal samples contained in the package.\"", ")", "if", "get_header", "is", "True", ":", "return", "out", ",", "header", "else", ":", "return", "out" ]
36.757576
0.002408
[ "def load_signal(signal_handler, get_header=False):\n", " \"\"\"\n", " -----\n", " Brief\n", " -----\n", " Function that returns a dictionary with the data contained inside 'signal_name' file (stored in\n", " the biosignalsnotebooks signal samples directory).\n", "\n", " -----------\n", " Description\n", " -----------\n", " Biosignalsnotebooks library provides data samples in order to the users that are new to biosignals data handling to\n", " have a place to start without the need to acquire new data. This sample files are stored in the folder\n", " _signal_samples inside the library.\n", "\n", " This function returns the data from the selected sample.\n", "\n", " ----------\n", " Parameters\n", " ----------\n", " signal_name : file name or url\n", " Name that identifies the signal sample to be loaded or a url.\n", "\n", " Possible values:\n", " [ecg_4000_Hz]\n", " ================= ==============\n", " Signal Type ECG\n", " Acquisition Time 00:12.4\n", " Sample Rate 4000 Hz\n", " Number of Channels 1\n", " Conditions At Rest\n", " ================= ==============\n", "\n", " [ecg_5_min]\n", " ================= ==============\n", " Signal Type ECG\n", " Acquisition Time 05:00.0\n", " Sample Rate 1000 Hz\n", " Number of Channels 1\n", " Conditions At Rest\n", " ================= ==============\n", "\n", " [ecg_sample]\n", " ================= ==============\n", " Signal Type ECG\n", " Acquisition Time 00:11.9\n", " Sample Rate 200 Hz\n", " Number of Channels 1\n", " Conditions At Rest\n", " ================= ==============\n", "\n", " [ecg_20_sec_10_Hz]\n", " ================= ==============\n", " Signal Type ECG\n", " Acquisition Time 00:20.0\n", " Sample Rate 10 Hz\n", " Number of Channels 1\n", " Conditions At Rest\n", " using Lead II\n", " ================= ==============\n", "\n", " [ecg_20_sec_100_Hz]\n", " ================= ==============\n", " Signal Type ECG\n", " Acquisition Time 00:19.7\n", " Sample Rate 100 Hz\n", " Number of Channels 1\n", " Conditions At Rest\n", " using Lead II\n", " ================= ==============\n", "\n", " [ecg_20_sec_1000_Hz]\n", " ================= ==============\n", " Signal Type ECG\n", " Acquisition Time 00:20.4\n", " Sample Rate 1000 Hz\n", " Number of Channels 1\n", " Conditions At Rest\n", " using Lead II\n", " ================= ==============\n", "\n", " [emg_bursts]\n", " ================= ==============\n", " Signal Type EMG\n", " Muscle Biceps Brachii\n", " Acquisition Time 00:28.5\n", " Sample Rate 1000 Hz\n", " Number of Channels 1\n", " Conditions Cyclic\n", " Contraction\n", " ================= ==============\n", "\n", " [emg_fatigue]\n", " ================= ==============\n", " Signal Type EMG\n", " Muscle Biceps Brachii\n", " Acquisition Time 02:06.9\n", " Sample Rate 1000 Hz\n", " Number of Channels 1\n", " Conditions Cyclic Flexion\n", " and Extension\n", " for fatigue\n", " induction\n", " ================= ==============\n", "\n", " [temp_res_8_16]\n", " ================= ==============\n", " Signal Type Temperature\n", " Acquisition Time 03:53.1\n", " Sample Rate 1000 Hz\n", " Number of Channels 2\n", " Resolutions 8 and 16 bits\n", " Conditions Temperature\n", " increase and\n", " decrease\n", " ================= ==============\n", "\n", " [bvp_sample]\n", " ================= ==============\n", " Signal Type BVP\n", " Acquisition Time 00:27.3\n", " Sample Rate 1000 Hz\n", " Number of Channels 1\n", " Conditions At Rest\n", " ================= ==============\n", "\n", " get_header : boolean\n", " If True the file header will be 
returned as one of the function outputs.\n", "\n", " Returns\n", " -------\n", " out : dict\n", " A dictionary with the data stored inside the file specified in the input 'signal_name'.\n", "\n", " header : dict\n", " Metadata of the acquisition file (includes sampling rate, resolution, used device...)\n", " \"\"\"\n", "\n", " available_signals = [\"ecg_4000_Hz\", \"ecg_5_min\", \"ecg_sample\", \"ecg_20_sec_10_Hz\",\n", " \"ecg_20_sec_100_Hz\", \"ecg_20_sec_1000_Hz\", \"emg_bursts\", \"emg_fatigue\",\n", " \"temp_res_8_16\", \"bvp_sample\"]\n", "\n", " # Check if signal_handler is a url.\n", " # [Statements to be executed if signal_handler is a url]\n", " if any(mark in signal_handler for mark in [\"http://\", \"https://\", \"www.\", \".pt\", \".com\", \".org\",\n", " \".net\"]):\n", " # Check if it is a Google Drive sharable link.\n", " if \"drive.google\" in signal_handler:\n", " signal_handler = _generate_download_google_link(signal_handler)\n", "\n", " # Load file.\n", " out, header = load(signal_handler, remote=True, get_header=True, signal_sample=True)\n", "\n", " # [Statements to be executed if signal_handler is an identifier of the signal]\n", " else:\n", " if signal_handler in available_signals:\n", " out, header = load(SIGNAL_PATH + signal_handler + FILE_EXTENSION, get_header=True, signal_sample=True)\n", " else:\n", " raise RuntimeError(\"The signal name defined as input does not correspond to any of the \"\n", " \"signal samples contained in the package.\")\n", "\n", " if get_header is True:\n", " return out, header\n", " else:\n", " return out" ]
[ 0, 0, 0, 0, 0, 0.01, 0, 0, 0, 0, 0, 0.008333333333333333, 0.009345794392523364, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012345679012345678, 0, 0, 0, 0, 0.010416666666666666, 0, 0, 0.010638297872340425, 0, 0, 0.011494252873563218, 0.010309278350515464, 0, 0, 0, 0, 0.009900990099009901, 0, 0, 0, 0, 0, 0, 0.010752688172043012, 0, 0.012048192771084338, 0, 0, 0.008695652173913044, 0, 0.009900990099009901, 0.014084507042253521, 0, 0, 0, 0, 0.05555555555555555 ]
165
0.001235
def get_field_kwargs(field_name, model_field): """ Creates a default instance of a basic non-relational field. """ kwargs = {} validator_kwarg = list(model_field.validators) # The following will only be used by ModelField classes. # Gets removed for everything else. kwargs['model_field'] = model_field if model_field.verbose_name and needs_label(model_field, field_name): kwargs['label'] = capfirst(model_field.verbose_name) if model_field.help_text: kwargs['help_text'] = model_field.help_text max_digits = getattr(model_field, 'max_digits', None) if max_digits is not None: kwargs['max_digits'] = max_digits decimal_places = getattr(model_field, 'decimal_places', None) if decimal_places is not None: kwargs['decimal_places'] = decimal_places if isinstance(model_field, models.TextField): kwargs['style'] = {'base_template': 'textarea.html'} if isinstance(model_field, models.AutoField) or not model_field.editable: # If this field is read-only, then return early. # Further keyword arguments are not valid. kwargs['read_only'] = True return kwargs if model_field.has_default() or model_field.blank or model_field.null: kwargs['required'] = False if model_field.null and not isinstance(model_field, models.NullBooleanField): kwargs['allow_null'] = True if model_field.blank and (isinstance(model_field, models.CharField) or isinstance(model_field, models.TextField)): kwargs['allow_blank'] = True if isinstance(model_field, models.FilePathField): kwargs['path'] = model_field.path if model_field.match is not None: kwargs['match'] = model_field.match if model_field.recursive is not False: kwargs['recursive'] = model_field.recursive if model_field.allow_files is not True: kwargs['allow_files'] = model_field.allow_files if model_field.allow_folders is not False: kwargs['allow_folders'] = model_field.allow_folders if model_field.choices: # If this model field contains choices, then return early. # Further keyword arguments are not valid. kwargs['choices'] = model_field.choices return kwargs # Our decimal validation is handled in the field code, not validator code. # (In Django 1.9+ this differs from previous style) if isinstance(model_field, models.DecimalField): validator_kwarg = [ validator for validator in validator_kwarg if DecimalValidator and not isinstance(validator, DecimalValidator) ] # Ensure that max_length is passed explicitly as a keyword arg, # rather than as a validator. max_length = getattr(model_field, 'max_length', None) if max_length is not None and (isinstance(model_field, models.CharField) or isinstance(model_field, models.TextField)): kwargs['max_length'] = max_length validator_kwarg = [ validator for validator in validator_kwarg if not isinstance(validator, validators.MaxLengthValidator) ] # Ensure that min_length is passed explicitly as a keyword arg, # rather than as a validator. min_length = next(( validator.limit_value for validator in validator_kwarg if isinstance(validator, validators.MinLengthValidator) ), None) if min_length is not None and isinstance(model_field, models.CharField): kwargs['min_length'] = min_length validator_kwarg = [ validator for validator in validator_kwarg if not isinstance(validator, validators.MinLengthValidator) ] # Ensure that max_value is passed explicitly as a keyword arg, # rather than as a validator. 
max_value = next(( validator.limit_value for validator in validator_kwarg if isinstance(validator, validators.MaxValueValidator) ), None) if max_value is not None and isinstance(model_field, NUMERIC_FIELD_TYPES): kwargs['max_value'] = max_value validator_kwarg = [ validator for validator in validator_kwarg if not isinstance(validator, validators.MaxValueValidator) ] # Ensure that max_value is passed explicitly as a keyword arg, # rather than as a validator. min_value = next(( validator.limit_value for validator in validator_kwarg if isinstance(validator, validators.MinValueValidator) ), None) if min_value is not None and isinstance(model_field, NUMERIC_FIELD_TYPES): kwargs['min_value'] = min_value validator_kwarg = [ validator for validator in validator_kwarg if not isinstance(validator, validators.MinValueValidator) ] # URLField does not need to include the URLValidator argument, # as it is explicitly added in. if isinstance(model_field, models.URLField): validator_kwarg = [ validator for validator in validator_kwarg if not isinstance(validator, validators.URLValidator) ] # EmailField does not need to include the validate_email argument, # as it is explicitly added in. if isinstance(model_field, models.EmailField): validator_kwarg = [ validator for validator in validator_kwarg if validator is not validators.validate_email ] # SlugField do not need to include the 'validate_slug' argument, if isinstance(model_field, models.SlugField): validator_kwarg = [ validator for validator in validator_kwarg if validator is not validators.validate_slug ] # IPAddressField do not need to include the 'validate_ipv46_address' argument, if isinstance(model_field, models.GenericIPAddressField): validator_kwarg = [ validator for validator in validator_kwarg if validator is not validators.validate_ipv46_address ] if getattr(model_field, 'unique', False): unique_error_message = model_field.error_messages.get('unique', None) if unique_error_message: unique_error_message = unique_error_message % { 'model_name': model_field.model._meta.object_name, 'field_label': model_field.verbose_name } validator = UniqueValidator( queryset=model_field.model._default_manager, message=unique_error_message) validator_kwarg.append(validator) if validator_kwarg: kwargs['validators'] = validator_kwarg return kwargs
[ "def", "get_field_kwargs", "(", "field_name", ",", "model_field", ")", ":", "kwargs", "=", "{", "}", "validator_kwarg", "=", "list", "(", "model_field", ".", "validators", ")", "# The following will only be used by ModelField classes.", "# Gets removed for everything else.", "kwargs", "[", "'model_field'", "]", "=", "model_field", "if", "model_field", ".", "verbose_name", "and", "needs_label", "(", "model_field", ",", "field_name", ")", ":", "kwargs", "[", "'label'", "]", "=", "capfirst", "(", "model_field", ".", "verbose_name", ")", "if", "model_field", ".", "help_text", ":", "kwargs", "[", "'help_text'", "]", "=", "model_field", ".", "help_text", "max_digits", "=", "getattr", "(", "model_field", ",", "'max_digits'", ",", "None", ")", "if", "max_digits", "is", "not", "None", ":", "kwargs", "[", "'max_digits'", "]", "=", "max_digits", "decimal_places", "=", "getattr", "(", "model_field", ",", "'decimal_places'", ",", "None", ")", "if", "decimal_places", "is", "not", "None", ":", "kwargs", "[", "'decimal_places'", "]", "=", "decimal_places", "if", "isinstance", "(", "model_field", ",", "models", ".", "TextField", ")", ":", "kwargs", "[", "'style'", "]", "=", "{", "'base_template'", ":", "'textarea.html'", "}", "if", "isinstance", "(", "model_field", ",", "models", ".", "AutoField", ")", "or", "not", "model_field", ".", "editable", ":", "# If this field is read-only, then return early.", "# Further keyword arguments are not valid.", "kwargs", "[", "'read_only'", "]", "=", "True", "return", "kwargs", "if", "model_field", ".", "has_default", "(", ")", "or", "model_field", ".", "blank", "or", "model_field", ".", "null", ":", "kwargs", "[", "'required'", "]", "=", "False", "if", "model_field", ".", "null", "and", "not", "isinstance", "(", "model_field", ",", "models", ".", "NullBooleanField", ")", ":", "kwargs", "[", "'allow_null'", "]", "=", "True", "if", "model_field", ".", "blank", "and", "(", "isinstance", "(", "model_field", ",", "models", ".", "CharField", ")", "or", "isinstance", "(", "model_field", ",", "models", ".", "TextField", ")", ")", ":", "kwargs", "[", "'allow_blank'", "]", "=", "True", "if", "isinstance", "(", "model_field", ",", "models", ".", "FilePathField", ")", ":", "kwargs", "[", "'path'", "]", "=", "model_field", ".", "path", "if", "model_field", ".", "match", "is", "not", "None", ":", "kwargs", "[", "'match'", "]", "=", "model_field", ".", "match", "if", "model_field", ".", "recursive", "is", "not", "False", ":", "kwargs", "[", "'recursive'", "]", "=", "model_field", ".", "recursive", "if", "model_field", ".", "allow_files", "is", "not", "True", ":", "kwargs", "[", "'allow_files'", "]", "=", "model_field", ".", "allow_files", "if", "model_field", ".", "allow_folders", "is", "not", "False", ":", "kwargs", "[", "'allow_folders'", "]", "=", "model_field", ".", "allow_folders", "if", "model_field", ".", "choices", ":", "# If this model field contains choices, then return early.", "# Further keyword arguments are not valid.", "kwargs", "[", "'choices'", "]", "=", "model_field", ".", "choices", "return", "kwargs", "# Our decimal validation is handled in the field code, not validator code.", "# (In Django 1.9+ this differs from previous style)", "if", "isinstance", "(", "model_field", ",", "models", ".", "DecimalField", ")", ":", "validator_kwarg", "=", "[", "validator", "for", "validator", "in", "validator_kwarg", "if", "DecimalValidator", "and", "not", "isinstance", "(", "validator", ",", "DecimalValidator", ")", "]", "# Ensure that max_length is passed explicitly as a 
keyword arg,", "# rather than as a validator.", "max_length", "=", "getattr", "(", "model_field", ",", "'max_length'", ",", "None", ")", "if", "max_length", "is", "not", "None", "and", "(", "isinstance", "(", "model_field", ",", "models", ".", "CharField", ")", "or", "isinstance", "(", "model_field", ",", "models", ".", "TextField", ")", ")", ":", "kwargs", "[", "'max_length'", "]", "=", "max_length", "validator_kwarg", "=", "[", "validator", "for", "validator", "in", "validator_kwarg", "if", "not", "isinstance", "(", "validator", ",", "validators", ".", "MaxLengthValidator", ")", "]", "# Ensure that min_length is passed explicitly as a keyword arg,", "# rather than as a validator.", "min_length", "=", "next", "(", "(", "validator", ".", "limit_value", "for", "validator", "in", "validator_kwarg", "if", "isinstance", "(", "validator", ",", "validators", ".", "MinLengthValidator", ")", ")", ",", "None", ")", "if", "min_length", "is", "not", "None", "and", "isinstance", "(", "model_field", ",", "models", ".", "CharField", ")", ":", "kwargs", "[", "'min_length'", "]", "=", "min_length", "validator_kwarg", "=", "[", "validator", "for", "validator", "in", "validator_kwarg", "if", "not", "isinstance", "(", "validator", ",", "validators", ".", "MinLengthValidator", ")", "]", "# Ensure that max_value is passed explicitly as a keyword arg,", "# rather than as a validator.", "max_value", "=", "next", "(", "(", "validator", ".", "limit_value", "for", "validator", "in", "validator_kwarg", "if", "isinstance", "(", "validator", ",", "validators", ".", "MaxValueValidator", ")", ")", ",", "None", ")", "if", "max_value", "is", "not", "None", "and", "isinstance", "(", "model_field", ",", "NUMERIC_FIELD_TYPES", ")", ":", "kwargs", "[", "'max_value'", "]", "=", "max_value", "validator_kwarg", "=", "[", "validator", "for", "validator", "in", "validator_kwarg", "if", "not", "isinstance", "(", "validator", ",", "validators", ".", "MaxValueValidator", ")", "]", "# Ensure that max_value is passed explicitly as a keyword arg,", "# rather than as a validator.", "min_value", "=", "next", "(", "(", "validator", ".", "limit_value", "for", "validator", "in", "validator_kwarg", "if", "isinstance", "(", "validator", ",", "validators", ".", "MinValueValidator", ")", ")", ",", "None", ")", "if", "min_value", "is", "not", "None", "and", "isinstance", "(", "model_field", ",", "NUMERIC_FIELD_TYPES", ")", ":", "kwargs", "[", "'min_value'", "]", "=", "min_value", "validator_kwarg", "=", "[", "validator", "for", "validator", "in", "validator_kwarg", "if", "not", "isinstance", "(", "validator", ",", "validators", ".", "MinValueValidator", ")", "]", "# URLField does not need to include the URLValidator argument,", "# as it is explicitly added in.", "if", "isinstance", "(", "model_field", ",", "models", ".", "URLField", ")", ":", "validator_kwarg", "=", "[", "validator", "for", "validator", "in", "validator_kwarg", "if", "not", "isinstance", "(", "validator", ",", "validators", ".", "URLValidator", ")", "]", "# EmailField does not need to include the validate_email argument,", "# as it is explicitly added in.", "if", "isinstance", "(", "model_field", ",", "models", ".", "EmailField", ")", ":", "validator_kwarg", "=", "[", "validator", "for", "validator", "in", "validator_kwarg", "if", "validator", "is", "not", "validators", ".", "validate_email", "]", "# SlugField do not need to include the 'validate_slug' argument,", "if", "isinstance", "(", "model_field", ",", "models", ".", "SlugField", ")", ":", "validator_kwarg", "=", "[", 
"validator", "for", "validator", "in", "validator_kwarg", "if", "validator", "is", "not", "validators", ".", "validate_slug", "]", "# IPAddressField do not need to include the 'validate_ipv46_address' argument,", "if", "isinstance", "(", "model_field", ",", "models", ".", "GenericIPAddressField", ")", ":", "validator_kwarg", "=", "[", "validator", "for", "validator", "in", "validator_kwarg", "if", "validator", "is", "not", "validators", ".", "validate_ipv46_address", "]", "if", "getattr", "(", "model_field", ",", "'unique'", ",", "False", ")", ":", "unique_error_message", "=", "model_field", ".", "error_messages", ".", "get", "(", "'unique'", ",", "None", ")", "if", "unique_error_message", ":", "unique_error_message", "=", "unique_error_message", "%", "{", "'model_name'", ":", "model_field", ".", "model", ".", "_meta", ".", "object_name", ",", "'field_label'", ":", "model_field", ".", "verbose_name", "}", "validator", "=", "UniqueValidator", "(", "queryset", "=", "model_field", ".", "model", ".", "_default_manager", ",", "message", "=", "unique_error_message", ")", "validator_kwarg", ".", "append", "(", "validator", ")", "if", "validator_kwarg", ":", "kwargs", "[", "'validators'", "]", "=", "validator_kwarg", "return", "kwargs" ]
38.650888
0.000448
[ "def get_field_kwargs(field_name, model_field):\n", " \"\"\"\n", " Creates a default instance of a basic non-relational field.\n", " \"\"\"\n", " kwargs = {}\n", " validator_kwarg = list(model_field.validators)\n", "\n", " # The following will only be used by ModelField classes.\n", " # Gets removed for everything else.\n", " kwargs['model_field'] = model_field\n", "\n", " if model_field.verbose_name and needs_label(model_field, field_name):\n", " kwargs['label'] = capfirst(model_field.verbose_name)\n", "\n", " if model_field.help_text:\n", " kwargs['help_text'] = model_field.help_text\n", "\n", " max_digits = getattr(model_field, 'max_digits', None)\n", " if max_digits is not None:\n", " kwargs['max_digits'] = max_digits\n", "\n", " decimal_places = getattr(model_field, 'decimal_places', None)\n", " if decimal_places is not None:\n", " kwargs['decimal_places'] = decimal_places\n", "\n", " if isinstance(model_field, models.TextField):\n", " kwargs['style'] = {'base_template': 'textarea.html'}\n", "\n", " if isinstance(model_field, models.AutoField) or not model_field.editable:\n", " # If this field is read-only, then return early.\n", " # Further keyword arguments are not valid.\n", " kwargs['read_only'] = True\n", " return kwargs\n", "\n", " if model_field.has_default() or model_field.blank or model_field.null:\n", " kwargs['required'] = False\n", "\n", " if model_field.null and not isinstance(model_field, models.NullBooleanField):\n", " kwargs['allow_null'] = True\n", "\n", " if model_field.blank and (isinstance(model_field, models.CharField) or\n", " isinstance(model_field, models.TextField)):\n", " kwargs['allow_blank'] = True\n", "\n", " if isinstance(model_field, models.FilePathField):\n", " kwargs['path'] = model_field.path\n", "\n", " if model_field.match is not None:\n", " kwargs['match'] = model_field.match\n", "\n", " if model_field.recursive is not False:\n", " kwargs['recursive'] = model_field.recursive\n", "\n", " if model_field.allow_files is not True:\n", " kwargs['allow_files'] = model_field.allow_files\n", "\n", " if model_field.allow_folders is not False:\n", " kwargs['allow_folders'] = model_field.allow_folders\n", "\n", " if model_field.choices:\n", " # If this model field contains choices, then return early.\n", " # Further keyword arguments are not valid.\n", " kwargs['choices'] = model_field.choices\n", " return kwargs\n", "\n", " # Our decimal validation is handled in the field code, not validator code.\n", " # (In Django 1.9+ this differs from previous style)\n", " if isinstance(model_field, models.DecimalField):\n", " validator_kwarg = [\n", " validator for validator in validator_kwarg\n", " if DecimalValidator and not isinstance(validator, DecimalValidator)\n", " ]\n", "\n", " # Ensure that max_length is passed explicitly as a keyword arg,\n", " # rather than as a validator.\n", " max_length = getattr(model_field, 'max_length', None)\n", " if max_length is not None and (isinstance(model_field, models.CharField) or\n", " isinstance(model_field, models.TextField)):\n", " kwargs['max_length'] = max_length\n", " validator_kwarg = [\n", " validator for validator in validator_kwarg\n", " if not isinstance(validator, validators.MaxLengthValidator)\n", " ]\n", "\n", " # Ensure that min_length is passed explicitly as a keyword arg,\n", " # rather than as a validator.\n", " min_length = next((\n", " validator.limit_value for validator in validator_kwarg\n", " if isinstance(validator, validators.MinLengthValidator)\n", " ), None)\n", " if min_length is not None and 
isinstance(model_field, models.CharField):\n", " kwargs['min_length'] = min_length\n", " validator_kwarg = [\n", " validator for validator in validator_kwarg\n", " if not isinstance(validator, validators.MinLengthValidator)\n", " ]\n", "\n", " # Ensure that max_value is passed explicitly as a keyword arg,\n", " # rather than as a validator.\n", " max_value = next((\n", " validator.limit_value for validator in validator_kwarg\n", " if isinstance(validator, validators.MaxValueValidator)\n", " ), None)\n", " if max_value is not None and isinstance(model_field, NUMERIC_FIELD_TYPES):\n", " kwargs['max_value'] = max_value\n", " validator_kwarg = [\n", " validator for validator in validator_kwarg\n", " if not isinstance(validator, validators.MaxValueValidator)\n", " ]\n", "\n", " # Ensure that max_value is passed explicitly as a keyword arg,\n", " # rather than as a validator.\n", " min_value = next((\n", " validator.limit_value for validator in validator_kwarg\n", " if isinstance(validator, validators.MinValueValidator)\n", " ), None)\n", " if min_value is not None and isinstance(model_field, NUMERIC_FIELD_TYPES):\n", " kwargs['min_value'] = min_value\n", " validator_kwarg = [\n", " validator for validator in validator_kwarg\n", " if not isinstance(validator, validators.MinValueValidator)\n", " ]\n", "\n", " # URLField does not need to include the URLValidator argument,\n", " # as it is explicitly added in.\n", " if isinstance(model_field, models.URLField):\n", " validator_kwarg = [\n", " validator for validator in validator_kwarg\n", " if not isinstance(validator, validators.URLValidator)\n", " ]\n", "\n", " # EmailField does not need to include the validate_email argument,\n", " # as it is explicitly added in.\n", " if isinstance(model_field, models.EmailField):\n", " validator_kwarg = [\n", " validator for validator in validator_kwarg\n", " if validator is not validators.validate_email\n", " ]\n", "\n", " # SlugField do not need to include the 'validate_slug' argument,\n", " if isinstance(model_field, models.SlugField):\n", " validator_kwarg = [\n", " validator for validator in validator_kwarg\n", " if validator is not validators.validate_slug\n", " ]\n", "\n", " # IPAddressField do not need to include the 'validate_ipv46_address' argument,\n", " if isinstance(model_field, models.GenericIPAddressField):\n", " validator_kwarg = [\n", " validator for validator in validator_kwarg\n", " if validator is not validators.validate_ipv46_address\n", " ]\n", "\n", " if getattr(model_field, 'unique', False):\n", " unique_error_message = model_field.error_messages.get('unique', None)\n", " if unique_error_message:\n", " unique_error_message = unique_error_message % {\n", " 'model_name': model_field.model._meta.object_name,\n", " 'field_label': model_field.verbose_name\n", " }\n", " validator = UniqueValidator(\n", " queryset=model_field.model._default_manager,\n", " message=unique_error_message)\n", " validator_kwarg.append(validator)\n", "\n", " if validator_kwarg:\n", " kwargs['validators'] = validator_kwarg\n", "\n", " return kwargs" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012195121951219513, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012048192771084338, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.058823529411764705 ]
169
0.000492
def add_weights(self, name, nin, nout, mean=0, std=0, sparsity=0, diagonal=0): '''Helper method to create a new weight matrix. Parameters ---------- name : str Name of the parameter to add. nin : int Size of "input" for this weight matrix. nout : int Size of "output" for this weight matrix. mean : float, optional Mean value for randomly-initialized weights. Defaults to 0. std : float, optional Standard deviation of initial matrix values. Defaults to :math:`1 / sqrt(n_i + n_o)`. sparsity : float, optional Fraction of weights to be set to zero. Defaults to 0. diagonal : float, optional Initialize weights to a matrix of zeros with this value along the diagonal. Defaults to None, which initializes all weights randomly. ''' glorot = 1 / np.sqrt(nin + nout) m = self.kwargs.get( 'mean_{}'.format(name), self.kwargs.get('mean', mean)) s = self.kwargs.get( 'std_{}'.format(name), self.kwargs.get('std', std or glorot)) p = self.kwargs.get( 'sparsity_{}'.format(name), self.kwargs.get('sparsity', sparsity)) d = self.kwargs.get( 'diagonal_{}'.format(name), self.kwargs.get('diagonal', diagonal)) self._params.append(theano.shared( util.random_matrix(nin, nout, mean=m, std=s, sparsity=p, diagonal=d, rng=self.rng), name=self._fmt(name)))
[ "def", "add_weights", "(", "self", ",", "name", ",", "nin", ",", "nout", ",", "mean", "=", "0", ",", "std", "=", "0", ",", "sparsity", "=", "0", ",", "diagonal", "=", "0", ")", ":", "glorot", "=", "1", "/", "np", ".", "sqrt", "(", "nin", "+", "nout", ")", "m", "=", "self", ".", "kwargs", ".", "get", "(", "'mean_{}'", ".", "format", "(", "name", ")", ",", "self", ".", "kwargs", ".", "get", "(", "'mean'", ",", "mean", ")", ")", "s", "=", "self", ".", "kwargs", ".", "get", "(", "'std_{}'", ".", "format", "(", "name", ")", ",", "self", ".", "kwargs", ".", "get", "(", "'std'", ",", "std", "or", "glorot", ")", ")", "p", "=", "self", ".", "kwargs", ".", "get", "(", "'sparsity_{}'", ".", "format", "(", "name", ")", ",", "self", ".", "kwargs", ".", "get", "(", "'sparsity'", ",", "sparsity", ")", ")", "d", "=", "self", ".", "kwargs", ".", "get", "(", "'diagonal_{}'", ".", "format", "(", "name", ")", ",", "self", ".", "kwargs", ".", "get", "(", "'diagonal'", ",", "diagonal", ")", ")", "self", ".", "_params", ".", "append", "(", "theano", ".", "shared", "(", "util", ".", "random_matrix", "(", "nin", ",", "nout", ",", "mean", "=", "m", ",", "std", "=", "s", ",", "sparsity", "=", "p", ",", "diagonal", "=", "d", ",", "rng", "=", "self", ".", "rng", ")", ",", "name", "=", "self", ".", "_fmt", "(", "name", ")", ")", ")" ]
44.342857
0.001261
[ "def add_weights(self, name, nin, nout, mean=0, std=0, sparsity=0, diagonal=0):\n", " '''Helper method to create a new weight matrix.\n", "\n", " Parameters\n", " ----------\n", " name : str\n", " Name of the parameter to add.\n", " nin : int\n", " Size of \"input\" for this weight matrix.\n", " nout : int\n", " Size of \"output\" for this weight matrix.\n", " mean : float, optional\n", " Mean value for randomly-initialized weights. Defaults to 0.\n", " std : float, optional\n", " Standard deviation of initial matrix values. Defaults to\n", " :math:`1 / sqrt(n_i + n_o)`.\n", " sparsity : float, optional\n", " Fraction of weights to be set to zero. Defaults to 0.\n", " diagonal : float, optional\n", " Initialize weights to a matrix of zeros with this value along the\n", " diagonal. Defaults to None, which initializes all weights randomly.\n", " '''\n", " glorot = 1 / np.sqrt(nin + nout)\n", " m = self.kwargs.get(\n", " 'mean_{}'.format(name), self.kwargs.get('mean', mean))\n", " s = self.kwargs.get(\n", " 'std_{}'.format(name), self.kwargs.get('std', std or glorot))\n", " p = self.kwargs.get(\n", " 'sparsity_{}'.format(name), self.kwargs.get('sparsity', sparsity))\n", " d = self.kwargs.get(\n", " 'diagonal_{}'.format(name), self.kwargs.get('diagonal', diagonal))\n", " self._params.append(theano.shared(\n", " util.random_matrix(nin, nout, mean=m, std=s, sparsity=p,\n", " diagonal=d, rng=self.rng),\n", " name=self._fmt(name)))" ]
[ 0, 0.017857142857142856, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.029411764705882353 ]
35
0.001351
def _add_defaults_python(self): """getting python files""" if self.distribution.has_pure_modules(): build_py = self.get_finalized_command('build_py') self.filelist.extend(build_py.get_source_files()) # This functionality is incompatible with include_package_data, and # will in fact create an infinite recursion if include_package_data # is True. Use of include_package_data will imply that # distutils-style automatic handling of package_data is disabled if not self.distribution.include_package_data: for _, src_dir, _, filenames in build_py.data_files: self.filelist.extend([os.path.join(src_dir, filename) for filename in filenames])
[ "def", "_add_defaults_python", "(", "self", ")", ":", "if", "self", ".", "distribution", ".", "has_pure_modules", "(", ")", ":", "build_py", "=", "self", ".", "get_finalized_command", "(", "'build_py'", ")", "self", ".", "filelist", ".", "extend", "(", "build_py", ".", "get_source_files", "(", ")", ")", "# This functionality is incompatible with include_package_data, and", "# will in fact create an infinite recursion if include_package_data", "# is True. Use of include_package_data will imply that", "# distutils-style automatic handling of package_data is disabled", "if", "not", "self", ".", "distribution", ".", "include_package_data", ":", "for", "_", ",", "src_dir", ",", "_", ",", "filenames", "in", "build_py", ".", "data_files", ":", "self", ".", "filelist", ".", "extend", "(", "[", "os", ".", "path", ".", "join", "(", "src_dir", ",", "filename", ")", "for", "filename", "in", "filenames", "]", ")" ]
61.846154
0.002451
[ "def _add_defaults_python(self):\n", " \"\"\"getting python files\"\"\"\n", " if self.distribution.has_pure_modules():\n", " build_py = self.get_finalized_command('build_py')\n", " self.filelist.extend(build_py.get_source_files())\n", " # This functionality is incompatible with include_package_data, and\n", " # will in fact create an infinite recursion if include_package_data\n", " # is True. Use of include_package_data will imply that\n", " # distutils-style automatic handling of package_data is disabled\n", " if not self.distribution.include_package_data:\n", " for _, src_dir, _, filenames in build_py.data_files:\n", " self.filelist.extend([os.path.join(src_dir, filename)\n", " for filename in filenames])" ]
[ 0, 0.02857142857142857, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.014492753623188406 ]
13
0.003313
def grab_literal(template, l_del): """Parse a literal from the template""" global _CURRENT_LINE try: # Look for the next tag and move the template to it literal, template = template.split(l_del, 1) _CURRENT_LINE += literal.count('\n') return (literal, template) # There are no more tags in the template? except ValueError: # Then the rest of the template is a literal return (template, '')
[ "def", "grab_literal", "(", "template", ",", "l_del", ")", ":", "global", "_CURRENT_LINE", "try", ":", "# Look for the next tag and move the template to it", "literal", ",", "template", "=", "template", ".", "split", "(", "l_del", ",", "1", ")", "_CURRENT_LINE", "+=", "literal", ".", "count", "(", "'\\n'", ")", "return", "(", "literal", ",", "template", ")", "# There are no more tags in the template?", "except", "ValueError", ":", "# Then the rest of the template is a literal", "return", "(", "template", ",", "''", ")" ]
29.733333
0.002174
[ "def grab_literal(template, l_del):\n", " \"\"\"Parse a literal from the template\"\"\"\n", "\n", " global _CURRENT_LINE\n", "\n", " try:\n", " # Look for the next tag and move the template to it\n", " literal, template = template.split(l_del, 1)\n", " _CURRENT_LINE += literal.count('\\n')\n", " return (literal, template)\n", "\n", " # There are no more tags in the template?\n", " except ValueError:\n", " # Then the rest of the template is a literal\n", " return (template, '')" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.034482758620689655 ]
15
0.002299
def is_line_layer(layer): """Check if a QGIS layer is vector and its geometries are lines. :param layer: A vector layer. :type layer: QgsVectorLayer, QgsMapLayer :returns: True if the layer contains lines, otherwise False. :rtype: bool """ try: return (layer.type() == QgsMapLayer.VectorLayer) and ( layer.geometryType() == QgsWkbTypes.LineGeometry) except AttributeError: return False
[ "def", "is_line_layer", "(", "layer", ")", ":", "try", ":", "return", "(", "layer", ".", "type", "(", ")", "==", "QgsMapLayer", ".", "VectorLayer", ")", "and", "(", "layer", ".", "geometryType", "(", ")", "==", "QgsWkbTypes", ".", "LineGeometry", ")", "except", "AttributeError", ":", "return", "False" ]
28.933333
0.002232
[ "def is_line_layer(layer):\n", " \"\"\"Check if a QGIS layer is vector and its geometries are lines.\n", "\n", " :param layer: A vector layer.\n", " :type layer: QgsVectorLayer, QgsMapLayer\n", "\n", " :returns: True if the layer contains lines, otherwise False.\n", " :rtype: bool\n", "\n", " \"\"\"\n", " try:\n", " return (layer.type() == QgsMapLayer.VectorLayer) and (\n", " layer.geometryType() == QgsWkbTypes.LineGeometry)\n", " except AttributeError:\n", " return False" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.05 ]
15
0.003333
def close_first_file(self):
    """
    Attempts to close the first **Script_Editor_tabWidget** Widget tab Model editor file.

    :return: Method success.
    :rtype: bool
    """

    editor = self.get_current_editor()
    if len(self.__model.list_editors()) == 1 and editor.is_untitled and not editor.is_modified():
        self.close_file(leave_first_editor=False)
        return True
[ "def", "close_first_file", "(", "self", ")", ":", "editor", "=", "self", ".", "get_current_editor", "(", ")", "if", "len", "(", "self", ".", "__model", ".", "list_editors", "(", ")", ")", "==", "1", "and", "editor", ".", "is_untitled", "and", "not", "editor", ".", "is_modified", "(", ")", ":", "self", ".", "close_file", "(", "leave_first_editor", "=", "False", ")", "return", "True" ]
34.416667
0.009434
[ "def close_first_file(self):\n", " \"\"\"\n", " Attemtps to close the first **Script_Editor_tabWidget** Widget tab Model editor file.\n", "\n", " :return: Method success.\n", " :rtype: bool\n", " \"\"\"\n", "\n", " editor = self.get_current_editor()\n", " if len(self.__model.list_editors()) == 1 and editor.is_untitled and not editor.is_modified():\n", " self.close_file(leave_first_editor=False)\n", " return True" ]
[ 0, 0.08333333333333333, 0.010638297872340425, 0, 0, 0, 0, 0, 0, 0.00980392156862745, 0, 0.043478260869565216 ]
12
0.012271
def input_schema_clean(input_, input_schema):
    """
    Updates schema default values with input data.

    :param input_: Input data
    :type input_: dict
    :param input_schema: Input schema
    :type input_schema: dict
    :returns: Nested dict with data (default values updated with input data)
    :rtype: dict
    """
    if input_schema.get('type') == 'object':
        try:
            defaults = get_object_defaults(input_schema)
        except NoObjectDefaults:
            pass
        else:
            return deep_update(defaults, input_)
    return input_
[ "def", "input_schema_clean", "(", "input_", ",", "input_schema", ")", ":", "if", "input_schema", ".", "get", "(", "'type'", ")", "==", "'object'", ":", "try", ":", "defaults", "=", "get_object_defaults", "(", "input_schema", ")", "except", "NoObjectDefaults", ":", "pass", "else", ":", "return", "deep_update", "(", "defaults", ",", "input_", ")", "return", "input_" ]
29.263158
0.001742
[ "def input_schema_clean(input_, input_schema):\n", " \"\"\"\n", " Updates schema default values with input data.\n", "\n", " :param input_: Input data\n", " :type input_: dict\n", " :param input_schema: Input schema\n", " :type input_schema: dict\n", " :returns: Nested dict with data (defaul values updated with input data)\n", " :rtype: dict\n", " \"\"\"\n", " if input_schema.get('type') == 'object':\n", " try:\n", " defaults = get_object_defaults(input_schema)\n", " except NoObjectDefaults:\n", " pass\n", " else:\n", " return deep_update(defaults, input_)\n", " return input_" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.058823529411764705 ]
19
0.003096
def sample(self, sample_indices=None, num_samples=1):
		""" returns samples according to the KDE

			Parameters
			----------
				sample_indices: list of ints
					Indices into the training data used as centers for the samples

				num_samples: int
					if sample_indices is None, this specifies how many samples
					are drawn.

		"""
		if sample_indices is None:
			sample_indices = np.random.choice(self.data.shape[0], size=num_samples)
		samples = self.data[sample_indices]

		samples = samples.squeeze()

		if self.num_values == 1:
			# handle cases where there is only one value!
			return(samples)

		probs = self.bw * np.ones(self.num_values)/(self.num_values-1)
		probs[0] = 1-self.bw

		delta = np.random.choice(self.num_values, size=num_samples, p = probs)
		samples = np.mod(samples + delta, self.num_values)

		return(samples)
[ "def", "sample", "(", "self", ",", "sample_indices", "=", "None", ",", "num_samples", "=", "1", ")", ":", "if", "sample_indices", "is", "None", ":", "sample_indices", "=", "np", ".", "random", ".", "choice", "(", "self", ".", "data", ".", "shape", "[", "0", "]", ",", "size", "=", "num_samples", ")", "samples", "=", "self", ".", "data", "[", "sample_indices", "]", "samples", "=", "samples", ".", "squeeze", "(", ")", "if", "self", ".", "num_values", "==", "1", ":", "# handle cases where there is only one value!", "return", "(", "samples", ")", "probs", "=", "self", ".", "bw", "*", "np", ".", "ones", "(", "self", ".", "num_values", ")", "/", "(", "self", ".", "num_values", "-", "1", ")", "probs", "[", "0", "]", "=", "1", "-", "self", ".", "bw", "delta", "=", "np", ".", "random", ".", "choice", "(", "self", ".", "num_values", ",", "size", "=", "num_samples", ",", "p", "=", "probs", ")", "samples", "=", "np", ".", "mod", "(", "samples", "+", "delta", ",", "self", ".", "num_values", ")", "return", "(", "samples", ")" ]
27.566667
0.044393
[ "def sample(self, sample_indices=None, num_samples=1):\n", "\t\t\"\"\" returns samples according to the KDE\n", "\t\t\n", "\t\t\tParameters\n", "\t\t\t----------\n", "\t\t\t\tsample_inices: list of ints\n", "\t\t\t\t\tIndices into the training data used as centers for the samples\n", "\t\t\t\t\n", "\t\t\t\tnum_samples: int\n", "\t\t\t\t\tif samples_indices is None, this specifies how many samples\n", "\t\t\t\t\tare drawn.\n", "\t\t\t\t\n", "\t\t\"\"\"\n", "\t\tif sample_indices is None:\n", "\t\t\tsample_indices = np.random.choice(self.data.shape[0], size=num_samples)\n", "\t\tsamples = self.data[sample_indices]\n", "\n", "\t\tsamples = samples.squeeze()\n", "\t\t\n", "\t\tif self.num_values == 1:\n", "\t\t\t# handle cases where there is only one value!\n", "\t\t\treturn(samples)\n", "\t\t\n", "\t\tprobs = self.bw * np.ones(self.num_values)/(self.num_values-1)\n", "\t\tprobs[0] = 1-self.bw\n", "\t\t\n", "\t\tdelta = np.random.choice(self.num_values, size=num_samples, p = probs)\n", "\t\tsamples = np.mod(samples + delta, self.num_values)\t\n", "\n", "\t\treturn(samples)" ]
[ 0, 0.046511627906976744, 0.6666666666666666, 0.07142857142857142, 0.07142857142857142, 0.03125, 0.014705882352941176, 0.4, 0.047619047619047616, 0.015384615384615385, 0.0625, 0.4, 0.16666666666666666, 0.034482758620689655, 0.013333333333333334, 0.02631578947368421, 0, 0.03333333333333333, 0.6666666666666666, 0.037037037037037035, 0.02040816326530612, 0.05263157894736842, 0.6666666666666666, 0.015384615384615385, 0.043478260869565216, 0.6666666666666666, 0.0410958904109589, 0.037037037037037035, 0, 0.11764705882352941 ]
30
0.148878
def obtain_hosting_device_credentials_from_config(): """Obtains credentials from config file and stores them in memory. To be called before hosting device templates defined in the config file are created. """ cred_dict = get_specific_config('cisco_hosting_device_credential') attr_info = { 'name': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': None}, 'is_visible': True, 'default': ''}, 'description': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': None}, 'is_visible': True, 'default': ''}, 'user_name': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': None}, 'is_visible': True, 'default': ''}, 'password': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': None}, 'is_visible': True, 'default': ''}, 'type': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': None}, 'is_visible': True, 'default': ''}} credentials = {} for cred_uuid, kv_dict in cred_dict.items(): # ensure cred_uuid is properly formatted cred_uuid = uuidify(cred_uuid) verify_resource_dict(kv_dict, True, attr_info) credentials[cred_uuid] = kv_dict return credentials
[ "def", "obtain_hosting_device_credentials_from_config", "(", ")", ":", "cred_dict", "=", "get_specific_config", "(", "'cisco_hosting_device_credential'", ")", "attr_info", "=", "{", "'name'", ":", "{", "'allow_post'", ":", "True", ",", "'allow_put'", ":", "True", ",", "'validate'", ":", "{", "'type:string'", ":", "None", "}", ",", "'is_visible'", ":", "True", ",", "'default'", ":", "''", "}", ",", "'description'", ":", "{", "'allow_post'", ":", "True", ",", "'allow_put'", ":", "True", ",", "'validate'", ":", "{", "'type:string'", ":", "None", "}", ",", "'is_visible'", ":", "True", ",", "'default'", ":", "''", "}", ",", "'user_name'", ":", "{", "'allow_post'", ":", "True", ",", "'allow_put'", ":", "True", ",", "'validate'", ":", "{", "'type:string'", ":", "None", "}", ",", "'is_visible'", ":", "True", ",", "'default'", ":", "''", "}", ",", "'password'", ":", "{", "'allow_post'", ":", "True", ",", "'allow_put'", ":", "True", ",", "'validate'", ":", "{", "'type:string'", ":", "None", "}", ",", "'is_visible'", ":", "True", ",", "'default'", ":", "''", "}", ",", "'type'", ":", "{", "'allow_post'", ":", "True", ",", "'allow_put'", ":", "True", ",", "'validate'", ":", "{", "'type:string'", ":", "None", "}", ",", "'is_visible'", ":", "True", ",", "'default'", ":", "''", "}", "}", "credentials", "=", "{", "}", "for", "cred_uuid", ",", "kv_dict", "in", "cred_dict", ".", "items", "(", ")", ":", "# ensure cred_uuid is properly formatted", "cred_uuid", "=", "uuidify", "(", "cred_uuid", ")", "verify_resource_dict", "(", "kv_dict", ",", "True", ",", "attr_info", ")", "credentials", "[", "cred_uuid", "]", "=", "kv_dict", "return", "credentials" ]
48.793103
0.000693
[ "def obtain_hosting_device_credentials_from_config():\n", " \"\"\"Obtains credentials from config file and stores them in memory.\n", " To be called before hosting device templates defined in the config file\n", " are created.\n", " \"\"\"\n", " cred_dict = get_specific_config('cisco_hosting_device_credential')\n", " attr_info = {\n", " 'name': {'allow_post': True, 'allow_put': True,\n", " 'validate': {'type:string': None}, 'is_visible': True,\n", " 'default': ''},\n", " 'description': {'allow_post': True, 'allow_put': True,\n", " 'validate': {'type:string': None},\n", " 'is_visible': True, 'default': ''},\n", " 'user_name': {'allow_post': True, 'allow_put': True,\n", " 'validate': {'type:string': None},\n", " 'is_visible': True, 'default': ''},\n", " 'password': {'allow_post': True, 'allow_put': True,\n", " 'validate': {'type:string': None},\n", " 'is_visible': True, 'default': ''},\n", " 'type': {'allow_post': True, 'allow_put': True,\n", " 'validate': {'type:string': None}, 'is_visible': True,\n", " 'default': ''}}\n", " credentials = {}\n", " for cred_uuid, kv_dict in cred_dict.items():\n", " # ensure cred_uuid is properly formatted\n", " cred_uuid = uuidify(cred_uuid)\n", " verify_resource_dict(kv_dict, True, attr_info)\n", " credentials[cred_uuid] = kv_dict\n", " return credentials" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.045454545454545456 ]
29
0.001567
def picture( relationshiplist, picname, picdescription, pixelwidth=None, pixelheight=None, nochangeaspect=True, nochangearrowheads=True, imagefiledict=None): """ Take a relationshiplist, picture file name, and return a paragraph containing the image and an updated relationshiplist """ if imagefiledict is None: warn( 'Using picture() without imagefiledict parameter will be depreca' 'ted in the future.', PendingDeprecationWarning ) # http://openxmldeveloper.org/articles/462.aspx # Create an image. Size may be specified, otherwise it will based on the # pixel size of image. Return a paragraph containing the picture # Set relationship ID to that of the image or the first available one picid = '2' picpath = abspath(picname) if imagefiledict is not None: # Keep track of the image files in a separate dictionary so they don't # need to be copied into the template directory if picpath not in imagefiledict: picrelid = 'rId' + str(len(relationshiplist) + 1) imagefiledict[picpath] = picrelid relationshiplist.append([ 'http://schemas.openxmlformats.org/officeDocument/2006/relat' 'ionships/image', 'media/%s_%s' % (picrelid, basename(picpath)) ]) else: picrelid = imagefiledict[picpath] else: # Copy files into template directory for backwards compatibility # Images still accumulate in the template directory this way picrelid = 'rId' + str(len(relationshiplist) + 1) relationshiplist.append([ 'http://schemas.openxmlformats.org/officeDocument/2006/relations' 'hips/image', 'media/' + picname ]) media_dir = join(template_dir, 'word', 'media') if not os.path.isdir(media_dir): os.mkdir(media_dir) shutil.copyfile(picname, join(media_dir, picname)) image = Image.open(picpath) # Extract EXIF data, if available try: exif = image._getexif() exif = {} if exif is None else exif except: exif = {} imageExif = {} for tag, value in exif.items(): imageExif[TAGS.get(tag, tag)] = value imageOrientation = imageExif.get('Orientation', 1) imageAngle = { 1: 0, 2: 0, 3: 180, 4: 0, 5: 90, 6: 90, 7: 270, 8: 270 }[imageOrientation] imageFlipH = 'true' if imageOrientation in (2, 5, 7) else 'false' imageFlipV = 'true' if imageOrientation == 4 else 'false' # Check if the user has specified a size if not pixelwidth or not pixelheight: # If not, get info from the picture itself pixelwidth, pixelheight = image.size[0:2] # Swap width and height if necessary if imageOrientation in (5, 6, 7, 8): pixelwidth, pixelheight = pixelheight, pixelwidth # OpenXML measures on-screen objects in English Metric Units # 1cm = 36000 EMUs emuperpixel = 12700 width = str(pixelwidth * emuperpixel) height = str(pixelheight * emuperpixel) # There are 3 main elements inside a picture # 1. The Blipfill - specifies how the image fills the picture area # (stretch, tile, etc.) blipfill = makeelement('blipFill', nsprefix='pic') blipfill.append(makeelement('blip', nsprefix='a', attrnsprefix='r', attributes={'embed': picrelid})) stretch = makeelement('stretch', nsprefix='a') stretch.append(makeelement('fillRect', nsprefix='a')) blipfill.append(makeelement('srcRect', nsprefix='a')) blipfill.append(stretch) # 2. The non visual picture properties nvpicpr = makeelement('nvPicPr', nsprefix='pic') cnvpr = makeelement( 'cNvPr', nsprefix='pic', attributes={'id': '0', 'name': 'Picture 1', 'descr': picdescription} ) nvpicpr.append(cnvpr) cnvpicpr = makeelement('cNvPicPr', nsprefix='pic') cnvpicpr.append(makeelement( 'picLocks', nsprefix='a', attributes={'noChangeAspect': str(int(nochangeaspect)), 'noChangeArrowheads': str(int(nochangearrowheads))})) nvpicpr.append(cnvpicpr) # 3. 
The Shape properties sppr = makeelement('spPr', nsprefix='pic', attributes={'bwMode': 'auto'}) xfrm = makeelement( 'xfrm', nsprefix='a', attributes={ 'rot': str(imageAngle * 60000), 'flipH': imageFlipH, 'flipV': imageFlipV } ) xfrm.append( makeelement('off', nsprefix='a', attributes={'x': '0', 'y': '0'}) ) xfrm.append( makeelement( 'ext', nsprefix='a', attributes={'cx': width, 'cy': height} ) ) prstgeom = makeelement( 'prstGeom', nsprefix='a', attributes={'prst': 'rect'} ) prstgeom.append(makeelement('avLst', nsprefix='a')) sppr.append(xfrm) sppr.append(prstgeom) # Add our 3 parts to the picture element pic = makeelement('pic', nsprefix='pic') pic.append(nvpicpr) pic.append(blipfill) pic.append(sppr) # Now make the supporting elements # The following sequence is just: make element, then add its children graphicdata = makeelement( 'graphicData', nsprefix='a', attributes={'uri': ('http://schemas.openxmlformats.org/drawingml/200' '6/picture')}) graphicdata.append(pic) graphic = makeelement('graphic', nsprefix='a') graphic.append(graphicdata) framelocks = makeelement('graphicFrameLocks', nsprefix='a', attributes={'noChangeAspect': '1'}) framepr = makeelement('cNvGraphicFramePr', nsprefix='wp') framepr.append(framelocks) docpr = makeelement('docPr', nsprefix='wp', attributes={'id': picid, 'name': 'Picture 1', 'descr': picdescription}) effectextent = makeelement('effectExtent', nsprefix='wp', attributes={'l': '25400', 't': '0', 'r': '0', 'b': '0'}) extent = makeelement('extent', nsprefix='wp', attributes={'cx': width, 'cy': height}) inline = makeelement('inline', attributes={'distT': "0", 'distB': "0", 'distL': "0", 'distR': "0"}, nsprefix='wp') inline.append(extent) inline.append(effectextent) inline.append(docpr) inline.append(framepr) inline.append(graphic) drawing = makeelement('drawing') drawing.append(inline) run = makeelement('r') run.append(drawing) paragraph = makeelement('p') paragraph.append(run) if imagefiledict is not None: return relationshiplist, paragraph, imagefiledict else: return relationshiplist, paragraph
[ "def", "picture", "(", "relationshiplist", ",", "picname", ",", "picdescription", ",", "pixelwidth", "=", "None", ",", "pixelheight", "=", "None", ",", "nochangeaspect", "=", "True", ",", "nochangearrowheads", "=", "True", ",", "imagefiledict", "=", "None", ")", ":", "if", "imagefiledict", "is", "None", ":", "warn", "(", "'Using picture() without imagefiledict parameter will be depreca'", "'ted in the future.'", ",", "PendingDeprecationWarning", ")", "# http://openxmldeveloper.org/articles/462.aspx", "# Create an image. Size may be specified, otherwise it will based on the", "# pixel size of image. Return a paragraph containing the picture", "# Set relationship ID to that of the image or the first available one", "picid", "=", "'2'", "picpath", "=", "abspath", "(", "picname", ")", "if", "imagefiledict", "is", "not", "None", ":", "# Keep track of the image files in a separate dictionary so they don't", "# need to be copied into the template directory", "if", "picpath", "not", "in", "imagefiledict", ":", "picrelid", "=", "'rId'", "+", "str", "(", "len", "(", "relationshiplist", ")", "+", "1", ")", "imagefiledict", "[", "picpath", "]", "=", "picrelid", "relationshiplist", ".", "append", "(", "[", "'http://schemas.openxmlformats.org/officeDocument/2006/relat'", "'ionships/image'", ",", "'media/%s_%s'", "%", "(", "picrelid", ",", "basename", "(", "picpath", ")", ")", "]", ")", "else", ":", "picrelid", "=", "imagefiledict", "[", "picpath", "]", "else", ":", "# Copy files into template directory for backwards compatibility", "# Images still accumulate in the template directory this way", "picrelid", "=", "'rId'", "+", "str", "(", "len", "(", "relationshiplist", ")", "+", "1", ")", "relationshiplist", ".", "append", "(", "[", "'http://schemas.openxmlformats.org/officeDocument/2006/relations'", "'hips/image'", ",", "'media/'", "+", "picname", "]", ")", "media_dir", "=", "join", "(", "template_dir", ",", "'word'", ",", "'media'", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "media_dir", ")", ":", "os", ".", "mkdir", "(", "media_dir", ")", "shutil", ".", "copyfile", "(", "picname", ",", "join", "(", "media_dir", ",", "picname", ")", ")", "image", "=", "Image", ".", "open", "(", "picpath", ")", "# Extract EXIF data, if available", "try", ":", "exif", "=", "image", ".", "_getexif", "(", ")", "exif", "=", "{", "}", "if", "exif", "is", "None", "else", "exif", "except", ":", "exif", "=", "{", "}", "imageExif", "=", "{", "}", "for", "tag", ",", "value", "in", "exif", ".", "items", "(", ")", ":", "imageExif", "[", "TAGS", ".", "get", "(", "tag", ",", "tag", ")", "]", "=", "value", "imageOrientation", "=", "imageExif", ".", "get", "(", "'Orientation'", ",", "1", ")", "imageAngle", "=", "{", "1", ":", "0", ",", "2", ":", "0", ",", "3", ":", "180", ",", "4", ":", "0", ",", "5", ":", "90", ",", "6", ":", "90", ",", "7", ":", "270", ",", "8", ":", "270", "}", "[", "imageOrientation", "]", "imageFlipH", "=", "'true'", "if", "imageOrientation", "in", "(", "2", ",", "5", ",", "7", ")", "else", "'false'", "imageFlipV", "=", "'true'", "if", "imageOrientation", "==", "4", "else", "'false'", "# Check if the user has specified a size", "if", "not", "pixelwidth", "or", "not", "pixelheight", ":", "# If not, get info from the picture itself", "pixelwidth", ",", "pixelheight", "=", "image", ".", "size", "[", "0", ":", "2", "]", "# Swap width and height if necessary", "if", "imageOrientation", "in", "(", "5", ",", "6", ",", "7", ",", "8", ")", ":", "pixelwidth", ",", "pixelheight", "=", "pixelheight", ",", 
"pixelwidth", "# OpenXML measures on-screen objects in English Metric Units", "# 1cm = 36000 EMUs", "emuperpixel", "=", "12700", "width", "=", "str", "(", "pixelwidth", "*", "emuperpixel", ")", "height", "=", "str", "(", "pixelheight", "*", "emuperpixel", ")", "# There are 3 main elements inside a picture", "# 1. The Blipfill - specifies how the image fills the picture area", "# (stretch, tile, etc.)", "blipfill", "=", "makeelement", "(", "'blipFill'", ",", "nsprefix", "=", "'pic'", ")", "blipfill", ".", "append", "(", "makeelement", "(", "'blip'", ",", "nsprefix", "=", "'a'", ",", "attrnsprefix", "=", "'r'", ",", "attributes", "=", "{", "'embed'", ":", "picrelid", "}", ")", ")", "stretch", "=", "makeelement", "(", "'stretch'", ",", "nsprefix", "=", "'a'", ")", "stretch", ".", "append", "(", "makeelement", "(", "'fillRect'", ",", "nsprefix", "=", "'a'", ")", ")", "blipfill", ".", "append", "(", "makeelement", "(", "'srcRect'", ",", "nsprefix", "=", "'a'", ")", ")", "blipfill", ".", "append", "(", "stretch", ")", "# 2. The non visual picture properties", "nvpicpr", "=", "makeelement", "(", "'nvPicPr'", ",", "nsprefix", "=", "'pic'", ")", "cnvpr", "=", "makeelement", "(", "'cNvPr'", ",", "nsprefix", "=", "'pic'", ",", "attributes", "=", "{", "'id'", ":", "'0'", ",", "'name'", ":", "'Picture 1'", ",", "'descr'", ":", "picdescription", "}", ")", "nvpicpr", ".", "append", "(", "cnvpr", ")", "cnvpicpr", "=", "makeelement", "(", "'cNvPicPr'", ",", "nsprefix", "=", "'pic'", ")", "cnvpicpr", ".", "append", "(", "makeelement", "(", "'picLocks'", ",", "nsprefix", "=", "'a'", ",", "attributes", "=", "{", "'noChangeAspect'", ":", "str", "(", "int", "(", "nochangeaspect", ")", ")", ",", "'noChangeArrowheads'", ":", "str", "(", "int", "(", "nochangearrowheads", ")", ")", "}", ")", ")", "nvpicpr", ".", "append", "(", "cnvpicpr", ")", "# 3. 
The Shape properties", "sppr", "=", "makeelement", "(", "'spPr'", ",", "nsprefix", "=", "'pic'", ",", "attributes", "=", "{", "'bwMode'", ":", "'auto'", "}", ")", "xfrm", "=", "makeelement", "(", "'xfrm'", ",", "nsprefix", "=", "'a'", ",", "attributes", "=", "{", "'rot'", ":", "str", "(", "imageAngle", "*", "60000", ")", ",", "'flipH'", ":", "imageFlipH", ",", "'flipV'", ":", "imageFlipV", "}", ")", "xfrm", ".", "append", "(", "makeelement", "(", "'off'", ",", "nsprefix", "=", "'a'", ",", "attributes", "=", "{", "'x'", ":", "'0'", ",", "'y'", ":", "'0'", "}", ")", ")", "xfrm", ".", "append", "(", "makeelement", "(", "'ext'", ",", "nsprefix", "=", "'a'", ",", "attributes", "=", "{", "'cx'", ":", "width", ",", "'cy'", ":", "height", "}", ")", ")", "prstgeom", "=", "makeelement", "(", "'prstGeom'", ",", "nsprefix", "=", "'a'", ",", "attributes", "=", "{", "'prst'", ":", "'rect'", "}", ")", "prstgeom", ".", "append", "(", "makeelement", "(", "'avLst'", ",", "nsprefix", "=", "'a'", ")", ")", "sppr", ".", "append", "(", "xfrm", ")", "sppr", ".", "append", "(", "prstgeom", ")", "# Add our 3 parts to the picture element", "pic", "=", "makeelement", "(", "'pic'", ",", "nsprefix", "=", "'pic'", ")", "pic", ".", "append", "(", "nvpicpr", ")", "pic", ".", "append", "(", "blipfill", ")", "pic", ".", "append", "(", "sppr", ")", "# Now make the supporting elements", "# The following sequence is just: make element, then add its children", "graphicdata", "=", "makeelement", "(", "'graphicData'", ",", "nsprefix", "=", "'a'", ",", "attributes", "=", "{", "'uri'", ":", "(", "'http://schemas.openxmlformats.org/drawingml/200'", "'6/picture'", ")", "}", ")", "graphicdata", ".", "append", "(", "pic", ")", "graphic", "=", "makeelement", "(", "'graphic'", ",", "nsprefix", "=", "'a'", ")", "graphic", ".", "append", "(", "graphicdata", ")", "framelocks", "=", "makeelement", "(", "'graphicFrameLocks'", ",", "nsprefix", "=", "'a'", ",", "attributes", "=", "{", "'noChangeAspect'", ":", "'1'", "}", ")", "framepr", "=", "makeelement", "(", "'cNvGraphicFramePr'", ",", "nsprefix", "=", "'wp'", ")", "framepr", ".", "append", "(", "framelocks", ")", "docpr", "=", "makeelement", "(", "'docPr'", ",", "nsprefix", "=", "'wp'", ",", "attributes", "=", "{", "'id'", ":", "picid", ",", "'name'", ":", "'Picture 1'", ",", "'descr'", ":", "picdescription", "}", ")", "effectextent", "=", "makeelement", "(", "'effectExtent'", ",", "nsprefix", "=", "'wp'", ",", "attributes", "=", "{", "'l'", ":", "'25400'", ",", "'t'", ":", "'0'", ",", "'r'", ":", "'0'", ",", "'b'", ":", "'0'", "}", ")", "extent", "=", "makeelement", "(", "'extent'", ",", "nsprefix", "=", "'wp'", ",", "attributes", "=", "{", "'cx'", ":", "width", ",", "'cy'", ":", "height", "}", ")", "inline", "=", "makeelement", "(", "'inline'", ",", "attributes", "=", "{", "'distT'", ":", "\"0\"", ",", "'distB'", ":", "\"0\"", ",", "'distL'", ":", "\"0\"", ",", "'distR'", ":", "\"0\"", "}", ",", "nsprefix", "=", "'wp'", ")", "inline", ".", "append", "(", "extent", ")", "inline", ".", "append", "(", "effectextent", ")", "inline", ".", "append", "(", "docpr", ")", "inline", ".", "append", "(", "framepr", ")", "inline", ".", "append", "(", "graphic", ")", "drawing", "=", "makeelement", "(", "'drawing'", ")", "drawing", ".", "append", "(", "inline", ")", "run", "=", "makeelement", "(", "'r'", ")", "run", ".", "append", "(", "drawing", ")", "paragraph", "=", "makeelement", "(", "'p'", ")", "paragraph", ".", "append", "(", "run", ")", "if", "imagefiledict", "is", "not", "None", ":", "return", 
"relationshiplist", ",", "paragraph", ",", "imagefiledict", "else", ":", "return", "relationshiplist", ",", "paragraph" ]
36.861878
0.000292
[ "def picture(\n", " relationshiplist, picname, picdescription, pixelwidth=None,\n", " pixelheight=None, nochangeaspect=True, nochangearrowheads=True,\n", " imagefiledict=None):\n", " \"\"\"\n", " Take a relationshiplist, picture file name, and return a paragraph\n", " containing the image and an updated relationshiplist\n", " \"\"\"\n", " if imagefiledict is None:\n", " warn(\n", " 'Using picture() without imagefiledict parameter will be depreca'\n", " 'ted in the future.', PendingDeprecationWarning\n", " )\n", "\n", " # http://openxmldeveloper.org/articles/462.aspx\n", " # Create an image. Size may be specified, otherwise it will based on the\n", " # pixel size of image. Return a paragraph containing the picture\n", "\n", " # Set relationship ID to that of the image or the first available one\n", " picid = '2'\n", " picpath = abspath(picname)\n", "\n", " if imagefiledict is not None:\n", " # Keep track of the image files in a separate dictionary so they don't\n", " # need to be copied into the template directory\n", " if picpath not in imagefiledict:\n", " picrelid = 'rId' + str(len(relationshiplist) + 1)\n", " imagefiledict[picpath] = picrelid\n", "\n", " relationshiplist.append([\n", " 'http://schemas.openxmlformats.org/officeDocument/2006/relat'\n", " 'ionships/image',\n", " 'media/%s_%s' % (picrelid, basename(picpath))\n", " ])\n", " else:\n", " picrelid = imagefiledict[picpath]\n", " else:\n", " # Copy files into template directory for backwards compatibility\n", " # Images still accumulate in the template directory this way\n", " picrelid = 'rId' + str(len(relationshiplist) + 1)\n", "\n", " relationshiplist.append([\n", " 'http://schemas.openxmlformats.org/officeDocument/2006/relations'\n", " 'hips/image', 'media/' + picname\n", " ])\n", "\n", " media_dir = join(template_dir, 'word', 'media')\n", " if not os.path.isdir(media_dir):\n", " os.mkdir(media_dir)\n", " shutil.copyfile(picname, join(media_dir, picname))\n", "\n", " image = Image.open(picpath)\n", "\n", " # Extract EXIF data, if available\n", " try:\n", " exif = image._getexif()\n", " exif = {} if exif is None else exif\n", " except:\n", " exif = {}\n", "\n", " imageExif = {}\n", " for tag, value in exif.items():\n", " imageExif[TAGS.get(tag, tag)] = value\n", "\n", " imageOrientation = imageExif.get('Orientation', 1)\n", " imageAngle = {\n", " 1: 0, 2: 0, 3: 180, 4: 0, 5: 90, 6: 90, 7: 270, 8: 270\n", " }[imageOrientation]\n", " imageFlipH = 'true' if imageOrientation in (2, 5, 7) else 'false'\n", " imageFlipV = 'true' if imageOrientation == 4 else 'false'\n", "\n", " # Check if the user has specified a size\n", " if not pixelwidth or not pixelheight:\n", " # If not, get info from the picture itself\n", " pixelwidth, pixelheight = image.size[0:2]\n", "\n", " # Swap width and height if necessary\n", " if imageOrientation in (5, 6, 7, 8):\n", " pixelwidth, pixelheight = pixelheight, pixelwidth\n", "\n", " # OpenXML measures on-screen objects in English Metric Units\n", " # 1cm = 36000 EMUs\n", " emuperpixel = 12700\n", " width = str(pixelwidth * emuperpixel)\n", " height = str(pixelheight * emuperpixel)\n", "\n", " # There are 3 main elements inside a picture\n", " # 1. 
The Blipfill - specifies how the image fills the picture area\n", " # (stretch, tile, etc.)\n", " blipfill = makeelement('blipFill', nsprefix='pic')\n", " blipfill.append(makeelement('blip', nsprefix='a', attrnsprefix='r',\n", " attributes={'embed': picrelid}))\n", " stretch = makeelement('stretch', nsprefix='a')\n", " stretch.append(makeelement('fillRect', nsprefix='a'))\n", " blipfill.append(makeelement('srcRect', nsprefix='a'))\n", " blipfill.append(stretch)\n", "\n", " # 2. The non visual picture properties\n", " nvpicpr = makeelement('nvPicPr', nsprefix='pic')\n", " cnvpr = makeelement(\n", " 'cNvPr', nsprefix='pic',\n", " attributes={'id': '0', 'name': 'Picture 1', 'descr': picdescription}\n", " )\n", " nvpicpr.append(cnvpr)\n", " cnvpicpr = makeelement('cNvPicPr', nsprefix='pic')\n", " cnvpicpr.append(makeelement(\n", " 'picLocks', nsprefix='a',\n", " attributes={'noChangeAspect': str(int(nochangeaspect)),\n", " 'noChangeArrowheads': str(int(nochangearrowheads))}))\n", " nvpicpr.append(cnvpicpr)\n", "\n", " # 3. The Shape properties\n", " sppr = makeelement('spPr', nsprefix='pic', attributes={'bwMode': 'auto'})\n", " xfrm = makeelement(\n", " 'xfrm', nsprefix='a', attributes={\n", " 'rot': str(imageAngle * 60000), 'flipH': imageFlipH,\n", " 'flipV': imageFlipV\n", " }\n", " )\n", " xfrm.append(\n", " makeelement('off', nsprefix='a', attributes={'x': '0', 'y': '0'})\n", " )\n", " xfrm.append(\n", " makeelement(\n", " 'ext', nsprefix='a', attributes={'cx': width, 'cy': height}\n", " )\n", " )\n", " prstgeom = makeelement(\n", " 'prstGeom', nsprefix='a', attributes={'prst': 'rect'}\n", " )\n", " prstgeom.append(makeelement('avLst', nsprefix='a'))\n", " sppr.append(xfrm)\n", " sppr.append(prstgeom)\n", "\n", " # Add our 3 parts to the picture element\n", " pic = makeelement('pic', nsprefix='pic')\n", " pic.append(nvpicpr)\n", " pic.append(blipfill)\n", " pic.append(sppr)\n", "\n", " # Now make the supporting elements\n", " # The following sequence is just: make element, then add its children\n", " graphicdata = makeelement(\n", " 'graphicData', nsprefix='a',\n", " attributes={'uri': ('http://schemas.openxmlformats.org/drawingml/200'\n", " '6/picture')})\n", " graphicdata.append(pic)\n", " graphic = makeelement('graphic', nsprefix='a')\n", " graphic.append(graphicdata)\n", "\n", " framelocks = makeelement('graphicFrameLocks', nsprefix='a',\n", " attributes={'noChangeAspect': '1'})\n", " framepr = makeelement('cNvGraphicFramePr', nsprefix='wp')\n", " framepr.append(framelocks)\n", " docpr = makeelement('docPr', nsprefix='wp',\n", " attributes={'id': picid, 'name': 'Picture 1',\n", " 'descr': picdescription})\n", " effectextent = makeelement('effectExtent', nsprefix='wp',\n", " attributes={'l': '25400', 't': '0', 'r': '0',\n", " 'b': '0'})\n", " extent = makeelement('extent', nsprefix='wp',\n", " attributes={'cx': width, 'cy': height})\n", " inline = makeelement('inline', attributes={'distT': \"0\", 'distB': \"0\",\n", " 'distL': \"0\", 'distR': \"0\"},\n", " nsprefix='wp')\n", " inline.append(extent)\n", " inline.append(effectextent)\n", " inline.append(docpr)\n", " inline.append(framepr)\n", " inline.append(graphic)\n", " drawing = makeelement('drawing')\n", " drawing.append(inline)\n", " run = makeelement('r')\n", " run.append(drawing)\n", " paragraph = makeelement('p')\n", " paragraph.append(run)\n", "\n", " if imagefiledict is not None:\n", " return relationshiplist, paragraph, imagefiledict\n", " else:\n", " return relationshiplist, paragraph" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.08333333333333333, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.023809523809523808 ]
181
0.000592
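
Note on the aggregate fields of this record: the stored avg_score (0.000592) appears to be the arithmetic mean of the per-line scores over num_lines, i.e. (0.08333... + 0.02380...) / 181. That relationship is an observation from the record itself, not a documented definition of the column; the sketch below, using an invented helper name and variable names that mirror the dataset columns, only illustrates that assumed computation.

# Illustrative sketch only: assumes avg_score == sum(scores) / num_lines.
# The field names mirror the dataset columns; compute_avg_score is a
# hypothetical helper, not part of any documented pipeline.
def compute_avg_score(scores, num_lines):
    # An empty record would divide by zero, so guard explicitly.
    if num_lines == 0:
        return 0.0
    return sum(scores) / num_lines

# The two nonzero per-line scores of this record reproduce the stored value;
# the remaining 179 lines all score 0 and contribute nothing to the sum.
nonzero_scores = [0.08333333333333333, 0.023809523809523808]
print(round(compute_avg_score(nonzero_scores, 181), 6))  # 0.000592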