column         dtype      range
text           string     lengths 75 to 104k
code_tokens    sequence
avg_line_len   float64    7.91 to 980
score          float64    0 to 0.18
texts          sequence
scores         sequence
num_lines      int64      3 to 2.77k
avg_score      float64    0 to 0.37
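The records below follow this schema: each holds one Python function as a single string (text), its token sequence (code_tokens), the same function split into lines (texts) with one score per line (scores), and the aggregate fields avg_line_len, score, num_lines and avg_score. A minimal sketch of loading and inspecting a dataset with this layout using the Hugging Face datasets library; the dataset path below is a placeholder, not the actual dataset identifier.

# Minimal sketch: load a dataset with the schema above and look at one record.
# Assumes the Hugging Face `datasets` library; "user/python-line-scores" is a
# placeholder path, not the real dataset name.
from datasets import load_dataset

ds = load_dataset("user/python-line-scores", split="train")
row = ds[0]
print(row["num_lines"], row["avg_line_len"], row["score"], row["avg_score"])
for line, s in zip(row["texts"], row["scores"]):
    print(f"{s:8.4f}  {line.rstrip()}")  # per-line score next to the source line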
def merge(self, other, reference_seq): '''Tries to merge this VcfRecord with other VcfRecord. Simple example (working in 0-based coords): ref = ACGT var1 = SNP at position 1, C->G var2 = SNP at position 3, T->A then this returns new variant, position=1, REF=CGT, ALT=GGA. If there is any kind of conflict, eg two SNPs in same position, then returns None. Also assumes there is only one ALT, otherwise returns None.''' if self.CHROM != other.CHROM or self.intersects(other) or len(self.ALT) != 1 or len(other.ALT) != 1: return None ref_start = min(self.POS, other.POS) ref_end = max(self.ref_end_pos(), other.ref_end_pos()) ref_seq_for_vcf = reference_seq[ref_start:ref_end + 1] sorted_records = sorted([self, other], key=operator.attrgetter('POS')) alt_seq = [] gt_confs = [] current_ref_pos = ref_start for record in sorted_records: assert record.REF != '.' and record.ALT[0] != '.' alt_seq.append(reference_seq[current_ref_pos:record.POS]) alt_seq.append(record.ALT[0]) current_ref_pos += len(record.REF) if record.FORMAT is not None and 'GT_CONF' in record.FORMAT: gt_confs.append(record.FORMAT['GT_CONF']) gt_conf = 0 format = "GT" gt_1 = '1/1' if len(gt_confs) > 0: gt_conf = min(gt_confs) format = 'GT:GT_CONF' gt_1 = '1/1:' + str(gt_conf) return VcfRecord('\t'.join([ self.CHROM, str(ref_start + 1), '.', ref_seq_for_vcf, ''.join(alt_seq), '.', '.', 'SVTYPE=MERGED', format, gt_1, ]))
[ "def", "merge", "(", "self", ",", "other", ",", "reference_seq", ")", ":", "if", "self", ".", "CHROM", "!=", "other", ".", "CHROM", "or", "self", ".", "intersects", "(", "other", ")", "or", "len", "(", "self", ".", "ALT", ")", "!=", "1", "or", "len", "(", "other", ".", "ALT", ")", "!=", "1", ":", "return", "None", "ref_start", "=", "min", "(", "self", ".", "POS", ",", "other", ".", "POS", ")", "ref_end", "=", "max", "(", "self", ".", "ref_end_pos", "(", ")", ",", "other", ".", "ref_end_pos", "(", ")", ")", "ref_seq_for_vcf", "=", "reference_seq", "[", "ref_start", ":", "ref_end", "+", "1", "]", "sorted_records", "=", "sorted", "(", "[", "self", ",", "other", "]", ",", "key", "=", "operator", ".", "attrgetter", "(", "'POS'", ")", ")", "alt_seq", "=", "[", "]", "gt_confs", "=", "[", "]", "current_ref_pos", "=", "ref_start", "for", "record", "in", "sorted_records", ":", "assert", "record", ".", "REF", "!=", "'.'", "and", "record", ".", "ALT", "[", "0", "]", "!=", "'.'", "alt_seq", ".", "append", "(", "reference_seq", "[", "current_ref_pos", ":", "record", ".", "POS", "]", ")", "alt_seq", ".", "append", "(", "record", ".", "ALT", "[", "0", "]", ")", "current_ref_pos", "+=", "len", "(", "record", ".", "REF", ")", "if", "record", ".", "FORMAT", "is", "not", "None", "and", "'GT_CONF'", "in", "record", ".", "FORMAT", ":", "gt_confs", ".", "append", "(", "record", ".", "FORMAT", "[", "'GT_CONF'", "]", ")", "gt_conf", "=", "0", "format", "=", "\"GT\"", "gt_1", "=", "'1/1'", "if", "len", "(", "gt_confs", ")", ">", "0", ":", "gt_conf", "=", "min", "(", "gt_confs", ")", "format", "=", "'GT:GT_CONF'", "gt_1", "=", "'1/1:'", "+", "str", "(", "gt_conf", ")", "return", "VcfRecord", "(", "'\\t'", ".", "join", "(", "[", "self", ".", "CHROM", ",", "str", "(", "ref_start", "+", "1", ")", ",", "'.'", ",", "ref_seq_for_vcf", ",", "''", ".", "join", "(", "alt_seq", ")", ",", "'.'", ",", "'.'", ",", "'SVTYPE=MERGED'", ",", "format", ",", "gt_1", ",", "]", ")", ")" ]
avg_line_len: 37.234043
score: 0.00167
[ "def merge(self, other, reference_seq):\n", " '''Tries to merge this VcfRecord with other VcfRecord.\n", " Simple example (working in 0-based coords):\n", " ref = ACGT\n", " var1 = SNP at position 1, C->G\n", " var2 = SNP at position 3, T->A\n", " then this returns new variant, position=1, REF=CGT, ALT=GGA.\n", "\n", " If there is any kind of conflict, eg two SNPs in same position, then\n", " returns None.\n", " Also assumes there is only one ALT, otherwise returns None.'''\n", " if self.CHROM != other.CHROM or self.intersects(other) or len(self.ALT) != 1 or len(other.ALT) != 1:\n", " return None\n", "\n", " ref_start = min(self.POS, other.POS)\n", " ref_end = max(self.ref_end_pos(), other.ref_end_pos())\n", " ref_seq_for_vcf = reference_seq[ref_start:ref_end + 1]\n", " sorted_records = sorted([self, other], key=operator.attrgetter('POS'))\n", " alt_seq = []\n", " gt_confs = []\n", " current_ref_pos = ref_start\n", "\n", " for record in sorted_records:\n", " assert record.REF != '.' and record.ALT[0] != '.'\n", " alt_seq.append(reference_seq[current_ref_pos:record.POS])\n", " alt_seq.append(record.ALT[0])\n", " current_ref_pos += len(record.REF)\n", " if record.FORMAT is not None and 'GT_CONF' in record.FORMAT:\n", " gt_confs.append(record.FORMAT['GT_CONF'])\n", "\n", " gt_conf = 0\n", " format = \"GT\"\n", " gt_1 = '1/1'\n", " if len(gt_confs) > 0:\n", " gt_conf = min(gt_confs)\n", " format = 'GT:GT_CONF'\n", " gt_1 = '1/1:' + str(gt_conf)\n", "\n", " return VcfRecord('\\t'.join([\n", " self.CHROM,\n", " str(ref_start + 1),\n", " '.',\n", " ref_seq_for_vcf,\n", " ''.join(alt_seq),\n", " '.', '.', 'SVTYPE=MERGED',\n", " format, gt_1,\n", " ]))" ]
[ 0, 0.015873015873015872, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.009174311926605505, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.09090909090909091 ]
num_lines: 47
avg_score: 0.002467
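For this first record the aggregate columns appear to be per-line means: averaging the entries of scores over the 47 lines gives the listed avg_score of 0.002467, and avg_line_len is consistent with the mean character length of the lines in texts. A small consistency check, written as a sketch since these definitions are inferred from the preview rather than documented.

# Sketch: check that a record's aggregate columns look like per-line means.
# `row` is assumed to be one record with the columns from the schema above;
# the definitions are inferred from this preview, not taken from documentation.
def check_aggregates(row):
    n = row["num_lines"]
    mean_score = sum(row["scores"]) / n
    mean_len = sum(len(line.rstrip("\n")) for line in row["texts"]) / n
    return mean_score, mean_len  # expected to be close to avg_score and avg_line_len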
def rstrip(self, chars=None): """ Like str.rstrip, except it returns the Colr instance. """ return self.__class__( self._str_strip('rstrip', chars), no_closing=chars and (closing_code in chars), )
[ "def", "rstrip", "(", "self", ",", "chars", "=", "None", ")", ":", "return", "self", ".", "__class__", "(", "self", ".", "_str_strip", "(", "'rstrip'", ",", "chars", ")", ",", "no_closing", "=", "chars", "and", "(", "closing_code", "in", "chars", ")", ",", ")" ]
avg_line_len: 39.833333
score: 0.008197
[ "def rstrip(self, chars=None):\n", " \"\"\" Like str.rstrip, except it returns the Colr instance. \"\"\"\n", " return self.__class__(\n", " self._str_strip('rstrip', chars),\n", " no_closing=chars and (closing_code in chars),\n", " )" ]
[ 0, 0.014285714285714285, 0, 0, 0, 0.1111111111111111 ]
num_lines: 6
avg_score: 0.020899
def suppress_keyboard_interrupt_message(): """Register a new excepthook to suppress KeyboardInterrupt exception messages, and exit with status code 130. """ old_excepthook = sys.excepthook def new_hook(type, value, traceback): if type != KeyboardInterrupt: old_excepthook(type, value, traceback) else: sys.exit(130) sys.excepthook = new_hook
[ "def", "suppress_keyboard_interrupt_message", "(", ")", ":", "old_excepthook", "=", "sys", ".", "excepthook", "def", "new_hook", "(", "type", ",", "value", ",", "traceback", ")", ":", "if", "type", "!=", "KeyboardInterrupt", ":", "old_excepthook", "(", "type", ",", "value", ",", "traceback", ")", "else", ":", "sys", ".", "exit", "(", "130", ")", "sys", ".", "excepthook", "=", "new_hook" ]
avg_line_len: 28.214286
score: 0.002451
[ "def suppress_keyboard_interrupt_message():\n", " \"\"\"Register a new excepthook to suppress KeyboardInterrupt\n", " exception messages, and exit with status code 130.\n", "\n", " \"\"\"\n", " old_excepthook = sys.excepthook\n", "\n", " def new_hook(type, value, traceback):\n", " if type != KeyboardInterrupt:\n", " old_excepthook(type, value, traceback)\n", " else:\n", " sys.exit(130)\n", "\n", " sys.excepthook = new_hook" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.034482758620689655 ]
num_lines: 14
avg_score: 0.002463
def _align_method_FRAME(left, right, axis): """ convert rhs to meet lhs dims if input is list, tuple or np.ndarray """ def to_series(right): msg = ('Unable to coerce to Series, length must be {req_len}: ' 'given {given_len}') if axis is not None and left._get_axis_name(axis) == 'index': if len(left.index) != len(right): raise ValueError(msg.format(req_len=len(left.index), given_len=len(right))) right = left._constructor_sliced(right, index=left.index) else: if len(left.columns) != len(right): raise ValueError(msg.format(req_len=len(left.columns), given_len=len(right))) right = left._constructor_sliced(right, index=left.columns) return right if isinstance(right, np.ndarray): if right.ndim == 1: right = to_series(right) elif right.ndim == 2: if right.shape == left.shape: right = left._constructor(right, index=left.index, columns=left.columns) elif right.shape[0] == left.shape[0] and right.shape[1] == 1: # Broadcast across columns right = np.broadcast_to(right, left.shape) right = left._constructor(right, index=left.index, columns=left.columns) elif right.shape[1] == left.shape[1] and right.shape[0] == 1: # Broadcast along rows right = to_series(right[0, :]) else: raise ValueError("Unable to coerce to DataFrame, shape " "must be {req_shape}: given {given_shape}" .format(req_shape=left.shape, given_shape=right.shape)) elif right.ndim > 2: raise ValueError('Unable to coerce to Series/DataFrame, dim ' 'must be <= 2: {dim}'.format(dim=right.shape)) elif (is_list_like(right) and not isinstance(right, (ABCSeries, ABCDataFrame))): # GH17901 right = to_series(right) return right
[ "def", "_align_method_FRAME", "(", "left", ",", "right", ",", "axis", ")", ":", "def", "to_series", "(", "right", ")", ":", "msg", "=", "(", "'Unable to coerce to Series, length must be {req_len}: '", "'given {given_len}'", ")", "if", "axis", "is", "not", "None", "and", "left", ".", "_get_axis_name", "(", "axis", ")", "==", "'index'", ":", "if", "len", "(", "left", ".", "index", ")", "!=", "len", "(", "right", ")", ":", "raise", "ValueError", "(", "msg", ".", "format", "(", "req_len", "=", "len", "(", "left", ".", "index", ")", ",", "given_len", "=", "len", "(", "right", ")", ")", ")", "right", "=", "left", ".", "_constructor_sliced", "(", "right", ",", "index", "=", "left", ".", "index", ")", "else", ":", "if", "len", "(", "left", ".", "columns", ")", "!=", "len", "(", "right", ")", ":", "raise", "ValueError", "(", "msg", ".", "format", "(", "req_len", "=", "len", "(", "left", ".", "columns", ")", ",", "given_len", "=", "len", "(", "right", ")", ")", ")", "right", "=", "left", ".", "_constructor_sliced", "(", "right", ",", "index", "=", "left", ".", "columns", ")", "return", "right", "if", "isinstance", "(", "right", ",", "np", ".", "ndarray", ")", ":", "if", "right", ".", "ndim", "==", "1", ":", "right", "=", "to_series", "(", "right", ")", "elif", "right", ".", "ndim", "==", "2", ":", "if", "right", ".", "shape", "==", "left", ".", "shape", ":", "right", "=", "left", ".", "_constructor", "(", "right", ",", "index", "=", "left", ".", "index", ",", "columns", "=", "left", ".", "columns", ")", "elif", "right", ".", "shape", "[", "0", "]", "==", "left", ".", "shape", "[", "0", "]", "and", "right", ".", "shape", "[", "1", "]", "==", "1", ":", "# Broadcast across columns", "right", "=", "np", ".", "broadcast_to", "(", "right", ",", "left", ".", "shape", ")", "right", "=", "left", ".", "_constructor", "(", "right", ",", "index", "=", "left", ".", "index", ",", "columns", "=", "left", ".", "columns", ")", "elif", "right", ".", "shape", "[", "1", "]", "==", "left", ".", "shape", "[", "1", "]", "and", "right", ".", "shape", "[", "0", "]", "==", "1", ":", "# Broadcast along rows", "right", "=", "to_series", "(", "right", "[", "0", ",", ":", "]", ")", "else", ":", "raise", "ValueError", "(", "\"Unable to coerce to DataFrame, shape \"", "\"must be {req_shape}: given {given_shape}\"", ".", "format", "(", "req_shape", "=", "left", ".", "shape", ",", "given_shape", "=", "right", ".", "shape", ")", ")", "elif", "right", ".", "ndim", ">", "2", ":", "raise", "ValueError", "(", "'Unable to coerce to Series/DataFrame, dim '", "'must be <= 2: {dim}'", ".", "format", "(", "dim", "=", "right", ".", "shape", ")", ")", "elif", "(", "is_list_like", "(", "right", ")", "and", "not", "isinstance", "(", "right", ",", "(", "ABCSeries", ",", "ABCDataFrame", ")", ")", ")", ":", "# GH17901", "right", "=", "to_series", "(", "right", ")", "return", "right" ]
avg_line_len: 41.472727
score: 0.000428
[ "def _align_method_FRAME(left, right, axis):\n", " \"\"\" convert rhs to meet lhs dims if input is list, tuple or np.ndarray \"\"\"\n", "\n", " def to_series(right):\n", " msg = ('Unable to coerce to Series, length must be {req_len}: '\n", " 'given {given_len}')\n", " if axis is not None and left._get_axis_name(axis) == 'index':\n", " if len(left.index) != len(right):\n", " raise ValueError(msg.format(req_len=len(left.index),\n", " given_len=len(right)))\n", " right = left._constructor_sliced(right, index=left.index)\n", " else:\n", " if len(left.columns) != len(right):\n", " raise ValueError(msg.format(req_len=len(left.columns),\n", " given_len=len(right)))\n", " right = left._constructor_sliced(right, index=left.columns)\n", " return right\n", "\n", " if isinstance(right, np.ndarray):\n", "\n", " if right.ndim == 1:\n", " right = to_series(right)\n", "\n", " elif right.ndim == 2:\n", " if right.shape == left.shape:\n", " right = left._constructor(right, index=left.index,\n", " columns=left.columns)\n", "\n", " elif right.shape[0] == left.shape[0] and right.shape[1] == 1:\n", " # Broadcast across columns\n", " right = np.broadcast_to(right, left.shape)\n", " right = left._constructor(right,\n", " index=left.index,\n", " columns=left.columns)\n", "\n", " elif right.shape[1] == left.shape[1] and right.shape[0] == 1:\n", " # Broadcast along rows\n", " right = to_series(right[0, :])\n", "\n", " else:\n", " raise ValueError(\"Unable to coerce to DataFrame, shape \"\n", " \"must be {req_shape}: given {given_shape}\"\n", " .format(req_shape=left.shape,\n", " given_shape=right.shape))\n", "\n", " elif right.ndim > 2:\n", " raise ValueError('Unable to coerce to Series/DataFrame, dim '\n", " 'must be <= 2: {dim}'.format(dim=right.shape))\n", "\n", " elif (is_list_like(right) and\n", " not isinstance(right, (ABCSeries, ABCDataFrame))):\n", " # GH17901\n", " right = to_series(right)\n", "\n", " return right" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0625 ]
num_lines: 55
avg_score: 0.001136
def clean(self): """Routine to return C/NOFS IVM data cleaned to the specified level Parameters ----------- inst : (pysat.Instrument) Instrument class object, whose attribute clean_level is used to return the desired level of data selectivity. Returns -------- Void : (NoneType) data in inst is modified in-place. Notes -------- Supports 'clean', 'dusty', 'dirty' """ # cleans cindi data if self.clean_level == 'clean': # choose areas below 550km # self.data = self.data[self.data.alt <= 550] idx, = np.where(self.data.altitude <= 550) self.data = self[idx,:] # make sure all -999999 values are NaN self.data.replace(-999999., np.nan, inplace=True) if (self.clean_level == 'clean') | (self.clean_level == 'dusty'): try: idx, = np.where(np.abs(self.data.ionVelmeridional) < 10000.) self.data = self[idx,:] except AttributeError: pass if self.clean_level == 'dusty': # take out all values where RPA data quality is > 1 idx, = np.where(self.data.RPAflag <= 1) self.data = self[idx,:] # IDM quality flags self.data = self.data[ (self.data.driftMeterflag<= 3) ] else: # take out all values where RPA data quality is > 0 idx, = np.where(self.data.RPAflag <= 0) self.data = self[idx,:] # IDM quality flags self.data = self.data[ (self.data.driftMeterflag<= 0) ] if self.clean_level == 'dirty': # take out all values where RPA data quality is > 4 idx, = np.where(self.data.RPAflag <= 4) self.data = self[idx,:] # IDM quality flags self.data = self.data[ (self.data.driftMeterflag<= 6) ] # basic quality check on drifts and don't let UTS go above 86400. idx, = np.where(self.data.time <= 86400.) self.data = self[idx,:] # make sure MLT is between 0 and 24 idx, = np.where((self.data.mlt >= 0) & (self.data.mlt <= 24.)) self.data = self[idx,:] return
[ "def", "clean", "(", "self", ")", ":", "# cleans cindi data", "if", "self", ".", "clean_level", "==", "'clean'", ":", "# choose areas below 550km", "# self.data = self.data[self.data.alt <= 550]", "idx", ",", "=", "np", ".", "where", "(", "self", ".", "data", ".", "altitude", "<=", "550", ")", "self", ".", "data", "=", "self", "[", "idx", ",", ":", "]", "# make sure all -999999 values are NaN", "self", ".", "data", ".", "replace", "(", "-", "999999.", ",", "np", ".", "nan", ",", "inplace", "=", "True", ")", "if", "(", "self", ".", "clean_level", "==", "'clean'", ")", "|", "(", "self", ".", "clean_level", "==", "'dusty'", ")", ":", "try", ":", "idx", ",", "=", "np", ".", "where", "(", "np", ".", "abs", "(", "self", ".", "data", ".", "ionVelmeridional", ")", "<", "10000.", ")", "self", ".", "data", "=", "self", "[", "idx", ",", ":", "]", "except", "AttributeError", ":", "pass", "if", "self", ".", "clean_level", "==", "'dusty'", ":", "# take out all values where RPA data quality is > 1", "idx", ",", "=", "np", ".", "where", "(", "self", ".", "data", ".", "RPAflag", "<=", "1", ")", "self", ".", "data", "=", "self", "[", "idx", ",", ":", "]", "# IDM quality flags", "self", ".", "data", "=", "self", ".", "data", "[", "(", "self", ".", "data", ".", "driftMeterflag", "<=", "3", ")", "]", "else", ":", "# take out all values where RPA data quality is > 0", "idx", ",", "=", "np", ".", "where", "(", "self", ".", "data", ".", "RPAflag", "<=", "0", ")", "self", ".", "data", "=", "self", "[", "idx", ",", ":", "]", "# IDM quality flags", "self", ".", "data", "=", "self", ".", "data", "[", "(", "self", ".", "data", ".", "driftMeterflag", "<=", "0", ")", "]", "if", "self", ".", "clean_level", "==", "'dirty'", ":", "# take out all values where RPA data quality is > 4", "idx", ",", "=", "np", ".", "where", "(", "self", ".", "data", ".", "RPAflag", "<=", "4", ")", "self", ".", "data", "=", "self", "[", "idx", ",", ":", "]", "# IDM quality flags", "self", ".", "data", "=", "self", ".", "data", "[", "(", "self", ".", "data", ".", "driftMeterflag", "<=", "6", ")", "]", "# basic quality check on drifts and don't let UTS go above 86400.", "idx", ",", "=", "np", ".", "where", "(", "self", ".", "data", ".", "time", "<=", "86400.", ")", "self", ".", "data", "=", "self", "[", "idx", ",", ":", "]", "# make sure MLT is between 0 and 24", "idx", ",", "=", "np", ".", "where", "(", "(", "self", ".", "data", ".", "mlt", ">=", "0", ")", "&", "(", "self", ".", "data", ".", "mlt", "<=", "24.", ")", ")", "self", ".", "data", "=", "self", "[", "idx", ",", ":", "]", "return" ]
avg_line_len: 32.75
score: 0.010653
[ "def clean(self):\n", " \"\"\"Routine to return C/NOFS IVM data cleaned to the specified level\n", "\n", " Parameters\n", " -----------\n", " inst : (pysat.Instrument)\n", " Instrument class object, whose attribute clean_level is used to return\n", " the desired level of data selectivity.\n", "\n", " Returns\n", " --------\n", " Void : (NoneType)\n", " data in inst is modified in-place.\n", "\n", " Notes\n", " --------\n", " Supports 'clean', 'dusty', 'dirty'\n", " \n", " \"\"\"\n", "\n", " # cleans cindi data\n", " if self.clean_level == 'clean':\n", " # choose areas below 550km\n", " # self.data = self.data[self.data.alt <= 550]\n", " idx, = np.where(self.data.altitude <= 550)\n", " self.data = self[idx,:]\n", " \n", " # make sure all -999999 values are NaN\n", " self.data.replace(-999999., np.nan, inplace=True)\n", "\n", " if (self.clean_level == 'clean') | (self.clean_level == 'dusty'):\n", " try:\n", " idx, = np.where(np.abs(self.data.ionVelmeridional) < 10000.)\n", " self.data = self[idx,:]\n", " except AttributeError:\n", " pass\n", " \n", " if self.clean_level == 'dusty':\n", " # take out all values where RPA data quality is > 1\n", " idx, = np.where(self.data.RPAflag <= 1)\n", " self.data = self[idx,:]\n", " # IDM quality flags\n", " self.data = self.data[ (self.data.driftMeterflag<= 3) ]\n", " else:\n", " # take out all values where RPA data quality is > 0\n", " idx, = np.where(self.data.RPAflag <= 0)\n", " self.data = self[idx,:] \n", " # IDM quality flags\n", " self.data = self.data[ (self.data.driftMeterflag<= 0) ]\n", " if self.clean_level == 'dirty':\n", " # take out all values where RPA data quality is > 4\n", " idx, = np.where(self.data.RPAflag <= 4)\n", " self.data = self[idx,:]\n", " # IDM quality flags\n", " self.data = self.data[ (self.data.driftMeterflag<= 6) ]\n", " \n", " # basic quality check on drifts and don't let UTS go above 86400.\n", " idx, = np.where(self.data.time <= 86400.)\n", " self.data = self[idx,:]\n", " \n", " # make sure MLT is between 0 and 24\n", " idx, = np.where((self.data.mlt >= 0) & (self.data.mlt <= 24.))\n", " self.data = self[idx,:]\n", " return" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 0, 0, 0, 0, 0, 0.03125, 0.2, 0, 0, 0, 0, 0, 0, 0.027777777777777776, 0, 0, 0.1111111111111111, 0, 0, 0, 0.027777777777777776, 0, 0.04411764705882353, 0, 0, 0, 0.05405405405405406, 0, 0.04411764705882353, 0, 0, 0, 0.03125, 0, 0.046875, 0.1111111111111111, 0, 0, 0.03571428571428571, 0.2, 0, 0, 0.03571428571428571, 0.1 ]
num_lines: 64
avg_score: 0.020326
def elapsed(self): """ Elapsed time [µs] between start and stop timestamps. If stop is empty then returned time is difference between start and current timestamp. """ if self._stop is None: return timer() - self._start return self._stop - self._start
[ "def", "elapsed", "(", "self", ")", ":", "if", "self", ".", "_stop", "is", "None", ":", "return", "timer", "(", ")", "-", "self", ".", "_start", "return", "self", ".", "_stop", "-", "self", ".", "_start" ]
avg_line_len: 37.875
score: 0.009677
[ "def elapsed(self):\n", " \"\"\"\n", " Elapsed time [µs] between start and stop timestamps. If stop is empty then\n", " returned time is difference between start and current timestamp.\n", " \"\"\"\n", " if self._stop is None:\n", " return timer() - self._start\n", " return self._stop - self._start" ]
[ 0, 0.08333333333333333, 0.012048192771084338, 0, 0, 0, 0, 0.02564102564102564 ]
num_lines: 8
avg_score: 0.015128
def convert_to_G(self, word): """ Given a size such as '2333M', return the converted value in G """ value = 0.0 if word[-1] == 'G' or word[-1] == 'g': value = float(word[:-1]) elif word[-1] == 'M' or word[-1] == 'm': value = float(word[:-1]) / 1000.0 elif word[-1] == 'K' or word[-1] == 'k': value = float(word[:-1]) / 1000.0 / 1000.0 else: # No unit value = float(word) / 1000.0 / 1000.0 / 1000.0 return str(value)
[ "def", "convert_to_G", "(", "self", ",", "word", ")", ":", "value", "=", "0.0", "if", "word", "[", "-", "1", "]", "==", "'G'", "or", "word", "[", "-", "1", "]", "==", "'g'", ":", "value", "=", "float", "(", "word", "[", ":", "-", "1", "]", ")", "elif", "word", "[", "-", "1", "]", "==", "'M'", "or", "word", "[", "-", "1", "]", "==", "'m'", ":", "value", "=", "float", "(", "word", "[", ":", "-", "1", "]", ")", "/", "1000.0", "elif", "word", "[", "-", "1", "]", "==", "'K'", "or", "word", "[", "-", "1", "]", "==", "'k'", ":", "value", "=", "float", "(", "word", "[", ":", "-", "1", "]", ")", "/", "1000.0", "/", "1000.0", "else", ":", "# No unit", "value", "=", "float", "(", "word", ")", "/", "1000.0", "/", "1000.0", "/", "1000.0", "return", "str", "(", "value", ")" ]
avg_line_len: 33.071429
score: 0.010504
[ "def convert_to_G(self, word):\n", " \"\"\"\n", " Given a size such as '2333M', return the converted value in G\n", " \"\"\"\n", " value = 0.0\n", " if word[-1] == 'G' or word[-1] == 'g':\n", " value = float(word[:-1])\n", " elif word[-1] == 'M' or word[-1] == 'm':\n", " value = float(word[:-1]) / 1000.0\n", " elif word[-1] == 'K' or word[-1] == 'k':\n", " value = float(word[:-1]) / 1000.0 / 1000.0\n", " else: # No unit\n", " value = float(word) / 1000.0 / 1000.0 / 1000.0\n", " return str(value)" ]
[ 0, 0, 0, 0, 0, 0, 0.03225806451612903, 0, 0.025, 0, 0.02040816326530612, 0, 0.018867924528301886, 0.047619047619047616 ]
num_lines: 14
avg_score: 0.010297
def argmin(input_, key=None): """ Returns index / key of the item with the smallest value. Args: input_ (dict or list): Note: a[argmin(a, key=key)] == min(a, key=key) """ # if isinstance(input_, dict): # return list(input_.keys())[argmin(list(input_.values()))] # elif hasattr(input_, 'index'): # return input_.index(min(input_)) # else: # return min(enumerate(input_), key=operator.itemgetter(1))[0] if isinstance(input, dict): return list(input.keys())[argmin(list(input.values()), key=key)] else: if key is None: def _key(item): return item[1] else: def _key(item): return key(item[1]) return min(enumerate(input), key=_key)[0]
[ "def", "argmin", "(", "input_", ",", "key", "=", "None", ")", ":", "# if isinstance(input_, dict):", "# return list(input_.keys())[argmin(list(input_.values()))]", "# elif hasattr(input_, 'index'):", "# return input_.index(min(input_))", "# else:", "# return min(enumerate(input_), key=operator.itemgetter(1))[0]", "if", "isinstance", "(", "input", ",", "dict", ")", ":", "return", "list", "(", "input", ".", "keys", "(", ")", ")", "[", "argmin", "(", "list", "(", "input", ".", "values", "(", ")", ")", ",", "key", "=", "key", ")", "]", "else", ":", "if", "key", "is", "None", ":", "def", "_key", "(", "item", ")", ":", "return", "item", "[", "1", "]", "else", ":", "def", "_key", "(", "item", ")", ":", "return", "key", "(", "item", "[", "1", "]", ")", "return", "min", "(", "enumerate", "(", "input", ")", ",", "key", "=", "_key", ")", "[", "0", "]" ]
avg_line_len: 29.807692
score: 0.00125
[ "def argmin(input_, key=None):\n", " \"\"\"\n", " Returns index / key of the item with the smallest value.\n", "\n", " Args:\n", " input_ (dict or list):\n", "\n", " Note:\n", " a[argmin(a, key=key)] == min(a, key=key)\n", " \"\"\"\n", " # if isinstance(input_, dict):\n", " # return list(input_.keys())[argmin(list(input_.values()))]\n", " # elif hasattr(input_, 'index'):\n", " # return input_.index(min(input_))\n", " # else:\n", " # return min(enumerate(input_), key=operator.itemgetter(1))[0]\n", " if isinstance(input, dict):\n", " return list(input.keys())[argmin(list(input.values()), key=key)]\n", " else:\n", " if key is None:\n", " def _key(item):\n", " return item[1]\n", " else:\n", " def _key(item):\n", " return key(item[1])\n", " return min(enumerate(input), key=_key)[0]" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.02040816326530612 ]
num_lines: 26
avg_score: 0.000785
def isBool(self, type): """ is the type a boolean value? :param type: PKCS#11 type like `CKA_ALWAYS_SENSITIVE` :rtype: bool """ if type in (CKA_ALWAYS_SENSITIVE, CKA_DECRYPT, CKA_DERIVE, CKA_ENCRYPT, CKA_EXTRACTABLE, CKA_HAS_RESET, CKA_LOCAL, CKA_MODIFIABLE, CKA_NEVER_EXTRACTABLE, CKA_PRIVATE, CKA_RESET_ON_INIT, CKA_SECONDARY_AUTH, CKA_SENSITIVE, CKA_SIGN, CKA_SIGN_RECOVER, CKA_TOKEN, CKA_TRUSTED, CKA_UNWRAP, CKA_VERIFY, CKA_VERIFY_RECOVER, CKA_WRAP, CKA_WRAP_WITH_TRUSTED): return True return False
[ "def", "isBool", "(", "self", ",", "type", ")", ":", "if", "type", "in", "(", "CKA_ALWAYS_SENSITIVE", ",", "CKA_DECRYPT", ",", "CKA_DERIVE", ",", "CKA_ENCRYPT", ",", "CKA_EXTRACTABLE", ",", "CKA_HAS_RESET", ",", "CKA_LOCAL", ",", "CKA_MODIFIABLE", ",", "CKA_NEVER_EXTRACTABLE", ",", "CKA_PRIVATE", ",", "CKA_RESET_ON_INIT", ",", "CKA_SECONDARY_AUTH", ",", "CKA_SENSITIVE", ",", "CKA_SIGN", ",", "CKA_SIGN_RECOVER", ",", "CKA_TOKEN", ",", "CKA_TRUSTED", ",", "CKA_UNWRAP", ",", "CKA_VERIFY", ",", "CKA_VERIFY_RECOVER", ",", "CKA_WRAP", ",", "CKA_WRAP_WITH_TRUSTED", ")", ":", "return", "True", "return", "False" ]
avg_line_len: 31.032258
score: 0.002016
[ "def isBool(self, type):\n", " \"\"\"\n", " is the type a boolean value?\n", "\n", " :param type: PKCS#11 type like `CKA_ALWAYS_SENSITIVE`\n", " :rtype: bool\n", " \"\"\"\n", " if type in (CKA_ALWAYS_SENSITIVE,\n", " CKA_DECRYPT,\n", " CKA_DERIVE,\n", " CKA_ENCRYPT,\n", " CKA_EXTRACTABLE,\n", " CKA_HAS_RESET,\n", " CKA_LOCAL,\n", " CKA_MODIFIABLE,\n", " CKA_NEVER_EXTRACTABLE,\n", " CKA_PRIVATE,\n", " CKA_RESET_ON_INIT,\n", " CKA_SECONDARY_AUTH,\n", " CKA_SENSITIVE,\n", " CKA_SIGN,\n", " CKA_SIGN_RECOVER,\n", " CKA_TOKEN,\n", " CKA_TRUSTED,\n", " CKA_UNWRAP,\n", " CKA_VERIFY,\n", " CKA_VERIFY_RECOVER,\n", " CKA_WRAP,\n", " CKA_WRAP_WITH_TRUSTED):\n", " return True\n", " return False" ]
[ 0, 0.08333333333333333, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.05 ]
num_lines: 31
avg_score: 0.004301
def then(self, success=None, failure=None): """ This method takes two optional arguments. The first argument is used if the "self promise" is fulfilled and the other is used if the "self promise" is rejected. In either case, this method returns another promise that effectively represents the result of either the first of the second argument (in the case that the "self promise" is fulfilled or rejected, respectively). Each argument can be either: * None - Meaning no action is taken * A function - which will be called with either the value of the "self promise" or the reason for rejection of the "self promise". The function may return: * A value - which will be used to fulfill the promise returned by this method. * A promise - which, when fulfilled or rejected, will cascade its value or reason to the promise returned by this method. * A value - which will be assigned as either the value or the reason for the promise returned by this method when the "self promise" is either fulfilled or rejected, respectively. :type success: (object) -> object :type failure: (object) -> object :rtype : Promise """ ret = self.create_next() def callAndFulfill(v): """ A callback to be invoked if the "self promise" is fulfilled. """ try: if aplus._isFunction(success): ret.fulfill(success(v)) else: ret.fulfill(v) except Exception as e: Promise.last_exc_info = sys.exc_info() e.exc_info = sys.exc_info() ret.reject(e) def callAndReject(r): """ A callback to be invoked if the "self promise" is rejected. """ try: if aplus._isFunction(failure): ret.fulfill(failure(r)) else: ret.reject(r) except Exception as e: Promise.last_exc_info = sys.exc_info() e.exc_info = sys.exc_info() ret.reject(e) self.done(callAndFulfill, callAndReject) return ret
[ "def", "then", "(", "self", ",", "success", "=", "None", ",", "failure", "=", "None", ")", ":", "ret", "=", "self", ".", "create_next", "(", ")", "def", "callAndFulfill", "(", "v", ")", ":", "\"\"\"\n A callback to be invoked if the \"self promise\"\n is fulfilled.\n \"\"\"", "try", ":", "if", "aplus", ".", "_isFunction", "(", "success", ")", ":", "ret", ".", "fulfill", "(", "success", "(", "v", ")", ")", "else", ":", "ret", ".", "fulfill", "(", "v", ")", "except", "Exception", "as", "e", ":", "Promise", ".", "last_exc_info", "=", "sys", ".", "exc_info", "(", ")", "e", ".", "exc_info", "=", "sys", ".", "exc_info", "(", ")", "ret", ".", "reject", "(", "e", ")", "def", "callAndReject", "(", "r", ")", ":", "\"\"\"\n A callback to be invoked if the \"self promise\"\n is rejected.\n \"\"\"", "try", ":", "if", "aplus", ".", "_isFunction", "(", "failure", ")", ":", "ret", ".", "fulfill", "(", "failure", "(", "r", ")", ")", "else", ":", "ret", ".", "reject", "(", "r", ")", "except", "Exception", "as", "e", ":", "Promise", ".", "last_exc_info", "=", "sys", ".", "exc_info", "(", ")", "e", ".", "exc_info", "=", "sys", ".", "exc_info", "(", ")", "ret", ".", "reject", "(", "e", ")", "self", ".", "done", "(", "callAndFulfill", ",", "callAndReject", ")", "return", "ret" ]
avg_line_len: 37.640625
score: 0.000809
[ "def then(self, success=None, failure=None):\n", " \"\"\"\n", " This method takes two optional arguments. The first argument\n", " is used if the \"self promise\" is fulfilled and the other is\n", " used if the \"self promise\" is rejected. In either case, this\n", " method returns another promise that effectively represents\n", " the result of either the first of the second argument (in the\n", " case that the \"self promise\" is fulfilled or rejected,\n", " respectively).\n", "\n", " Each argument can be either:\n", " * None - Meaning no action is taken\n", " * A function - which will be called with either the value\n", " of the \"self promise\" or the reason for rejection of\n", " the \"self promise\". The function may return:\n", " * A value - which will be used to fulfill the promise\n", " returned by this method.\n", " * A promise - which, when fulfilled or rejected, will\n", " cascade its value or reason to the promise returned\n", " by this method.\n", " * A value - which will be assigned as either the value\n", " or the reason for the promise returned by this method\n", " when the \"self promise\" is either fulfilled or rejected,\n", " respectively.\n", "\n", " :type success: (object) -> object\n", " :type failure: (object) -> object\n", " :rtype : Promise\n", " \"\"\"\n", " ret = self.create_next()\n", "\n", " def callAndFulfill(v):\n", " \"\"\"\n", " A callback to be invoked if the \"self promise\"\n", " is fulfilled.\n", " \"\"\"\n", " try:\n", " if aplus._isFunction(success):\n", " ret.fulfill(success(v))\n", " else:\n", " ret.fulfill(v)\n", " except Exception as e:\n", " Promise.last_exc_info = sys.exc_info()\n", " e.exc_info = sys.exc_info()\n", " ret.reject(e)\n", "\n", " def callAndReject(r):\n", " \"\"\"\n", " A callback to be invoked if the \"self promise\"\n", " is rejected.\n", " \"\"\"\n", " try:\n", " if aplus._isFunction(failure):\n", " ret.fulfill(failure(r))\n", " else:\n", " ret.reject(r)\n", " except Exception as e:\n", " Promise.last_exc_info = sys.exc_info()\n", " e.exc_info = sys.exc_info()\n", " ret.reject(e)\n", "\n", " self.done(callAndFulfill, callAndReject)\n", "\n", " return ret" ]
[ 0, 0.08333333333333333, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.05555555555555555 ]
num_lines: 64
avg_score: 0.00217
def get_reports(): """ Returns energy data from 1960 to 2014 across various factors. """ if False: # If there was a Test version of this method, it would go here. But alas. pass else: rows = _Constants._DATABASE.execute("SELECT data FROM energy".format( hardware=_Constants._HARDWARE)) data = [r[0] for r in rows] data = [_Auxiliary._byteify(_json.loads(r)) for r in data] return _Auxiliary._byteify(data)
[ "def", "get_reports", "(", ")", ":", "if", "False", ":", "# If there was a Test version of this method, it would go here. But alas.", "pass", "else", ":", "rows", "=", "_Constants", ".", "_DATABASE", ".", "execute", "(", "\"SELECT data FROM energy\"", ".", "format", "(", "hardware", "=", "_Constants", ".", "_HARDWARE", ")", ")", "data", "=", "[", "r", "[", "0", "]", "for", "r", "in", "rows", "]", "data", "=", "[", "_Auxiliary", ".", "_byteify", "(", "_json", ".", "loads", "(", "r", ")", ")", "for", "r", "in", "data", "]", "return", "_Auxiliary", ".", "_byteify", "(", "data", ")" ]
avg_line_len: 32.333333
score: 0.008016
[ "def get_reports():\n", " \"\"\"\n", " Returns energy data from 1960 to 2014 across various factors.\n", " \n", " \"\"\"\n", " if False:\n", " # If there was a Test version of this method, it would go here. But alas.\n", " pass\n", " else:\n", " rows = _Constants._DATABASE.execute(\"SELECT data FROM energy\".format(\n", " hardware=_Constants._HARDWARE))\n", " data = [r[0] for r in rows]\n", " data = [_Auxiliary._byteify(_json.loads(r)) for r in data]\n", " \n", " return _Auxiliary._byteify(data)" ]
[ 0, 0, 0, 0.2, 0, 0, 0.012195121951219513, 0, 0, 0, 0, 0, 0, 0.1111111111111111, 0.025 ]
num_lines: 15
avg_score: 0.02322
def get_local_file_dist(self): """ Handle importing from a source archive; this also uses setup_requires but points easy_install directly to the source archive. """ if not os.path.isfile(self.path): return log.info('Attempting to unpack and import astropy_helpers from ' '{0!r}'.format(self.path)) try: dist = self._do_download(find_links=[self.path]) except Exception as e: if DEBUG: raise log.warn( 'Failed to import {0} from the specified archive {1!r}: ' '{2}'.format(PACKAGE_NAME, self.path, str(e))) dist = None if dist is not None and self.auto_upgrade: # A version of astropy-helpers was found on the available path, but # check to see if a bugfix release is available on PyPI upgrade = self._do_upgrade(dist) if upgrade is not None: dist = upgrade return dist
[ "def", "get_local_file_dist", "(", "self", ")", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "self", ".", "path", ")", ":", "return", "log", ".", "info", "(", "'Attempting to unpack and import astropy_helpers from '", "'{0!r}'", ".", "format", "(", "self", ".", "path", ")", ")", "try", ":", "dist", "=", "self", ".", "_do_download", "(", "find_links", "=", "[", "self", ".", "path", "]", ")", "except", "Exception", "as", "e", ":", "if", "DEBUG", ":", "raise", "log", ".", "warn", "(", "'Failed to import {0} from the specified archive {1!r}: '", "'{2}'", ".", "format", "(", "PACKAGE_NAME", ",", "self", ".", "path", ",", "str", "(", "e", ")", ")", ")", "dist", "=", "None", "if", "dist", "is", "not", "None", "and", "self", ".", "auto_upgrade", ":", "# A version of astropy-helpers was found on the available path, but", "# check to see if a bugfix release is available on PyPI", "upgrade", "=", "self", ".", "_do_upgrade", "(", "dist", ")", "if", "upgrade", "is", "not", "None", ":", "dist", "=", "upgrade", "return", "dist" ]
avg_line_len: 32.677419
score: 0.001918
[ "def get_local_file_dist(self):\n", " \"\"\"\n", " Handle importing from a source archive; this also uses setup_requires\n", " but points easy_install directly to the source archive.\n", " \"\"\"\n", "\n", " if not os.path.isfile(self.path):\n", " return\n", "\n", " log.info('Attempting to unpack and import astropy_helpers from '\n", " '{0!r}'.format(self.path))\n", "\n", " try:\n", " dist = self._do_download(find_links=[self.path])\n", " except Exception as e:\n", " if DEBUG:\n", " raise\n", "\n", " log.warn(\n", " 'Failed to import {0} from the specified archive {1!r}: '\n", " '{2}'.format(PACKAGE_NAME, self.path, str(e)))\n", " dist = None\n", "\n", " if dist is not None and self.auto_upgrade:\n", " # A version of astropy-helpers was found on the available path, but\n", " # check to see if a bugfix release is available on PyPI\n", " upgrade = self._do_upgrade(dist)\n", " if upgrade is not None:\n", " dist = upgrade\n", "\n", " return dist" ]
[ 0, 0.08333333333333333, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.05263157894736842 ]
num_lines: 31
avg_score: 0.004386
def get_definition_location(self): """Returns a (module, lineno) tuple""" if self.lineno is None and self.assignments: self.lineno = self.assignments[0].get_lineno() return (self.module, self.lineno)
[ "def", "get_definition_location", "(", "self", ")", ":", "if", "self", ".", "lineno", "is", "None", "and", "self", ".", "assignments", ":", "self", ".", "lineno", "=", "self", ".", "assignments", "[", "0", "]", ".", "get_lineno", "(", ")", "return", "(", "self", ".", "module", ",", "self", ".", "lineno", ")" ]
avg_line_len: 46.2
score: 0.008511
[ "def get_definition_location(self):\n", " \"\"\"Returns a (module, lineno) tuple\"\"\"\n", " if self.lineno is None and self.assignments:\n", " self.lineno = self.assignments[0].get_lineno()\n", " return (self.module, self.lineno)" ]
[ 0, 0.02127659574468085, 0, 0, 0.024390243902439025 ]
num_lines: 5
avg_score: 0.009133
def _evaluate(self,x,return_indices = False): ''' Returns the level of the interpolated function at each value in x. Only called internally by HARKinterpolator1D.__call__ (etc). ''' return self._evalOrDer(x,True,False)[0]
[ "def", "_evaluate", "(", "self", ",", "x", ",", "return_indices", "=", "False", ")", ":", "return", "self", ".", "_evalOrDer", "(", "x", ",", "True", ",", "False", ")", "[", "0", "]" ]
avg_line_len: 42.833333
score: 0.034351
[ "def _evaluate(self,x,return_indices = False):\n", " '''\n", " Returns the level of the interpolated function at each value in x. Only\n", " called internally by HARKinterpolator1D.__call__ (etc).\n", " '''\n", " return self._evalOrDer(x,True,False)[0]" ]
[ 0.08695652173913043, 0.08333333333333333, 0.012345679012345678, 0, 0, 0.06382978723404255 ]
num_lines: 6
avg_score: 0.041078
def render_query(dataset, tables, select=None, conditions=None, groupings=None, having=None, order_by=None, limit=None): """Render a query that will run over the given tables using the specified parameters. Parameters ---------- dataset : str The BigQuery dataset to query data from tables : Union[dict, list] The table in `dataset` to query. select : dict, optional The keys function as column names and the values function as options to apply to the select field such as alias and format. For example, select['start_time'] might have the form {'alias': 'StartTime', 'format': 'INTEGER-FORMAT_UTC_USEC'}, which would be represented as 'SEC_TO_TIMESTAMP(INTEGER(start_time)) as StartTime' in a query. Pass `None` to select all. conditions : list, optional a ``list`` of ``dict`` objects to filter results by. Each dict should have the keys 'field', 'type', and 'comparators'. The first two map to strings representing the field (e.g. 'foo') and type (e.g. 'FLOAT'). 'comparators' maps to another ``dict`` containing the keys 'condition', 'negate', and 'value'. If 'comparators' = {'condition': '>=', 'negate': False, 'value': 1}, this example will be rendered as 'foo >= FLOAT('1')' in the query. ``list`` of field names to group by order_by : dict, optional Keys = {'field', 'direction'}. `dict` should be formatted as {'field':'TimeStamp, 'direction':'desc'} or similar limit : int, optional Limit the amount of data needed to be returned. Returns ------- str A rendered query """ if None in (dataset, tables): return None query = "%s %s %s %s %s %s %s" % ( _render_select(select), _render_sources(dataset, tables), _render_conditions(conditions), _render_groupings(groupings), _render_having(having), _render_order(order_by), _render_limit(limit) ) return query
[ "def", "render_query", "(", "dataset", ",", "tables", ",", "select", "=", "None", ",", "conditions", "=", "None", ",", "groupings", "=", "None", ",", "having", "=", "None", ",", "order_by", "=", "None", ",", "limit", "=", "None", ")", ":", "if", "None", "in", "(", "dataset", ",", "tables", ")", ":", "return", "None", "query", "=", "\"%s %s %s %s %s %s %s\"", "%", "(", "_render_select", "(", "select", ")", ",", "_render_sources", "(", "dataset", ",", "tables", ")", ",", "_render_conditions", "(", "conditions", ")", ",", "_render_groupings", "(", "groupings", ")", ",", "_render_having", "(", "having", ")", ",", "_render_order", "(", "order_by", ")", ",", "_render_limit", "(", "limit", ")", ")", "return", "query" ]
avg_line_len: 38.339623
score: 0.00048
[ "def render_query(dataset, tables, select=None, conditions=None,\n", " groupings=None, having=None, order_by=None, limit=None):\n", " \"\"\"Render a query that will run over the given tables using the specified\n", " parameters.\n", "\n", " Parameters\n", " ----------\n", " dataset : str\n", " The BigQuery dataset to query data from\n", " tables : Union[dict, list]\n", " The table in `dataset` to query.\n", " select : dict, optional\n", " The keys function as column names and the values function as options to\n", " apply to the select field such as alias and format. For example,\n", " select['start_time'] might have the form\n", " {'alias': 'StartTime', 'format': 'INTEGER-FORMAT_UTC_USEC'}, which\n", " would be represented as 'SEC_TO_TIMESTAMP(INTEGER(start_time)) as\n", " StartTime' in a query. Pass `None` to select all.\n", " conditions : list, optional\n", " a ``list`` of ``dict`` objects to filter results by. Each dict should\n", " have the keys 'field', 'type', and 'comparators'. The first two map to\n", " strings representing the field (e.g. 'foo') and type (e.g. 'FLOAT').\n", " 'comparators' maps to another ``dict`` containing the keys 'condition',\n", " 'negate', and 'value'.\n", " If 'comparators' = {'condition': '>=', 'negate': False, 'value': 1},\n", " this example will be rendered as 'foo >= FLOAT('1')' in the query.\n", " ``list`` of field names to group by\n", " order_by : dict, optional\n", " Keys = {'field', 'direction'}. `dict` should be formatted as\n", " {'field':'TimeStamp, 'direction':'desc'} or similar\n", " limit : int, optional\n", " Limit the amount of data needed to be returned.\n", "\n", " Returns\n", " -------\n", " str\n", " A rendered query\n", " \"\"\"\n", "\n", " if None in (dataset, tables):\n", " return None\n", "\n", " query = \"%s %s %s %s %s %s %s\" % (\n", " _render_select(select),\n", " _render_sources(dataset, tables),\n", " _render_conditions(conditions),\n", " _render_groupings(groupings),\n", " _render_having(having),\n", " _render_order(order_by),\n", " _render_limit(limit)\n", " )\n", "\n", " return query" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0625 ]
num_lines: 53
avg_score: 0.001179
def set_up_phase(self, training_info, model, source: Source): """ Prepare the phase for learning """ self._optimizer_instance = self.optimizer_factory.instantiate(model) self._source = source
[ "def", "set_up_phase", "(", "self", ",", "training_info", ",", "model", ",", "source", ":", "Source", ")", ":", "self", ".", "_optimizer_instance", "=", "self", ".", "optimizer_factory", ".", "instantiate", "(", "model", ")", "self", ".", "_source", "=", "source" ]
avg_line_len: 53
score: 0.009302
[ "def set_up_phase(self, training_info, model, source: Source):\n", " \"\"\" Prepare the phase for learning \"\"\"\n", " self._optimizer_instance = self.optimizer_factory.instantiate(model)\n", " self._source = source" ]
[ 0, 0.02127659574468085, 0, 0.034482758620689655 ]
num_lines: 4
avg_score: 0.01394
def del_password(name, root=None): ''' .. versionadded:: 2014.7.0 Delete the password from name user name User to delete root Directory to chroot into CLI Example: .. code-block:: bash salt '*' shadow.del_password username ''' cmd = ['passwd'] if root is not None: cmd.extend(('-R', root)) cmd.extend(('-d', name)) __salt__['cmd.run'](cmd, python_shell=False, output_loglevel='quiet') uinfo = info(name, root=root) return not uinfo['passwd'] and uinfo['name'] == name
[ "def", "del_password", "(", "name", ",", "root", "=", "None", ")", ":", "cmd", "=", "[", "'passwd'", "]", "if", "root", "is", "not", "None", ":", "cmd", ".", "extend", "(", "(", "'-R'", ",", "root", ")", ")", "cmd", ".", "extend", "(", "(", "'-d'", ",", "name", ")", ")", "__salt__", "[", "'cmd.run'", "]", "(", "cmd", ",", "python_shell", "=", "False", ",", "output_loglevel", "=", "'quiet'", ")", "uinfo", "=", "info", "(", "name", ",", "root", "=", "root", ")", "return", "not", "uinfo", "[", "'passwd'", "]", "and", "uinfo", "[", "'name'", "]", "==", "name" ]
avg_line_len: 20.653846
score: 0.001779
[ "def del_password(name, root=None):\n", " '''\n", " .. versionadded:: 2014.7.0\n", "\n", " Delete the password from name user\n", "\n", " name\n", " User to delete\n", "\n", " root\n", " Directory to chroot into\n", "\n", " CLI Example:\n", "\n", " .. code-block:: bash\n", "\n", " salt '*' shadow.del_password username\n", " '''\n", " cmd = ['passwd']\n", " if root is not None:\n", " cmd.extend(('-R', root))\n", " cmd.extend(('-d', name))\n", "\n", " __salt__['cmd.run'](cmd, python_shell=False, output_loglevel='quiet')\n", " uinfo = info(name, root=root)\n", " return not uinfo['passwd'] and uinfo['name'] == name" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.017857142857142856 ]
num_lines: 26
avg_score: 0.000687
def tau_from_final_mass_spin(final_mass, final_spin, l=2, m=2, nmodes=1): """Returns QNM damping time for the given mass and spin and mode. Parameters ---------- final_mass : float or array Mass of the black hole (in solar masses). final_spin : float or array Dimensionless spin of the final black hole. l : int or array, optional l-index of the harmonic. Default is 2. m : int or array, optional m-index of the harmonic. Default is 2. nmodes : int, optional The number of overtones to generate. Default is 1. Returns ------- float or array The damping time of the QNM(s), in seconds. If only a single mode is requested (and mass, spin, l, and m are not arrays), this will be a float. If multiple modes requested, will be an array with shape ``[input shape x] nmodes``, where ``input shape`` is the broadcasted shape of the inputs. """ return get_lm_f0tau(final_mass, final_spin, l, m, nmodes)[1]
[ "def", "tau_from_final_mass_spin", "(", "final_mass", ",", "final_spin", ",", "l", "=", "2", ",", "m", "=", "2", ",", "nmodes", "=", "1", ")", ":", "return", "get_lm_f0tau", "(", "final_mass", ",", "final_spin", ",", "l", ",", "m", ",", "nmodes", ")", "[", "1", "]" ]
avg_line_len: 38.653846
score: 0.001942
[ "def tau_from_final_mass_spin(final_mass, final_spin, l=2, m=2, nmodes=1):\n", " \"\"\"Returns QNM damping time for the given mass and spin and mode.\n", "\n", " Parameters\n", " ----------\n", " final_mass : float or array\n", " Mass of the black hole (in solar masses).\n", " final_spin : float or array\n", " Dimensionless spin of the final black hole.\n", " l : int or array, optional\n", " l-index of the harmonic. Default is 2.\n", " m : int or array, optional\n", " m-index of the harmonic. Default is 2.\n", " nmodes : int, optional\n", " The number of overtones to generate. Default is 1.\n", "\n", " Returns\n", " -------\n", " float or array\n", " The damping time of the QNM(s), in seconds. If only a single mode is\n", " requested (and mass, spin, l, and m are not arrays), this will be a\n", " float. If multiple modes requested, will be an array with shape\n", " ``[input shape x] nmodes``, where ``input shape`` is the broadcasted\n", " shape of the inputs.\n", " \"\"\"\n", " return get_lm_f0tau(final_mass, final_spin, l, m, nmodes)[1]" ]
[ 0.013513513513513514, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.015625 ]
num_lines: 26
avg_score: 0.001121
def get_requests_for_local_unit(relation_name=None): """Extract any certificates data targeted at this unit down relation_name. :param relation_name: str Name of relation to check for data. :returns: List of bundles of certificates. :rtype: List of dicts """ local_name = local_unit().replace('/', '_') raw_certs_key = '{}.processed_requests'.format(local_name) relation_name = relation_name or 'certificates' bundles = [] for rid in relation_ids(relation_name): for unit in related_units(rid): data = relation_get(rid=rid, unit=unit) if data.get(raw_certs_key): bundles.append({ 'ca': data['ca'], 'chain': data.get('chain'), 'certs': json.loads(data[raw_certs_key])}) return bundles
[ "def", "get_requests_for_local_unit", "(", "relation_name", "=", "None", ")", ":", "local_name", "=", "local_unit", "(", ")", ".", "replace", "(", "'/'", ",", "'_'", ")", "raw_certs_key", "=", "'{}.processed_requests'", ".", "format", "(", "local_name", ")", "relation_name", "=", "relation_name", "or", "'certificates'", "bundles", "=", "[", "]", "for", "rid", "in", "relation_ids", "(", "relation_name", ")", ":", "for", "unit", "in", "related_units", "(", "rid", ")", ":", "data", "=", "relation_get", "(", "rid", "=", "rid", ",", "unit", "=", "unit", ")", "if", "data", ".", "get", "(", "raw_certs_key", ")", ":", "bundles", ".", "append", "(", "{", "'ca'", ":", "data", "[", "'ca'", "]", ",", "'chain'", ":", "data", ".", "get", "(", "'chain'", ")", ",", "'certs'", ":", "json", ".", "loads", "(", "data", "[", "raw_certs_key", "]", ")", "}", ")", "return", "bundles" ]
avg_line_len: 40.85
score: 0.001196
[ "def get_requests_for_local_unit(relation_name=None):\n", " \"\"\"Extract any certificates data targeted at this unit down relation_name.\n", "\n", " :param relation_name: str Name of relation to check for data.\n", " :returns: List of bundles of certificates.\n", " :rtype: List of dicts\n", " \"\"\"\n", " local_name = local_unit().replace('/', '_')\n", " raw_certs_key = '{}.processed_requests'.format(local_name)\n", " relation_name = relation_name or 'certificates'\n", " bundles = []\n", " for rid in relation_ids(relation_name):\n", " for unit in related_units(rid):\n", " data = relation_get(rid=rid, unit=unit)\n", " if data.get(raw_certs_key):\n", " bundles.append({\n", " 'ca': data['ca'],\n", " 'chain': data.get('chain'),\n", " 'certs': json.loads(data[raw_certs_key])})\n", " return bundles" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.05555555555555555 ]
num_lines: 20
avg_score: 0.002778
def status(ctx): """Print a status of this Lambda function""" status = ctx.status() click.echo(click.style('Policy', bold=True)) if status['policy']: line = ' {} ({})'.format( status['policy']['PolicyName'], status['policy']['Arn']) click.echo(click.style(line, fg='green')) click.echo(click.style('Role', bold=True)) if status['role']: line = ' {} ({})'.format( status['role']['RoleName'], status['role']['Arn']) click.echo(click.style(line, fg='green')) click.echo(click.style('Function', bold=True)) if status['function']: line = ' {} ({})'.format( status['function']['Configuration']['FunctionName'], status['function']['Configuration']['FunctionArn']) click.echo(click.style(line, fg='green')) else: click.echo(click.style(' None', fg='green')) click.echo(click.style('Event Sources', bold=True)) if status['event_sources']: for event_source in status['event_sources']: if event_source: arn = event_source.get('EventSourceArn') state = event_source.get('State', 'Enabled') line = ' {}: {}'.format(arn, state) click.echo(click.style(line, fg='green')) else: click.echo(click.style(' None', fg='green'))
[ "def", "status", "(", "ctx", ")", ":", "status", "=", "ctx", ".", "status", "(", ")", "click", ".", "echo", "(", "click", ".", "style", "(", "'Policy'", ",", "bold", "=", "True", ")", ")", "if", "status", "[", "'policy'", "]", ":", "line", "=", "' {} ({})'", ".", "format", "(", "status", "[", "'policy'", "]", "[", "'PolicyName'", "]", ",", "status", "[", "'policy'", "]", "[", "'Arn'", "]", ")", "click", ".", "echo", "(", "click", ".", "style", "(", "line", ",", "fg", "=", "'green'", ")", ")", "click", ".", "echo", "(", "click", ".", "style", "(", "'Role'", ",", "bold", "=", "True", ")", ")", "if", "status", "[", "'role'", "]", ":", "line", "=", "' {} ({})'", ".", "format", "(", "status", "[", "'role'", "]", "[", "'RoleName'", "]", ",", "status", "[", "'role'", "]", "[", "'Arn'", "]", ")", "click", ".", "echo", "(", "click", ".", "style", "(", "line", ",", "fg", "=", "'green'", ")", ")", "click", ".", "echo", "(", "click", ".", "style", "(", "'Function'", ",", "bold", "=", "True", ")", ")", "if", "status", "[", "'function'", "]", ":", "line", "=", "' {} ({})'", ".", "format", "(", "status", "[", "'function'", "]", "[", "'Configuration'", "]", "[", "'FunctionName'", "]", ",", "status", "[", "'function'", "]", "[", "'Configuration'", "]", "[", "'FunctionArn'", "]", ")", "click", ".", "echo", "(", "click", ".", "style", "(", "line", ",", "fg", "=", "'green'", ")", ")", "else", ":", "click", ".", "echo", "(", "click", ".", "style", "(", "' None'", ",", "fg", "=", "'green'", ")", ")", "click", ".", "echo", "(", "click", ".", "style", "(", "'Event Sources'", ",", "bold", "=", "True", ")", ")", "if", "status", "[", "'event_sources'", "]", ":", "for", "event_source", "in", "status", "[", "'event_sources'", "]", ":", "if", "event_source", ":", "arn", "=", "event_source", ".", "get", "(", "'EventSourceArn'", ")", "state", "=", "event_source", ".", "get", "(", "'State'", ",", "'Enabled'", ")", "line", "=", "' {}: {}'", ".", "format", "(", "arn", ",", "state", ")", "click", ".", "echo", "(", "click", ".", "style", "(", "line", ",", "fg", "=", "'green'", ")", ")", "else", ":", "click", ".", "echo", "(", "click", ".", "style", "(", "' None'", ",", "fg", "=", "'green'", ")", ")" ]
avg_line_len: 41.69697
score: 0.00071
[ "def status(ctx):\n", " \"\"\"Print a status of this Lambda function\"\"\"\n", " status = ctx.status()\n", " click.echo(click.style('Policy', bold=True))\n", " if status['policy']:\n", " line = ' {} ({})'.format(\n", " status['policy']['PolicyName'],\n", " status['policy']['Arn'])\n", " click.echo(click.style(line, fg='green'))\n", " click.echo(click.style('Role', bold=True))\n", " if status['role']:\n", " line = ' {} ({})'.format(\n", " status['role']['RoleName'],\n", " status['role']['Arn'])\n", " click.echo(click.style(line, fg='green'))\n", " click.echo(click.style('Function', bold=True))\n", " if status['function']:\n", " line = ' {} ({})'.format(\n", " status['function']['Configuration']['FunctionName'],\n", " status['function']['Configuration']['FunctionArn'])\n", " click.echo(click.style(line, fg='green'))\n", " else:\n", " click.echo(click.style(' None', fg='green'))\n", " click.echo(click.style('Event Sources', bold=True))\n", " if status['event_sources']:\n", " for event_source in status['event_sources']:\n", " if event_source:\n", " arn = event_source.get('EventSourceArn')\n", " state = event_source.get('State', 'Enabled')\n", " line = ' {}: {}'.format(arn, state)\n", " click.echo(click.style(line, fg='green'))\n", " else:\n", " click.echo(click.style(' None', fg='green'))" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.015873015873015872 ]
num_lines: 33
avg_score: 0.000481
def assemble_all(asmcode, pc=0, fork=DEFAULT_FORK): """ Assemble a sequence of textual representation of EVM instructions :param asmcode: assembly code for any number of instructions :type asmcode: str :param pc: program counter of the first instruction(optional) :type pc: int :param fork: fork name (optional) :type fork: str :return: An generator of Instruction objects :rtype: generator[Instructions] Example use:: >>> assemble_one('''PUSH1 0x60\n \ PUSH1 0x40\n \ MSTORE\n \ PUSH1 0x2\n \ PUSH2 0x108\n \ PUSH1 0x0\n \ POP\n \ SSTORE\n \ PUSH1 0x40\n \ MLOAD\n \ ''') """ asmcode = asmcode.split('\n') asmcode = iter(asmcode) for line in asmcode: if not line.strip(): continue instr = assemble_one(line, pc=pc, fork=fork) yield instr pc += instr.size
[ "def", "assemble_all", "(", "asmcode", ",", "pc", "=", "0", ",", "fork", "=", "DEFAULT_FORK", ")", ":", "asmcode", "=", "asmcode", ".", "split", "(", "'\\n'", ")", "asmcode", "=", "iter", "(", "asmcode", ")", "for", "line", "in", "asmcode", ":", "if", "not", "line", ".", "strip", "(", ")", ":", "continue", "instr", "=", "assemble_one", "(", "line", ",", "pc", "=", "pc", ",", "fork", "=", "fork", ")", "yield", "instr", "pc", "+=", "instr", ".", "size" ]
32.942857
0.000842
[ "def assemble_all(asmcode, pc=0, fork=DEFAULT_FORK):\n", " \"\"\" Assemble a sequence of textual representation of EVM instructions\n", "\n", " :param asmcode: assembly code for any number of instructions\n", " :type asmcode: str\n", " :param pc: program counter of the first instruction(optional)\n", " :type pc: int\n", " :param fork: fork name (optional)\n", " :type fork: str\n", " :return: An generator of Instruction objects\n", " :rtype: generator[Instructions]\n", "\n", " Example use::\n", "\n", " >>> assemble_one('''PUSH1 0x60\\n \\\n", " PUSH1 0x40\\n \\\n", " MSTORE\\n \\\n", " PUSH1 0x2\\n \\\n", " PUSH2 0x108\\n \\\n", " PUSH1 0x0\\n \\\n", " POP\\n \\\n", " SSTORE\\n \\\n", " PUSH1 0x40\\n \\\n", " MLOAD\\n \\\n", " ''')\n", "\n", " \"\"\"\n", " asmcode = asmcode.split('\\n')\n", " asmcode = iter(asmcode)\n", " for line in asmcode:\n", " if not line.strip():\n", " continue\n", " instr = assemble_one(line, pc=pc, fork=fork)\n", " yield instr\n", " pc += instr.size" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.041666666666666664 ]
35
0.00119
def lookup(self, pathogenName, sampleName): """ Look up a pathogen name, sample name combination and get its FASTA/FASTQ file name and unique read count. This method should be used instead of C{add} in situations where you want an exception to be raised if a pathogen/sample combination has not already been passed to C{add}. @param pathogenName: A C{str} pathogen name. @param sampleName: A C{str} sample name. @raise KeyError: If the pathogen name or sample name have not been seen, either individually or in combination. @return: A (C{str}, C{int}) tuple retrieved from self._readsFilenames """ pathogenIndex = self._pathogens[pathogenName] sampleIndex = self._samples[sampleName] return self._readsFilenames[(pathogenIndex, sampleIndex)]
[ "def", "lookup", "(", "self", ",", "pathogenName", ",", "sampleName", ")", ":", "pathogenIndex", "=", "self", ".", "_pathogens", "[", "pathogenName", "]", "sampleIndex", "=", "self", ".", "_samples", "[", "sampleName", "]", "return", "self", ".", "_readsFilenames", "[", "(", "pathogenIndex", ",", "sampleIndex", ")", "]" ]
47.222222
0.002307
[ "def lookup(self, pathogenName, sampleName):\n", " \"\"\"\n", " Look up a pathogen name, sample name combination and get its\n", " FASTA/FASTQ file name and unique read count.\n", "\n", " This method should be used instead of C{add} in situations where\n", " you want an exception to be raised if a pathogen/sample combination has\n", " not already been passed to C{add}.\n", "\n", " @param pathogenName: A C{str} pathogen name.\n", " @param sampleName: A C{str} sample name.\n", " @raise KeyError: If the pathogen name or sample name have not been\n", " seen, either individually or in combination.\n", " @return: A (C{str}, C{int}) tuple retrieved from self._readsFilenames\n", " \"\"\"\n", " pathogenIndex = self._pathogens[pathogenName]\n", " sampleIndex = self._samples[sampleName]\n", " return self._readsFilenames[(pathogenIndex, sampleIndex)]" ]
[ 0, 0.08333333333333333, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.015384615384615385 ]
18
0.005484
def SegmentProd(a, ids): """ Segmented prod op. """ func = lambda idxs: reduce(np.multiply, a[idxs]) return seg_map(func, a, ids),
[ "def", "SegmentProd", "(", "a", ",", "ids", ")", ":", "func", "=", "lambda", "idxs", ":", "reduce", "(", "np", ".", "multiply", ",", "a", "[", "idxs", "]", ")", "return", "seg_map", "(", "func", ",", "a", ",", "ids", ")", "," ]
24.166667
0.013333
[ "def SegmentProd(a, ids):\n", " \"\"\"\n", " Segmented prod op.\n", " \"\"\"\n", " func = lambda idxs: reduce(np.multiply, a[idxs])\n", " return seg_map(func, a, ids)," ]
[ 0, 0, 0, 0, 0.018867924528301886, 0.030303030303030304 ]
6
0.008195
def set_cores_massive(self,filename='core_masses_massive.txt'): ''' Uesse function cores in nugridse.py ''' core_info=[] minis=[] for i in range(len(self.runs_H5_surf)): sefiles=se(self.runs_H5_out[i]) mini=sefiles.get('mini') minis.append(mini) incycle=int(sefiles.se.cycles[-1]) core_info.append(sefiles.cores(incycle=incycle)) print_info='' for i in range(len(self.runs_H5_surf)): if i ==0: print 'Following returned for each initial mass' print core_info[i][1] #print '----Mini: ',minis[i],'------' print_info+=(str(minis[i])+' & ') info=core_info[i][0] for k in range(len(info)): print_info+=('{:.3E}'.format(float(core_info[i][0][k]))+' & ') print_info=(print_info+'\n') #print core_info[i][2] f1=open(filename,'a') f1.write(print_info) f1.close()
[ "def", "set_cores_massive", "(", "self", ",", "filename", "=", "'core_masses_massive.txt'", ")", ":", "core_info", "=", "[", "]", "minis", "=", "[", "]", "for", "i", "in", "range", "(", "len", "(", "self", ".", "runs_H5_surf", ")", ")", ":", "sefiles", "=", "se", "(", "self", ".", "runs_H5_out", "[", "i", "]", ")", "mini", "=", "sefiles", ".", "get", "(", "'mini'", ")", "minis", ".", "append", "(", "mini", ")", "incycle", "=", "int", "(", "sefiles", ".", "se", ".", "cycles", "[", "-", "1", "]", ")", "core_info", ".", "append", "(", "sefiles", ".", "cores", "(", "incycle", "=", "incycle", ")", ")", "print_info", "=", "''", "for", "i", "in", "range", "(", "len", "(", "self", ".", "runs_H5_surf", ")", ")", ":", "if", "i", "==", "0", ":", "print", "'Following returned for each initial mass'", "print", "core_info", "[", "i", "]", "[", "1", "]", "#print '----Mini: ',minis[i],'------'", "print_info", "+=", "(", "str", "(", "minis", "[", "i", "]", ")", "+", "' & '", ")", "info", "=", "core_info", "[", "i", "]", "[", "0", "]", "for", "k", "in", "range", "(", "len", "(", "info", ")", ")", ":", "print_info", "+=", "(", "'{:.3E}'", ".", "format", "(", "float", "(", "core_info", "[", "i", "]", "[", "0", "]", "[", "k", "]", ")", ")", "+", "' & '", ")", "print_info", "=", "(", "print_info", "+", "'\\n'", ")", "#print core_info[i][2]", "f1", "=", "open", "(", "filename", ",", "'a'", ")", "f1", ".", "write", "(", "print_info", ")", "f1", ".", "close", "(", ")" ]
30
0.054566
[ "def set_cores_massive(self,filename='core_masses_massive.txt'):\n", "\n", "\t\t'''\n", "\t\t\tUesse function cores in nugridse.py\n", "\t\t'''\n", "\t\t\n", "\t\tcore_info=[]\n", "\t\tminis=[]\n", " for i in range(len(self.runs_H5_surf)):\n", " sefiles=se(self.runs_H5_out[i])\n", "\t\t\tmini=sefiles.get('mini')\n", "\t\t\tminis.append(mini)\n", "\t\t\tincycle=int(sefiles.se.cycles[-1])\n", "\t\t\tcore_info.append(sefiles.cores(incycle=incycle))\n", "\t\tprint_info=''\n", " for i in range(len(self.runs_H5_surf)):\n", "\t\t\tif i ==0:\n", "\t\t\t\tprint 'Following returned for each initial mass'\n", "\t\t\t\tprint core_info[i][1]\n", " #print '----Mini: ',minis[i],'------'\n", "\t\t\tprint_info+=(str(minis[i])+' & ')\n", "\t\t\tinfo=core_info[i][0]\n", "\t\t\tfor k in range(len(info)):\n", "\t\t\t\tprint_info+=('{:.3E}'.format(float(core_info[i][0][k]))+' & ')\n", "\t\t\tprint_info=(print_info+'\\n')\n", "\t\t\t#print core_info[i][2]\n", "\t\tf1=open(filename,'a')\n", "\t\tf1.write(print_info)\n", "\t\tf1.close()" ]
[ 0.015625, 0, 0.3333333333333333, 0.02564102564102564, 0.16666666666666666, 0.6666666666666666, 0.13333333333333333, 0.18181818181818182, 0.017857142857142856, 0.03571428571428571, 0.10714285714285714, 0.045454545454545456, 0.05263157894736842, 0.019230769230769232, 0.125, 0.017857142857142856, 0.23076923076923078, 0.018867924528301886, 0.038461538461538464, 0.03225806451612903, 0.08108108108108109, 0.08333333333333333, 0.03333333333333333, 0.029850746268656716, 0.0625, 0.07692307692307693, 0.125, 0.043478260869565216, 0.16666666666666666 ]
29
0.102293
def load_from_args(args): """ Given parsed commandline arguments, returns a list of ReadSource objects """ if not args.reads: return None if args.read_source_name: read_source_names = util.expand( args.read_source_name, 'read_source_name', 'read source', len(args.reads)) else: read_source_names = util.drop_prefix(args.reads) filters = [] for (name, info) in READ_FILTERS.items(): value = getattr(args, name) if value is not None: filters.append(functools.partial(info[-1], value)) return [ load_bam(filename, name, filters) for (filename, name) in zip(args.reads, read_source_names) ]
[ "def", "load_from_args", "(", "args", ")", ":", "if", "not", "args", ".", "reads", ":", "return", "None", "if", "args", ".", "read_source_name", ":", "read_source_names", "=", "util", ".", "expand", "(", "args", ".", "read_source_name", ",", "'read_source_name'", ",", "'read source'", ",", "len", "(", "args", ".", "reads", ")", ")", "else", ":", "read_source_names", "=", "util", ".", "drop_prefix", "(", "args", ".", "reads", ")", "filters", "=", "[", "]", "for", "(", "name", ",", "info", ")", "in", "READ_FILTERS", ".", "items", "(", ")", ":", "value", "=", "getattr", "(", "args", ",", "name", ")", "if", "value", "is", "not", "None", ":", "filters", ".", "append", "(", "functools", ".", "partial", "(", "info", "[", "-", "1", "]", ",", "value", ")", ")", "return", "[", "load_bam", "(", "filename", ",", "name", ",", "filters", ")", "for", "(", "filename", ",", "name", ")", "in", "zip", "(", "args", ".", "reads", ",", "read_source_names", ")", "]" ]
26.925926
0.001328
[ "def load_from_args(args):\n", " \"\"\"\n", " Given parsed commandline arguments, returns a list of ReadSource objects\n", " \"\"\"\n", " if not args.reads:\n", " return None\n", "\n", " if args.read_source_name:\n", " read_source_names = util.expand(\n", " args.read_source_name,\n", " 'read_source_name',\n", " 'read source',\n", " len(args.reads))\n", " else:\n", " read_source_names = util.drop_prefix(args.reads)\n", "\n", " filters = []\n", " for (name, info) in READ_FILTERS.items():\n", " value = getattr(args, name)\n", " if value is not None:\n", " filters.append(functools.partial(info[-1], value))\n", "\n", " return [\n", " load_bam(filename, name, filters)\n", " for (filename, name)\n", " in zip(args.reads, read_source_names)\n", " ]" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.2 ]
27
0.007407
def _verify_same_spaces(self): """Verifies that all the envs have the same observation and action space.""" # Pre-conditions: self._envs is initialized. if self._envs is None: raise ValueError("Environments not initialized.") if not isinstance(self._envs, list): tf.logging.warning("Not checking observation and action space " "compatibility across envs, since there is just one.") return # NOTE: We compare string representations of observation_space and # action_space because compositional classes like space.Tuple don't return # true on object comparison. if not all( str(env.observation_space) == str(self.observation_space) for env in self._envs): err_str = ("All environments should have the same observation space, but " "don't.") tf.logging.error(err_str) # Log all observation spaces. for i, env in enumerate(self._envs): tf.logging.error("Env[%d] has observation space [%s]", i, env.observation_space) raise ValueError(err_str) if not all( str(env.action_space) == str(self.action_space) for env in self._envs): err_str = "All environments should have the same action space, but don't." tf.logging.error(err_str) # Log all action spaces. for i, env in enumerate(self._envs): tf.logging.error("Env[%d] has action space [%s]", i, env.action_space) raise ValueError(err_str)
[ "def", "_verify_same_spaces", "(", "self", ")", ":", "# Pre-conditions: self._envs is initialized.", "if", "self", ".", "_envs", "is", "None", ":", "raise", "ValueError", "(", "\"Environments not initialized.\"", ")", "if", "not", "isinstance", "(", "self", ".", "_envs", ",", "list", ")", ":", "tf", ".", "logging", ".", "warning", "(", "\"Not checking observation and action space \"", "\"compatibility across envs, since there is just one.\"", ")", "return", "# NOTE: We compare string representations of observation_space and", "# action_space because compositional classes like space.Tuple don't return", "# true on object comparison.", "if", "not", "all", "(", "str", "(", "env", ".", "observation_space", ")", "==", "str", "(", "self", ".", "observation_space", ")", "for", "env", "in", "self", ".", "_envs", ")", ":", "err_str", "=", "(", "\"All environments should have the same observation space, but \"", "\"don't.\"", ")", "tf", ".", "logging", ".", "error", "(", "err_str", ")", "# Log all observation spaces.", "for", "i", ",", "env", "in", "enumerate", "(", "self", ".", "_envs", ")", ":", "tf", ".", "logging", ".", "error", "(", "\"Env[%d] has observation space [%s]\"", ",", "i", ",", "env", ".", "observation_space", ")", "raise", "ValueError", "(", "err_str", ")", "if", "not", "all", "(", "str", "(", "env", ".", "action_space", ")", "==", "str", "(", "self", ".", "action_space", ")", "for", "env", "in", "self", ".", "_envs", ")", ":", "err_str", "=", "\"All environments should have the same action space, but don't.\"", "tf", ".", "logging", ".", "error", "(", "err_str", ")", "# Log all action spaces.", "for", "i", ",", "env", "in", "enumerate", "(", "self", ".", "_envs", ")", ":", "tf", ".", "logging", ".", "error", "(", "\"Env[%d] has action space [%s]\"", ",", "i", ",", "env", ".", "action_space", ")", "raise", "ValueError", "(", "err_str", ")" ]
39.810811
0.012591
[ "def _verify_same_spaces(self):\n", " \"\"\"Verifies that all the envs have the same observation and action space.\"\"\"\n", "\n", " # Pre-conditions: self._envs is initialized.\n", "\n", " if self._envs is None:\n", " raise ValueError(\"Environments not initialized.\")\n", "\n", " if not isinstance(self._envs, list):\n", " tf.logging.warning(\"Not checking observation and action space \"\n", " \"compatibility across envs, since there is just one.\")\n", " return\n", "\n", " # NOTE: We compare string representations of observation_space and\n", " # action_space because compositional classes like space.Tuple don't return\n", " # true on object comparison.\n", "\n", " if not all(\n", " str(env.observation_space) == str(self.observation_space)\n", " for env in self._envs):\n", " err_str = (\"All environments should have the same observation space, but \"\n", " \"don't.\")\n", " tf.logging.error(err_str)\n", " # Log all observation spaces.\n", " for i, env in enumerate(self._envs):\n", " tf.logging.error(\"Env[%d] has observation space [%s]\", i,\n", " env.observation_space)\n", " raise ValueError(err_str)\n", "\n", " if not all(\n", " str(env.action_space) == str(self.action_space) for env in self._envs):\n", " err_str = \"All environments should have the same action space, but don't.\"\n", " tf.logging.error(err_str)\n", " # Log all action spaces.\n", " for i, env in enumerate(self._envs):\n", " tf.logging.error(\"Env[%d] has action space [%s]\", i, env.action_space)\n", " raise ValueError(err_str)" ]
[ 0, 0.012345679012345678, 0, 0, 0, 0, 0.017857142857142856, 0, 0, 0.014285714285714285, 0, 0.07692307692307693, 0, 0, 0, 0, 0, 0, 0, 0.03125, 0.024691358024691357, 0, 0.03125, 0.027777777777777776, 0.023255813953488372, 0, 0, 0.03125, 0, 0, 0.0125, 0.024691358024691357, 0.03125, 0.03225806451612903, 0.023255813953488372, 0, 0.06451612903225806 ]
37
0.012956
def sources_relative_to_source_root(self): """ :API: public """ if self.has_sources(): abs_source_root = os.path.join(get_buildroot(), self.target_base) for source in self.sources_relative_to_buildroot(): abs_source = os.path.join(get_buildroot(), source) yield os.path.relpath(abs_source, abs_source_root)
[ "def", "sources_relative_to_source_root", "(", "self", ")", ":", "if", "self", ".", "has_sources", "(", ")", ":", "abs_source_root", "=", "os", ".", "path", ".", "join", "(", "get_buildroot", "(", ")", ",", "self", ".", "target_base", ")", "for", "source", "in", "self", ".", "sources_relative_to_buildroot", "(", ")", ":", "abs_source", "=", "os", ".", "path", ".", "join", "(", "get_buildroot", "(", ")", ",", "source", ")", "yield", "os", ".", "path", ".", "relpath", "(", "abs_source", ",", "abs_source_root", ")" ]
38
0.008571
[ "def sources_relative_to_source_root(self):\n", " \"\"\"\n", " :API: public\n", " \"\"\"\n", " if self.has_sources():\n", " abs_source_root = os.path.join(get_buildroot(), self.target_base)\n", " for source in self.sources_relative_to_buildroot():\n", " abs_source = os.path.join(get_buildroot(), source)\n", " yield os.path.relpath(abs_source, abs_source_root)" ]
[ 0, 0, 0, 0, 0, 0.013888888888888888, 0.017241379310344827, 0, 0.017241379310344827 ]
9
0.005375
def consumer_initialize_task(processor, consumer_client, shard_id, cursor_position, cursor_start_time, cursor_end_time=None): """ return TaskResult if failed, or else, return InitTaskResult :param processor: :param consumer_client: :param shard_id: :param cursor_position: :param cursor_start_time: :return: """ try: processor.initialize(shard_id) is_cursor_persistent = False check_point = consumer_client.get_check_point(shard_id) if check_point['checkpoint'] and len(check_point['checkpoint']) > 0: is_cursor_persistent = True cursor = check_point['checkpoint'] else: if cursor_position == CursorPosition.BEGIN_CURSOR: cursor = consumer_client.get_begin_cursor(shard_id) elif cursor_position == CursorPosition.END_CURSOR: cursor = consumer_client.get_end_cursor(shard_id) else: cursor = consumer_client.get_cursor(shard_id, cursor_start_time) end_cursor = None if cursor_end_time is not None: end_cursor = consumer_client.get_cursor(shard_id, cursor_end_time) return InitTaskResult(cursor, is_cursor_persistent, end_cursor) except Exception as e: return TaskResult(e)
[ "def", "consumer_initialize_task", "(", "processor", ",", "consumer_client", ",", "shard_id", ",", "cursor_position", ",", "cursor_start_time", ",", "cursor_end_time", "=", "None", ")", ":", "try", ":", "processor", ".", "initialize", "(", "shard_id", ")", "is_cursor_persistent", "=", "False", "check_point", "=", "consumer_client", ".", "get_check_point", "(", "shard_id", ")", "if", "check_point", "[", "'checkpoint'", "]", "and", "len", "(", "check_point", "[", "'checkpoint'", "]", ")", ">", "0", ":", "is_cursor_persistent", "=", "True", "cursor", "=", "check_point", "[", "'checkpoint'", "]", "else", ":", "if", "cursor_position", "==", "CursorPosition", ".", "BEGIN_CURSOR", ":", "cursor", "=", "consumer_client", ".", "get_begin_cursor", "(", "shard_id", ")", "elif", "cursor_position", "==", "CursorPosition", ".", "END_CURSOR", ":", "cursor", "=", "consumer_client", ".", "get_end_cursor", "(", "shard_id", ")", "else", ":", "cursor", "=", "consumer_client", ".", "get_cursor", "(", "shard_id", ",", "cursor_start_time", ")", "end_cursor", "=", "None", "if", "cursor_end_time", "is", "not", "None", ":", "end_cursor", "=", "consumer_client", ".", "get_cursor", "(", "shard_id", ",", "cursor_end_time", ")", "return", "InitTaskResult", "(", "cursor", ",", "is_cursor_persistent", ",", "end_cursor", ")", "except", "Exception", "as", "e", ":", "return", "TaskResult", "(", "e", ")" ]
39.90625
0.002294
[ "def consumer_initialize_task(processor, consumer_client, shard_id, cursor_position, cursor_start_time, cursor_end_time=None):\n", " \"\"\"\n", " return TaskResult if failed, or else, return InitTaskResult\n", " :param processor:\n", " :param consumer_client:\n", " :param shard_id:\n", " :param cursor_position:\n", " :param cursor_start_time:\n", " :return:\n", " \"\"\"\n", " try:\n", " processor.initialize(shard_id)\n", " is_cursor_persistent = False\n", " check_point = consumer_client.get_check_point(shard_id)\n", " if check_point['checkpoint'] and len(check_point['checkpoint']) > 0:\n", " is_cursor_persistent = True\n", " cursor = check_point['checkpoint']\n", " else:\n", " if cursor_position == CursorPosition.BEGIN_CURSOR:\n", " cursor = consumer_client.get_begin_cursor(shard_id)\n", " elif cursor_position == CursorPosition.END_CURSOR:\n", " cursor = consumer_client.get_end_cursor(shard_id)\n", " else:\n", " cursor = consumer_client.get_cursor(shard_id, cursor_start_time)\n", "\n", " end_cursor = None\n", " if cursor_end_time is not None:\n", " end_cursor = consumer_client.get_cursor(shard_id, cursor_end_time)\n", "\n", " return InitTaskResult(cursor, is_cursor_persistent, end_cursor)\n", " except Exception as e:\n", " return TaskResult(e)" ]
[ 0.007936507936507936, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012345679012345678, 0, 0, 0, 0, 0, 0, 0, 0.03571428571428571 ]
32
0.00175
def block(self): ''' pfctl -a switchyard -f- < rules.txt pfctl -a switchyard -F rules pfctl -t switchyard -F r ''' st,output = _runcmd("/sbin/pfctl -aswitchyard -f -", self._rules) log_debug("Installing rules: {}".format(output))
[ "def", "block", "(", "self", ")", ":", "st", ",", "output", "=", "_runcmd", "(", "\"/sbin/pfctl -aswitchyard -f -\"", ",", "self", ".", "_rules", ")", "log_debug", "(", "\"Installing rules: {}\"", ".", "format", "(", "output", ")", ")" ]
34.75
0.010526
[ "def block(self):\n", " '''\n", " pfctl -a switchyard -f- < rules.txt\n", " pfctl -a switchyard -F rules\n", " pfctl -t switchyard -F r\n", " '''\n", " st,output = _runcmd(\"/sbin/pfctl -aswitchyard -f -\", self._rules)\n", " log_debug(\"Installing rules: {}\".format(output))" ]
[ 0, 0.08333333333333333, 0, 0, 0, 0, 0.013513513513513514, 0.017857142857142856 ]
8
0.014338
def _chart_support(self, name, data, caller, **kwargs): "template chart support function" id = 'chart-%s' % next(self.id) name = self._chart_class_name(name) options = dict(self.environment.options) options.update(name=name, id=id) # jinja2 prepends 'l_' or 'l_{{ n }}'(ver>=2.9) to keys if jinja2.__version__ >= '2.9': kwargs = dict((k[4:], v) for (k, v) in kwargs.items()) else: kwargs = dict((k[2:], v) for (k, v) in kwargs.items()) if self._library is None: self._library = self.load_library() id = kwargs.get('id', '') library = self._library.get(id, {}) # apply options from a tag library.update(kwargs.get('library', {})) # apply options from chartkick.json kwargs.update(library=library) options.update(kwargs) return CHART_HTML.format(data=data, options=json.dumps(kwargs), **options)
[ "def", "_chart_support", "(", "self", ",", "name", ",", "data", ",", "caller", ",", "*", "*", "kwargs", ")", ":", "id", "=", "'chart-%s'", "%", "next", "(", "self", ".", "id", ")", "name", "=", "self", ".", "_chart_class_name", "(", "name", ")", "options", "=", "dict", "(", "self", ".", "environment", ".", "options", ")", "options", ".", "update", "(", "name", "=", "name", ",", "id", "=", "id", ")", "# jinja2 prepends 'l_' or 'l_{{ n }}'(ver>=2.9) to keys", "if", "jinja2", ".", "__version__", ">=", "'2.9'", ":", "kwargs", "=", "dict", "(", "(", "k", "[", "4", ":", "]", ",", "v", ")", "for", "(", "k", ",", "v", ")", "in", "kwargs", ".", "items", "(", ")", ")", "else", ":", "kwargs", "=", "dict", "(", "(", "k", "[", "2", ":", "]", ",", "v", ")", "for", "(", "k", ",", "v", ")", "in", "kwargs", ".", "items", "(", ")", ")", "if", "self", ".", "_library", "is", "None", ":", "self", ".", "_library", "=", "self", ".", "load_library", "(", ")", "id", "=", "kwargs", ".", "get", "(", "'id'", ",", "''", ")", "library", "=", "self", ".", "_library", ".", "get", "(", "id", ",", "{", "}", ")", "# apply options from a tag", "library", ".", "update", "(", "kwargs", ".", "get", "(", "'library'", ",", "{", "}", ")", ")", "# apply options from chartkick.json", "kwargs", ".", "update", "(", "library", "=", "library", ")", "options", ".", "update", "(", "kwargs", ")", "return", "CHART_HTML", ".", "format", "(", "data", "=", "data", ",", "options", "=", "json", ".", "dumps", "(", "kwargs", ")", ",", "*", "*", "options", ")" ]
37.576923
0.001996
[ "def _chart_support(self, name, data, caller, **kwargs):\n", " \"template chart support function\"\n", " id = 'chart-%s' % next(self.id)\n", " name = self._chart_class_name(name)\n", " options = dict(self.environment.options)\n", " options.update(name=name, id=id)\n", "\n", " # jinja2 prepends 'l_' or 'l_{{ n }}'(ver>=2.9) to keys\n", " if jinja2.__version__ >= '2.9':\n", " kwargs = dict((k[4:], v) for (k, v) in kwargs.items())\n", " else:\n", " kwargs = dict((k[2:], v) for (k, v) in kwargs.items())\n", "\n", " if self._library is None:\n", " self._library = self.load_library()\n", " id = kwargs.get('id', '')\n", " library = self._library.get(id, {})\n", "\n", " # apply options from a tag\n", " library.update(kwargs.get('library', {}))\n", " # apply options from chartkick.json\n", " kwargs.update(library=library)\n", "\n", " options.update(kwargs)\n", " return CHART_HTML.format(data=data, options=json.dumps(kwargs),\n", " **options)" ]
[ 0, 0.023809523809523808, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.023255813953488372 ]
26
0.00181
def concat_cols(df1,df2,idx_col,df1_cols,df2_cols, df1_suffix,df2_suffix,wc_cols=[],suffix_all=False): """ Concatenates two pandas tables :param df1: dataframe 1 :param df2: dataframe 2 :param idx_col: column name which will be used as a common index """ df1=df1.set_index(idx_col) df2=df2.set_index(idx_col) if not len(wc_cols)==0: for wc in wc_cols: df1_cols=df1_cols+[c for c in df1.columns if wc in c] df2_cols=df2_cols+[c for c in df2.columns if wc in c] combo=pd.concat([df1.loc[:,df1_cols],df2.loc[:,df2_cols]],axis=1) # find common columns and rename them # print df1_cols # print df2_cols if suffix_all: df1_cols=["%s%s" % (c,df1_suffix) for c in df1_cols] df2_cols=["%s%s" % (c,df2_suffix) for c in df2_cols] # df1_cols[df1_cols.index(col)]="%s%s" % (col,df1_suffix) # df2_cols[df2_cols.index(col)]="%s%s" % (col,df2_suffix) else: common_cols=[col for col in df1_cols if col in df2_cols] for col in common_cols: df1_cols[df1_cols.index(col)]="%s%s" % (col,df1_suffix) df2_cols[df2_cols.index(col)]="%s%s" % (col,df2_suffix) combo.columns=df1_cols+df2_cols combo.index.name=idx_col return combo
[ "def", "concat_cols", "(", "df1", ",", "df2", ",", "idx_col", ",", "df1_cols", ",", "df2_cols", ",", "df1_suffix", ",", "df2_suffix", ",", "wc_cols", "=", "[", "]", ",", "suffix_all", "=", "False", ")", ":", "df1", "=", "df1", ".", "set_index", "(", "idx_col", ")", "df2", "=", "df2", ".", "set_index", "(", "idx_col", ")", "if", "not", "len", "(", "wc_cols", ")", "==", "0", ":", "for", "wc", "in", "wc_cols", ":", "df1_cols", "=", "df1_cols", "+", "[", "c", "for", "c", "in", "df1", ".", "columns", "if", "wc", "in", "c", "]", "df2_cols", "=", "df2_cols", "+", "[", "c", "for", "c", "in", "df2", ".", "columns", "if", "wc", "in", "c", "]", "combo", "=", "pd", ".", "concat", "(", "[", "df1", ".", "loc", "[", ":", ",", "df1_cols", "]", ",", "df2", ".", "loc", "[", ":", ",", "df2_cols", "]", "]", ",", "axis", "=", "1", ")", "# find common columns and rename them", "# print df1_cols", "# print df2_cols ", "if", "suffix_all", ":", "df1_cols", "=", "[", "\"%s%s\"", "%", "(", "c", ",", "df1_suffix", ")", "for", "c", "in", "df1_cols", "]", "df2_cols", "=", "[", "\"%s%s\"", "%", "(", "c", ",", "df2_suffix", ")", "for", "c", "in", "df2_cols", "]", "# df1_cols[df1_cols.index(col)]=\"%s%s\" % (col,df1_suffix)", "# df2_cols[df2_cols.index(col)]=\"%s%s\" % (col,df2_suffix)", "else", ":", "common_cols", "=", "[", "col", "for", "col", "in", "df1_cols", "if", "col", "in", "df2_cols", "]", "for", "col", "in", "common_cols", ":", "df1_cols", "[", "df1_cols", ".", "index", "(", "col", ")", "]", "=", "\"%s%s\"", "%", "(", "col", ",", "df1_suffix", ")", "df2_cols", "[", "df2_cols", ".", "index", "(", "col", ")", "]", "=", "\"%s%s\"", "%", "(", "col", ",", "df2_suffix", ")", "combo", ".", "columns", "=", "df1_cols", "+", "df2_cols", "combo", ".", "index", ".", "name", "=", "idx_col", "return", "combo" ]
38.636364
0.025249
[ "def concat_cols(df1,df2,idx_col,df1_cols,df2_cols,\n", " df1_suffix,df2_suffix,wc_cols=[],suffix_all=False):\n", " \"\"\"\n", " Concatenates two pandas tables \n", "\n", " :param df1: dataframe 1\n", " :param df2: dataframe 2\n", " :param idx_col: column name which will be used as a common index \n", " \"\"\"\n", "\n", " df1=df1.set_index(idx_col)\n", " df2=df2.set_index(idx_col) \n", " if not len(wc_cols)==0:\n", " for wc in wc_cols:\n", " df1_cols=df1_cols+[c for c in df1.columns if wc in c]\n", " df2_cols=df2_cols+[c for c in df2.columns if wc in c]\n", " combo=pd.concat([df1.loc[:,df1_cols],df2.loc[:,df2_cols]],axis=1)\n", " # find common columns and rename them\n", " # print df1_cols\n", " # print df2_cols \n", " if suffix_all:\n", " df1_cols=[\"%s%s\" % (c,df1_suffix) for c in df1_cols]\n", " df2_cols=[\"%s%s\" % (c,df2_suffix) for c in df2_cols]\n", " # df1_cols[df1_cols.index(col)]=\"%s%s\" % (col,df1_suffix)\n", " # df2_cols[df2_cols.index(col)]=\"%s%s\" % (col,df2_suffix)\n", " else:\n", " common_cols=[col for col in df1_cols if col in df2_cols]\n", " for col in common_cols:\n", " df1_cols[df1_cols.index(col)]=\"%s%s\" % (col,df1_suffix)\n", " df2_cols[df2_cols.index(col)]=\"%s%s\" % (col,df2_suffix)\n", " combo.columns=df1_cols+df2_cols\n", " combo.index.name=idx_col\n", " return combo" ]
[ 0.0784313725490196, 0.04411764705882353, 0, 0.027777777777777776, 0, 0, 0, 0.014285714285714285, 0, 0, 0.03225806451612903, 0.05714285714285714, 0.03571428571428571, 0, 0.015151515151515152, 0.015151515151515152, 0.07142857142857142, 0, 0, 0.04, 0, 0.03278688524590164, 0.03278688524590164, 0, 0, 0, 0.015384615384615385, 0, 0.029411764705882353, 0.029411764705882353, 0.027777777777777776, 0.034482758620689655, 0.0625 ]
33
0.021091
def masked_max(vector: torch.Tensor, mask: torch.Tensor, dim: int, keepdim: bool = False, min_val: float = -1e7) -> torch.Tensor: """ To calculate max along certain dimensions on masked values Parameters ---------- vector : ``torch.Tensor`` The vector to calculate max, assume unmasked parts are already zeros mask : ``torch.Tensor`` The mask of the vector. It must be broadcastable with vector. dim : ``int`` The dimension to calculate max keepdim : ``bool`` Whether to keep dimension min_val : ``float`` The minimal value for paddings Returns ------- A ``torch.Tensor`` of including the maximum values. """ one_minus_mask = (1.0 - mask).byte() replaced_vector = vector.masked_fill(one_minus_mask, min_val) max_value, _ = replaced_vector.max(dim=dim, keepdim=keepdim) return max_value
[ "def", "masked_max", "(", "vector", ":", "torch", ".", "Tensor", ",", "mask", ":", "torch", ".", "Tensor", ",", "dim", ":", "int", ",", "keepdim", ":", "bool", "=", "False", ",", "min_val", ":", "float", "=", "-", "1e7", ")", "->", "torch", ".", "Tensor", ":", "one_minus_mask", "=", "(", "1.0", "-", "mask", ")", ".", "byte", "(", ")", "replaced_vector", "=", "vector", ".", "masked_fill", "(", "one_minus_mask", ",", "min_val", ")", "max_value", ",", "_", "=", "replaced_vector", ".", "max", "(", "dim", "=", "dim", ",", "keepdim", "=", "keepdim", ")", "return", "max_value" ]
31.965517
0.001047
[ "def masked_max(vector: torch.Tensor,\n", " mask: torch.Tensor,\n", " dim: int,\n", " keepdim: bool = False,\n", " min_val: float = -1e7) -> torch.Tensor:\n", " \"\"\"\n", " To calculate max along certain dimensions on masked values\n", "\n", " Parameters\n", " ----------\n", " vector : ``torch.Tensor``\n", " The vector to calculate max, assume unmasked parts are already zeros\n", " mask : ``torch.Tensor``\n", " The mask of the vector. It must be broadcastable with vector.\n", " dim : ``int``\n", " The dimension to calculate max\n", " keepdim : ``bool``\n", " Whether to keep dimension\n", " min_val : ``float``\n", " The minimal value for paddings\n", "\n", " Returns\n", " -------\n", " A ``torch.Tensor`` of including the maximum values.\n", " \"\"\"\n", " one_minus_mask = (1.0 - mask).byte()\n", " replaced_vector = vector.masked_fill(one_minus_mask, min_val)\n", " max_value, _ = replaced_vector.max(dim=dim, keepdim=keepdim)\n", " return max_value" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.05 ]
29
0.001724
def merge_recs_headers(recs): ''' take a list of recs [rec1,rec2,rec3....], each rec is a dictionary. make sure that all recs have the same headers. ''' headers = [] for rec in recs: keys = list(rec.keys()) for key in keys: if key not in headers: headers.append(key) for rec in recs: for header in headers: if header not in list(rec.keys()): rec[header] = "" return recs
[ "def", "merge_recs_headers", "(", "recs", ")", ":", "headers", "=", "[", "]", "for", "rec", "in", "recs", ":", "keys", "=", "list", "(", "rec", ".", "keys", "(", ")", ")", "for", "key", "in", "keys", ":", "if", "key", "not", "in", "headers", ":", "headers", ".", "append", "(", "key", ")", "for", "rec", "in", "recs", ":", "for", "header", "in", "headers", ":", "if", "header", "not", "in", "list", "(", "rec", ".", "keys", "(", ")", ")", ":", "rec", "[", "header", "]", "=", "\"\"", "return", "recs" ]
29.1875
0.002075
[ "def merge_recs_headers(recs):\n", " '''\n", " take a list of recs [rec1,rec2,rec3....], each rec is a dictionary.\n", " make sure that all recs have the same headers.\n", " '''\n", " headers = []\n", " for rec in recs:\n", " keys = list(rec.keys())\n", " for key in keys:\n", " if key not in headers:\n", " headers.append(key)\n", " for rec in recs:\n", " for header in headers:\n", " if header not in list(rec.keys()):\n", " rec[header] = \"\"\n", " return recs" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.06666666666666667 ]
16
0.004167
def tuplize(nested): """Recursively converts iterables into tuples. Args: nested: A nested structure of items and iterables. Returns: A nested structure of items and tuples. """ if isinstance(nested, str): return nested try: return tuple(map(tuplize, nested)) except TypeError: return nested
[ "def", "tuplize", "(", "nested", ")", ":", "if", "isinstance", "(", "nested", ",", "str", ")", ":", "return", "nested", "try", ":", "return", "tuple", "(", "map", "(", "tuplize", ",", "nested", ")", ")", "except", "TypeError", ":", "return", "nested" ]
20.866667
0.015291
[ "def tuplize(nested):\n", " \"\"\"Recursively converts iterables into tuples.\n", "\n", " Args:\n", " nested: A nested structure of items and iterables.\n", "\n", " Returns:\n", " A nested structure of items and tuples.\n", " \"\"\"\n", " if isinstance(nested, str):\n", " return nested\n", " try:\n", " return tuple(map(tuplize, nested))\n", " except TypeError:\n", " return nested" ]
[ 0, 0.02040816326530612, 0, 0, 0, 0, 0, 0, 0, 0.03333333333333333, 0, 0.14285714285714285, 0, 0.05, 0.058823529411764705 ]
15
0.020361
async def get_records_for_zone(self, dns_zone, params=None): """Get all resource record sets for a managed zone, using the DNS zone. Args: dns_zone (str): Desired DNS zone to query. params (dict): (optional) Additional query parameters for HTTP requests to the GDNS API. Returns: list of dicts representing rrsets. """ managed_zone = self.get_managed_zone(dns_zone) url = f'{self._base_url}/managedZones/{managed_zone}/rrsets' if not params: params = {} if 'fields' not in params: # Get only the fields we care about params['fields'] = ('rrsets/name,rrsets/kind,rrsets/rrdatas,' 'rrsets/type,rrsets/ttl,nextPageToken') next_page_token = None records = [] while True: if next_page_token: params['pageToken'] = next_page_token response = await self.get_json(url, params=params) records.extend(response['rrsets']) next_page_token = response.get('nextPageToken') if not next_page_token: break logging.info(f'Found {len(records)} rrsets for zone "{dns_zone}".') return records
[ "async", "def", "get_records_for_zone", "(", "self", ",", "dns_zone", ",", "params", "=", "None", ")", ":", "managed_zone", "=", "self", ".", "get_managed_zone", "(", "dns_zone", ")", "url", "=", "f'{self._base_url}/managedZones/{managed_zone}/rrsets'", "if", "not", "params", ":", "params", "=", "{", "}", "if", "'fields'", "not", "in", "params", ":", "# Get only the fields we care about", "params", "[", "'fields'", "]", "=", "(", "'rrsets/name,rrsets/kind,rrsets/rrdatas,'", "'rrsets/type,rrsets/ttl,nextPageToken'", ")", "next_page_token", "=", "None", "records", "=", "[", "]", "while", "True", ":", "if", "next_page_token", ":", "params", "[", "'pageToken'", "]", "=", "next_page_token", "response", "=", "await", "self", ".", "get_json", "(", "url", ",", "params", "=", "params", ")", "records", ".", "extend", "(", "response", "[", "'rrsets'", "]", ")", "next_page_token", "=", "response", ".", "get", "(", "'nextPageToken'", ")", "if", "not", "next_page_token", ":", "break", "logging", ".", "info", "(", "f'Found {len(records)} rrsets for zone \"{dns_zone}\".'", ")", "return", "records" ]
37.029412
0.001548
[ "async def get_records_for_zone(self, dns_zone, params=None):\n", " \"\"\"Get all resource record sets for a managed zone, using the DNS zone.\n", "\n", " Args:\n", " dns_zone (str): Desired DNS zone to query.\n", " params (dict): (optional) Additional query parameters for HTTP\n", " requests to the GDNS API.\n", " Returns:\n", " list of dicts representing rrsets.\n", " \"\"\"\n", " managed_zone = self.get_managed_zone(dns_zone)\n", " url = f'{self._base_url}/managedZones/{managed_zone}/rrsets'\n", "\n", " if not params:\n", " params = {}\n", "\n", " if 'fields' not in params:\n", " # Get only the fields we care about\n", " params['fields'] = ('rrsets/name,rrsets/kind,rrsets/rrdatas,'\n", " 'rrsets/type,rrsets/ttl,nextPageToken')\n", " next_page_token = None\n", "\n", " records = []\n", " while True:\n", " if next_page_token:\n", " params['pageToken'] = next_page_token\n", " response = await self.get_json(url, params=params)\n", " records.extend(response['rrsets'])\n", " next_page_token = response.get('nextPageToken')\n", " if not next_page_token:\n", " break\n", "\n", " logging.info(f'Found {len(records)} rrsets for zone \"{dns_zone}\".')\n", " return records" ]
[ 0, 0.0125, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.045454545454545456 ]
34
0.001705
def _check_local_option(self, option): """Test the status of local negotiated Telnet options.""" if not self.telnet_opt_dict.has_key(option): self.telnet_opt_dict[option] = TelnetOption() return self.telnet_opt_dict[option].local_option
[ "def", "_check_local_option", "(", "self", ",", "option", ")", ":", "if", "not", "self", ".", "telnet_opt_dict", ".", "has_key", "(", "option", ")", ":", "self", ".", "telnet_opt_dict", "[", "option", "]", "=", "TelnetOption", "(", ")", "return", "self", ".", "telnet_opt_dict", "[", "option", "]", ".", "local_option" ]
53.6
0.011029
[ "def _check_local_option(self, option):\n", " \"\"\"Test the status of local negotiated Telnet options.\"\"\"\n", " if not self.telnet_opt_dict.has_key(option):\n", " self.telnet_opt_dict[option] = TelnetOption()\n", " return self.telnet_opt_dict[option].local_option" ]
[ 0, 0.015151515151515152, 0.018867924528301886, 0, 0.017857142857142856 ]
5
0.010375
def get_node_by_dsl(self, node_dict: BaseEntity) -> Optional[Node]: """Look up a node by its data dictionary by hashing it then using :func:`get_node_by_hash`.""" return self.get_node_by_hash(node_dict.as_sha512())
[ "def", "get_node_by_dsl", "(", "self", ",", "node_dict", ":", "BaseEntity", ")", "->", "Optional", "[", "Node", "]", ":", "return", "self", ".", "get_node_by_hash", "(", "node_dict", ".", "as_sha512", "(", ")", ")" ]
76
0.013043
[ "def get_node_by_dsl(self, node_dict: BaseEntity) -> Optional[Node]:\n", " \"\"\"Look up a node by its data dictionary by hashing it then using :func:`get_node_by_hash`.\"\"\"\n", " return self.get_node_by_hash(node_dict.as_sha512())" ]
[ 0, 0.019417475728155338, 0.01694915254237288 ]
3
0.012122
def map_reduce(self, map, reduce, out, full_response=False, session=None, **kwargs): """Perform a map/reduce operation on this collection. If `full_response` is ``False`` (default) returns a :class:`~pymongo.collection.Collection` instance containing the results of the operation. Otherwise, returns the full response from the server to the `map reduce command`_. :Parameters: - `map`: map function (as a JavaScript string) - `reduce`: reduce function (as a JavaScript string) - `out`: output collection name or `out object` (dict). See the `map reduce command`_ documentation for available options. Note: `out` options are order sensitive. :class:`~bson.son.SON` can be used to specify multiple options. e.g. SON([('replace', <collection name>), ('db', <database name>)]) - `full_response` (optional): if ``True``, return full response to this command - otherwise just return the result collection - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. - `**kwargs` (optional): additional arguments to the `map reduce command`_ may be passed as keyword arguments to this helper method, e.g.:: >>> db.test.map_reduce(map, reduce, "myresults", limit=2) .. note:: The :meth:`map_reduce` method does **not** obey the :attr:`read_preference` of this :class:`Collection`. To run mapReduce on a secondary use the :meth:`inline_map_reduce` method instead. .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of this collection is automatically applied to this operation (if the output is not inline) when using MongoDB >= 3.4. .. versionchanged:: 3.6 Added ``session`` parameter. .. versionchanged:: 3.4 Apply this collection's write concern automatically to this operation when connected to MongoDB >= 3.4. .. seealso:: :doc:`/examples/aggregation` .. versionchanged:: 3.4 Added the `collation` option. .. versionchanged:: 2.2 Removed deprecated arguments: merge_output and reduce_output .. _map reduce command: http://docs.mongodb.org/manual/reference/command/mapReduce/ .. mongodoc:: mapreduce """ if not isinstance(out, (string_type, abc.Mapping)): raise TypeError("'out' must be an instance of " "%s or a mapping" % (string_type.__name__,)) response = self._map_reduce(map, reduce, out, session, ReadPreference.PRIMARY, **kwargs) if full_response or not response.get('result'): return response elif isinstance(response['result'], dict): dbase = response['result']['db'] coll = response['result']['collection'] return self.__database.client[dbase][coll] else: return self.__database[response["result"]]
[ "def", "map_reduce", "(", "self", ",", "map", ",", "reduce", ",", "out", ",", "full_response", "=", "False", ",", "session", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "not", "isinstance", "(", "out", ",", "(", "string_type", ",", "abc", ".", "Mapping", ")", ")", ":", "raise", "TypeError", "(", "\"'out' must be an instance of \"", "\"%s or a mapping\"", "%", "(", "string_type", ".", "__name__", ",", ")", ")", "response", "=", "self", ".", "_map_reduce", "(", "map", ",", "reduce", ",", "out", ",", "session", ",", "ReadPreference", ".", "PRIMARY", ",", "*", "*", "kwargs", ")", "if", "full_response", "or", "not", "response", ".", "get", "(", "'result'", ")", ":", "return", "response", "elif", "isinstance", "(", "response", "[", "'result'", "]", ",", "dict", ")", ":", "dbase", "=", "response", "[", "'result'", "]", "[", "'db'", "]", "coll", "=", "response", "[", "'result'", "]", "[", "'collection'", "]", "return", "self", ".", "__database", ".", "client", "[", "dbase", "]", "[", "coll", "]", "else", ":", "return", "self", ".", "__database", "[", "response", "[", "\"result\"", "]", "]" ]
43.985714
0.001588
[ "def map_reduce(self, map, reduce, out, full_response=False, session=None,\n", " **kwargs):\n", " \"\"\"Perform a map/reduce operation on this collection.\n", "\n", " If `full_response` is ``False`` (default) returns a\n", " :class:`~pymongo.collection.Collection` instance containing\n", " the results of the operation. Otherwise, returns the full\n", " response from the server to the `map reduce command`_.\n", "\n", " :Parameters:\n", " - `map`: map function (as a JavaScript string)\n", " - `reduce`: reduce function (as a JavaScript string)\n", " - `out`: output collection name or `out object` (dict). See\n", " the `map reduce command`_ documentation for available options.\n", " Note: `out` options are order sensitive. :class:`~bson.son.SON`\n", " can be used to specify multiple options.\n", " e.g. SON([('replace', <collection name>), ('db', <database name>)])\n", " - `full_response` (optional): if ``True``, return full response to\n", " this command - otherwise just return the result collection\n", " - `session` (optional): a\n", " :class:`~pymongo.client_session.ClientSession`.\n", " - `**kwargs` (optional): additional arguments to the\n", " `map reduce command`_ may be passed as keyword arguments to this\n", " helper method, e.g.::\n", "\n", " >>> db.test.map_reduce(map, reduce, \"myresults\", limit=2)\n", "\n", " .. note:: The :meth:`map_reduce` method does **not** obey the\n", " :attr:`read_preference` of this :class:`Collection`. To run\n", " mapReduce on a secondary use the :meth:`inline_map_reduce` method\n", " instead.\n", "\n", " .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of\n", " this collection is automatically applied to this operation (if the\n", " output is not inline) when using MongoDB >= 3.4.\n", "\n", " .. versionchanged:: 3.6\n", " Added ``session`` parameter.\n", "\n", " .. versionchanged:: 3.4\n", " Apply this collection's write concern automatically to this operation\n", " when connected to MongoDB >= 3.4.\n", "\n", " .. seealso:: :doc:`/examples/aggregation`\n", "\n", " .. versionchanged:: 3.4\n", " Added the `collation` option.\n", " .. versionchanged:: 2.2\n", " Removed deprecated arguments: merge_output and reduce_output\n", "\n", " .. _map reduce command: http://docs.mongodb.org/manual/reference/command/mapReduce/\n", "\n", " .. mongodoc:: mapreduce\n", "\n", " \"\"\"\n", " if not isinstance(out, (string_type, abc.Mapping)):\n", " raise TypeError(\"'out' must be an instance of \"\n", " \"%s or a mapping\" % (string_type.__name__,))\n", "\n", " response = self._map_reduce(map, reduce, out, session,\n", " ReadPreference.PRIMARY, **kwargs)\n", "\n", " if full_response or not response.get('result'):\n", " return response\n", " elif isinstance(response['result'], dict):\n", " dbase = response['result']['db']\n", " coll = response['result']['collection']\n", " return self.__database.client[dbase][coll]\n", " else:\n", " return self.__database[response[\"result\"]]" ]
[ 0, 0.03333333333333333, 0.016129032258064516, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012345679012345678, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.010869565217391304, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.018518518518518517 ]
70
0.001303
def squeeze_bits(arr: numpy.ndarray) -> numpy.ndarray: """Return a copy of an integer numpy array with the minimum bitness.""" assert arr.dtype.kind in ("i", "u") if arr.dtype.kind == "i": assert arr.min() >= 0 mlbl = int(arr.max()).bit_length() if mlbl <= 8: dtype = numpy.uint8 elif mlbl <= 16: dtype = numpy.uint16 elif mlbl <= 32: dtype = numpy.uint32 else: dtype = numpy.uint64 return arr.astype(dtype)
[ "def", "squeeze_bits", "(", "arr", ":", "numpy", ".", "ndarray", ")", "->", "numpy", ".", "ndarray", ":", "assert", "arr", ".", "dtype", ".", "kind", "in", "(", "\"i\"", ",", "\"u\"", ")", "if", "arr", ".", "dtype", ".", "kind", "==", "\"i\"", ":", "assert", "arr", ".", "min", "(", ")", ">=", "0", "mlbl", "=", "int", "(", "arr", ".", "max", "(", ")", ")", ".", "bit_length", "(", ")", "if", "mlbl", "<=", "8", ":", "dtype", "=", "numpy", ".", "uint8", "elif", "mlbl", "<=", "16", ":", "dtype", "=", "numpy", ".", "uint16", "elif", "mlbl", "<=", "32", ":", "dtype", "=", "numpy", ".", "uint32", "else", ":", "dtype", "=", "numpy", ".", "uint64", "return", "arr", ".", "astype", "(", "dtype", ")" ]
31.266667
0.00207
[ "def squeeze_bits(arr: numpy.ndarray) -> numpy.ndarray:\n", " \"\"\"Return a copy of an integer numpy array with the minimum bitness.\"\"\"\n", " assert arr.dtype.kind in (\"i\", \"u\")\n", " if arr.dtype.kind == \"i\":\n", " assert arr.min() >= 0\n", " mlbl = int(arr.max()).bit_length()\n", " if mlbl <= 8:\n", " dtype = numpy.uint8\n", " elif mlbl <= 16:\n", " dtype = numpy.uint16\n", " elif mlbl <= 32:\n", " dtype = numpy.uint32\n", " else:\n", " dtype = numpy.uint64\n", " return arr.astype(dtype)" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.03571428571428571 ]
15
0.002381
def _filter_cluster_data(self): """ Filter the cluster data catalog into the filtered_data catalog, which is what is shown in the H-R diagram. Filter on the values of the sliders, as well as the lasso selection in the skyviewer. """ min_temp = self.temperature_range_slider.value[0] max_temp = self.temperature_range_slider.value[1] temp_mask = np.logical_and( self.cluster.catalog['temperature'] >= min_temp, self.cluster.catalog['temperature'] <= max_temp ) min_lum = self.luminosity_range_slider.value[0] max_lum = self.luminosity_range_slider.value[1] lum_mask = np.logical_and( self.cluster.catalog['luminosity'] >= min_lum, self.cluster.catalog['luminosity'] <= max_lum ) selected_mask = np.isin(self.cluster.catalog['id'], self.selection_ids) filter_mask = temp_mask & lum_mask & selected_mask self.filtered_data = self.cluster.catalog[filter_mask].data self.source.data = { 'id': list(self.filtered_data['id']), 'temperature': list(self.filtered_data['temperature']), 'luminosity': list(self.filtered_data['luminosity']), 'color': list(self.filtered_data['color']) } logging.debug("Selected data is now: %s", self.filtered_data)
[ "def", "_filter_cluster_data", "(", "self", ")", ":", "min_temp", "=", "self", ".", "temperature_range_slider", ".", "value", "[", "0", "]", "max_temp", "=", "self", ".", "temperature_range_slider", ".", "value", "[", "1", "]", "temp_mask", "=", "np", ".", "logical_and", "(", "self", ".", "cluster", ".", "catalog", "[", "'temperature'", "]", ">=", "min_temp", ",", "self", ".", "cluster", ".", "catalog", "[", "'temperature'", "]", "<=", "max_temp", ")", "min_lum", "=", "self", ".", "luminosity_range_slider", ".", "value", "[", "0", "]", "max_lum", "=", "self", ".", "luminosity_range_slider", ".", "value", "[", "1", "]", "lum_mask", "=", "np", ".", "logical_and", "(", "self", ".", "cluster", ".", "catalog", "[", "'luminosity'", "]", ">=", "min_lum", ",", "self", ".", "cluster", ".", "catalog", "[", "'luminosity'", "]", "<=", "max_lum", ")", "selected_mask", "=", "np", ".", "isin", "(", "self", ".", "cluster", ".", "catalog", "[", "'id'", "]", ",", "self", ".", "selection_ids", ")", "filter_mask", "=", "temp_mask", "&", "lum_mask", "&", "selected_mask", "self", ".", "filtered_data", "=", "self", ".", "cluster", ".", "catalog", "[", "filter_mask", "]", ".", "data", "self", ".", "source", ".", "data", "=", "{", "'id'", ":", "list", "(", "self", ".", "filtered_data", "[", "'id'", "]", ")", ",", "'temperature'", ":", "list", "(", "self", ".", "filtered_data", "[", "'temperature'", "]", ")", ",", "'luminosity'", ":", "list", "(", "self", ".", "filtered_data", "[", "'luminosity'", "]", ")", ",", "'color'", ":", "list", "(", "self", ".", "filtered_data", "[", "'color'", "]", ")", "}", "logging", ".", "debug", "(", "\"Selected data is now: %s\"", ",", "self", ".", "filtered_data", ")" ]
38.971429
0.001431
[ "def _filter_cluster_data(self):\n", " \"\"\"\n", " Filter the cluster data catalog into the filtered_data\n", " catalog, which is what is shown in the H-R diagram.\n", "\n", " Filter on the values of the sliders, as well as the lasso\n", " selection in the skyviewer.\n", " \"\"\"\n", " min_temp = self.temperature_range_slider.value[0]\n", " max_temp = self.temperature_range_slider.value[1]\n", " temp_mask = np.logical_and(\n", " self.cluster.catalog['temperature'] >= min_temp,\n", " self.cluster.catalog['temperature'] <= max_temp\n", " )\n", "\n", " min_lum = self.luminosity_range_slider.value[0]\n", " max_lum = self.luminosity_range_slider.value[1]\n", " lum_mask = np.logical_and(\n", " self.cluster.catalog['luminosity'] >= min_lum,\n", " self.cluster.catalog['luminosity'] <= max_lum\n", " )\n", "\n", " selected_mask = np.isin(self.cluster.catalog['id'], self.selection_ids)\n", "\n", " filter_mask = temp_mask & lum_mask & selected_mask\n", " self.filtered_data = self.cluster.catalog[filter_mask].data\n", "\n", " self.source.data = {\n", " 'id': list(self.filtered_data['id']),\n", " 'temperature': list(self.filtered_data['temperature']),\n", " 'luminosity': list(self.filtered_data['luminosity']),\n", " 'color': list(self.filtered_data['color'])\n", " }\n", "\n", " logging.debug(\"Selected data is now: %s\", self.filtered_data)" ]
[ 0, 0.08333333333333333, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.014492753623188406 ]
35
0.002795
def get(self, sid): """ Constructs a AlphaSenderContext :param sid: The sid :returns: twilio.rest.messaging.v1.service.alpha_sender.AlphaSenderContext :rtype: twilio.rest.messaging.v1.service.alpha_sender.AlphaSenderContext """ return AlphaSenderContext(self._version, service_sid=self._solution['service_sid'], sid=sid, )
[ "def", "get", "(", "self", ",", "sid", ")", ":", "return", "AlphaSenderContext", "(", "self", ".", "_version", ",", "service_sid", "=", "self", ".", "_solution", "[", "'service_sid'", "]", ",", "sid", "=", "sid", ",", ")" ]
37.1
0.013158
[ "def get(self, sid):\n", " \"\"\"\n", " Constructs a AlphaSenderContext\n", "\n", " :param sid: The sid\n", "\n", " :returns: twilio.rest.messaging.v1.service.alpha_sender.AlphaSenderContext\n", " :rtype: twilio.rest.messaging.v1.service.alpha_sender.AlphaSenderContext\n", " \"\"\"\n", " return AlphaSenderContext(self._version, service_sid=self._solution['service_sid'], sid=sid, )" ]
[ 0, 0.08333333333333333, 0, 0, 0, 0, 0.012048192771084338, 0.012345679012345678, 0, 0.0196078431372549 ]
10
0.012734
def lru_cache(maxsize=100, typed=False): """Least-recently-used cache decorator. If *maxsize* is set to None, the LRU features are disabled and the cache can grow without bound. If *typed* is True, arguments of different types will be cached separately. For example, f(3.0) and f(3) will be treated as distinct calls with distinct results. Arguments to the cached function must be hashable. View the cache statistics named tuple (hits, misses, maxsize, currsize) with f.cache_info(). Clear the cache and statistics with f.cache_clear(). Access the underlying function with f.__wrapped__. See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used """ # Users should only access the lru_cache through its public API: # cache_info, cache_clear, and f.__wrapped__ # The internals of the lru_cache are encapsulated for thread safety and # to allow the implementation to change (including a possible C version). def decorating_function(user_function): cache = dict() stats = [0, 0] # make statistics updateable non-locally HITS, MISSES = 0, 1 # names for the stats fields make_key = _make_key cache_get = cache.get # bound method to lookup key or return None _len = len # localize the global len() function lock = RLock() # because linkedlist updates aren't threadsafe root = [] # root of the circular doubly linked list root[:] = [root, root, None, None] # initialize by pointing to self nonlocal_root = [root] # make updateable non-locally PREV, NEXT, KEY, RESULT = 0, 1, 2, 3 # names for the link fields if maxsize == 0: def wrapper(*args, **kwds): # no caching, just do a statistics update after a successful call result = user_function(*args, **kwds) stats[MISSES] += 1 return result elif maxsize is None: def wrapper(*args, **kwds): # simple caching without ordering or size limit key = make_key(args, kwds, typed) result = cache_get(key, root) # root used here as a unique not-found sentinel if result is not root: stats[HITS] += 1 return result result = user_function(*args, **kwds) cache[key] = result stats[MISSES] += 1 return result else: def wrapper(*args, **kwds): # size limited caching that tracks accesses by recency key = make_key(args, kwds, typed) if kwds or typed else args with lock: link = cache_get(key) if link is not None: # record recent use of the key by moving it to the front of the list root, = nonlocal_root link_prev, link_next, key, result = link link_prev[NEXT] = link_next link_next[PREV] = link_prev last = root[PREV] last[NEXT] = root[PREV] = link link[PREV] = last link[NEXT] = root stats[HITS] += 1 return result result = user_function(*args, **kwds) with lock: root, = nonlocal_root if key in cache: # getting here means that this same key was added to the # cache while the lock was released. since the link # update is already done, we need only return the # computed result and update the count of misses. pass elif _len(cache) >= maxsize: # use the old root to store the new key and result oldroot = root oldroot[KEY] = key oldroot[RESULT] = result # empty the oldest link and make it the new root root = nonlocal_root[0] = oldroot[NEXT] oldkey = root[KEY] root[KEY] = root[RESULT] = None # now update the cache dictionary for the new links del cache[oldkey] cache[key] = oldroot else: # put result in a new link at the front of the list last = root[PREV] link = [last, root, key, result] last[NEXT] = root[PREV] = cache[key] = link stats[MISSES] += 1 return result def cache_info(): """Report cache statistics""" with lock: return _CacheInfo(stats[HITS], stats[MISSES], maxsize, len(cache)) def cache_clear(): """Clear the cache and cache statistics""" with lock: cache.clear() root = nonlocal_root[0] root[:] = [root, root, None, None] stats[:] = [0, 0] wrapper.__wrapped__ = user_function wrapper.cache_info = cache_info wrapper.cache_clear = cache_clear return update_wrapper(wrapper, user_function) return decorating_function
[ "def", "lru_cache", "(", "maxsize", "=", "100", ",", "typed", "=", "False", ")", ":", "# Users should only access the lru_cache through its public API:", "# cache_info, cache_clear, and f.__wrapped__", "# The internals of the lru_cache are encapsulated for thread safety and", "# to allow the implementation to change (including a possible C version).", "def", "decorating_function", "(", "user_function", ")", ":", "cache", "=", "dict", "(", ")", "stats", "=", "[", "0", ",", "0", "]", "# make statistics updateable non-locally", "HITS", ",", "MISSES", "=", "0", ",", "1", "# names for the stats fields", "make_key", "=", "_make_key", "cache_get", "=", "cache", ".", "get", "# bound method to lookup key or return None", "_len", "=", "len", "# localize the global len() function", "lock", "=", "RLock", "(", ")", "# because linkedlist updates aren't threadsafe", "root", "=", "[", "]", "# root of the circular doubly linked list", "root", "[", ":", "]", "=", "[", "root", ",", "root", ",", "None", ",", "None", "]", "# initialize by pointing to self", "nonlocal_root", "=", "[", "root", "]", "# make updateable non-locally", "PREV", ",", "NEXT", ",", "KEY", ",", "RESULT", "=", "0", ",", "1", ",", "2", ",", "3", "# names for the link fields", "if", "maxsize", "==", "0", ":", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwds", ")", ":", "# no caching, just do a statistics update after a successful call", "result", "=", "user_function", "(", "*", "args", ",", "*", "*", "kwds", ")", "stats", "[", "MISSES", "]", "+=", "1", "return", "result", "elif", "maxsize", "is", "None", ":", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwds", ")", ":", "# simple caching without ordering or size limit", "key", "=", "make_key", "(", "args", ",", "kwds", ",", "typed", ")", "result", "=", "cache_get", "(", "key", ",", "root", ")", "# root used here as a unique not-found sentinel", "if", "result", "is", "not", "root", ":", "stats", "[", "HITS", "]", "+=", "1", "return", "result", "result", "=", "user_function", "(", "*", "args", ",", "*", "*", "kwds", ")", "cache", "[", "key", "]", "=", "result", "stats", "[", "MISSES", "]", "+=", "1", "return", "result", "else", ":", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwds", ")", ":", "# size limited caching that tracks accesses by recency", "key", "=", "make_key", "(", "args", ",", "kwds", ",", "typed", ")", "if", "kwds", "or", "typed", "else", "args", "with", "lock", ":", "link", "=", "cache_get", "(", "key", ")", "if", "link", "is", "not", "None", ":", "# record recent use of the key by moving it to the front of the list", "root", ",", "=", "nonlocal_root", "link_prev", ",", "link_next", ",", "key", ",", "result", "=", "link", "link_prev", "[", "NEXT", "]", "=", "link_next", "link_next", "[", "PREV", "]", "=", "link_prev", "last", "=", "root", "[", "PREV", "]", "last", "[", "NEXT", "]", "=", "root", "[", "PREV", "]", "=", "link", "link", "[", "PREV", "]", "=", "last", "link", "[", "NEXT", "]", "=", "root", "stats", "[", "HITS", "]", "+=", "1", "return", "result", "result", "=", "user_function", "(", "*", "args", ",", "*", "*", "kwds", ")", "with", "lock", ":", "root", ",", "=", "nonlocal_root", "if", "key", "in", "cache", ":", "# getting here means that this same key was added to the", "# cache while the lock was released. 
since the link", "# update is already done, we need only return the", "# computed result and update the count of misses.", "pass", "elif", "_len", "(", "cache", ")", ">=", "maxsize", ":", "# use the old root to store the new key and result", "oldroot", "=", "root", "oldroot", "[", "KEY", "]", "=", "key", "oldroot", "[", "RESULT", "]", "=", "result", "# empty the oldest link and make it the new root", "root", "=", "nonlocal_root", "[", "0", "]", "=", "oldroot", "[", "NEXT", "]", "oldkey", "=", "root", "[", "KEY", "]", "root", "[", "KEY", "]", "=", "root", "[", "RESULT", "]", "=", "None", "# now update the cache dictionary for the new links", "del", "cache", "[", "oldkey", "]", "cache", "[", "key", "]", "=", "oldroot", "else", ":", "# put result in a new link at the front of the list", "last", "=", "root", "[", "PREV", "]", "link", "=", "[", "last", ",", "root", ",", "key", ",", "result", "]", "last", "[", "NEXT", "]", "=", "root", "[", "PREV", "]", "=", "cache", "[", "key", "]", "=", "link", "stats", "[", "MISSES", "]", "+=", "1", "return", "result", "def", "cache_info", "(", ")", ":", "\"\"\"Report cache statistics\"\"\"", "with", "lock", ":", "return", "_CacheInfo", "(", "stats", "[", "HITS", "]", ",", "stats", "[", "MISSES", "]", ",", "maxsize", ",", "len", "(", "cache", ")", ")", "def", "cache_clear", "(", ")", ":", "\"\"\"Clear the cache and cache statistics\"\"\"", "with", "lock", ":", "cache", ".", "clear", "(", ")", "root", "=", "nonlocal_root", "[", "0", "]", "root", "[", ":", "]", "=", "[", "root", ",", "root", ",", "None", ",", "None", "]", "stats", "[", ":", "]", "=", "[", "0", ",", "0", "]", "wrapper", ".", "__wrapped__", "=", "user_function", "wrapper", ".", "cache_info", "=", "cache_info", "wrapper", ".", "cache_clear", "=", "cache_clear", "return", "update_wrapper", "(", "wrapper", ",", "user_function", ")", "return", "decorating_function" ]
43.09375
0.002127
[ "def lru_cache(maxsize=100, typed=False):\n", " \"\"\"Least-recently-used cache decorator.\n", "\n", " If *maxsize* is set to None, the LRU features are disabled and the cache\n", " can grow without bound.\n", "\n", " If *typed* is True, arguments of different types will be cached separately.\n", " For example, f(3.0) and f(3) will be treated as distinct calls with\n", " distinct results.\n", "\n", " Arguments to the cached function must be hashable.\n", "\n", " View the cache statistics named tuple (hits, misses, maxsize, currsize) with\n", " f.cache_info(). Clear the cache and statistics with f.cache_clear().\n", " Access the underlying function with f.__wrapped__.\n", "\n", " See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used\n", "\n", " \"\"\"\n", "\n", " # Users should only access the lru_cache through its public API:\n", " # cache_info, cache_clear, and f.__wrapped__\n", " # The internals of the lru_cache are encapsulated for thread safety and\n", " # to allow the implementation to change (including a possible C version).\n", "\n", " def decorating_function(user_function):\n", "\n", " cache = dict()\n", " stats = [0, 0] # make statistics updateable non-locally\n", " HITS, MISSES = 0, 1 # names for the stats fields\n", " make_key = _make_key\n", " cache_get = cache.get # bound method to lookup key or return None\n", " _len = len # localize the global len() function\n", " lock = RLock() # because linkedlist updates aren't threadsafe\n", " root = [] # root of the circular doubly linked list\n", " root[:] = [root, root, None, None] # initialize by pointing to self\n", " nonlocal_root = [root] # make updateable non-locally\n", " PREV, NEXT, KEY, RESULT = 0, 1, 2, 3 # names for the link fields\n", "\n", " if maxsize == 0:\n", "\n", " def wrapper(*args, **kwds):\n", " # no caching, just do a statistics update after a successful call\n", " result = user_function(*args, **kwds)\n", " stats[MISSES] += 1\n", " return result\n", "\n", " elif maxsize is None:\n", "\n", " def wrapper(*args, **kwds):\n", " # simple caching without ordering or size limit\n", " key = make_key(args, kwds, typed)\n", " result = cache_get(key, root) # root used here as a unique not-found sentinel\n", " if result is not root:\n", " stats[HITS] += 1\n", " return result\n", " result = user_function(*args, **kwds)\n", " cache[key] = result\n", " stats[MISSES] += 1\n", " return result\n", "\n", " else:\n", "\n", " def wrapper(*args, **kwds):\n", " # size limited caching that tracks accesses by recency\n", " key = make_key(args, kwds, typed) if kwds or typed else args\n", " with lock:\n", " link = cache_get(key)\n", " if link is not None:\n", " # record recent use of the key by moving it to the front of the list\n", " root, = nonlocal_root\n", " link_prev, link_next, key, result = link\n", " link_prev[NEXT] = link_next\n", " link_next[PREV] = link_prev\n", " last = root[PREV]\n", " last[NEXT] = root[PREV] = link\n", " link[PREV] = last\n", " link[NEXT] = root\n", " stats[HITS] += 1\n", " return result\n", " result = user_function(*args, **kwds)\n", " with lock:\n", " root, = nonlocal_root\n", " if key in cache:\n", " # getting here means that this same key was added to the\n", " # cache while the lock was released. 
since the link\n", " # update is already done, we need only return the\n", " # computed result and update the count of misses.\n", " pass\n", " elif _len(cache) >= maxsize:\n", " # use the old root to store the new key and result\n", " oldroot = root\n", " oldroot[KEY] = key\n", " oldroot[RESULT] = result\n", " # empty the oldest link and make it the new root\n", " root = nonlocal_root[0] = oldroot[NEXT]\n", " oldkey = root[KEY]\n", " root[KEY] = root[RESULT] = None\n", " # now update the cache dictionary for the new links\n", " del cache[oldkey]\n", " cache[key] = oldroot\n", " else:\n", " # put result in a new link at the front of the list\n", " last = root[PREV]\n", " link = [last, root, key, result]\n", " last[NEXT] = root[PREV] = cache[key] = link\n", " stats[MISSES] += 1\n", " return result\n", "\n", " def cache_info():\n", " \"\"\"Report cache statistics\"\"\"\n", " with lock:\n", " return _CacheInfo(stats[HITS], stats[MISSES], maxsize, len(cache))\n", "\n", " def cache_clear():\n", " \"\"\"Clear the cache and cache statistics\"\"\"\n", " with lock:\n", " cache.clear()\n", " root = nonlocal_root[0]\n", " root[:] = [root, root, None, None]\n", " stats[:] = [0, 0]\n", "\n", " wrapper.__wrapped__ = user_function\n", " wrapper.cache_info = cache_info\n", " wrapper.cache_clear = cache_clear\n", " return update_wrapper(wrapper, user_function)\n", "\n", " return decorating_function" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012345679012345678, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012345679012345678, 0, 0, 0.011904761904761904, 0, 0.011494252873563218, 0.012195121951219513, 0.012345679012345678, 0, 0, 0, 0, 0, 0, 0.012195121951219513, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.010416666666666666, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.010752688172043012, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012345679012345678, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012048192771084338, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.03333333333333333 ]
128
0.001279
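The record above is a pure-Python LRU cache whose public surface is cache_info(), cache_clear() and __wrapped__. As a quick, hedged usage sketch, the standard-library functools.lru_cache exposes the same surface; the fib function below is purely illustrative and not part of the record.

from functools import lru_cache

@lru_cache(maxsize=100, typed=False)
def fib(n):
    # Memoized recursive Fibonacci: each distinct argument is computed once.
    return n if n < 2 else fib(n - 1) + fib(n - 2)

print(fib(30))             # 832040, with at most 31 real (miss) calls
print(fib.cache_info())    # CacheInfo(hits=..., misses=..., maxsize=100, currsize=...)
fib.cache_clear()          # resets the cache and the hit/miss statistics
print(fib.__wrapped__(5))  # calls the undecorated function, bypassing the cache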
def p_array_indices(self, p): '''array_indices : array_index | array_index COMMA array_indices''' if len(p) == 2: p[0] = p[1], else: p[0] = (p[1],) + p[3]
[ "def", "p_array_indices", "(", "self", ",", "p", ")", ":", "if", "len", "(", "p", ")", "==", "2", ":", "p", "[", "0", "]", "=", "p", "[", "1", "]", ",", "else", ":", "p", "[", "0", "]", "=", "(", "p", "[", "1", "]", ",", ")", "+", "p", "[", "3", "]" ]
31.571429
0.008811
[ "def p_array_indices(self, p):\n", " '''array_indices : array_index\n", " | array_index COMMA array_indices'''\n", " if len(p) == 2:\n", " p[0] = p[1],\n", " else:\n", " p[0] = (p[1],) + p[3]" ]
[ 0, 0.02564102564102564, 0, 0, 0, 0, 0.030303030303030304 ]
7
0.007992
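The p_array_indices rule above is a ply.yacc production that collects comma-separated array indices into a tuple by right recursion. Outside the parser machinery, the same accumulation can be sketched as a plain recursive function; the input list below stands in for already-parsed array_index values.

def build_index_tuple(indices):
    # Mirrors the two productions: a single index becomes a 1-tuple,
    # otherwise the head is prepended to the tuple built from the rest.
    if len(indices) == 1:
        return (indices[0],)
    return (indices[0],) + build_index_tuple(indices[1:])

print(build_index_tuple([1, 2, 3]))  # (1, 2, 3)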
def maybe_cythonize_extensions(top_path, config): """Tweaks for building extensions between release and development mode.""" is_release = os.path.exists(os.path.join(top_path, 'PKG-INFO')) if is_release: build_from_c_and_cpp_files(config.ext_modules) else: message = ('Please install cython with a version >= {0} in order ' 'to build a scikit-survival development version.').format( CYTHON_MIN_VERSION) try: import Cython if LooseVersion(Cython.__version__) < CYTHON_MIN_VERSION: message += ' Your version of Cython was {0}.'.format( Cython.__version__) raise ValueError(message) from Cython.Build import cythonize except ImportError as exc: exc.args += (message,) raise # http://docs.cython.org/en/latest/src/userguide/source_files_and_compilation.html#cythonize-arguments directives = {'language_level': '3'} cy_cov = os.environ.get('CYTHON_COVERAGE', False) if cy_cov: directives['linetrace'] = True macros = [('CYTHON_TRACE', '1'), ('CYTHON_TRACE_NOGIL', '1')] else: macros = [] config.ext_modules = cythonize( config.ext_modules, compiler_directives=directives) for e in config.ext_modules: e.define_macros.extend(macros)
[ "def", "maybe_cythonize_extensions", "(", "top_path", ",", "config", ")", ":", "is_release", "=", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "top_path", ",", "'PKG-INFO'", ")", ")", "if", "is_release", ":", "build_from_c_and_cpp_files", "(", "config", ".", "ext_modules", ")", "else", ":", "message", "=", "(", "'Please install cython with a version >= {0} in order '", "'to build a scikit-survival development version.'", ")", ".", "format", "(", "CYTHON_MIN_VERSION", ")", "try", ":", "import", "Cython", "if", "LooseVersion", "(", "Cython", ".", "__version__", ")", "<", "CYTHON_MIN_VERSION", ":", "message", "+=", "' Your version of Cython was {0}.'", ".", "format", "(", "Cython", ".", "__version__", ")", "raise", "ValueError", "(", "message", ")", "from", "Cython", ".", "Build", "import", "cythonize", "except", "ImportError", "as", "exc", ":", "exc", ".", "args", "+=", "(", "message", ",", ")", "raise", "# http://docs.cython.org/en/latest/src/userguide/source_files_and_compilation.html#cythonize-arguments", "directives", "=", "{", "'language_level'", ":", "'3'", "}", "cy_cov", "=", "os", ".", "environ", ".", "get", "(", "'CYTHON_COVERAGE'", ",", "False", ")", "if", "cy_cov", ":", "directives", "[", "'linetrace'", "]", "=", "True", "macros", "=", "[", "(", "'CYTHON_TRACE'", ",", "'1'", ")", ",", "(", "'CYTHON_TRACE_NOGIL'", ",", "'1'", ")", "]", "else", ":", "macros", "=", "[", "]", "config", ".", "ext_modules", "=", "cythonize", "(", "config", ".", "ext_modules", ",", "compiler_directives", "=", "directives", ")", "for", "e", "in", "config", ".", "ext_modules", ":", "e", ".", "define_macros", ".", "extend", "(", "macros", ")" ]
39.583333
0.000685
[ "def maybe_cythonize_extensions(top_path, config):\n", " \"\"\"Tweaks for building extensions between release and development mode.\"\"\"\n", " is_release = os.path.exists(os.path.join(top_path, 'PKG-INFO'))\n", "\n", " if is_release:\n", " build_from_c_and_cpp_files(config.ext_modules)\n", " else:\n", " message = ('Please install cython with a version >= {0} in order '\n", " 'to build a scikit-survival development version.').format(\n", " CYTHON_MIN_VERSION)\n", " try:\n", " import Cython\n", " if LooseVersion(Cython.__version__) < CYTHON_MIN_VERSION:\n", " message += ' Your version of Cython was {0}.'.format(\n", " Cython.__version__)\n", " raise ValueError(message)\n", " from Cython.Build import cythonize\n", " except ImportError as exc:\n", " exc.args += (message,)\n", " raise\n", "\n", " # http://docs.cython.org/en/latest/src/userguide/source_files_and_compilation.html#cythonize-arguments\n", " directives = {'language_level': '3'}\n", " cy_cov = os.environ.get('CYTHON_COVERAGE', False)\n", " if cy_cov:\n", " directives['linetrace'] = True\n", " macros = [('CYTHON_TRACE', '1'), ('CYTHON_TRACE_NOGIL', '1')]\n", " else:\n", " macros = []\n", "\n", " config.ext_modules = cythonize(\n", " config.ext_modules,\n", " compiler_directives=directives)\n", "\n", " for e in config.ext_modules:\n", " e.define_macros.extend(macros)" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.023809523809523808 ]
36
0.000661
def syllabify(word): '''Syllabify the given word, whether simplex or complex.''' compound = not word.isalpha() syllabify = _syllabify_complex if compound else _syllabify_simplex syllabifications = list(syllabify(word)) # if variation, order variants from most preferred to least preferred if len(syllabifications) > 1: syllabifications = rank(syllabifications) for word, rules in syllabifications: yield _post_process(word, rules)
[ "def", "syllabify", "(", "word", ")", ":", "compound", "=", "not", "word", ".", "isalpha", "(", ")", "syllabify", "=", "_syllabify_complex", "if", "compound", "else", "_syllabify_simplex", "syllabifications", "=", "list", "(", "syllabify", "(", "word", ")", ")", "# if variation, order variants from most preferred to least preferred", "if", "len", "(", "syllabifications", ")", ">", "1", ":", "syllabifications", "=", "rank", "(", "syllabifications", ")", "for", "word", ",", "rules", "in", "syllabifications", ":", "yield", "_post_process", "(", "word", ",", "rules", ")" ]
38.75
0.002101
[ "def syllabify(word):\n", " '''Syllabify the given word, whether simplex or complex.'''\n", " compound = not word.isalpha()\n", " syllabify = _syllabify_complex if compound else _syllabify_simplex\n", " syllabifications = list(syllabify(word))\n", "\n", " # if variation, order variants from most preferred to least preferred\n", " if len(syllabifications) > 1:\n", " syllabifications = rank(syllabifications)\n", "\n", " for word, rules in syllabifications:\n", " yield _post_process(word, rules)" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.025 ]
12
0.002083
def deriv1(x,y,i,n): """ alternative way to smooth the derivative of a noisy signal using least square fit. x=array of x axis y=array of y axis n=smoothing factor i= position in this method the slope in position i is calculated by least square fit of n points before and after position. """ m_,x_,y_,xy_,x_2=0.,0.,0.,0.,0. for ix in range(i,i+n,1): x_=x_+x[ix] y_=y_+y[ix] xy_=xy_+x[ix]*y[ix] x_2=x_2+x[ix]**2 m= old_div(( (n*xy_) - (x_*y_) ), ( n*x_2-(x_)**2)) return(m)
[ "def", "deriv1", "(", "x", ",", "y", ",", "i", ",", "n", ")", ":", "m_", ",", "x_", ",", "y_", ",", "xy_", ",", "x_2", "=", "0.", ",", "0.", ",", "0.", ",", "0.", ",", "0.", "for", "ix", "in", "range", "(", "i", ",", "i", "+", "n", ",", "1", ")", ":", "x_", "=", "x_", "+", "x", "[", "ix", "]", "y_", "=", "y_", "+", "y", "[", "ix", "]", "xy_", "=", "xy_", "+", "x", "[", "ix", "]", "*", "y", "[", "ix", "]", "x_2", "=", "x_2", "+", "x", "[", "ix", "]", "**", "2", "m", "=", "old_div", "(", "(", "(", "n", "*", "xy_", ")", "-", "(", "x_", "*", "y_", ")", ")", ",", "(", "n", "*", "x_2", "-", "(", "x_", ")", "**", "2", ")", ")", "return", "(", "m", ")" ]
27.05
0.042857
[ "def deriv1(x,y,i,n):\n", " \"\"\"\n", " alternative way to smooth the derivative of a noisy signal\n", " using least square fit.\n", " x=array of x axis\n", " y=array of y axis\n", " n=smoothing factor\n", " i= position\n", "\n", " in this method the slope in position i is calculated by least square fit of n points\n", " before and after position.\n", " \"\"\"\n", " m_,x_,y_,xy_,x_2=0.,0.,0.,0.,0.\n", " for ix in range(i,i+n,1):\n", " x_=x_+x[ix]\n", " y_=y_+y[ix]\n", " xy_=xy_+x[ix]*y[ix]\n", " x_2=x_2+x[ix]**2\n", " m= old_div(( (n*xy_) - (x_*y_) ), ( n*x_2-(x_)**2))\n", " return(m)" ]
[ 0.14285714285714285, 0, 0, 0, 0, 0, 0, 0, 0, 0.011235955056179775, 0, 0, 0.25, 0.06666666666666667, 0.05, 0.05, 0.03571428571428571, 0.04, 0.07142857142857142, 0.07692307692307693 ]
20
0.039741
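deriv1 smooths a noisy derivative by taking the least-squares slope m = (n*Σxy - Σx*Σy) / (n*Σx² - (Σx)²) over the n samples starting at position i. The same slope can be obtained from numpy.polyfit with degree 1, which makes a convenient cross-check; the arrays and noise level below are illustrative only.

import numpy as np

def deriv1_np(x, y, i, n):
    # Slope of the degree-1 least-squares fit over samples i .. i+n-1,
    # equivalent to the closed-form expression used in deriv1().
    slope, _intercept = np.polyfit(x[i:i + n], y[i:i + n], 1)
    return slope

x = np.linspace(0.0, 1.0, 50)
y = x**2 + 0.01 * np.random.randn(50)   # noisy parabola, true derivative is 2x
print(deriv1_np(x, y, 10, 8))           # roughly 2 * mean(x[10:18]) for this parabola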
def update_gunicorns(): """ Updates the dict of gunicorn processes. Run the ps command and parse its output for processes named after gunicorn, building up a dict of gunicorn processes. When new gunicorns are discovered, run the netstat command to determine the ports they're serving on. """ global tick tick += 1 if (tick * screen_delay) % ps_delay != 0: return tick = 0 for pid in gunicorns: gunicorns[pid].update({"workers": 0, "mem": 0}) ps = Popen(PS_ARGS, stdout=PIPE).communicate()[0].split("\n") headings = ps.pop(0).split() name_col = headings.index(cmd_heading) num_cols = len(headings) - 1 for row in ps: cols = row.split(None, num_cols) if cols and "gunicorn: " in cols[name_col]: if "gunicorn: worker" in cols[name_col]: is_worker = True else: is_worker = False if is_worker: pid = cols[headings.index("PPID")] else: pid = cols[headings.index("PID")] if pid not in gunicorns: gunicorns[pid] = {"workers": 0, "mem": 0, "port": None, "name": cols[name_col].strip().split("[",1)[1].split("]",1)[:-1]} gunicorns[pid]["mem"] += int(cols[headings.index("RSS")]) if is_worker: gunicorns[pid]["workers"] += 1 # Remove gunicorns that were not found in the process list. for pid in gunicorns.keys()[:]: if gunicorns[pid]["workers"] == 0: del gunicorns[pid] # Determine ports if any are missing. if not [g for g in gunicorns.values() if g["port"] is None]: return for (pid, port) in ports_for_pids(gunicorns.keys()): if pid in gunicorns: gunicorns[pid]["port"] = port
[ "def", "update_gunicorns", "(", ")", ":", "global", "tick", "tick", "+=", "1", "if", "(", "tick", "*", "screen_delay", ")", "%", "ps_delay", "!=", "0", ":", "return", "tick", "=", "0", "for", "pid", "in", "gunicorns", ":", "gunicorns", "[", "pid", "]", ".", "update", "(", "{", "\"workers\"", ":", "0", ",", "\"mem\"", ":", "0", "}", ")", "ps", "=", "Popen", "(", "PS_ARGS", ",", "stdout", "=", "PIPE", ")", ".", "communicate", "(", ")", "[", "0", "]", ".", "split", "(", "\"\\n\"", ")", "headings", "=", "ps", ".", "pop", "(", "0", ")", ".", "split", "(", ")", "name_col", "=", "headings", ".", "index", "(", "cmd_heading", ")", "num_cols", "=", "len", "(", "headings", ")", "-", "1", "for", "row", "in", "ps", ":", "cols", "=", "row", ".", "split", "(", "None", ",", "num_cols", ")", "if", "cols", "and", "\"gunicorn: \"", "in", "cols", "[", "name_col", "]", ":", "if", "\"gunicorn: worker\"", "in", "cols", "[", "name_col", "]", ":", "is_worker", "=", "True", "else", ":", "is_worker", "=", "False", "if", "is_worker", ":", "pid", "=", "cols", "[", "headings", ".", "index", "(", "\"PPID\"", ")", "]", "else", ":", "pid", "=", "cols", "[", "headings", ".", "index", "(", "\"PID\"", ")", "]", "if", "pid", "not", "in", "gunicorns", ":", "gunicorns", "[", "pid", "]", "=", "{", "\"workers\"", ":", "0", ",", "\"mem\"", ":", "0", ",", "\"port\"", ":", "None", ",", "\"name\"", ":", "cols", "[", "name_col", "]", ".", "strip", "(", ")", ".", "split", "(", "\"[\"", ",", "1", ")", "[", "1", "]", ".", "split", "(", "\"]\"", ",", "1", ")", "[", ":", "-", "1", "]", "}", "gunicorns", "[", "pid", "]", "[", "\"mem\"", "]", "+=", "int", "(", "cols", "[", "headings", ".", "index", "(", "\"RSS\"", ")", "]", ")", "if", "is_worker", ":", "gunicorns", "[", "pid", "]", "[", "\"workers\"", "]", "+=", "1", "# Remove gunicorns that were not found in the process list.", "for", "pid", "in", "gunicorns", ".", "keys", "(", ")", "[", ":", "]", ":", "if", "gunicorns", "[", "pid", "]", "[", "\"workers\"", "]", "==", "0", ":", "del", "gunicorns", "[", "pid", "]", "# Determine ports if any are missing.", "if", "not", "[", "g", "for", "g", "in", "gunicorns", ".", "values", "(", ")", "if", "g", "[", "\"port\"", "]", "is", "None", "]", ":", "return", "for", "(", "pid", ",", "port", ")", "in", "ports_for_pids", "(", "gunicorns", ".", "keys", "(", ")", ")", ":", "if", "pid", "in", "gunicorns", ":", "gunicorns", "[", "pid", "]", "[", "\"port\"", "]", "=", "port" ]
38.891304
0.002181
[ "def update_gunicorns():\n", " \"\"\"\n", " Updates the dict of gunicorn processes. Run the ps command and parse its\n", " output for processes named after gunicorn, building up a dict of gunicorn\n", " processes. When new gunicorns are discovered, run the netstat command to\n", " determine the ports they're serving on.\n", " \"\"\"\n", " global tick\n", " tick += 1\n", " if (tick * screen_delay) % ps_delay != 0:\n", " return\n", " tick = 0\n", " for pid in gunicorns:\n", " gunicorns[pid].update({\"workers\": 0, \"mem\": 0})\n", " ps = Popen(PS_ARGS, stdout=PIPE).communicate()[0].split(\"\\n\")\n", " headings = ps.pop(0).split()\n", " name_col = headings.index(cmd_heading)\n", " num_cols = len(headings) - 1\n", " for row in ps:\n", " cols = row.split(None, num_cols)\n", " if cols and \"gunicorn: \" in cols[name_col]:\n", " if \"gunicorn: worker\" in cols[name_col]:\n", " is_worker = True\n", " else:\n", " is_worker = False\n", "\n", " if is_worker:\n", " pid = cols[headings.index(\"PPID\")]\n", " else:\n", " pid = cols[headings.index(\"PID\")]\n", " if pid not in gunicorns:\n", " gunicorns[pid] = {\"workers\": 0, \"mem\": 0, \"port\": None, \"name\":\n", " cols[name_col].strip().split(\"[\",1)[1].split(\"]\",1)[:-1]}\n", " gunicorns[pid][\"mem\"] += int(cols[headings.index(\"RSS\")])\n", " if is_worker:\n", " gunicorns[pid][\"workers\"] += 1\n", " # Remove gunicorns that were not found in the process list.\n", " for pid in gunicorns.keys()[:]:\n", " if gunicorns[pid][\"workers\"] == 0:\n", " del gunicorns[pid]\n", " # Determine ports if any are missing.\n", " if not [g for g in gunicorns.values() if g[\"port\"] is None]:\n", " return\n", " for (pid, port) in ports_for_pids(gunicorns.keys()):\n", " if pid in gunicorns:\n", " gunicorns[pid][\"port\"] = port" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.038461538461538464, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.024390243902439025 ]
46
0.001366
def _filter_keys(d: dict, keys: set) -> dict: """ Select a subset of keys from a dictionary. """ return {key: d[key] for key in keys if key in d}
[ "def", "_filter_keys", "(", "d", ":", "dict", ",", "keys", ":", "set", ")", "->", "dict", ":", "return", "{", "key", ":", "d", "[", "key", "]", "for", "key", "in", "keys", "if", "key", "in", "d", "}" ]
34.6
0.011299
[ "def _filter_keys(d: dict, keys: set) -> dict:\n", " \"\"\"\n", " Select a subset of keys from a dictionary.\n", " \"\"\"\n", " return {key: d[key] for key in keys if key in d}" ]
[ 0, 0.08333333333333333, 0, 0, 0.017857142857142856 ]
5
0.020238
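For completeness, a tiny usage example of the dict-subsetting helper above; the function is restated so the snippet runs on its own, and key order in the result follows set iteration order.

def _filter_keys(d: dict, keys: set) -> dict:
    return {key: d[key] for key in keys if key in d}

d = {"a": 1, "b": 2, "c": 3}
print(_filter_keys(d, {"a", "c", "missing"}))  # {'a': 1, 'c': 3} (order may vary)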
def tag_array(events): """ Return a numpy array mapping events to tags - Rows corresponds to events - Columns correspond to tags """ all_tags = sorted(set(tag for event in events for tag in event.tags)) array = np.zeros((len(events), len(all_tags))) for row, event in enumerate(events): for tag in event.tags: array[row, all_tags.index(tag)] = 1 return array
[ "def", "tag_array", "(", "events", ")", ":", "all_tags", "=", "sorted", "(", "set", "(", "tag", "for", "event", "in", "events", "for", "tag", "in", "event", ".", "tags", ")", ")", "array", "=", "np", ".", "zeros", "(", "(", "len", "(", "events", ")", ",", "len", "(", "all_tags", ")", ")", ")", "for", "row", ",", "event", "in", "enumerate", "(", "events", ")", ":", "for", "tag", "in", "event", ".", "tags", ":", "array", "[", "row", ",", "all_tags", ".", "index", "(", "tag", ")", "]", "=", "1", "return", "array" ]
31
0.00241
[ "def tag_array(events):\n", " \"\"\"\n", " Return a numpy array mapping events to tags\n", "\n", " - Rows corresponds to events\n", " - Columns correspond to tags\n", " \"\"\"\n", " all_tags = sorted(set(tag for event in events for tag in event.tags))\n", " array = np.zeros((len(events), len(all_tags)))\n", " for row, event in enumerate(events):\n", " for tag in event.tags:\n", " array[row, all_tags.index(tag)] = 1\n", " return array" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0625 ]
13
0.004808
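tag_array builds an events-by-tags 0/1 matrix whose columns follow the sorted union of all tags. The event class itself is not part of the record, so the namedtuple below is only a stand-in with the single attribute the function needs.

import numpy as np
from collections import namedtuple

Event = namedtuple("Event", ["tags"])  # stand-in: only .tags is required

def tag_array(events):
    all_tags = sorted(set(tag for event in events for tag in event.tags))
    array = np.zeros((len(events), len(all_tags)))
    for row, event in enumerate(events):
        for tag in event.tags:
            array[row, all_tags.index(tag)] = 1
    return array

events = [Event(tags=["work"]), Event(tags=["home", "work"])]
print(tag_array(events))  # columns are ['home', 'work']
# [[0. 1.]
#  [1. 1.]]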
def scale(requestContext, seriesList, factor): """ Takes one metric or a wildcard seriesList followed by a constant, and multiplies the datapoint by the constant provided at each point. Example:: &target=scale(Server.instance01.threads.busy,10) &target=scale(Server.instance*.threads.busy,10) """ for series in seriesList: series.name = "scale(%s,%g)" % (series.name, float(factor)) series.pathExpression = series.name for i, value in enumerate(series): series[i] = safeMul(value, factor) return seriesList
[ "def", "scale", "(", "requestContext", ",", "seriesList", ",", "factor", ")", ":", "for", "series", "in", "seriesList", ":", "series", ".", "name", "=", "\"scale(%s,%g)\"", "%", "(", "series", ".", "name", ",", "float", "(", "factor", ")", ")", "series", ".", "pathExpression", "=", "series", ".", "name", "for", "i", ",", "value", "in", "enumerate", "(", "series", ")", ":", "series", "[", "i", "]", "=", "safeMul", "(", "value", ",", "factor", ")", "return", "seriesList" ]
33.705882
0.001698
[ "def scale(requestContext, seriesList, factor):\n", " \"\"\"\n", " Takes one metric or a wildcard seriesList followed by a constant, and\n", " multiplies the datapoint by the constant provided at each point.\n", "\n", " Example::\n", "\n", " &target=scale(Server.instance01.threads.busy,10)\n", " &target=scale(Server.instance*.threads.busy,10)\n", "\n", " \"\"\"\n", " for series in seriesList:\n", " series.name = \"scale(%s,%g)\" % (series.name, float(factor))\n", " series.pathExpression = series.name\n", " for i, value in enumerate(series):\n", " series[i] = safeMul(value, factor)\n", " return seriesList" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.047619047619047616 ]
17
0.002801
def graphql_impl( schema, source, root_value, context_value, variable_values, operation_name, field_resolver, type_resolver, middleware, execution_context_class, ) -> AwaitableOrValue[ExecutionResult]: """Execute a query, return asynchronously only if necessary.""" # Validate Schema schema_validation_errors = validate_schema(schema) if schema_validation_errors: return ExecutionResult(data=None, errors=schema_validation_errors) # Parse try: document = parse(source) except GraphQLError as error: return ExecutionResult(data=None, errors=[error]) except Exception as error: error = GraphQLError(str(error), original_error=error) return ExecutionResult(data=None, errors=[error]) # Validate from .validation import validate validation_errors = validate(schema, document) if validation_errors: return ExecutionResult(data=None, errors=validation_errors) # Execute return execute( schema, document, root_value, context_value, variable_values, operation_name, field_resolver, type_resolver, middleware, execution_context_class, )
[ "def", "graphql_impl", "(", "schema", ",", "source", ",", "root_value", ",", "context_value", ",", "variable_values", ",", "operation_name", ",", "field_resolver", ",", "type_resolver", ",", "middleware", ",", "execution_context_class", ",", ")", "->", "AwaitableOrValue", "[", "ExecutionResult", "]", ":", "# Validate Schema", "schema_validation_errors", "=", "validate_schema", "(", "schema", ")", "if", "schema_validation_errors", ":", "return", "ExecutionResult", "(", "data", "=", "None", ",", "errors", "=", "schema_validation_errors", ")", "# Parse", "try", ":", "document", "=", "parse", "(", "source", ")", "except", "GraphQLError", "as", "error", ":", "return", "ExecutionResult", "(", "data", "=", "None", ",", "errors", "=", "[", "error", "]", ")", "except", "Exception", "as", "error", ":", "error", "=", "GraphQLError", "(", "str", "(", "error", ")", ",", "original_error", "=", "error", ")", "return", "ExecutionResult", "(", "data", "=", "None", ",", "errors", "=", "[", "error", "]", ")", "# Validate", "from", ".", "validation", "import", "validate", "validation_errors", "=", "validate", "(", "schema", ",", "document", ")", "if", "validation_errors", ":", "return", "ExecutionResult", "(", "data", "=", "None", ",", "errors", "=", "validation_errors", ")", "# Execute", "return", "execute", "(", "schema", ",", "document", ",", "root_value", ",", "context_value", ",", "variable_values", ",", "operation_name", ",", "field_resolver", ",", "type_resolver", ",", "middleware", ",", "execution_context_class", ",", ")" ]
25.808511
0.000794
[ "def graphql_impl(\n", " schema,\n", " source,\n", " root_value,\n", " context_value,\n", " variable_values,\n", " operation_name,\n", " field_resolver,\n", " type_resolver,\n", " middleware,\n", " execution_context_class,\n", ") -> AwaitableOrValue[ExecutionResult]:\n", " \"\"\"Execute a query, return asynchronously only if necessary.\"\"\"\n", " # Validate Schema\n", " schema_validation_errors = validate_schema(schema)\n", " if schema_validation_errors:\n", " return ExecutionResult(data=None, errors=schema_validation_errors)\n", "\n", " # Parse\n", " try:\n", " document = parse(source)\n", " except GraphQLError as error:\n", " return ExecutionResult(data=None, errors=[error])\n", " except Exception as error:\n", " error = GraphQLError(str(error), original_error=error)\n", " return ExecutionResult(data=None, errors=[error])\n", "\n", " # Validate\n", " from .validation import validate\n", "\n", " validation_errors = validate(schema, document)\n", " if validation_errors:\n", " return ExecutionResult(data=None, errors=validation_errors)\n", "\n", " # Execute\n", " return execute(\n", " schema,\n", " document,\n", " root_value,\n", " context_value,\n", " variable_values,\n", " operation_name,\n", " field_resolver,\n", " type_resolver,\n", " middleware,\n", " execution_context_class,\n", " )" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.2 ]
47
0.004255
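graphql_impl runs schema validation, parsing, document validation and execution in that order. graphql-core exposes the same pipeline through its public graphql_sync entry point; a minimal, hedged end-to-end sketch follows, where the one-field schema and the root_value dict are invented for the example.

from graphql import build_schema, graphql_sync

schema = build_schema("""
    type Query {
        hello: String
    }
""")

result = graphql_sync(schema, "{ hello }", root_value={"hello": "world"})
print(result.data)    # {'hello': 'world'}
print(result.errors)  # None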
def eye_plot(x,L,S=0): """ Eye pattern plot of a baseband digital communications waveform. The signal must be real, but can be multivalued in terms of the underlying modulation scheme. Used for BPSK eye plots in the Case Study article. Parameters ---------- x : ndarray of the real input data vector/array L : display length in samples (usually two symbols) S : start index Returns ------- None : A plot window opens containing the eye plot Notes ----- Increase S to eliminate filter transients. Examples -------- 1000 bits at 10 samples per bit with 'rc' shaping. >>> import matplotlib.pyplot as plt >>> from sk_dsp_comm import digitalcom as dc >>> x,b, data = dc.NRZ_bits(1000,10,'rc') >>> dc.eye_plot(x,20,60) >>> plt.show() """ plt.figure(figsize=(6,4)) idx = np.arange(0,L+1) plt.plot(idx,x[S:S+L+1],'b') k_max = int((len(x) - S)/L)-1 for k in range(1,k_max): plt.plot(idx,x[S+k*L:S+L+1+k*L],'b') plt.grid() plt.xlabel('Time Index - n') plt.ylabel('Amplitude') plt.title('Eye Plot') return 0
[ "def", "eye_plot", "(", "x", ",", "L", ",", "S", "=", "0", ")", ":", "plt", ".", "figure", "(", "figsize", "=", "(", "6", ",", "4", ")", ")", "idx", "=", "np", ".", "arange", "(", "0", ",", "L", "+", "1", ")", "plt", ".", "plot", "(", "idx", ",", "x", "[", "S", ":", "S", "+", "L", "+", "1", "]", ",", "'b'", ")", "k_max", "=", "int", "(", "(", "len", "(", "x", ")", "-", "S", ")", "/", "L", ")", "-", "1", "for", "k", "in", "range", "(", "1", ",", "k_max", ")", ":", "plt", ".", "plot", "(", "idx", ",", "x", "[", "S", "+", "k", "*", "L", ":", "S", "+", "L", "+", "1", "+", "k", "*", "L", "]", ",", "'b'", ")", "plt", ".", "grid", "(", ")", "plt", ".", "xlabel", "(", "'Time Index - n'", ")", "plt", ".", "ylabel", "(", "'Amplitude'", ")", "plt", ".", "title", "(", "'Eye Plot'", ")", "return", "0" ]
26.52381
0.012121
[ "def eye_plot(x,L,S=0):\n", " \"\"\"\n", " Eye pattern plot of a baseband digital communications waveform.\n", "\n", " The signal must be real, but can be multivalued in terms of the underlying\n", " modulation scheme. Used for BPSK eye plots in the Case Study article.\n", "\n", " Parameters\n", " ----------\n", " x : ndarray of the real input data vector/array\n", " L : display length in samples (usually two symbols)\n", " S : start index\n", "\n", " Returns\n", " -------\n", " None : A plot window opens containing the eye plot\n", " \n", " Notes\n", " -----\n", " Increase S to eliminate filter transients.\n", " \n", " Examples\n", " --------\n", " 1000 bits at 10 samples per bit with 'rc' shaping.\n", "\n", " >>> import matplotlib.pyplot as plt\n", " >>> from sk_dsp_comm import digitalcom as dc\n", " >>> x,b, data = dc.NRZ_bits(1000,10,'rc')\n", " >>> dc.eye_plot(x,20,60)\n", " >>> plt.show()\n", " \"\"\"\n", " plt.figure(figsize=(6,4))\n", " idx = np.arange(0,L+1)\n", " plt.plot(idx,x[S:S+L+1],'b')\n", " k_max = int((len(x) - S)/L)-1\n", " for k in range(1,k_max):\n", " plt.plot(idx,x[S+k*L:S+L+1+k*L],'b')\n", " plt.grid()\n", " plt.xlabel('Time Index - n')\n", " plt.ylabel('Amplitude')\n", " plt.title('Eye Plot')\n", " return 0" ]
[ 0.08695652173913043, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 0, 0.2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.03333333333333333, 0.037037037037037035, 0.06060606060606061, 0, 0.034482758620689655, 0.08695652173913043, 0, 0, 0, 0, 0.08333333333333333 ]
42
0.019588
def detectFileEncoding(self, fileName): ''' Detect content encoding of specific file. It will return None if it can't determine the encoding. ''' try: import chardet except ImportError: return with open(fileName, 'rb') as inputFile: raw = inputFile.read(2048) result = chardet.detect(raw) if result['confidence'] > 0.9: if result['encoding'].lower() == 'ascii': # UTF-8 files can be falsely detected as ASCII files if they # don't contain non-ASCII characters in first 2048 bytes. # We map ASCII to UTF-8 to avoid such situations. return 'utf-8' return result['encoding']
[ "def", "detectFileEncoding", "(", "self", ",", "fileName", ")", ":", "try", ":", "import", "chardet", "except", "ImportError", ":", "return", "with", "open", "(", "fileName", ",", "'rb'", ")", "as", "inputFile", ":", "raw", "=", "inputFile", ".", "read", "(", "2048", ")", "result", "=", "chardet", ".", "detect", "(", "raw", ")", "if", "result", "[", "'confidence'", "]", ">", "0.9", ":", "if", "result", "[", "'encoding'", "]", ".", "lower", "(", ")", "==", "'ascii'", ":", "# UTF-8 files can be falsely detected as ASCII files if they", "# don't contain non-ASCII characters in first 2048 bytes.", "# We map ASCII to UTF-8 to avoid such situations.", "return", "'utf-8'", "return", "result", "[", "'encoding'", "]" ]
27.363636
0.032103
[ "def detectFileEncoding(self, fileName):\n", "\t\t'''\n", "\t\tDetect content encoding of specific file.\n", "\n", "\t\tIt will return None if it can't determine the encoding.\n", "\t\t'''\n", "\t\ttry:\n", "\t\t\timport chardet\n", "\t\texcept ImportError:\n", "\t\t\treturn\n", "\n", "\t\twith open(fileName, 'rb') as inputFile:\n", "\t\t\traw = inputFile.read(2048)\n", "\n", "\t\tresult = chardet.detect(raw)\n", "\t\tif result['confidence'] > 0.9:\n", "\t\t\tif result['encoding'].lower() == 'ascii':\n", "\t\t\t\t# UTF-8 files can be falsely detected as ASCII files if they\n", "\t\t\t\t# don't contain non-ASCII characters in first 2048 bytes.\n", "\t\t\t\t# We map ASCII to UTF-8 to avoid such situations.\n", "\t\t\t\treturn 'utf-8'\n", "\t\t\treturn result['encoding']" ]
[ 0, 0.3333333333333333, 0.022727272727272728, 0, 0.017241379310344827, 0.16666666666666666, 0.14285714285714285, 0.05555555555555555, 0.045454545454545456, 0.1, 0, 0.023809523809523808, 0.03333333333333333, 0, 0.03225806451612903, 0.030303030303030304, 0.022222222222222223, 0.015384615384615385, 0.016129032258064516, 0.018518518518518517, 0.05263157894736842, 0.07142857142857142 ]
22
0.054539
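detectFileEncoding leans on chardet over the first 2048 bytes, trusts only detections above 0.9 confidence, and maps ASCII to UTF-8 because pure-ASCII bytes are also valid UTF-8. The same idea as a standalone function; the file name passed at the bottom is an assumption.

import chardet

def detect_encoding(file_name):
    with open(file_name, 'rb') as input_file:
        raw = input_file.read(2048)
    result = chardet.detect(raw)  # dict with 'encoding' and 'confidence'
    if result['confidence'] > 0.9:
        if result['encoding'] and result['encoding'].lower() == 'ascii':
            return 'utf-8'        # ASCII content is also valid UTF-8
        return result['encoding']
    return None                   # not confident enough to say

print(detect_encoding('sample.txt'))  # 'sample.txt' is assumed to exist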
def model_eval(sess, x, y, predictions, X_test=None, Y_test=None, feed=None, args=None): """ Compute the accuracy of a TF model on some data :param sess: TF session to use :param x: input placeholder :param y: output placeholder (for labels) :param predictions: model output predictions :param X_test: numpy array with training inputs :param Y_test: numpy array with training outputs :param feed: An optional dictionary that is appended to the feeding dictionary before the session runs. Can be used to feed the learning phase of a Keras model for instance. :param args: dict or argparse `Namespace` object. Should contain `batch_size` :return: a float with the accuracy value """ global _model_eval_cache args = _ArgsWrapper(args or {}) assert args.batch_size, "Batch size was not given in args dict" if X_test is None or Y_test is None: raise ValueError("X_test argument and Y_test argument " "must be supplied.") # Define accuracy symbolically key = (y, predictions) if key in _model_eval_cache: correct_preds = _model_eval_cache[key] else: correct_preds = tf.equal(tf.argmax(y, axis=-1), tf.argmax(predictions, axis=-1)) _model_eval_cache[key] = correct_preds # Init result var accuracy = 0.0 with sess.as_default(): # Compute number of batches nb_batches = int(math.ceil(float(len(X_test)) / args.batch_size)) assert nb_batches * args.batch_size >= len(X_test) X_cur = np.zeros((args.batch_size,) + X_test.shape[1:], dtype=X_test.dtype) Y_cur = np.zeros((args.batch_size,) + Y_test.shape[1:], dtype=Y_test.dtype) for batch in range(nb_batches): if batch % 100 == 0 and batch > 0: _logger.debug("Batch " + str(batch)) # Must not use the `batch_indices` function here, because it # repeats some examples. # It's acceptable to repeat during training, but not eval. start = batch * args.batch_size end = min(len(X_test), start + args.batch_size) # The last batch may be smaller than all others. This should not # affect the accuarcy disproportionately. cur_batch_size = end - start X_cur[:cur_batch_size] = X_test[start:end] Y_cur[:cur_batch_size] = Y_test[start:end] feed_dict = {x: X_cur, y: Y_cur} if feed is not None: feed_dict.update(feed) cur_corr_preds = correct_preds.eval(feed_dict=feed_dict) accuracy += cur_corr_preds[:cur_batch_size].sum() assert end >= len(X_test) # Divide by number of examples to get final value accuracy /= len(X_test) return accuracy
[ "def", "model_eval", "(", "sess", ",", "x", ",", "y", ",", "predictions", ",", "X_test", "=", "None", ",", "Y_test", "=", "None", ",", "feed", "=", "None", ",", "args", "=", "None", ")", ":", "global", "_model_eval_cache", "args", "=", "_ArgsWrapper", "(", "args", "or", "{", "}", ")", "assert", "args", ".", "batch_size", ",", "\"Batch size was not given in args dict\"", "if", "X_test", "is", "None", "or", "Y_test", "is", "None", ":", "raise", "ValueError", "(", "\"X_test argument and Y_test argument \"", "\"must be supplied.\"", ")", "# Define accuracy symbolically", "key", "=", "(", "y", ",", "predictions", ")", "if", "key", "in", "_model_eval_cache", ":", "correct_preds", "=", "_model_eval_cache", "[", "key", "]", "else", ":", "correct_preds", "=", "tf", ".", "equal", "(", "tf", ".", "argmax", "(", "y", ",", "axis", "=", "-", "1", ")", ",", "tf", ".", "argmax", "(", "predictions", ",", "axis", "=", "-", "1", ")", ")", "_model_eval_cache", "[", "key", "]", "=", "correct_preds", "# Init result var", "accuracy", "=", "0.0", "with", "sess", ".", "as_default", "(", ")", ":", "# Compute number of batches", "nb_batches", "=", "int", "(", "math", ".", "ceil", "(", "float", "(", "len", "(", "X_test", ")", ")", "/", "args", ".", "batch_size", ")", ")", "assert", "nb_batches", "*", "args", ".", "batch_size", ">=", "len", "(", "X_test", ")", "X_cur", "=", "np", ".", "zeros", "(", "(", "args", ".", "batch_size", ",", ")", "+", "X_test", ".", "shape", "[", "1", ":", "]", ",", "dtype", "=", "X_test", ".", "dtype", ")", "Y_cur", "=", "np", ".", "zeros", "(", "(", "args", ".", "batch_size", ",", ")", "+", "Y_test", ".", "shape", "[", "1", ":", "]", ",", "dtype", "=", "Y_test", ".", "dtype", ")", "for", "batch", "in", "range", "(", "nb_batches", ")", ":", "if", "batch", "%", "100", "==", "0", "and", "batch", ">", "0", ":", "_logger", ".", "debug", "(", "\"Batch \"", "+", "str", "(", "batch", ")", ")", "# Must not use the `batch_indices` function here, because it", "# repeats some examples.", "# It's acceptable to repeat during training, but not eval.", "start", "=", "batch", "*", "args", ".", "batch_size", "end", "=", "min", "(", "len", "(", "X_test", ")", ",", "start", "+", "args", ".", "batch_size", ")", "# The last batch may be smaller than all others. This should not", "# affect the accuarcy disproportionately.", "cur_batch_size", "=", "end", "-", "start", "X_cur", "[", ":", "cur_batch_size", "]", "=", "X_test", "[", "start", ":", "end", "]", "Y_cur", "[", ":", "cur_batch_size", "]", "=", "Y_test", "[", "start", ":", "end", "]", "feed_dict", "=", "{", "x", ":", "X_cur", ",", "y", ":", "Y_cur", "}", "if", "feed", "is", "not", "None", ":", "feed_dict", ".", "update", "(", "feed", ")", "cur_corr_preds", "=", "correct_preds", ".", "eval", "(", "feed_dict", "=", "feed_dict", ")", "accuracy", "+=", "cur_corr_preds", "[", ":", "cur_batch_size", "]", ".", "sum", "(", ")", "assert", "end", ">=", "len", "(", "X_test", ")", "# Divide by number of examples to get final value", "accuracy", "/=", "len", "(", "X_test", ")", "return", "accuracy" ]
35.891892
0.010627
[ "def model_eval(sess, x, y, predictions, X_test=None, Y_test=None,\n", " feed=None, args=None):\n", " \"\"\"\n", " Compute the accuracy of a TF model on some data\n", " :param sess: TF session to use\n", " :param x: input placeholder\n", " :param y: output placeholder (for labels)\n", " :param predictions: model output predictions\n", " :param X_test: numpy array with training inputs\n", " :param Y_test: numpy array with training outputs\n", " :param feed: An optional dictionary that is appended to the feeding\n", " dictionary before the session runs. Can be used to feed\n", " the learning phase of a Keras model for instance.\n", " :param args: dict or argparse `Namespace` object.\n", " Should contain `batch_size`\n", " :return: a float with the accuracy value\n", " \"\"\"\n", " global _model_eval_cache\n", " args = _ArgsWrapper(args or {})\n", "\n", " assert args.batch_size, \"Batch size was not given in args dict\"\n", " if X_test is None or Y_test is None:\n", " raise ValueError(\"X_test argument and Y_test argument \"\n", " \"must be supplied.\")\n", "\n", " # Define accuracy symbolically\n", " key = (y, predictions)\n", " if key in _model_eval_cache:\n", " correct_preds = _model_eval_cache[key]\n", " else:\n", " correct_preds = tf.equal(tf.argmax(y, axis=-1),\n", " tf.argmax(predictions, axis=-1))\n", " _model_eval_cache[key] = correct_preds\n", "\n", " # Init result var\n", " accuracy = 0.0\n", "\n", " with sess.as_default():\n", " # Compute number of batches\n", " nb_batches = int(math.ceil(float(len(X_test)) / args.batch_size))\n", " assert nb_batches * args.batch_size >= len(X_test)\n", "\n", " X_cur = np.zeros((args.batch_size,) + X_test.shape[1:],\n", " dtype=X_test.dtype)\n", " Y_cur = np.zeros((args.batch_size,) + Y_test.shape[1:],\n", " dtype=Y_test.dtype)\n", " for batch in range(nb_batches):\n", " if batch % 100 == 0 and batch > 0:\n", " _logger.debug(\"Batch \" + str(batch))\n", "\n", " # Must not use the `batch_indices` function here, because it\n", " # repeats some examples.\n", " # It's acceptable to repeat during training, but not eval.\n", " start = batch * args.batch_size\n", " end = min(len(X_test), start + args.batch_size)\n", "\n", " # The last batch may be smaller than all others. This should not\n", " # affect the accuarcy disproportionately.\n", " cur_batch_size = end - start\n", " X_cur[:cur_batch_size] = X_test[start:end]\n", " Y_cur[:cur_batch_size] = Y_test[start:end]\n", " feed_dict = {x: X_cur, y: Y_cur}\n", " if feed is not None:\n", " feed_dict.update(feed)\n", " cur_corr_preds = correct_preds.eval(feed_dict=feed_dict)\n", "\n", " accuracy += cur_corr_preds[:cur_batch_size].sum()\n", "\n", " assert end >= len(X_test)\n", "\n", " # Divide by number of examples to get final value\n", " accuracy /= len(X_test)\n", "\n", " return accuracy" ]
[ 0, 0, 0.16666666666666666, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.037037037037037035, 0.029411764705882353, 0, 0.015151515151515152, 0.02564102564102564, 0, 0, 0, 0.030303030303030304, 0.04, 0.03225806451612903, 0, 0.125, 0, 0, 0, 0, 0.05, 0.058823529411764705, 0, 0.038461538461538464, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.024390243902439025, 0, 0, 0.014925373134328358, 0.03225806451612903, 0.015384615384615385, 0.02631578947368421, 0.018518518518518517, 0, 0.014084507042253521, 0.020833333333333332, 0.02857142857142857, 0.02040816326530612, 0.02040816326530612, 0.02564102564102564, 0.037037037037037035, 0, 0.015873015873015872, 0, 0.017857142857142856, 0, 0, 0, 0, 0, 0, 0.11764705882352941 ]
74
0.01485
def interpolate(self, xi, yi, zdata, order=1): """ Base class to handle nearest neighbour, linear, and cubic interpolation. Given a triangulation of a set of nodes and values at the nodes, this method interpolates the value at the given xi,yi coordinates. Parameters ---------- xi : float / array of floats, shape (l,) x Cartesian coordinate(s) yi : float / array of floats, shape (l,) y Cartesian coordinate(s) zdata : array of floats, shape (n,) value at each point in the triangulation must be the same size of the mesh order : int (default=1) order of the interpolatory function used 0 = nearest-neighbour 1 = linear 3 = cubic Returns ------- zi : float / array of floats, shape (l,) interpolates value(s) at (xi, yi) err : int / array of ints, shape (l,) whether interpolation (0), extrapolation (1) or error (other) """ if order == 0: zierr = np.zeros_like(xi, dtype=np.int) return self.interpolate_nearest(xi, yi, zdata), zierr elif order == 1: return self.interpolate_linear(xi, yi, zdata) elif order == 3: return self.interpolate_cubic(xi, yi, zdata) else: raise ValueError("order must be 0, 1, or 3")
[ "def", "interpolate", "(", "self", ",", "xi", ",", "yi", ",", "zdata", ",", "order", "=", "1", ")", ":", "if", "order", "==", "0", ":", "zierr", "=", "np", ".", "zeros_like", "(", "xi", ",", "dtype", "=", "np", ".", "int", ")", "return", "self", ".", "interpolate_nearest", "(", "xi", ",", "yi", ",", "zdata", ")", ",", "zierr", "elif", "order", "==", "1", ":", "return", "self", ".", "interpolate_linear", "(", "xi", ",", "yi", ",", "zdata", ")", "elif", "order", "==", "3", ":", "return", "self", ".", "interpolate_cubic", "(", "xi", ",", "yi", ",", "zdata", ")", "else", ":", "raise", "ValueError", "(", "\"order must be 0, 1, or 3\"", ")" ]
38.842105
0.001983
[ "def interpolate(self, xi, yi, zdata, order=1):\n", " \"\"\"\n", " Base class to handle nearest neighbour, linear, and cubic interpolation.\n", " Given a triangulation of a set of nodes and values at the nodes,\n", " this method interpolates the value at the given xi,yi coordinates.\n", "\n", " Parameters\n", " ----------\n", " xi : float / array of floats, shape (l,)\n", " x Cartesian coordinate(s)\n", " yi : float / array of floats, shape (l,)\n", " y Cartesian coordinate(s)\n", " zdata : array of floats, shape (n,)\n", " value at each point in the triangulation\n", " must be the same size of the mesh\n", " order : int (default=1)\n", " order of the interpolatory function used\n", " 0 = nearest-neighbour\n", " 1 = linear\n", " 3 = cubic\n", "\n", " Returns\n", " -------\n", " zi : float / array of floats, shape (l,)\n", " interpolates value(s) at (xi, yi)\n", " err : int / array of ints, shape (l,)\n", " whether interpolation (0), extrapolation (1) or error (other)\n", " \"\"\"\n", "\n", " if order == 0:\n", " zierr = np.zeros_like(xi, dtype=np.int)\n", " return self.interpolate_nearest(xi, yi, zdata), zierr\n", " elif order == 1:\n", " return self.interpolate_linear(xi, yi, zdata)\n", " elif order == 3:\n", " return self.interpolate_cubic(xi, yi, zdata)\n", " else:\n", " raise ValueError(\"order must be 0, 1, or 3\")" ]
[ 0, 0.08333333333333333, 0.012345679012345678, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.017857142857142856 ]
38
0.002988
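The interpolate method dispatches on order 0, 1 or 3 to nearest-neighbour, linear or cubic interpolation over the class's triangulation. Purely to illustrate those three orders on scattered data (not the class's own backend), SciPy's griddata accepts the matching method names.

import numpy as np
from scipy.interpolate import griddata

rng = np.random.default_rng(0)
points = rng.random((200, 2))                          # scattered (x, y) nodes
values = np.sin(6 * points[:, 0]) * np.cos(6 * points[:, 1])

xi = np.array([[0.50, 0.50], [0.25, 0.75]])            # query coordinates
for method in ("nearest", "linear", "cubic"):          # ~ order 0, 1, 3
    zi = griddata(points, values, xi, method=method)
    print(method, zi)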
def _needSwapWH(self, oldDirection, newDirection ): """! \~english return screen direction status @return Boolean @note No need to rotate if the screen orientation is 0 degrees and 180 degrees \~chinese return screen direction status @return Boolean @note no need to rotate if the screen orientation is 0 or 180 degrees """ if abs(newDirection - oldDirection) == 0: return False if abs(newDirection - oldDirection) % 180 == 0: return False if abs(newDirection - oldDirection) % 90 == 0: return True return False
[ "def", "_needSwapWH", "(", "self", ",", "oldDirection", ",", "newDirection", ")", ":", "if", "abs", "(", "newDirection", "-", "oldDirection", ")", "==", "0", ":", "return", "False", "if", "abs", "(", "newDirection", "-", "oldDirection", ")", "%", "180", "==", "0", ":", "return", "False", "if", "abs", "(", "newDirection", "-", "oldDirection", ")", "%", "90", "==", "0", ":", "return", "True", "return", "False" ]
33.75
0.016216
[ "def _needSwapWH(self, oldDirection, newDirection ):\n", " \"\"\"!\n", " \\~english\n", " return screen direction status\n", " @return Boolean\n", " @note No need to rotate if the screen orientation is 0 degrees and 180 degrees\n", "\n", " \\~chinese\n", " 返回屏幕方向状态\n", " @return 布尔值\n", " @note 如果屏幕方向是0度和180度就不需要旋转\n", " \"\"\"\n", " if abs(newDirection - oldDirection) == 0: return False\n", " if abs(newDirection - oldDirection) % 180 == 0: return False\n", " if abs(newDirection - oldDirection) % 90 == 0: return True\n", " return False" ]
[ 0.019230769230769232, 0.07692307692307693, 0.05555555555555555, 0, 0, 0.011494252873563218, 0, 0.05555555555555555, 0, 0, 0, 0, 0.015873015873015872, 0.014492753623188406, 0.014925373134328358, 0.05 ]
16
0.019628
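_needSwapWH answers whether a display's width and height must be swapped after changing orientation: only differences that are odd multiples of 90 degrees (90 and 270) require a swap. A small check of that rule, condensing the three if-statements into one expression:

def need_swap(old_deg, new_deg):
    diff = abs(new_deg - old_deg)
    return diff % 180 != 0 and diff % 90 == 0  # True only for 90, 270, ...

for old, new in [(0, 0), (0, 90), (0, 180), (0, 270), (90, 270)]:
    print(f"{old:>3} -> {new:>3}  swap: {need_swap(old, new)}")
# 0->0 False, 0->90 True, 0->180 False, 0->270 True, 90->270 False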
def chooseBestDuplicates(tped, samples, oldSamples, completion, concordance_all, prefix): """Choose the best duplicates according to the completion rate. :param tped: the ``tped`` containing the duplicated samples. :param samples: the updated position of the samples in the tped containing only duplicated samples. :param oldSamples: the original duplicated sample positions. :param completion: the completion of each of the duplicated samples. :param concordance_all: the concordance of every duplicated samples. :param prefix: the prefix of all the files. :type tped: :py:class:`numpy.array` :type samples: dict :type oldSamples: dict :type completion: :py:class:`numpy.array` :type concordance_all: dict :type prefix: str :returns: a tuple where the first element is a list of the chosen samples' indexes, the second on is the completion and the last one is the concordance (a map). These are the steps to find the best duplicated sample: 1. Sort the list of concordances. 2. Sort the list of completions. 3. Choose the best of the concordance and put in a set. 4. Choose the best of the completion and put it in a set. 5. Compute the intersection of the two sets. If there is one sample or more, then randomly choose one sample. 6. If the intersection doesn't contain at least one sample, redo steps 3 and 4, but increase the number of chosen best by one. Redo step 5 and 6 (if required). The chosen samples are written in ``prefix.chosen_samples.info``. The rest are written in ``prefix.excluded_samples.info``. """ # The output files chosenFile = None try: chosenFile = open(prefix + ".chosen_samples.info", "w") except IOError: msg = "%(prefix)s.chosen_samples.info: can't write file" % locals() raise ProgramError(msg) print >>chosenFile, "\t".join(["origIndex", "dupIndex", "famID", "indID"]) excludedFile = None try: excludedFile = open(prefix + ".excluded_samples.info", "w") except IOError: msg = "%(prefix)s.excluded_samples.info: can't write file" % locals() raise ProgramError(msg) print >>excludedFile, "\t".join(["origIndex", "dupIndex", "famID", "indID"]) # For each duplicated sample chosenIndexes = {} sampleConcordance = {} for sample, indexes in samples.iteritems(): # Getting the completion for those duplicated samples currCompletion = completion[indexes] # Sorting those completion sortedCompletionIndexes = np.argsort(currCompletion) # Getting the concordance concordance = concordance_all[sample] currConcordance = [[] for i in xrange(len(indexes))] for i in xrange(len(indexes)): indexToKeep = list(set(range(len(indexes))) - set([i])) currConcordance[i] = np.mean(concordance[i, indexToKeep]) currConcordance = np.array(currConcordance) if sample not in sampleConcordance: sampleConcordance[sample] = currConcordance # Sorting the concordance sortedConcordanceIndexes = np.argsort(currConcordance) # Trying to find the best duplicate to keep nbToCheck = 1 chosenIndex = None while nbToCheck <= len(indexes): # Getting the `nbToCheck` best value (higher to lower) completionValue = currCompletion[ sortedCompletionIndexes[nbToCheck*-1] ] concordanceValue = currConcordance[ sortedConcordanceIndexes[nbToCheck*-1] ] # Getting the indexes to consider completionToConsider = set( np.where(currCompletion >= completionValue)[0] ) concordanceToConsider = set( np.where(currConcordance >= concordanceValue)[0]) # Getting the intersection of the indexes toConsider = concordanceToConsider & completionToConsider if len(toConsider) >= 1: chosenIndex = random.choice(list(toConsider)) break nbToCheck += 1 if chosenIndex is None: msg = 
"Could not choose the best sample ID for {}".format(sample) raise ProgramError(msg) # Printing the chosen samples print >>chosenFile, "\t".join([str(oldSamples[sample][chosenIndex]+1), str(indexes[chosenIndex]+1), sample[0], sample[1]]) # Printing the excluded samples for i, index in enumerate(indexes): if i != chosenIndex: print >>excludedFile, "\t".join([str(oldSamples[sample][i]+1), str(index+1), sample[0], sample[1]]) chosenIndexes[sample] = indexes[chosenIndex] # Closing the output files chosenFile.close() excludedFile.close() return chosenIndexes, completion, sampleConcordance
[ "def", "chooseBestDuplicates", "(", "tped", ",", "samples", ",", "oldSamples", ",", "completion", ",", "concordance_all", ",", "prefix", ")", ":", "# The output files", "chosenFile", "=", "None", "try", ":", "chosenFile", "=", "open", "(", "prefix", "+", "\".chosen_samples.info\"", ",", "\"w\"", ")", "except", "IOError", ":", "msg", "=", "\"%(prefix)s.chosen_samples.info: can't write file\"", "%", "locals", "(", ")", "raise", "ProgramError", "(", "msg", ")", "print", ">>", "chosenFile", ",", "\"\\t\"", ".", "join", "(", "[", "\"origIndex\"", ",", "\"dupIndex\"", ",", "\"famID\"", ",", "\"indID\"", "]", ")", "excludedFile", "=", "None", "try", ":", "excludedFile", "=", "open", "(", "prefix", "+", "\".excluded_samples.info\"", ",", "\"w\"", ")", "except", "IOError", ":", "msg", "=", "\"%(prefix)s.excluded_samples.info: can't write file\"", "%", "locals", "(", ")", "raise", "ProgramError", "(", "msg", ")", "print", ">>", "excludedFile", ",", "\"\\t\"", ".", "join", "(", "[", "\"origIndex\"", ",", "\"dupIndex\"", ",", "\"famID\"", ",", "\"indID\"", "]", ")", "# For each duplicated sample", "chosenIndexes", "=", "{", "}", "sampleConcordance", "=", "{", "}", "for", "sample", ",", "indexes", "in", "samples", ".", "iteritems", "(", ")", ":", "# Getting the completion for those duplicated samples", "currCompletion", "=", "completion", "[", "indexes", "]", "# Sorting those completion", "sortedCompletionIndexes", "=", "np", ".", "argsort", "(", "currCompletion", ")", "# Getting the concordance", "concordance", "=", "concordance_all", "[", "sample", "]", "currConcordance", "=", "[", "[", "]", "for", "i", "in", "xrange", "(", "len", "(", "indexes", ")", ")", "]", "for", "i", "in", "xrange", "(", "len", "(", "indexes", ")", ")", ":", "indexToKeep", "=", "list", "(", "set", "(", "range", "(", "len", "(", "indexes", ")", ")", ")", "-", "set", "(", "[", "i", "]", ")", ")", "currConcordance", "[", "i", "]", "=", "np", ".", "mean", "(", "concordance", "[", "i", ",", "indexToKeep", "]", ")", "currConcordance", "=", "np", ".", "array", "(", "currConcordance", ")", "if", "sample", "not", "in", "sampleConcordance", ":", "sampleConcordance", "[", "sample", "]", "=", "currConcordance", "# Sorting the concordance", "sortedConcordanceIndexes", "=", "np", ".", "argsort", "(", "currConcordance", ")", "# Trying to find the best duplicate to keep", "nbToCheck", "=", "1", "chosenIndex", "=", "None", "while", "nbToCheck", "<=", "len", "(", "indexes", ")", ":", "# Getting the `nbToCheck` best value (higher to lower)", "completionValue", "=", "currCompletion", "[", "sortedCompletionIndexes", "[", "nbToCheck", "*", "-", "1", "]", "]", "concordanceValue", "=", "currConcordance", "[", "sortedConcordanceIndexes", "[", "nbToCheck", "*", "-", "1", "]", "]", "# Getting the indexes to consider", "completionToConsider", "=", "set", "(", "np", ".", "where", "(", "currCompletion", ">=", "completionValue", ")", "[", "0", "]", ")", "concordanceToConsider", "=", "set", "(", "np", ".", "where", "(", "currConcordance", ">=", "concordanceValue", ")", "[", "0", "]", ")", "# Getting the intersection of the indexes", "toConsider", "=", "concordanceToConsider", "&", "completionToConsider", "if", "len", "(", "toConsider", ")", ">=", "1", ":", "chosenIndex", "=", "random", ".", "choice", "(", "list", "(", "toConsider", ")", ")", "break", "nbToCheck", "+=", "1", "if", "chosenIndex", "is", "None", ":", "msg", "=", "\"Could not choose the best sample ID for {}\"", ".", "format", "(", "sample", ")", "raise", "ProgramError", "(", "msg", ")", "# 
Printing the chosen samples", "print", ">>", "chosenFile", ",", "\"\\t\"", ".", "join", "(", "[", "str", "(", "oldSamples", "[", "sample", "]", "[", "chosenIndex", "]", "+", "1", ")", ",", "str", "(", "indexes", "[", "chosenIndex", "]", "+", "1", ")", ",", "sample", "[", "0", "]", ",", "sample", "[", "1", "]", "]", ")", "# Printing the excluded samples", "for", "i", ",", "index", "in", "enumerate", "(", "indexes", ")", ":", "if", "i", "!=", "chosenIndex", ":", "print", ">>", "excludedFile", ",", "\"\\t\"", ".", "join", "(", "[", "str", "(", "oldSamples", "[", "sample", "]", "[", "i", "]", "+", "1", ")", ",", "str", "(", "index", "+", "1", ")", ",", "sample", "[", "0", "]", ",", "sample", "[", "1", "]", "]", ")", "chosenIndexes", "[", "sample", "]", "=", "indexes", "[", "chosenIndex", "]", "# Closing the output files", "chosenFile", ".", "close", "(", ")", "excludedFile", ".", "close", "(", ")", "return", "chosenIndexes", ",", "completion", ",", "sampleConcordance" ]
39.317829
0.000192
[ "def chooseBestDuplicates(tped, samples, oldSamples, completion,\n", " concordance_all, prefix):\n", " \"\"\"Choose the best duplicates according to the completion rate.\n", "\n", " :param tped: the ``tped`` containing the duplicated samples.\n", " :param samples: the updated position of the samples in the tped containing\n", " only duplicated samples.\n", " :param oldSamples: the original duplicated sample positions.\n", " :param completion: the completion of each of the duplicated samples.\n", " :param concordance_all: the concordance of every duplicated samples.\n", " :param prefix: the prefix of all the files.\n", "\n", " :type tped: :py:class:`numpy.array`\n", " :type samples: dict\n", " :type oldSamples: dict\n", " :type completion: :py:class:`numpy.array`\n", " :type concordance_all: dict\n", " :type prefix: str\n", "\n", " :returns: a tuple where the first element is a list of the chosen samples'\n", " indexes, the second on is the completion and the last one is the\n", " concordance (a map).\n", "\n", " These are the steps to find the best duplicated sample:\n", "\n", " 1. Sort the list of concordances.\n", " 2. Sort the list of completions.\n", " 3. Choose the best of the concordance and put in a set.\n", " 4. Choose the best of the completion and put it in a set.\n", " 5. Compute the intersection of the two sets. If there is one sample or\n", " more, then randomly choose one sample.\n", " 6. If the intersection doesn't contain at least one sample, redo steps 3\n", " and 4, but increase the number of chosen best by one. Redo step 5 and 6\n", " (if required).\n", "\n", " The chosen samples are written in ``prefix.chosen_samples.info``. The rest\n", " are written in ``prefix.excluded_samples.info``.\n", "\n", " \"\"\"\n", " # The output files\n", " chosenFile = None\n", " try:\n", " chosenFile = open(prefix + \".chosen_samples.info\", \"w\")\n", " except IOError:\n", " msg = \"%(prefix)s.chosen_samples.info: can't write file\" % locals()\n", " raise ProgramError(msg)\n", " print >>chosenFile, \"\\t\".join([\"origIndex\", \"dupIndex\", \"famID\", \"indID\"])\n", "\n", " excludedFile = None\n", " try:\n", " excludedFile = open(prefix + \".excluded_samples.info\", \"w\")\n", " except IOError:\n", " msg = \"%(prefix)s.excluded_samples.info: can't write file\" % locals()\n", " raise ProgramError(msg)\n", " print >>excludedFile, \"\\t\".join([\"origIndex\", \"dupIndex\", \"famID\",\n", " \"indID\"])\n", "\n", " # For each duplicated sample\n", " chosenIndexes = {}\n", " sampleConcordance = {}\n", " for sample, indexes in samples.iteritems():\n", " # Getting the completion for those duplicated samples\n", " currCompletion = completion[indexes]\n", "\n", " # Sorting those completion\n", " sortedCompletionIndexes = np.argsort(currCompletion)\n", "\n", " # Getting the concordance\n", " concordance = concordance_all[sample]\n", " currConcordance = [[] for i in xrange(len(indexes))]\n", " for i in xrange(len(indexes)):\n", " indexToKeep = list(set(range(len(indexes))) - set([i]))\n", " currConcordance[i] = np.mean(concordance[i, indexToKeep])\n", " currConcordance = np.array(currConcordance)\n", " if sample not in sampleConcordance:\n", " sampleConcordance[sample] = currConcordance\n", "\n", " # Sorting the concordance\n", " sortedConcordanceIndexes = np.argsort(currConcordance)\n", "\n", " # Trying to find the best duplicate to keep\n", " nbToCheck = 1\n", " chosenIndex = None\n", " while nbToCheck <= len(indexes):\n", " # Getting the `nbToCheck` best value (higher to lower)\n", " 
completionValue = currCompletion[\n", " sortedCompletionIndexes[nbToCheck*-1]\n", " ]\n", " concordanceValue = currConcordance[\n", " sortedConcordanceIndexes[nbToCheck*-1]\n", " ]\n", "\n", " # Getting the indexes to consider\n", " completionToConsider = set(\n", " np.where(currCompletion >= completionValue)[0]\n", " )\n", " concordanceToConsider = set(\n", " np.where(currConcordance >= concordanceValue)[0])\n", "\n", " # Getting the intersection of the indexes\n", " toConsider = concordanceToConsider & completionToConsider\n", " if len(toConsider) >= 1:\n", " chosenIndex = random.choice(list(toConsider))\n", " break\n", " nbToCheck += 1\n", "\n", " if chosenIndex is None:\n", " msg = \"Could not choose the best sample ID for {}\".format(sample)\n", " raise ProgramError(msg)\n", "\n", " # Printing the chosen samples\n", " print >>chosenFile, \"\\t\".join([str(oldSamples[sample][chosenIndex]+1),\n", " str(indexes[chosenIndex]+1), sample[0],\n", " sample[1]])\n", "\n", " # Printing the excluded samples\n", " for i, index in enumerate(indexes):\n", " if i != chosenIndex:\n", " print >>excludedFile, \"\\t\".join([str(oldSamples[sample][i]+1),\n", " str(index+1), sample[0],\n", " sample[1]])\n", "\n", " chosenIndexes[sample] = indexes[chosenIndex]\n", "\n", " # Closing the output files\n", " chosenFile.close()\n", " excludedFile.close()\n", "\n", " return chosenIndexes, completion, sampleConcordance" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.01818181818181818 ]
129
0.000141
def anglesep(lon0: float, lat0: float,
             lon1: float, lat1: float, deg: bool = True) -> float:
    """
    Parameters
    ----------

    lon0 : float
        longitude of first point
    lat0 : float
        latitude of first point
    lon1 : float
        longitude of second point
    lat1 : float
        latitude of second point
    deg : bool, optional
        degrees input/output (False: radians in/out)

    Returns
    -------

    sep_rad : float or numpy.ndarray of float
        angular separation

    For reference, this is from astropy astropy/coordinates/angle_utilities.py
    Angular separation between two points on a sphere.
    """
    if angular_separation is None:
        raise ImportError('angledist requires AstroPy. Try angledis_meeus')

    if deg:
        lon0 = radians(lon0)
        lat0 = radians(lat0)
        lon1 = radians(lon1)
        lat1 = radians(lat1)

    sep_rad = angular_separation(lon0, lat0, lon1, lat1)

    if deg:
        return degrees(sep_rad)
    else:
        return sep_rad
[ "def", "anglesep", "(", "lon0", ":", "float", ",", "lat0", ":", "float", ",", "lon1", ":", "float", ",", "lat1", ":", "float", ",", "deg", ":", "bool", "=", "True", ")", "->", "float", ":", "if", "angular_separation", "is", "None", ":", "raise", "ImportError", "(", "'angledist requires AstroPy. Try angledis_meeus'", ")", "if", "deg", ":", "lon0", "=", "radians", "(", "lon0", ")", "lat0", "=", "radians", "(", "lat0", ")", "lon1", "=", "radians", "(", "lon1", ")", "lat1", "=", "radians", "(", "lat1", ")", "sep_rad", "=", "angular_separation", "(", "lon0", ",", "lat0", ",", "lon1", ",", "lat1", ")", "if", "deg", ":", "return", "degrees", "(", "sep_rad", ")", "else", ":", "return", "sep_rad" ]
24.463415
0.000959
[ "def anglesep(lon0: float, lat0: float,\n", " lon1: float, lat1: float, deg: bool = True) -> float:\n", " \"\"\"\n", " Parameters\n", " ----------\n", "\n", " lon0 : float\n", " longitude of first point\n", " lat0 : float\n", " latitude of first point\n", " lon1 : float\n", " longitude of second point\n", " lat1 : float\n", " latitude of second point\n", " deg : bool, optional\n", " degrees input/output (False: radians in/out)\n", "\n", " Returns\n", " -------\n", "\n", " sep_rad : float or numpy.ndarray of float\n", " angular separation\n", "\n", " For reference, this is from astropy astropy/coordinates/angle_utilities.py\n", " Angular separation between two points on a sphere.\n", " \"\"\"\n", " if angular_separation is None:\n", " raise ImportError('angledist requires AstroPy. Try angledis_meeus')\n", "\n", " if deg:\n", " lon0 = radians(lon0)\n", " lat0 = radians(lat0)\n", " lon1 = radians(lon1)\n", " lat1 = radians(lat1)\n", "\n", " sep_rad = angular_separation(lon0, lat0, lon1, lat1)\n", "\n", " if deg:\n", " return degrees(sep_rad)\n", " else:\n", " return sep_rad" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.045454545454545456 ]
41
0.001109
def stop(self):
    """Signals the background thread to stop.

    This does not terminate the background thread. It simply queues the
    stop signal. If the main process exits before the background thread
    processes the stop signal, it will be terminated without finishing
    work. The ``grace_period`` parameter will give the background
    thread some time to finish processing before this function returns.

    :rtype: bool
    :returns: True if the thread terminated. False if the thread is still
        running.
    """
    if not self.is_alive:
        return True

    with self._lock:
        self._queue.put_nowait(_WORKER_TERMINATOR)
        self._thread.join(timeout=self._grace_period)

    success = not self.is_alive
    self._thread = None

    return success
[ "def", "stop", "(", "self", ")", ":", "if", "not", "self", ".", "is_alive", ":", "return", "True", "with", "self", ".", "_lock", ":", "self", ".", "_queue", ".", "put_nowait", "(", "_WORKER_TERMINATOR", ")", "self", ".", "_thread", ".", "join", "(", "timeout", "=", "self", ".", "_grace_period", ")", "success", "=", "not", "self", ".", "is_alive", "self", ".", "_thread", "=", "None", "return", "success" ]
35.375
0.002294
[ "def stop(self):\n", " \"\"\"Signals the background thread to stop.\n", "\n", " This does not terminate the background thread. It simply queues the\n", " stop signal. If the main process exits before the background thread\n", " processes the stop signal, it will be terminated without finishing\n", " work. The ``grace_period`` parameter will give the background\n", " thread some time to finish processing before this function returns.\n", "\n", " :rtype: bool\n", " :returns: True if the thread terminated. False if the thread is still\n", " running.\n", " \"\"\"\n", " if not self.is_alive:\n", " return True\n", "\n", " with self._lock:\n", " self._queue.put_nowait(_WORKER_TERMINATOR)\n", " self._thread.join(timeout=self._grace_period)\n", "\n", " success = not self.is_alive\n", " self._thread = None\n", "\n", " return success" ]
[ 0, 0.02, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.038461538461538464 ]
24
0.002436
def ngroup(self, ascending=True):
    """
    Number each group from 0 to the number of groups - 1.

    This is the enumerative complement of cumcount. Note that the
    numbers given to the groups match the order in which the groups
    would be seen when iterating over the groupby object, not the
    order they are first observed.

    .. versionadded:: 0.20.2

    Parameters
    ----------
    ascending : bool, default True
        If False, number in reverse, from number of group - 1 to 0.

    See Also
    --------
    .cumcount : Number the rows in each group.

    Examples
    --------

    >>> df = pd.DataFrame({"A": list("aaabba")})
    >>> df
       A
    0  a
    1  a
    2  a
    3  b
    4  b
    5  a
    >>> df.groupby('A').ngroup()
    0    0
    1    0
    2    0
    3    1
    4    1
    5    0
    dtype: int64
    >>> df.groupby('A').ngroup(ascending=False)
    0    1
    1    1
    2    1
    3    0
    4    0
    5    1
    dtype: int64
    >>> df.groupby(["A", [1,1,2,3,2,1]]).ngroup()
    0    0
    1    0
    2    1
    3    3
    4    2
    5    0
    dtype: int64
    """

    with _group_selection_context(self):
        index = self._selected_obj.index
        result = Series(self.grouper.group_info[0], index)
        if not ascending:
            result = self.ngroups - 1 - result
        return result
[ "def", "ngroup", "(", "self", ",", "ascending", "=", "True", ")", ":", "with", "_group_selection_context", "(", "self", ")", ":", "index", "=", "self", ".", "_selected_obj", ".", "index", "result", "=", "Series", "(", "self", ".", "grouper", ".", "group_info", "[", "0", "]", ",", "index", ")", "if", "not", "ascending", ":", "result", "=", "self", ".", "ngroups", "-", "1", "-", "result", "return", "result" ]
23.609375
0.001271
[ "def ngroup(self, ascending=True):\n", " \"\"\"\n", " Number each group from 0 to the number of groups - 1.\n", "\n", " This is the enumerative complement of cumcount. Note that the\n", " numbers given to the groups match the order in which the groups\n", " would be seen when iterating over the groupby object, not the\n", " order they are first observed.\n", "\n", " .. versionadded:: 0.20.2\n", "\n", " Parameters\n", " ----------\n", " ascending : bool, default True\n", " If False, number in reverse, from number of group - 1 to 0.\n", "\n", " See Also\n", " --------\n", " .cumcount : Number the rows in each group.\n", "\n", " Examples\n", " --------\n", "\n", " >>> df = pd.DataFrame({\"A\": list(\"aaabba\")})\n", " >>> df\n", " A\n", " 0 a\n", " 1 a\n", " 2 a\n", " 3 b\n", " 4 b\n", " 5 a\n", " >>> df.groupby('A').ngroup()\n", " 0 0\n", " 1 0\n", " 2 0\n", " 3 1\n", " 4 1\n", " 5 0\n", " dtype: int64\n", " >>> df.groupby('A').ngroup(ascending=False)\n", " 0 1\n", " 1 1\n", " 2 1\n", " 3 0\n", " 4 0\n", " 5 1\n", " dtype: int64\n", " >>> df.groupby([\"A\", [1,1,2,3,2,1]]).ngroup()\n", " 0 0\n", " 1 0\n", " 2 1\n", " 3 3\n", " 4 2\n", " 5 0\n", " dtype: int64\n", " \"\"\"\n", "\n", " with _group_selection_context(self):\n", " index = self._selected_obj.index\n", " result = Series(self.grouper.group_info[0], index)\n", " if not ascending:\n", " result = self.ngroups - 1 - result\n", " return result" ]
[ 0, 0.08333333333333333, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.04 ]
64
0.001927
def get_seqs_type(seqs):
    """
    automagically determine input type
    the following types are detected:
        - Fasta object
        - FASTA file
        - list of regions
        - region file
        - BED file
    """
    region_p = re.compile(r'^(.+):(\d+)-(\d+)$')
    if isinstance(seqs, Fasta):
        return "fasta"
    elif isinstance(seqs, list):
        if len(seqs) == 0:
            raise ValueError("empty list of sequences to scan")
        else:
            if region_p.search(seqs[0]):
                return "regions"
            else:
                raise ValueError("unknown region type")
    elif isinstance(seqs, str) or isinstance(seqs, unicode):
        if os.path.isfile(seqs):
            ftype = determine_file_type(seqs)
            if ftype == "unknown":
                raise ValueError("unknown type")
            elif ftype == "narrowpeak":
                raise ValueError("narrowPeak not yet supported in this function")
            else:
                return ftype + "file"
        else:
            raise ValueError("no file found with name {}".format(seqs))
    else:
        raise ValueError("unknown type {}".format(type(seqs).__name__))
[ "def", "get_seqs_type", "(", "seqs", ")", ":", "region_p", "=", "re", ".", "compile", "(", "r'^(.+):(\\d+)-(\\d+)$'", ")", "if", "isinstance", "(", "seqs", ",", "Fasta", ")", ":", "return", "\"fasta\"", "elif", "isinstance", "(", "seqs", ",", "list", ")", ":", "if", "len", "(", "seqs", ")", "==", "0", ":", "raise", "ValueError", "(", "\"empty list of sequences to scan\"", ")", "else", ":", "if", "region_p", ".", "search", "(", "seqs", "[", "0", "]", ")", ":", "return", "\"regions\"", "else", ":", "raise", "ValueError", "(", "\"unknown region type\"", ")", "elif", "isinstance", "(", "seqs", ",", "str", ")", "or", "isinstance", "(", "seqs", ",", "unicode", ")", ":", "if", "os", ".", "path", ".", "isfile", "(", "seqs", ")", ":", "ftype", "=", "determine_file_type", "(", "seqs", ")", "if", "ftype", "==", "\"unknown\"", ":", "raise", "ValueError", "(", "\"unknown type\"", ")", "elif", "ftype", "==", "\"narrowpeak\"", ":", "raise", "ValueError", "(", "\"narrowPeak not yet supported in this function\"", ")", "else", ":", "return", "ftype", "+", "\"file\"", "else", ":", "raise", "ValueError", "(", "\"no file found with name {}\"", ".", "format", "(", "seqs", ")", ")", "else", ":", "raise", "ValueError", "(", "\"unknown type {}\"", ".", "format", "(", "type", "(", "seqs", ")", ".", "__name__", ")", ")" ]
33.970588
0.001684
[ "def get_seqs_type(seqs):\n", " \"\"\"\n", " automagically determine input type\n", " the following types are detected:\n", " - Fasta object\n", " - FASTA file\n", " - list of regions\n", " - region file\n", " - BED file\n", " \"\"\"\n", " region_p = re.compile(r'^(.+):(\\d+)-(\\d+)$')\n", " if isinstance(seqs, Fasta):\n", " return \"fasta\"\n", " elif isinstance(seqs, list):\n", " if len(seqs) == 0:\n", " raise ValueError(\"empty list of sequences to scan\")\n", " else:\n", " if region_p.search(seqs[0]):\n", " return \"regions\"\n", " else:\n", " raise ValueError(\"unknown region type\")\n", " elif isinstance(seqs, str) or isinstance(seqs, unicode):\n", " if os.path.isfile(seqs):\n", " ftype = determine_file_type(seqs)\n", " if ftype == \"unknown\":\n", " raise ValueError(\"unknown type\")\n", " elif ftype == \"narrowpeak\":\n", " raise ValueError(\"narrowPeak not yet supported in this function\")\n", " else:\n", " return ftype + \"file\"\n", " else:\n", " raise ValueError(\"no file found with name {}\".format(seqs))\n", " else:\n", " raise ValueError(\"unknown type {}\".format(type(seqs).__name__))" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012195121951219513, 0, 0, 0, 0, 0, 0.014084507042253521 ]
34
0.000773
def _update_simulation_start_cards(self):
    """
    Update GSSHA cards for simulation start
    """
    if self.simulation_start is not None:
        self._update_card("START_DATE", self.simulation_start.strftime("%Y %m %d"))
        self._update_card("START_TIME", self.simulation_start.strftime("%H %M"))
[ "def", "_update_simulation_start_cards", "(", "self", ")", ":", "if", "self", ".", "simulation_start", "is", "not", "None", ":", "self", ".", "_update_card", "(", "\"START_DATE\"", ",", "self", ".", "simulation_start", ".", "strftime", "(", "\"%Y %m %d\"", ")", ")", "self", ".", "_update_card", "(", "\"START_TIME\"", ",", "self", ".", "simulation_start", ".", "strftime", "(", "\"%H %M\"", ")", ")" ]
46.571429
0.012048
[ "def _update_simulation_start_cards(self):\n", " \"\"\"\n", " Update GSSHA cards for simulation start\n", " \"\"\"\n", " if self.simulation_start is not None:\n", " self._update_card(\"START_DATE\", self.simulation_start.strftime(\"%Y %m %d\"))\n", " self._update_card(\"START_TIME\", self.simulation_start.strftime(\"%H %M\"))" ]
[ 0, 0.08333333333333333, 0, 0, 0, 0.011363636363636364, 0.023809523809523808 ]
7
0.016929
def validate(self, table: pd.DataFrame, failed_only=False) -> pd.DataFrame:
    """Return a dataframe of validation results for the appropriate series vs the vector of validators.

    Args:
        table (pd.DataFrame): A dataframe on which to apply validation logic.
        failed_only (bool): If ``True``: return only the indexes that failed to validate.
    """
    return pd.concat([
        self._validate_input(table, failed_only=failed_only),
        self._validate_output(table, failed_only=failed_only),
    ]).fillna(True)
[ "def", "validate", "(", "self", ",", "table", ":", "pd", ".", "DataFrame", ",", "failed_only", "=", "False", ")", "->", "pd", ".", "DataFrame", ":", "return", "pd", ".", "concat", "(", "[", "self", ".", "_validate_input", "(", "table", ",", "failed_only", "=", "failed_only", ")", ",", "self", ".", "_validate_output", "(", "table", ",", "failed_only", "=", "failed_only", ")", ",", "]", ")", ".", "fillna", "(", "True", ")" ]
50.909091
0.008772
[ "def validate(self, table: pd.DataFrame, failed_only=False) -> pd.DataFrame:\n", " \"\"\"Return a dataframe of validation results for the appropriate series vs the vector of validators.\n", "\n", " Args:\n", " table (pd.DataFrame): A dataframe on which to apply validation logic.\n", " failed_only (bool): If ``True``: return only the indexes that failed to validate.\n", " \"\"\"\n", " return pd.concat([\n", " self._validate_input(table, failed_only=failed_only),\n", " self._validate_output(table, failed_only=failed_only),\n", " ]).fillna(True)" ]
[ 0, 0.018518518518518517, 0, 0, 0.012195121951219513, 0.010638297872340425, 0, 0, 0, 0, 0.043478260869565216 ]
11
0.007712
def _proxy(self):
    """
    Generate an instance context for the instance, the context is capable of
    performing various actions. All instance actions are proxied to the context

    :returns: TaskQueueStatisticsContext for this TaskQueueStatisticsInstance
    :rtype: twilio.rest.taskrouter.v1.workspace.task_queue.task_queue_statistics.TaskQueueStatisticsContext
    """
    if self._context is None:
        self._context = TaskQueueStatisticsContext(
            self._version,
            workspace_sid=self._solution['workspace_sid'],
            task_queue_sid=self._solution['task_queue_sid'],
        )
    return self._context
[ "def", "_proxy", "(", "self", ")", ":", "if", "self", ".", "_context", "is", "None", ":", "self", ".", "_context", "=", "TaskQueueStatisticsContext", "(", "self", ".", "_version", ",", "workspace_sid", "=", "self", ".", "_solution", "[", "'workspace_sid'", "]", ",", "task_queue_sid", "=", "self", ".", "_solution", "[", "'task_queue_sid'", "]", ",", ")", "return", "self", ".", "_context" ]
45.333333
0.008646
[ "def _proxy(self):\n", " \"\"\"\n", " Generate an instance context for the instance, the context is capable of\n", " performing various actions. All instance actions are proxied to the context\n", "\n", " :returns: TaskQueueStatisticsContext for this TaskQueueStatisticsInstance\n", " :rtype: twilio.rest.taskrouter.v1.workspace.task_queue.task_queue_statistics.TaskQueueStatisticsContext\n", " \"\"\"\n", " if self._context is None:\n", " self._context = TaskQueueStatisticsContext(\n", " self._version,\n", " workspace_sid=self._solution['workspace_sid'],\n", " task_queue_sid=self._solution['task_queue_sid'],\n", " )\n", " return self._context" ]
[ 0, 0.08333333333333333, 0.012345679012345678, 0.011764705882352941, 0, 0.012195121951219513, 0.008928571428571428, 0, 0, 0, 0, 0, 0, 0, 0.03571428571428571 ]
15
0.010952
def create(cls, name, template='High-Security IPS Template'):
    """
    Create an IPS Policy

    :param str name: Name of policy
    :param str template: name of template
    :raises CreatePolicyFailed: policy failed to create
    :return: IPSPolicy
    """
    try:
        if cls.typeof == 'ips_template_policy' and template is None:
            fw_template = None
        else:
            fw_template = IPSTemplatePolicy(template).href
    except ElementNotFound:
        raise LoadPolicyFailed(
            'Cannot find specified firewall template: {}'.format(template))
    json = {
        'name': name,
        'template': fw_template}
    try:
        return ElementCreator(cls, json)
    except CreateElementFailed as err:
        raise CreatePolicyFailed(err)
[ "def", "create", "(", "cls", ",", "name", ",", "template", "=", "'High-Security IPS Template'", ")", ":", "try", ":", "if", "cls", ".", "typeof", "==", "'ips_template_policy'", "and", "template", "is", "None", ":", "fw_template", "=", "None", "else", ":", "fw_template", "=", "IPSTemplatePolicy", "(", "template", ")", ".", "href", "except", "ElementNotFound", ":", "raise", "LoadPolicyFailed", "(", "'Cannot find specified firewall template: {}'", ".", "format", "(", "template", ")", ")", "json", "=", "{", "'name'", ":", "name", ",", "'template'", ":", "fw_template", "}", "try", ":", "return", "ElementCreator", "(", "cls", ",", "json", ")", "except", "CreateElementFailed", "as", "err", ":", "raise", "CreatePolicyFailed", "(", "err", ")" ]
34.916667
0.002323
[ "def create(cls, name, template='High-Security IPS Template'):\n", " \"\"\"\n", " Create an IPS Policy\n", "\n", " :param str name: Name of policy\n", " :param str template: name of template\n", " :raises CreatePolicyFailed: policy failed to create\n", " :return: IPSPolicy\n", " \"\"\"\n", " try:\n", " if cls.typeof == 'ips_template_policy' and template is None:\n", " fw_template = None\n", " else:\n", " fw_template = IPSTemplatePolicy(template).href\n", " except ElementNotFound:\n", " raise LoadPolicyFailed(\n", " 'Cannot find specified firewall template: {}'.format(template))\n", " json = {\n", " 'name': name,\n", " 'template': fw_template}\n", " try:\n", " return ElementCreator(cls, json)\n", " except CreateElementFailed as err:\n", " raise CreatePolicyFailed(err)" ]
[ 0, 0.08333333333333333, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.024390243902439025 ]
24
0.004488
def add_pk_if_required(db, table, name):
    """Return a class deriving from our Model class as well as the SQLAlchemy
    model.

    :param `sqlalchemy.schema.Table` table: table to create primary key for
    :param table: table to create primary key for

    """
    db.metadata.reflect(bind=db.engine)
    cls_dict = {'__tablename__': name}
    if not table.primary_key:
        for column in table.columns:
            column.primary_key = True
        Table(name, db.metadata, *table.columns, extend_existing=True)
        cls_dict['__table__'] = table
        db.metadata.create_all(bind=db.engine)

    return type(str(name), (sandman_model, db.Model), cls_dict)
[ "def", "add_pk_if_required", "(", "db", ",", "table", ",", "name", ")", ":", "db", ".", "metadata", ".", "reflect", "(", "bind", "=", "db", ".", "engine", ")", "cls_dict", "=", "{", "'__tablename__'", ":", "name", "}", "if", "not", "table", ".", "primary_key", ":", "for", "column", "in", "table", ".", "columns", ":", "column", ".", "primary_key", "=", "True", "Table", "(", "name", ",", "db", ".", "metadata", ",", "*", "table", ".", "columns", ",", "extend_existing", "=", "True", ")", "cls_dict", "[", "'__table__'", "]", "=", "table", "db", ".", "metadata", ".", "create_all", "(", "bind", "=", "db", ".", "engine", ")", "return", "type", "(", "str", "(", "name", ")", ",", "(", "sandman_model", ",", "db", ".", "Model", ")", ",", "cls_dict", ")" ]
36.333333
0.00149
[ "def add_pk_if_required(db, table, name):\n", " \"\"\"Return a class deriving from our Model class as well as the SQLAlchemy\n", " model.\n", "\n", " :param `sqlalchemy.schema.Table` table: table to create primary key for\n", " :param table: table to create primary key for\n", "\n", " \"\"\"\n", " db.metadata.reflect(bind=db.engine)\n", " cls_dict = {'__tablename__': name}\n", " if not table.primary_key:\n", " for column in table.columns:\n", " column.primary_key = True\n", " Table(name, db.metadata, *table.columns, extend_existing=True)\n", " cls_dict['__table__'] = table\n", " db.metadata.create_all(bind=db.engine)\n", "\n", " return type(str(name), (sandman_model, db.Model), cls_dict)" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.015873015873015872 ]
18
0.000882
def covar_errors(params, data, errs, B, C=None):
    """
    Take a set of parameters that were fit with lmfit, and replace the errors
    with the 1\sigma errors calculated using the covariance matrix.


    Parameters
    ----------
    params : lmfit.Parameters
        Model

    data : 2d-array
        Image data

    errs : 2d-array ?
        Image noise.

    B : 2d-array
        B matrix.

    C : 2d-array
        C matrix. Optional. If supplied then Bmatrix will not be used.

    Returns
    -------
    params : lmfit.Parameters
        Modified model.
    """

    mask = np.where(np.isfinite(data))

    # calculate the proper parameter errors and copy them across.
    if C is not None:
        try:
            J = lmfit_jacobian(params, mask[0], mask[1], errs=errs)
            covar = np.transpose(J).dot(inv(C)).dot(J)
            onesigma = np.sqrt(np.diag(inv(covar)))
        except (np.linalg.linalg.LinAlgError, ValueError) as _:
            C = None

    if C is None:
        try:
            J = lmfit_jacobian(params, mask[0], mask[1], B=B, errs=errs)
            covar = np.transpose(J).dot(J)
            onesigma = np.sqrt(np.diag(inv(covar)))
        except (np.linalg.linalg.LinAlgError, ValueError) as _:
            onesigma = [-2] * len(mask[0])

    for i in range(params['components'].value):
        prefix = "c{0}_".format(i)
        j = 0
        for p in ['amp', 'xo', 'yo', 'sx', 'sy', 'theta']:
            if params[prefix + p].vary:
                params[prefix + p].stderr = onesigma[j]
                j += 1

    return params
[ "def", "covar_errors", "(", "params", ",", "data", ",", "errs", ",", "B", ",", "C", "=", "None", ")", ":", "mask", "=", "np", ".", "where", "(", "np", ".", "isfinite", "(", "data", ")", ")", "# calculate the proper parameter errors and copy them across.", "if", "C", "is", "not", "None", ":", "try", ":", "J", "=", "lmfit_jacobian", "(", "params", ",", "mask", "[", "0", "]", ",", "mask", "[", "1", "]", ",", "errs", "=", "errs", ")", "covar", "=", "np", ".", "transpose", "(", "J", ")", ".", "dot", "(", "inv", "(", "C", ")", ")", ".", "dot", "(", "J", ")", "onesigma", "=", "np", ".", "sqrt", "(", "np", ".", "diag", "(", "inv", "(", "covar", ")", ")", ")", "except", "(", "np", ".", "linalg", ".", "linalg", ".", "LinAlgError", ",", "ValueError", ")", "as", "_", ":", "C", "=", "None", "if", "C", "is", "None", ":", "try", ":", "J", "=", "lmfit_jacobian", "(", "params", ",", "mask", "[", "0", "]", ",", "mask", "[", "1", "]", ",", "B", "=", "B", ",", "errs", "=", "errs", ")", "covar", "=", "np", ".", "transpose", "(", "J", ")", ".", "dot", "(", "J", ")", "onesigma", "=", "np", ".", "sqrt", "(", "np", ".", "diag", "(", "inv", "(", "covar", ")", ")", ")", "except", "(", "np", ".", "linalg", ".", "linalg", ".", "LinAlgError", ",", "ValueError", ")", "as", "_", ":", "onesigma", "=", "[", "-", "2", "]", "*", "len", "(", "mask", "[", "0", "]", ")", "for", "i", "in", "range", "(", "params", "[", "'components'", "]", ".", "value", ")", ":", "prefix", "=", "\"c{0}_\"", ".", "format", "(", "i", ")", "j", "=", "0", "for", "p", "in", "[", "'amp'", ",", "'xo'", ",", "'yo'", ",", "'sx'", ",", "'sy'", ",", "'theta'", "]", ":", "if", "params", "[", "prefix", "+", "p", "]", ".", "vary", ":", "params", "[", "prefix", "+", "p", "]", ".", "stderr", "=", "onesigma", "[", "j", "]", "j", "+=", "1", "return", "params" ]
26.701754
0.001267
[ "def covar_errors(params, data, errs, B, C=None):\n", " \"\"\"\n", " Take a set of parameters that were fit with lmfit, and replace the errors\n", " with the 1\\sigma errors calculated using the covariance matrix.\n", "\n", "\n", " Parameters\n", " ----------\n", " params : lmfit.Parameters\n", " Model\n", "\n", " data : 2d-array\n", " Image data\n", "\n", " errs : 2d-array ?\n", " Image noise.\n", "\n", " B : 2d-array\n", " B matrix.\n", "\n", " C : 2d-array\n", " C matrix. Optional. If supplied then Bmatrix will not be used.\n", "\n", " Returns\n", " -------\n", " params : lmfit.Parameters\n", " Modified model.\n", " \"\"\"\n", "\n", " mask = np.where(np.isfinite(data))\n", "\n", " # calculate the proper parameter errors and copy them across.\n", " if C is not None:\n", " try:\n", " J = lmfit_jacobian(params, mask[0], mask[1], errs=errs)\n", " covar = np.transpose(J).dot(inv(C)).dot(J)\n", " onesigma = np.sqrt(np.diag(inv(covar)))\n", " except (np.linalg.linalg.LinAlgError, ValueError) as _:\n", " C = None\n", "\n", " if C is None:\n", " try:\n", " J = lmfit_jacobian(params, mask[0], mask[1], B=B, errs=errs)\n", " covar = np.transpose(J).dot(J)\n", " onesigma = np.sqrt(np.diag(inv(covar)))\n", " except (np.linalg.linalg.LinAlgError, ValueError) as _:\n", " onesigma = [-2] * len(mask[0])\n", "\n", " for i in range(params['components'].value):\n", " prefix = \"c{0}_\".format(i)\n", " j = 0\n", " for p in ['amp', 'xo', 'yo', 'sx', 'sy', 'theta']:\n", " if params[prefix + p].vary:\n", " params[prefix + p].stderr = onesigma[j]\n", " j += 1\n", "\n", " return params" ]
[ 0, 0, 0, 0.014705882352941176, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.058823529411764705 ]
57
0.00129
def returnArrayFilters(arr1, arr2, limitsArr1, limitsArr2):
    """#TODO: docstring

    :param arr1: #TODO: docstring
    :param arr2: #TODO: docstring
    :param limitsArr1: #TODO: docstring
    :param limitsArr2: #TODO: docstring

    :returns: #TODO: docstring
    """
    posL = bisect.bisect_left(arr1, limitsArr1[0])
    posR = bisect.bisect_right(arr1, limitsArr1[1])
    matchMask = ((arr2[posL:posR] <= limitsArr2[1]) &
                 (arr2[posL:posR] >= limitsArr2[0])
                 )
    return posL, posR, matchMask
[ "def", "returnArrayFilters", "(", "arr1", ",", "arr2", ",", "limitsArr1", ",", "limitsArr2", ")", ":", "posL", "=", "bisect", ".", "bisect_left", "(", "arr1", ",", "limitsArr1", "[", "0", "]", ")", "posR", "=", "bisect", ".", "bisect_right", "(", "arr1", ",", "limitsArr1", "[", "1", "]", ")", "matchMask", "=", "(", "(", "arr2", "[", "posL", ":", "posR", "]", "<=", "limitsArr2", "[", "1", "]", ")", "&", "(", "arr2", "[", "posL", ":", "posR", "]", ">=", "limitsArr2", "[", "0", "]", ")", ")", "return", "posL", ",", "posR", ",", "matchMask" ]
32.375
0.001876
[ "def returnArrayFilters(arr1, arr2, limitsArr1, limitsArr2):\n", " \"\"\"#TODO: docstring\n", "\n", " :param arr1: #TODO: docstring\n", " :param arr2: #TODO: docstring\n", " :param limitsArr1: #TODO: docstring\n", " :param limitsArr2: #TODO: docstring\n", "\n", " :returns: #TODO: docstring\n", " \"\"\"\n", " posL = bisect.bisect_left(arr1, limitsArr1[0])\n", " posR = bisect.bisect_right(arr1, limitsArr1[1])\n", " matchMask = ((arr2[posL:posR] <= limitsArr2[1]) &\n", " (arr2[posL:posR] >= limitsArr2[0])\n", " )\n", " return posL, posR, matchMask" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.03125 ]
16
0.001953
def apply_enhancement(data, func, exclude=None, separate=False,
                      pass_dask=False):
    """Apply `func` to the provided data.

    Args:
        data (xarray.DataArray): Data to be modified inplace.
        func (callable): Function to be applied to an xarray
        exclude (iterable): Bands in the 'bands' dimension to not include
                            in the calculations.
        separate (bool): Apply `func` one band at a time. Default is False.
        pass_dask (bool): Pass the underlying dask array instead of the
                          xarray.DataArray.

    """
    attrs = data.attrs
    bands = data.coords['bands'].values
    if exclude is None:
        exclude = ['A'] if 'A' in bands else []

    if separate:
        data_arrs = []
        for idx, band_name in enumerate(bands):
            band_data = data.sel(bands=[band_name])
            if band_name in exclude:
                # don't modify alpha
                data_arrs.append(band_data)
                continue

            if pass_dask:
                dims = band_data.dims
                coords = band_data.coords
                d_arr = func(band_data.data, index=idx)
                band_data = xr.DataArray(d_arr, dims=dims, coords=coords)
            else:
                band_data = func(band_data, index=idx)
            data_arrs.append(band_data)
            # we assume that the func can add attrs
            attrs.update(band_data.attrs)

        data.data = xr.concat(data_arrs, dim='bands').data
        data.attrs = attrs
        return data
    else:
        band_data = data.sel(bands=[b for b in bands
                                    if b not in exclude])
        if pass_dask:
            dims = band_data.dims
            coords = band_data.coords
            d_arr = func(band_data.data)
            band_data = xr.DataArray(d_arr, dims=dims, coords=coords)
        else:
            band_data = func(band_data)

        attrs.update(band_data.attrs)
        # combine the new data with the excluded data
        new_data = xr.concat([band_data, data.sel(bands=exclude)],
                             dim='bands')
        data.data = new_data.sel(bands=bands).data
        data.attrs = attrs

    return data
[ "def", "apply_enhancement", "(", "data", ",", "func", ",", "exclude", "=", "None", ",", "separate", "=", "False", ",", "pass_dask", "=", "False", ")", ":", "attrs", "=", "data", ".", "attrs", "bands", "=", "data", ".", "coords", "[", "'bands'", "]", ".", "values", "if", "exclude", "is", "None", ":", "exclude", "=", "[", "'A'", "]", "if", "'A'", "in", "bands", "else", "[", "]", "if", "separate", ":", "data_arrs", "=", "[", "]", "for", "idx", ",", "band_name", "in", "enumerate", "(", "bands", ")", ":", "band_data", "=", "data", ".", "sel", "(", "bands", "=", "[", "band_name", "]", ")", "if", "band_name", "in", "exclude", ":", "# don't modify alpha", "data_arrs", ".", "append", "(", "band_data", ")", "continue", "if", "pass_dask", ":", "dims", "=", "band_data", ".", "dims", "coords", "=", "band_data", ".", "coords", "d_arr", "=", "func", "(", "band_data", ".", "data", ",", "index", "=", "idx", ")", "band_data", "=", "xr", ".", "DataArray", "(", "d_arr", ",", "dims", "=", "dims", ",", "coords", "=", "coords", ")", "else", ":", "band_data", "=", "func", "(", "band_data", ",", "index", "=", "idx", ")", "data_arrs", ".", "append", "(", "band_data", ")", "# we assume that the func can add attrs", "attrs", ".", "update", "(", "band_data", ".", "attrs", ")", "data", ".", "data", "=", "xr", ".", "concat", "(", "data_arrs", ",", "dim", "=", "'bands'", ")", ".", "data", "data", ".", "attrs", "=", "attrs", "return", "data", "else", ":", "band_data", "=", "data", ".", "sel", "(", "bands", "=", "[", "b", "for", "b", "in", "bands", "if", "b", "not", "in", "exclude", "]", ")", "if", "pass_dask", ":", "dims", "=", "band_data", ".", "dims", "coords", "=", "band_data", ".", "coords", "d_arr", "=", "func", "(", "band_data", ".", "data", ")", "band_data", "=", "xr", ".", "DataArray", "(", "d_arr", ",", "dims", "=", "dims", ",", "coords", "=", "coords", ")", "else", ":", "band_data", "=", "func", "(", "band_data", ")", "attrs", ".", "update", "(", "band_data", ".", "attrs", ")", "# combine the new data with the excluded data", "new_data", "=", "xr", ".", "concat", "(", "[", "band_data", ",", "data", ".", "sel", "(", "bands", "=", "exclude", ")", "]", ",", "dim", "=", "'bands'", ")", "data", ".", "data", "=", "new_data", ".", "sel", "(", "bands", "=", "bands", ")", ".", "data", "data", ".", "attrs", "=", "attrs", "return", "data" ]
35.901639
0.000444
[ "def apply_enhancement(data, func, exclude=None, separate=False,\n", " pass_dask=False):\n", " \"\"\"Apply `func` to the provided data.\n", "\n", " Args:\n", " data (xarray.DataArray): Data to be modified inplace.\n", " func (callable): Function to be applied to an xarray\n", " exclude (iterable): Bands in the 'bands' dimension to not include\n", " in the calculations.\n", " separate (bool): Apply `func` one band at a time. Default is False.\n", " pass_dask (bool): Pass the underlying dask array instead of the\n", " xarray.DataArray.\n", "\n", " \"\"\"\n", " attrs = data.attrs\n", " bands = data.coords['bands'].values\n", " if exclude is None:\n", " exclude = ['A'] if 'A' in bands else []\n", "\n", " if separate:\n", " data_arrs = []\n", " for idx, band_name in enumerate(bands):\n", " band_data = data.sel(bands=[band_name])\n", " if band_name in exclude:\n", " # don't modify alpha\n", " data_arrs.append(band_data)\n", " continue\n", "\n", " if pass_dask:\n", " dims = band_data.dims\n", " coords = band_data.coords\n", " d_arr = func(band_data.data, index=idx)\n", " band_data = xr.DataArray(d_arr, dims=dims, coords=coords)\n", " else:\n", " band_data = func(band_data, index=idx)\n", " data_arrs.append(band_data)\n", " # we assume that the func can add attrs\n", " attrs.update(band_data.attrs)\n", "\n", " data.data = xr.concat(data_arrs, dim='bands').data\n", " data.attrs = attrs\n", " return data\n", " else:\n", " band_data = data.sel(bands=[b for b in bands\n", " if b not in exclude])\n", " if pass_dask:\n", " dims = band_data.dims\n", " coords = band_data.coords\n", " d_arr = func(band_data.data)\n", " band_data = xr.DataArray(d_arr, dims=dims, coords=coords)\n", " else:\n", " band_data = func(band_data)\n", "\n", " attrs.update(band_data.attrs)\n", " # combine the new data with the excluded data\n", " new_data = xr.concat([band_data, data.sel(bands=exclude)],\n", " dim='bands')\n", " data.data = new_data.sel(bands=bands).data\n", " data.attrs = attrs\n", "\n", " return data" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.06666666666666667 ]
61
0.001093
def delta(t1, t2, words=True, justnow=datetime.timedelta(seconds=10)):
    '''
    Calculates the estimated delta between two time objects in human-readable
    format. Used internally by the :func:`day` and :func:`duration` functions.

    :param t1: timestamp, :class:`datetime.date` or :class:`datetime.datetime`
               object
    :param t2: timestamp, :class:`datetime.date` or :class:`datetime.datetime`
               object
    :param words: default ``True``, allow words like "yesterday", "tomorrow"
                  and "just now"
    :param justnow: default ``datetime.timedelta(seconds=10)``,
                    :class:`datetime.timedelta` object representing tolerance for
                    considering a delta as meaning 'just now'

    >>> (x,y) = delta(_to_datetime('2012-06-13T15:24:17'), \
                      _to_datetime('2013-12-11T12:34:56'))
    >>> print(x)
    77 weeks
    >>> int(y)
    -594639
    '''

    t1 = _to_datetime(t1)
    t2 = _to_datetime(t2)
    diff = t1 - t2
    date_diff = t1.date() - t2.date()

    # The datetime module includes milliseconds with float precision. Floats
    # will give unexpected results here, so we round the value here
    total = math.ceil(_total_seconds(diff))
    total_abs = abs(total)

    if total_abs < TIME_DAY:
        if abs(diff) < justnow and words:
            return (
                _('just now'),
                0,
            )

        elif total_abs < TIME_MINUTE:
            seconds = total_abs
            return (
                _multi(
                    _('%d second'),
                    _('%d seconds'),
                    seconds
                ) % (seconds,),
                0,
            )
        elif total_abs < TIME_MINUTE * 2 and words:
            return (
                _('a minute'),
                0,
            )

        elif total_abs < TIME_HOUR:
            minutes, seconds = divmod(total_abs, TIME_MINUTE)
            if total < 0:
                seconds *= -1
            return (
                _multi(
                    _('%d minute'),
                    _('%d minutes'),
                    minutes
                ) % (minutes,),
                seconds,
            )

        elif total_abs < TIME_HOUR * 2 and words:
            return (
                _('an hour'),
                0,
            )

        else:
            hours, seconds = divmod(total_abs, TIME_HOUR)
            if total < 0:
                seconds *= -1

            return (
                _multi(
                    _('%d hour'),
                    _('%d hours'),
                    hours
                ) % (hours,),
                seconds,
            )

    elif date_diff.days == 1 and words:
        return (_('tomorrow'), 0)

    elif date_diff.days == -1 and words:
        return (_('yesterday'), 0)

    elif total_abs < TIME_WEEK:
        days, seconds = divmod(total_abs, TIME_DAY)
        if total < 0:
            seconds *= -1
        return (
            _multi(
                _('%d day'),
                _('%d days'),
                days
            ) % (days,),
            seconds,
        )

    elif abs(diff.days) == TIME_WEEK and words:
        if total > 0:
            return (_('next week'), diff.seconds)
        else:
            return (_('last week'), diff.seconds)

# FIXME
#
# The biggest reliable unit we can supply to the user is a week (for now?),
# because we can not safely determine the amount of days in the covered
# month/year span.

    else:
        weeks, seconds = divmod(total_abs, TIME_WEEK)
        if total < 0:
            seconds *= -1
        return (
            _multi(
                _('%d week'),
                _('%d weeks'),
                weeks
            ) % (weeks,),
            seconds,
        )
[ "def", "delta", "(", "t1", ",", "t2", ",", "words", "=", "True", ",", "justnow", "=", "datetime", ".", "timedelta", "(", "seconds", "=", "10", ")", ")", ":", "t1", "=", "_to_datetime", "(", "t1", ")", "t2", "=", "_to_datetime", "(", "t2", ")", "diff", "=", "t1", "-", "t2", "date_diff", "=", "t1", ".", "date", "(", ")", "-", "t2", ".", "date", "(", ")", "# The datetime module includes milliseconds with float precision. Floats", "# will give unexpected results here, so we round the value here", "total", "=", "math", ".", "ceil", "(", "_total_seconds", "(", "diff", ")", ")", "total_abs", "=", "abs", "(", "total", ")", "if", "total_abs", "<", "TIME_DAY", ":", "if", "abs", "(", "diff", ")", "<", "justnow", "and", "words", ":", "return", "(", "_", "(", "'just now'", ")", ",", "0", ",", ")", "elif", "total_abs", "<", "TIME_MINUTE", ":", "seconds", "=", "total_abs", "return", "(", "_multi", "(", "_", "(", "'%d second'", ")", ",", "_", "(", "'%d seconds'", ")", ",", "seconds", ")", "%", "(", "seconds", ",", ")", ",", "0", ",", ")", "elif", "total_abs", "<", "TIME_MINUTE", "*", "2", "and", "words", ":", "return", "(", "_", "(", "'a minute'", ")", ",", "0", ",", ")", "elif", "total_abs", "<", "TIME_HOUR", ":", "minutes", ",", "seconds", "=", "divmod", "(", "total_abs", ",", "TIME_MINUTE", ")", "if", "total", "<", "0", ":", "seconds", "*=", "-", "1", "return", "(", "_multi", "(", "_", "(", "'%d minute'", ")", ",", "_", "(", "'%d minutes'", ")", ",", "minutes", ")", "%", "(", "minutes", ",", ")", ",", "seconds", ",", ")", "elif", "total_abs", "<", "TIME_HOUR", "*", "2", "and", "words", ":", "return", "(", "_", "(", "'an hour'", ")", ",", "0", ",", ")", "else", ":", "hours", ",", "seconds", "=", "divmod", "(", "total_abs", ",", "TIME_HOUR", ")", "if", "total", "<", "0", ":", "seconds", "*=", "-", "1", "return", "(", "_multi", "(", "_", "(", "'%d hour'", ")", ",", "_", "(", "'%d hours'", ")", ",", "hours", ")", "%", "(", "hours", ",", ")", ",", "seconds", ",", ")", "elif", "date_diff", ".", "days", "==", "1", "and", "words", ":", "return", "(", "_", "(", "'tomorrow'", ")", ",", "0", ")", "elif", "date_diff", ".", "days", "==", "-", "1", "and", "words", ":", "return", "(", "_", "(", "'yesterday'", ")", ",", "0", ")", "elif", "total_abs", "<", "TIME_WEEK", ":", "days", ",", "seconds", "=", "divmod", "(", "total_abs", ",", "TIME_DAY", ")", "if", "total", "<", "0", ":", "seconds", "*=", "-", "1", "return", "(", "_multi", "(", "_", "(", "'%d day'", ")", ",", "_", "(", "'%d days'", ")", ",", "days", ")", "%", "(", "days", ",", ")", ",", "seconds", ",", ")", "elif", "abs", "(", "diff", ".", "days", ")", "==", "TIME_WEEK", "and", "words", ":", "if", "total", ">", "0", ":", "return", "(", "_", "(", "'next week'", ")", ",", "diff", ".", "seconds", ")", "else", ":", "return", "(", "_", "(", "'last week'", ")", ",", "diff", ".", "seconds", ")", "# FIXME", "#", "# The biggest reliable unit we can supply to the user is a week (for now?),", "# because we can not safely determine the amount of days in the covered", "# month/year span.", "else", ":", "weeks", ",", "seconds", "=", "divmod", "(", "total_abs", ",", "TIME_WEEK", ")", "if", "total", "<", "0", ":", "seconds", "*=", "-", "1", "return", "(", "_multi", "(", "_", "(", "'%d week'", ")", ",", "_", "(", "'%d weeks'", ")", ",", "weeks", ")", "%", "(", "weeks", ",", ")", ",", "seconds", ",", ")" ]
27.575758
0.000265
[ "def delta(t1, t2, words=True, justnow=datetime.timedelta(seconds=10)):\n", " '''\n", " Calculates the estimated delta between two time objects in human-readable\n", " format. Used internally by the :func:`day` and :func:`duration` functions.\n", "\n", " :param t1: timestamp, :class:`datetime.date` or :class:`datetime.datetime`\n", " object\n", " :param t2: timestamp, :class:`datetime.date` or :class:`datetime.datetime`\n", " object\n", " :param words: default ``True``, allow words like \"yesterday\", \"tomorrow\"\n", " and \"just now\"\n", " :param justnow: default ``datetime.timedelta(seconds=10)``,\n", " :class:`datetime.timedelta` object representing tolerance for\n", " considering a delta as meaning 'just now'\n", "\n", " >>> (x,y) = delta(_to_datetime('2012-06-13T15:24:17'), \\\n", " _to_datetime('2013-12-11T12:34:56'))\n", " >>> print(x)\n", " 77 weeks\n", " >>> int(y)\n", " -594639\n", " '''\n", "\n", " t1 = _to_datetime(t1)\n", " t2 = _to_datetime(t2)\n", " diff = t1 - t2\n", " date_diff = t1.date() - t2.date()\n", "\n", " # The datetime module includes milliseconds with float precision. Floats\n", " # will give unexpected results here, so we round the value here\n", " total = math.ceil(_total_seconds(diff))\n", " total_abs = abs(total)\n", "\n", " if total_abs < TIME_DAY:\n", " if abs(diff) < justnow and words:\n", " return (\n", " _('just now'),\n", " 0,\n", " )\n", "\n", " elif total_abs < TIME_MINUTE:\n", " seconds = total_abs\n", " return (\n", " _multi(\n", " _('%d second'),\n", " _('%d seconds'),\n", " seconds\n", " ) % (seconds,),\n", " 0,\n", " )\n", " elif total_abs < TIME_MINUTE * 2 and words:\n", " return (\n", " _('a minute'),\n", " 0,\n", " )\n", "\n", " elif total_abs < TIME_HOUR:\n", " minutes, seconds = divmod(total_abs, TIME_MINUTE)\n", " if total < 0:\n", " seconds *= -1\n", " return (\n", " _multi(\n", " _('%d minute'),\n", " _('%d minutes'),\n", " minutes\n", " ) % (minutes,),\n", " seconds,\n", " )\n", "\n", " elif total_abs < TIME_HOUR * 2 and words:\n", " return (\n", " _('an hour'),\n", " 0,\n", " )\n", "\n", " else:\n", " hours, seconds = divmod(total_abs, TIME_HOUR)\n", " if total < 0:\n", " seconds *= -1\n", "\n", " return (\n", " _multi(\n", " _('%d hour'),\n", " _('%d hours'),\n", " hours\n", " ) % (hours,),\n", " seconds,\n", " )\n", "\n", " elif date_diff.days == 1 and words:\n", " return (_('tomorrow'), 0)\n", "\n", " elif date_diff.days == -1 and words:\n", " return (_('yesterday'), 0)\n", "\n", " elif total_abs < TIME_WEEK:\n", " days, seconds = divmod(total_abs, TIME_DAY)\n", " if total < 0:\n", " seconds *= -1\n", " return (\n", " _multi(\n", " _('%d day'),\n", " _('%d days'),\n", " days\n", " ) % (days,),\n", " seconds,\n", " )\n", "\n", " elif abs(diff.days) == TIME_WEEK and words:\n", " if total > 0:\n", " return (_('next week'), diff.seconds)\n", " else:\n", " return (_('last week'), diff.seconds)\n", "\n", "# FIXME\n", "#\n", "# The biggest reliable unit we can supply to the user is a week (for now?),\n", "# because we can not safely determine the amount of days in the covered\n", "# month/year span.\n", "\n", " else:\n", " weeks, seconds = divmod(total_abs, TIME_WEEK)\n", " if total < 0:\n", " seconds *= -1\n", " return (\n", " _multi(\n", " _('%d week'),\n", " _('%d weeks'),\n", " weeks\n", " ) % (weeks,),\n", " seconds,\n", " )" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1111111111111111 ]
132
0.000842
def concatenate_textlcs(lclist,
                        sortby='rjd',
                        normalize=True):
    '''This concatenates a list of light curves.

    Does not care about overlaps or duplicates. The light curves must all be
    from the same aperture.

    The intended use is to concatenate light curves across CCDs or instrument
    changes for a single object. These can then be normalized later using
    standard astrobase tools to search for variablity and/or periodicity.

    sortby is a column to sort the final concatenated light curve by in
    ascending order.

    If normalize is True, then each light curve's magnitude columns are
    normalized to zero.

    The returned lcdict has an extra column: 'lcn' that tracks which measurement
    belongs to which input light curve. This can be used with
    lcdict['concatenated'] which relates input light curve index to input light
    curve filepath. Finally, there is an 'nconcatenated' key in the lcdict that
    contains the total number of concatenated light curves.

    '''

    # read the first light curve
    lcdict = read_hatpi_textlc(lclist[0])

    # track which LC goes where
    # initial LC
    lccounter = 0
    lcdict['concatenated'] = {lccounter: os.path.abspath(lclist[0])}
    lcdict['lcn'] = np.full_like(lcdict['rjd'], lccounter)

    # normalize if needed
    if normalize:

        for col in MAGCOLS:

            if col in lcdict:
                thismedval = np.nanmedian(lcdict[col])

                # handle fluxes
                if col in ('ifl1','ifl2','ifl3'):
                    lcdict[col] = lcdict[col] / thismedval
                # handle mags
                else:
                    lcdict[col] = lcdict[col] - thismedval

    # now read the rest
    for lcf in lclist[1:]:

        thislcd = read_hatpi_textlc(lcf)

        # if the columns don't agree, skip this LC
        if thislcd['columns'] != lcdict['columns']:
            LOGERROR('file %s does not have the '
                     'same columns as first file %s, skipping...'
                     % (lcf, lclist[0]))
            continue

        # otherwise, go ahead and start concatenatin'
        else:

            LOGINFO('adding %s (ndet: %s) to %s (ndet: %s)'
                    % (lcf,
                       thislcd['objectinfo']['ndet'],
                       lclist[0],
                       lcdict[lcdict['columns'][0]].size))

            # update LC tracking
            lccounter = lccounter + 1
            lcdict['concatenated'][lccounter] = os.path.abspath(lcf)
            lcdict['lcn'] = np.concatenate((
                lcdict['lcn'],
                np.full_like(thislcd['rjd'],lccounter)
            ))

            # concatenate the columns
            for col in lcdict['columns']:

                # handle normalization for magnitude columns
                if normalize and col in MAGCOLS:

                    thismedval = np.nanmedian(thislcd[col])

                    # handle fluxes
                    if col in ('ifl1','ifl2','ifl3'):
                        thislcd[col] = thislcd[col] / thismedval
                    # handle mags
                    else:
                        thislcd[col] = thislcd[col] - thismedval

                # concatenate the values
                lcdict[col] = np.concatenate((lcdict[col], thislcd[col]))

    #
    # now we're all done concatenatin'
    #

    # make sure to add up the ndet
    lcdict['objectinfo']['ndet'] = lcdict[lcdict['columns'][0]].size

    # update the stations
    lcdict['objectinfo']['stations'] = [
        'HP%s' % x for x in np.unique(lcdict['stf']).tolist()
    ]

    # update the total LC count
    lcdict['nconcatenated'] = lccounter + 1

    # if we're supposed to sort by a column, do so
    if sortby and sortby in [x[0] for x in COLDEFS]:

        LOGINFO('sorting concatenated light curve by %s...' % sortby)
        sortind = np.argsort(lcdict[sortby])

        # sort all the measurement columns by this index
        for col in lcdict['columns']:
            lcdict[col] = lcdict[col][sortind]

        # make sure to sort the lcn index as well
        lcdict['lcn'] = lcdict['lcn'][sortind]

    LOGINFO('done. concatenated light curve has %s detections' %
            lcdict['objectinfo']['ndet'])

    return lcdict
[ "def", "concatenate_textlcs", "(", "lclist", ",", "sortby", "=", "'rjd'", ",", "normalize", "=", "True", ")", ":", "# read the first light curve", "lcdict", "=", "read_hatpi_textlc", "(", "lclist", "[", "0", "]", ")", "# track which LC goes where", "# initial LC", "lccounter", "=", "0", "lcdict", "[", "'concatenated'", "]", "=", "{", "lccounter", ":", "os", ".", "path", ".", "abspath", "(", "lclist", "[", "0", "]", ")", "}", "lcdict", "[", "'lcn'", "]", "=", "np", ".", "full_like", "(", "lcdict", "[", "'rjd'", "]", ",", "lccounter", ")", "# normalize if needed", "if", "normalize", ":", "for", "col", "in", "MAGCOLS", ":", "if", "col", "in", "lcdict", ":", "thismedval", "=", "np", ".", "nanmedian", "(", "lcdict", "[", "col", "]", ")", "# handle fluxes", "if", "col", "in", "(", "'ifl1'", ",", "'ifl2'", ",", "'ifl3'", ")", ":", "lcdict", "[", "col", "]", "=", "lcdict", "[", "col", "]", "/", "thismedval", "# handle mags", "else", ":", "lcdict", "[", "col", "]", "=", "lcdict", "[", "col", "]", "-", "thismedval", "# now read the rest", "for", "lcf", "in", "lclist", "[", "1", ":", "]", ":", "thislcd", "=", "read_hatpi_textlc", "(", "lcf", ")", "# if the columns don't agree, skip this LC", "if", "thislcd", "[", "'columns'", "]", "!=", "lcdict", "[", "'columns'", "]", ":", "LOGERROR", "(", "'file %s does not have the '", "'same columns as first file %s, skipping...'", "%", "(", "lcf", ",", "lclist", "[", "0", "]", ")", ")", "continue", "# otherwise, go ahead and start concatenatin'", "else", ":", "LOGINFO", "(", "'adding %s (ndet: %s) to %s (ndet: %s)'", "%", "(", "lcf", ",", "thislcd", "[", "'objectinfo'", "]", "[", "'ndet'", "]", ",", "lclist", "[", "0", "]", ",", "lcdict", "[", "lcdict", "[", "'columns'", "]", "[", "0", "]", "]", ".", "size", ")", ")", "# update LC tracking", "lccounter", "=", "lccounter", "+", "1", "lcdict", "[", "'concatenated'", "]", "[", "lccounter", "]", "=", "os", ".", "path", ".", "abspath", "(", "lcf", ")", "lcdict", "[", "'lcn'", "]", "=", "np", ".", "concatenate", "(", "(", "lcdict", "[", "'lcn'", "]", ",", "np", ".", "full_like", "(", "thislcd", "[", "'rjd'", "]", ",", "lccounter", ")", ")", ")", "# concatenate the columns", "for", "col", "in", "lcdict", "[", "'columns'", "]", ":", "# handle normalization for magnitude columns", "if", "normalize", "and", "col", "in", "MAGCOLS", ":", "thismedval", "=", "np", ".", "nanmedian", "(", "thislcd", "[", "col", "]", ")", "# handle fluxes", "if", "col", "in", "(", "'ifl1'", ",", "'ifl2'", ",", "'ifl3'", ")", ":", "thislcd", "[", "col", "]", "=", "thislcd", "[", "col", "]", "/", "thismedval", "# handle mags", "else", ":", "thislcd", "[", "col", "]", "=", "thislcd", "[", "col", "]", "-", "thismedval", "# concatenate the values", "lcdict", "[", "col", "]", "=", "np", ".", "concatenate", "(", "(", "lcdict", "[", "col", "]", ",", "thislcd", "[", "col", "]", ")", ")", "#", "# now we're all done concatenatin'", "#", "# make sure to add up the ndet", "lcdict", "[", "'objectinfo'", "]", "[", "'ndet'", "]", "=", "lcdict", "[", "lcdict", "[", "'columns'", "]", "[", "0", "]", "]", ".", "size", "# update the stations", "lcdict", "[", "'objectinfo'", "]", "[", "'stations'", "]", "=", "[", "'HP%s'", "%", "x", "for", "x", "in", "np", ".", "unique", "(", "lcdict", "[", "'stf'", "]", ")", ".", "tolist", "(", ")", "]", "# update the total LC count", "lcdict", "[", "'nconcatenated'", "]", "=", "lccounter", "+", "1", "# if we're supposed to sort by a column, do so", "if", "sortby", "and", "sortby", "in", "[", "x", "[", "0", "]", "for", "x", "in", 
"COLDEFS", "]", ":", "LOGINFO", "(", "'sorting concatenated light curve by %s...'", "%", "sortby", ")", "sortind", "=", "np", ".", "argsort", "(", "lcdict", "[", "sortby", "]", ")", "# sort all the measurement columns by this index", "for", "col", "in", "lcdict", "[", "'columns'", "]", ":", "lcdict", "[", "col", "]", "=", "lcdict", "[", "col", "]", "[", "sortind", "]", "# make sure to sort the lcn index as well", "lcdict", "[", "'lcn'", "]", "=", "lcdict", "[", "'lcn'", "]", "[", "sortind", "]", "LOGINFO", "(", "'done. concatenated light curve has %s detections'", "%", "lcdict", "[", "'objectinfo'", "]", "[", "'ndet'", "]", ")", "return", "lcdict" ]
32.734375
0.001621
[ "def concatenate_textlcs(lclist,\n", " sortby='rjd',\n", " normalize=True):\n", " '''This concatenates a list of light curves.\n", "\n", " Does not care about overlaps or duplicates. The light curves must all be\n", " from the same aperture.\n", "\n", " The intended use is to concatenate light curves across CCDs or instrument\n", " changes for a single object. These can then be normalized later using\n", " standard astrobase tools to search for variablity and/or periodicity.\n", "\n", " sortby is a column to sort the final concatenated light curve by in\n", " ascending order.\n", "\n", " If normalize is True, then each light curve's magnitude columns are\n", " normalized to zero.\n", "\n", " The returned lcdict has an extra column: 'lcn' that tracks which measurement\n", " belongs to which input light curve. This can be used with\n", " lcdict['concatenated'] which relates input light curve index to input light\n", " curve filepath. Finally, there is an 'nconcatenated' key in the lcdict that\n", " contains the total number of concatenated light curves.\n", "\n", " '''\n", "\n", " # read the first light curve\n", " lcdict = read_hatpi_textlc(lclist[0])\n", "\n", " # track which LC goes where\n", " # initial LC\n", " lccounter = 0\n", " lcdict['concatenated'] = {lccounter: os.path.abspath(lclist[0])}\n", " lcdict['lcn'] = np.full_like(lcdict['rjd'], lccounter)\n", "\n", " # normalize if needed\n", " if normalize:\n", "\n", " for col in MAGCOLS:\n", "\n", " if col in lcdict:\n", " thismedval = np.nanmedian(lcdict[col])\n", "\n", " # handle fluxes\n", " if col in ('ifl1','ifl2','ifl3'):\n", " lcdict[col] = lcdict[col] / thismedval\n", " # handle mags\n", " else:\n", " lcdict[col] = lcdict[col] - thismedval\n", "\n", " # now read the rest\n", " for lcf in lclist[1:]:\n", "\n", " thislcd = read_hatpi_textlc(lcf)\n", "\n", " # if the columns don't agree, skip this LC\n", " if thislcd['columns'] != lcdict['columns']:\n", " LOGERROR('file %s does not have the '\n", " 'same columns as first file %s, skipping...'\n", " % (lcf, lclist[0]))\n", " continue\n", "\n", " # otherwise, go ahead and start concatenatin'\n", " else:\n", "\n", " LOGINFO('adding %s (ndet: %s) to %s (ndet: %s)'\n", " % (lcf,\n", " thislcd['objectinfo']['ndet'],\n", " lclist[0],\n", " lcdict[lcdict['columns'][0]].size))\n", "\n", " # update LC tracking\n", " lccounter = lccounter + 1\n", " lcdict['concatenated'][lccounter] = os.path.abspath(lcf)\n", " lcdict['lcn'] = np.concatenate((\n", " lcdict['lcn'],\n", " np.full_like(thislcd['rjd'],lccounter)\n", " ))\n", "\n", " # concatenate the columns\n", " for col in lcdict['columns']:\n", "\n", " # handle normalization for magnitude columns\n", " if normalize and col in MAGCOLS:\n", "\n", " thismedval = np.nanmedian(thislcd[col])\n", "\n", " # handle fluxes\n", " if col in ('ifl1','ifl2','ifl3'):\n", " thislcd[col] = thislcd[col] / thismedval\n", " # handle mags\n", " else:\n", " thislcd[col] = thislcd[col] - thismedval\n", "\n", " # concatenate the values\n", " lcdict[col] = np.concatenate((lcdict[col], thislcd[col]))\n", "\n", " #\n", " # now we're all done concatenatin'\n", " #\n", "\n", " # make sure to add up the ndet\n", " lcdict['objectinfo']['ndet'] = lcdict[lcdict['columns'][0]].size\n", "\n", " # update the stations\n", " lcdict['objectinfo']['stations'] = [\n", " 'HP%s' % x for x in np.unique(lcdict['stf']).tolist()\n", " ]\n", "\n", " # update the total LC count\n", " lcdict['nconcatenated'] = lccounter + 1\n", "\n", " # if we're supposed to sort by a column, do so\n", " if 
sortby and sortby in [x[0] for x in COLDEFS]:\n", "\n", " LOGINFO('sorting concatenated light curve by %s...' % sortby)\n", " sortind = np.argsort(lcdict[sortby])\n", "\n", " # sort all the measurement columns by this index\n", " for col in lcdict['columns']:\n", " lcdict[col] = lcdict[col][sortind]\n", "\n", " # make sure to sort the lcn index as well\n", " lcdict['lcn'] = lcdict['lcn'][sortind]\n", "\n", " LOGINFO('done. concatenated light curve has %s detections' %\n", " lcdict['objectinfo']['ndet'])\n", " return lcdict" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012345679012345678, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.04, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.01818181818181818, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.037037037037037035, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.058823529411764705 ]
128
0.0013
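A brief usage sketch for the concatenate_textlcs record above. The import path and file names are assumptions (the record only defines the function body); the sortby/normalize arguments and the 'lcn', 'concatenated' and 'nconcatenated' keys come from the docstring itself.

from glob import glob
# Assumed import path for the function in the record above (not stated in the record).
from astrobase.hatsurveys.hatpilc import concatenate_textlcs

# Hypothetical per-CCD light curves for a single object.
lclist = sorted(glob('HAT-123-0001234-ccd*.tfalc'))
lcdict = concatenate_textlcs(lclist, sortby='rjd', normalize=True)

# 'lcn' maps every measurement back to an input LC index, and
# 'concatenated' maps that index to the input file path.
print(lcdict['nconcatenated'], lcdict['objectinfo']['ndet'])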
def msgBox(self, promptType, _timeout=-1, **options): ''' Send a user prompt request to the GUI Arguments: promptType (string): The prompt type to send to the GUI. Currently the only type supported is 'confirm'. _timeout (int): The optional amount of time for which the prompt should be displayed to the user before a timeout occurs. Defaults to -1 which indicates there is no timeout limit. options (dict): The keyword arguments that should be passed to the requested prompt type. Check prompt specific sections below for information on what arguments are expected to be present. Raises: ValueError: If the prompt type received is an unexpected value **Confirm Prompt** Display a message to the user and prompt them for a confirm/deny response to the message. Arguments: msg (string): The message to display to the user Returns: True if the user picks 'Confirm', False if the user picks 'Deny' Raises: KeyError: If the options passed to the prompt handler doesn't contain a `msg` attribute. APITimeoutError: If the timeout value is reached without receiving a response. ''' if promptType == 'confirm': return self._sendConfirmPrompt(_timeout, options) else: raise ValueError('Unknown prompt type: {}'.format(promptType))
[ "def", "msgBox", "(", "self", ",", "promptType", ",", "_timeout", "=", "-", "1", ",", "*", "*", "options", ")", ":", "if", "promptType", "==", "'confirm'", ":", "return", "self", ".", "_sendConfirmPrompt", "(", "_timeout", ",", "options", ")", "else", ":", "raise", "ValueError", "(", "'Unknown prompt type: {}'", ".", "format", "(", "promptType", ")", ")" ]
35
0.001813
[ "def msgBox(self, promptType, _timeout=-1, **options):\n", " ''' Send a user prompt request to the GUI\n", "\n", " Arguments:\n", " promptType (string):\n", " The prompt type to send to the GUI. Currently\n", " the only type supported is 'confirm'.\n", "\n", " _timeout (int):\n", " The optional amount of time for which the prompt\n", " should be displayed to the user before a timeout occurs.\n", " Defaults to -1 which indicates there is no timeout limit.\n", "\n", " options (dict):\n", " The keyword arguments that should be passed to the requested\n", " prompt type. Check prompt specific sections below for information on what\n", " arguments are expected to be present.\n", "\n", " Raises:\n", " ValueError:\n", " If the prompt type received is an unexpected value\n", "\n", " **Confirm Prompt**\n", "\n", " Display a message to the user and prompt them for a confirm/deny\n", " response to the message.\n", "\n", " Arguments:\n", " msg (string):\n", " The message to display to the user\n", "\n", " Returns:\n", " True if the user picks 'Confirm', False if the user picks 'Deny'\n", "\n", " Raises:\n", " KeyError:\n", " If the options passed to the prompt handler doesn't contain a\n", " `msg` attribute.\n", "\n", " APITimeoutError:\n", " If the timeout value is reached without receiving a response.\n", " '''\n", " if promptType == 'confirm':\n", " return self._sendConfirmPrompt(_timeout, options)\n", " else:\n", " raise ValueError('Unknown prompt type: {}'.format(promptType))" ]
[ 0, 0.02, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011111111111111112, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.013513513513513514 ]
46
0.00097
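A hedged call sketch for the msgBox record above. The gui object is hypothetical (an instance of whatever class defines msgBox); only the 'confirm' prompt type, the msg option, the _timeout argument and the ValueError behaviour are taken from the docstring.

# `gui` is a hypothetical instance of the class that defines msgBox above.
try:
    confirmed = gui.msgBox('confirm', _timeout=30, msg='Overwrite the existing file?')
except ValueError:
    confirmed = False   # an unknown prompt type was requested
print('confirmed' if confirmed else 'denied')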
def last_modified(self): """Retrieves the last modified time stamp of the incident/incidents from the output response Returns: last_modified(namedtuple): List of named tuples of last modified time stamp of the incident/incidents """ resource_list = self.traffic_incident() last_modified = namedtuple('last_modified', 'last_modified') if len(resource_list) == 1 and resource_list[0] is None: return None else: try: return [last_modified(resource['lastModified']) for resource in resource_list] except (KeyError, TypeError): return [last_modified(resource['LastModifiedUTC']) for resource in resource_list]
[ "def", "last_modified", "(", "self", ")", ":", "resource_list", "=", "self", ".", "traffic_incident", "(", ")", "last_modified", "=", "namedtuple", "(", "'last_modified'", ",", "'last_modified'", ")", "if", "len", "(", "resource_list", ")", "==", "1", "and", "resource_list", "[", "0", "]", "is", "None", ":", "return", "None", "else", ":", "try", ":", "return", "[", "last_modified", "(", "resource", "[", "'lastModified'", "]", ")", "for", "resource", "in", "resource_list", "]", "except", "(", "KeyError", ",", "TypeError", ")", ":", "return", "[", "last_modified", "(", "resource", "[", "'LastModifiedUTC'", "]", ")", "for", "resource", "in", "resource_list", "]" ]
41.631579
0.002472
[ "def last_modified(self):\n", " \"\"\"Retrieves the last modified time stamp of the incident/incidents\n", " from the output response\n", "\n", " Returns:\n", " last_modified(namedtuple): List of named tuples of last modified\n", " time stamp of the incident/incidents\n", " \"\"\"\n", " resource_list = self.traffic_incident()\n", " last_modified = namedtuple('last_modified', 'last_modified')\n", " if len(resource_list) == 1 and resource_list[0] is None:\n", " return None\n", " else:\n", " try:\n", " return [last_modified(resource['lastModified'])\n", " for resource in resource_list]\n", " except (KeyError, TypeError):\n", " return [last_modified(resource['LastModifiedUTC'])\n", " for resource in resource_list]" ]
[ 0, 0.013157894736842105, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.018518518518518517 ]
19
0.001667
def create_ad_hoc_field(cls, db_type): ''' Give an SQL column description such as "Enum8('apple' = 1, 'banana' = 2, 'orange' = 3)" this method returns a matching enum field. ''' import re try: Enum # exists in Python 3.4+ except NameError: from enum import Enum # use the enum34 library instead members = {} for match in re.finditer("'(\w+)' = (\d+)", db_type): members[match.group(1)] = int(match.group(2)) enum_cls = Enum('AdHocEnum', members) field_class = Enum8Field if db_type.startswith('Enum8') else Enum16Field return field_class(enum_cls)
[ "def", "create_ad_hoc_field", "(", "cls", ",", "db_type", ")", ":", "import", "re", "try", ":", "Enum", "# exists in Python 3.4+", "except", "NameError", ":", "from", "enum", "import", "Enum", "# use the enum34 library instead", "members", "=", "{", "}", "for", "match", "in", "re", ".", "finditer", "(", "\"'(\\w+)' = (\\d+)\"", ",", "db_type", ")", ":", "members", "[", "match", ".", "group", "(", "1", ")", "]", "=", "int", "(", "match", ".", "group", "(", "2", ")", ")", "enum_cls", "=", "Enum", "(", "'AdHocEnum'", ",", "members", ")", "field_class", "=", "Enum8Field", "if", "db_type", ".", "startswith", "(", "'Enum8'", ")", "else", "Enum16Field", "return", "field_class", "(", "enum_cls", ")" ]
41.5
0.011782
[ "def create_ad_hoc_field(cls, db_type):\n", " '''\n", " Give an SQL column description such as \"Enum8('apple' = 1, 'banana' = 2, 'orange' = 3)\"\n", " this method returns a matching enum field.\n", " '''\n", " import re\n", " try:\n", " Enum # exists in Python 3.4+\n", " except NameError:\n", " from enum import Enum # use the enum34 library instead\n", " members = {}\n", " for match in re.finditer(\"'(\\w+)' = (\\d+)\", db_type):\n", " members[match.group(1)] = int(match.group(2))\n", " enum_cls = Enum('AdHocEnum', members)\n", " field_class = Enum8Field if db_type.startswith('Enum8') else Enum16Field\n", " return field_class(enum_cls)" ]
[ 0, 0.08333333333333333, 0.010416666666666666, 0, 0, 0, 0, 0.024390243902439025, 0, 0.014925373134328358, 0, 0.03225806451612903, 0, 0, 0.012345679012345678, 0.027777777777777776 ]
16
0.01284
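A usage sketch for the create_ad_hoc_field record above. The import path is an assumption modelled on infi.clickhouse_orm; the column-description string and the Enum8Field return type come from the record's own docstring and code.

# Assumed import path; create_ad_hoc_field is taken to be a classmethod on the base enum field.
from infi.clickhouse_orm.fields import BaseEnumField

field = BaseEnumField.create_ad_hoc_field(
    "Enum8('apple' = 1, 'banana' = 2, 'orange' = 3)")
print(type(field).__name__)   # Enum8Field, per the startswith('Enum8') branch above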
def run_container(docker_client, backup_data): """Pull the Docker image and creates a container with a '/backup' volume. This volume will be mounted on the temporary workdir previously created. It will then start the container and return the container object. """ docker_client.pull(backup_data['image']) container = docker_client.create_container( image=backup_data['image'], volumes=['/backup'], command=backup_data['command']) docker_client.start(container.get('Id'), binds={ backup_data['workdir']: { 'bind': '/backup', 'ro': False } }) return container
[ "def", "run_container", "(", "docker_client", ",", "backup_data", ")", ":", "docker_client", ".", "pull", "(", "backup_data", "[", "'image'", "]", ")", "container", "=", "docker_client", ".", "create_container", "(", "image", "=", "backup_data", "[", "'image'", "]", ",", "volumes", "=", "[", "'/backup'", "]", ",", "command", "=", "backup_data", "[", "'command'", "]", ")", "docker_client", ".", "start", "(", "container", ".", "get", "(", "'Id'", ")", ",", "binds", "=", "{", "backup_data", "[", "'workdir'", "]", ":", "{", "'bind'", ":", "'/backup'", ",", "'ro'", ":", "False", "}", "}", ")", "return", "container" ]
31
0.00149
[ "def run_container(docker_client, backup_data):\n", " \"\"\"Pull the Docker image and creates a container\n", " with a '/backup' volume. This volume will be mounted\n", " on the temporary workdir previously created.\n", "\n", " It will then start the container and return the container object.\n", " \"\"\"\n", " docker_client.pull(backup_data['image'])\n", " container = docker_client.create_container(\n", " image=backup_data['image'],\n", " volumes=['/backup'],\n", " command=backup_data['command'])\n", "\n", " docker_client.start(container.get('Id'), binds={\n", " backup_data['workdir']:\n", " {\n", " 'bind': '/backup',\n", " 'ro': False\n", " }\n", " })\n", " return container" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.05 ]
21
0.002381
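A call sketch for the run_container record above. The docker client construction is an assumption based on the legacy docker-py API the record uses (pull/create_container/start); the backup_data keys are exactly the ones read in the function body, and their values here are hypothetical.

import docker   # assumption: legacy docker-py exposing Client with create_container/start

# run_container is the function from the record above, assumed to be in scope.
docker_client = docker.Client()
backup_data = {
    'image': 'busybox:latest',                      # hypothetical image
    'command': 'tar cf /backup/data.tar /data',     # hypothetical backup command
    'workdir': '/tmp/backup-workdir',               # temporary dir mounted at /backup
}
container = run_container(docker_client, backup_data)
print(container.get('Id'))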
def _path_to_be_kept(self, path): """Does the given path pass the filtering criteria?""" if self.excludes and (path in self.excludes or helpers.is_inside_any(self.excludes, path)): return False if self.includes: return (path in self.includes or helpers.is_inside_any(self.includes, path)) return True
[ "def", "_path_to_be_kept", "(", "self", ",", "path", ")", ":", "if", "self", ".", "excludes", "and", "(", "path", "in", "self", ".", "excludes", "or", "helpers", ".", "is_inside_any", "(", "self", ".", "excludes", ",", "path", ")", ")", ":", "return", "False", "if", "self", ".", "includes", ":", "return", "(", "path", "in", "self", ".", "includes", "or", "helpers", ".", "is_inside_any", "(", "self", ".", "includes", ",", "path", ")", ")", "return", "True" ]
42.222222
0.010309
[ "def _path_to_be_kept(self, path):\n", " \"\"\"Does the given path pass the filtering criteria?\"\"\"\n", " if self.excludes and (path in self.excludes\n", " or helpers.is_inside_any(self.excludes, path)):\n", " return False\n", " if self.includes:\n", " return (path in self.includes\n", " or helpers.is_inside_any(self.includes, path))\n", " return True" ]
[ 0, 0.015873015873015872, 0, 0.015625, 0, 0, 0, 0.015873015873015872, 0.05263157894736842 ]
9
0.011111
def get_all_lower(self): """Return all parent GO IDs through both reverse 'is_a' and all relationships.""" all_lower = set() for lower in self.get_goterms_lower(): all_lower.add(lower.item_id) all_lower |= lower.get_all_lower() return all_lower
[ "def", "get_all_lower", "(", "self", ")", ":", "all_lower", "=", "set", "(", ")", "for", "lower", "in", "self", ".", "get_goterms_lower", "(", ")", ":", "all_lower", ".", "add", "(", "lower", ".", "item_id", ")", "all_lower", "|=", "lower", ".", "get_all_lower", "(", ")", "return", "all_lower" ]
42
0.01
[ "def get_all_lower(self):\n", " \"\"\"Return all parent GO IDs through both reverse 'is_a' and all relationships.\"\"\"\n", " all_lower = set()\n", " for lower in self.get_goterms_lower():\n", " all_lower.add(lower.item_id)\n", " all_lower |= lower.get_all_lower()\n", " return all_lower" ]
[ 0, 0.022222222222222223, 0, 0, 0, 0, 0.041666666666666664 ]
7
0.009127
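A self-contained toy illustration of the recursion in the get_all_lower record above. The _Term class is a hypothetical stand-in (the real objects are goatools GO terms); only the item_id attribute, get_goterms_lower() and the set-union recursion mirror the record.

class _Term:
    """Minimal stand-in that mirrors the traversal in get_all_lower above."""
    def __init__(self, item_id, lower=()):
        self.item_id = item_id
        self._lower = list(lower)

    def get_goterms_lower(self):
        return self._lower

    def get_all_lower(self):
        all_lower = set()
        for lower in self.get_goterms_lower():
            all_lower.add(lower.item_id)
            all_lower |= lower.get_all_lower()
        return all_lower

leaf = _Term('GO:0000003')
mid_term = _Term('GO:0000002', [leaf])
root = _Term('GO:0000001', [mid_term])
print(root.get_all_lower())   # {'GO:0000002', 'GO:0000003'}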
def stop(self): """Halts the acquisition, this must be called before resetting acquisition""" try: self.aitask.stop() self.aotask.stop() pass except: print u"No task running" self.aitask = None self.aotask = None
[ "def", "stop", "(", "self", ")", ":", "try", ":", "self", ".", "aitask", ".", "stop", "(", ")", "self", ".", "aotask", ".", "stop", "(", ")", "pass", "except", ":", "print", "u\"No task running\"", "self", ".", "aitask", "=", "None", "self", ".", "aotask", "=", "None" ]
29.6
0.016393
[ "def stop(self):\n", " \"\"\"Halts the acquisition, this must be called before resetting acquisition\"\"\"\n", " try:\n", " self.aitask.stop()\n", " self.aotask.stop()\n", " pass\n", " except: \n", " print u\"No task running\"\n", " self.aitask = None\n", " self.aotask = None" ]
[ 0, 0.023255813953488372, 0, 0, 0, 0, 0.09523809523809523, 0, 0, 0.038461538461538464 ]
10
0.015696
def operation_recorder(self): """ :class:`BaseOperationRecorder`: **Deprecated:** The operation recorder that was last added to the connection, or `None` if the connection does not currently have any recorders. *New in pywbem 0.9 as experimental. Deprecated since pywbem 0.12.* Instead of using this deprecated property, the :attr:`~pywbem.WBEMConnection.operation_recorders` property should be used to retrieve the recorders of the connection, and the :meth:`~pywbem.WBEMConnection.add_operation_recorder` method should be used to add a recorder. This property is settable; setting this property will cause the specified operation recorder to be added to the connection as if :meth:`~pywbem.WBEMConnection.add_operation_recorder` was used. `None` is not permitted as a new value for this property. Raises: ValueError: Operation recorder must not be `None`. ValueError: Cannot add the same recorder class multiple times. """ warnings.warn( "Reading the WBEMConnection.operation_recorder property has been " "deprecated. Use the operation_recorders property instead.", DeprecationWarning, 2) try: last_recorder = self._operation_recorders[-1] except IndexError: last_recorder = None return last_recorder
[ "def", "operation_recorder", "(", "self", ")", ":", "warnings", ".", "warn", "(", "\"Reading the WBEMConnection.operation_recorder property has been \"", "\"deprecated. Use the operation_recorders property instead.\"", ",", "DeprecationWarning", ",", "2", ")", "try", ":", "last_recorder", "=", "self", ".", "_operation_recorders", "[", "-", "1", "]", "except", "IndexError", ":", "last_recorder", "=", "None", "return", "last_recorder" ]
42.878788
0.001382
[ "def operation_recorder(self):\n", " \"\"\"\n", " :class:`BaseOperationRecorder`: **Deprecated:** The operation recorder\n", " that was last added to the connection, or `None` if the connection does\n", " not currently have any recorders.\n", "\n", " *New in pywbem 0.9 as experimental. Deprecated since pywbem 0.12.*\n", "\n", " Instead of using this deprecated property, the\n", " :attr:`~pywbem.WBEMConnection.operation_recorders` property should be\n", " used to retrieve the recorders of the connection, and the\n", " :meth:`~pywbem.WBEMConnection.add_operation_recorder` method should be\n", " used to add a recorder.\n", "\n", " This property is settable; setting this property will cause the\n", " specified operation recorder to be added to the connection as if\n", " :meth:`~pywbem.WBEMConnection.add_operation_recorder` was used.\n", " `None` is not permitted as a new value for this property.\n", "\n", " Raises:\n", "\n", " ValueError: Operation recorder must not be `None`.\n", " ValueError: Cannot add the same recorder class multiple times.\n", " \"\"\"\n", " warnings.warn(\n", " \"Reading the WBEMConnection.operation_recorder property has been \"\n", " \"deprecated. Use the operation_recorders property instead.\",\n", " DeprecationWarning, 2)\n", " try:\n", " last_recorder = self._operation_recorders[-1]\n", " except IndexError:\n", " last_recorder = None\n", " return last_recorder" ]
[ 0, 0.08333333333333333, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.03571428571428571 ]
33
0.003608
def dfsummary_made(self): """check if the summary table exists""" try: empty = self.dfsummary.empty except AttributeError: empty = True return not empty
[ "def", "dfsummary_made", "(", "self", ")", ":", "try", ":", "empty", "=", "self", ".", "dfsummary", ".", "empty", "except", "AttributeError", ":", "empty", "=", "True", "return", "not", "empty" ]
28.857143
0.009615
[ "def dfsummary_made(self):\n", " \"\"\"check if the summary table exists\"\"\"\n", " try:\n", " empty = self.dfsummary.empty\n", " except AttributeError:\n", " empty = True\n", " return not empty" ]
[ 0, 0.020833333333333332, 0, 0, 0, 0, 0.041666666666666664 ]
7
0.008929
def list_requests(status=None): ''' List certificate requests made to CertCentral. You can filter by status: ``pending``, ``approved``, ``rejected`` CLI Example: .. code-block:: bash salt-run digicert.list_requests pending ''' if status: url = '{0}/request?status={1}'.format(_base_url(), status) else: url = '{0}/request'.format(_base_url()) reqs = _paginate(url, "requests", method='GET', decode=True, decode_type='json', raise_error=False, header_dict={ 'X-DC-DEVKEY': _api_key(), 'Content-Type': 'application/json', } ) ret = {'requests': reqs} return ret
[ "def", "list_requests", "(", "status", "=", "None", ")", ":", "if", "status", ":", "url", "=", "'{0}/request?status={1}'", ".", "format", "(", "_base_url", "(", ")", ",", "status", ")", "else", ":", "url", "=", "'{0}/request'", ".", "format", "(", "_base_url", "(", ")", ")", "reqs", "=", "_paginate", "(", "url", ",", "\"requests\"", ",", "method", "=", "'GET'", ",", "decode", "=", "True", ",", "decode_type", "=", "'json'", ",", "raise_error", "=", "False", ",", "header_dict", "=", "{", "'X-DC-DEVKEY'", ":", "_api_key", "(", ")", ",", "'Content-Type'", ":", "'application/json'", ",", "}", ")", "ret", "=", "{", "'requests'", ":", "reqs", "}", "return", "ret" ]
26.8
0.002401
[ "def list_requests(status=None):\n", " '''\n", " List certificate requests made to CertCentral. You can filter by\n", " status: ``pending``, ``approved``, ``rejected``\n", "\n", " CLI Example:\n", "\n", " .. code-block:: bash\n", "\n", " salt-run digicert.list_requests pending\n", " '''\n", " if status:\n", " url = '{0}/request?status={1}'.format(_base_url(), status)\n", " else:\n", " url = '{0}/request'.format(_base_url())\n", "\n", " reqs = _paginate(url,\n", " \"requests\",\n", " method='GET',\n", " decode=True,\n", " decode_type='json',\n", " raise_error=False,\n", " header_dict={\n", " 'X-DC-DEVKEY': _api_key(),\n", " 'Content-Type': 'application/json',\n", " }\n", " )\n", "\n", " ret = {'requests': reqs}\n", " return ret" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.16666666666666666, 0, 0, 0.07142857142857142 ]
30
0.007937
def apply_same_chip_constraints(vertices_resources, nets, constraints): """Modify a set of vertices_resources, nets and constraints to account for all SameChipConstraints. To allow placement algorithms to handle SameChipConstraints without any special cases, Vertices identified in a SameChipConstraint are merged into a new vertex whose vertices_resources are the sum total of their parts which may be placed as if a single vertex. Once placed, the placement can be expanded into a full placement of all the original vertices using :py:func:`finalise_same_chip_constraints`. A typical use pattern might look like:: def my_placer(vertices_resources, nets, machine, constraints): # Should be done first thing since this may redefine # vertices_resources, nets and constraints. vertices_resources, nets, constraints, substitutions = \\ apply_same_chip_constraints(vertices_resources, nets, constraints) # ...deal with other types of constraint... # ...perform placement... finalise_same_chip_constraints(substitutions, placements) return placements Note that this function does not modify its arguments but rather returns new copies of the structures supplied. Parameters ---------- vertices_resources : {vertex: {resource: quantity, ...}, ...} nets : [:py:class:`~rig.netlist.Net`, ...] constraints : [constraint, ...] Returns ------- (vertices_resources, nets, constraints, substitutions) The vertices_resources, nets and constraints values contain modified copies of the supplied data structures modified to contain a single vertex in place of the individual constrained vertices. substitutions is a list of :py:class:`MergedVertex` objects which resulted from the combining of the constrained vertices. The order of the list is the order the substitutions were carried out. The :py:func:`finalise_same_chip_constraints` function can be used to expand a set of substitutions. """ # Make a copy of the basic structures to be modified by this function vertices_resources = vertices_resources.copy() nets = nets[:] constraints = constraints[:] substitutions = [] for same_chip_constraint in constraints: if not isinstance(same_chip_constraint, SameChipConstraint): continue # Skip constraints which don't actually merge anything... if len(same_chip_constraint.vertices) <= 1: continue # The new (merged) vertex with which to replace the constrained # vertices merged_vertex = MergedVertex(same_chip_constraint.vertices) substitutions.append(merged_vertex) # A set containing the set of vertices to be merged (to remove # duplicates) merged_vertices = set(same_chip_constraint.vertices) # Remove the merged vertices from the set of vertices resources and # accumulate the total resources consumed. Note add_resources is not # used since we don't know if the resources consumed by each vertex are # overlapping. 
total_resources = {} for vertex in merged_vertices: resources = vertices_resources.pop(vertex) for resource, value in iteritems(resources): total_resources[resource] = (total_resources.get(resource, 0) + value) vertices_resources[merged_vertex] = total_resources # Update any nets which pointed to a merged vertex for net_num, net in enumerate(nets): net_changed = False # Change net sources if net.source in merged_vertices: net_changed = True net = Net(merged_vertex, net.sinks, net.weight) # Change net sinks for sink_num, sink in enumerate(net.sinks): if sink in merged_vertices: if not net_changed: net = Net(net.source, net.sinks, net.weight) net_changed = True net.sinks[sink_num] = merged_vertex if net_changed: nets[net_num] = net # Update any constraints which refer to a merged vertex for constraint_num, constraint in enumerate(constraints): if isinstance(constraint, LocationConstraint): if constraint.vertex in merged_vertices: constraints[constraint_num] = LocationConstraint( merged_vertex, constraint.location) elif isinstance(constraint, SameChipConstraint): if not set(constraint.vertices).isdisjoint(merged_vertices): constraints[constraint_num] = SameChipConstraint([ merged_vertex if v in merged_vertices else v for v in constraint.vertices ]) elif isinstance(constraint, RouteEndpointConstraint): if constraint.vertex in merged_vertices: constraints[constraint_num] = RouteEndpointConstraint( merged_vertex, constraint.route) return (vertices_resources, nets, constraints, substitutions)
[ "def", "apply_same_chip_constraints", "(", "vertices_resources", ",", "nets", ",", "constraints", ")", ":", "# Make a copy of the basic structures to be modified by this function", "vertices_resources", "=", "vertices_resources", ".", "copy", "(", ")", "nets", "=", "nets", "[", ":", "]", "constraints", "=", "constraints", "[", ":", "]", "substitutions", "=", "[", "]", "for", "same_chip_constraint", "in", "constraints", ":", "if", "not", "isinstance", "(", "same_chip_constraint", ",", "SameChipConstraint", ")", ":", "continue", "# Skip constraints which don't actually merge anything...", "if", "len", "(", "same_chip_constraint", ".", "vertices", ")", "<=", "1", ":", "continue", "# The new (merged) vertex with which to replace the constrained", "# vertices", "merged_vertex", "=", "MergedVertex", "(", "same_chip_constraint", ".", "vertices", ")", "substitutions", ".", "append", "(", "merged_vertex", ")", "# A set containing the set of vertices to be merged (to remove", "# duplicates)", "merged_vertices", "=", "set", "(", "same_chip_constraint", ".", "vertices", ")", "# Remove the merged vertices from the set of vertices resources and", "# accumulate the total resources consumed. Note add_resources is not", "# used since we don't know if the resources consumed by each vertex are", "# overlapping.", "total_resources", "=", "{", "}", "for", "vertex", "in", "merged_vertices", ":", "resources", "=", "vertices_resources", ".", "pop", "(", "vertex", ")", "for", "resource", ",", "value", "in", "iteritems", "(", "resources", ")", ":", "total_resources", "[", "resource", "]", "=", "(", "total_resources", ".", "get", "(", "resource", ",", "0", ")", "+", "value", ")", "vertices_resources", "[", "merged_vertex", "]", "=", "total_resources", "# Update any nets which pointed to a merged vertex", "for", "net_num", ",", "net", "in", "enumerate", "(", "nets", ")", ":", "net_changed", "=", "False", "# Change net sources", "if", "net", ".", "source", "in", "merged_vertices", ":", "net_changed", "=", "True", "net", "=", "Net", "(", "merged_vertex", ",", "net", ".", "sinks", ",", "net", ".", "weight", ")", "# Change net sinks", "for", "sink_num", ",", "sink", "in", "enumerate", "(", "net", ".", "sinks", ")", ":", "if", "sink", "in", "merged_vertices", ":", "if", "not", "net_changed", ":", "net", "=", "Net", "(", "net", ".", "source", ",", "net", ".", "sinks", ",", "net", ".", "weight", ")", "net_changed", "=", "True", "net", ".", "sinks", "[", "sink_num", "]", "=", "merged_vertex", "if", "net_changed", ":", "nets", "[", "net_num", "]", "=", "net", "# Update any constraints which refer to a merged vertex", "for", "constraint_num", ",", "constraint", "in", "enumerate", "(", "constraints", ")", ":", "if", "isinstance", "(", "constraint", ",", "LocationConstraint", ")", ":", "if", "constraint", ".", "vertex", "in", "merged_vertices", ":", "constraints", "[", "constraint_num", "]", "=", "LocationConstraint", "(", "merged_vertex", ",", "constraint", ".", "location", ")", "elif", "isinstance", "(", "constraint", ",", "SameChipConstraint", ")", ":", "if", "not", "set", "(", "constraint", ".", "vertices", ")", ".", "isdisjoint", "(", "merged_vertices", ")", ":", "constraints", "[", "constraint_num", "]", "=", "SameChipConstraint", "(", "[", "merged_vertex", "if", "v", "in", "merged_vertices", "else", "v", "for", "v", "in", "constraint", ".", "vertices", "]", ")", "elif", "isinstance", "(", "constraint", ",", "RouteEndpointConstraint", ")", ":", "if", "constraint", ".", "vertex", "in", "merged_vertices", ":", 
"constraints", "[", "constraint_num", "]", "=", "RouteEndpointConstraint", "(", "merged_vertex", ",", "constraint", ".", "route", ")", "return", "(", "vertices_resources", ",", "nets", ",", "constraints", ",", "substitutions", ")" ]
43.081301
0.000184
[ "def apply_same_chip_constraints(vertices_resources, nets, constraints):\n", " \"\"\"Modify a set of vertices_resources, nets and constraints to account for\n", " all SameChipConstraints.\n", "\n", " To allow placement algorithms to handle SameChipConstraints without any\n", " special cases, Vertices identified in a SameChipConstraint are merged into\n", " a new vertex whose vertices_resources are the sum total of their parts\n", " which may be placed as if a single vertex. Once placed, the placement can\n", " be expanded into a full placement of all the original vertices using\n", " :py:func:`finalise_same_chip_constraints`.\n", "\n", " A typical use pattern might look like::\n", "\n", " def my_placer(vertices_resources, nets, machine, constraints):\n", " # Should be done first thing since this may redefine\n", " # vertices_resources, nets and constraints.\n", " vertices_resources, nets, constraints, substitutions = \\\\\n", " apply_same_chip_constraints(vertices_resources,\n", " nets, constraints)\n", "\n", " # ...deal with other types of constraint...\n", "\n", " # ...perform placement...\n", "\n", " finalise_same_chip_constraints(substitutions, placements)\n", " return placements\n", "\n", " Note that this function does not modify its arguments but rather returns\n", " new copies of the structures supplied.\n", "\n", " Parameters\n", " ----------\n", " vertices_resources : {vertex: {resource: quantity, ...}, ...}\n", " nets : [:py:class:`~rig.netlist.Net`, ...]\n", " constraints : [constraint, ...]\n", "\n", " Returns\n", " -------\n", " (vertices_resources, nets, constraints, substitutions)\n", " The vertices_resources, nets and constraints values contain modified\n", " copies of the supplied data structures modified to contain a single\n", " vertex in place of the individual constrained vertices.\n", "\n", " substitutions is a list of :py:class:`MergedVertex` objects which\n", " resulted from the combining of the constrained vertices. The order of\n", " the list is the order the substitutions were carried out. The\n", " :py:func:`finalise_same_chip_constraints` function can be used to\n", " expand a set of substitutions.\n", " \"\"\"\n", " # Make a copy of the basic structures to be modified by this function\n", " vertices_resources = vertices_resources.copy()\n", " nets = nets[:]\n", " constraints = constraints[:]\n", "\n", " substitutions = []\n", "\n", " for same_chip_constraint in constraints:\n", " if not isinstance(same_chip_constraint, SameChipConstraint):\n", " continue\n", "\n", " # Skip constraints which don't actually merge anything...\n", " if len(same_chip_constraint.vertices) <= 1:\n", " continue\n", "\n", " # The new (merged) vertex with which to replace the constrained\n", " # vertices\n", " merged_vertex = MergedVertex(same_chip_constraint.vertices)\n", " substitutions.append(merged_vertex)\n", "\n", " # A set containing the set of vertices to be merged (to remove\n", " # duplicates)\n", " merged_vertices = set(same_chip_constraint.vertices)\n", "\n", " # Remove the merged vertices from the set of vertices resources and\n", " # accumulate the total resources consumed. 
Note add_resources is not\n", " # used since we don't know if the resources consumed by each vertex are\n", " # overlapping.\n", " total_resources = {}\n", " for vertex in merged_vertices:\n", " resources = vertices_resources.pop(vertex)\n", " for resource, value in iteritems(resources):\n", " total_resources[resource] = (total_resources.get(resource, 0) +\n", " value)\n", " vertices_resources[merged_vertex] = total_resources\n", "\n", " # Update any nets which pointed to a merged vertex\n", " for net_num, net in enumerate(nets):\n", " net_changed = False\n", "\n", " # Change net sources\n", " if net.source in merged_vertices:\n", " net_changed = True\n", " net = Net(merged_vertex, net.sinks, net.weight)\n", "\n", " # Change net sinks\n", " for sink_num, sink in enumerate(net.sinks):\n", " if sink in merged_vertices:\n", " if not net_changed:\n", " net = Net(net.source, net.sinks, net.weight)\n", " net_changed = True\n", " net.sinks[sink_num] = merged_vertex\n", "\n", " if net_changed:\n", " nets[net_num] = net\n", "\n", " # Update any constraints which refer to a merged vertex\n", " for constraint_num, constraint in enumerate(constraints):\n", " if isinstance(constraint, LocationConstraint):\n", " if constraint.vertex in merged_vertices:\n", " constraints[constraint_num] = LocationConstraint(\n", " merged_vertex, constraint.location)\n", " elif isinstance(constraint, SameChipConstraint):\n", " if not set(constraint.vertices).isdisjoint(merged_vertices):\n", " constraints[constraint_num] = SameChipConstraint([\n", " merged_vertex if v in merged_vertices else v\n", " for v in constraint.vertices\n", " ])\n", " elif isinstance(constraint, RouteEndpointConstraint):\n", " if constraint.vertex in merged_vertices:\n", " constraints[constraint_num] = RouteEndpointConstraint(\n", " merged_vertex, constraint.route)\n", "\n", " return (vertices_resources, nets, constraints, substitutions)" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.015384615384615385 ]
123
0.000125
def p_type_specifier_2(t): """type_specifier : INT | HYPER | FLOAT | DOUBLE | QUADRUPLE | BOOL | ID | UNSIGNED | enum_type_spec | struct_type_spec | union_type_spec""" # FRED - Note UNSIGNED is not in spec if isinstance(t[1], type_info): t[0] = t[1] else: t[0] = type_info(t[1], t.lineno(1))
[ "def", "p_type_specifier_2", "(", "t", ")", ":", "# FRED - Note UNSIGNED is not in spec", "if", "isinstance", "(", "t", "[", "1", "]", ",", "type_info", ")", ":", "t", "[", "0", "]", "=", "t", "[", "1", "]", "else", ":", "t", "[", "0", "]", "=", "type_info", "(", "t", "[", "1", "]", ",", "t", ".", "lineno", "(", "1", ")", ")" ]
31
0.001842
[ "def p_type_specifier_2(t):\n", " \"\"\"type_specifier : INT\n", " | HYPER\n", " | FLOAT\n", " | DOUBLE\n", " | QUADRUPLE\n", " | BOOL\n", " | ID\n", " | UNSIGNED\n", " | enum_type_spec\n", " | struct_type_spec\n", " | union_type_spec\"\"\"\n", " # FRED - Note UNSIGNED is not in spec\n", " if isinstance(t[1], type_info):\n", " t[0] = t[1]\n", " else:\n", " t[0] = type_info(t[1], t.lineno(1))" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.023255813953488372 ]
17
0.001368
def grad(self, X, *params): """ Return the gradient of the basis function for each parameter. Parameters ---------- X : ndarray (N, d) array of observations where N is the number of samples, and d is the dimensionality of X. *params : optional parameter aguments, these are the parameters of the concatenated bases `in the order` they were concatenated. Returns ------- list or ndarray : this will be a list of ndarrays if there are multiple parameters, or just an ndarray if there is a single parameter. The ndarrays can have more than two dimensions (i.e. tensors of rank > 2), depending on the dimensions of the basis function parameters. If there are *no* parameters, ``[]`` is returned. """ # Establish a few dimensions N = X.shape[0] D = self.get_dim(X) endinds = self.__base_locations(X) # for the Padding indices args = list(params) # Generate structured gradients with appropriate zero padding def make_dPhi(i, g): # Pad the gradient with respect to the total basis dimensionality dPhi_dim = (N, D) if g.ndim < 3 else (N, D, g.shape[2]) dPhi = np.zeros(dPhi_dim) dPhi[:, endinds[i]:endinds[i + 1]] = g return dPhi # Get gradients from each basis for i, base in enumerate(self.bases): # evaluate gradient and deal with multiple parameter gradients by # keeping track of the basis index g, args, sargs = base._grad_popargs(X, *args) for gg in atleast_tuple(g): if len(gg) == 0: continue yield make_dPhi(i, gg)
[ "def", "grad", "(", "self", ",", "X", ",", "*", "params", ")", ":", "# Establish a few dimensions", "N", "=", "X", ".", "shape", "[", "0", "]", "D", "=", "self", ".", "get_dim", "(", "X", ")", "endinds", "=", "self", ".", "__base_locations", "(", "X", ")", "# for the Padding indices", "args", "=", "list", "(", "params", ")", "# Generate structured gradients with appropriate zero padding", "def", "make_dPhi", "(", "i", ",", "g", ")", ":", "# Pad the gradient with respect to the total basis dimensionality", "dPhi_dim", "=", "(", "N", ",", "D", ")", "if", "g", ".", "ndim", "<", "3", "else", "(", "N", ",", "D", ",", "g", ".", "shape", "[", "2", "]", ")", "dPhi", "=", "np", ".", "zeros", "(", "dPhi_dim", ")", "dPhi", "[", ":", ",", "endinds", "[", "i", "]", ":", "endinds", "[", "i", "+", "1", "]", "]", "=", "g", "return", "dPhi", "# Get gradients from each basis", "for", "i", ",", "base", "in", "enumerate", "(", "self", ".", "bases", ")", ":", "# evaluate gradient and deal with multiple parameter gradients by", "# keeping track of the basis index", "g", ",", "args", ",", "sargs", "=", "base", ".", "_grad_popargs", "(", "X", ",", "*", "args", ")", "for", "gg", "in", "atleast_tuple", "(", "g", ")", ":", "if", "len", "(", "gg", ")", "==", "0", ":", "continue", "yield", "make_dPhi", "(", "i", ",", "gg", ")" ]
36.673469
0.001084
[ "def grad(self, X, *params):\n", " \"\"\"\n", " Return the gradient of the basis function for each parameter.\n", "\n", " Parameters\n", " ----------\n", " X : ndarray\n", " (N, d) array of observations where N is the number of samples, and\n", " d is the dimensionality of X.\n", " *params : optional\n", " parameter aguments, these are the parameters of the concatenated\n", " bases `in the order` they were concatenated.\n", "\n", " Returns\n", " -------\n", " list or ndarray :\n", " this will be a list of ndarrays if there are multiple parameters,\n", " or just an ndarray if there is a single parameter. The ndarrays can\n", " have more than two dimensions (i.e. tensors of rank > 2), depending\n", " on the dimensions of the basis function parameters. If there are\n", " *no* parameters, ``[]`` is returned.\n", " \"\"\"\n", " # Establish a few dimensions\n", " N = X.shape[0]\n", " D = self.get_dim(X)\n", " endinds = self.__base_locations(X) # for the Padding indices\n", " args = list(params)\n", "\n", " # Generate structured gradients with appropriate zero padding\n", " def make_dPhi(i, g):\n", "\n", " # Pad the gradient with respect to the total basis dimensionality\n", " dPhi_dim = (N, D) if g.ndim < 3 else (N, D, g.shape[2])\n", " dPhi = np.zeros(dPhi_dim)\n", " dPhi[:, endinds[i]:endinds[i + 1]] = g\n", "\n", " return dPhi\n", "\n", " # Get gradients from each basis\n", " for i, base in enumerate(self.bases):\n", "\n", " # evaluate gradient and deal with multiple parameter gradients by\n", " # keeping track of the basis index\n", " g, args, sargs = base._grad_popargs(X, *args)\n", "\n", " for gg in atleast_tuple(g):\n", " if len(gg) == 0:\n", " continue\n", " yield make_dPhi(i, gg)" ]
[ 0, 0.08333333333333333, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.02631578947368421 ]
49
0.002238
def wait_for_click(self, button, timeOut=10.0): """ Wait for a mouse click Usage: C{mouse.wait_for_click(self, button, timeOut=10.0)} @param button: they mouse button click to wait for as a button number, 1-9 @param timeOut: maximum time, in seconds, to wait for the keypress to occur """ button = int(button) w = iomediator.Waiter(None, None, button, timeOut) w.wait()
[ "def", "wait_for_click", "(", "self", ",", "button", ",", "timeOut", "=", "10.0", ")", ":", "button", "=", "int", "(", "button", ")", "w", "=", "iomediator", ".", "Waiter", "(", "None", ",", "None", ",", "button", ",", "timeOut", ")", "w", ".", "wait", "(", ")" ]
36.666667
0.011086
[ "def wait_for_click(self, button, timeOut=10.0):\n", " \"\"\"\n", " Wait for a mouse click\n", " \n", " Usage: C{mouse.wait_for_click(self, button, timeOut=10.0)}\n", "\n", " @param button: they mouse button click to wait for as a button number, 1-9\n", " @param timeOut: maximum time, in seconds, to wait for the keypress to occur\n", " \"\"\"\n", " button = int(button)\n", " w = iomediator.Waiter(None, None, button, timeOut)\n", " w.wait()" ]
[ 0, 0.08333333333333333, 0, 0.1111111111111111, 0, 0, 0.012048192771084338, 0.011904761904761904, 0, 0, 0, 0.0625 ]
12
0.023408
def _fix_missing_tenant_id(self, context, body, key): """Will add the tenant_id to the context from body. It is assumed that the body must have a tenant_id because neutron core could never have gotten here otherwise. """ if not body: raise n_exc.BadRequest(resource=key, msg="Body malformed") resource = body.get(key) if not resource: raise n_exc.BadRequest(resource=key, msg="Body malformed") if context.tenant_id is None: context.tenant_id = resource.get("tenant_id") if context.tenant_id is None: msg = _("Running without keystone AuthN requires " "that tenant_id is specified") raise n_exc.BadRequest(resource=key, msg=msg)
[ "def", "_fix_missing_tenant_id", "(", "self", ",", "context", ",", "body", ",", "key", ")", ":", "if", "not", "body", ":", "raise", "n_exc", ".", "BadRequest", "(", "resource", "=", "key", ",", "msg", "=", "\"Body malformed\"", ")", "resource", "=", "body", ".", "get", "(", "key", ")", "if", "not", "resource", ":", "raise", "n_exc", ".", "BadRequest", "(", "resource", "=", "key", ",", "msg", "=", "\"Body malformed\"", ")", "if", "context", ".", "tenant_id", "is", "None", ":", "context", ".", "tenant_id", "=", "resource", ".", "get", "(", "\"tenant_id\"", ")", "if", "context", ".", "tenant_id", "is", "None", ":", "msg", "=", "_", "(", "\"Running without keystone AuthN requires \"", "\"that tenant_id is specified\"", ")", "raise", "n_exc", ".", "BadRequest", "(", "resource", "=", "key", ",", "msg", "=", "msg", ")" ]
43.789474
0.002353
[ "def _fix_missing_tenant_id(self, context, body, key):\n", " \"\"\"Will add the tenant_id to the context from body.\n", "\n", " It is assumed that the body must have a tenant_id because neutron\n", " core could never have gotten here otherwise.\n", " \"\"\"\n", " if not body:\n", " raise n_exc.BadRequest(resource=key,\n", " msg=\"Body malformed\")\n", " resource = body.get(key)\n", " if not resource:\n", " raise n_exc.BadRequest(resource=key,\n", " msg=\"Body malformed\")\n", " if context.tenant_id is None:\n", " context.tenant_id = resource.get(\"tenant_id\")\n", " if context.tenant_id is None:\n", " msg = _(\"Running without keystone AuthN requires \"\n", " \"that tenant_id is specified\")\n", " raise n_exc.BadRequest(resource=key, msg=msg)" ]
[ 0, 0.016666666666666666, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.017543859649122806 ]
19
0.001801
def search(self, **kwargs): """Searches for files/folders Args: \*\*kwargs (dict): A dictionary containing necessary parameters (check https://developers.box.com/docs/#search for list of parameters) Returns: dict. Response from Box. Raises: BoxError: An error response is returned from Box (status_code >= 400). BoxHttpResponseError: Response from Box is malformed. requests.exceptions.*: Any connection related problem. """ query_string = {} for key, value in kwargs.iteritems(): query_string[key] = value return self.__request("GET","search",querystring=query_string)
[ "def", "search", "(", "self", ",", "*", "*", "kwargs", ")", ":", "query_string", "=", "{", "}", "for", "key", ",", "value", "in", "kwargs", ".", "iteritems", "(", ")", ":", "query_string", "[", "key", "]", "=", "value", "return", "self", ".", "__request", "(", "\"GET\"", ",", "\"search\"", ",", "querystring", "=", "query_string", ")" ]
33.863636
0.009138
[ "def search(self, **kwargs):\n", " \"\"\"Searches for files/folders\n", "\n", " Args:\n", " \\*\\*kwargs (dict): A dictionary containing necessary parameters\n", " (check https://developers.box.com/docs/#search for\n", " list of parameters)\n", "\n", " Returns:\n", " dict. Response from Box.\n", "\n", " Raises:\n", " BoxError: An error response is returned from Box (status_code >= 400).\n", "\n", " BoxHttpResponseError: Response from Box is malformed.\n", "\n", " requests.exceptions.*: Any connection related problem.\n", " \"\"\"\n", " query_string = {}\n", " for key, value in kwargs.iteritems():\n", " query_string[key] = value\n", " return self.__request(\"GET\",\"search\",querystring=query_string)" ]
[ 0, 0.02631578947368421, 0, 0, 0.02631578947368421, 0, 0, 0, 0, 0, 0, 0, 0.012048192771084338, 0, 0, 0, 0, 0, 0, 0, 0, 0.04285714285714286 ]
22
0.004888
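A hedged usage sketch for the search record above. box_client is a hypothetical, already-authenticated instance of the class defining search(); the record only shows that every keyword argument is forwarded as a query-string parameter on the GET search request.

# Hypothetical call; every keyword becomes a query-string parameter.
results = box_client.search(query='project plan', limit=10, offset=0)
print(results)   # parsed JSON response from Box, per the docstring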
def read_cands(filename): """Read in the contents of a cands comb file""" import sre lines=file(filename).readlines() exps=[] cands=[] coo=[] for line in lines: if ( line[0:2]=="##" ) : break exps.append(line[2:].strip()) for line in lines: if ( line[0]=="#" ) : continue if len(line.strip())==0: if len(coo)!=0: cands.append(coo) coo=[] continue vals=line.split() cols=['x','y','x_0','y_0','flux','size','max_int','elon'] values={} for j in range(len(cols)): col=cols.pop().strip() val=vals.pop().strip() values[col]=float(val) coo.append(values) cands.append(coo) return {'fileId': exps, 'cands': cands}
[ "def", "read_cands", "(", "filename", ")", ":", "import", "sre", "lines", "=", "file", "(", "filename", ")", ".", "readlines", "(", ")", "exps", "=", "[", "]", "cands", "=", "[", "]", "coo", "=", "[", "]", "for", "line", "in", "lines", ":", "if", "(", "line", "[", "0", ":", "2", "]", "==", "\"##\"", ")", ":", "break", "exps", ".", "append", "(", "line", "[", "2", ":", "]", ".", "strip", "(", ")", ")", "for", "line", "in", "lines", ":", "if", "(", "line", "[", "0", "]", "==", "\"#\"", ")", ":", "continue", "if", "len", "(", "line", ".", "strip", "(", ")", ")", "==", "0", ":", "if", "len", "(", "coo", ")", "!=", "0", ":", "cands", ".", "append", "(", "coo", ")", "coo", "=", "[", "]", "continue", "vals", "=", "line", ".", "split", "(", ")", "cols", "=", "[", "'x'", ",", "'y'", ",", "'x_0'", ",", "'y_0'", ",", "'flux'", ",", "'size'", ",", "'max_int'", ",", "'elon'", "]", "values", "=", "{", "}", "for", "j", "in", "range", "(", "len", "(", "cols", ")", ")", ":", "col", "=", "cols", ".", "pop", "(", ")", ".", "strip", "(", ")", "val", "=", "vals", ".", "pop", "(", ")", ".", "strip", "(", ")", "values", "[", "col", "]", "=", "float", "(", "val", ")", "coo", ".", "append", "(", "values", ")", "cands", ".", "append", "(", "coo", ")", "return", "{", "'fileId'", ":", "exps", ",", "'cands'", ":", "cands", "}" ]
25.967742
0.035928
[ "def read_cands(filename):\n", " \"\"\"Read in the contents of a cands comb file\"\"\"\n", " import sre\n", " \n", " lines=file(filename).readlines()\n", " exps=[]\n", " cands=[]\n", " coo=[]\n", " for line in lines:\n", " if ( line[0:2]==\"##\" ) :\n", " break\n", " exps.append(line[2:].strip())\n", "\n", " for line in lines:\n", " if ( line[0]==\"#\" ) :\n", " continue\n", " if len(line.strip())==0:\n", " if len(coo)!=0:\n", " cands.append(coo)\n", " coo=[]\n", " continue\n", " vals=line.split()\n", " cols=['x','y','x_0','y_0','flux','size','max_int','elon']\n", " values={}\n", " for j in range(len(cols)):\n", " col=cols.pop().strip()\n", " val=vals.pop().strip()\n", " values[col]=float(val)\n", " coo.append(values)\n", " cands.append(coo)\n", " return {'fileId': exps, 'cands': cands}" ]
[ 0, 0, 0, 0.2, 0.02702702702702703, 0.08333333333333333, 0.07692307692307693, 0.09090909090909091, 0, 0.12121212121212122, 0, 0, 0, 0, 0.13333333333333333, 0, 0.030303030303030304, 0.03571428571428571, 0, 0.05263157894736842, 0, 0.038461538461538464, 0.12121212121212122, 0.05555555555555555, 0, 0.02857142857142857, 0.02857142857142857, 0.02857142857142857, 0, 0, 0.023255813953488372 ]
31
0.037922
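A short sketch of consuming the dictionary returned by the read_cands record above. The file name is hypothetical; the 'fileId' and 'cands' keys and the per-measurement column names ('x', 'y', 'flux', ...) come from the record itself.

# read_cands is the function from the record above; the file name is made up.
result = read_cands('chip01.comb.cands')
print(len(result['fileId']), 'exposure IDs')
for cand in result['cands']:
    # each candidate is a list of per-exposure measurement dicts
    print([(m['x'], m['y'], m['flux']) for m in cand])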
def remove_prefix(self, auth, spec, recursive = False): """ Remove prefix matching `spec`. * `auth` [BaseAuth] AAA options. * `spec` [prefix_spec] Specifies prefixe to remove. * `recursive` [bool] When set to True, also remove child prefixes. This is the documentation of the internal backend function. It's exposed over XML-RPC, please also see the XML-RPC documentation for :py:func:`nipap.xmlrpc.NipapXMLRPC.remove_prefix` for full understanding. """ self._logger.debug("remove_prefix called; spec: %s" % unicode(spec)) # sanity check - do we have all attributes? if 'id' in spec: # recursive requires a prefix, so translate id to prefix p = self.list_prefix(auth, spec)[0] del spec['id'] spec['prefix'] = p['prefix'] spec['vrf_id'] = p['vrf_id'] elif 'prefix' in spec: pass else: raise NipapMissingInputError('missing prefix or id of prefix') prefixes = self.list_prefix(auth, spec) if recursive: spec['type'] = 'host' self._db_remove_prefix(spec, recursive) del spec['type'] self._db_remove_prefix(spec, recursive) else: self._db_remove_prefix(spec) # write to audit table audit_params = { 'username': auth.username, 'authenticated_as': auth.authenticated_as, 'full_name': auth.full_name, 'authoritative_source': auth.authoritative_source } for p in prefixes: audit_params['prefix_id'] = p['id'] audit_params['prefix_prefix'] = p['prefix'] audit_params['description'] = 'Removed prefix %s' % p['prefix'] audit_params['vrf_id'] = p['vrf_id'] audit_params['vrf_rt'] = p['vrf_rt'] audit_params['vrf_name'] = p['vrf_name'] sql, params = self._sql_expand_insert(audit_params) self._execute('INSERT INTO ip_net_log %s' % sql, params) if p['pool_id'] is not None: pool = self._get_pool(auth, { 'id': p['pool_id'] }) audit_params2 = { 'pool_id': pool['id'], 'pool_name': pool['name'], 'prefix_id': p['id'], 'prefix_prefix': p['prefix'], 'description': 'Prefix %s removed from pool %s' % (p['prefix'], pool['name']), 'username': auth.username, 'authenticated_as': auth.authenticated_as, 'full_name': auth.full_name, 'authoritative_source': auth.authoritative_source } sql, params = self._sql_expand_insert(audit_params2) self._execute('INSERT INTO ip_net_log %s' % sql, params)
[ "def", "remove_prefix", "(", "self", ",", "auth", ",", "spec", ",", "recursive", "=", "False", ")", ":", "self", ".", "_logger", ".", "debug", "(", "\"remove_prefix called; spec: %s\"", "%", "unicode", "(", "spec", ")", ")", "# sanity check - do we have all attributes?", "if", "'id'", "in", "spec", ":", "# recursive requires a prefix, so translate id to prefix", "p", "=", "self", ".", "list_prefix", "(", "auth", ",", "spec", ")", "[", "0", "]", "del", "spec", "[", "'id'", "]", "spec", "[", "'prefix'", "]", "=", "p", "[", "'prefix'", "]", "spec", "[", "'vrf_id'", "]", "=", "p", "[", "'vrf_id'", "]", "elif", "'prefix'", "in", "spec", ":", "pass", "else", ":", "raise", "NipapMissingInputError", "(", "'missing prefix or id of prefix'", ")", "prefixes", "=", "self", ".", "list_prefix", "(", "auth", ",", "spec", ")", "if", "recursive", ":", "spec", "[", "'type'", "]", "=", "'host'", "self", ".", "_db_remove_prefix", "(", "spec", ",", "recursive", ")", "del", "spec", "[", "'type'", "]", "self", ".", "_db_remove_prefix", "(", "spec", ",", "recursive", ")", "else", ":", "self", ".", "_db_remove_prefix", "(", "spec", ")", "# write to audit table", "audit_params", "=", "{", "'username'", ":", "auth", ".", "username", ",", "'authenticated_as'", ":", "auth", ".", "authenticated_as", ",", "'full_name'", ":", "auth", ".", "full_name", ",", "'authoritative_source'", ":", "auth", ".", "authoritative_source", "}", "for", "p", "in", "prefixes", ":", "audit_params", "[", "'prefix_id'", "]", "=", "p", "[", "'id'", "]", "audit_params", "[", "'prefix_prefix'", "]", "=", "p", "[", "'prefix'", "]", "audit_params", "[", "'description'", "]", "=", "'Removed prefix %s'", "%", "p", "[", "'prefix'", "]", "audit_params", "[", "'vrf_id'", "]", "=", "p", "[", "'vrf_id'", "]", "audit_params", "[", "'vrf_rt'", "]", "=", "p", "[", "'vrf_rt'", "]", "audit_params", "[", "'vrf_name'", "]", "=", "p", "[", "'vrf_name'", "]", "sql", ",", "params", "=", "self", ".", "_sql_expand_insert", "(", "audit_params", ")", "self", ".", "_execute", "(", "'INSERT INTO ip_net_log %s'", "%", "sql", ",", "params", ")", "if", "p", "[", "'pool_id'", "]", "is", "not", "None", ":", "pool", "=", "self", ".", "_get_pool", "(", "auth", ",", "{", "'id'", ":", "p", "[", "'pool_id'", "]", "}", ")", "audit_params2", "=", "{", "'pool_id'", ":", "pool", "[", "'id'", "]", ",", "'pool_name'", ":", "pool", "[", "'name'", "]", ",", "'prefix_id'", ":", "p", "[", "'id'", "]", ",", "'prefix_prefix'", ":", "p", "[", "'prefix'", "]", ",", "'description'", ":", "'Prefix %s removed from pool %s'", "%", "(", "p", "[", "'prefix'", "]", ",", "pool", "[", "'name'", "]", ")", ",", "'username'", ":", "auth", ".", "username", ",", "'authenticated_as'", ":", "auth", ".", "authenticated_as", ",", "'full_name'", ":", "auth", ".", "full_name", ",", "'authoritative_source'", ":", "auth", ".", "authoritative_source", "}", "sql", ",", "params", "=", "self", ".", "_sql_expand_insert", "(", "audit_params2", ")", "self", ".", "_execute", "(", "'INSERT INTO ip_net_log %s'", "%", "sql", ",", "params", ")" ]
40.472222
0.002345
[ "def remove_prefix(self, auth, spec, recursive = False):\n", " \"\"\" Remove prefix matching `spec`.\n", "\n", " * `auth` [BaseAuth]\n", " AAA options.\n", " * `spec` [prefix_spec]\n", " Specifies prefixe to remove.\n", " * `recursive` [bool]\n", " When set to True, also remove child prefixes.\n", "\n", " This is the documentation of the internal backend function. It's\n", " exposed over XML-RPC, please also see the XML-RPC documentation for\n", " :py:func:`nipap.xmlrpc.NipapXMLRPC.remove_prefix` for full\n", " understanding.\n", " \"\"\"\n", "\n", " self._logger.debug(\"remove_prefix called; spec: %s\" % unicode(spec))\n", "\n", " # sanity check - do we have all attributes?\n", " if 'id' in spec:\n", " # recursive requires a prefix, so translate id to prefix\n", " p = self.list_prefix(auth, spec)[0]\n", " del spec['id']\n", " spec['prefix'] = p['prefix']\n", " spec['vrf_id'] = p['vrf_id']\n", " elif 'prefix' in spec:\n", " pass\n", " else:\n", " raise NipapMissingInputError('missing prefix or id of prefix')\n", "\n", " prefixes = self.list_prefix(auth, spec)\n", "\n", " if recursive:\n", " spec['type'] = 'host'\n", " self._db_remove_prefix(spec, recursive)\n", " del spec['type']\n", " self._db_remove_prefix(spec, recursive)\n", " else:\n", " self._db_remove_prefix(spec)\n", "\n", " # write to audit table\n", " audit_params = {\n", " 'username': auth.username,\n", " 'authenticated_as': auth.authenticated_as,\n", " 'full_name': auth.full_name,\n", " 'authoritative_source': auth.authoritative_source\n", " }\n", " for p in prefixes:\n", " audit_params['prefix_id'] = p['id']\n", " audit_params['prefix_prefix'] = p['prefix']\n", " audit_params['description'] = 'Removed prefix %s' % p['prefix']\n", " audit_params['vrf_id'] = p['vrf_id']\n", " audit_params['vrf_rt'] = p['vrf_rt']\n", " audit_params['vrf_name'] = p['vrf_name']\n", " sql, params = self._sql_expand_insert(audit_params)\n", " self._execute('INSERT INTO ip_net_log %s' % sql, params)\n", "\n", " if p['pool_id'] is not None:\n", " pool = self._get_pool(auth, { 'id': p['pool_id'] })\n", " audit_params2 = {\n", " 'pool_id': pool['id'],\n", " 'pool_name': pool['name'],\n", " 'prefix_id': p['id'],\n", " 'prefix_prefix': p['prefix'],\n", " 'description': 'Prefix %s removed from pool %s' % (p['prefix'], pool['name']),\n", " 'username': auth.username,\n", " 'authenticated_as': auth.authenticated_as,\n", " 'full_name': auth.full_name,\n", " 'authoritative_source': auth.authoritative_source\n", " }\n", " sql, params = self._sql_expand_insert(audit_params2)\n", " self._execute('INSERT INTO ip_net_log %s' % sql, params)" ]
[ 0.03571428571428571, 0.023255813953488372, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.029411764705882353, 0, 0, 0, 0, 0, 0.010101010101010102, 0, 0, 0, 0, 0, 0, 0.013888888888888888 ]
72
0.001561
def parse_comment(self, comment, filename, lineno, endlineno, include_paths=None, stripped=False): """ Returns a Comment given a string """ if not stripped and not self.__validate_c_comment(comment.strip()): return None title_offset = 0 column_offset = 0 raw_comment = comment if not stripped: try: while comment[column_offset * -1 - 1] != '\n': column_offset += 1 except IndexError: column_offset = 0 comment, title_offset = self.__strip_comment(comment) title_and_params, description = self.__extract_titles_params_and_description(comment) try: block_name, parameters, annotations, is_section = \ self.__parse_title_and_parameters(filename, title_and_params) except HotdocSourceException as _: warn('gtk-doc-bad-syntax', message=_.message, filename=filename, lineno=lineno + title_offset) return None params_offset = 0 for param in parameters: param.filename = filename param.lineno = lineno param_offset = param.line_offset param.line_offset = title_offset + params_offset + 1 params_offset += param_offset param.col_offset = column_offset if not block_name: return None description_offset = 0 meta = {} tags = [] if description is not None: n_lines = len(comment.split('\n')) description_offset = (title_offset + n_lines - len(description.split('\n'))) meta['description'], tags = self.__parse_description_and_tags(description) actual_parameters = OrderedDict({}) for param in parameters: if is_section: cleaned_up_name = param.name.lower().replace('_', '-') if cleaned_up_name in ['symbols', 'private-symbols', 'auto-sort', 'sources']: meta.update(self.__parse_yaml_comment(param, filename)) if cleaned_up_name == 'sources': sources_paths = [os.path.abspath(os.path.join(os.path.dirname(filename), path)) for path in meta[cleaned_up_name]] meta[cleaned_up_name] = sources_paths else: meta[param.name] = param.description else: actual_parameters[param.name] = param annotations = {annotation.name: annotation for annotation in annotations} tags = {tag.name.lower(): tag for tag in tags} block = Comment(name=block_name, filename=filename, lineno=lineno, endlineno=endlineno, annotations=annotations, params=actual_parameters, tags=tags, raw_comment=raw_comment, meta=meta, toplevel=is_section) block.line_offset = description_offset block.col_offset = column_offset return block
[ "def", "parse_comment", "(", "self", ",", "comment", ",", "filename", ",", "lineno", ",", "endlineno", ",", "include_paths", "=", "None", ",", "stripped", "=", "False", ")", ":", "if", "not", "stripped", "and", "not", "self", ".", "__validate_c_comment", "(", "comment", ".", "strip", "(", ")", ")", ":", "return", "None", "title_offset", "=", "0", "column_offset", "=", "0", "raw_comment", "=", "comment", "if", "not", "stripped", ":", "try", ":", "while", "comment", "[", "column_offset", "*", "-", "1", "-", "1", "]", "!=", "'\\n'", ":", "column_offset", "+=", "1", "except", "IndexError", ":", "column_offset", "=", "0", "comment", ",", "title_offset", "=", "self", ".", "__strip_comment", "(", "comment", ")", "title_and_params", ",", "description", "=", "self", ".", "__extract_titles_params_and_description", "(", "comment", ")", "try", ":", "block_name", ",", "parameters", ",", "annotations", ",", "is_section", "=", "self", ".", "__parse_title_and_parameters", "(", "filename", ",", "title_and_params", ")", "except", "HotdocSourceException", "as", "_", ":", "warn", "(", "'gtk-doc-bad-syntax'", ",", "message", "=", "_", ".", "message", ",", "filename", "=", "filename", ",", "lineno", "=", "lineno", "+", "title_offset", ")", "return", "None", "params_offset", "=", "0", "for", "param", "in", "parameters", ":", "param", ".", "filename", "=", "filename", "param", ".", "lineno", "=", "lineno", "param_offset", "=", "param", ".", "line_offset", "param", ".", "line_offset", "=", "title_offset", "+", "params_offset", "+", "1", "params_offset", "+=", "param_offset", "param", ".", "col_offset", "=", "column_offset", "if", "not", "block_name", ":", "return", "None", "description_offset", "=", "0", "meta", "=", "{", "}", "tags", "=", "[", "]", "if", "description", "is", "not", "None", ":", "n_lines", "=", "len", "(", "comment", ".", "split", "(", "'\\n'", ")", ")", "description_offset", "=", "(", "title_offset", "+", "n_lines", "-", "len", "(", "description", ".", "split", "(", "'\\n'", ")", ")", ")", "meta", "[", "'description'", "]", ",", "tags", "=", "self", ".", "__parse_description_and_tags", "(", "description", ")", "actual_parameters", "=", "OrderedDict", "(", "{", "}", ")", "for", "param", "in", "parameters", ":", "if", "is_section", ":", "cleaned_up_name", "=", "param", ".", "name", ".", "lower", "(", ")", ".", "replace", "(", "'_'", ",", "'-'", ")", "if", "cleaned_up_name", "in", "[", "'symbols'", ",", "'private-symbols'", ",", "'auto-sort'", ",", "'sources'", "]", ":", "meta", ".", "update", "(", "self", ".", "__parse_yaml_comment", "(", "param", ",", "filename", ")", ")", "if", "cleaned_up_name", "==", "'sources'", ":", "sources_paths", "=", "[", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "filename", ")", ",", "path", ")", ")", "for", "path", "in", "meta", "[", "cleaned_up_name", "]", "]", "meta", "[", "cleaned_up_name", "]", "=", "sources_paths", "else", ":", "meta", "[", "param", ".", "name", "]", "=", "param", ".", "description", "else", ":", "actual_parameters", "[", "param", ".", "name", "]", "=", "param", "annotations", "=", "{", "annotation", ".", "name", ":", "annotation", "for", "annotation", "in", "annotations", "}", "tags", "=", "{", "tag", ".", "name", ".", "lower", "(", ")", ":", "tag", "for", "tag", "in", "tags", "}", "block", "=", "Comment", "(", "name", "=", "block_name", ",", "filename", "=", "filename", ",", "lineno", "=", "lineno", ",", "endlineno", "=", "endlineno", ",", "annotations", "=", 
"annotations", ",", "params", "=", "actual_parameters", ",", "tags", "=", "tags", ",", "raw_comment", "=", "raw_comment", ",", "meta", "=", "meta", ",", "toplevel", "=", "is_section", ")", "block", ".", "line_offset", "=", "description_offset", "block", ".", "col_offset", "=", "column_offset", "return", "block" ]
38.95
0.002191
[ "def parse_comment(self, comment, filename, lineno, endlineno,\n", " include_paths=None, stripped=False):\n", " \"\"\"\n", " Returns a Comment given a string\n", " \"\"\"\n", " if not stripped and not self.__validate_c_comment(comment.strip()):\n", " return None\n", "\n", " title_offset = 0\n", "\n", " column_offset = 0\n", "\n", " raw_comment = comment\n", " if not stripped:\n", " try:\n", " while comment[column_offset * -1 - 1] != '\\n':\n", " column_offset += 1\n", " except IndexError:\n", " column_offset = 0\n", " comment, title_offset = self.__strip_comment(comment)\n", "\n", " title_and_params, description = self.__extract_titles_params_and_description(comment)\n", " try:\n", " block_name, parameters, annotations, is_section = \\\n", " self.__parse_title_and_parameters(filename, title_and_params)\n", " except HotdocSourceException as _:\n", " warn('gtk-doc-bad-syntax',\n", " message=_.message,\n", " filename=filename,\n", " lineno=lineno + title_offset)\n", " return None\n", "\n", " params_offset = 0\n", " for param in parameters:\n", " param.filename = filename\n", " param.lineno = lineno\n", " param_offset = param.line_offset\n", " param.line_offset = title_offset + params_offset + 1\n", " params_offset += param_offset\n", " param.col_offset = column_offset\n", "\n", " if not block_name:\n", " return None\n", "\n", " description_offset = 0\n", " meta = {}\n", " tags = []\n", " if description is not None:\n", " n_lines = len(comment.split('\\n'))\n", " description_offset = (title_offset + n_lines -\n", " len(description.split('\\n')))\n", " meta['description'], tags = self.__parse_description_and_tags(description)\n", "\n", " actual_parameters = OrderedDict({})\n", " for param in parameters:\n", " if is_section:\n", " cleaned_up_name = param.name.lower().replace('_', '-')\n", " if cleaned_up_name in ['symbols', 'private-symbols', 'auto-sort', 'sources']:\n", " meta.update(self.__parse_yaml_comment(param, filename))\n", " if cleaned_up_name == 'sources':\n", " sources_paths = [os.path.abspath(os.path.join(os.path.dirname(filename), path)) for path in meta[cleaned_up_name]]\n", " meta[cleaned_up_name] = sources_paths\n", " else:\n", " meta[param.name] = param.description\n", " else:\n", " actual_parameters[param.name] = param\n", "\n", " annotations = {annotation.name: annotation for annotation in\n", " annotations}\n", " tags = {tag.name.lower(): tag for tag in tags}\n", "\n", " block = Comment(name=block_name, filename=filename, lineno=lineno,\n", " endlineno=endlineno,\n", " annotations=annotations, params=actual_parameters,\n", " tags=tags, raw_comment=raw_comment,\n", " meta=meta, toplevel=is_section)\n", " block.line_offset = description_offset\n", " block.col_offset = column_offset\n", "\n", " return block" ]
[ 0, 0.01694915254237288, 0.08333333333333333, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.010638297872340425, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011494252873563218, 0, 0, 0, 0, 0, 0.010638297872340425, 0, 0, 0.007194244604316547, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.05 ]
80
0.002378
def main(argv=None): """Send X10 commands when module is used from the command line. This uses syntax similar to sendCommands, for example: x10.py com2 A1 On, A2 Off, B All Off """ if len(argv): # join all the arguments together by spaces so that quotes # aren't required on the command line. commands = ' '.join(argv) # the comPort is everything leading up to the first space comPort, commands = commands.split(None, 1) sendCommands(comPort, commands) return 0
[ "def", "main", "(", "argv", "=", "None", ")", ":", "if", "len", "(", "argv", ")", ":", "# join all the arguments together by spaces so that quotes", "# aren't required on the command line.", "commands", "=", "' '", ".", "join", "(", "argv", ")", "# the comPort is everything leading up to the first space", "comPort", ",", "commands", "=", "commands", ".", "split", "(", "None", ",", "1", ")", "sendCommands", "(", "comPort", ",", "commands", ")", "return", "0" ]
28.944444
0.001859
[ "def main(argv=None):\n", " \"\"\"Send X10 commands when module is used from the command line.\n", "\n", " This uses syntax similar to sendCommands, for example:\n", "\n", " x10.py com2 A1 On, A2 Off, B All Off\n", " \"\"\"\n", " if len(argv):\n", " # join all the arguments together by spaces so that quotes\n", " # aren't required on the command line.\n", " commands = ' '.join(argv)\n", "\n", " # the comPort is everything leading up to the first space\n", " comPort, commands = commands.split(None, 1)\n", "\n", " sendCommands(comPort, commands)\n", "\n", " return 0" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.08333333333333333 ]
18
0.00463
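The x10 `main` record above joins argv back into one string and then splits off the COM port from the command list; a minimal standalone sketch of just that parsing step (sendCommands needs real X10 hardware, so it is only referenced here, not called):

# Illustration of the argument handling in main() above; no serial port is opened.
argv = ['com2', 'A1', 'On,', 'A2', 'Off,', 'B', 'All', 'Off']
commands = ' '.join(argv)                    # "com2 A1 On, A2 Off, B All Off"
comPort, commands = commands.split(None, 1)  # "com2", "A1 On, A2 Off, B All Off"
print(comPort)    # com2
print(commands)   # A1 On, A2 Off, B All Off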
def lcsstr(self, src, tar): """Return the longest common substring of two strings. Longest common substring (LCSstr). Based on the code from https://en.wikibooks.org/wiki/Algorithm_Implementation/Strings/Longest_common_substring :cite:`Wikibooks:2018`. This is licensed Creative Commons: Attribution-ShareAlike 3.0. Modifications include: - conversion to a numpy array in place of a list of lists - conversion to Python 2/3-safe range from xrange via six Parameters ---------- src : str Source string for comparison tar : str Target string for comparison Returns ------- str The longest common substring Examples -------- >>> sstr = LCSstr() >>> sstr.lcsstr('cat', 'hat') 'at' >>> sstr.lcsstr('Niall', 'Neil') 'N' >>> sstr.lcsstr('aluminum', 'Catalan') 'al' >>> sstr.lcsstr('ATCG', 'TAGC') 'A' """ lengths = np_zeros((len(src) + 1, len(tar) + 1), dtype=np_int) longest, i_longest = 0, 0 for i in range(1, len(src) + 1): for j in range(1, len(tar) + 1): if src[i - 1] == tar[j - 1]: lengths[i, j] = lengths[i - 1, j - 1] + 1 if lengths[i, j] > longest: longest = lengths[i, j] i_longest = i else: lengths[i, j] = 0 return src[i_longest - longest : i_longest]
[ "def", "lcsstr", "(", "self", ",", "src", ",", "tar", ")", ":", "lengths", "=", "np_zeros", "(", "(", "len", "(", "src", ")", "+", "1", ",", "len", "(", "tar", ")", "+", "1", ")", ",", "dtype", "=", "np_int", ")", "longest", ",", "i_longest", "=", "0", ",", "0", "for", "i", "in", "range", "(", "1", ",", "len", "(", "src", ")", "+", "1", ")", ":", "for", "j", "in", "range", "(", "1", ",", "len", "(", "tar", ")", "+", "1", ")", ":", "if", "src", "[", "i", "-", "1", "]", "==", "tar", "[", "j", "-", "1", "]", ":", "lengths", "[", "i", ",", "j", "]", "=", "lengths", "[", "i", "-", "1", ",", "j", "-", "1", "]", "+", "1", "if", "lengths", "[", "i", ",", "j", "]", ">", "longest", ":", "longest", "=", "lengths", "[", "i", ",", "j", "]", "i_longest", "=", "i", "else", ":", "lengths", "[", "i", ",", "j", "]", "=", "0", "return", "src", "[", "i_longest", "-", "longest", ":", "i_longest", "]" ]
30.115385
0.001855
[ "def lcsstr(self, src, tar):\n", " \"\"\"Return the longest common substring of two strings.\n", "\n", " Longest common substring (LCSstr).\n", "\n", " Based on the code from\n", " https://en.wikibooks.org/wiki/Algorithm_Implementation/Strings/Longest_common_substring\n", " :cite:`Wikibooks:2018`.\n", " This is licensed Creative Commons: Attribution-ShareAlike 3.0.\n", "\n", " Modifications include:\n", "\n", " - conversion to a numpy array in place of a list of lists\n", " - conversion to Python 2/3-safe range from xrange via six\n", "\n", " Parameters\n", " ----------\n", " src : str\n", " Source string for comparison\n", " tar : str\n", " Target string for comparison\n", "\n", " Returns\n", " -------\n", " str\n", " The longest common substring\n", "\n", " Examples\n", " --------\n", " >>> sstr = LCSstr()\n", " >>> sstr.lcsstr('cat', 'hat')\n", " 'at'\n", " >>> sstr.lcsstr('Niall', 'Neil')\n", " 'N'\n", " >>> sstr.lcsstr('aluminum', 'Catalan')\n", " 'al'\n", " >>> sstr.lcsstr('ATCG', 'TAGC')\n", " 'A'\n", "\n", " \"\"\"\n", " lengths = np_zeros((len(src) + 1, len(tar) + 1), dtype=np_int)\n", " longest, i_longest = 0, 0\n", " for i in range(1, len(src) + 1):\n", " for j in range(1, len(tar) + 1):\n", " if src[i - 1] == tar[j - 1]:\n", " lengths[i, j] = lengths[i - 1, j - 1] + 1\n", " if lengths[i, j] > longest:\n", " longest = lengths[i, j]\n", " i_longest = i\n", " else:\n", " lengths[i, j] = 0\n", " return src[i_longest - longest : i_longest]" ]
[ 0, 0.015873015873015872, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0392156862745098 ]
52
0.001059
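The lcsstr record implements the classic longest-common-substring dynamic programme; a dependency-free sketch of the same recurrence using nested lists instead of a numpy array (the function name is mine, not part of the dataset row):

def lcs_substring(src, tar):
    # lengths[i][j] = length of the common suffix of src[:i] and tar[:j]
    lengths = [[0] * (len(tar) + 1) for _ in range(len(src) + 1)]
    longest, i_longest = 0, 0
    for i in range(1, len(src) + 1):
        for j in range(1, len(tar) + 1):
            if src[i - 1] == tar[j - 1]:
                lengths[i][j] = lengths[i - 1][j - 1] + 1
                if lengths[i][j] > longest:
                    longest, i_longest = lengths[i][j], i
    return src[i_longest - longest:i_longest]

assert lcs_substring('cat', 'hat') == 'at'
assert lcs_substring('aluminum', 'Catalan') == 'al'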
def as_vartype(vartype): """Cast various inputs to a valid vartype object. Args: vartype (:class:`.Vartype`/str/set): Variable type. Accepted input values: * :class:`.Vartype.SPIN`, ``'SPIN'``, ``{-1, 1}`` * :class:`.Vartype.BINARY`, ``'BINARY'``, ``{0, 1}`` Returns: :class:`.Vartype`: Either :class:`.Vartype.SPIN` or :class:`.Vartype.BINARY`. See also: :func:`~dimod.decorators.vartype_argument` """ if isinstance(vartype, Vartype): return vartype try: if isinstance(vartype, str): vartype = Vartype[vartype] elif isinstance(vartype, frozenset): vartype = Vartype(vartype) else: vartype = Vartype(frozenset(vartype)) except (ValueError, KeyError): raise TypeError(("expected input vartype to be one of: " "Vartype.SPIN, 'SPIN', {-1, 1}, " "Vartype.BINARY, 'BINARY', or {0, 1}.")) return vartype
[ "def", "as_vartype", "(", "vartype", ")", ":", "if", "isinstance", "(", "vartype", ",", "Vartype", ")", ":", "return", "vartype", "try", ":", "if", "isinstance", "(", "vartype", ",", "str", ")", ":", "vartype", "=", "Vartype", "[", "vartype", "]", "elif", "isinstance", "(", "vartype", ",", "frozenset", ")", ":", "vartype", "=", "Vartype", "(", "vartype", ")", "else", ":", "vartype", "=", "Vartype", "(", "frozenset", "(", "vartype", ")", ")", "except", "(", "ValueError", ",", "KeyError", ")", ":", "raise", "TypeError", "(", "(", "\"expected input vartype to be one of: \"", "\"Vartype.SPIN, 'SPIN', {-1, 1}, \"", "\"Vartype.BINARY, 'BINARY', or {0, 1}.\"", ")", ")", "return", "vartype" ]
28.6
0.000966
[ "def as_vartype(vartype):\n", " \"\"\"Cast various inputs to a valid vartype object.\n", "\n", " Args:\n", " vartype (:class:`.Vartype`/str/set):\n", " Variable type. Accepted input values:\n", "\n", " * :class:`.Vartype.SPIN`, ``'SPIN'``, ``{-1, 1}``\n", " * :class:`.Vartype.BINARY`, ``'BINARY'``, ``{0, 1}``\n", "\n", " Returns:\n", " :class:`.Vartype`: Either :class:`.Vartype.SPIN` or\n", " :class:`.Vartype.BINARY`.\n", "\n", " See also:\n", " :func:`~dimod.decorators.vartype_argument`\n", "\n", " \"\"\"\n", " if isinstance(vartype, Vartype):\n", " return vartype\n", "\n", " try:\n", " if isinstance(vartype, str):\n", " vartype = Vartype[vartype]\n", " elif isinstance(vartype, frozenset):\n", " vartype = Vartype(vartype)\n", " else:\n", " vartype = Vartype(frozenset(vartype))\n", "\n", " except (ValueError, KeyError):\n", " raise TypeError((\"expected input vartype to be one of: \"\n", " \"Vartype.SPIN, 'SPIN', {-1, 1}, \"\n", " \"Vartype.BINARY, 'BINARY', or {0, 1}.\"))\n", "\n", " return vartype" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.05555555555555555 ]
35
0.001587
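A short usage sketch of the three accepted input forms for as_vartype, assuming the dimod package (where this function lives) is installed and exposes as_vartype and Vartype at the top level:

# Requires: pip install dimod (assumed public API: dimod.as_vartype, dimod.Vartype).
import dimod

print(dimod.as_vartype('SPIN'))      # Vartype.SPIN from a string
print(dimod.as_vartype({0, 1}))      # Vartype.BINARY from a value set
print(dimod.as_vartype(dimod.Vartype.SPIN) is dimod.Vartype.SPIN)  # True: returned unchanged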
def convert_elementwise_add( params, w_name, scope_name, inputs, layers, weights, names ): """ Convert elementwise addition. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers """ print('Converting elementwise_add ...') if 'broadcast' in params: model0 = layers[inputs[0]] model1 = layers[inputs[1]] if names == 'short': tf_name = 'A' + random_string(7) elif names == 'keep': tf_name = w_name else: tf_name = w_name + str(random.random()) def target_layer(x): layer = tf.add(x[0], x[1]) return layer lambda_layer = keras.layers.Lambda(target_layer, name=tf_name) layers[scope_name] = lambda_layer([layers[inputs[0]], layers[inputs[1]]]) else: model0 = layers[inputs[0]] model1 = layers[inputs[1]] if names == 'short': tf_name = 'A' + random_string(7) elif names == 'keep': tf_name = w_name else: tf_name = w_name + str(random.random()) add = keras.layers.Add(name=tf_name) layers[scope_name] = add([model0, model1])
[ "def", "convert_elementwise_add", "(", "params", ",", "w_name", ",", "scope_name", ",", "inputs", ",", "layers", ",", "weights", ",", "names", ")", ":", "print", "(", "'Converting elementwise_add ...'", ")", "if", "'broadcast'", "in", "params", ":", "model0", "=", "layers", "[", "inputs", "[", "0", "]", "]", "model1", "=", "layers", "[", "inputs", "[", "1", "]", "]", "if", "names", "==", "'short'", ":", "tf_name", "=", "'A'", "+", "random_string", "(", "7", ")", "elif", "names", "==", "'keep'", ":", "tf_name", "=", "w_name", "else", ":", "tf_name", "=", "w_name", "+", "str", "(", "random", ".", "random", "(", ")", ")", "def", "target_layer", "(", "x", ")", ":", "layer", "=", "tf", ".", "add", "(", "x", "[", "0", "]", ",", "x", "[", "1", "]", ")", "return", "layer", "lambda_layer", "=", "keras", ".", "layers", ".", "Lambda", "(", "target_layer", ",", "name", "=", "tf_name", ")", "layers", "[", "scope_name", "]", "=", "lambda_layer", "(", "[", "layers", "[", "inputs", "[", "0", "]", "]", ",", "layers", "[", "inputs", "[", "1", "]", "]", "]", ")", "else", ":", "model0", "=", "layers", "[", "inputs", "[", "0", "]", "]", "model1", "=", "layers", "[", "inputs", "[", "1", "]", "]", "if", "names", "==", "'short'", ":", "tf_name", "=", "'A'", "+", "random_string", "(", "7", ")", "elif", "names", "==", "'keep'", ":", "tf_name", "=", "w_name", "else", ":", "tf_name", "=", "w_name", "+", "str", "(", "random", ".", "random", "(", ")", ")", "add", "=", "keras", ".", "layers", ".", "Add", "(", "name", "=", "tf_name", ")", "layers", "[", "scope_name", "]", "=", "add", "(", "[", "model0", ",", "model1", "]", ")" ]
29.891304
0.001408
[ "def convert_elementwise_add(\n", " params, w_name, scope_name, inputs, layers, weights, names\n", "):\n", " \"\"\"\n", " Convert elementwise addition.\n", "\n", " Args:\n", " params: dictionary with layer parameters\n", " w_name: name prefix in state_dict\n", " scope_name: pytorch scope name\n", " inputs: pytorch node inputs\n", " layers: dictionary with keras tensors\n", " weights: pytorch state_dict\n", " names: use short names for keras layers\n", " \"\"\"\n", " print('Converting elementwise_add ...')\n", " if 'broadcast' in params:\n", " model0 = layers[inputs[0]]\n", " model1 = layers[inputs[1]]\n", "\n", " if names == 'short':\n", " tf_name = 'A' + random_string(7)\n", " elif names == 'keep':\n", " tf_name = w_name\n", " else:\n", " tf_name = w_name + str(random.random())\n", "\n", " def target_layer(x):\n", " layer = tf.add(x[0], x[1])\n", " return layer\n", "\n", " lambda_layer = keras.layers.Lambda(target_layer, name=tf_name)\n", " layers[scope_name] = lambda_layer([layers[inputs[0]], layers[inputs[1]]])\n", " else:\n", " model0 = layers[inputs[0]]\n", " model1 = layers[inputs[1]]\n", "\n", " if names == 'short':\n", " tf_name = 'A' + random_string(7)\n", " elif names == 'keep':\n", " tf_name = w_name\n", " else:\n", " tf_name = w_name + str(random.random())\n", "\n", " add = keras.layers.Add(name=tf_name)\n", " layers[scope_name] = add([model0, model1])" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.012195121951219513, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.02 ]
46
0.0007
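The non-broadcast branch of convert_elementwise_add boils down to a keras.layers.Add layer; a minimal standalone sketch of that layer, assuming a TensorFlow/Keras installation (it does not use the converter's layers/weights dicts):

# Two symbolic inputs added elementwise, mirroring add([model0, model1]) above.
from tensorflow import keras

a = keras.layers.Input(shape=(4,))
b = keras.layers.Input(shape=(4,))
out = keras.layers.Add(name='A_example')([a, b])
model = keras.Model(inputs=[a, b], outputs=out)
model.summary()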
def pretty_repr(instance): """ A function assignable to the ``__repr__`` dunder method, so that the ``prettyprinter`` definition for the type is used to provide repr output. Usage: .. code:: python from prettyprinter import pretty_repr class MyClass: __repr__ = pretty_repr """ instance_type = type(instance) if not is_registered( instance_type, check_superclasses=True, check_deferred=True, register_deferred=True ): warnings.warn( "pretty_repr is assigned as the __repr__ method of " "'{}'. However, no pretty printer is registered for that type, " "its superclasses or its subclasses. Falling back to the default " "repr implementation. To fix this warning, register a pretty " "printer using prettyprinter.register_pretty.".format( instance_type.__qualname__ ), UserWarning ) return object.__repr__(instance) return pformat(instance)
[ "def", "pretty_repr", "(", "instance", ")", ":", "instance_type", "=", "type", "(", "instance", ")", "if", "not", "is_registered", "(", "instance_type", ",", "check_superclasses", "=", "True", ",", "check_deferred", "=", "True", ",", "register_deferred", "=", "True", ")", ":", "warnings", ".", "warn", "(", "\"pretty_repr is assigned as the __repr__ method of \"", "\"'{}'. However, no pretty printer is registered for that type, \"", "\"its superclasses or its subclasses. Falling back to the default \"", "\"repr implementation. To fix this warning, register a pretty \"", "\"printer using prettyprinter.register_pretty.\"", ".", "format", "(", "instance_type", ".", "__qualname__", ")", ",", "UserWarning", ")", "return", "object", ".", "__repr__", "(", "instance", ")", "return", "pformat", "(", "instance", ")" ]
29.514286
0.000937
[ "def pretty_repr(instance):\n", " \"\"\"\n", " A function assignable to the ``__repr__`` dunder method, so that\n", " the ``prettyprinter`` definition for the type is used to provide\n", " repr output. Usage:\n", "\n", " .. code:: python\n", "\n", " from prettyprinter import pretty_repr\n", "\n", " class MyClass:\n", " __repr__ = pretty_repr\n", "\n", " \"\"\"\n", "\n", " instance_type = type(instance)\n", " if not is_registered(\n", " instance_type,\n", " check_superclasses=True,\n", " check_deferred=True,\n", " register_deferred=True\n", " ):\n", " warnings.warn(\n", " \"pretty_repr is assigned as the __repr__ method of \"\n", " \"'{}'. However, no pretty printer is registered for that type, \"\n", " \"its superclasses or its subclasses. Falling back to the default \"\n", " \"repr implementation. To fix this warning, register a pretty \"\n", " \"printer using prettyprinter.register_pretty.\".format(\n", " instance_type.__qualname__\n", " ),\n", " UserWarning\n", " )\n", " return object.__repr__(instance)\n", "\n", " return pformat(instance)" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.03571428571428571 ]
35
0.00102
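pretty_repr warns unless a printer is registered for the type; a usage sketch assuming the prettyprinter package and its documented register_pretty/pretty_call helpers (register_pretty is named in the warning message above, pretty_call is an assumption from the package docs):

from prettyprinter import pretty_repr, register_pretty, pretty_call

class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y
    __repr__ = pretty_repr  # as shown in the docstring above

@register_pretty(Point)
def pretty_point(value, ctx):
    return pretty_call(ctx, Point, x=value.x, y=value.y)

print(repr(Point(1, 2)))  # Point(x=1, y=2)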
def aseg_on_mri(mri_spec, aseg_spec, alpha_mri=1.0, alpha_seg=1.0, num_rows=2, num_cols=6, rescale_method='global', aseg_cmap='freesurfer', sub_cortical=False, annot=None, padding=5, bkground_thresh=0.05, output_path=None, figsize=None, **kwargs): "Produces a collage of various slices from different orientations in the given 3D image" num_rows, num_cols, padding = check_params(num_rows, num_cols, padding) mri = read_image(mri_spec, bkground_thresh=bkground_thresh) seg = read_image(aseg_spec, bkground_thresh=0) mri, seg = crop_to_seg_extents(mri, seg, padding) num_slices_per_view = num_rows * num_cols slices = pick_slices(seg, num_slices_per_view) plt.style.use('dark_background') num_axes = 3 if figsize is None: figsize = [5 * num_axes * num_rows, 5 * num_cols] fig, ax = plt.subplots(num_axes * num_rows, num_cols, figsize=figsize) # displaying some annotation text if provided if annot is not None: fig.suptitle(annot, backgroundcolor='black', color='g') display_params_mri = dict(interpolation='none', aspect='equal', origin='lower', cmap='gray', alpha=alpha_mri, vmin=mri.min(), vmax=mri.max()) display_params_seg = dict(interpolation='none', aspect='equal', origin='lower', alpha=alpha_seg) normalize_labels = colors.Normalize(vmin=seg.min(), vmax=seg.max(), clip=True) fs_cmap = get_freesurfer_cmap(sub_cortical) label_mapper = cm.ScalarMappable(norm=normalize_labels, cmap=fs_cmap) ax = ax.flatten() ax_counter = 0 for dim_index in range(3): for slice_num in slices[dim_index]: plt.sca(ax[ax_counter]) ax_counter = ax_counter + 1 slice_mri = get_axis(mri, dim_index, slice_num) slice_seg = get_axis(seg, dim_index, slice_num) # # masking data to set no-value pixels to transparent # seg_background = np.isclose(slice_seg, 0.0) # slice_seg = np.ma.masked_where(seg_background, slice_seg) # slice_mri = np.ma.masked_where(np.logical_not(seg_background), slice_mri) seg_rgb = label_mapper.to_rgba(slice_seg) plt.imshow(seg_rgb, **display_params_seg) plt.imshow(slice_mri, **display_params_mri) plt.axis('off') # plt.subplots_adjust(wspace=0.0, hspace=0.0) plt.subplots_adjust(left=0.01, right=0.99, bottom=0.01, top=0.99, wspace=0.05, hspace=0.02) # fig.tight_layout() if output_path is not None: output_path = output_path.replace(' ', '_') fig.savefig(output_path + '.png', bbox_inches='tight') # plt.close() return fig
[ "def", "aseg_on_mri", "(", "mri_spec", ",", "aseg_spec", ",", "alpha_mri", "=", "1.0", ",", "alpha_seg", "=", "1.0", ",", "num_rows", "=", "2", ",", "num_cols", "=", "6", ",", "rescale_method", "=", "'global'", ",", "aseg_cmap", "=", "'freesurfer'", ",", "sub_cortical", "=", "False", ",", "annot", "=", "None", ",", "padding", "=", "5", ",", "bkground_thresh", "=", "0.05", ",", "output_path", "=", "None", ",", "figsize", "=", "None", ",", "*", "*", "kwargs", ")", ":", "num_rows", ",", "num_cols", ",", "padding", "=", "check_params", "(", "num_rows", ",", "num_cols", ",", "padding", ")", "mri", "=", "read_image", "(", "mri_spec", ",", "bkground_thresh", "=", "bkground_thresh", ")", "seg", "=", "read_image", "(", "aseg_spec", ",", "bkground_thresh", "=", "0", ")", "mri", ",", "seg", "=", "crop_to_seg_extents", "(", "mri", ",", "seg", ",", "padding", ")", "num_slices_per_view", "=", "num_rows", "*", "num_cols", "slices", "=", "pick_slices", "(", "seg", ",", "num_slices_per_view", ")", "plt", ".", "style", ".", "use", "(", "'dark_background'", ")", "num_axes", "=", "3", "if", "figsize", "is", "None", ":", "figsize", "=", "[", "5", "*", "num_axes", "*", "num_rows", ",", "5", "*", "num_cols", "]", "fig", ",", "ax", "=", "plt", ".", "subplots", "(", "num_axes", "*", "num_rows", ",", "num_cols", ",", "figsize", "=", "figsize", ")", "# displaying some annotation text if provided", "if", "annot", "is", "not", "None", ":", "fig", ".", "suptitle", "(", "annot", ",", "backgroundcolor", "=", "'black'", ",", "color", "=", "'g'", ")", "display_params_mri", "=", "dict", "(", "interpolation", "=", "'none'", ",", "aspect", "=", "'equal'", ",", "origin", "=", "'lower'", ",", "cmap", "=", "'gray'", ",", "alpha", "=", "alpha_mri", ",", "vmin", "=", "mri", ".", "min", "(", ")", ",", "vmax", "=", "mri", ".", "max", "(", ")", ")", "display_params_seg", "=", "dict", "(", "interpolation", "=", "'none'", ",", "aspect", "=", "'equal'", ",", "origin", "=", "'lower'", ",", "alpha", "=", "alpha_seg", ")", "normalize_labels", "=", "colors", ".", "Normalize", "(", "vmin", "=", "seg", ".", "min", "(", ")", ",", "vmax", "=", "seg", ".", "max", "(", ")", ",", "clip", "=", "True", ")", "fs_cmap", "=", "get_freesurfer_cmap", "(", "sub_cortical", ")", "label_mapper", "=", "cm", ".", "ScalarMappable", "(", "norm", "=", "normalize_labels", ",", "cmap", "=", "fs_cmap", ")", "ax", "=", "ax", ".", "flatten", "(", ")", "ax_counter", "=", "0", "for", "dim_index", "in", "range", "(", "3", ")", ":", "for", "slice_num", "in", "slices", "[", "dim_index", "]", ":", "plt", ".", "sca", "(", "ax", "[", "ax_counter", "]", ")", "ax_counter", "=", "ax_counter", "+", "1", "slice_mri", "=", "get_axis", "(", "mri", ",", "dim_index", ",", "slice_num", ")", "slice_seg", "=", "get_axis", "(", "seg", ",", "dim_index", ",", "slice_num", ")", "# # masking data to set no-value pixels to transparent", "# seg_background = np.isclose(slice_seg, 0.0)", "# slice_seg = np.ma.masked_where(seg_background, slice_seg)", "# slice_mri = np.ma.masked_where(np.logical_not(seg_background), slice_mri)", "seg_rgb", "=", "label_mapper", ".", "to_rgba", "(", "slice_seg", ")", "plt", ".", "imshow", "(", "seg_rgb", ",", "*", "*", "display_params_seg", ")", "plt", ".", "imshow", "(", "slice_mri", ",", "*", "*", "display_params_mri", ")", "plt", ".", "axis", "(", "'off'", ")", "# plt.subplots_adjust(wspace=0.0, hspace=0.0)", "plt", ".", "subplots_adjust", "(", "left", "=", "0.01", ",", "right", "=", "0.99", ",", "bottom", "=", "0.01", ",", "top", "=", "0.99", ",", "wspace", 
"=", "0.05", ",", "hspace", "=", "0.02", ")", "# fig.tight_layout()", "if", "output_path", "is", "not", "None", ":", "output_path", "=", "output_path", ".", "replace", "(", "' '", ",", "'_'", ")", "fig", ".", "savefig", "(", "output_path", "+", "'.png'", ",", "bbox_inches", "=", "'tight'", ")", "# plt.close()", "return", "fig" ]
36.55
0.001998
[ "def aseg_on_mri(mri_spec,\n", " aseg_spec,\n", " alpha_mri=1.0,\n", " alpha_seg=1.0,\n", " num_rows=2,\n", " num_cols=6,\n", " rescale_method='global',\n", " aseg_cmap='freesurfer',\n", " sub_cortical=False,\n", " annot=None,\n", " padding=5,\n", " bkground_thresh=0.05,\n", " output_path=None,\n", " figsize=None,\n", " **kwargs):\n", " \"Produces a collage of various slices from different orientations in the given 3D image\"\n", "\n", " num_rows, num_cols, padding = check_params(num_rows, num_cols, padding)\n", "\n", " mri = read_image(mri_spec, bkground_thresh=bkground_thresh)\n", " seg = read_image(aseg_spec, bkground_thresh=0)\n", " mri, seg = crop_to_seg_extents(mri, seg, padding)\n", "\n", " num_slices_per_view = num_rows * num_cols\n", " slices = pick_slices(seg, num_slices_per_view)\n", "\n", " plt.style.use('dark_background')\n", "\n", " num_axes = 3\n", " if figsize is None:\n", " figsize = [5 * num_axes * num_rows, 5 * num_cols]\n", " fig, ax = plt.subplots(num_axes * num_rows, num_cols, figsize=figsize)\n", "\n", " # displaying some annotation text if provided\n", " if annot is not None:\n", " fig.suptitle(annot, backgroundcolor='black', color='g')\n", "\n", " display_params_mri = dict(interpolation='none', aspect='equal', origin='lower',\n", " cmap='gray', alpha=alpha_mri,\n", " vmin=mri.min(), vmax=mri.max())\n", " display_params_seg = dict(interpolation='none', aspect='equal', origin='lower',\n", " alpha=alpha_seg)\n", "\n", " normalize_labels = colors.Normalize(vmin=seg.min(), vmax=seg.max(), clip=True)\n", " fs_cmap = get_freesurfer_cmap(sub_cortical)\n", " label_mapper = cm.ScalarMappable(norm=normalize_labels, cmap=fs_cmap)\n", "\n", " ax = ax.flatten()\n", " ax_counter = 0\n", " for dim_index in range(3):\n", " for slice_num in slices[dim_index]:\n", " plt.sca(ax[ax_counter])\n", " ax_counter = ax_counter + 1\n", "\n", " slice_mri = get_axis(mri, dim_index, slice_num)\n", " slice_seg = get_axis(seg, dim_index, slice_num)\n", "\n", " # # masking data to set no-value pixels to transparent\n", " # seg_background = np.isclose(slice_seg, 0.0)\n", " # slice_seg = np.ma.masked_where(seg_background, slice_seg)\n", " # slice_mri = np.ma.masked_where(np.logical_not(seg_background), slice_mri)\n", "\n", " seg_rgb = label_mapper.to_rgba(slice_seg)\n", " plt.imshow(seg_rgb, **display_params_seg)\n", " plt.imshow(slice_mri, **display_params_mri)\n", " plt.axis('off')\n", "\n", " # plt.subplots_adjust(wspace=0.0, hspace=0.0)\n", " plt.subplots_adjust(left=0.01, right=0.99,\n", " bottom=0.01, top=0.99,\n", " wspace=0.05, hspace=0.02)\n", " # fig.tight_layout()\n", "\n", " if output_path is not None:\n", " output_path = output_path.replace(' ', '_')\n", " fig.savefig(output_path + '.png', bbox_inches='tight')\n", "\n", " # plt.close()\n", "\n", " return fig" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.010752688172043012, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011904761904761904, 0, 0, 0.011904761904761904, 0, 0, 0.012048192771084338, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011363636363636364, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.07142857142857142 ]
80
0.001618
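aseg_on_mri lays its slices out on a (3 * num_rows) x num_cols grid of axes; a toy sketch of that layout with random arrays standing in for the MRI/segmentation slices (rendered off-screen, no nibabel or FreeSurfer colormaps involved):

import numpy as np
import matplotlib
matplotlib.use('Agg')            # no display needed
import matplotlib.pyplot as plt

num_rows, num_cols, num_axes = 2, 6, 3
fig, ax = plt.subplots(num_axes * num_rows, num_cols, figsize=(12, 12))
for a in ax.flatten():
    a.imshow(np.random.rand(32, 32), origin='lower', cmap='gray')   # stand-in slice
    a.axis('off')
plt.subplots_adjust(left=0.01, right=0.99, bottom=0.01, top=0.99, wspace=0.05, hspace=0.02)
fig.savefig('collage_sketch.png', bbox_inches='tight')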
def get_expiration_seconds_v2(expiration): """Convert 'expiration' to a number of seconds in the future. :type expiration: Union[Integer, datetime.datetime, datetime.timedelta] :param expiration: Point in time when the signed URL should expire. :raises: :exc:`TypeError` when expiration is not a valid type. :rtype: int :returns: a timestamp as an absolute number of seconds since epoch. """ # If it's a timedelta, add it to `now` in UTC. if isinstance(expiration, datetime.timedelta): now = NOW().replace(tzinfo=_helpers.UTC) expiration = now + expiration # If it's a datetime, convert to a timestamp. if isinstance(expiration, datetime.datetime): micros = _helpers._microseconds_from_datetime(expiration) expiration = micros // 10 ** 6 if not isinstance(expiration, six.integer_types): raise TypeError( "Expected an integer timestamp, datetime, or " "timedelta. Got %s" % type(expiration) ) return expiration
[ "def", "get_expiration_seconds_v2", "(", "expiration", ")", ":", "# If it's a timedelta, add it to `now` in UTC.", "if", "isinstance", "(", "expiration", ",", "datetime", ".", "timedelta", ")", ":", "now", "=", "NOW", "(", ")", ".", "replace", "(", "tzinfo", "=", "_helpers", ".", "UTC", ")", "expiration", "=", "now", "+", "expiration", "# If it's a datetime, convert to a timestamp.", "if", "isinstance", "(", "expiration", ",", "datetime", ".", "datetime", ")", ":", "micros", "=", "_helpers", ".", "_microseconds_from_datetime", "(", "expiration", ")", "expiration", "=", "micros", "//", "10", "**", "6", "if", "not", "isinstance", "(", "expiration", ",", "six", ".", "integer_types", ")", ":", "raise", "TypeError", "(", "\"Expected an integer timestamp, datetime, or \"", "\"timedelta. Got %s\"", "%", "type", "(", "expiration", ")", ")", "return", "expiration" ]
37.518519
0.000962
[ "def get_expiration_seconds_v2(expiration):\n", " \"\"\"Convert 'expiration' to a number of seconds in the future.\n", "\n", " :type expiration: Union[Integer, datetime.datetime, datetime.timedelta]\n", " :param expiration: Point in time when the signed URL should expire.\n", "\n", " :raises: :exc:`TypeError` when expiration is not a valid type.\n", "\n", " :rtype: int\n", " :returns: a timestamp as an absolute number of seconds since epoch.\n", " \"\"\"\n", " # If it's a timedelta, add it to `now` in UTC.\n", " if isinstance(expiration, datetime.timedelta):\n", " now = NOW().replace(tzinfo=_helpers.UTC)\n", " expiration = now + expiration\n", "\n", " # If it's a datetime, convert to a timestamp.\n", " if isinstance(expiration, datetime.datetime):\n", " micros = _helpers._microseconds_from_datetime(expiration)\n", " expiration = micros // 10 ** 6\n", "\n", " if not isinstance(expiration, six.integer_types):\n", " raise TypeError(\n", " \"Expected an integer timestamp, datetime, or \"\n", " \"timedelta. Got %s\" % type(expiration)\n", " )\n", " return expiration" ]
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.047619047619047616 ]
27
0.001764
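A standalone illustration of the same expiration-conversion idea using only the standard library; it is not the google-cloud-storage helper itself and skips its six/_helpers plumbing:

import datetime

def expiration_to_epoch_seconds(expiration):
    # timedelta -> absolute datetime in UTC
    if isinstance(expiration, datetime.timedelta):
        expiration = datetime.datetime.now(datetime.timezone.utc) + expiration
    # datetime -> integer seconds since the epoch
    if isinstance(expiration, datetime.datetime):
        expiration = int(expiration.timestamp())
    if not isinstance(expiration, int):
        raise TypeError("Expected an integer timestamp, datetime, or timedelta.")
    return expiration

print(expiration_to_epoch_seconds(3600))                         # integer passes through
print(expiration_to_epoch_seconds(datetime.timedelta(hours=1)))  # one hour from now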
def buildhtml(self): """Build the HTML page Create the htmlheader with css / js Create html page """ self.buildcontent() self.buildhtmlheader() self.content = self._htmlcontent.decode('utf-8') # need to ensure unicode self._htmlcontent = self.template_page_highcharts.render(chart=self) return self._htmlcontent
[ "def", "buildhtml", "(", "self", ")", ":", "self", ".", "buildcontent", "(", ")", "self", ".", "buildhtmlheader", "(", ")", "self", ".", "content", "=", "self", ".", "_htmlcontent", ".", "decode", "(", "'utf-8'", ")", "# need to ensure unicode", "self", ".", "_htmlcontent", "=", "self", ".", "template_page_highcharts", ".", "render", "(", "chart", "=", "self", ")", "return", "self", ".", "_htmlcontent" ]
37.4
0.010444
[ "def buildhtml(self):\n", " \"\"\"Build the HTML page\n", " Create the htmlheader with css / js\n", " Create html page\n", " \"\"\"\n", " self.buildcontent()\n", " self.buildhtmlheader()\n", " self.content = self._htmlcontent.decode('utf-8') # need to ensure unicode\n", " self._htmlcontent = self.template_page_highcharts.render(chart=self)\n", " return self._htmlcontent" ]
[ 0, 0.03225806451612903, 0, 0, 0, 0, 0, 0.024390243902439025, 0, 0.03125 ]
10
0.00879
def eval(w,t,x,msk,s): """ Pythia server-side computation of intermediate PRF output. @w: ensemble key selector (any string, e.g. webserver ID) @t: tweak (any string, e.g. user ID) @x: message (any string) @msk: Pythia server's master secret key @s: state value from Pythia server's key table @returns: (y, kw, dummy=None) where: y: intermediate result kw: secret key bound to w (needed for proof) beta: H(kw,t,x) (needed for proof) """ # Verify types assertType(w, (str, int, long)) assertType(t, (str, int, long)) assertType(x, (str, int, long)) # Construct the key kw = genKw(w,msk,s) # Compute y beta = hashG1(t, x) y = beta*kw return y,kw,beta
[ "def", "eval", "(", "w", ",", "t", ",", "x", ",", "msk", ",", "s", ")", ":", "# Verify types", "assertType", "(", "w", ",", "(", "str", ",", "int", ",", "long", ")", ")", "assertType", "(", "t", ",", "(", "str", ",", "int", ",", "long", ")", ")", "assertType", "(", "x", ",", "(", "str", ",", "int", ",", "long", ")", ")", "# Construct the key", "kw", "=", "genKw", "(", "w", ",", "msk", ",", "s", ")", "# Compute y", "beta", "=", "hashG1", "(", "t", ",", "x", ")", "y", "=", "beta", "*", "kw", "return", "y", ",", "kw", ",", "beta" ]
29.24
0.011921
[ "def eval(w,t,x,msk,s):\n", " \"\"\"\n", " Pythia server-side computation of intermediate PRF output.\n", " @w: ensemble key selector (any string, e.g. webserver ID)\n", " @t: tweak (any string, e.g. user ID)\n", " @x: message (any string)\n", " @msk: Pythia server's master secret key\n", " @s: state value from Pythia server's key table\n", " @returns: (y, kw, dummy=None)\n", " where: y: intermediate result\n", " kw: secret key bound to w (needed for proof)\n", " beta: H(kw,t,x) (needed for proof)\n", " \"\"\"\n", " # Verify types\n", " assertType(w, (str, int, long))\n", " assertType(t, (str, int, long))\n", " assertType(x, (str, int, long))\n", "\n", " # Construct the key\n", " kw = genKw(w,msk,s)\n", "\n", " # Compute y\n", " beta = hashG1(t, x)\n", " y = beta*kw\n", " return y,kw,beta" ]
[ 0.17391304347826086, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.08333333333333333, 0, 0, 0, 0, 0.15 ]
25
0.01629
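In group notation, the eval record computes the intermediate PRF value y = k_w * H1(t, x), where k_w = genKw(w, msk, s) is the per-ensemble secret scalar and H1 = hashG1 maps the tweak/message pair into the group G1; the code returns beta = H1(t, x) and k_w alongside y so the server can later produce a proof (note the tuple returned by the code is (y, kw, beta), even though the docstring header mentions dummy=None).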
def create_from_dict(cls, src_dict, roi_skydir=None, rescale=False): """Create a source object from a python dictionary. Parameters ---------- src_dict : dict Dictionary defining the properties of the source. """ src_dict = copy.deepcopy(src_dict) src_dict.setdefault('SpatialModel', 'PointSource') src_dict.setdefault('Spectrum_Filename', None) src_dict.setdefault('SpectrumType', 'PowerLaw') src_dict['SpatialType'] = get_spatial_type(src_dict['SpatialModel']) spectrum_type = src_dict['SpectrumType'] spatial_type = src_dict['SpatialType'] spectral_pars = src_dict.pop('spectral_pars', {}) spatial_pars = src_dict.pop('spatial_pars', {}) if not spectral_pars: spectral_pars = extract_pars_from_dict(spectrum_type, src_dict) norm_par_name = get_function_norm_par_name(spectrum_type) if norm_par_name is not None: spectral_pars[norm_par_name].setdefault('free', True) if not spatial_pars: spatial_pars = extract_pars_from_dict(spatial_type, src_dict) for k in ['RA', 'DEC', 'Prefactor']: if k in spatial_pars: del spatial_pars[k] spectral_pars = create_pars_from_dict(spectrum_type, spectral_pars, rescale) spatial_pars = create_pars_from_dict(spatial_type, spatial_pars, False) if 'file' in src_dict: src_dict['Spectrum_Filename'] = src_dict.pop('file') if spectrum_type == 'DMFitFunction' and src_dict['Spectrum_Filename'] is None: src_dict['Spectrum_Filename'] = os.path.join('$FERMIPY_DATA_DIR', 'gammamc_dif.dat') src_dict['spectral_pars'] = cast_pars_dict(spectral_pars) src_dict['spatial_pars'] = cast_pars_dict(spatial_pars) if 'name' in src_dict: name = src_dict['name'] src_dict['Source_Name'] = src_dict.pop('name') elif 'Source_Name' in src_dict: name = src_dict['Source_Name'] else: raise Exception('Source name undefined.') skydir = wcs_utils.get_target_skydir(src_dict, roi_skydir) src_dict['RAJ2000'] = skydir.ra.deg src_dict['DEJ2000'] = skydir.dec.deg radec = np.array([skydir.ra.deg, skydir.dec.deg]) return cls(name, src_dict, radec=radec)
[ "def", "create_from_dict", "(", "cls", ",", "src_dict", ",", "roi_skydir", "=", "None", ",", "rescale", "=", "False", ")", ":", "src_dict", "=", "copy", ".", "deepcopy", "(", "src_dict", ")", "src_dict", ".", "setdefault", "(", "'SpatialModel'", ",", "'PointSource'", ")", "src_dict", ".", "setdefault", "(", "'Spectrum_Filename'", ",", "None", ")", "src_dict", ".", "setdefault", "(", "'SpectrumType'", ",", "'PowerLaw'", ")", "src_dict", "[", "'SpatialType'", "]", "=", "get_spatial_type", "(", "src_dict", "[", "'SpatialModel'", "]", ")", "spectrum_type", "=", "src_dict", "[", "'SpectrumType'", "]", "spatial_type", "=", "src_dict", "[", "'SpatialType'", "]", "spectral_pars", "=", "src_dict", ".", "pop", "(", "'spectral_pars'", ",", "{", "}", ")", "spatial_pars", "=", "src_dict", ".", "pop", "(", "'spatial_pars'", ",", "{", "}", ")", "if", "not", "spectral_pars", ":", "spectral_pars", "=", "extract_pars_from_dict", "(", "spectrum_type", ",", "src_dict", ")", "norm_par_name", "=", "get_function_norm_par_name", "(", "spectrum_type", ")", "if", "norm_par_name", "is", "not", "None", ":", "spectral_pars", "[", "norm_par_name", "]", ".", "setdefault", "(", "'free'", ",", "True", ")", "if", "not", "spatial_pars", ":", "spatial_pars", "=", "extract_pars_from_dict", "(", "spatial_type", ",", "src_dict", ")", "for", "k", "in", "[", "'RA'", ",", "'DEC'", ",", "'Prefactor'", "]", ":", "if", "k", "in", "spatial_pars", ":", "del", "spatial_pars", "[", "k", "]", "spectral_pars", "=", "create_pars_from_dict", "(", "spectrum_type", ",", "spectral_pars", ",", "rescale", ")", "spatial_pars", "=", "create_pars_from_dict", "(", "spatial_type", ",", "spatial_pars", ",", "False", ")", "if", "'file'", "in", "src_dict", ":", "src_dict", "[", "'Spectrum_Filename'", "]", "=", "src_dict", ".", "pop", "(", "'file'", ")", "if", "spectrum_type", "==", "'DMFitFunction'", "and", "src_dict", "[", "'Spectrum_Filename'", "]", "is", "None", ":", "src_dict", "[", "'Spectrum_Filename'", "]", "=", "os", ".", "path", ".", "join", "(", "'$FERMIPY_DATA_DIR'", ",", "'gammamc_dif.dat'", ")", "src_dict", "[", "'spectral_pars'", "]", "=", "cast_pars_dict", "(", "spectral_pars", ")", "src_dict", "[", "'spatial_pars'", "]", "=", "cast_pars_dict", "(", "spatial_pars", ")", "if", "'name'", "in", "src_dict", ":", "name", "=", "src_dict", "[", "'name'", "]", "src_dict", "[", "'Source_Name'", "]", "=", "src_dict", ".", "pop", "(", "'name'", ")", "elif", "'Source_Name'", "in", "src_dict", ":", "name", "=", "src_dict", "[", "'Source_Name'", "]", "else", ":", "raise", "Exception", "(", "'Source name undefined.'", ")", "skydir", "=", "wcs_utils", ".", "get_target_skydir", "(", "src_dict", ",", "roi_skydir", ")", "src_dict", "[", "'RAJ2000'", "]", "=", "skydir", ".", "ra", ".", "deg", "src_dict", "[", "'DEJ2000'", "]", "=", "skydir", ".", "dec", ".", "deg", "radec", "=", "np", ".", "array", "(", "[", "skydir", ".", "ra", ".", "deg", ",", "skydir", ".", "dec", ".", "deg", "]", ")", "return", "cls", "(", "name", ",", "src_dict", ",", "radec", "=", "radec", ")" ]
38.984375
0.001173
[ "def create_from_dict(cls, src_dict, roi_skydir=None, rescale=False):\n", " \"\"\"Create a source object from a python dictionary.\n", "\n", " Parameters\n", " ----------\n", " src_dict : dict\n", " Dictionary defining the properties of the source.\n", "\n", " \"\"\"\n", " src_dict = copy.deepcopy(src_dict)\n", " src_dict.setdefault('SpatialModel', 'PointSource')\n", " src_dict.setdefault('Spectrum_Filename', None)\n", " src_dict.setdefault('SpectrumType', 'PowerLaw')\n", " src_dict['SpatialType'] = get_spatial_type(src_dict['SpatialModel'])\n", "\n", " spectrum_type = src_dict['SpectrumType']\n", " spatial_type = src_dict['SpatialType']\n", "\n", " spectral_pars = src_dict.pop('spectral_pars', {})\n", " spatial_pars = src_dict.pop('spatial_pars', {})\n", "\n", " if not spectral_pars:\n", " spectral_pars = extract_pars_from_dict(spectrum_type, src_dict)\n", " norm_par_name = get_function_norm_par_name(spectrum_type)\n", " if norm_par_name is not None:\n", " spectral_pars[norm_par_name].setdefault('free', True)\n", "\n", " if not spatial_pars:\n", " spatial_pars = extract_pars_from_dict(spatial_type, src_dict)\n", " for k in ['RA', 'DEC', 'Prefactor']:\n", " if k in spatial_pars:\n", " del spatial_pars[k]\n", "\n", " spectral_pars = create_pars_from_dict(spectrum_type, spectral_pars,\n", " rescale)\n", " spatial_pars = create_pars_from_dict(spatial_type, spatial_pars,\n", " False)\n", "\n", " if 'file' in src_dict:\n", " src_dict['Spectrum_Filename'] = src_dict.pop('file')\n", "\n", " if spectrum_type == 'DMFitFunction' and src_dict['Spectrum_Filename'] is None:\n", " src_dict['Spectrum_Filename'] = os.path.join('$FERMIPY_DATA_DIR',\n", " 'gammamc_dif.dat')\n", "\n", " src_dict['spectral_pars'] = cast_pars_dict(spectral_pars)\n", " src_dict['spatial_pars'] = cast_pars_dict(spatial_pars)\n", "\n", " if 'name' in src_dict:\n", " name = src_dict['name']\n", " src_dict['Source_Name'] = src_dict.pop('name')\n", " elif 'Source_Name' in src_dict:\n", " name = src_dict['Source_Name']\n", " else:\n", " raise Exception('Source name undefined.')\n", "\n", " skydir = wcs_utils.get_target_skydir(src_dict, roi_skydir)\n", "\n", " src_dict['RAJ2000'] = skydir.ra.deg\n", " src_dict['DEJ2000'] = skydir.dec.deg\n", "\n", " radec = np.array([skydir.ra.deg, skydir.dec.deg])\n", "\n", " return cls(name, src_dict, radec=radec)" ]
[ 0, 0.016666666666666666, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.011494252873563218, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.02127659574468085 ]
64
0.000772