content: string (lengths 35–762k)
sha1: string (length 40)
id: int64 (0–3.66M)
def get_nblocks_ntraces(f, nblocks, ntraces, pts, nbheaders, dt, read_blockhead):
    """
    Read n blocks from a Varian binary file which may have multiple traces
    per block.

    Parameters:

    * f               File object of Varian binary file to read from.
    * nblocks         Number of blocks to read.
    * ntraces         Number of traces per block.
    * pts             Number of points per trace.
    * nbheaders       Number of block headers in each block.
    * dt              Data type of data in binary file (real).
    * read_blockhead  Set to True to read the Varian blockheader(s) into the
                      returned dictionary. False ignores them.

    Returns: dic, data if read_blockhead is True, data if False

    """
    # create an empty array to hold data
    data = np.empty((nblocks * ntraces, pts), dtype=dt)
    if read_blockhead:
        bdic = [0] * nblocks  # fixed: was the undefined name `nblock`

    # read the data
    for i in range(nblocks):  # `xrange` replaced with the Python 3 `range`
        if read_blockhead:
            bdic[i], bdata = get_block_ntraces(f, ntraces, pts, nbheaders, dt, True)
        else:
            bdata = get_block_ntraces(f, ntraces, pts, nbheaders, dt, False)
        data[i * ntraces:(i + 1) * ntraces] = bdata

    if read_blockhead:
        return bdic, data
    else:
        return data
b99ddcf842dbc02e1afb9067e198e7e241d1a8c0
1,600
def calcMedian(list_o_tuples):
    """Given a list of tuples (A, B), where A = category and B = counts,
    returns the A whose cumulative count contains the median count value."""
    # calc total
    ct = 0
    for (a, b) in list_o_tuples:
        ct += float(b)
    med = ct / 2

    # find A
    ct = 0
    for (i, (a, b)) in enumerate(list_o_tuples):
        ct += float(b)
        if ct > med:
            break
        # print(i, a, b)
    return a
f09a9ac4b1e7a84982bf6b33e4f43e1b2c9f64f6
1,601
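A minimal usage sketch for calcMedian above; the category/count pairs are made up for illustration:

counts = [("red", 1), ("green", 5), ("blue", 2)]
print(calcMedian(counts))  # -> 'green': the cumulative count crosses the median (4.0) inside 'green'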
def add(n1, n2):
    """Adds the 2 given numbers"""
    return n1 + n2
ca670819dab8230e355e1b236d9cc74ed0b3b868
1,602
def kwarg_any(kwarg_functions):
    """Resolve kwarg predicates with short-circuit evaluation. This
    optimization technique means we do not have to evaluate every predicate
    if one is already true.
    """
    return any(kwarg_function() for kwarg_function in kwarg_functions)
3303e1a871bb41920ba0f41e4928e05b6d876c1e
1,603
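A quick sketch of the short-circuit behavior of kwarg_any; the predicate lambdas are illustrative:

predicates = [lambda: False, lambda: True, lambda: 1 / 0]  # the third is never evaluated
print(kwarg_any(predicates))  # -> True; any() stops at the first truthy predicate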
# Assumed imports: this function is written against JAX; `Array` here is
# taken to be the jax.Array type used in jax-md style code.
import jax.numpy as jnp
from jax import Array


def _behler_parrinello_cutoff_fn(dr: Array, cutoff_distance: float = 8.0) -> Array:
    """Function of pairwise distance that smoothly goes to zero at the cutoff."""
    # Also returns zero if the pairwise distance is zero,
    # to prevent a particle from interacting with itself.
    return jnp.where((dr < cutoff_distance) & (dr > 1e-7),
                     0.5 * (jnp.cos(jnp.pi * dr / cutoff_distance) + 1), 0)
707f3521edf1be13c6f3c830404f851a8b606613
1,604
from typing import Optional, Tuple


def _get_build_to_download(build: str) -> Tuple[str, Optional[str]]:
    """Get the build version to download.

    If the passed value is not an explicit build number (eg. 15.0) then the
    build for the current day of that major/minor will be downloaded.

    :param build: The target build number.
    :return: The target build information.

    """
    components = build.split(".")
    num_components = len(components)

    if num_components == 1:
        components.append("0")
        num_components = len(components)  # fixed: keep the count in sync so "15" returns ("15.0", None)

    if num_components == 2:
        return ".".join(components), None

    # Always treat the last component as the 'build'. Unlike Houdini itself,
    # which would treat a release candidate version as part of the build number,
    # the web api will treat the candidate version as the build number and
    # the 3 main components as the version.
    return ".".join(components[: num_components - 1]), components[-1]
96215d80af60c25877da3eb7ff65147b2652a592
1,605
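A sketch of the expected return values of _get_build_to_download for the three input shapes it handles:

print(_get_build_to_download("15"))        # -> ("15.0", None)
print(_get_build_to_download("15.0"))      # -> ("15.0", None)
print(_get_build_to_download("15.0.123"))  # -> ("15.0", "123")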
def label(type=None, is_emphasis=True, is_label_show=False, label_pos=None,
          label_text_color="#000", label_text_size=12, formatter=None, **kwargs):
    """ Text label of the graphic item, to explain some data information
    about it, like value, name and so on. In ECharts 3, to make the
    configuration structure flatter, label is taken to be at the same level
    as itemStyle, and has the two statuses normal and emphasis, as itemStyle
    does.

    :param type: Chart type
    :param is_emphasis: It specifies whether to show label in emphasis status.
    :param is_label_show: It specifies whether to show label in normal status.
    :param label_pos: Label position. It can be 'top', 'left', 'right',
        'bottom', 'inside', 'outside'
    :param label_text_color: Label text color.
    :param label_text_size: Label font size.
    :param formatter: Data label formatter; it can be 'series', 'name',
        'value', 'percent'
    :param kwargs:
    :return:
    """
    if label_pos is None:
        label_pos = "outside" if type in ["pie", "graph"] else "top"
    _label = {
        "normal": {"show": is_label_show,
                   "position": label_pos,
                   "textStyle": {"color": label_text_color,
                                 "fontSize": label_text_size}},
        "emphasis": {"show": is_emphasis}
    }
    fmat = {"series": "{a} ", "name": "{b} ", "value": "{c} ", "percent": "{d}% "}
    if formatter is None:
        _formatter = "{b} {d}%" if type == "pie" else None
    else:
        _formatter = "".join([fmat.get(f) for f in formatter if fmat.get(f, None)])
    if type != "graph":
        _label.get("normal").update(formatter=_formatter)
    return _label
728997988797baef2ad080c8a317936e91f5c579
1,606
def MPC_ComputeCrc(card_type: TechnologyType, frame: bytes) -> bytes:
    """Computes frame CRC

    Parameters
    ----------
    card_type : TechnologyType
        Technology type
    frame : bytes
        Input frame

    Returns
    -------
    bytes
        CRC bytes
    """
    if not isinstance(card_type, TechnologyType):
        raise TypeError('card_type must be an instance of '
                        'TechnologyType IntEnum')
    if not isinstance(frame, bytes):
        raise TypeError('frame must be an instance of bytes')
    _check_limits(c_uint32, len(frame), 'frame')
    crc1 = c_uint8()
    crc2 = c_uint8()
    CTS3Exception._check_error(_MPuLib.MPC_ComputeCrc(
        c_uint8(0),
        c_int32(card_type),
        frame,
        c_uint32(len(frame)),
        byref(crc1),
        byref(crc2)))
    return bytes([crc1.value, crc2.value])
3a545aea7adbae35d7fa3a8f4bd758a45dae78ae
1,607
def max_contiguous(input, value, _builder=None):
    """
    Let the compiler know that the first `value` values in :code:`input` are
    contiguous.
    """
    value = _constexpr_to_value(value)
    return semantic.max_contiguous(input, value)
6e90d76487677c270ecd73ece7ebbf39e4fdb9bf
1,608
import torch


def kl_reverse(logu: torch.Tensor) -> torch.Tensor:
    """
    Log-space Csiszar function for reverse KL-divergence D_f(p,q) = KL(q||p).

    Also known as the exclusive KL-divergence and negative ELBO, minimizing
    results in zero-forcing / mode-seeking behavior.

    Args:
        logu (torch.Tensor): ``p.log_prob``s evaluated at samples from q.
    """
    return -logu
fcc9035de183cb6d5b51e169dd764ff92ab290aa
1,609
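A minimal Monte Carlo sketch using kl_reverse to estimate KL(q||p); the two normal distributions are illustrative:

import torch

q = torch.distributions.Normal(0.0, 1.0)
p = torch.distributions.Normal(1.0, 2.0)
x = q.sample((100000,))
logu = p.log_prob(x) - q.log_prob(x)  # log p(x) - log q(x) at samples from q
print(kl_reverse(logu).mean())        # approximates KL(q||p), here ~0.44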
import sys


def twos_comp_to_signed(val: int, n_bits: int) -> int:
    """
    Convert a "two's complement" representation (as an integer) to its signed
    version.

    Args:
        val: positive integer representing a number in two's complement format
        n_bits: number of bits (which must reflect a whole number of bytes)

    Returns:
        signed integer

    See http://stackoverflow.com/questions/1604464/twos-complement-in-python
    """
    assert n_bits % 8 == 0, "Must specify a whole number of bytes"
    n_bytes = n_bits // 8
    b = val.to_bytes(n_bytes, byteorder=sys.byteorder, signed=False)
    return int.from_bytes(b, byteorder=sys.byteorder, signed=True)
cfd9556b79cee7f07c9f7ba8cfa781907fa5da45
1,610
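A few expected outputs of twos_comp_to_signed:

print(twos_comp_to_signed(0xFF, 8))     # -> -1
print(twos_comp_to_signed(0x7F, 8))     # -> 127
print(twos_comp_to_signed(0xFF00, 16))  # -> -256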
import os


def delete_file(subject_file_id):
    """deletes a particular file

    :rtype tuple
    :return (subject_file_id, deleted_file_path)
    """
    file_entity = SubjectFileEntity.query.filter_by(id=subject_file_id).one()
    file_path = file_entity.get_full_path(app.config['REDIDROPPER_UPLOAD_SAVED_DIR'])
    os.remove(file_path)
    file_entity.delete()
    db.session.commit()
    return (subject_file_id, file_path)
99cd73c52717fa18c1a5e965b78d788337df7db0
1,611
import os
import shlex
import subprocess


def do_lint() -> str:
    """
    Execute pylint
    """
    check_command_exists("pylint")
    with safe_cd(SRC):
        lint_output_file_name = f"{PROBLEMS_FOLDER}/lint.txt"
        if os.path.isfile(lint_output_file_name):
            os.remove(lint_output_file_name)

        if IS_DJANGO:
            django_bits = "--load-plugins pylint_django "
        else:
            django_bits = ""

        command_text = (
            f"{PIPENV} pipenv run pylint {django_bits} "
            f"--rcfile=.pylintrc {PROJECT_NAME} "
            f"--output-format=parseable".strip().replace("  ", " ")  # collapse accidental double spaces
        )
        print(command_text)
        command = shlex.split(command_text)

        with open(lint_output_file_name, "w") as outfile:
            env = config_pythonpath()
            subprocess.call(command, stdout=outfile, env=env)

        full_text = open(lint_output_file_name, "r").read()
        lint_did_indeed_run = "Your code has been rated at" in full_text

        fatal_errors = sum(
            1
            for line in open(lint_output_file_name)
            if "no-member" in line
            or "no-name-in-module" in line
            or "import-error" in line
            or ": E" in line
            or ": F" in line
        )

        if fatal_errors > 0:
            for line in open(lint_output_file_name):
                if (
                    "no-member" in line
                    or "no-name-in-module" in line
                    or "import-error" in line
                    or ": E" in line
                    or ": F" in line
                ):
                    print(line)

            message = f"Fatal lint errors : {fatal_errors}"
            if IS_GITLAB:
                with open(lint_output_file_name) as error_file:
                    print(error_file.read())
            say_and_exit(message, "lint")
            return message

        for line in [
            line
            for line in open(lint_output_file_name)
            if not (
                "*************" in line
                or "---------------------" in line
                or "Your code has been rated at" in line
                or line == "\n"
            )
        ]:
            print(line)

        if total_loc() > SMALL_CODE_BASE_CUTOFF:
            cutoff = MAXIMUM_LINT
        else:
            cutoff = 0

        num_lines = sum(
            1
            for line in open(lint_output_file_name)
            if not (
                "*************" in line
                or "---------------------" in line
                or "Your code has been rated at" in line
                or line == "\n"
            )
        )
        if num_lines > cutoff:
            print(f"Too many lines of lint : {num_lines}, max {cutoff}")
            exit(-1)

        num_lines_all_output = sum(1 for _ in open(lint_output_file_name))
        if not lint_did_indeed_run and num_lines_all_output == 0:
            # should always have at least 'found 0 errors' in output
            if os.path.isfile(lint_output_file_name):
                # force lint to re-run, because empty file will be missing
                os.remove(lint_output_file_name)
            print("No lint messages at all, did pylint fail to run or is it installed?")
            exit(-1)

        return "pylint succeeded"
1d7454ea9df06599f6802e55cf4f7cd5e76d4e53
1,612
import json


def json_configs(type, name):
    """
    Base method that extracts the configuration info from the json file
    defined in SETTINGS

    Args:
        type - the name of the type of configuration object to look in
        name - the name of the object whose configs will be extracted

    Returns:
        a dict containing the settings for the object of type and name

    Raises:
        a ValueError if type or name are not defined in the SETTINGS json file
    """
    with open(SETTINGS) as f:
        configs = json.load(f)[type]
    if name not in configs:
        raise ValueError('Unable to find configuration for %s %s' % (type, name))
    return configs[name]
57507adc7a41666084df734972e067015d055cce
1,613
def reload() -> bool:
    """Gracefully reloads uWSGI.

    * http://uwsgi.readthedocs.io/en/latest/Management.html#reloading-the-server
    """
    return False
f020356774d0a500b6755d53d548a804392c39d3
1,614
import re


def predict_imagen(titulo=None, grados=None, ano_lanzamiento=None,
                   paginas=None, codbarras=None):
    """ Predictor for Imagen from model/5a143f443980b50a74003699

        Created using BigMLer
    """

    tm_tokens = 'tokens_only'
    tm_full_term = 'full_terms_only'
    tm_all = 'all'

    def term_matches(text, field_name, term):
        """ Counts the number of occurrences of term and its variants in text """
        forms_list = term_forms[field_name].get(term, [term])
        options = term_analysis[field_name]
        token_mode = options.get('token_mode', tm_tokens)
        case_sensitive = options.get('case_sensitive', False)
        first_term = forms_list[0]
        if token_mode == tm_full_term:
            return full_term_match(text, first_term, case_sensitive)
        else:
            # In token_mode='all' we will match full terms using equals and
            # tokens using contains
            if token_mode == tm_all and len(forms_list) == 1:
                pattern = re.compile(r'^.+\b.+$', re.U)
                if re.match(pattern, first_term):
                    return full_term_match(text, first_term, case_sensitive)
            return term_matches_tokens(text, forms_list, case_sensitive)

    def full_term_match(text, full_term, case_sensitive):
        """Counts the match for full terms according to the case_sensitive
        option
        """
        if not case_sensitive:
            text = text.lower()
            full_term = full_term.lower()
        return 1 if text == full_term else 0

    def get_tokens_flags(case_sensitive):
        """Returns flags for regular expression matching depending on text
        analysis options
        """
        flags = re.U
        if not case_sensitive:
            flags = (re.I | flags)
        return flags

    def term_matches_tokens(text, forms_list, case_sensitive):
        """ Counts the number of occurrences of the words in forms_list in
        the text
        """
        flags = get_tokens_flags(case_sensitive)
        expression = r'(\b|_)%s(\b|_)' % '(\\b|_)|(\\b|_)'.join(forms_list)
        pattern = re.compile(expression, flags=flags)
        matches = re.findall(pattern, text)
        return len(matches)

    term_analysis = {
        "titulo": {
            "case_sensitive": False,
            "token_mode": 'all',
        },
    }
    term_forms = {
        "titulo": {
            "fantásticos": ['fantásticos', 'fantásticas'],
            "gigante": ['gigante', 'gigantes'],
        },
    }

    if codbarras is None:
        return {"prediction": 1.82, "error": 5.53698}
    if codbarras > 9789872414340:
        if ano_lanzamiento is None:
            return {"prediction": 9, "error": 7.02326}
        if ano_lanzamiento > 2008:
            if paginas is None:
                return {"prediction": 10.5, "error": 5.88884}
            if paginas > 90:
                if titulo is None:
                    return {"prediction": 9, "error": 5.08228}
                if term_matches(titulo, "titulo", u"fantásticos") > 0:
                    return {"prediction": 8, "error": 5.08228}
                if term_matches(titulo, "titulo", u"fantásticos") <= 0:
                    if grados is None:
                        return {"prediction": 9.5, "error": 5.26764}
                    if grados == "Elsa Pizzi":
                        return {"prediction": 9, "error": 5.26764}
                    if grados != "Elsa Pizzi":
                        return {"prediction": 10, "error": 5.26764}
            if paginas <= 90:
                if titulo is None:
                    return {"prediction": 12, "error": 5.08228}
                if term_matches(titulo, "titulo", u"gigante") > 0:
                    return {"prediction": 11, "error": 5.08228}
                if term_matches(titulo, "titulo", u"gigante") <= 0:
                    if grados is None:
                        return {"prediction": 12.5, "error": 5.26764}
                    if grados == "Patricia Roggio":
                        return {"prediction": 13, "error": 5.26764}
                    if grados != "Patricia Roggio":
                        return {"prediction": 12, "error": 5.26764}
        if ano_lanzamiento <= 2008:
            if grados is None:
                return {"prediction": 6, "error": 5.08228}
            if grados == "4°, 5°":
                return {"prediction": 7, "error": 5.08228}
            if grados != "4°, 5°":
                if grados == "5°, 6°":
                    return {"prediction": 5, "error": 5.26764}
                if grados != "5°, 6°":
                    return {"prediction": 6, "error": 5.26764}
    if codbarras <= 9789872414340:
        if codbarras > 9789872414309:
            if paginas is None:
                return {"prediction": 3, "error": 5.08228}
            if paginas > 100:
                if grados is None:
                    return {"prediction": 2.5, "error": 5.26764}
                if grados == "4°, 5°":
                    return {"prediction": 2, "error": 5.26764}
                if grados != "4°, 5°":
                    return {"prediction": 3, "error": 5.26764}
            if paginas <= 100:
                return {"prediction": 4, "error": 5.08228}
        if codbarras <= 9789872414309:
            if codbarras > 9789871989852:
                return {"prediction": 1, "error": 0.26071}
            if codbarras <= 9789871989852:
                return {"prediction": 0, "error": 0.04286}
ecee556bf9eb563cb40bf759bb6c4bfdf74922a0
1,615
def traducir_texto(texto, lenguaje_destino):
    """
    Translates an input text.

    .. note::
        Keep the following points in mind when using the
        **traducir_texto** function:

        * The function uses the googletrans library, which relies on the
          Google Translate API, so an internet connection is required.
        * The maximum number of characters in a single text is 15,000.
        * Because of limitations of the web version of Google Translate,
          using the API does not guarantee that the library works correctly
          at all times.
        * If you need a stable API, the official
          `Google Translate API <https://cloud.google.com/translate/docs>`_
          is recommended.
        * If you receive an HTTP 5xx error, Google has probably blocked your
          IP address.
        * For more information, see the
          `googletrans documentation <https://py-googletrans.readthedocs.io/en/latest/>`_.

    :param texto: Input text.
    :type texto: str
    :param lenguaje_destino: The language to translate the text to. For more
        information, see the supported-languages section
        (:ref:`Lenguajes soportados <seccion_lenguajes_soportados>`).
    :type lenguaje_destino: {'es', 'en', 'fr', 'ge'}
    :return: (str) Translated text.
    """
    traductor = Translator()
    # Map the target language to the format expected by the API
    lenguaje_destino = dict_lenguajes[lenguaje_destino]
    lenguaje_destino = dict_lenguajes_simplificado[lenguaje_destino]
    salida = traductor.translate(texto, dest=lenguaje_destino)
    if isinstance(texto, str):
        return salida.text
    else:
        return [i.text for i in salida]
d075b9216f7fc8a53778b41b3a5a7ac67e7556f1
1,616
# Assumed imports: `Rotation` matches the scipy.spatial.transform.Rotation API.
import numpy as np
from scipy.spatial.transform import Rotation


def euler_to_axis_angle(roll: float, pitch: float, yaw: float) -> np.ndarray:
    """Converts Euler angle to Axis-angle format.

    Args:
        roll: rotation angle.
        pitch: up/down angle.
        yaw: left/right angle.

    Returns:
        Equivalent Axis-angle format.
    """
    r = Rotation.from_euler('xyz', [roll, pitch, yaw])
    return r.as_rotvec()
89fff617c2335c35110fa0cf2f7a2d1da194846c
1,617
def add_scalebar(axis, matchx=True, matchy=True, hidex=True, hidey=True,
                 unitsx=None, unitsy=None, scalex=1.0, scaley=1.0,
                 xmax=None, ymax=None, space=None, **kwargs):
    """
    Add scalebars to axes

    Adds a set of scale bars to *ax*, matching the size to the ticks of the
    plot and optionally hiding the x and y axes

    - axis : the axis to attach ticks to
    - matchx,matchy : if True, set size of scale bars to spacing between
      ticks, if False, set size using sizex and sizey params
    - hidex,hidey : if True, hide x-axis and y-axis of parent
    - **kwargs : additional arguments passed to AnchoredScaleBars

    Returns created scalebar object
    """
    def get_tick_size(subaxis):
        tick_size = None
        tick_locs = subaxis.get_majorticklocs()
        if len(tick_locs) > 1:
            tick_size = np.abs(tick_locs[1] - tick_locs[0])
        return tick_size

    if matchx:
        sizex = get_tick_size(axis.xaxis)
    if matchy:
        sizey = get_tick_size(axis.yaxis)

    if 'sizex' in kwargs:
        sizex = kwargs['sizex']
    if 'sizey' in kwargs:
        sizey = kwargs['sizey']

    def autosize(value, maxvalue, scale, n=1, m=10):
        round_to_n = (lambda value, n, m:
                      int(np.ceil(round(value, -int(np.floor(np.log10(abs(value)))) + (n - 1)) / m)) * m)
        while value > maxvalue:
            try:
                value = round_to_n(0.8 * maxvalue * scale, n, m) / scale
            except Exception:
                value /= 10.0
            m /= 10.0
        return value

    if ymax is not None and sizey > ymax:
        sizey = autosize(sizey, ymax, scaley)
    if xmax is not None and sizex > xmax:
        sizex = autosize(sizex, xmax, scalex)

    kwargs['sizex'] = sizex
    kwargs['sizey'] = sizey

    if unitsx is None:
        unitsx = ''
    if unitsy is None:
        unitsy = ''

    if 'labelx' not in kwargs or kwargs['labelx'] is None:
        kwargs['labelx'] = '%.3g %s' % (kwargs['sizex'] * scalex, unitsx)
    if 'labely' not in kwargs or kwargs['labely'] is None:
        kwargs['labely'] = '%.3g %s' % (kwargs['sizey'] * scaley, unitsy)

    # add space for scalebar
    if space is not None:
        ylim0, ylim1 = axis.get_ylim()
        ylim = (ylim0 - space, ylim1)
        if ylim0 > ylim1:  # if y axis is inverted
            ylim = (ylim0 + space, ylim1)
        axis.set_ylim(ylim)

    scalebar = AnchoredScaleBar(axis, **kwargs)
    axis.add_artist(scalebar)

    if hidex:
        axis.xaxis.set_visible(False)
    if hidey:
        axis.yaxis.set_visible(False)
    if hidex and hidey:
        axis.set_frame_on(False)

    return scalebar
2d481f0313608a1a29d3c99fe4ac21faf1ab1d9d
1,618
import sys
from getopt import getopt, GetoptError  # fixed: getopt() and GetoptError are used unqualified below


def parse_args():
    """Parse and enforce command-line arguments."""
    try:
        options, args = getopt(sys.argv[1:], "l:dh", ["listen=", "debug", "help"])
    except GetoptError as e:
        print("error: %s." % e, file=sys.stderr)
        print_usage()
        sys.exit(1)

    listen = {"host": "127.0.0.1", "port": 8080}  # port kept as int for consistency
    debug = False

    for option, value in options:
        if option in ("-h", "--help"):
            print_usage()
            sys.exit(0)
        elif option in ("-l", "--listen"):
            fields = value.split(":")
            listen = {"host": fields[0].strip(),
                      "port": int(fields[1]) if len(fields) > 1 else 8080}
        elif option in ("-d", "--debug"):
            debug = True

    return (listen, debug)
15225a7d0802787c0ede62b3c94a9af09b92ed33
1,619
def create_medoids_summary(season, country, result, d, names):
    """
    Create cluster based summary of medoids' description

    Parameters:
        season: str, season used to cluster
        country: str, country used to cluster
        result: cluster results joined to customer features
        d: trajectory clustering results
        names: list of cluster names
    """
    results_with_clusters = result[pd.notnull(result['cluster'])]
    season_name = seasons[season.lower()]
    num_visitors_thousand = calc_num_visitors_in_thousands(result)
    num_clusters = calc_num_clusters(d)

    summary_text = f"""In the last {season_name} roughly {num_visitors_thousand} thousand tourists visited Tuscany from {country.title()}. The data shows us {num_clusters} clusters. Each line in the graph above represents a cluster's typical path that tourists from {country.title()} followed. These paths are displayed as differently-coloured lines in the map here above. \n"""
    summary_text = summary_text + create_medoid_basic_description(results_with_clusters, names, num_clusters)
    print(summary_text)
    write_file(country, season, summary_text, 'summary')
    return summary_text
b86a266a7bf31ccd836b653d1e137f60d65c02ff
1,620
from typing import Iterable
import time


def backtest(
    strategy,
    data,  # Treated as csv path if str, and as a pd.DataFrame otherwise
    commission=COMMISSION_PER_TRANSACTION,
    init_cash=INIT_CASH,
    data_format="c",
    plot=True,
    verbose=True,
    sort_by="rnorm",
    **kwargs
):
    """
    Backtest financial data with a specified trading strategy

    {0}
    """

    # Setting initial support for 1 cpu
    # Return the full strategy object to get all run information
    cerebro = bt.Cerebro(stdstats=False, maxcpus=1, optreturn=False)
    cerebro.addobserver(bt.observers.Broker)
    cerebro.addobserver(bt.observers.Trades)
    cerebro.addobserver(bt.observers.BuySell)

    # Convert all non iterables and strings into lists
    kwargs = {
        k: v if isinstance(v, Iterable) and not isinstance(v, str) else [v]
        for k, v in kwargs.items()
    }

    cerebro.optstrategy(
        STRATEGY_MAPPING[strategy],
        init_cash=[init_cash],
        transaction_logging=[verbose],
        **kwargs
    )

    # Apply Total, Average, Compound and Annualized Returns calculated using
    # a logarithmic approach
    cerebro.addanalyzer(btanalyzers.Returns, _name="returns")
    cerebro.addanalyzer(btanalyzers.SharpeRatio, _name="mysharpe")

    cerebro.broker.setcommission(commission=commission)

    # Treat `data` as a path if it's a string; otherwise, it's treated as a pandas dataframe
    if isinstance(data, str):
        if verbose:
            print("Reading path as pandas dataframe ...")
        data = pd.read_csv(data, header=0, parse_dates=["dt"])

    # If data has `dt` as the index, set `dt` as the first column
    # This means `backtest` supports the dataframe whether `dt` is the index or a column
    if data.index.name == "dt":
        data = data.reset_index()

    pd_data = bt.feeds.PandasData(dataname=data, **DATA_FORMAT_MAPPING[data_format])

    cerebro.adddata(pd_data)
    cerebro.broker.setcash(init_cash)
    # Allows us to set buy price based on next day closing
    # (technically impossible, but reasonable assuming you use all your money
    # to buy market at the end of the next day)
    cerebro.broker.set_coc(True)
    if verbose:
        print("Starting Portfolio Value: %.2f" % cerebro.broker.getvalue())

    # clock the start of the process
    tstart = time.time()
    stratruns = cerebro.run()
    # clock the end of the process
    tend = time.time()

    params = []
    metrics = []
    if verbose:
        print("==================================================")
    for stratrun in stratruns:
        if verbose:
            print("**************************************************")
        for strat in stratrun:
            p = strat.p._getkwargs()
            p = {
                k: v
                for k, v in p.items()
                if k not in ["periodic_logging", "transaction_logging"]
            }
            returns = strat.analyzers.returns.get_analysis()
            sharpe = strat.analyzers.mysharpe.get_analysis()
            # Combine dicts for returns and sharpe
            m = {
                **returns,
                **sharpe,
                "pnl": strat.pnl,
                "final_value": strat.final_value,
            }
            params.append(p)
            metrics.append(m)
            if verbose:
                print("--------------------------------------------------")
                print(p)
                print(returns)
                print(sharpe)

    params_df = pd.DataFrame(params)
    metrics_df = pd.DataFrame(metrics)

    # Get indices based on `sort_by` metric
    optim_idxs = np.argsort(metrics_df[sort_by].values)[::-1]
    sorted_params_df = params_df.iloc[optim_idxs].reset_index(drop=True)
    sorted_metrics_df = metrics_df.iloc[optim_idxs].reset_index(drop=True)
    sorted_combined_df = pd.concat([sorted_params_df, sorted_metrics_df], axis=1)

    # print out the result
    print("Time used (seconds):", str(tend - tstart))

    # Save optimal parameters as dictionary
    optim_params = sorted_params_df.iloc[0].to_dict()
    optim_metrics = sorted_metrics_df.iloc[0].to_dict()
    print("Optimal parameters:", optim_params)
    print("Optimal metrics:", optim_metrics)

    if plot:
        has_volume = DATA_FORMAT_MAPPING[data_format]["volume"] is not None
        # Plot only with the optimal parameters when multiple strategy runs are required
        if params_df.shape[0] == 1:
            # This handles the Colab Plotting
            # Simple Check if we are in Colab (the exact import attempted in the
            # original, flattened source was lost; a Colab-only import is assumed)
            try:
                from google.colab import drive  # noqa: F401
                iplot = False
            except ImportError:
                iplot = True
            cerebro.plot(volume=has_volume, figsize=(30, 15), iplot=iplot)
        else:
            print("=============================================")
            print("Plotting backtest for optimal parameters ...")
            backtest(
                strategy,
                data,
                commission=commission,
                data_format=data_format,
                plot=plot,
                verbose=verbose,
                sort_by=sort_by,
                **optim_params
            )

    return sorted_combined_df
4822d7f6ec2bdc260b084408b3668c0636c5f19c
1,621
def mapflatdeep(iteratee, *seqs):
    """
    Map an `iteratee` to each element of each iterable in `seqs` and
    recursively flatten the results.

    Examples:
        >>> list(mapflatdeep(lambda n: [[n, n]], [1, 2]))
        [1, 1, 2, 2]

    Args:
        iteratee (object): Iteratee applied per iteration.
        *seqs (Iterable): Iterables to iterate over.

    Yields:
        Elements resulting from recursive flatten + map operations.
    """
    return flattendeep(map(iteratee, *seqs))
df297c279f93edca4bff348189386241832c7e5b
1,622
def eHealthClass_getSkinConductanceVoltage():
    """eHealthClass_getSkinConductanceVoltage() -> float"""
    return _ehealth.eHealthClass_getSkinConductanceVoltage()
f97edfe96d443991a16cb94a405806815c29546a
1,623
def construct_item(passage: str, labels):
    """
    Builds an item from the given passage and labels, e.g. a passage such as
    '我在巴拉巴拉...' with labels like
    ['B-ASP', 'I-ASP', 'I-ASP', 'I-ASP', ..., 'I-OPI', 'I-OPI', 'O'].

    An example of the constructed result:
        {
            'passage': '使用一段时间才来评价,淡淡的香味,喜欢!',
            'aspects': [['香味', 14, 16]],
            'opinions': [['喜欢', 17, 19]]
        }
    :return:
    """
    assert len(passage) == len(labels)
    aspects, opinions = [], []
    for i, char, label in zip(range(len(passage)), passage, labels):
        if label == "O":
            continue
        elif label.startswith("B"):
            if label.endswith("ASP"):
                aspects.append([char, i])
            elif label.endswith("OPI"):
                opinions.append([char, i])
            else:
                raise Exception("label must be in set {'B-ASP', 'I-ASP', 'B-OPI', 'I-OPI', 'O'}.")
        elif label.endswith("ASP"):
            if (i == 0 or not labels[i - 1].endswith("ASP")):
                aspects.append([char, i])
            else:
                aspects[-1][0] += char
        elif label.endswith("OPI"):
            if (i == 0 or not labels[i - 1].endswith("OPI")):
                opinions.append([char, i])
            else:
                opinions[-1][0] += char
        else:
            raise Exception("label must be in set {'B-ASP', 'I-ASP', 'B-OPI', 'I-OPI', 'O'}.")
    aspects = [[aspect[0], aspect[1], aspect[1] + len(aspect[0])] for aspect in aspects]
    opinions = [[opinion[0], opinion[1], opinion[1] + len(opinion[0])] for opinion in opinions]
    result = {
        "passage": passage,
        "aspects": aspects,
        "opinions": opinions
    }
    return result
b4a31b67df7c82b56e0eb388e964422f257a9293
1,624
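A minimal usage sketch for construct_item with a short made-up BIO-tagged passage:

passage = "香味好喜欢"
labels = ["B-ASP", "I-ASP", "O", "B-OPI", "I-OPI"]
print(construct_item(passage, labels))
# -> {'passage': '香味好喜欢', 'aspects': [['香味', 0, 2]], 'opinions': [['喜欢', 3, 5]]}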
def getStartingAddress(packet):
    """Get the address of a modbus request"""
    return (ord(packet[8]) << 8) + ord(packet[9])
83dc55585d67169b0716cc3e98008574c434213b
1,625
from typing import Union


def rf_local_unequal(left_tile_col, rhs: Union[float, int, Column_type]) -> Column:
    """Cellwise inequality comparison between two tiles, or with a scalar value"""
    if isinstance(rhs, (float, int)):
        rhs = lit(rhs)
    return _apply_column_function('rf_local_unequal', left_tile_col, rhs)
3d2b95d35744f35c4f27802952366a5ffb1cf557
1,626
def get_normalized_star_spectrum(spectral_type, magnitude, filter_name):
    """
    spec_data = get_normalized_star_spectrum(spectral_type, magnitude, filter_name)

    Returns a structure containing the synthetic spectrum of the star having
    the spectral type and magnitude in the specified input filter.
    Magnitude is in VEGAMAG-F(lambda) system.
    Spectra are from PICKLES, PASP, 110, 863 (1998).
    Absolute flux spectra, no effect of atmospheric and instrument transmission.

    Parameters
    ----------
    spectral_type: string.
        spectral type and luminosity class (e.g. G2V or M4III) or 'vega'
    magnitude: float.
        magnitude in the filter_name filter
    filter_name: string.
        Name of the filter. See Filters.get() for the list of available filters

    Returns
    -------
    spectrum: synphot.SourceSpectrum object defining the spectrum

    Examples
    --------
    Plot the spectrum of Vega, A0V and G2V stars of mag=8 defined on the JohnsonR filter

    >>> sp = get_normalized_star_spectrum('vega', 8, Filters.JOHNSON_R)
    >>> spA0V = get_normalized_star_spectrum('A0V', 8, Filters.JOHNSON_R)
    >>> spG2V = get_normalized_star_spectrum('G2V', 8, Filters.JOHNSON_R)
    >>> plt.plot(sp.waveset, sp(sp.waveset), label='Vega')
    >>> plt.plot(spA0V.waveset, spA0V(spA0V.waveset), label='A0V')
    >>> plt.plot(spG2V.waveset, spG2V(spG2V.waveset), label='G2V')
    >>> plt.grid(True)
    >>> plt.xlabel('nm')
    >>> plt.ylabel('FLAM')
    >>> plt.xlim(0, 10000)
    >>> plt.legend()
    """
    # read the sourcespectrum
    if spectral_type == 'vega':
        spectrum = SourceSpectrum.from_vega()
    else:
        spectrum = SourceSpectrum.from_file(
            PickelsLibrary.filename(spectral_type))

    bandpass = Filters.get(filter_name)

    spectrum_norm = spectrum.normalize(
        magnitude * synphot.units.VEGAMAG,
        bandpass,
        vegaspec=SourceSpectrum.from_vega())

    return spectrum_norm
001406dc4ffa1bc08960c27950333965ffa07836
1,627
def header(**kwargs):
    """
    Create header node and return it.

    Equivalent to :code:`return Element("header", attributes...)`.
    """
    return Element("header", **kwargs)
e02784bbb8bf153fc7c815fb06a3e1246dbb1847
1,628
# Assumed imports: the function uses numpy as `np` and the `m` alias for math.
import math as m

import numpy as np


def bump_func(xs_arg, low, high, btype):
    """
    Setup initial displacement distribution of a bump, either sine or
    triangular.
    """
    # check the case of a single float as input
    if isinstance(xs_arg, (int, float)):
        xs_in = np.array([float(xs_arg)])
        scalar = True
    else:
        xs_in = xs_arg
        scalar = False

    Ys_out = 0.0 * xs_in
    mid = (low + high) / 2.0
    diff = high - low

    for i in range(0, len(Ys_out)):
        if (xs_in[i] > low) and (xs_in[i] < high):
            if btype == 1:  # triangle shape
                Ys_out[i] = 1.0 - abs((xs_in[i] - mid) / (0.5 * diff))
            else:  # sine bump
                Ys_out[i] = (1.0 + m.cos(2. * m.pi / diff * (xs_in[i] - mid))) / 2.

    if scalar is True:
        return Ys_out[0]
    else:
        return Ys_out
22991769be4f0c09992fcf3a069d05f98e6c0d55
1,629
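A small sketch of bump_func on array and scalar inputs (assuming numpy is imported as np):

xs = np.linspace(0.0, 2.0, 5)
print(bump_func(xs, 0.5, 1.5, btype=1))   # triangular bump: [0. 0. 1. 0. 0.]
print(bump_func(1.0, 0.5, 1.5, btype=2))  # scalar input -> sine bump peak, 1.0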
def setup_session(username, password, check_url=None, session=None, verify=True):
    """
    A special call to get_cookies.setup_session that is tailored for
    URS EARTHDATA at NASA credentials.
    """
    if session is not None:
        # URS connections cannot be kept alive at the moment.
        session.headers.update({'Connection': 'close'})
    session = get_cookies.setup_session('https://urs.earthdata.nasa.gov',
                                        username=username,
                                        password=password,
                                        session=session,
                                        check_url=check_url,
                                        verify=verify)
    return session
e5d6eae8c79343249a7d22b1a8e90f94d4ee2420
1,630
import numbers


def is_number(item):
    """Check if the item is a number."""
    return isinstance(item, numbers.Number)
6c3fb6817a0eda2b27fcedd22763461dceef6bc1
1,631
def from_list(commands):
    """
    Given a list of dicts with "level", "description" and "id" keys that
    represents a DFS traversal of a command tree, returns a list of nested
    dictionaries representing the command tree.
    """
    def subtrees(commands, level):
        if not commands:
            return
        acc = []
        parent, *commands = commands
        for command in commands:
            if command["level"] > level:
                acc.append(command)
            else:
                yield (parent, acc)
                parent = command
                acc.clear()
        yield (parent, acc)

    def walk(commands, level=0):
        return [
            {
                "description": key["description"],
                "children": walk(subtree, level + 1),
                "id": key["id"],
            }
            for key, subtree in subtrees(commands, level)
        ]

    return walk(commands)
39dad022bf81712e074f6e8bb26813302da9ef9f
1,632
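A minimal usage sketch for from_list; the command records are made up:

commands = [
    {"id": 1, "level": 0, "description": "git"},
    {"id": 2, "level": 1, "description": "commit"},
    {"id": 3, "level": 1, "description": "push"},
]
print(from_list(commands))
# -> [{'description': 'git', 'children': [
#         {'description': 'commit', 'children': [], 'id': 2},
#         {'description': 'push', 'children': [], 'id': 3}], 'id': 1}]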
def get_cassandra_config_options(config):
    """Parse Cassandra's Config class to get all possible config values.

    Unfortunately, some are hidden from the default cassandra.yaml file, so
    this appears the only way to do this."""
    return _get_config_options(config=config,
                               config_class='org.apache.cassandra.config.Config')
2a52a50bed46105c7c1a1ea85e0c10a33ee02de1
1,633
from fractions import Fraction  # assumed import: Fraction is used below;
                                # get_factors is a helper from the same module


def get_possible_zeros(coefficients: list) -> list:
    """Rational Zeros Theorem possible zeros of a polynomial function.

    Args:
        coefficients (list): The coefficients of a polynomial function, in
            order of degree including all from a_n to a_0.

    Returns:
        list: A list containing all possible zeros, negative and positive.
    """
    possible_zeros = []

    # Obtain factors of a_n and a_0
    factors_an = get_factors(coefficients[0])
    factors_a0 = get_factors(coefficients[-1])

    # Generate all possible zeros, skipping duplicates.
    for i in factors_a0:
        possible_zeros.extend([i, -i])
        for j in factors_an:
            frac = Fraction(i, j)
            if frac not in possible_zeros:
                possible_zeros.extend([frac, -frac])

    # Sort the possible zeros in absolute value order.
    possible_zeros.sort(key=abs)
    return possible_zeros
751337e2a324ddba126ced8a896630eb816c63a6
1,634
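A usage sketch for get_possible_zeros on x^2 - 3x + 2, assuming the module's get_factors helper returns the positive factors (e.g. get_factors(2) == [1, 2]):

print(get_possible_zeros([1, -3, 2]))  # -> [1, -1, 2, -2]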
import site


def view() -> pn.Column:
    """# Bootstrap Dashboard Page.

    Creates a Bootstrap Dashboard Page with a Chart and a Table

    - inspired by the [GetBootstrap Dashboard Template]
      (https://getbootstrap.com/docs/4.4/examples/dashboard/)
    - implemented using the `awesome_panel` Python package and in particular
      the `awesome_panel.express.templates.BootstrapDashboardTemplate`

    Returns:
        pn.Column -- The Orders View
    """
    table = pn.Pane(
        _get_table_data(),
        sizing_mode="stretch_width",
    )
    pn.config.sizing_mode = "stretch_width"
    main = [
        APPLICATION.intro_section(),
        pn.Column(
            pnx.SubHeader("Dashboard"),
            pn.pane.HoloViews(_holoviews_chart()),
        ),
        pn.Column(
            pnx.SubHeader("Section Title"),
            table,
        ),
    ]
    return site.create_template(title="Bootstrap Dashboard", main=main, main_max_width="800px")
d3365ca9064a2354d5e848dd2c7666d90241c456
1,635
import doctest


def load_tests(loader, tests, ignore):
    """
    Creates a ``DocTestSuite`` for each module named in ``DOCTEST_MODULES``
    and adds it to the test run.
    """
    for module in DOCTEST_MODULES:
        tests.addTests(doctest.DocTestSuite(module))
    return tests
33befd96fc2401fe0ae4b03b4d8eb26098273150
1,636
def buildGeneMap(identifiers, separator="|"):
    """build map of predictions to genes.

    Use an identifier syntax of species|transcript|gene. If none is given,
    all transcripts are assumed to be from their own gene.
    """
    map_id2gene, map_gene2ids = {}, {}

    for id in identifiers:
        f = id.split(separator)
        if len(f) < 3:
            gene = id
        else:
            gene = f[0] + separator + f[2]
        map_id2gene[id] = gene
        if gene not in map_gene2ids:
            map_gene2ids[gene] = []
        map_gene2ids[gene].append(id)

    return map_id2gene, map_gene2ids
e854639142bd600338563ffc1160b43359876cdd
1,637
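A minimal usage sketch for buildGeneMap with the species|transcript|gene identifier syntax:

id2gene, gene2ids = buildGeneMap(["hs|tr1|geneA", "hs|tr2|geneA", "orphan"])
print(id2gene["hs|tr1|geneA"])  # -> 'hs|geneA'
print(gene2ids["hs|geneA"])     # -> ['hs|tr1|geneA', 'hs|tr2|geneA']
print(id2gene["orphan"])        # -> 'orphan' (no separator: its own gene)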
import json
import warnings

import pandas as pd  # assumed import: the function relies on pandas throughout
import requests

# `url`, `get_token` and `text2df` are module-level helpers in the original source.


def get_kline(symbol, end_date, freq, start_date=None, count=None):
    """Fetch K-line (candlestick) data.

    :param symbol: str JoinQuant instrument code
    :param start_date: datetime start date
    :param end_date: datetime end date
    :param freq: str K-line level, one of ['1min', '5min', '30min', '60min', 'D', 'W', 'M']
    :param count: int number of K-lines, 5000 at most
    :return: pd.DataFrame

    >>> start_date = datetime.strptime("20200701", "%Y%m%d")
    >>> end_date = datetime.strptime("20200719", "%Y%m%d")
    >>> df1 = get_kline(symbol="000001.XSHG", start_date=start_date, end_date=end_date, freq="1min")
    >>> df2 = get_kline(symbol="000001.XSHG", end_date=end_date, freq="1min", count=1000)
    >>> df3 = get_kline(symbol="000001.XSHG", start_date='20200701', end_date='20200719', freq="1min")
    >>> df4 = get_kline(symbol="000001.XSHG", end_date='20200719', freq="1min", count=1000)
    """
    if count and count > 5000:
        warnings.warn(f"count={count} exceeds the maximum of 5000; only the last 5000 records are returned")

    # 1m, 5m, 15m, 30m, 60m, 120m, 1d, 1w, 1M
    freq_convert = {"1min": "1m", "5min": '5m', '15min': '15m',
                    "30min": "30m", "60min": '60m',
                    "D": "1d", "W": '1w', "M": "1M"}
    end_date = pd.to_datetime(end_date)
    if start_date:
        start_date = pd.to_datetime(start_date)
        data = {
            "method": "get_price_period",
            "token": get_token(),
            "code": symbol,
            "unit": freq_convert[freq],
            "date": start_date.strftime("%Y-%m-%d"),
            "end_date": end_date.strftime("%Y-%m-%d"),
            # "fq_ref_date": end_date
        }
    elif count:
        data = {
            "method": "get_price",
            "token": get_token(),
            "code": symbol,
            "count": count,
            "unit": freq_convert[freq],
            "end_date": end_date.strftime("%Y-%m-%d"),
            # "fq_ref_date": end_date
        }
    else:
        raise ValueError("start_date and count must not both be empty")

    r = requests.post(url, data=json.dumps(data))
    df = text2df(r.text)
    df['symbol'] = symbol
    df.rename({'date': 'dt', 'volume': 'vol'}, axis=1, inplace=True)
    df = df[['symbol', 'dt', 'open', 'close', 'high', 'low', 'vol']]
    for col in ['open', 'close', 'high', 'low', 'vol']:
        df.loc[:, col] = df[col].apply(lambda x: round(float(x), 2))
    df.loc[:, "dt"] = pd.to_datetime(df['dt'])
    return df
5750784f2ef0a3080eac881faa1ee33d6b3b1a3d
1,638
from typing import Any, Dict, Optional, Type


def create_trial_instance(
    trial_def: Type[det.Trial],
    checkpoint_dir: str,
    config: Optional[Dict[str, Any]] = None,
    hparams: Optional[Dict[str, Any]] = None,
) -> det.Trial:
    """
    Create a trial instance from a Trial class definition. This can be a
    useful utility for debugging your trial logic in any development
    environment.

    Arguments:
        trial_def: A class definition that inherits from the det.Trial interface.
        checkpoint_dir: The checkpoint directory that the trial will use for
            loading and saving checkpoints.
        config: An optional experiment configuration that is used to
            initialize the :class:`determined.TrialContext`. If not
            specified, a minimal default is used.
        hparams: An optional dictionary of hyperparameter values for the trial.
    """
    determined_common.set_logger(
        util.debug_mode() or det.ExperimentConfig(config or {}).debug_enabled()
    )
    env, rendezvous_info, hvd_config = det._make_local_execution_env(False, config, hparams)
    trial_context = trial_def.trial_context_class(env, hvd_config)
    return trial_def(trial_context)
2a8bec728d874e448686c4d67792885f915feb01
1,639
def magnitude(number: SnailNumber) -> int:
    """Calculates the magnitude of a snail number

    Args:
        number (SnailNumber): input number

    Returns:
        (int): magnitude

    Examples:
        >>> magnitude([[1, 1], [2, 2]])
        35
        >>> magnitude([[[[0,7],4],[[7,8],[6,0]]],[8,1]])
        1384
    """
    if isinstance(number, int):
        return number
    return 3 * magnitude(number[0]) + 2 * magnitude(number[1])
d89099f97e2c96e2e41c1e47a3b7ee74f5f0111c
1,640
import tarfile


def _load_model_from_tarball(tarball_path, gpg_home_dir):
    """Load a model from a tarball

    Args:
        tarball_path: a path to a model gzipped tar file
        gpg_home_dir: home directory for gpg to verify signed model
            (e.g. path/to/.gnupg)

    Returns:
        something of type SerializableModel
    """
    with tarfile.open(tarball_path, "r") as tar_file:
        return model_metadata.load_from_tarfile(tar_file, gpg_home_dir=gpg_home_dir)
cf6e9dedc0278847011ffaf0470813230d83e2dc
1,641
def strip_begin_end_key(key):
    """
    Strips off newline chars, BEGIN PUBLIC KEY and END PUBLIC KEY.
    """
    return key.replace("\n", "") \
              .replace("-----BEGIN PUBLIC KEY-----", "") \
              .replace("-----END PUBLIC KEY-----", "")
ef989872c5c1c136114310c142a0f989e2f888ca
1,642
def add_expdvr(npoints: int, qmin: float, qmax: float) -> DVRSpecification:
    """Register a new exponential DVR

    Args:
        npoints (int): number of grid points
        qmin (float): minimal x value
        qmax (float): maximal x value
    """
    return DVRSpecification("ExponentialDVR", npoints, qmin, qmax)
c0a6aa6f349433fc0c4266807343d73fad37a279
1,643
import logging
import os


def main():
    """Main CLI function.

    :rtype: int
    :returns: exit code, which is the maximum of all build exit codes
    """
    opts, args = parse_args()
    setup_logging(opts.verbose)
    if opts.show_configs:
        print_configs()
        return 0

    build_env = os.environ.copy()
    chpl_misc = get_chpl_misc(opts, args, build_env)

    build_configs = get_configs(opts)
    config_count_str = '{0} configuration{1}'.format(
        len(build_configs),
        's' if len(build_configs) > 1 else '')
    logging.info('Building {0}.'.format(config_count_str))

    def list_config_names():
        """Return a complete formatted table showing all the chapel configs
        in this build."""
        names = []
        for i, build_config in enumerate(build_configs):
            if i == 0:
                # prepend header row
                build_config_name = build_config.pretty_str(header=True)
                if not build_config_name:
                    build_config_name = 'None'
                names.append('')
                names.append('    ' + build_config_name)
            # normal table row
            build_config_name = build_config.pretty_str()
            if not build_config_name:
                build_config_name = 'None'
            names.append('%3d / %3d  %s' % (i + 1, len(build_configs), build_config_name))
        return names

    logging.info('\n'.join(list_config_names()))

    make_logfile = chpl_misc['make_logfile']
    chpl_home = chpl_misc['chpl_home']
    if make_logfile:
        print('\n[BUILD_CONFIGS] CHPL_HOME={0}'.format(chpl_home), file=make_logfile)
        print('\n[BUILD_CONFIGS] Building {0}'.format(config_count_str), file=make_logfile)
        print('\n'.join(list_config_names()), file=make_logfile)

    statuses = [0, ]
    with elapsed_time('All {0}'.format(config_count_str)):
        for i, build_config in enumerate(build_configs):
            result = build_chpl(
                '{0} / {1}'.format(i + 1, len(build_configs)),
                build_config,
                build_env,
                chpl_misc,
                parallel=opts.parallel,
                verbose=opts.verbose,
                dry_run=opts.dry_run,
            )
            statuses.append(result)

    # exit from this program.
    exit_code = max(statuses)
    return exit_code
dee02bd8c7c4de774f35f5bd5120b3fc4b1c989d
1,644
def sync_now(r, **attr):
    """
    Manual synchronization of a repository

    @param r: the S3Request
    @param attr: controller options for the request
    """
    T = current.T
    auth = current.auth
    response = current.response

    rheader = attr.get("rheader", None)
    if rheader:
        rheader = rheader(r)

    output = {"title": T("Manual Synchronization"),
              "rheader": rheader,
              }
    s3task = current.s3task
    sync = current.sync

    if not auth.s3_logged_in():
        auth.permission.fail()

    if r.interactive:
        if r.http in ("GET", "POST"):
            repository = r.record
            if not repository:
                r.error(404, current.ERROR.BAD_RECORD)
            form = FORM(DIV(T("Click 'Start' to synchronize with this repository now:"),
                            ),
                        DIV(INPUT(_class="tiny primary button",
                                  _type="submit",
                                  _value=T("Start"),
                                  ),
                            ),
                        _class="sync-now-form",
                        )
            if form.accepts(r.post_vars, current.session):
                task_id = s3task.run_async("sync_synchronize",
                                           args=[repository.id],
                                           vars={"user_id": auth.user.id,
                                                 "manual": True,
                                                 },
                                           )
                if task_id is False:
                    response.error = T("Could not initiate manual synchronization.")
                elif task_id is None:
                    # No scheduler running, has run synchronously
                    response.flash = T("Manual synchronization completed.")
                else:
                    sync.set_status(manual=True)
                    response.flash = T("Manual synchronization started in the background.")
        else:
            r.error(405, current.ERROR.BAD_METHOD)
    else:
        r.error(415, current.ERROR.BAD_FORMAT)

    status = sync.get_status()
    if status.running:
        output["form"] = T("Synchronization currently active - refresh page to update status.")
    elif not status.manual:
        output["form"] = form
    else:
        output["form"] = T("Manual synchronization scheduled - refresh page to update status.")

    response.view = "update.html"
    return output
85ac3be60da29982ac203dee2b408e18422f431a
1,645
def checkdnsrr():
    """Check DNS records corresponding to a given Internet host name or IP
    address"""
    raise NotImplementedError()  # fixed: the exception was returned instead of raised
7fda596230cc5f61e946e8a0949c67f365cf5563
1,646
# R, O, Y, G, B, I, V, W are color constants assumed to be defined in the
# surrounding module (the original `from re import I` import was spurious:
# it would shadow the indigo constant with the re.IGNORECASE flag).


def get_horizontal_rainbow_00():
    """
    Returns the main horizontal rainbow

    Programs that use this function:
    - Diagonal Ripple 1
    - Diagonal Ripple 2
    - Diagonal Ripple 3
    - Diagonal Ripple 4
    - Double Ripple 1
    - Double Ripple 2
    - Double Ripple 3
    - Double Ripple 4
    - Horizontal Ripple 1
    - Horizontal Ripple 2
    - Moving Horizontal Rainbow 1
    - Moving Horizontal Rainbow 2
    """
    rainbow00 = [
        [R, O, Y, G, B, I, V, W],
        [R, O, Y, G, B, I, V, W],
        [R, O, Y, G, B, I, V, W],
        [R, O, Y, G, B, I, V, W],
        [R, O, Y, G, B, I, V, W],
        [R, O, Y, G, B, I, V, W],
        [R, O, Y, G, B, I, V, W],
        [R, O, Y, G, B, I, V, W]
    ]
    return rainbow00
86c1d632f3a9e7df94c710a323cd099bb5602e19
1,647
def xml_attr_or_element(xml_node, name):
    """ Attempt to get the value of name from the xml_node. This could be an
    attribute or a child element.
    """
    attr_val = xml_node.get(name, None)
    if attr_val is not None:
        return attr_val.encode('utf-8').strip()
    for child in xml_node.getchildren():
        if child.tag == name:
            return child.text.encode('utf-8').strip()
    return None
4ec061a9a865291d8d26d8de474141175d5aab28
1,648
import numpy


def kmeans_init_centroids(x_array, num_centroids_K):
    """
    This function initializes K centroids that are to be used in K-means
    on the dataset x_array.

    Parameters
    ----------
    x_array : array_like
        The dataset of size (m x n).
    num_centroids_K : int
        The number of clusters.

    Returns
    -------
    rand_init_centroids : array_like
        Centroids of the clusters. This is a matrix of size (K x n).

    Instructions
    ------------
    You should set centroids to randomly chosen examples from the
    dataset x_array.
    """
    numpy.random.seed(seed=42)
    num_examples, num_features = x_array.shape

    rand_init_centroids = numpy.zeros((num_centroids_K, num_features))
    randidx = numpy.random.permutation(num_examples)

    # Take the first K examples as centroids
    rand_init_centroids = x_array[randidx[:num_centroids_K], :]

    return rand_init_centroids
2e310dd3fe9eb6dd32999e32f583fc4a7fd0bbf0
1,649
def get_coinbase_candle_url(url, timestamp_from, pagination_id):
    """Get Coinbase candle URL."""
    start = timestamp_from.replace(tzinfo=None).isoformat()
    url += f"&start={start}"
    if pagination_id:
        url += f"&end={pagination_id}"
    return url
a1bb4e975060ba5e3438b717d1c2281349cd51f1
1,650
def part2():
    """This view will be at the path ``/part2``"""
    return "Part 2"
92a8789b669a66989a74be2c2126e8958e4beece
1,651
def create_strings_from_file(filename, count):
    """
    Create all strings by reading lines in specified files
    """
    strings = []

    with open(filename, 'r') as f:
        lines = [l.strip()[0:200] for l in f.readlines()]
        if len(lines) == 0:
            raise Exception("No lines could be read in file")
        while len(strings) < count:
            if len(lines) > count - len(strings):
                strings.extend(lines[0:count - len(strings)])
            else:
                strings.extend(lines)

    return strings
35eac3661e31f8e6e895c7575ad725a7ea27d846
1,652
def drive_around(a_th_gld=1.2):
    """
    Function that implements the logic with which the robot decides how to
    navigate in 2D space; it is essentially based on the (frontal and
    lateral) distance values of the golden tokens obtained by
    find_obstacles()

    Args:
        a_th_gld (float): threshold for the frontal golden token, default: 1.2

    Inner Functions:
        find_obstacles(range_front, range_lat): see the find_obstacles()
        function header
    """
    def find_obstacles(range_front=30, range_lat=[80, 100]):
        """
        Function to find the distances of the closest golden tokens (i.e.
        the obstacles) in the frontal, left and right portions of the robot
        view.

        Args:
            range_front (float): positive range in which we want to find the
                frontal token, default: 30 degrees
            range_lat (int[]): list of the two positive angles (that
                correspond to the lateral areas) in which the robot will
                search, default: [80, 100] degrees

        Returns:
            dist_front (float): distance of the closest golden token in front
            dist_left (float): distance of the closest golden token on the left
            dist_right (float): distance of the closest golden token on the right
        """
        dist_left = dist_right = dist_front = 100
        for token in R.see():
            if (token.info.marker_type is MARKER_TOKEN_GOLD and token.dist < 2.5):
                if token.dist < dist_front and -range_front < token.rot_y < +range_front:
                    dist_front = token.dist
                if token.dist < dist_left and -range_lat[1] < token.rot_y < -range_lat[0]:
                    dist_left = token.dist
                if token.dist < dist_right and range_lat[0] < token.rot_y < range_lat[1]:
                    dist_right = token.dist
        return dist_front, dist_left, dist_right

    dist_front, dist_left, dist_right = find_obstacles()

    if dist_front < a_th_gld:  # check if the frontal distance is lower than a_th_gld
        if dist_left <= dist_right:  # the left golden token is closer than the right one
            if 1.5 * dist_left < dist_right:
                # the left distance (dist_left) is at least 1.5 times smaller
                # than the right distance (dist_right), so we only need to
                # turn to the right
                turn(45, 0.1)
                print("right a bit...")
            else:
                # the two lateral distances are too similar, better to go
                # forward while turning
                drive(20, 0.1)
                turn(20, 0.1)
                print("slightly turn to the right...")
        elif 1.5 * dist_right < dist_left:  # here dist_right < dist_left
            print("left a bit...")
            turn(-45, 0.1)
        else:
            drive(20, 0.1)
            turn(-35, 0.1)
            print("slightly turn to the left...")
    else:  # if none of the previous conditions occurred, go forward
        drive(80, 0.15)
        print("going forward...")
c4ea2b855fdf40bc268232d11ff58f45362878b5
1,653
def a_request(session_request):
    """AnonymousUser request"""
    session_request.user = AnonymousUser()
    return session_request
ea651f420e62c455ee63ae1d158a07be719fa48c
1,654
def subplot_index(nrow, ncol, k, kmin=1):
    """Return the i, j index for the k-th subplot."""
    i = 1 + (k - kmin) // ncol
    j = 1 + (k - kmin) % ncol
    if i > nrow:
        raise ValueError('k = %d exceeds number of rows' % k)
    return i, j
2d2b7ef9bf9bc82d06637157949ca9cb3cc01105
1,655
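A quick sketch of subplot_index for a 2 x 3 grid with the default 1-based numbering:

print(subplot_index(2, 3, 1))  # -> (1, 1)
print(subplot_index(2, 3, 5))  # -> (2, 2)
print(subplot_index(2, 3, 7))  # raises ValueError: k = 7 exceeds number of rows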
import glob
import os


def read_transport_maps(input_dir, ids=None, time=None):
    """
    Find and parse all transport maps in a directory.
    Returns a list containing the transport maps and start/end timepoints.

    Parameters
    ----------
    input_dir : str
        The directory in which to look for transport maps.
        Alternatively, a pattern may be given, resulting in shell expansion
        before each directory is processed.
    ids : list of str, optional
        Ids to keep the transport maps for.
        If not None, any id not in this list will be filtered out of the maps.
        The order of ids in the resulting transport maps is also guaranteed
        to be the same as this parameter.
    time : int or float, optional
        If ids is not None, specifies the time at which the ids were measured.

    Returns
    -------
    transport_maps : list of { 't1': float, 't2': float, 'transport_map': anndata.AnnData }
        The list of all transport maps

    Raises
    ------
    ValueError
        If exactly one of (ids, time) is None. Must be both or none.
        If no transport map is found in the given directory.
        If several transport maps are found for the same timepoints.

    Notes
    -----
    Time points are determined by the filename.

    Filenames must end in `_{t1}_{t2}.extension`.
    Any transport map not following this convention will be ignored.
    If any other dataset file is present in the listed directories and uses
    this naming convention, it might be interpreted as a transport map,
    yielding unpredictable results.

    All wot commands are guaranteed to enforce this naming convention.
    """
    transport_maps_inputs = []  # file, start, end
    is_pattern = not os.path.isdir(input_dir)

    files = os.listdir(input_dir) if not is_pattern else glob.glob(input_dir)

    if (ids is None) != (time is None):
        raise ValueError("Only one of time and ids is None. Must be both or none")

    tmap_times = set()
    for path in files:
        path = os.path.join(os.path.dirname(input_dir), path) if not is_pattern else path
        if os.path.isfile(path):
            file_info = wot.io.get_filename_and_extension(os.path.basename(path))
            basename = file_info[0]
            tokens = basename.split('_')
            t1 = tokens[len(tokens) - 2]
            t2 = tokens[len(tokens) - 1]

            try:
                t1 = float(t1)
                t2 = float(t2)
            except ValueError:
                continue

            ds = wot.io.read_dataset(path)
            if ids is not None and t1 == time:
                # subset rows
                indices = ds.obs.index.isin(ids)
                ds = anndata.AnnData(ds.X[indices], ds.obs.iloc[indices], ds.var)
            if ids is not None and t2 == time:
                # subset columns
                indices = ds.var.index.isin(ids)
                ds = anndata.AnnData(ds.X[:, indices], ds.obs, ds.var.iloc[indices])

            if (t1, t2) in tmap_times:
                raise ValueError("Multiple transport maps found for times ({},{})".format(t1, t2))
            else:
                tmap_times.add((t1, t2))
            transport_maps_inputs.append(
                {'transport_map': ds, 't1': t1, 't2': t2})

    if not transport_maps_inputs:
        raise ValueError("No transport maps found in the given directories")

    transport_maps_inputs.sort(key=lambda x: x['t1'])  # sort by t1 (start time)

    return transport_maps_inputs
d296e0033b2c58b19e92cdcfe7574c27036d57c3
1,656
def _split_keys(keypath, separator):
    """
    Splits keys using the given separator:
    eg. 'item.subitem[1]' -> ['item', 'subitem[1]'].
    """
    if separator:
        return keypath.split(separator)
    return [keypath]
2f67a35a2e08efce863d5d9e64d8a28f8aa47765
1,657
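Expected behavior of _split_keys with and without a separator:

print(_split_keys("item.subitem[1]", "."))  # -> ['item', 'subitem[1]']
print(_split_keys("plain-key", None))       # -> ['plain-key']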
from contextlib import suppress
import subprocess


def get_code_base_url() -> str | None:
    """Get current code base url."""
    code_base = None
    with suppress(subprocess.CalledProcessError):
        code_base = (
            subprocess.check_output("git config --get remote.origin.url".split())
            .decode("utf-8")
            .strip()
        )
    return code_base
f2e04852a4f3325cd3c8510dbb8051799b0b4ae6
1,658
def _get_referenced_type_equivalences(graphql_types, type_equivalence_hints):
    """Filter union types with no edges from the type equivalence hints dict."""
    referenced_types = set()
    for graphql_type in graphql_types.values():
        if isinstance(graphql_type, (GraphQLObjectType, GraphQLInterfaceType)):
            for _, field in graphql_type.fields.items():
                if isinstance(field.type, GraphQLList):
                    referenced_types.add(field.type.of_type.name)
    return {
        original: union
        for original, union in type_equivalence_hints.items()
        if union.name in referenced_types
    }
f92f4dba0d694e17ebecdb62922b121eeba23f87
1,659
import numpy


def E(poly, dist=None, **kws):
    """
    Expected value operator.

    1st order statistics of a probability distribution or polynomial on a
    given probability space.

    Args:
        poly (Poly, Dist) : Input to take expected value on.
        dist (Dist) : Defines the space the expected value is taken on. It is
            ignored if `poly` is a distribution.
        **kws (optional) : Extra keywords passed to dist.mom.

    Returns:
        (ndarray) : The expected value of the polynomial or distribution,
            where `expected.shape == poly.shape`.

    Examples:
        >>> x = chaospy.variable()
        >>> Z = chaospy.Uniform()
        >>> print(chaospy.E(Z))
        0.5
        >>> print(chaospy.E(x**3, Z))
        0.25
    """
    if not isinstance(poly, (distributions.Dist, polynomials.Poly)):
        print(type(poly))
        print("Approximating expected value...")
        out = quadrature.quad(poly, dist, veceval=True, **kws)
        print("done")
        return out

    if isinstance(poly, distributions.Dist):
        dist, poly = poly, polynomials.variable(len(poly))

    if not poly.keys:
        return numpy.zeros(poly.shape, dtype=int)

    if isinstance(poly, (list, tuple, numpy.ndarray)):
        return [E(_, dist, **kws) for _ in poly]

    if poly.dim < len(dist):
        poly = polynomials.setdim(poly, len(dist))

    shape = poly.shape
    poly = polynomials.flatten(poly)

    keys = poly.keys
    mom = dist.mom(numpy.array(keys).T, **kws)
    A = poly.A

    if len(dist) == 1:
        mom = mom[0]

    out = numpy.zeros(poly.shape)
    for i in range(len(keys)):
        out += A[keys[i]] * mom[i]

    out = numpy.reshape(out, shape)
    return out
c3f051e522e0cecf0b951c194c43850b4016dd17
1,660
def spacify(string, spaces=2):
    """Add spaces to the beginning of each line in a multi-line string."""
    return spaces * " " + (spaces * " ").join(string.splitlines(True))
7ab698d8b38a6d940ad0935b5a4ee8365e35f5da
1,661
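A one-liner demo of spacify:

print(spacify("a\nb", spaces=4))  # -> '    a\n    b' (each line indented by 4 spaces)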
# Assumed imports: the function uses matplotlib's pyplot and seaborn; the
# 'science' and 'ieee' styles come from the SciencePlots package.
import matplotlib.pyplot as plt
import seaborn as sns


def simple_line_plot(x, y=None, title="", xlabel="", ylabel="",
                     context='notebook', xlim=None, ylim=None, color='blue',
                     parse_axes=None, return_axes=False, label=""):
    """ plot a simple line plot of x (against y, if given), with optional
    axis limits, labels and styling """
    # sns.set()
    plt.style.use(['science', 'ieee'])
    sns.set_context(context)

    if parse_axes is None:
        fig, ax1 = plt.subplots()
    else:
        ax1 = parse_axes

    if y is not None:
        ax1.plot(x, y, color=color, label=label)
    else:
        ax1.plot(x, color=color, label=label)

    if xlim is not None:
        ax1.set_xlim(xlim)
    if ylim is not None:
        ax1.set_ylim(ylim)

    ax1.set_title(title)
    ax1.set_xlabel(xlabel)
    ax1.set_ylabel(ylabel)

    if return_axes:
        return ax1
    else:
        plt.show()
6839b12307c2ab8e2747f8d8fe8f913126253a28
1,662
def generate(parsed_data, template, opath, dme_vault, helper, **kwargs):
    """Generates collection and data-object metadata needed for DME upload.

    For each collection (directory) and data-object (file), an output file is
    generated in JSON format. 'opath' dictates where these files will be
    saved. Returns a dictionary of collection information where
    [key] = DME PATH of the collection to be initialized or updated and
    [value] = absolute PATH of the collection metadata json file.
    """
    template = json2dict(template)
    collections = helper(parsed_data, template, opath, dme_vault, **kwargs)
    return collections
885c25f4d7dcb933449bbf028f00a6cecae4233a
1,663
from pysnmp.entity.rfc3413.oneliner import cmdgen


def _get_snmp(oid, hostname, community):
    """SNMP wrapper function. Returns tuple of (oid, value).

    Keyword Arguments:
        oid -- OID to query
        hostname -- host to query
        community -- SNMP community string
    """
    cmd_gen = cmdgen.CommandGenerator()
    error_indication, error_status, error_index, var_bind = cmd_gen.getCmd(
        cmdgen.CommunityData(community),
        cmdgen.UdpTransportTarget((hostname, 161)),
        oid)
    if error_indication:
        print(error_indication)
    else:
        if error_status:
            print('%s at %s' % (
                error_status.prettyPrint(),
                error_index and var_bind[int(error_index) - 1] or '?'))
        else:
            for name, value in var_bind:
                return (name.prettyPrint(), value.prettyPrint())
8911dbeece2bac9cc398fafc6a8fde8033752d00
1,664
from fontbakery.utils import get_glyph_name


def com_google_fonts_check_048(ttFont):
    """Font has **proper** whitespace glyph names?"""
    def getGlyphEncodings(font, names):
        result = set()
        for subtable in font['cmap'].tables:
            if subtable.isUnicode():
                for codepoint, name in subtable.cmap.items():
                    if name in names:
                        result.add(codepoint)
        return result

    if ttFont['post'].formatType == 3.0:
        yield SKIP, "Font has version 3 post table."
    else:
        failed = False
        space_enc = getGlyphEncodings(ttFont, ["uni0020", "space"])
        nbsp_enc = getGlyphEncodings(
            ttFont, ["uni00A0", "nonbreakingspace", "nbspace", "nbsp"])

        space = get_glyph_name(ttFont, 0x0020)
        if 0x0020 not in space_enc:
            failed = True
            yield FAIL, Message("bad20",
                                ("Glyph 0x0020 is called \"{}\":"
                                 " Change to \"space\""
                                 " or \"uni0020\"").format(space))

        nbsp = get_glyph_name(ttFont, 0x00A0)
        if 0x00A0 not in nbsp_enc:
            if 0x00A0 in space_enc:
                # This is OK.
                # Some fonts use the same glyph for both space and nbsp.
                pass
            else:
                failed = True
                yield FAIL, Message("badA0",
                                    ("Glyph 0x00A0 is called \"{}\":"
                                     " Change to \"nbsp\""
                                     " or \"uni00A0\"").format(nbsp))

        if failed is False:
            yield PASS, "Font has **proper** whitespace glyph names."
baa2c101859bc08060ff23d9bc12d2c3e573bc44
1,665
import re
def ensure_format_is_valid( r, dataset_name ):
    """ This extracts the format from the given resource and maps it according to the formats mapping, if provided."""

    if 'format' not in r:
        log.error( '%s resources-object is missing format-property. Cannot save this value', dataset_name )
        # TODO create error message and exit
        return None

    format_ = r['format'].strip().lower()
    format_ = re.sub( r'[^a-zA-Z0-9]', '_', format_ )  # replace special characters in format-attribute with _
    format_ = re.sub( r'^_+', '', format_ )  # strip leading _
    format_ = re.sub( r'_+$', '', format_ )  # strip trailing _
    format_ = re.sub( r'__+', '_', format_ )  # collapse runs of _ into a single _

    if not format_:
        log.error( 'Format is not valid after cleanup, original: %s. Will continue with next resource', r['format'] )
        return None

    format_ = ensure_format_in_dictionary( format_ )

    log.info( 'Found valid format "%s"', format_ )

    return format_
08311e9642d9cb3415e24f2764c792347e45c986
1,666
import numpy as np


def find_nearest(array, value):
    """
    Inputs:
        array - array of values to search
        value - value to search for in array
    Outputs:
        array[idx] - nearest value in array
    """
    array = np.asarray(array)
    idx = (np.abs(array - value)).argmin()
    return array[idx]
f65156f8b084e8dff388fe51f56162be668a1bbc
1,667
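# Editor's usage sketch for find_nearest() above -- not part of the original snippet.
import numpy as np

grid = np.array([0.0, 0.5, 1.0, 1.5, 2.0])
print(find_nearest(grid, 1.3))  # -> 1.5, since |1.5 - 1.3| is the smallest gap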
from itertools import islice, tee


def sync_contacts(contacts, create_missing=True, quiet=True):
    """
    contacts is a list of dictionaries like this:
    [{
        u'E-Mail': u'total-berlin-admin@total.de',
        u'Gender': 2,
        u'First Name': u'Admin',
        u'Last Name': u'von Total Berlin',
        ...
    }, ...]

    The dictionary keys are mapped to emarsys field ids using
    settings.EMARSYS_FIELDS, which can be generated with `get_fields()`.

    Fields in settings.EMARSYS_CREATE_ONLY_FIELDS are not sent when updating
    a contact.
    """
    def log_debug(message):
        if not quiet:
            print("{}\n".format(message))

    def chunked(it, n):
        """
        From http://stackoverflow.com/a/8991553
        """
        it = iter(it)
        while True:
            chunk = tuple(islice(it, n))
            if not chunk:
                return
            yield chunk

    total_updated = 0
    total_created = 0

    # emails of contacts that couldn't be updated because they don't exist at
    # emarsys
    missing_contacts = []

    # emails of contacts that couldn't be updated or created due to an error at
    # emarsys
    failed_contacts = []

    contacts = map(_transform_contact_data, contacts)

    # Filter contact data using whitelist
    if settings.EMARSYS_RECIPIENT_WHITELIST is not None:
        contacts = filter(lambda contact: contact[3]  # 3 = emarsys field id for email
                          in settings.EMARSYS_RECIPIENT_WHITELIST, contacts)

    update_contacts, create_contacts = tee(contacts, 2)

    # Filter out fields in create_only_fields for updating
    create_only_field_ids = [settings.EMARSYS_FIELDS[field_name][0]
                             for field_name in settings.EMARSYS_CREATE_ONLY_FIELDS]
    update_contacts = [{k: v for k, v in contact.items()
                        if k not in create_only_field_ids}
                       for contact in update_contacts]

    # Update contacts
    for chunk_of_contacts in chunked(update_contacts, BATCH_SIZE):
        log_debug("Updating a chunk of {} users."
                  .format(len(chunk_of_contacts)))

        num_successful, errors = _update_contacts(chunk_of_contacts)
        log_debug('{} users updated, {} users errored.'
                  .format(num_successful, len(errors)))

        total_updated += num_successful

        missing_contacts.extend(email for email, error_dict in errors.items()
                                if '2008' in error_dict)
        failed_contacts.extend((email, error_dict)
                               for email, error_dict in errors.items()
                               if '2008' not in error_dict)

    if create_missing:
        # Find contacts to create in original contact list
        create_contacts = filter(lambda contact: contact[3] in missing_contacts,
                                 create_contacts)

        # Create contacts
        for chunk_of_contacts in chunked(create_contacts, BATCH_SIZE):
            log_debug("Creating a chunk of {} users."
                      .format(len(chunk_of_contacts)))

            num_successful, errors = _create_contacts(chunk_of_contacts)
            log_debug('{} users created, {} users errored.'
                      .format(num_successful, len(errors)))

            total_created += num_successful

            failed_contacts.extend((email, error_dict)
                                   for email, error_dict in errors.items())

        # All contacts were either updated or the update or create failed.
        missing_contacts = []

    return total_updated, total_created, missing_contacts, failed_contacts
f5b893868ffd9e967ed17593a9d89b3cc45141f0
1,668
def vader_entity_sentiment(df,
                           textacy_col,
                           entity,
                           inplace=True,
                           vader_sent_types=['neg', 'neu', 'pos', 'compound'],
                           keep_stats=['count', 'mean', 'min', '25%', '50%', '75%', 'max']):
    """
    Pull the descriptive sentiment stats of text sentences with a specified entity in them.

    Parameters
    ----------
    df : DataFrame
        Dataframe which holds the text
    textacy_col : str
        The name of the column holding the textacy doc objects
    entity : str
        The entity to search the textacy Doc object for
    inplace : bool
        Whether to return the entire df with the sentiment info appended
        (True, the default) or the sentiment info alone (False)
    vader_sent_types : list
        The type of sentiment to extract. neg: negative, pos: positive,
        neu: neutral, compound is a combination of all three types
    keep_stats : list
        A list of the summary statistics to keep. Default is all returned by
        pandas DataFrame.describe() method

    Returns
    -------
    DataFrame
        Either the dataframe passed as arg with the sentiment info as trailing
        columns or the sentiment descriptive stats by itself
    """
    vader_analyzer = SentimentIntensityAnalyzer()

    sentiment_rows = []
    for text in df[textacy_col].values:
        text_entities = list(entity_statements(text, entity))

        # Iterate through all sentences and get sentiment analysis
        entity_sentiment_info = [vader_analyzer.polarity_scores(sentence)
                                 for sentence
                                 in text_entities]

        # After taking sentiments, turn into a dataframe and describe
        try:
            # Columns to keep
            keep_cols = vader_sent_types

            # Describe those columns
            summary_stats = pd.DataFrame(entity_sentiment_info).describe().loc[keep_stats, keep_cols]

            # Add row to list
            sentiment_rows.append(pivot_df_to_row(summary_stats))

        # If there's nothing to describe
        except ValueError:
            # Create a summary stats with nulls
            summary_stats = pd.DataFrame(index=keep_stats, columns=keep_cols)

            # Add to list of rows
            sentiment_rows.append(pivot_df_to_row(summary_stats))

    # Concatenate all rows together into one dataframe
    sentiment_df = pd.concat(sentiment_rows).add_prefix(entity+'_')

    if not inplace:
        return sentiment_df.reset_index(drop=True)
    else:
        # Return original df with new sentiment attached
        return pd.concat([df, sentiment_df], axis=1)
6ef61a7f79c5ff148cf35309e3f828ffa17947f6
1,669
def post_list(request): """ Create a view that will return a list of Posts that were published prior to 'now' and render them to the 'blogposts.html' template :param request: :return: """ posts = Post.objects.filter(published_date__lte=timezone.now() ).order_by('-published_date') return render(request, "blogposts.html", {'posts': posts})
043aff58ba50934d06b6a8221bcf270d1d0f98f5
1,670
def get_model():
    """
    Returns a compiled convolutional neural network model. Assume that the
    `input_shape` of the first layer is `(IMG_WIDTH, IMG_HEIGHT, 3)`.
    The output layer should have `NUM_CATEGORIES` units, one for each category.
    """
    # initialize a convolutional model
    model = tf.keras.models.Sequential([

        # add 3 convolutional layers and 3 average pooling layers to extract
        # features from the images (only the first layer needs input_shape)
        tf.keras.layers.Conv2D(32, (3, 3), activation="relu", input_shape=(IMG_WIDTH, IMG_HEIGHT, 3)),
        tf.keras.layers.AveragePooling2D(pool_size=(2, 2)),

        tf.keras.layers.Conv2D(64, (3, 3), activation="relu"),
        tf.keras.layers.AveragePooling2D(pool_size=(2, 2)),

        tf.keras.layers.Conv2D(128, (3, 3), activation="relu"),
        tf.keras.layers.AveragePooling2D(pool_size=(2, 2)),

        # flatten the units
        tf.keras.layers.Flatten(),

        # add a hidden layer
        tf.keras.layers.Dense(128, activation="relu"),

        # add dropout
        tf.keras.layers.Dropout(0.5),

        # add the output layer with NUM_CATEGORIES output nodes
        tf.keras.layers.Dense(NUM_CATEGORIES, activation="softmax")
    ])

    # compile the model
    model.compile(
        optimizer="adam",
        loss="categorical_crossentropy",
        metrics=["accuracy"]
    )

    return model
735d6fc343a1e1eb1ab3cce51387284bfee113ba
1,671
def get_app_icon_path(): """Path to OpenPype icon.""" return resources.get_openpype_icon_filepath()
20985ef5f0ada38ff466bb9160ea2264d53a83f4
1,672
def post_create_ipsec_endpoint_tunnel(
    api_client,
    endpoint_id,
    remote_subnet=None,
    local_subnet=None,
    enabled=None,
    ping_ipaddress=None,
    ping_interface=None,
    ping_interval=None,
    description=None,
    **kwargs
):  # noqa: E501
    """post_create_ipsec_endpoint_tunnel  # noqa: E501

    Create IPsec endpoint tunnel  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> response = await api.post_create_ipsec_endpoint_tunnel(endpoint_id, remote_subnet=remote_subnet, async_req=True)

    :param int endpoint_id: ID for IPsec endpoint (required)
    :param str remote_subnet:
    :param str local_subnet:
    :param str enabled:
    :param str ping_ipaddress:
    :param str ping_interface:
    :param int ping_interval:
    :param str description:
    :param bool async_req: execute request asynchronously
    :param _return_http_data_only: return response data without HTTP status
        code and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
        be returned without reading/decoding response data.
        Default is True.
    :param _request_timeout: timeout setting for this request. If one
        number provided, it will be total request
        timeout. It can also be a pair (tuple) of
        (connection, read) timeouts.
    :return: APIResponse or awaitable if async
    """
    local_var_params = locals()

    request_params = [
        "remote_subnet",
        "local_subnet",
        "enabled",
        "ping_ipaddress",
        "ping_interface",
        "ping_interval",
        "description",
    ]

    collection_formats = {}

    path_params = {"endpoint_id": endpoint_id}

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = {}
    for param in [p for p in request_params if local_var_params.get(p) is not None]:
        body_params[param] = local_var_params[param]

    # HTTP header `Accept`
    header_params["Accept"] = api_client.select_header_accept(
        ["application/json"]
    )  # noqa: E501

    # HTTP header `Content-Type`
    header_params["Content-Type"] = api_client.select_header_content_type(  # noqa: E501
        ["application/json"]
    )  # noqa: E501

    # Authentication setting
    auth_settings = ["ApiTokenAuth", "basicAuth"]  # noqa: E501

    return api_client.call_api(
        "/ipsec/endpoints/{endpoint_id}/tunnels",
        "POST",
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type="object",  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get("async_req"),
        _return_http_data_only=local_var_params.get(
            "_return_http_data_only"
        ),  # noqa: E501
        _preload_content=local_var_params.get("_preload_content", True),
        _request_timeout=local_var_params.get("_request_timeout"),
        collection_formats=collection_formats,
    )
c876113db138db252274d3f07ecec30ba4796701
1,673
from typing import List from datetime import datetime def cow_service( wfo: List[str] = Query( [], min_length=3, max_length=4, title="WFO Identifiers" ), begints: datetime = Query(...), endts: datetime = Query(...), phenomena: List[str] = Query(None, max_length=2), lsrtype: List[str] = Query(None, max_length=2), hailsize: float = Query(1), lsrbuffer: float = Query(15), warningbuffer: float = Query(1), wind: float = Query(58), windhailtag: str = Query("N"), limitwarns: str = Query("N"), fcster: str = None, ): """Replaced by __doc__.""" return handler( wfo, begints, endts, phenomena, lsrtype, hailsize, lsrbuffer, warningbuffer, wind, windhailtag, limitwarns, fcster, )
2e565deb0e2534c125fec70823c51ea6952ccbf4
1,674
def add_edges_reverse_indices(edge_indices, edge_values=None, remove_duplicates=True, sort_indices=True):
    """Add the edges for (i,j) as (j,i) with the same edge values. If they do already exist, no edge is added.
    By default, all indices are sorted.

    Args:
        edge_indices (np.array): Index list of shape (N,2).
        edge_values (np.array): Edge values of shape (N,M) matching the edge_indices
        remove_duplicates (bool): Remove duplicate edge indices. Default is True.
        sort_indices (bool): Sort final edge indices. Default is True.

    Returns:
        np.array: edge_indices or [edge_indices, edge_values]
    """
    clean_edge = None
    edge_index_flip = np.concatenate([edge_indices[:, 1:2], edge_indices[:, 0:1]], axis=-1)
    edge_index_flip_ij = edge_index_flip[edge_index_flip[:, 1] != edge_index_flip[:, 0]]  # Do not flip self loops
    clean_index = np.concatenate([edge_indices, edge_index_flip_ij], axis=0)
    if edge_values is not None:
        edge_to_add = edge_values[edge_index_flip[:, 1] != edge_index_flip[:, 0]]
        clean_edge = np.concatenate([edge_values, edge_to_add], axis=0)

    if remove_duplicates:
        un, unis = np.unique(clean_index, return_index=True, axis=0)
        mask_all = np.zeros(clean_index.shape[0], dtype=bool)
        mask_all[unis] = True
        mask_all[:edge_indices.shape[0]] = True  # keep old indices untouched
        clean_index = clean_index[mask_all]
        if edge_values is not None:
            clean_edge = clean_edge[mask_all]

    if sort_indices:
        order1 = np.argsort(clean_index[:, 1], axis=0, kind='mergesort')  # stable!
        ind1 = clean_index[order1]
        if edge_values is not None:
            clean_edge = clean_edge[order1]
        order2 = np.argsort(ind1[:, 0], axis=0, kind='mergesort')
        clean_index = ind1[order2]
        if edge_values is not None:
            clean_edge = clean_edge[order2]
    if edge_values is not None:
        return clean_index, clean_edge
    else:
        return clean_index
0b35d67b6322371d7c7fac35d45b3c4a7da3bda1
1,675
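# Editor's usage sketch for add_edges_reverse_indices() above -- not part of
# the original snippet; assumes numpy is imported as np.
import numpy as np

edges = np.array([[0, 1], [1, 2]])
print(add_edges_reverse_indices(edges))
# [[0 1]
#  [1 0]
#  [1 2]
#  [2 1]]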
from typing import Any
def isint(var: Any, raise_error: bool = False) -> bool:
    """Check if var is an integer

    Args:
        var (Any): variable to check
        raise_error (bool, optional): TypeError raised if set to `True`.
        Defaults to `False`.

    Raises:
        TypeError: raised if var is not an integer

    Returns:
        bool: `True` if var is an integer
    """
    is_ = isinstance(var, int)
    if not is_ and bool(raise_error):
        raise TypeError(f'Integer expected: {var=} is not an int')
    return is_
c4698e4cead5fdd7c73b3efebd3c475c3cbdeca3
1,676
def convert_interpolate2d(g, op, x):
    """Operator converter for interpolate 2D (dims == 4)."""

    def get_interpolate_mode(op):
        """Convert the 'interp_method' attr of paddle to tvm."""

        interp_method = op.attr("interp_method")
        align_corners = op.attr("align_corners")
        align_mode = op.attr("align_mode")

        rounding_method = ""
        if interp_method == "nearest":
            interp_method = "nearest_neighbor"
            coordinate_transformation_mode = "asymmetric"
            rounding_method = "floor"
        elif interp_method == "bilinear":
            interp_method = "linear"
            if not align_corners and align_mode == 0:
                coordinate_transformation_mode = "half_pixel"
            else:
                if align_corners:
                    coordinate_transformation_mode = "align_corners"
                else:
                    coordinate_transformation_mode = "asymmetric"
        elif interp_method == "bicubic":
            interp_method = "cubic"
            if align_corners:
                coordinate_transformation_mode = "align_corners"
            else:
                coordinate_transformation_mode = "half_pixel"
        else:
            msg = "interp_method {} is not supported for PaddlePaddle's interpolate"
            raise tvm.error.OpAttributeInvalid(msg.format(interp_method))
        return rounding_method, interp_method, coordinate_transformation_mode

    layout = op.attr("data_layout")
    out_h = op.attr("out_h")
    out_w = op.attr("out_w")
    out_size = [out_h, out_w]

    input_out_size = op.input("OutSize")
    input_size_tensor = op.input("SizeTensor")
    input_scale = op.input("Scale")

    if input_size_tensor:
        out_size = g.get_node(input_size_tensor[0])
        out_size = _infer_value(out_size, g.get_params())
    elif input_out_size:
        out_size = g.get_node(input_out_size[0])
        out_size = _infer_value(out_size, g.get_params())
    else:
        input_shape = infer_shape(x)
        if layout == "NCHW":
            in_h, in_w = input_shape[2], input_shape[3]
        else:
            in_h, in_w = input_shape[1], input_shape[2]

        if input_scale:
            scale_data = g.get_node(input_scale[0])
            scale_data = infer_value(scale_data, g.get_params()).numpy().tolist()
            if len(scale_data) > 1:
                out_h = int(scale_data[0] * in_h)
                out_w = int(scale_data[1] * in_w)
            else:
                out_h = int(scale_data[0] * in_h)
                out_w = int(scale_data[0] * in_w)
            out_size = [out_h, out_w]
        else:
            scale = op.attr("scale")
            scale = [float(i) for i in scale]
            if len(scale) > 1:
                out_h = int(scale[0] * in_h)
                out_w = int(scale[1] * in_w)
                out_size = [out_h, out_w]

    rounding_method, interp_method, coordinate_transformation_mode = get_interpolate_mode(op)

    out = _op.image.resize2d(
        x,
        size=out_size,
        layout=layout,
        method=interp_method,
        coordinate_transformation_mode=coordinate_transformation_mode,
        rounding_method=rounding_method,
        cubic_alpha=-0.75,
    )
    g.add_node(op.output("Out")[0], out)
9c089baf0a5d8d8ff78d3cadff1e17d8a19b7b0d
1,677
def get_stats_on_spatial_predictions_4x5_2x25_by_lat(res='4x5', ex_str='',
                                                     target='Iodide',
                                                     use_annual_mean=False,
                                                     filename=None,
                                                     folder=None, ds=None,
                                                     var2template='Chance2014_STTxx2_I',
                                                     debug=False):
    """
    Evaluate the spatial predictions between models, binned by latitude

    Parameters
    -------
    target (str): Name of the target variable (e.g. iodide)
    res (str): horizontal resolution of dataset (e.g. 4x5)
    debug (bool): print out debugging output?
    var2template (str): variable to use as a template for making new variables in ds
    use_annual_mean (bool): use the annual mean of the variable

    Returns
    -------
    (pd.DataFrame)
    """
    if isinstance(ds, type(None)):
        # If filename or folder not given, then use defaults
        if isinstance(filename, type(None)):
            filename = 'Oi_prj_predicted_{}_{}.nc'.format(target, res)
        if isinstance(folder, type(None)):
            data_root = utils.get_file_locations('data_root')
            folder = '{}/{}/outputs/'.format(data_root, target)
        ds = xr.open_dataset(folder + filename)
    # Variables to consider
    vars2analyse = list(ds.data_vars)
    # Add LWI to array
    ds = utils.add_LWI2array(ds=ds, var2template=var2template, res=res)
    # - Get general annual stats
    df = pd.DataFrame()
    # take annual average
    if use_annual_mean:
        ds_tmp = ds.mean(dim='time')
    else:
        ds_tmp = ds
    for var_ in vars2analyse:
        # Mask to only consider (100%) water boxes
        arr = ds_tmp[var_].values
        if debug:
            print(arr.shape, (ds_tmp['IS_WATER'] == False).shape)
        arr[(ds_tmp['IS_WATER'] == False).values] = np.NaN
        # Update values to include np.NaN
        ds_tmp[var_].values = arr
        # Setup series objects to hold stats
        s_mean = pd.Series()
        s_75 = pd.Series()
        s_50 = pd.Series()
        s_25 = pd.Series()
        # Loop by latitude and save stats to the series
        for lat_ in ds['lat'].values:
            vals = ds_tmp[var_].sel(lat=lat_).values
            stats_ = pd.Series(vals.flatten()).dropna().describe()
            # Save the quartiles and the mean
            # (at the poles all values are the same masked value)
            s_mean[lat_] = stats_['mean']
            s_25[lat_] = stats_['25%']
            s_75[lat_] = stats_['75%']
            s_50[lat_] = stats_['50%']
        # Save variables to DataFrame
        var_str = '{} - {}'
        stats_dict = {'mean': s_mean, '75%': s_75, '25%': s_25, 'median': s_50}
        for stat_ in stats_dict.keys():
            df[var_str.format(var_, stat_)] = stats_dict[stat_]
    return df
dbcd103a3f3a7b6ab97439dec7ca43ced776a434
1,678
import math
def calc_dif_mod_cn(x, y):
    """
    Check whether the difference between the moduli of two complex numbers
    is a prime number; return the difference if it is, else 0.
    """
    modx = math.sqrt(x.real ** 2 + x.imag ** 2)  # modulus of the first complex number
    mody = math.sqrt(y.real ** 2 + y.imag ** 2)  # modulus of the second complex number
    dif = modx - mody
    if dif != int(dif) or dif < 2:  # primes are integers >= 2
        return 0
    d = 0  # the number of divisors of dif between 2 and dif/2
    for i in range(2, int(dif) // 2 + 1):
        if dif % i == 0:
            d = d + 1
    # if d == 0, then dif has no such divisor, so it is a prime number
    if d == 0:
        return dif
    else:
        return 0
88e353e4a948c3b6adc65b91265a5f6d2e68a1c1
1,679
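# Editor's worked example for calc_dif_mod_cn() above -- not part of the
# original snippet. |3+4j| = 5 and |2j| = 2, so the difference 3 is prime.
print(calc_dif_mod_cn(3 + 4j, 2j))  # -> 3.0
print(calc_dif_mod_cn(3 + 4j, 4j))  # difference is 1, not prime -> 0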
import re
def mitochondrialGTF(concatenated_gff, output_directory):
    """Convert GFF file with information for mitochondrial genes to GTF and
    return the path to GTF file.

    Keyword arguments:
    concatenated_gff -- path to concatenated GFF file
    output_directory -- path to directory where GTF file should be placed

    Important: transcript name must follow 'compXX_cXX_seqXX'

    Notes: Make a separate GTF file with tagged mitochondrial genes (MT_) so
    that later it is merged with the post-processed original GTF file (after
    removal of mitochondrial genes).
    """
    output = output_directory + "/concatenated.gtf"
    with open(concatenated_gff, 'r') as infile:
        with open(output, 'w') as outfile:
            for line in infile:
                line = line.strip('\n')
                elementList = line.split('\t')
                p = re.search(r'((comp\d+_c\d+)_seq\d+)', elementList[0])
                geneID = p.group(2)
                transcriptID = p.group(1)
                outfile.write(
                    elementList[0] + '\tmitofinder\tgene\t'
                    + elementList[3] + '\t' + elementList[4] + '\t'
                    + elementList[5] + '\t' + elementList[6] + '\t'
                    + elementList[7] + '\tgene_id "' + geneID
                    + '"; transcript_id "' + transcriptID
                    + '"; gene_name "MT_' + elementList[0] + '_'
                    + elementList[8] + '";' + '\n'
                )
    return output
24c0987d5c2208b1a96020b8eea19791c86b3f5e
1,680
def get_none_zero_region(im, margin):
    """
    get the bounding box of the non-zero region of an ND volume
    """
    input_shape = im.shape
    if isinstance(margin, int):
        margin = [margin] * len(input_shape)
    assert(len(input_shape) == len(margin))
    indxes = np.nonzero(im)
    idx_min = []
    idx_max = []
    for i in range(len(input_shape)):
        idx_min.append(indxes[i].min())
        idx_max.append(indxes[i].max())

    for i in range(len(input_shape)):
        idx_min[i] = max(idx_min[i] - margin[i], 0)
        idx_max[i] = min(idx_max[i] + margin[i], input_shape[i] - 1)
    return idx_min, idx_max
6ca56b94becd254b0ecbaf85e25475bb574586a9
1,681
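# Editor's usage sketch for get_none_zero_region() above -- not part of the
# original snippet; assumes numpy is imported as np.
import numpy as np

im = np.zeros((10, 10))
im[3:6, 4:8] = 1
print(get_none_zero_region(im, margin=1))  # -> ([2, 3], [6, 8])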
import json
from datetime import datetime, timedelta, timezone

from django.core.exceptions import ValidationError
from django.core.validators import URLValidator
from django.db import IntegrityError
from django.http import JsonResponse
# ShortLink and get_alias are project-local and assumed to be in scope.


def create_short_link(request):
    """Given an URL, return a shortened link.

    Args:
        url: URL to be shortened.

    Returns:
        short_link: Shortened result of URL.
        expires_at: Timestamp before which the link is valid.
    """
    payload = json.loads(request.body)
    url = payload.get("url")
    try:
        URLValidator(["http", "https"])(url)
    except ValidationError:
        return JsonResponse({"error": "Valid URL required."}, status=422)

    expires_at = datetime.now(tz=timezone.utc) + timedelta(hours=1)
    try:
        short_link, _ = ShortLink.objects.update_or_create(
            url=url, alias=get_alias(url), defaults={"expires_at": expires_at}
        )
    except IntegrityError:
        # Add a timestamp to the hashed input to avoid alias collisions
        short_link = ShortLink.objects.create(
            url=url,
            alias=get_alias(f"{url}{datetime.now(tz=timezone.utc)}"),
            expires_at=expires_at,
        )
    return JsonResponse(
        {
            "short_link": short_link.build_url(),
            "expires_at": expires_at.isoformat(),
        },
        status=201,
    )
4d870b21484ced46e5fd538545cb5cca6c1b3b8a
1,682
import gzip def get_mapped_tracks_file(mode='r', **kwargs): """ Returns a file descriptor-like object to the file containing the raw track. (A mapped track is a collection of tuples). Each tuple is: - linking pairs - paths - linking pairs - points Arguments: mode: r/w mode driver_id: string, the id of the driver """ fname = get_data_dir() + Template(mapped_tracks_tpl).substitute(**kwargs) return gzip.open(fname, mode)
281ba7bd0f05b61fb13041c2193caed1de1f918f
1,683
def ne_2beta(r, ne0, rc_outer, beta_outer, f_inner, rc_inner, beta_inner): """ Electron number density [cm^-3] in the double-beta profile of the hydrostratic equilibrium model. r : distance from the center of the cluster [kpc] ne0 : central electron number density [cm^-3] rc_outer : core radius from the outer component [kpc] (default: 100.) beta_outer : slope from the outer component (default: 1.) f_inner : fractional contribution from inner component (default: 0.) rc_inner : core radius from the inner component [kpc] (default: 10.) beta_inner : slope from the inner component (default: 1.) """ def outer(rr): return (1. + rr**2./rc_outer ** 2.)**(-1.5*beta_outer) # outer contribution def inner(rr): return (1. + rr**2./rc_inner ** 2.)**(-1.5*beta_inner) # inner contribution return ne0*(f_inner*inner(r) + (1.-f_inner)*outer(r))
cac79f2f9a76cb94c2ad08c344844ddec6ff3a45
1,684
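# Editor's usage sketch for ne_2beta() above -- not part of the original
# snippet. With f_inner = 0 the profile reduces to a single beta model; the
# numbers below are illustrative, not fitted to any cluster.
print(ne_2beta(0.0, ne0=1e-3, rc_outer=100., beta_outer=1.,
               f_inner=0., rc_inner=10., beta_inner=1.))    # -> 1e-3 at the center
print(ne_2beta(100.0, ne0=1e-3, rc_outer=100., beta_outer=1.,
               f_inner=0., rc_inner=10., beta_inner=1.))    # -> 1e-3 * 2**-1.5 at one core radius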
def sell():
    """Sell shares of stock"""
    if request.method == "POST":
        symbol = request.form.get("symbol")
        amount = request.form.get("shares")
        try:
            amount = int(amount)
        except (TypeError, ValueError):
            return apology("enter a proper value")

        if not symbol:
            return apology("Missing stock symbol!")
        elif not amount:
            return apology("Missing number of shares!")
        elif amount <= 0:
            return apology("enter a proper value")

        stock = lookup(symbol)
        if stock is None:
            return apology("invalid symbol")

        rows = db.execute("""
            SELECT symbol, SUM(shares) as totalShares
            FROM history
            WHERE user_id=:user_id
            GROUP BY symbol
            HAVING totalShares > 0;""", user_id=session["user_id"])
        for row in rows:
            if row["symbol"] == symbol:
                if amount > row["totalShares"]:
                    return apology("too many shares")

        price = float(stock["price"])
        rows = db.execute("SELECT cash FROM users WHERE id=:id", id=session["user_id"])
        cash = rows[0]["cash"]
        updated_cash = cash + amount * price
        db.execute("UPDATE users SET cash=:updated_cash WHERE id=:id",
                   updated_cash=updated_cash, id=session["user_id"])
        db.execute("INSERT INTO history (user_id,stock_name,shares,price,symbol) VALUES (:user_id,:stock_name,:shares,:price,:symbol)",
                   user_id=session["user_id"], stock_name=stock['name'],
                   shares=-1 * amount, price=stock['price'], symbol=symbol)
        flash("Sold!")
        return redirect("/stock")
    else:
        rows = db.execute("""
            SELECT symbol
            FROM history
            WHERE user_id=:user_id
            GROUP BY symbol
            HAVING SUM(shares) > 0;""", user_id=session["user_id"])
        return render_template("sellstock.html", symbols=[row["symbol"] for row in rows])
ae385e94eb6765eca7b9b4b74ef3320c2a265e62
1,685
from typing import Any def assemble_block(n_rows: Int, n_cols: Int, pdf: pd.DataFrame, cov_matrix: NDArray[(Any, Any), Float], row_mask: NDArray[Any]) -> NDArray[Float]: """ Creates a dense n_rows by n_cols matrix from the array of either sparse or dense vectors in the Pandas DataFrame corresponding to a group. This matrix represents a block. Args: n_rows : The number of rows in the resulting matrix n_cols : The number of columns in the resulting matrix pdf : Pandas DataFrame corresponding to a group cov_matrix: 2D numpy array representing covariate columns that should be prepended to matrix X from the block. Can be empty if covariates are not being applied. row_mask: 1D numpy array of size n_rows containing booleans used to mask rows from the block X before return. Returns: Dense n_rows - n_masked by n_columns matrix where the columns have been 0-centered and standard scaled. """ mu = pdf['mu'].to_numpy() sig = pdf['sig'].to_numpy() if 0 in sig: raise ValueError(f'Standard deviation cannot be 0.') if row_mask.size == 0: row_mask = np.full(n_rows, True) if 'indices' not in pdf.columns: X_raw = np.column_stack(pdf['values'].array) else: X_raw = np.zeros([n_rows, n_cols]) for column, row in enumerate(pdf[['indices', 'values']].itertuples()): X_raw[row.indices, column] = row.values X = ((X_raw - mu) / sig) if cov_matrix.any(): return np.column_stack((cov_matrix, X))[row_mask, :] else: return X[row_mask, :]
25f8c3923050c64cb4c7a7b9842007a9e8d7723b
1,686
def calc_xixj_from_braggphi(
    det_cent=None,
    det_nout=None,
    det_ei=None,
    det_ej=None,
    det_outline=None,
    summit=None,
    nout=None,
    e1=None,
    e2=None,
    bragg=None,
    phi=None,
    option=None,
    strict=None,
):
    """ Several options for shapes

    det_cent, det_nout, det_ei and det_ej are always of shape (3,)

    option:
        0:
            (summit, e1, e2).shape = (3,)
            (bragg, phi).shape = (nbragg,)
             => (xi, xj).shape = (nbragg,)
        1:
            (summit, e1, e2).shape = (3, nlamb, npts, nbragg)
            (bragg, phi).shape = (nlamb, npts, nbragg)
            => (xi, xj).shape = (nlamb, npts, nbragg)
    """

    # check inputs
    if strict is None:
        strict = True

    # Check option
    gdet = [det_cent, det_nout, det_ei, det_ej]
    g0 = [summit, nout, e1, e2]
    g1 = [bragg, phi]

    # check broadcastable
    _are_broadcastable(bragg=bragg, phi=phi)
    assert all([gg.shape == (3,) for gg in gdet]), "detector vectors must have shape (3,)"
    assert all([gg.shape == g0[0].shape for gg in g0]), "(summit, nout, e1, e2) must share a shape"
    lc = [
        g0[0].size == 3 and g1[0].ndim == 1,
        g0[0].ndim in [4, 5] and g0[0].shape[0] == 3
        and phi.shape == g0[0].shape[1:],
    ]
    if np.sum(lc) == 0:
        lstr = [
            '\t- {}: {}'.format(kk, vv.shape)
            for kk, vv in [
                ('summit', summit), ('nout', nout),
                ('e1', e1), ('e2', e2),
                ('bragg', bragg), ('phi', phi),
            ]
        ]
        msg = (
            "Please provide either:\n"
            + "\t- option 0:\n"
            + "\t\t- (summit, nout, e1, e2).shape[0] = 3\n"
            + "\t\t- (bragg, phi).ndim = 1\n"
            + "\t- option 1:\n"
            + "\t\t- (summit, nout, e1, e2).ndim in [4, 5]\n"
            + "\t\t- (bragg, phi).shape[0] = 3\n\n"
            + "You provided:\n"
            + "\n".join(lstr)
        )
        raise Exception(msg)
    elif all(lc):
        msg = ("Multiple options!")
        raise Exception(msg)
    if option is None:
        option = lc.index(True)
    assert (lc[0] and option == 0) or (lc[1] and option == 1)

    if option == 0:
        summit = summit.ravel()
        nout, e1, e2 = nout.ravel(), e1.ravel(), e2.ravel()
        det_cent = det_cent[:, None]
        det_nout = det_nout[:, None]
        det_ei, det_ej = det_ei[:, None], det_ej[:, None]
        summit, nout = summit[:, None], nout[:, None]
        e1, e2 = e1[:, None], e2[:, None]
    else:
        det_cent = det_cent[:, None, None, None]
        det_nout = det_nout[:, None, None, None]
        det_ei = det_ei[:, None, None, None]
        det_ej = det_ej[:, None, None, None]
        if g0[0].ndim == 5:
            det_cent = det_cent[..., None]
            det_nout = det_nout[..., None]
            det_ei = det_ei[..., None]
            det_ej = det_ej[..., None]
        # Not necessary for broadcasting (last dims first)
        # bragg = bragg[None, ...]
        # phi = phi[None, ...]

    # Compute
    vect = (
        -np.sin(bragg)*nout
        + np.cos(bragg)*(np.cos(phi)*e1 + np.sin(phi)*e2)
    )
    k = np.sum(
        (det_cent-summit)*det_nout, axis=0
    ) / np.sum(vect*det_nout, axis=0)
    pts = summit + k[None, ...]*vect
    xi = np.sum((pts - det_cent)*det_ei, axis=0)
    xj = np.sum((pts - det_cent)*det_ej, axis=0)

    # Optional: eliminate points outside the det outline
    if det_outline is not None and strict is True:
        ind = (
            (xi < np.min(det_outline[0, :]))
            | (xi > np.max(det_outline[0, :]))
            | (xj < np.min(det_outline[1, :]))
            | (xj > np.max(det_outline[1, :]))
        )
        xi[ind] = np.nan
        xj[ind] = np.nan
    return xi, xj, strict
f11829d0d3dfe6b523a8336c8b7dbf5cfaa9f36e
1,687
def _check_index_good(X):
    """Return True if the first level of X's index contains "__total"."""
    # check the first index elements for "__total"
    tot_chk = np.any(X.index.get_level_values(level=0).isin(["__total"]))
    return tot_chk
8e39b7d18af2c247c427a4e9658fc1e18ff1dd89
1,688
def seguimientos_list_csv(request, codigo):
    """List all tracking events for each bill (proyecto de ley).
    ---
    type:
      codigo:
        required: true
        type: string

    parameters:
        - name: codigo
          description: bill code including the legislature, e.g. 00002-2011
          type: string
          paramType: path
          required: true
    """
    codigo, legislatura = split_code_input(codigo)

    try:
        proy = Proyecto.objects.get(
            codigo=codigo,
            legislatura=legislatura,
        )
    except Proyecto.DoesNotExist:
        msg = 'error,proyecto no existe'
        return HttpResponse(msg, content_type='text/csv')

    seguimientos = get_seguimientos_from_proyecto_id(proy.id)
    seguimientos.append({
        'headline': 'Fecha de presentación',
        'startDate': convert_date_to_string(proy.fecha_presentacion),
    })
    proyecto = "Proyecto No: " + str(proy.numero_proyecto).replace("/", "_")

    data = []
    for i in seguimientos:
        data.append({
            'proyecto': proyecto,
            'headline': i['headline'],
            'startDate': i['startDate'].replace(',', '-'),
        })

    if request.method == 'GET':
        return CSVResponse(data)
846d8da6dac93d23eb444b023e1de4fb123a579f
1,689
import torch def endpoint_error(estimate, ground_truth): """Computes the average end-point error of the optical flow estimates.""" error = torch.norm( estimate - ground_truth[:, :2, :, :], 2, 1, keepdim=False) if ground_truth.size(1) == 3: mask = (ground_truth[:, 2, :, :] > 0).float() else: mask = torch.ones_like(error) epe = error * mask epe = torch.sum(epe, (1, 2)) / torch.sum(mask, (1, 2)) return epe.mean().reshape(1)
f40de83768ad6760620120c59eb377d1e12ff8b1
1,690
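# Editor's usage sketch for endpoint_error() above -- not part of the original
# snippet; assumes PyTorch is installed. A constant offset of (1, 1) per pixel
# gives an end-point error of sqrt(2) everywhere.
import torch

estimate = torch.zeros(1, 2, 4, 4)      # predicted flow, shape (B, 2, H, W)
ground_truth = torch.ones(1, 2, 4, 4)   # true flow
print(endpoint_error(estimate, ground_truth))  # -> tensor([1.4142])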
import functools
def output(log_message=None, success_message=None,
           fail_message=None):
    """This is a decorator to trap the typical exceptions that occur
    when applying and removing modules. It returns the proper output
    corresponding to the error messages automatically. If the function
    returns output (success_flag, message) then those are returned,
    otherwise success is assumed and the success_message returned.
    Using this removes a lot of potential boiler-plate code, however
    it is not necessary.
    Keyword arguments can be used in the message string. Default
    values can be found in the message_args @property, however a
    driver can add whatever it sees fit, by setting message_args
    to a dict in the configure call (see above). Thus if you set
    self.message_args = {'my_key': 'my_key_val'} then the message
    string could look like "My key is '%(my_key)s'".
    """
    success_message = success_message or "Success"
    fail_message = fail_message or "Fail"

    def output_decorator(func):
        """This is the actual decorator."""

        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            """Here's where we handle the error messages and return values
            from the actual function.
            """
            log_msg = log_message
            success_msg = success_message
            fail_msg = fail_message
            if isinstance(args[0], ModuleDriver):
                # Try and insert any message args if they exist in the driver
                message_args = args[0].message_args
                if message_args:
                    try:
                        log_msg = log_msg % message_args
                        success_msg = success_msg % message_args
                        fail_msg = fail_msg % message_args
                    except Exception:
                        # if there's a problem, just log it and drive on
                        LOG.warning(_("Could not apply message args: %s") %
                                    message_args)

            if log_msg:
                LOG.info(log_msg)
            success = False
            try:
                rv = func(*args, **kwargs)
                if rv:
                    # Use the actual values, if there are some
                    success, message = rv
                else:
                    success = True
                    message = success_msg
            except exception.ProcessExecutionError as ex:
                message = (_("%(msg)s: %(err)s") %
                           {'msg': fail_msg, 'err': ex.stderr})
                LOG.exception(message)
            except exception.TroveError as ex:
                message = (_("%(msg)s: %(err)s") %
                           {'msg': fail_msg, 'err': ex._error_string})
                LOG.exception(message)
            except Exception as ex:
                message = (_("%(msg)s: %(err)s") %
                           {'msg': fail_msg, 'err': str(ex)})
                LOG.exception(message)
            return success, message

        return wrapper

    return output_decorator
18d39d94c930a0ec99e0fb122f20e3a18d62328d
1,691
def custom_mape(approxes, targets): """Competition metric is a slight variant on MAPE.""" nominator = np.abs(np.subtract(approxes, targets)) denominator = np.maximum(np.abs(targets), 290000) return np.mean(nominator / denominator)
7c8b8eab352f516c63202ef0b26253265acc68a4
1,692
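# Editor's worked example for custom_mape() above -- not part of the original
# snippet; assumes numpy is imported as np. The 290000 floor on the denominator
# keeps small targets from dominating the error.
import numpy as np

approxes = np.array([300000.0, 100000.0])
targets = np.array([290000.0, 150000.0])
# (10000/290000 + 50000/290000) / 2 = 0.10344...
print(custom_mape(approxes, targets))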
import collections import re def article_text_to_dict(article_text: str): """ Translates an article text into a dict. """ data = collections.defaultdict(list) field = '' for line in re.split(r'\n+', article_text): # Fix little bug with isi files if line.startswith('null'): line = line[4:] name = line[:2] value = line[3:] if not name.isspace(): field = name if not field.isspace() and field != 'ER': data[field].append(value) return dict(data)
84ac8d96840696e1392de3dbf87ab7026978d36c
1,693
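# Editor's usage sketch for article_text_to_dict() above -- not part of the
# original snippet. Continuation lines (leading spaces) attach to the last field.
record = "PT J\nAU Smith, J\n   Doe, A\nTI An example title\nER"
print(article_text_to_dict(record))
# -> {'PT': ['J'], 'AU': ['Smith, J', 'Doe, A'], 'TI': ['An example title']}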
def frohner_cor_3rd_order(sig1,sig2,sig3,n1,n2,n3): """ Takes cross-sections [barns] and atom densities [atoms/barn] for three thicknesses of the same sample, and returns extrapolated cross section according to Frohner. Parameters ---------- sig1 : array_like Cross section of the thinnest of the three samples. sig2 : array_like Cross section of the mid-thickness of the three samples. sig3 : array_like Cross section of the thickest of the three samples. n1 : float Atom density of the thinnest sample n2 : float Atom density of the mid-thickness sample n3 : float Atom density of the thickest sample Returns ------- sig0 : array_like The extrapolated cross section from sig1, sig2, and sig3 """ # two terms in the numerator numer1 = (n1*sig2-n2*sig1)*(n3**2-n1**2-(n1-n3)/(n1-n2)*(n2**2-n1**2)) numer2 = (n1*n2**2-n1**2*n2)*(sig3-sig2-(n1-n3)/(n1-n2)*(sig2-sig1)) denom = (n1-n2)*(n3**2-n1**2) - (n1-n3)*(n2**2-n1**2) return (numer1-numer2)/denom
d6f0b39368c19aeda899265eb187190bb4beb944
1,694
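# Editor's consistency check for frohner_cor_3rd_order() above -- not part of
# the original snippet. If all three samples measure the same cross section,
# the extrapolation must return that value unchanged.
import numpy as np

sig = np.array([10.0, 12.0])       # barns, identical for every thickness
print(frohner_cor_3rd_order(sig, sig, sig, 0.01, 0.02, 0.04))  # -> [10. 12.]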
def nodeInTree(root, k):
    """
    Checks if the node exists in the tree or not
    """
    if root is None:
        return False
    if root.data == k or nodeInTree(root.left, k) or nodeInTree(root.right, k):
        return True
    return False
14db01c8d2370bfaa01220d3608798165ea1a096
1,695
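# Editor's usage sketch for nodeInTree() above -- not part of the original
# snippet. The Node class is a hypothetical stand-in; the original does not
# define one.
class Node:
    def __init__(self, data, left=None, right=None):
        self.data = data
        self.left = left
        self.right = right

root = Node(1, Node(2, Node(4)), Node(3))
print(nodeInTree(root, 4))  # -> True
print(nodeInTree(root, 7))  # -> False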
import struct  # for handling binary data
import os
import numpy as np


def read_xvec(path, base_type, N=-1):
    """
    A utility function to read YAEL format files (xxx.ivec, xxx.fvec, and xxx.bvec)
    :param path: The path of xvec file
    :param base_type: The type of xvec; 'f', 'i' or 'b'
    :param N: The number of vectors to be read. If this is not specified, read all
    :return: a N x dim array
    """
    assert(base_type == 'f' or base_type == 'i' or base_type == 'b')
    if base_type == "b":
        # Convert 'b' to 'B' because unsigned char is 'B' for struct
        base_type = 'B'
    data_type, byte_size = {
        'f': (np.float32, 4),
        'i': (np.int32, 4),
        'B': (np.uint8, 1)
    }[base_type]

    size = os.path.getsize(path)
    with open(path, 'rb') as fin:
        bdata = fin.read(4)  # the first 4 bytes hold the number of dimensions
        dim = struct.unpack('i', bdata)[0]
        N_all = int(size / (4 + dim * byte_size))  # All size
        if N == -1:
            N = N_all
        assert(N <= N_all)

        fin.seek(0)  # Set cursor to the initial position
        vec = np.empty((N, dim), dtype=data_type)
        for n in range(N):
            bdata = fin.read(4)  # the first 4 bytes are always dim, so skip them
            bdata = fin.read(byte_size * dim)  # Read a vector
            vec[n, :] = np.array(struct.unpack(base_type * dim, bdata), dtype=data_type)

    return vec
1bc4f857a0c1f0b1e6994d12f61006a3de9ac7c8
1,696
def split_and_filter(intermediate_str, splitter):
    """
    Split a string with the given splitter -- in practice either "," or "'" --
    then keep only the pieces that contain "https".
    :param intermediate_str : string that is in the middle of parsing
    :param splitter : delimiter to split on
    :return: the first split piece that contains "https"
    """
    intermediate_split = intermediate_str.split(splitter)
    intermediate_filter = [elem for elem in intermediate_split
                           if 'https' in elem]

    return intermediate_filter[0]
a4b800df1aca89ca1e8eedfc65a5016a995acd48
1,697
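# Editor's usage sketch for split_and_filter() above -- not part of the
# original snippet.
raw = "name=foo,link=https://example.com/a,other=bar"
print(split_and_filter(raw, ','))  # -> 'link=https://example.com/a'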
def organism_code(genus, species): """Return code from genus and species.""" return ( f"{genus[:GENUS_CODE_LEN].lower()}{species[:SPECIES_CODE_LEN].lower()}" )
6b52342ccf8388a2b8a98cc3d640dc3ea5b97e66
1,698
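# Editor's usage sketch for organism_code() above -- not part of the original
# snippet. GENUS_CODE_LEN and SPECIES_CODE_LEN are module constants not shown
# here; the common 3+3 convention is assumed for illustration.
GENUS_CODE_LEN, SPECIES_CODE_LEN = 3, 3
print(organism_code("Escherichia", "coli"))  # -> 'esccol'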
import os
def verify_image(filename):
    """Return True if filename names an existing file with an image extension."""
    image_extensions = ['tif', 'jpg', 'gif', 'png', 'jpeg']
    if isinstance(filename, str):
        # os.path.splitext handles names with multiple dots correctly
        extension = os.path.splitext(filename)[1].lstrip('.').lower()
        if extension in image_extensions:
            return os.path.isfile(filename)
    return False
84e9845ab3e146d94f2ba3e0a2fb3ecd458b822f
1,699