Columns: content (string, length 35 to 416k) · sha1 (string, length 40) · id (int64, 0 to 710k)
def count_increasing(ratings, n):
    """ Only considering the increasing case """
    arr = [1] * n
    cnt = 1
    for i in range(1, n):
        cnt = cnt + 1 if ratings[i - 1] < ratings[i] else 1
        arr[i] = cnt
    return arr
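# Usage sketch (added illustration, not part of the original record):
assert count_increasing([1, 2, 2, 3], 4) == [1, 2, 1, 2]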
9fe274527fbba505467a195bf555c77d2f3e6aed
2,412
def decorate(rvecs):
    """Output range vectors into some desired string format"""
    return ', '.join(['{%s}' % ','.join([str(x) for x in rvec]) for rvec in rvecs])
31a3d4414b0b88ffd92a5ddd8eb09aaf90ef3742
2,413
def get_color(card):
    """Returns the card's color

    Args:
        card (webelement): a visible card

    Returns:
        str: card's color
    """
    color = card.find_element_by_xpath(".//div/*[name()='svg']/*[name()='use'][2]").get_attribute("stroke")
    # both light and dark theme
    if color == "#ff0101" or color == "#ffb047":
        color = "red"
    elif color == "#800080" or color == "#ff47ff":
        color = "purple"
    else:
        color = "green"
    return color
452266b81d70973149fed4ab2e6cbc9c93591180
2,414
def add_chr_prefix(band):
    """
    Return the band string with chr prefixed
    """
    return ''.join(['chr', band])
08a99220023f10d79bdacdb062a27efcb51086ce
2,415
def disable_text_recog_aug_test(cfg, set_types=None):
    """Remove aug_test from test pipeline of text recognition.

    Args:
        cfg (mmcv.Config): Input config.
        set_types (list[str]): Type of dataset source. Should be
            None or sublist of ['test', 'val'].

    Returns:
        cfg (mmcv.Config): Output config removing
            `MultiRotateAugOCR` in test pipeline.
    """
    assert set_types is None or isinstance(set_types, list)
    if set_types is None:
        set_types = ['val', 'test']
    for set_type in set_types:
        if cfg.data[set_type].pipeline[1].type == 'MultiRotateAugOCR':
            cfg.data[set_type].pipeline = [
                cfg.data[set_type].pipeline[0],
                *cfg.data[set_type].pipeline[1].transforms
            ]
    return cfg
bda3a5420d32d55062b23a6af27cee3e203b878c
2,416
def delta_in_ms(delta):
    """
    Convert a timedelta object to milliseconds.
    """
    return delta.seconds * 1000.0 + delta.microseconds / 1000.0
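# Usage sketch (added illustration, not part of the original record):
from datetime import timedelta
assert delta_in_ms(timedelta(seconds=1, microseconds=500)) == 1000.5
# Note: the function reads only .seconds and .microseconds, so whole days are ignored.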
4ed048155daf4a4891488e28c674e905e1bbe947
2,418
def selection_sort(data):
    """Sort a list of unique numbers in ascending order using selection sort. O(n^2).

    The process repeatedly iterates through the list, finds the smallest
    element in the unsorted region, and moves it to the front of that region.

    Args:
        data: data to sort (list of int)

    Returns:
        sorted list
    """
    sorted_data = data[:]
    for i, value in enumerate(sorted_data):
        # find smallest value in unsorted subset
        min_value = min(sorted_data[i:])
        index_min = sorted_data.index(min_value)
        # place smallest value at start of unsorted subset
        sorted_data[i], sorted_data[index_min] = min_value, value
    return sorted_data
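# Usage sketch (added illustration, not part of the original record):
assert selection_sort([3, 1, 2]) == [1, 2, 3]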
8b745be41c857669aedecb25b3006bbdc1ef04eb
2,419
def player_count(conn, team_id):
    """Returns the number of players associated with a particular team"""
    c = conn.cursor()
    c.execute("SELECT id FROM players WHERE team_id=?", (team_id,))
    return len(c.fetchall())
cfced6da6c8927db2ccf331dca7d23bba0ce67e5
2,420
import math


def format_timedelta(value, time_format="{days} days, {hours2}:{minutes2}:{seconds2}"):
    """Format a datetime.timedelta."""
    if hasattr(value, 'seconds'):
        seconds = value.seconds + value.days * 24 * 3600
    else:
        seconds = int(value)
    seconds_total = seconds

    minutes = int(math.floor(seconds / 60))
    minutes_total = minutes
    seconds -= minutes * 60

    hours = int(math.floor(minutes / 60))
    hours_total = hours
    minutes -= hours * 60

    days = int(math.floor(hours / 24))
    days_total = days
    hours -= days * 24

    years = int(math.floor(days / 365))
    years_total = years
    days -= years * 365

    return time_format.format(**{
        'seconds': seconds,
        'seconds2': str(seconds).zfill(2),
        'minutes': minutes,
        'minutes2': str(minutes).zfill(2),
        'hours': hours,
        'hours2': str(hours).zfill(2),
        'days': days,
        'years': years,
        'seconds_total': seconds_total,
        'minutes_total': minutes_total,
        'hours_total': hours_total,
        'days_total': days_total,
        'years_total': years_total,
    })
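# Usage sketch (added illustration, not part of the original record):
# 90061 s = 1 day, 1 hour, 1 minute, 1 second.
assert format_timedelta(90061) == "1 days, 01:01:01"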
19dc2b175beb1d030f14ae7fe96cb16d66f6c219
2,421
def max_delta(model, new_model):
    """Return the largest difference between any two corresponding values in the models"""
    return max(
        [(abs(model[i] - new_model[i])).max() for i in range(len(model))]
    )
faf4a9fb2b24f7e7b4f357eef195e435950ea218
2,422
def child_is_flat(children, level=1):
    """
    Check if all children in a section are on the same level.

    children - list of section children.
    level - integer, current level of depth.

    Returns True if all children are on the same level, False otherwise.
    """
    return all(
        len(child) <= level + 1 or child[(level + 1):][0].isalpha()
        for child in children
    )
e14f9210a90b40b419d21fffa1542212429d80be
2,423
import os


def is_processable(path: str, should_match_extension: str):
    """
    Check whether a scandir path is a regular file with the expected extension.
    """
    if not os.path.isfile(path):
        return False
    filename = os.path.basename(path)
    _, extension = os.path.splitext(filename)
    if extension.lower() != should_match_extension.lower():
        return False
    return True
5d99b821d3653ff452acac1e5fe48cab559c509e
2,424
import time


def timestamp():
    """Get the current unix timestamp and return it.

    Attention: it's a floating point number."""
    return time.time()
8e56a61659da657da9d5dda364d4d9e8f3d58ed2
2,425
import itertools


def cycle(iterable):
    """Make an iterator returning elements from the iterable and saving a copy
    of each. When the iterable is exhausted, return elements from the saved
    copy. Repeats indefinitely.

    This function uses single dispatch.

    .. seealso:: :func:`itertools.cycle`
    """
    return itertools.cycle(iterable)
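# Usage sketch (added illustration, not part of the original record):
from itertools import islice
assert list(islice(cycle('ab'), 5)) == ['a', 'b', 'a', 'b', 'a']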
13f479fca709dffa77eeca3d32ff7265c81588bf
2,427
def find_process_in_list(proclist, pid):
    """
    Searches for the given 'pid' in 'proclist' (which should be the output
    from get_process_list()). If not found, None is returned; otherwise a
    list [ user, pid, ppid ].
    """
    for L in proclist:
        if pid == L[1]:
            return L
    return None
19eab54b4d04b40a54a39a44e50ae28fbff9457c
2,428
import os
import random


def select_images(img_dir, sample_size=150, random_seed=42):
    """Selects a random sample of image paths."""
    img_paths = []
    for file in os.listdir(img_dir):
        if file.lower().endswith('.jpeg'):
            img_paths.append(os.path.join(img_dir, file))
    if sample_size is not None:
        if random_seed is not None:
            random.seed(a=random_seed)
        img_paths = random.sample(img_paths, sample_size)
    return img_paths
999bf71eb6b8072bd91cbb98d9fe1b50d5e9b8ac
2,429
def get_dp_logs(logs):
    """Get only the list of data point logs, filter out the rest."""
    filtered = []
    compute_bias_for_types = [
        "mouseout",
        "add_to_list_via_card_click",
        "add_to_list_via_scatterplot_click",
        "select_from_list",
        "remove_from_list",
    ]
    for log in logs:
        if log["type"] in compute_bias_for_types:
            filtered.append(log)
    return filtered
e0a7c579fa9218edbf942afdbdb8e6cf940d1a0c
2,430
def subtraction(x, y):
    """
    Subtract y from x

    >>> subtraction(-20, 80)
    -100
    """
    assert isinstance(x, (int, float)), "The x value must be an int or float"
    assert isinstance(y, (int, float)), "The y value must be an int or float"
    return x - y
203233897d31cb5bc79fca0f8c911b03d7deb5ba
2,431
def apo(coalg):
    """
    Extending an anamorphism with the ability to halt.

    In this version, a boolean is paired with the value that indicates halting.
    """
    def run(a):
        stop, fa = coalg(a)
        return fa if stop else fa.map(run)
    return run
a1e64d9ed49a8641095c8a8c20ae08c1cc6e9c19
2,432
def heap_sort(li):
    """
    [list of int] => [list of int]

    Heap sort: divides its input into a sorted and an unsorted region, and it
    iteratively shrinks the unsorted region by extracting the largest element
    from it and inserting it into the sorted region. It does not waste time
    with a linear-time scan of the unsorted region; rather, heap sort
    maintains the unsorted region in a heap data structure to more quickly
    find the largest element in each step.

    To implement a heap using arrays, we will use the rule
    li[k] >= li[2*k+1] and li[k] >= li[2*k+2] (left child and right child
    respectively). More generally, the array must satisfy the heap quality:
    for any given node C, if P is a parent node of C, then the value of P is
    greater than or equal to the key of C (for max heaps).

    Graphically, this is a complete binary tree stored level by level in the
    array, at indices 0 through len(li) - 1.
    """
    def heapify(lst, heap_size, root):
        """
        ([list of int], int, int) => None

        Rearranges the list in place to satisfy the heap quality for the
        subtree rooted at index `root`.
        """
        # the largest node seen so far
        largest = root
        left_child = 2 * largest + 1
        right_child = 2 * largest + 2

        # check if left_child and root need to be swapped
        if left_child < heap_size and lst[largest] < lst[left_child]:
            largest = left_child

        # check if right_child and root need to be swapped
        if right_child < heap_size and lst[largest] < lst[right_child]:
            largest = right_child

        # change root, if needed
        if largest != root:
            lst[root], lst[largest] = lst[largest], lst[root]
            # continue to heapify below the new root
            heapify(lst, heap_size, largest)

    # Build a max heap by iterating through the list backwards
    for i in range(len(li), -1, -1):
        heapify(li, len(li), i)

    # extract elements one by one
    for i in range(len(li) - 1, 0, -1):
        # Remember, heap sort differs from insertion sort in that it searches
        # for the maximum, rather than minimum, element. li[0:i] is a heap
        # (like a tree, but elements are not guaranteed to be sorted) and
        # li[i:] is in sorted order.
        li[i], li[0] = li[0], li[i]
        # restore the heap, since it was disturbed by the swap
        heapify(li, i, 0)
    return li
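# Usage sketch (added illustration, not part of the original record):
assert heap_sort([5, 1, 4, 2, 3]) == [1, 2, 3, 4, 5]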
a72be31e5256c880c157636aa7a15df013ce651d
2,434
def filter_rows(df, condition, reason):
    """
    :param df: country_city_codes dataframe to filter
    :param condition: boolean Series, True for rows to keep
    :param reason: short description used in the log message
    :return: filtered country_city_codes df
    """
    n_dropped = (~condition).sum()
    print(
        f"\nexcluding {n_dropped} locations ({n_dropped / df.shape[0]:.1%}) due to {reason}"
    )
    return df[condition]
7e5e6925bfb7d90bc90b42fda202d80e8ef5e3f6
2,435
import random


def attack(health, power, percent_to_hit):
    """Calculates health from percent to hit and power of hit

    Parameters:
        health - integer defining health of attackee
        power - integer defining damage of attacker
        percent_to_hit - float defining percent chance to hit of attacker

    Returns:
        new health
    """
    random_number = random.random()  # number between 0.0 and 1.0
    # if our random number falls between 0 and percent to hit
    if random_number <= percent_to_hit:
        # then a hit occurred so we reduce health by power
        health = health - power
    # return the new health value
    return health
83a74908f76f389c798b28c5d3f9035d2d8aff6a
2,436
import json


def load_data(path):
    """Load JSON data."""
    with open(path) as inf:
        return json.load(inf)
531fc2b27a6ab9588b1f047e25758f359dc21b6d
2,437
def time_pet(power, energy):
    """Usage: time_pet(power, energy)"""
    return energy / power
11e9c82b8c1be84995f9517e04ed5e1270801e27
2,442
def create_graph(edge_num: int, edge_list: list) -> dict:
    """
    Create a graph expressed with an adjacency list

    :dict_key   : int (a vertex)
    :dict_value : set (consisting of vertices adjacent to the key vertex)
    """
    # Vertices in edge_list are 1-indexed; the dict keys are 0-indexed.
    a_graph = {i: set() for i in range(edge_num)}
    for a, b in edge_list:
        a_graph[a - 1].add(b - 1)  # All graphs always need this line
        a_graph[b - 1].add(a - 1)  # Only an undirected graph needs this line
    return a_graph
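# Usage sketch (added illustration, not part of the original record);
# vertices in edge_list are 1-indexed, keys in the result are 0-indexed:
assert create_graph(3, [(1, 2), (2, 3)]) == {0: {1}, 1: {0, 2}, 2: {1}}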
6ec1a71cf82a3a669090df42ac7d53e1286fda2d
2,443
import random


def seed_story(text_dict):
    """Generate random seed for story."""
    story_seed = random.choice(list(text_dict.keys()))
    return story_seed
0c0f41186f6eaab84a1d197e9335b4c28fd83785
2,444
from pathlib import Path
import sys


def detect_conda_env():
    """Inspect whether `sys.executable` is within a conda environment and if
    it is, return the environment name and Path of its prefix. Otherwise
    return None, None"""
    prefix = Path(sys.prefix)
    if not (prefix / 'conda-meta').is_dir():
        # Not a conda env
        return None, None
    if (prefix / 'envs').is_dir():
        # It's the base conda env:
        return 'base', prefix
    # Not the base env: its name is the directory basename:
    return prefix.name, prefix
2cb88ebfbb8a2919300e1d0072540e448dcf35ad
2,445
def same_datatypes(lst):
    """
    Checks whether a list contains only data of the same type.
    Keys, lengths of the objects, etc. also play a role here.

    :param lst: list to be checked
    :type lst: list
    :return: Boolean, depending on the outcome of the check
    """
    datatype = type(lst[0]).__name__
    for item in lst:
        if type(item).__name__ != datatype:
            # return False if the list contains different data types
            return False
    # The data types are equal, but are their structures equal too? (for complex data types)
    if datatype == "dict":
        keys = lst[0].keys()
        for item in lst:
            if item.keys() != keys:
                # return False if the keys of the dictionaries differ
                return False
    elif datatype == "list":
        if sum([len(x) for x in lst]) / len(lst) != len(lst[0]):
            # return False if the inner lists' average length differs from the
            # first list's length (a cheap proxy for equal lengths)
            return False
        datatypes = list(map(lambda x: type(x).__name__, lst[0]))
        for item in lst:
            if list(map(lambda x: type(x).__name__, item)) != datatypes:
                # return False if the elements of the inner lists have different data types
                return False
    return True
9c49376ec34ed0970171597f77de4c4c224350b4
2,446
def round_int(n, d):
    """Round a number (float/int) to the closest multiple of a divisor (int)."""
    return round(n / float(d)) * d
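# Usage sketch (added illustration, not part of the original record):
assert round_int(7, 5) == 5
assert round_int(13, 5) == 15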
372c0f8845994aaa03f99ebb2f65243e6490b341
2,447
def check_hostgroup(zapi, region_name, cluster_id):
    """Check whether the hostgroup for the given region name exists

    :region_name: region name of hostgroup
    :returns: true or false
    """
    return zapi.hostgroup.exists(name="Region [%s %s]" % (region_name, cluster_id))
b237b544ac59331ce94dd1ac471187a60d527a1b
2,448
def encode_mecab(tagger, string):
    """
    Morphologically analyze a string using mecab.

    :param tagger: morphological analyzer object
    :param string: input text
    :return tokens: morphological analysis results
    :return indexs: whitespace positions
    """
    string = string.strip()
    if len(string) == 0:
        return [], []
    words = string.split()

    nodes = tagger.pos(" ".join(words))
    tokens = []
    for node in nodes:
        surface = node[0].strip()
        if 0 < len(surface):
            for s in surface.split():  # handle mecab outputs such as '영치기 영차'
                tokens.append(s)

    indexs = []
    index, start, end = -1, 0, 100000
    for i, token in enumerate(tokens):  # verify that the split was done correctly
        if end < len(words[index]):
            start = end
            end += len(token)
        else:
            index += 1
            start = 0
            end = len(token)
            indexs.append(i)  # record the actual start positions among the values
        assert words[index][start:end] == token, f"{words[index][start:end]} != {token}"

    return tokens, indexs
847278728ebe7790d8aef2a125a420d5779adc6b
2,449
import subprocess


def run_cmd(cmd, cwd=None):
    """
    Runs the given command and returns the output decoded as UTF-8.
    """
    return subprocess.check_output(cmd, cwd=cwd, encoding="utf-8", errors="ignore")
5d2f5b85878291efaa16dcb4bb8a8c72b3d22230
2,452
def refresh_wrapper(trynum, maxtries, *args, **kwargs):
    """A @retry argmod_func to refresh a Wrapper, which must be the first arg.

    When using @retry to decorate a method which modifies a Wrapper, a common
    cause of retry is etag mismatch. In this case, the retry should refresh
    the wrapper before attempting the modifications again. This method may be
    passed to @retry's argmod_func argument to effect such a refresh.

    Note that the decorated method must be defined such that the wrapper is
    its first argument.
    """
    arglist = list(args)
    # If we get here, we *usually* have an etag mismatch, so specifying
    # use_etag=False *should* be redundant. However, for scenarios where we're
    # retrying for some other reason, we want to guarantee a fresh fetch to
    # obliterate any local changes we made to the wrapper (because the retry
    # should be making those changes again).
    arglist[0] = arglist[0].refresh(use_etag=False)
    return arglist, kwargs
089b859964e89d54def0058abc9cc7536f5d8877
2,453
def pad_rect(rect, move):
    """Returns the rectangle padded by the specified (x, y) move"""
    if rect['dx'] > 2:
        rect['x'] += move[0]
        rect['dx'] -= move[0]
    if rect['dy'] > 2:
        rect['y'] += move[1]
        rect['dy'] -= move[1]
    return rect
48bdbdc9d4736e372afc983ab5966fc80a221d4d
2,454
import math


def number_format(interp, num_args, number, decimals=0, dec_point='.', thousands_sep=','):
    """Format a number with grouped thousands."""
    if num_args == 3:
        return interp.space.w_False

    ino = int(number)
    dec = abs(number - ino)
    rest = ""

    if decimals == 0 and dec >= 0.5:
        if number > 0:
            ino += 1
        else:
            ino -= 1
    elif decimals > 0:
        s_dec = str(dec)
        if decimals + 2 < len(s_dec):
            if ord(s_dec[decimals + 2]) >= ord('5'):
                dec += math.pow(10, -decimals)
                if dec >= 1:
                    if number > 0:
                        ino += 1
                    else:
                        ino -= 1
                    rest = "0" * decimals
                else:
                    s_dec = str(dec)
            if not rest:
                rest = s_dec[2:decimals + 2]
        else:
            rest = s_dec[2:] + "0" * (decimals - len(s_dec) + 2)

    s = str(ino)
    res = []
    i = 0
    while i < len(s):
        res.append(s[i])
        if s[i] != '-' and i != len(s) - 1 and (len(s) - i - 1) % 3 == 0:
            for item in thousands_sep:
                res.append(item)
        i += 1

    if decimals > 0:
        for item in dec_point:
            res.append(item)

    return interp.space.wrap("".join(res) + rest)
9d5ab0b9ed5dd6054ce4f356e6811c1b155e2062
2,455
def selectTopFive(sortedList):
    """
    Select the top five entries from sortedList; return the corresponding
    names and commit counts as lists.

    :param sortedList: authorDict sorted by value in descending order
    :return: size -- [commit counts]
             labels -- [names]
    """
    size = []
    labels = []
    for i in range(5):
        labels.append(sortedList[i][0])
        size.append(sortedList[i][1])
    return size, labels
747ad379ed73aeb6ccb48487b48dc6150350204e
2,456
def get_license(file):
    """Returns the license from the input file."""
    # Collect the license
    lic = ''
    for line in file:
        if line.startswith('#include') or line.startswith('#ifndef'):
            break
        else:
            lic += line
    return lic
126fff2dd0464ef1987f3ab672f6b36b8fa962f7
2,457
async def user_has_pl(api, room_id, mxid, pl=100):
    """
    Determine if a user has the given power level (default 100, i.e. admin)
    in a given room.
    """
    pls = await api.get_power_levels(room_id)
    users = pls["users"]
    user_pl = users.get(mxid, 0)
    return user_pl == pl
5678af17469202e0b0a0232e066e7ed5c8212ee6
2,458
def sanitize_for_json(tag):
    """eugh the tags text is in comment strings"""
    return tag.text.replace('<!--', '').replace('-->', '')
211c07864af825ad29dfc806844927db977e6ce0
2,459
def custom_field_check(issue_in, attrib, name=None):
    """
    This method allows the user to get, in the comments, custom fields that
    are not common to all projects; in case the custom field does not exist,
    the method returns an empty string.
    """
    if hasattr(issue_in.fields, attrib):
        value = str(getattr(issue_in.fields, attrib))
        if name is not None:
            return str("%s : %s" % (name, value))
        else:
            return str(value)
    else:
        return str("")
d9c051fa922f34242d3b5e94e8534b4dc8038f19
2,460
def arrangements(ns):
    """
    prime factors of 19208 lead to the "tribonacci" dict;
    only needed up to trib(4)
    """
    trib = {0: 1, 1: 1, 2: 2, 3: 4, 4: 7}
    count = 1
    one_seq = 0
    for n in ns:
        if n == 1:
            one_seq += 1
        if n == 3:
            count *= trib[one_seq]
            one_seq = 0
    return count
    # # one-liner...
    # return reduce(lambda c, n: (c[0]*trib[c[1]], 0) if n == 3 else (c[0], c[1]+1), ns, (1,0))[0]
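# Usage sketch (added illustration, not part of the original record), assuming
# ns is a sequence of adjacent differences made up of 1s and 3s:
assert arrangements([1, 1, 3]) == 2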
01f3defb25624d7a801be87c7336ddf72479e489
2,462
from bs4 import BeautifulSoup


def render_checkbox_list(soup_body: object) -> object:
    """As the chosen markdown processor does not support task lists (lists with
    checkboxes), this function post-processes a bs4 object created from outputted
    HTML, replacing instances of '[ ]' (or '[]') at the beginning of a list item
    with an unchecked box, and instances of '[x]' (or '[X]') at the beginning of
    a list item with a checked box.

    Args:
        soup_body: bs4 object input

    Returns:
        modified bs4 object
    """
    if not isinstance(soup_body, BeautifulSoup):
        raise TypeError('Input must be a bs4.BeautifulSoup object')

    for ul in soup_body.find_all('ul'):
        for li in ul.find_all('li', recursive=False):
            if (li.contents[0].string[:2] == '[]') or (li.contents[0].string[:3] == '[ ]'):
                unchecked = soup_body.new_tag("input", disabled="", type="checkbox")
                li.contents[0].string.replace_with(li.contents[0].string.replace('[] ', u'\u2002'))
                li.contents[0].string.replace_with(li.contents[0].string.replace('[ ] ', u'\u2002'))
                li.contents[0].insert_before(unchecked)
                li.find_parent('ul')['style'] = 'list-style-type: none; padding-left: 0.5em; margin-left: 0.25em;'
            elif (li.contents[0].string[:3] == '[x]') or (li.contents[0].string[:3] == '[X]'):
                checked = soup_body.new_tag("input", disabled="", checked="", type="checkbox")
                li.contents[0].string.replace_with(li.contents[0].string.replace('[x] ', u'\u2002'))
                li.contents[0].string.replace_with(li.contents[0].string.replace('[X] ', u'\u2002'))
                li.contents[0].insert_before(checked)
                li.find_parent('ul')['style'] = 'list-style-type: none; padding-left: 0.5em; margin-left: 0.25em;'
    return soup_body
640f00d726a1268eb71134e29dbde53ef0ec44f5
2,463
import numpy as np


def manualcropping(I, pointsfile):
    """This function crops a copy of image I according to points stored in a
    text file (pointsfile) and corresponding to aponeuroses (see Args section).

    Args:
        I (array): 3-channel image
        pointsfile (text file): contains points' coordinates. Pointsfile must
        be organized such that:
            - column 0 is the ID of each point
            - column 1 is the X coordinate of each point, that is the
              corresponding column in I
            - column 2 is the Y coordinate, that is the row in I
            - row 0 is for txt columns' names
            - rows 1 and 2 are for two points of the scale
            - rows 3 to 13 are aponeuroses' points in panoramic images //
              rows 3 to 10 in simple images
            - following rows are for muscle fascicles (and are optional for
              this function)
        Other requirements: pointsfile's name must 1) include the extension
        and 2) indicate whether I is panoramic or simple by having 'p' or 's'
        just before the dot of the extension.

    Returns:
        I2 (array): array of the same type as I. It is the cropped image of I
        according to the aponeuroses' points manually picked and stored in
        pointsfile.
        point_of_intersect (tuple): point at the right of the image; should
        correspond to the point of intersection of deep and upper aponeuroses.
        min_raw, max_raw, min_col, max_col: indices of the location of the
        cropped image in the input raw image
    """
    data = open(pointsfile, 'r')

    # find whether the image is panoramic or simple
    search_point = -1
    while (pointsfile[search_point] != '.') and (search_point > (-len(pointsfile))):
        search_point = search_point - 1
    if search_point == -len(pointsfile):
        raise TypeError("Input pointsfile's name is not correct. Check extension.")
    else:
        imagetype = pointsfile[search_point - 1]

    # extract points from the input file
    picked_points = []
    for line in data:
        line = line.strip('\n')
        x = line.split('\t')
        picked_points.append((x[1], x[2]))

    # keep aponeuroses points according to image type
    if imagetype == 'p':
        # keep points 3 to 13 included
        apos = np.asarray(picked_points[3:14], dtype=np.float64, order='C')
    elif imagetype == 's':
        # keep points 3 to 10 included
        apos = np.asarray(picked_points[3:11], dtype=np.float64, order='C')
    else:
        raise ValueError("pointsfile's name does not fulfill conditions. See docstrings")

    # find max and min indices for columns and rows to crop image I
    # with a margin of 10 pixels (5 pixels for min_raw).
    # Coordinates are inverted in apos
    min_raw = max(0, np.min(apos[:, 1]) - 10)
    max_raw = min(I.shape[0], np.max(apos[:, 1]) + 20)
    min_col = max(0, np.min(apos[:, 0]) - 10)
    max_col = min(I.shape[1], np.max(apos[:, 0]) + 10)

    i_cropped = np.copy(I[int(min_raw):int(max_raw), int(min_col):int(max_col), :])
    index = np.argmax(apos[:, 0])
    point_of_intersect = (apos[index][1] - min_raw, apos[index][0] - min_col)

    # close file
    data.close()

    return i_cropped, point_of_intersect, int(min_raw), int(max_raw), int(min_col), int(max_col)
eb3f49b5b46d1966946fc3d00bcae113f51c60d1
2,464
def num_from_bins(bins, cls, reg):
    """
    :param bins: list
        The bins
    :param cls: int
        Classification result
    :param reg:
        Regression result
    :return: computed value
    """
    bin_width = bins[0][1] - bins[0][0]
    bin_center = float(bins[cls][0] + bins[cls][1]) / 2
    return bin_center + reg * bin_width
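# Usage sketch (added illustration, not part of the original record):
# bin 1 spans (10, 20): center 15, width 10, so reg=0.5 lands at 20.0.
assert num_from_bins([(0, 10), (10, 20)], 1, 0.5) == 20.0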
468e56075cf214f88d87298b259f7253d013a3f3
2,466
def rotate90(matrix: list) -> tuple:
    """return the matrix rotated by 90"""
    return tuple(''.join(column)[::-1] for column in zip(*matrix))
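# Usage sketch (added illustration, not part of the original record):
assert rotate90(["ab", "cd"]) == ("ca", "db")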
770a8a69513c4f88c185778ad9203976d5ee6147
2,467
def get_event_details(event):
    """Extract event image and timestamp - image with no tag will be tagged as latest.

    :param dict event: start container event dictionary.
    :return tuple: (container image, last use timestamp).
    """
    image = str(event['from'] if ":" in event['from'] else event['from'] + ":latest")
    timestamp = event['time']
    return image, timestamp
c9b4ded7f343f0d9486c298b9a6f2d96dde58b8c
2,468
import json


def copyJSONable(obj):
    """
    Creates a copy of obj and ensures it is JSONable.

    :return: copy of obj.
    :raises: TypeError: if the obj is not JSONable.
    """
    return json.loads(json.dumps(obj))
1cc3c63893c7716a4c3a8333e725bb518b925923
2,469
def min_max_two(first, second):
    """Helper function; returns the pair:
    (smaller of the given elements, larger of the given elements).
    It needs only a single comparison."""
    return (first, second) if first < second else (second, first)
7ddda1ad69056c22d9ba890e19e62464f56c08e1
2,470
import re
import requests
from bs4 import BeautifulSoup


def searchCVE(service, version):
    """Return the query URL and the list of CVE identifier strings"""
    url = "https://cve.mitre.org/cgi-bin/cvekey.cgi?keyword=" + service + "+" + version
    res = requests.get(url)
    soup = BeautifulSoup(res.content, "lxml")
    listCVE = []
    for elt in soup.find_all('a', attrs={'href': re.compile("^/cgi-bin/")}):
        listCVE.append(elt.get_text())
    return url, listCVE
f9daf52e0c508496273c8a07b279051ebf662198
2,472
import argparse


def init_parser():
    """
    initialize argument parser for S1 processing utilities
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-t', '--transform', action='store_true',
                        help='transform the final DEM to UTM coordinates')
    parser.add_argument('-l', '--logfiles', action='store_true',
                        help='create logfiles of the executed GAMMA commands')
    parser.add_argument('-i', '--intermediates', action='store_true',
                        help='keep intermediate files')
    parser.add_argument('-q', '--quiet', action='store_true',
                        help='suppress standard console prints')
    parser.add_argument('-tr', '--targetresolution', default=20,
                        help='the target resolution in meters for x and y',
                        type=int)
    parser.add_argument('-fg', '--func_geoback', default=2,
                        help='backward geocoding interpolation function; '
                             '0 - Nearest Neighbor, 1 - Bicubic Spline, 2 - Bicubic Spline-Log; '
                             'method 1: negative values possible (e.g. in urban areas) - use method 2 to avoid this',
                        type=int)
    parser.add_argument('-fi', '--func_interp', default=0,
                        help='function for interpolation of layover/shadow/foreshortening/DEM gaps; '
                             '0 - set to 0, 1 - linear interpolation, 2 - actual value, 3 - nn-thinned',
                        type=int)
    parser.add_argument('-poe', '--poedir', default=None,
                        help='directory containing aux_poeorb (precise orbit ephemerides) orbit state vector files')
    parser.add_argument('-res', '--resdir', default=None,
                        help='directory containing aux_resorb (restituted orbit) orbit state vector files')
    parser.add_argument('zipfile', help='S1 zipped scene archive to be used')
    parser.add_argument('tempdir', help='temporary directory for intermediate files')
    parser.add_argument('outdir', help='output directory')
    parser.add_argument('srtmdir', help='directory containing SRTM hgt tiles (subdirectories possible)')
    return parser
427eeff31ee83105afba1a9e22e8c789059efb23
2,473
def deco_inside_ctx_method_self(target):
    """decorator: wrap a class method inside a `with self: ...` context"""
    def tgt(self, *args, **kwargs):
        with self:
            return target(self, *args, **kwargs)
    return tgt
6a29ad468840229c026e6abf87556018a3e16718
2,475
def validate_blacklist(password):
    """
    It does not contain the strings ab, cd, pq, or xy
    """
    for blacklisted in ['ab', 'cd', 'pq', 'xy']:
        if blacklisted in password:
            return False
    return True
93ad092d5622e0567171f487522c2db824089eb9
2,477
import time


def f(x):
    """Squares something"""
    time.sleep(10)
    return x * x
6c1ab07ebaaeca6258601ec33f181e75086a355a
2,478
def get_added_after(
    fetch_full_feed, initial_interval, last_fetch_time=None, filter_args=None
):
    """
    Creates the added_after param, or extracts it from the filter_args

    :param fetch_full_feed: when set to true, will limit added_after
    :param initial_interval: fallback interval if no other value is available
    :param last_fetch_time: last fetch time value (str)
    :param filter_args: set of filter_args defined by the user to be merged with added_after
    :return: added_after
    """
    if fetch_full_feed:
        return initial_interval
    if not filter_args or "added_after" not in filter_args:
        return last_fetch_time or initial_interval
    return filter_args["added_after"]
281cb7d7429071bf8dca0d04eedee9130a29b28d
2,479
def _a_in_b(first, second):
    """Check if interval a is inside interval b."""
    return first.start >= second.start and first.stop <= second.stop
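# Usage sketch (added illustration, not part of the original record); any
# objects with start/stop attributes work, range objects used here as stand-ins:
assert _a_in_b(range(2, 5), range(1, 6))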
e4ca21e1861b691510252eb3be53eed16c8bc8cf
2,482
def get_password(config, name):
    """Read password"""
    passfile = config.passstore / name
    with open(passfile, 'r') as fd:
        return fd.read()
caae733030077eedc4428555eb0b106cfe586e50
2,483
def attribute_string(s):
    """return a python code string for a string variable"""
    if s is None:
        return "\"\""
    # escape any ' characters
    #s = s.replace("'", "\\'")
    return "\"%s\"" % s
9ed9d4f26e797119a339d2a3827772b945e29839
2,484
import os


def read_dfcpp_test_results(d4cpp_output_dir):
    """Returns test results"""
    test_results = {}
    for dir_name in os.listdir(d4cpp_output_dir):
        path_to_dir = os.path.join(d4cpp_output_dir, dir_name)
        if not os.path.isdir(path_to_dir):
            continue
        case = dir_name.split('-')[-1]
        result_path = os.path.join(path_to_dir, f"{case}.test")
        with open(result_path, 'r') as f:
            test_results[case] = f.read().strip()
    return test_results
d99f749dbf20179bc317a026d0e0112aea17ae12
2,485
async def ping_server():
    """
    Ping Server
    ===========

    Returns the message "The Optuna-server is alive!" if the server is running.

    Parameters
    ----------
    None

    Returns
    -------
    msg : str
        A message witnessing that the server is running.
    """
    msg = 'The Optuna-server is alive!'
    return msg
2098f2167a14f08105824490824d62dd34b4c49e
2,487
def test_from_rsid(rsids, start_rsid):
    """Continue collecting publications for rsids in list, beginning with start_rsid

    Args:
        rsids (list): list of rsids to collect publications on
        start_rsid (str): rsid identifier to resume collecting publications on

    Returns:
        runtime_rsids (list): [start_rsid, onward...]
        start_rsid (str): starting rsid
        start_idx (int): starting rsid index
        rsids (list): [original list of ALL rsids]
    """
    start_idx = rsids.index(start_rsid)  # start_rsid index
    print(f"STARTING POINT SET TO: | INDEX: {start_idx} / {len(rsids)} | RSID: {rsids[start_idx]}")
    runtime_rsids = rsids[start_idx:]  # runtime rsids
    return runtime_rsids, start_rsid, start_idx, rsids
bf2be86f28645addc08737e64f08695cd6b3a6d3
2,489
import os


def _abs_user_path(fpath):
    """don't overload the ap type"""
    return os.path.abspath(os.path.expanduser(fpath))
c5ee29b13783afcd5c6ad99d9e751f7ed5db58be
2,490
def get_base_url(host_name, customer_id):
    """
    :arg host_name: the host name of the IDNow gateway server
    :arg customer_id: your customer id
    :returns: the base url of the IDNow API and the selected customer
    """
    return 'https://{0}/api/v1/{1}'.format(host_name, customer_id)
5a24a87f597cf01c61ab6a01202b2e01e3b00bf8
2,491
import requests


def is_url_ok(url: str) -> bool:
    """Check if the given URL is reachable (responds with HTTP 200)."""
    try:
        r = requests.get(url)
        return r.status_code == 200
    except Exception:
        return False
97e0ba4b609282ef0dc166f0f0407e4aacdf30b2
2,492
import math


def sort_by_value(front, values):
    """
    This function sorts the front list according to the values

    :param front: List of indexes of elements in the values list
    :param values: List of values. Can be longer than the front list
    :return: the sorted front list
    """
    copied_values = values.copy()  # Copy so we can modify it
    sorted_list = []
    while len(sorted_list) != len(front):
        min_value = copied_values.index(min(copied_values))
        if min_value in front:
            sorted_list.append(min_value)
        copied_values[min_value] = math.inf
    return sorted_list
2d259ebbc0117f9aa043d78394b6423e596f176e
2,493
import re


def cigar_segment_bounds(cigar, start):
    """
    Determine the start and end positions on a chromosome of a non-no-matching
    part of an RNA-seq read based on a read's cigar string.

    cigar string meaning: http://bioinformatics.cvr.ac.uk/blog/tag/cigar-string/

    Example: '50M25N50M' with start = 100 -> [100, 149, 175, 224]. Note that
    start and end integers are inclusive, i.e. all positions at or between 100
    and 149 and at or between 175 and 224 are covered by reads.

    :param cigar: str a read's cigar string, e.g. "49M165N51M"
    :param start: int a read's start position on a chromosome
    :return: list of integers representing cigar match start, end points, in
        order of matching subsequences
    """
    # if CIGAR string is a single full match (i.e. "<positive integer>M")
    # extract length of the match, return match segment.
    full_match = re.match(r'(\d+)M$', cigar)
    if full_match is not None:
        extension = int(cigar[:(full_match.span()[-1] - 1)]) - 1
        return [start, start + extension]

    # break up cigar string into list of 2-tuples (letter indicative of match/no match, run length integer).
    cigar_split = [(v, int(k)) for k, v in re.findall(r'(\d+)([A-Z]?)', cigar)]

    # initialize parse params.
    # Allow for "hard clipping" where aligned read can start with non-matching region (https://bit.ly/2K6TJ5Y)
    augment = False
    any_match = False

    # output storage.
    match_idx_list = list()

    for idx in range(len(cigar_split)):
        segment = cigar_split[idx]

        if segment[0] == 'M':
            any_match = True
            extension = segment[1] - 1  # end of a match run is inclusive.
            augment = True
            match_idx_list += [start, start + extension]  # append a match run to output.
        else:
            if augment:
                extension = segment[1] + 1
                augment = False
            else:
                extension = segment[1]

        start += extension

    # if no matching regions found, throw error.
    if not any_match:
        raise ValueError('CIGAR string {0} has no matching region.'.format(cigar))

    return match_idx_list
c870dfb9b11e2fd1df9fb347528252f114b8d70f
2,496
import functools
import asyncio


def no_block(func):
    """Turns a blocking function into a non-blocking coroutine function."""
    @functools.wraps(func)
    async def no_blocking_handler(*args, **kwargs):
        partial = functools.partial(func, *args, **kwargs)
        return await asyncio.get_event_loop().run_in_executor(None, partial)
    return no_blocking_handler
5681fe7275a89c522384b28f9473fded8bba846b
2,497
def get_ua_list():
    """
    Get the user-agent list
    """
    with open('zhihu_spider/misc/ua_list.txt', 'r') as f:
        return [x.replace('\n', '') for x in f.readlines()]
6ebcf5d85650ad6644ccdf48aafed0160bd52ec0
2,499
from typing import List


def load_numbers_sorted(txt: str) -> List[int]:
    """Read numbers from a file, sort them, and return them as a list

    Args:
        txt (str): path of the file

    Returns:
        List[int]: list of numbers
    """
    numbers = []
    with open(txt) as f:
        numbers = sorted(map(lambda e: int(e), f))
    return numbers
6f10badd417a2ceefefa9f28a5c40583ea077d43
2,501
def translate_pt(p, offset):
    """Translates point p=(x,y) by offset=(x,y)"""
    return (p[0] + offset[0], p[1] + offset[1])
9fdc578d461219e9e5d1b557b9fde3d7a0946815
2,502
def truncate(sequence):
    """Return the first whitespace-separated token of the stringified sequence."""
    string = str(sequence)
    return string.split()[0]
2e8eeffb08d6d3d5d6ad5e6a83e596ec61a2eea2
2,503
import os


def get_files(dir="."):
    """
    Gets all the files recursively from a given base directory.

    Args:
        dir (str): The base directory path.

    Returns:
        list: A list that contains all files.
    """
    folder_queue = [dir]
    files = set()
    while folder_queue:
        next_folder = folder_queue.pop(0)
        with os.scandir(next_folder) as it:
            for entry in it:
                if entry.is_file():
                    files.add(entry)
                else:
                    folder_queue.append(entry.path)
    files = list(files)
    return files
4c29efa262c2b1be04952beb9acb6a2d8b622a3a
2,505
import os
import zipfile
import json


def load_project_resource(file_path: str):
    """
    Tries to load a resource:
       1. directly
       2. from the egg zip file
       3. from the egg directory

    This is necessary, because the files are bundled with the project.

    :return: the file as json
    """
    if not os.path.isfile(file_path):
        try:
            egg_path = __file__.split(".egg")[0] + ".egg"
            if os.path.isfile(egg_path):
                print(f"Try to load instances from ZIP at {egg_path}")
                with zipfile.ZipFile(egg_path) as z:
                    f = z.open(file_path)
                    data = json.load(f)
            else:
                print(f"Try to load instances from directory at {egg_path}")
                with open(egg_path + '/' + file_path) as f:
                    data = json.load(f)
        except Exception:
            raise FileNotFoundError(f"Could not find '{file_path}'. "
                                    "Make sure you run the script from the correct directory.")
    else:
        with open(file_path) as f:
            data = json.load(f)
    return data
b9d46e1363fc1ca8b397b1512642b7795a8ea9c9
2,506
import torch


def hsic(k_x: torch.Tensor, k_y: torch.Tensor, centered: bool = False, unbiased: bool = True) -> torch.Tensor:
    """Compute Hilbert-Schmidt Independence Criterion (HSIC)

    :param k_x: n by n values of kernel applied to all pairs of x data
    :param k_y: n by n values of kernel on y data
    :param centered: whether or not at least one kernel is already centered
    :param unbiased: if True, use unbiased HSIC estimator of Song et al (2007),
        else use original estimator of Gretton et al (2005)
    :return: scalar score in [0*, inf) measuring dependence of x and y

    * note that if unbiased=True, it is possible to get small values below 0.
    """
    if k_x.size() != k_y.size():
        raise ValueError("RDMs must have the same size!")
    n = k_x.size()[0]

    if not centered:
        h = torch.eye(n, device=k_y.device, dtype=k_y.dtype) - 1 / n
        k_y = h @ k_y @ h

    if unbiased:
        # Remove the diagonal
        k_x = k_x * (1 - torch.eye(n, device=k_x.device, dtype=k_x.dtype))
        k_y = k_y * (1 - torch.eye(n, device=k_y.device, dtype=k_y.dtype))
        # Equation (4) from Song et al (2007)
        return ((k_x * k_y).sum() - 2 * (k_x.sum(dim=0) * k_y.sum(dim=0)).sum() / (n - 2)
                + k_x.sum() * k_y.sum() / ((n - 1) * (n - 2))) / (n * (n - 3))
    else:
        # The original estimator from Gretton et al (2005)
        return torch.sum(k_x * k_y) / (n - 1) ** 2
7c91aa5991b90f396abbf835111a456208cbc50a
2,509
def int_converter(value):
    """Validate that *value* is int-convertible, then return it as a string."""
    int(value)
    return str(value)
ba1b780c7886fccf1203225de249ef129561fd36
2,510
from typing import Callable


def map_filter(filter_function: Callable) -> Callable:
    """
    returns a version of a function that automatically maps itself across all
    elements of a collection
    """
    def mapped_filter(arrays, *args, **kwargs):
        return [filter_function(array, *args, **kwargs) for array in arrays]
    return mapped_filter
a5f9f97d1a0d4acdaa39b9fb72a73b95a81553bb
2,511
import binascii


def generate_ngrams_and_hashit(tokens, n=3):
    """Generates and hashes the ngrams taken from the token sequence.

    @param tokens - list of tokens
    @param n - count of elements in sequences
    """
    return [binascii.crc32(bytearray(tokens[i:i + n])) for i in range(len(tokens) - n + 1)]
eb627add56f51a533c773e0dfea029bfcdb808ee
2,513
from datetime import datetime


def get_clip_name_from_unix_time(source_guid, current_clip_start_time):
    """Build a clip name from the source GUID and a unix start timestamp."""
    # convert unix time to a readable datetime string
    readable_datetime = datetime.fromtimestamp(int(current_clip_start_time)).strftime('%Y_%m_%d_%H_%M_%S')
    clipname = source_guid + "_" + readable_datetime
    return clipname, readable_datetime
0a212a76a69507ae3020c1e05ec354a927ad3dae
2,514
def normalize_sides(sides):
    """
    Description: Squares the sides of the rectangles and averages the points
                 so that they fit together
    Input:
        - sides - Six vertex sets representing the sides of a drawing
    Returns:
        - norm_sides - Squared and fit sides list
    """
    # Average side vertices and make perfect rectangles
    def square_sides(side):
        # Find the min/max x and y values
        x = []
        y = []
        for vert in side:
            x.append(vert[0][0])
            y.append(vert[0][1])
        minx = 0
        miny = 0
        maxx = max(x) - min(x)
        maxy = max(y) - min(y)

        # Construct new squared vertex set with format |1 2|
        #                                              |3 4|
        squared_side = [[minx, miny], [maxx, miny], [maxx, maxy], [minx, maxy]]
        #squared_side = [[minx, maxy], [maxx, maxy], [minx, miny], [maxx, miny]]
        return squared_side

    squared_right = square_sides(sides[0])
    squared_left = square_sides(sides[1])
    squared_top = square_sides(sides[2])
    squared_back = square_sides(sides[3])
    squared_front = square_sides(sides[4])
    squared_bottom = square_sides(sides[5])

    return squared_front, squared_left, squared_back, squared_right, squared_top, squared_bottom
855fcc45d14db2eede9fd7ec2fa6bf2f6854950d
2,516
def ratingRange(app):
    """
    Get the rating range of an app.
    """
    rating = 'Unknown'
    r = app['rating']
    if r >= 0 and r <= 1:
        rating = '0-1'
    elif r > 1 and r <= 2:
        rating = '1-2'
    elif r > 2 and r <= 3:
        rating = '2-3'
    elif r > 3 and r <= 4:
        rating = '3-4'
    elif r > 4 and r <= 5:
        rating = '4-5'
    return rating
69056c367a87e331cd3b606423540250b20f6485
2,517
import io


def generate_table_definition(schema_and_table, column_info,
                              primary_key=None, foreign_keys=None,
                              diststyle=None, distkey=None, sortkey=None):
    """Return a CREATE TABLE statement as a string."""
    if not column_info:
        raise Exception('No columns specified for {}'.format(schema_and_table))

    out = io.StringIO()
    out.write('CREATE TABLE {} (\n'.format(schema_and_table))

    columns_count = len(column_info)
    for i, (column, type_) in enumerate(column_info):
        out.write(' "{}" {}'.format(column, type_))
        if (i < columns_count - 1) or primary_key or foreign_keys:
            out.write(',')
        out.write('\n')

    if primary_key:
        out.write(' PRIMARY KEY({})'.format(primary_key))
        if foreign_keys:
            out.write(',')
        out.write('\n')

    foreign_keys = foreign_keys or []
    foreign_keys_count = len(foreign_keys)
    for i, (key, reftable, refcolumn) in enumerate(foreign_keys):
        out.write(' FOREIGN KEY({}) REFERENCES {}({})'.format(
            key, reftable, refcolumn
        ))
        if i < foreign_keys_count - 1:
            out.write(',')
        out.write('\n')

    out.write(')\n')

    if diststyle:
        out.write('DISTSTYLE {}\n'.format(diststyle))

    if distkey:
        out.write('DISTKEY({})\n'.format(distkey))

    if sortkey:
        if isinstance(sortkey, str):
            out.write('SORTKEY({})\n'.format(sortkey))
        elif len(sortkey) == 1:
            out.write('SORTKEY({})\n'.format(sortkey[0]))
        else:
            out.write('COMPOUND SORTKEY({})\n'.format(', '.join(sortkey)))

    return out.getvalue()
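# Usage sketch (added illustration, not part of the original record; the table and
# column names below are made up):
ddl = generate_table_definition(
    'public.users', [('id', 'INTEGER'), ('name', 'VARCHAR(64)')], primary_key='id')
assert ddl.startswith('CREATE TABLE public.users (')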
383cdc8ed13fbaa45adadec26f31ad0f5ac52fbc
2,519
def gradient_descent_update(x, gradx, learning_rate):
    """
    Performs a gradient descent update.
    """
    # Return the new value for x
    return x - learning_rate * gradx
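# Usage sketch (added illustration, not part of the original record):
assert gradient_descent_update(10, 2, 0.5) == 9.0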
db5ec512883352f473990eca124c8ad302ec3564
2,520
def next_line(grd_file):
    """
    next_line

    Function returns the next line in the file that is not a blank line,
    unless the line is '', which is a typical EOF marker.
    """
    while True:
        line = grd_file.readline()
        if line == '':
            return line, False
        elif line.strip():
            return line, True
337f188930a03142bae59cdb378b09f1ac5e2ecb
2,522
from pathlib import Path
import hashlib


def file_md5_is_valid(fasta_file: Path, checksum: str) -> bool:
    """
    Checks if the FASTA file matches the MD5 checksum argument.
    Returns True if it matches and False otherwise.

    :param fasta_file: Path object for the FASTA file.
    :param checksum: MD5 checksum string.
    :return: boolean indicating if the file validates.
    """
    md5_hash = hashlib.md5()
    with fasta_file.open(mode="rb") as fh:
        # Read in small chunks to avoid memory overflow with large files.
        while chunk := fh.read(8192):
            md5_hash.update(chunk)
    return md5_hash.hexdigest() == checksum
ec400afbe29d940d0638a581da7f2ee001b9e985
2,523
def combine_to_int(values):
    """Combine several 4-bit values (nibbles) to an integer"""
    multibyte_value = 0
    for byte_id, byte in enumerate(values):
        multibyte_value += 2**(4 * byte_id) * byte
    return multibyte_value
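# Usage sketch (added illustration, not part of the original record):
# little-endian nibbles: 0x1 + 0x2 * 16 == 0x21
assert combine_to_int([0x1, 0x2]) == 0x21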
58ff7cbee356cdcbe5b26e973de16c5b1cc40afc
2,524
def error_response(error, message):
    """returns error response"""
    data = {
        "status": "error",
        "error": error,
        "message": message
    }
    return data
f3e52ea42cb48378f08ecb65f58d2291960e6488
2,525
def getFBA(fba):
    """AC factory.

    Reads a file object and creates a dictionary for easy insertion into a
    postgres database. Uses Ohlberg's routines to read the files (ACfile).
    """
    word = fba.getSpectrumHead()
    if word is None:
        raise EOFError
    stw = fba.stw
    mech = fba.Type(word)
    datadict = {
        'stw': stw,
        'mech_type': mech,
    }
    return datadict
e5c5f52fe831938400eec5ae15c043ecbf8cf7d1
2,526
import requests
from bs4 import BeautifulSoup


def get_soup(page_url):
    """
    Returns BeautifulSoup object of the url provided
    """
    try:
        req = requests.get(page_url)
    except Exception:
        print('Failed to establish a connection with the website')
        return
    if req.status_code == 404:
        print('Page not found')
        return
    content = req.content
    soup = BeautifulSoup(content, 'html.parser')
    return soup
d837e3b6aa6184285857428b2c796172379f3a1f
2,527
def foreign_key_constraint_sql(table):
    """Return the SQL to add foreign key constraints to a given table"""
    sql = ''
    fk_names = list(table.foreign_keys.keys())
    for fk_name in sorted(fk_names):
        foreign_key = table.foreign_keys[fk_name]
        sql += "FOREIGN KEY({fn}) REFERENCES {tn}({kc}), ".format(
            fn=foreign_key.from_col,
            tn=foreign_key.to_table.name,
            kc=foreign_key.to_col
        )
    return sql
0883050d2b9d302ab9099ef27abd400e4d4fe69e
2,528
from pathlib import Path


def get_world_paths() -> list:
    """
    Returns a list of paths to the worlds on the server.
    """
    server_dir = Path(__file__).resolve().parents[1]
    world_paths = []
    for p in server_dir.iterdir():
        # note: is_dir must be called, not referenced, for the check to work
        if p.is_dir() and (p / "level.dat").is_file():
            world_paths.append(p.absolute())
    return world_paths
bf1c23c6a1c928dc66470db2e11b49ad2fc9e5d9
2,529
import hmac
import hashlib


def is_valid_webhook_request(webhook_token: str, request_body: str,
                             webhook_signature_header: str) -> bool:
    """This method verifies that requests to your Webhook URL are genuine and from Buycoins.

    Args:
        webhook_token: your webhook token
        request_body: the body of the request
        webhook_signature_header: the X-Webhook-Signature header from BuyCoins

    Returns:
        a Boolean stating whether the request is valid or not
    """
    hmac_request_body = hmac.new(webhook_token.encode(), request_body.encode(), hashlib.sha1)
    return hmac.compare_digest(hmac_request_body.hexdigest(), webhook_signature_header)
1ce1ef0a9e1386ebbea7773d8cd9d40df2544792
2,530
def is_even(x):
    """
    True if x is even.
    """
    return (x % 2) == 0
f19563063515eb4d39b8b607cf68f6f188af409e
2,531
def _preprocess_stored_query(query_text, config):
    """Inject some default code into each stored query."""
    ws_id_text = " LET ws_ids = @ws_ids " if 'ws_ids' in query_text else ""
    return '\n'.join([
        config.get('query_prefix', ''),
        ws_id_text,
        query_text
    ])
bc63391724773cd4a60f3dc9686d243d6d733b40
2,532
import numpy


def get_object_ratio(obj):
    """Calculate the ratio of the object's size in comparison to the whole image

    :param obj: the binarized object image
    :type obj: numpy.ndarray
    :returns: float -- the ratio
    """
    return numpy.count_nonzero(obj) / float(obj.size)
fd18e460be32037c73fe75c8fa5eef5ba6c1c217
2,533
def get_region(ds, region):
    """
    Return a region from a provided DataArray or Dataset

    Parameters
    ----------
    ds: xarray DataArray or Dataset
        The data to subset
    region: xarray DataArray or list
        Boolean mask of the region to keep
    """
    return ds.where(region, drop=True)
102b672f8040b722ec346435775cba1056485ae2
2,534
def print_scale(skill, points):
    """Return TeX lines for a skill scale."""
    lines = ['\\cvskill{']
    lines[0] += skill
    lines[0] += '}{'
    lines[0] += str(points)
    lines[0] += '}\n'
    return lines
c88de0c6db9e7b92dbcee025f42f56817a4aa033
2,536
import operator


def device_sort(device_set):
    """Sort a set of devices by self_id. Can't be used with PendingDevices!"""
    return sorted(device_set, key=operator.attrgetter('self_id'))
92a22a87b5b923771cd86588180a8c6eb15b9fdf
2,537
def _ontology_value(curie):
    """Get the id component of the curie, 0000001 from CL:0000001 for example."""
    return curie.split(":")[1]
7ef1f0874e698c498ccef16294c0469f67cd5233
2,538
import re


def parse_IS(reply: bytes, device: str):
    """Parses the reply to the shutter IS command."""
    match = re.search(b"\x00\x07IS=([0-1])([0-1])[0-1]{6}\r$", reply)
    if match is None:
        return False

    if match.groups() == (b"1", b"0"):
        if device in ["shutter", "hartmann_right"]:
            return "open"
        else:
            return "closed"
    elif match.groups() == (b"0", b"1"):
        if device in ["shutter", "hartmann_right"]:
            return "closed"
        else:
            return "open"
    else:
        return False
827b5ebf5c98bcc65b823276d5ab5b8086a2c069
2,539
import os


def generate_s3_strings(path):
    """Generates an s3 bucket name, s3 key, and s3 path with an endpoint from
    a path of the form s3://BUCKETNAME/KEY.

    The bucket name is the substring between 's3://' and the first following
    forward slash; the prefix (key) is everything after that slash.

    Args:
        path (string): s3://BUCKETNAME/KEY

    Returns:
        strings:
            bucket_name = BUCKETNAME
            prefix = KEY
            path = s3://endpoint@BUCKETNAME/KEY
    """
    start = 's3://'
    end = '/'
    bucket_name = path[path.find(start) + len(start):path[len(start):].find(end) + len(start)]
    # The prefix is the string after the slash that follows the bucket_name;
    # add 1 so the leading slash is not included in the key.
    prefix = path[path.find(bucket_name) + len(bucket_name) + 1:]
    if not prefix.endswith('/'):
        prefix = prefix + '/'
    path = 's3://' + os.environ['S3_ENDPOINT'] + '@' + bucket_name + '/' + prefix
    return bucket_name, prefix, path
601b20514c93e2159f9d5747063ce70d265d6e6e
2,540
def only_t1t2(src, names):
    """Filter callback: when src is a TissueClassify directory, drop the T1/T2
    averages from the ignore list (i.e. keep those files); otherwise ignore the
    TissueClassify entry itself.

    :param src: source directory path
    :param names: list of file names
    :return: the filtered list of names
    """
    if src.endswith("TissueClassify"):
        # print "Keeping T1/T2!"
        try:
            names.remove("t1_average_BRAINSABC.nii.gz")
        except ValueError:
            pass
        try:
            names.remove("t2_average_BRAINSABC.nii.gz")
        except ValueError:
            pass
    else:
        names.remove("TissueClassify")
    # print "Ignoring these files..."
    # for name in names:
    #     print "\t" + name
    return names
60116fbc602bbe03f7c18776b623ef3680b9dfc1
2,541