Columns: content (string, 35 to 762k chars) · sha1 (string, 40 chars) · id (int64, 0 to 3.66M)
def load_data(connection_string: str):
    """
    Load data from a source. The source could be:

    - A JSON file
    - A MongoDB

    Load data from a file
    ---------------------

    If you want to load data from a file, you must provide a connection
    string like:

    >>> connection_string = "/path/to/my/file.json"

    or using URI format:

    >>> connection_string = "file:///path/to/my/file.json"

    Load data from a MongoDB
    ------------------------

    If you want to load data from a MongoDB database, you must provide a
    connection string like:

    >>> connection_string = "mongodb://mongo.example.com:27017"

    Or even more complicated:

    >>> connection_string = "mongodb://db1.example.net,db2.example.net:2500/?replicaSet=test"

    :param connection_string: path or URI of the data source
    :type connection_string: str
    :return: an APITest instance built from the loaded data
    :rtype: APITest
    """
    assert isinstance(connection_string, str)

    if connection_string.startswith("mongodb://"):
        data = _load_from_mongo(connection_string)
    elif connection_string.startswith("file://"):
        data = _load_from_file(connection_string)
    else:
        data = _load_from_file("file://{}".format(connection_string))

    # Load JSON info
    return APITest(**data)
abb806e62510077abf8a0b686a5882f637502275
3,658,837
import numpy as np


def himmelblau(xy):
    """
    Himmelblau's function, as a set of residuals (cost = sum(residuals**2))

    The standard Himmelblau's function has data [11, 7], and four minima
    at (3.0, 2.0), ~(-2.8, 3.1), ~(-3.8, -3.3), ~(3.6, -1.8).

    Himmelblau's function is a quadratic model in both x and y. Its data-
    space dimension (2) is equal to its model-space dimension (2), so
    there is only parameter-effect curvature.

    Parameters
    ----------
    xy : 2-element list-like
        The x, y parameters of the model.

    Returns
    -------
    2-element list-like
        The residuals of the model.

    Notes
    -----
    https://en.wikipedia.org/wiki/Himmelblau%27s_function
    """
    x, y = xy
    r1 = x*x + y
    r2 = y*y + x
    return np.array([r1, r2])
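A quick sanity check of the snippet above (hypothetical usage, reusing its numpy import): at the known minimum (3.0, 2.0) the residuals equal the data [11, 7], so the cost sum((residuals - data)**2) vanishes.

import numpy as np

residuals = himmelblau([3.0, 2.0])   # array([11., 7.])
data = np.array([11, 7])
cost = np.sum((residuals - data) ** 2)
print(residuals, cost)               # [11.  7.] 0.0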
6951c77afd39596e7a799fe413bc2fc96a4818c2
3,658,838
from typing import Dict


def parse_instrument_data(smoothie_response: str) -> Dict[str, bytearray]:
    """
    Parse instrument data.

    Args:
        smoothie_response: A string containing a mount prefix (L or R)
            followed by : and a hex string.

    Returns:
        mapping of the mount prefix to the decoded bytes.
    """
    try:
        items = smoothie_response.split("\n")[0].strip().split(":")
        mount = items[0]
        if mount not in {"L", "R"}:
            raise ParseError(
                error_message=f"Invalid mount '{mount}'",
                parse_source=smoothie_response
            )
        # data received from Smoothieware is stringified HEX values
        # because of how Smoothieware handles GCODE messages
        data = bytearray.fromhex(items[1])
    except (ValueError, IndexError, TypeError, AttributeError):
        raise ParseError(
            error_message="Unexpected argument to parse_instrument_data",
            parse_source=smoothie_response,
        )
    return {mount: data}
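Hypothetical usage of the parser above (ParseError is defined elsewhere in the source module): the hex payload after the mount prefix is decoded into raw bytes.

result = parse_instrument_data("L:48656c6c6f")
print(result)  # {'L': bytearray(b'Hello')}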
59f02a5d83b600f5fb4104f72f860925487f6422
3,658,839
def _volume_sum_check(props: PropsDict, sum_to=1, atol=1e-3) -> bool:
    """Check that the property arrays sum to no more than `sum_to` (within `atol`)"""
    check_broadcastable(**props)
    sum_ar = np.zeros((1,))
    for prop in props:
        sum_ar = sum_ar + props[prop]
    try:
        assert sum_ar.max() <= sum_to + atol
    except AssertionError:
        raise ValueError(f"Volume fractions for {props.keys()} sum to greater than one")
    return True
631743276b833fd9ea58ae766614b851764ee771
3,658,841
def small_view(data, attribute):
    """
    Extract a downsampled view from a dataset,
    for quick statistical summaries
    """
    shp = data.shape
    view = tuple([slice(None, None, np.intp(max(s / 50, 1))) for s in shp])
    return data[attribute, view]
62273269f87cbe6803ef0b5a8e47a681ca1f4d26
3,658,843
def playerStandings():
    """Returns a list of the players and their win records, sorted by wins.

    The first entry in the list should be the player in first place, or a player
    tied for first place if there is currently a tie.

    Returns:
      A list of tuples, each of which contains (id, name, wins, matches):
        id: the player's unique id (assigned by the database)
        name: the player's full name (as registered)
        wins: the number of matches the player has won
        matches: the number of matches the player has played
    """
    # connect to the database
    db = connect()
    # create a cursor object
    c = db.cursor()
    # build the standings from the matches table using the SQL query below
    query = '''
        SELECT wins_table.id, wins_table.team_name, wins_table.wins,
               wins_table.wins + loses_table.loses AS total
        FROM
            (SELECT TEAMS.*,
                    (SELECT COUNT(*) FROM MATCHES
                     WHERE MATCHES.winner = TEAMS.id) AS WINS
             FROM TEAMS) AS wins_table,
            (SELECT TEAMS.*,
                    (SELECT COUNT(*) FROM MATCHES
                     WHERE MATCHES.loser = TEAMS.id) AS LOSES
             FROM TEAMS) AS loses_table
        WHERE wins_table.id = loses_table.id
        ORDER BY wins_table.wins DESC;
    '''
    # execute the query
    c.execute(query)
    # fetch the query result
    result = c.fetchall()
    # close the connection to the database
    db.close()
    return result
c6554d1ff34dd08f756d1ad19665deacac4467de
3,658,844
import pandas as pd


def get_all_feature_names(df: pd.DataFrame, target: str = None) -> list:
    """Get a list of all feature names in a dataframe.

    Args:
        df (pd.DataFrame): dataframe of features and target variable
        target (str): name of target column in df

    Returns:
        all_feature_names (list): list of all feature names
    """
    # if using the main df
    if target in df.columns.tolist():
        df = df.loc[:, ~df.columns.isin([target])]
        all_feature_names = df.columns.tolist()
    # if using samples_df with true and predicted labels
    else:
        df = df.loc[:, ~df.columns.isin(['true_label', 'predicted_label'])]
        all_feature_names = df.columns.tolist()
    return all_feature_names
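A minimal usage sketch (reusing the pandas import above): the target column is dropped and the remaining column names are returned.

import pandas as pd

df = pd.DataFrame({"age": [25, 32], "income": [40000, 52000], "label": [0, 1]})
print(get_all_feature_names(df, target="label"))  # ['age', 'income']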
b0b1964832c6f56200a3d7fbbccd1030e9c52a93
3,658,845
import random

import numpy as np
import pandas as pd


def generate_enhancer_promoter_pair(ep_df):
    """Return the real enhancer-promoter pairs plus an equal-sized sample of
    fake pairs whose enhancer-TSS gaps fall within the observed gap range."""
    std_ep_pair = ep_df[['chrom-Enh', 'chromStart', 'chromEnd', 'TSS']]
    min_ep_gap = abs((std_ep_pair['chromEnd'] - std_ep_pair['chromStart']).min())
    max_ep_gap = abs((std_ep_pair['chromEnd'] - std_ep_pair['chromStart']).max())

    fake_samples = []
    for enhancer in std_ep_pair[['chrom-Enh', 'chromStart', 'chromEnd']].values:
        for promoter in std_ep_pair['TSS'].values:
            gap = abs(enhancer[-1] - promoter)
            if gap > min_ep_gap and gap < max_ep_gap:
                current_sample = np.r_[enhancer, promoter]
                fake_samples.append(current_sample)

    fake_samples = random.sample(fake_samples, std_ep_pair.shape[0])
    fake_ep_pair = pd.DataFrame(fake_samples,
                                columns=['chrom-Enh', 'chromStart', 'chromEnd', 'TSS'])
    return std_ep_pair, fake_ep_pair
b87906e6e2d5a23a729aa3f9b19fcd086db2e7c8
3,658,847
from typing import Union
from typing import Tuple
from typing import Dict


def constant_lrs(
    draw, return_kwargs: bool = False
) -> Union[
    st.SearchStrategy[lr_scheduler_pb2.ConstantLR],
    st.SearchStrategy[Tuple[lr_scheduler_pb2.ConstantLR, Dict]],
]:
    """Returns a SearchStrategy for a ConstantLR plus maybe the kwargs."""
    kwargs: Dict = {}

    # initialise and return
    all_fields_set(lr_scheduler_pb2.ConstantLR, kwargs)
    constant_lr = lr_scheduler_pb2.ConstantLR(**kwargs)
    if not return_kwargs:
        return constant_lr
    return constant_lr, kwargs
d7354717a052de2852ea61e55b1b2c3e3df19010
3,658,848
def get_read_only_storage_manager():
    """Get the current Flask app's read only storage manager, create if necessary"""
    return current_app.config.setdefault('read_only_storage_manager',
                                         ReadOnlyStorageManager())
cd5dac64a834ac98accb6824d5e971d763acc677
3,658,849
def __parse_sql(sql_rows):
    """
    Parse sqlite3 database output.

    Modify this function if you have a different database setup.
    Helper function for sql_get().

    Parameters:
        sql_rows (str): output from SQL SELECT query.

    Returns:
        dict
    """
    column_names = ['id', 'requester', 'item_name', 'custom_name', 'quantity',
                    'crafting_discipline', 'special_instruction', 'status',
                    'rarity', 'resource_provided', 'pub-date', 'crafter', 'stats']
    request_dict = {str(row[0]): {column_names[i]: row[i]
                                  for i, _ in enumerate(column_names)}
                    for row in sql_rows}
    return request_dict
09c61da81af069709dd020b8643425c4c6964137
3,658,850
import random
from itertools import chain, combinations

import networkx as nx
import scipy.special


def _generate_to(qubo, seed, oct_upper_bound, bias=0.5):
    """
    Given a QUBO, an upper bound on oct, and a bias of bipartite vertices,
    generate an Erdos-Renyi graph such that oct_upper_bound number of
    vertices form an OCT set and the remaining vertices are partitioned into
    partites (left partite set with probability of "bias"). Edges within
    each partite set are then removed.
    """
    # Compute parameters needed for ER
    n = qubo.order()
    p = qubo.size() / scipy.special.binom(n, 2)

    # Generate graph
    graph = nx.erdos_renyi_graph(n=n, p=p, seed=seed)
    random.seed(seed)

    # Compute partite sets on the remaining vertices
    nodes = list(graph.nodes())[oct_upper_bound:]
    partite1 = set()
    partite2 = set()
    for node in nodes:
        if random.random() < bias:
            partite1.add(node)
        else:
            partite2.add(node)

    # Remove edges within a partite set
    for edge in chain(combinations(partite1, 2), combinations(partite2, 2)):
        if graph.has_edge(*edge):
            graph.remove_edge(*edge)

    # Name the graph
    graph.graph['name'] = '{}-{}-{}'.format(qubo.graph['name'], 'to', seed)

    # Sanitize the graph and return
    graph = reset_labels(graph)
    return graph
653aedbd44bf87a9908c8abcf2c9480b836f4a03
3,658,851
def sqlCreate(fields=None, extraFields=None, addCoastGuardFields=True, dbType='postgres'):
    """Return the sqlhelp object to create the table.

    @param fields: which fields to put in the create.  Defaults to all.
    @param extraFields: A sequence of tuples containing (name, sql type) for additional fields
    @param addCoastGuardFields: Add the extra fields that come after the NMEA checksum from the USCG N-AIS format
    @type addCoastGuardFields: bool
    @param dbType: Which flavor of database we are using so that the create is tailored ('sqlite' or 'postgres')
    @return: An object that can be used to generate the create statement
    @rtype: sqlhelp.create
    """
    if fields is None:
        fields = fieldList
    c = sqlhelp.create('waterlevel', dbType=dbType)
    c.addPrimaryKey()
    if 'MessageID' in fields: c.addInt('MessageID')
    if 'RepeatIndicator' in fields: c.addInt('RepeatIndicator')
    if 'UserID' in fields: c.addInt('UserID')
    if 'Spare' in fields: c.addInt('Spare')
    if 'dac' in fields: c.addInt('dac')
    if 'fid' in fields: c.addInt('fid')
    if 'month' in fields: c.addInt('month')
    if 'day' in fields: c.addInt('day')
    if 'hour' in fields: c.addInt('hour')
    if 'min' in fields: c.addInt('min')
    if 'stationid' in fields: c.addVarChar('stationid', 7)
    if 'waterlevel' in fields: c.addInt('waterlevel')
    if 'datum' in fields: c.addInt('datum')
    if 'sigma' in fields: c.addInt('sigma')
    if 'source' in fields: c.addInt('source')

    if addCoastGuardFields:
        # c.addInt('cg_s_rssi')      # Relative signal strength indicator
        # c.addInt('cg_d_strength')  # dBm receive strength
        # c.addVarChar('cg_x', 10)   # Idonno
        c.addInt('cg_t_arrival')     # Receive timestamp from the AIS equipment 'T'
        c.addInt('cg_s_slotnum')     # Slot received in
        c.addVarChar('cg_r', 15)     # Receiver station ID - should usually be an MMSI, but sometimes is a string
        c.addInt('cg_sec')           # UTC seconds since the epoch
        c.addTimestamp('cg_timestamp')  # UTC decoded cg_sec - not actually in the data stream

    return c
0a9bbbed4dd9c20e1126716bb64e2279d4ab29b6
3,658,852
def _section_cohort_management(course, access):
    """ Provide data for the corresponding cohort management section """
    course_key = course.id
    ccx_enabled = hasattr(course_key, 'ccx')
    section_data = {
        'section_key': 'cohort_management',
        'section_display_name': _('Cohorts'),
        'access': access,
        'ccx_is_enabled': ccx_enabled,
        'course_cohort_settings_url': reverse(
            'course_cohort_settings',
            kwargs={'course_key_string': str(course_key)}
        ),
        'cohorts_url': reverse(
            'cohorts',
            kwargs={'course_key_string': str(course_key)}
        ),
        'upload_cohorts_csv_url': reverse(
            'add_users_to_cohorts',
            kwargs={'course_id': str(course_key)}
        ),
        'verified_track_cohorting_url': reverse(
            'verified_track_cohorting',
            kwargs={'course_key_string': str(course_key)}
        ),
    }
    return section_data
161f01b96952b8538d737c13718d455b69542b51
3,658,853
def rivers_by_station_number(stations, N):
    """Returns a list of N tuples of the form (river name, number of stations
    on the river). These tuples are sorted in decreasing order of station
    numbers. If any rivers have the same number of stations as the 'Nth'
    river, these are also included."""

    riversList = stations_by_river(stations)  # Get list of rivers to consider
    riverNumber = []
    for River in riversList:
        # Get tuple of (river name, number of stations)
        riverNumber.append((River, len(riversList[River])))

    riverNumber.sort(key=lambda x: x[1], reverse=True)  # Sort into decreasing numerical order

    # Include any rivers with an equal number of stations to the 'final' one being output.
    extraStations = 0
    # Search through the next few rivers to see how many have the same number of stations
    for i in range(N, len(riverNumber)):
        if riverNumber[i][1] == riverNumber[N-1][1]:
            extraStations += 1
        else:
            break  # As items are pre-sorted, we can exit once the number is not equal

    N += extraStations  # Adjust value of N
    return riverNumber[:N]
5f958116ae833d2ad4921662f753ca8f30a0af73
3,658,854
import json


def load_default_data() -> dict[str, str]:
    """Finds and opens a .json file with streamer data.

    Reads from the file and assigns the data to streamer_list.

    Args:
        None

    Returns:
        A dict mapping keys (Twitch usernames) to their corresponding URLs.
        Each row is represented as a separate streamer. For example:

        {
            "GMHikaru": "https://www.twitch.tv/GMHikaru"
        }
    """
    with open(r"statum\static\streamers.json", "r") as default_streamers:
        streamer_list: dict[str, str] = json.load(default_streamers)
    return streamer_list
bfeef64922fb4144228e031b9287c06525c4254d
3,658,855
def get_value_key(generator, name):
    """
    Return a key for the given generator and name pair.

    If name is None, no key is generated.
    """
    if name is not None:
        return f"{generator}+{name}"
    return None
0ad630299b00a23d029ea15543982125b792ad53
3,658,856
import math

import librosa
import numpy as np


def wav_to_log_spectrogram_clips(wav_file):
    """Convert audio into a logarithmic spectrogram, then chop it into 2D segments of 100 frames."""
    # convert audio into spectrogram
    sound, sr = librosa.load(wav_file, sr=SR, mono=True)
    stft = librosa.stft(sound, n_fft=N_FFT, hop_length=HOP_LEN, win_length=WIN_LEN)
    mag, phase = librosa.magphase(stft)
    db_spectro = librosa.amplitude_to_db(mag)

    # chop magnitude of spectrogram into clips, each has 1025 bins, 100 frames
    db_spectro_clips = np.empty((0, FREQ_BINS, 100))
    for i in range(math.floor(mag.shape[1] / 100)):
        db_spectro_clips = np.concatenate(
            (db_spectro_clips, db_spectro[np.newaxis, :, i * 100: (i + 1) * 100]))
    return db_spectro_clips
51ccf7d5687005f3eb01f382d37b6d7e09e45730
3,658,857
def get_title(mods):
    """
    Function takes the object's MODS and extracts and returns the text of the title.
    """
    title = mods.find("{{{0}}}titleInfo/{{{0}}}title".format(MODS_NS))
    if title is not None:
        return title.text
652a9cc61c8d2538c80818759666022b19058074
3,658,858
def sample_ingredient(user, name='Cinnamon'):
    """
    Create and return a sample ingredient

    :param user: User (custom) object
    :param name: name of the ingredient
    :return: Ingredient object
    """
    return Ingredient.objects.create(user=user, name=name)
2828e1f42f6d755ac636d93d72b291cad3ba0061
3,658,860
import numpy as np


def viterbi(O, S, Y, pi, A, B):
    """Generates a path which is a sequence of most likely states that
    generates the given observation Y.

    Args:
        O (numpy.ndarray): observation space.      Size: 1 X N
        S (numpy.ndarray): state space.            Size: 1 X K
        Y (list): observation sequence.            Size: 1 X T
        pi (numpy.ndarray): initial probabilities. Size: 1 X K
        A (numpy.ndarray): transition matrix.      Size: K X K
        B (numpy.ndarray): emission matrix.        Size: N X K

    Returns:
        list: list of most likely sequence of POS tags
    """
    # Reference: https://en.wikipedia.org/wiki/Viterbi_algorithm#Pseudocode
    # **************************************************************************
    # Example data for trial
    # input
    # O = np.arange(1, 7)  # observation space # uniq words # Size = 1 X N
    # S = np.asarray([0, 1, 2])  # state space # uniq POS tags # Size = 1 X K
    # Y = np.array([0, 2, 0, 2, 2, 1]).astype(np.int32)  # observation sequence T
    #                                                    # Size = 1 X T
    # pi = np.array([0.6, 0.2, 0.2])  # initial probability # Size = 1 X K
    # A = np.array([[0.8, 0.1, 0.1],
    #               [0.2, 0.7, 0.1],
    #               [0.1, 0.3, 0.6]])  # transition matrix # Size = K X K
    # B = np.array([[0.7, 0.0, 0.3],
    #               [0.1, 0.9, 0.0],
    #               [0.0, 0.2, 0.8]])  # emission matrix # Size = K X N
    # output
    # X = [0, 0, 0, 2, 2, 1]  # Most likely path/sequence
    # **************************************************************************
    N = len(O)
    K = len(S)
    T = len(Y)

    T1 = np.zeros(shape=(K, T))
    T2 = np.zeros(shape=(K, T))

    for i in range(K):
        T1[i, 0] = pi[i] * B[i, Y[0]]
        T2[i, 0] = 0

    for j in range(1, T):
        for i in range(K):
            if Y[j] == -1:
                # Unknown word handling. Set B[i, Y[j]] = 1 for all tags if
                # Y[j] == -1, i.e. the word was not found in the train set.
                next_prob = T1[:, j-1] * A[:, i] * 1
            else:
                next_prob = T1[:, j-1] * A[:, i] * B[i, Y[j]]
            T1[i, j] = np.max(next_prob)
            T2[i, j] = np.argmax(next_prob)

    Z = [None] * T
    X = [None] * T

    # Backpointer
    Z[T-1] = np.argmax(T1[:, T-1])
    X[T-1] = S[Z[T-1]]
    for j in reversed(range(1, T)):
        Z[j-1] = T2[int(Z[j]), j]
        X[j-1] = S[int(Z[j-1])]

    return X
db533c584cf2a287cfcc6f4097566cdb493c42cc
3,658,862
def int_to_bigint(value):
    """Convert integers larger than 64 bits to bytes

    Smaller integers are left alone
    """
    if value.bit_length() > 63:
        return value.to_bytes((value.bit_length() + 9) // 8, 'little', signed=True)
    return value
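Usage sketch: small values pass through unchanged, while larger values round-trip through little-endian signed bytes.

print(int_to_bigint(42))      # 42 (fits in 64 bits, left alone)
big = int_to_bigint(2 ** 70)  # bytes, little-endian, signed
print(int.from_bytes(big, 'little', signed=True) == 2 ** 70)  # True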
0f2d64887dc15d1902b8e10b0257a187ed75187f
3,658,863
import numpy as np


def xcorr(S, dtmax=10):
    """
    Cross correlate each pair of columns in S at offsets up to dtmax
    """
    (T, N) = S.shape
    H = np.zeros((N, N, dtmax))

    # Compute cross correlation at each time offset
    for dt in np.arange(dtmax):
        # Compute correlation in sections to conserve memory
        chunksz = 16
        for n1 in np.arange(N, step=chunksz):
            for n2 in np.arange(N, step=chunksz):
                n1c = min(n1 + chunksz, N)
                n2c = min(n2 + chunksz, N)
                # corrcoef is a bit funky. We want the upper right quadrant
                # of this matrix. The result is
                # ((n1c-n1)+(n2c-n2)) x ((n1c-n1)+(n2c-n2))
                H[n1:n1c, n2:n2c, dt] = np.corrcoef(
                    S[:T-dt, n1:n1c].T, S[dt:, n2:n2c].T)[:(n1c-n1), (n1c-n1):]

        # Set diagonal to zero at zero offset (obviously perfectly correlated)
        if dt == 0:
            H[:, :, 0] = H[:, :, 0] - np.diag(np.diag(H[:, :, 0]))

    return H
7b27b2ce5c574db253554e8d6c2ebf0ac7c354ca
3,658,864
def register_hooks():
    """Exec all the rules files. Gather the hooks from them and load them
    into the hook dict for later use.
    """
    global HOOKS_LOADED
    for name, path in load_rules().items():
        globals = {}
        with open(path) as f:
            exec(compile(f.read(), path, 'exec'), globals)
        DESCRIPTIONS[name] = globals['__doc__']
        for hook_name in HOOKS.keys():
            if hook_name in globals:
                HOOKS[hook_name].append(globals[hook_name])
    HOOKS_LOADED = True
    return HOOKS
c4bfd57fa0a503f4a5be7004fe2145b42c28727a
3,658,865
def proxy_rotator():
    """Return a cycle object of proxy dict"""
    return Proxy.get_proxy_rotator()
4b988214818599ba19cd45f43aeec03e9cc37e08
3,658,867
def pow(a, b):
    """ Return an attribute that represents a ^ b. """
    return multiplyDivide(a, b, MultiplyDivideOperation.POWER)
17551ad9a872a854c177e43317f1d22242a10cd5
3,658,868
async def send_simple_embed_to_channel(bot: commands.Bot, channel_name: str, message: str,
                                       color: str = config["colors"]["default"]) -> discord.Message:
    """Send a simple embed message to the channel with the given name in the given guild,
    using the given message and an optional colour.

    Args:
        bot (commands.Bot): The bot containing the guild with the channel to send the message to.
        channel_name (str): The name of the channel to send the message to.
        message (str): The contents of the message.
        color (str, optional): The colour that will be used in the embed.
            Defaults to config["colors"]["default"].

    Returns:
        discord.Message: The embed message that was sent.
    """
    guild: discord.Guild = bot_util.get_guild(bot, config["guild-id"])
    channel: discord.TextChannel = guild_util.get_channel_by_name(guild, channel_name)
    return await channel.send(embed=discord.Embed(description=message, color=int(color, 0)))
577594c5abdb946ac04decac3ef94ea0e8296535
3,658,869
def retry_on_server_errors_timeout_or_quota_issues_filter(exception):
    """Retry on server, timeout and 403 errors.

    403 errors can be accessDenied, billingNotEnabled, and also quotaExceeded,
    rateLimitExceeded."""
    if HttpError is not None and isinstance(exception, HttpError):
        if exception.status_code == 403:
            return True
    return retry_on_server_errors_and_timeout_filter(exception)
18be4224af641b35cfba50d0ec85a1d22908d1e4
3,658,870
def CSourceForElfSymbolTable(variable_prefix, names, str_offsets):
    """Generate C source definition for an ELF symbol table.

    Args:
      variable_prefix: variable name prefix
      names: List of symbol names.
      str_offsets: List of symbol name offsets in string table.
    Returns:
      String containing C source fragment.
    """
    out = (
        r'''// NOTE: ELF32_Sym and ELF64_Sym have very different layout.
#if UINTPTR_MAX == UINT32_MAX  // ELF32_Sym
#  define DEFINE_ELF_SYMBOL(name, name_offset, address, size) \
    { (name_offset), (address), (size), ELF_ST_INFO(STB_GLOBAL, STT_FUNC), \
      0 /* other */, 1 /* shndx */ },
#else  // ELF64_Sym
#  define DEFINE_ELF_SYMBOL(name, name_offset, address, size) \
    { (name_offset), ELF_ST_INFO(STB_GLOBAL, STT_FUNC), \
      0 /* other */, 1 /* shndx */, (address), (size) },
#endif  // !ELF64_Sym
''')
    out += 'static const ELF::Sym k%sSymbolTable[] = {\n' % variable_prefix
    out += '  { 0 },  // ST_UNDEF\n'
    out += '  LIST_ELF_SYMBOLS_%s(DEFINE_ELF_SYMBOL)\n' % variable_prefix
    out += '};\n'
    out += '#undef DEFINE_ELF_SYMBOL\n'
    return out
233c55815cf5b72092d3c60be42caffc95570c22
3,658,873
from typing import Optional


def get_endpoint_access(endpoint_name: Optional[str] = None,
                        opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetEndpointAccessResult:
    """
    Resource schema for a Redshift-managed VPC endpoint.

    :param str endpoint_name: The name of the endpoint.
    """
    __args__ = dict()
    __args__['endpointName'] = endpoint_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('aws-native:redshift:getEndpointAccess', __args__,
                                    opts=opts, typ=GetEndpointAccessResult).value

    return AwaitableGetEndpointAccessResult(
        address=__ret__.address,
        endpoint_create_time=__ret__.endpoint_create_time,
        endpoint_status=__ret__.endpoint_status,
        port=__ret__.port,
        vpc_endpoint=__ret__.vpc_endpoint,
        vpc_security_group_ids=__ret__.vpc_security_group_ids,
        vpc_security_groups=__ret__.vpc_security_groups)
6b38fbd0b27d1ce892fc37c37944979907796862
3,658,875
def predictor(
    service: MLFlowDeploymentService,
    data: np.ndarray,
) -> Output(predictions=np.ndarray):
    """Run an inference request against a prediction service"""
    service.start(timeout=10)  # should be a NOP if already started
    prediction = service.predict(data)
    prediction = prediction.argmax(axis=-1)
    return prediction
c3b5f7241aeab0520db535134912431edf467137
3,658,876
def get_domain(ns, domain):
    """
    Return LMIInstance of given LMI_SSSDDomain.

    :type domain: string
    :param domain: Name of the domain to find.
    :rtype: LMIInstance of LMI_SSSDDomain
    """
    keys = {'Name': domain}
    try:
        inst = ns.LMI_SSSDDomain.new_instance_name(keys).to_instance()
    except wbem.CIMError as err:
        if err.args[0] == wbem.CIM_ERR_NOT_FOUND:
            raise LmiFailed("Cannot find the domain: %s" % domain)
        raise
    return inst
beadbd0c172a07c2b55b5bf2b22a05abf562b95b
3,658,877
def mmd_loss(embedding, auxiliary_labels, weights_pos, weights_neg, params):
    """ Computes mmd loss, weighted or unweighted """
    if weights_pos is None:
        return mmd_loss_unweighted(embedding, auxiliary_labels, params)
    return mmd_loss_weighted(embedding, auxiliary_labels,
                             weights_pos, weights_neg, params)
6b592159587ec49fc6fd77ed286f338d11582a4b
3,658,878
def perdidas(n_r, n_inv, n_x, **kwargs):
    """Calculate the losses due to equipment (the efficiencies multiply)."""
    n_t = n_r * n_inv * n_x
    for value in kwargs.values():
        n_t = n_t * value
    return n_t
157825059ad192ba90991bff6206b289755ce0ba
3,658,879
def _symlink_dep_cmd(lib, deps_dir, in_runfiles):
    """
    Helper function to construct a command for symlinking a library
    into the deps directory.
    """
    lib_path = lib.short_path if in_runfiles else lib.path
    return (
        "ln -sf " + relative_path(deps_dir, lib_path) +
        " " + deps_dir + "/" + lib.basename + "\n"
    )
6672decdee61dfc7f5604c6ebe1c07ac99800a91
3,658,880
def boundingBoxEdgeLengths(domain):
    """
    Returns the edge lengths of the bounding box of a domain

    :param domain: a domain
    :type domain: `escript.Domain`
    :rtype: ``list`` of ``float``
    """
    return [v[1] - v[0] for v in boundingBox(domain)]
a98fc867961bbf6a2ab518da6c933f1295d858db
3,658,881
def get_user(isamAppliance, user):
    """
    Get permitted features for user

    NOTE: Getting an unexplained error for this function, URL may be wrong
    """
    return isamAppliance.invoke_get("Get permitted features for user",
                                    "/authorization/features/users/{0}/v1".format(user))
0b2fd6c58e2623f8400daa942c83bd0757edd21f
3,658,882
from typing import Sequence
from typing import List
from typing import Optional


def autoupdate(
    config_file: str,
    store: Store,
    tags_only: bool,
    freeze: bool,
    repos: Sequence[str] = (),
    add_unused_hooks: bool = False,
) -> int:
    """Auto-update the pre-commit config to the latest versions of repos."""
    migrate_config(config_file, quiet=True)
    retv = 0
    rev_infos: List[Optional[RevInfo]] = []
    changed = False

    config = load_config(config_file)
    for repo_config in config['repos']:
        if repo_config['repo'] in {LOCAL, META}:
            continue

        info = RevInfo.from_config(repo_config)
        if repos and info.repo not in repos:
            rev_infos.append(None)
            continue

        output.write(f'Updating {info.repo} ... ')
        new_info = info.update(tags_only=tags_only, freeze=freeze)
        try:
            _check_hooks_still_exist_at_rev(repo_config, new_info, store)
        except RepositoryCannotBeUpdatedError as error:
            output.write_line(error.args[0])
            rev_infos.append(None)
            retv = 1
            continue

        if new_info.rev != info.rev:
            changed = True
            if new_info.frozen:
                updated_to = f'{new_info.frozen} (frozen)'
            else:
                updated_to = new_info.rev
            msg = f'updating {info.rev} -> {updated_to}.'
            output.write_line(msg)
            rev_infos.append(new_info)
        else:
            output.write_line('already up to date.')
            rev_infos.append(None)

        if add_unused_hooks:
            unused_hooks = _get_unused_hooks(repo_config, new_info, store)
            if unused_hooks:
                changed = True
                for unused_hook in unused_hooks:
                    repo_config['hooks'].append({'id': unused_hook})

    if changed:
        _write_new_config(config_file, rev_infos)

    return retv
f45aeae70d6e33b841791a09d9a1578834246e75
3,658,883
def plot_energy_resolution_cta_performance(cta_site, ax=None, **kwargs):
    """
    Plot the cta performances (June 2018) for the true_energy resolution

    Parameters
    ----------
    cta_site: string
        see `ctaplot.ana.cta_performance`
    ax: `matplotlib.pyplot.axes`
    kwargs: args for `matplotlib.pyplot.plot`

    Returns
    -------
    ax: `matplotlib.pyplot.axes`
    """
    ax = plt.gca() if ax is None else ax

    cta_req = ana.cta_performance(cta_site)
    e_cta, ar_cta = cta_req.get_energy_resolution()

    kwargs.setdefault('label', "CTA performance {}".format(cta_site))

    ax.set_ylabel(r"$(\Delta energy/energy)_{68}$")
    ax.set_xlabel(rf'$E_R$ [{e_cta.unit.to_string("latex")}]')

    with quantity_support():
        ax.plot(e_cta, ar_cta, **kwargs)

    ax.set_xscale('log')
    ax.grid(True, which='both')
    ax.legend()
    return ax
67f76bcaffb85339f45803d32daf3e2d783fb097
3,658,884
from collections import OrderedDict


def _n_nested_blocked_random_indices(sizes, n_iterations):
    """
    Returns indices to randomly resample blocks of an array (with replacement)
    in a nested manner many times. Here, "nested" resampling means to randomly
    resample the first dimension, then for each randomly sampled element along
    that dimension, randomly resample the second dimension, then for each
    randomly sampled element along that dimension, randomly resample the third
    dimension etc.

    Parameters
    ----------
    sizes : OrderedDict
        Dictionary with {names: (sizes, blocks)} of the dimensions to resample
    n_iterations : int
        The number of times to repeat the random resampling
    """
    shape = [s[0] for s in sizes.values()]

    indices = OrderedDict()
    for ax, (key, (_, block)) in enumerate(sizes.items()):
        indices[key] = _get_blocked_random_indices(
            shape[: ax + 1] + [n_iterations], ax, block
        )
    return indices
730ddba8f0753c29ebcf55c8449f365e6fc0b9ab
3,658,885
import numpy as np


def phase_type_from_parallel_erlang2(theta1, theta2, n1, n2):
    """Returns initial probabilities :math:`\\alpha` and generator matrix
    :math:`S` for a phase-type representation of two parallel Erlang channels
    with parametrisation :math:`(\\theta_1, n_1)` and :math:`(\\theta_2, n_2)`
    (rate and steps of Erlang channels).

    `Note`: To obtain a phase-type density pass the results of this method
    into the method `utils.phase_type_pdf`.

    `Note`: The two Erlang channels split at the first substep into each
    channel. The parametrisation implies the rate :math:`n\\cdot\\theta` on
    the individual exponentially-distributed substeps for the respective
    channel.

    Parameters
    ----------
    theta1 : float
        Rate parameter of the first complete Erlang channel (inverse of the
        mean Erlang waiting time).
    theta2 : float
        Rate parameter of the second complete Erlang channel (inverse of the
        mean Erlang waiting time).
    n1 : int or float
        Number of steps of the first Erlang channel (shape parameter).
    n2 : int or float
        Number of steps of the second Erlang channel (shape parameter).

    Returns
    -------
    alpha : 1d numpy.ndarray
        The initial probability vector of the phase-type distribution
        (with shape `(1,m)` where :math:`m=n_1+n_2-1`).
    S : 2d numpy.ndarray
        The transient generator matrix of the phase-type distribution
        (with shape `(m,m)` where :math:`m=n_1+n_2-1`).
    """
    ### self-written, copied from env_PHdensity notebook
    ### butools can then be used to get density and network image with:
    ### 1) pdf = ph.PdfFromPH(a, A, x)
    ### 2) ph.ImageFromPH(a, A, 'display')

    # some checks
    for theta in (theta1, theta2):
        if not isinstance(theta, float):
            raise ValueError('Float expected for theta.')
    for n in (n1, n2):
        if isinstance(n, int):
            pass
        elif isinstance(n, float) and n.is_integer():
            pass
        else:
            raise ValueError('Integer number expected for n.')
        if n < 1:
            raise ValueError('Steps n expected to be 1 or more.')

    # preallocate initial probs and subgenerator matrix
    alpha = np.zeros((1, int(n1 + n2) - 1))
    S = np.zeros((int(n1 + n2) - 1, int(n1 + n2) - 1))

    # first index sets source
    alpha[0, 0] = 1.0

    # substep rates
    r1 = n1 * theta1
    r2 = n2 * theta2

    # outflux from source
    # (from competing channels)
    S[0, 0] = -(r1 + r2)

    # fill matrix (first channel)
    l = [0] + list(range(1, int(n1)))
    for i, inext in zip(l[0:-1], l[1:]):
        S[i, inext] = r1
        S[inext, inext] = -r1

    # fill matrix (second channel)
    l = [0] + list(range(int(n1), int(n1 + n2) - 1))
    for i, inext in zip(l[0:-1], l[1:]):
        S[i, inext] = r2
        S[inext, inext] = -r2

    return alpha, S
667fc2abdb38e2e623a5f91f33ffb60f9b9e5ca8
3,658,886
def get_regions(max_time_value):
    """
    Partition R into a finite collection of one-dimensional regions
    depending on the appearing max time value.
    """
    regions = []
    bound = 2 * max_time_value + 1
    for i in range(0, bound + 1):
        if i % 2 == 0:
            temp = i // 2
            r = Constraint('[' + str(temp) + ',' + str(temp) + ']')
            regions.append(r)
        else:
            temp = (i - 1) // 2
            if temp < max_time_value:
                r = Constraint('(' + str(temp) + ',' + str(temp + 1) + ')')
                regions.append(r)
            else:
                r = Constraint('(' + str(temp) + ',' + '+' + ')')
                regions.append(r)
    return regions
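A sketch of the resulting partition for max_time_value = 1, using plain strings in place of the module's Constraint class (an assumption for illustration): point regions and open intervals alternate, ending with an unbounded region.

max_time_value = 1
for i in range(0, 2 * max_time_value + 1 + 1):
    if i % 2 == 0:
        t = i // 2
        print(f"[{t},{t}]")
    else:
        t = (i - 1) // 2
        print(f"({t},{t + 1})" if t < max_time_value else f"({t},+)")
# -> [0,0]  (0,1)  [1,1]  (1,+)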
1cc825592e07dc0bef30f04896e57df189d28bb3
3,658,887
import networkx as nx


def label_edges(g: nx.DiGraph) -> nx.DiGraph:
    """Label all the edges automatically.

    Args:
        g: the original directed graph.

    Raises:
        Exception: when some edge already has attribute "label_".

    Returns:
        The original directed graph with all edges labelled.
    """
    g_labelled = nx.DiGraph(g)
    i = 1
    for edge in g_labelled.edges.data():
        if _ATTR_LABEL in edge[2]:
            raise Exception(
                f"The edge {edge[0]}-{edge[1]} already has the {_ATTR_LABEL} attribute."
            )
        else:
            edge[2][_ATTR_LABEL] = f"e{i}"
            i += 1
    return g_labelled
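Hypothetical usage (the module-level _ATTR_LABEL constant is assumed to be "label_", matching the docstring):

import networkx as nx

_ATTR_LABEL = "label_"  # assumption for this example

g = nx.DiGraph([(1, 2), (2, 3)])
labelled = label_edges(g)
print(list(labelled.edges.data()))
# [(1, 2, {'label_': 'e1'}), (2, 3, {'label_': 'e2'})]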
a74559cdce8d75a65913def6c545b86ed45b2ead
3,658,888
from datetime import datetime, timedelta
import calendar

from dateutil.relativedelta import relativedelta


def report_charts(request, report, casetype='Call'):
    """Return charts for the last 4 days based on the Call Summary Data"""
    # The usual filters.
    query = request.GET.get('q', '')
    interval = request.GET.get('interval', 'daily')
    category = request.GET.get('category', '')
    if report == 'categorysummary':
        y_axis = 'category'
    elif report == 'dailysummary':
        y_axis = 'daily'
    else:
        y_axis = request.GET.get('y_axis', '')
    datetime_range = request.GET.get("datetime_range")
    agent = request.GET.get("agent")
    form = ReportFilterForm(request.GET)

    # Update the search url to chart based views.
    search_url = reverse('report_charts', kwargs={'report': report})

    # Convert date range string to datetime object
    if datetime_range:
        try:
            a, b = [datetime_range.split(" - ")[0], datetime_range.split(" - ")[1]]
            from_date = datetime.strptime(a, '%m/%d/%Y %I:%M %p')
            to_date = datetime.strptime(b, '%m/%d/%Y %I:%M %p')
            current = from_date
            delta = to_date - from_date
            date_list = []
            if interval == 'hourly':
                for i in range(int(delta.total_seconds() // 3600)):
                    date_list.append(from_date + timedelta(seconds=i * 3600))
            elif interval == 'monthly':
                while current <= to_date:
                    current += relativedelta(months=1)
                    date_list.append(current)
            elif interval == 'weekly':
                while current <= to_date:
                    current += relativedelta(weeks=1)
                    date_list.append(current)
            else:
                while current <= to_date:
                    current += relativedelta(days=1)
                    date_list.append(current)
            epoch_list = [date_item.strftime('%m/%d/%Y %I:%M %p')
                          for date_item in date_list]
        # Add filter to ajax query string.
        except Exception as e:
            from_date = None
            to_date = None
    else:
        from_date = None
        to_date = None
        # Start date
        base = datetime.today()
        date_list = [base - timedelta(days=x) for x in range(0, 3)]
        epoch_list = [date_item.strftime('%m/%d/%Y %I:%M %p')
                      for date_item in date_list]
        epoch_list.reverse()

    e = None
    datetime_ranges = pairwise(epoch_list)
    callsummary_data = []
    total_calls = 0
    for datetime_range in datetime_ranges:
        # Date time list returns descending. We want ascending.
        datetime_range_string = " - ".join(datetime_range)
        if y_axis == 'category':
            categories = [i[0] for i in
                          Category.objects.values_list('hl_category').distinct()]
            for category in categories:
                report_data = report_factory(report='chartreport',
                                             datetime_range=datetime_range_string,
                                             agent=agent,
                                             query=query,
                                             category=category,
                                             casetype=casetype)
                # Append data to tables list.
                callsummary_data.append(report_data)
                total_calls = total_calls + report_data.get('total_offered').get('count')
        else:
            report_data = report_factory(report='chartreport',
                                         datetime_range=datetime_range_string,
                                         agent=agent,
                                         query=query,
                                         category=category,
                                         casetype=casetype)
            # Append data to tables list.
            callsummary_data.append(report_data)
            total_calls = total_calls + report_data.get('total_offered').get('count')

    # Multibar chart page.
    if y_axis != 'daily':
        summary_table = CallSummaryTable(callsummary_data)
    tooltip_date = "%d %b %Y %H:%M:%S %p"
    extra_serie = {"tooltip": {"y_start": "There are ", "y_end": " calls"},
                   "date_format": tooltip_date}
    if y_axis == 'category':
        categories = [i[0] for i in
                      Category.objects.values_list('hl_category').distinct()]
        chartdata = {
            'x': epoch_list,
        }
        for i in range(len(categories)):
            chartdata['name%s' % str(i+1)] = categories[i]
            category_related = []
            for data in callsummary_data:
                if data.get('category') == categories[i]:
                    category_related.append(data)
            chartdata['y%s' % str(i+1)] = [d.get('total_offered').get('count')
                                           for d in category_related]
            chartdata['extra%s' % str(i+1)] = extra_serie
    elif y_axis == 'daily':
        daysummary_data = []
        month_names = []
        day_names = list(calendar.day_name)
        chartdata = {}
        day_related = {}
        for day_name in day_names:
            day_related[day_name] = []
        for i in range(len(day_names)):
            day_summary = {}
            chartdata['name%s' % str(i+1)] = day_names[i]
            day_total_offered = 0
            month_name = 'None'
            for data in callsummary_data:
                if data.get('day') == day_names[i]:
                    day_related[day_names[i]].append(data)
                    day_total_offered = day_total_offered + data.get('total_offered').get('count')
                    day_related[day_names[i]][-1]['day_total_offered'] = day_total_offered
                    month_name = data.get('month')
            day_summary['month'] = month_name
            month_names.append(month_name)
            day_summary['%s' % (day_names[i].lower())] = day_total_offered
            chartdata['y%s' % str(i+1)] = [d.get('day_total_offered')
                                           for d in day_related[day_names[i]]]
            chartdata['extra%s' % str(i+1)] = extra_serie
            chartdata['x'] = month_names
            daysummary_data.append(day_summary)
    else:
        ydata = [d.get('total_offered').get('count') for d in callsummary_data]
        ydata2 = [d.get('total_answered') for d in callsummary_data]
        ydata3 = [d.get('total_abandoned') for d in callsummary_data]
        chartdata = {
            'x': epoch_list,
            'name1': 'Total Offered', 'y1': ydata, 'extra1': extra_serie,
            'name2': 'Total Answered', 'y2': ydata2, 'extra2': extra_serie,
            'name3': 'Total Abandoned', 'y3': ydata3, 'extra3': extra_serie,
        }
    charttype = "multiBarChart"
    chartcontainer = 'multibarchart_container'  # container name
    if y_axis == 'daily':
        summary_table = DaySummaryTable(daysummary_data)
    export_format = request.GET.get('_export', None)
    if TableExport.is_valid_format(export_format):
        exporter = TableExport(export_format, summary_table)
        return exporter.response('table.{}'.format(export_format))
    data = {
        'title': 'callsummary',
        'form': form,
        'summary_table': summary_table,
        'datetime_ranges_number': len(datetime_ranges),
        'error': e,
        'y_axis': y_axis,
        'search_url': search_url,
        'total_calls': total_calls,
        'charttype': charttype,
        'casetype': casetype,
        'chartdata': chartdata,
        'chartcontainer': chartcontainer,
        'extra': {
            'name': 'Call data',
            'x_is_date': False,
            'x_axis_format': '',
            'tag_script_js': True,
            'jquery_on_ready': True,
        },
    }
    if report == 'ajax':
        return render(request, 'helpline/report_charts_factory.html', data)
    else:
        return render(request, 'helpline/report_charts.html', data)
0e9721446e66ee901732a6b0792075ccee607eaa
3,658,889
def _get_optimizer(learning_rate: float, gradient_clip_norm: float):
    """Gets model optimizer."""
    kwargs = {'clipnorm': gradient_clip_norm} if gradient_clip_norm > 0 else {}
    return tf.keras.optimizers.Adagrad(learning_rate, **kwargs)
92b9b70c533828232872250eca724c2568638f2f
3,658,890
def is_my_message(msg):
    """
    Check which bot a message was addressed to, so that this bot does not
    react to commands meant for other bots.

    :param msg: The message object to check.
    """
    text = msg.text.split()[0].split("@")
    if len(text) > 1:
        if text[1] != config.bot_name:
            return False
    return True
e99c8587ffbc1e582154785d657212f37358e926
3,658,891
from typing import Any
from typing import Dict


def execute_search_query(client: Client, query: Any, data_range: str) -> Dict[str, Any]:
    """Execute search job and wait for the results

    :type client: ``Client``
    :param client: Http client

    :type query: ``Any``
    :param query: Search query

    :type data_range: ``str``
    :param data_range: http url query for getting range of data

    :return: Search result
    :rtype: ``Dict[str, Any]``
    """
    response = client.varonis_execute_search(query)
    location = get_search_result_path(response)
    search_result = client.varonis_get_search_result(location, data_range,
                                                     SEARCH_RESULT_RETRIES)
    return search_result
68a58f9c4bc7c2b4a754cce8bd97022d327d5155
3,658,892
def static(directory: str) -> WSGIApp:
    """Return a WSGI app that serves static files under the given directory.

    Powered by WhiteNoise.
    """
    app = WhiteNoise(empty_wsgi_app())
    if exists(directory):
        app.add_files(directory)
    return app
9eae5f688b50d6c6c523e69ee0e79f667fb1d567
3,658,893
def check_filter(id):
    """ Helper function to determine if the current crime is in the dictionary """
    if id not in important_crime:
        return 30
    else:
        return important_crime[id] * 30
9ca74e57abd32db6176216f31deae193e0cac0d4
3,658,894
def rand_email(domain=None):
    """Generate a random email address

    :return: a random email address e.g. example@example.org
    :rtype: string
    """
    domain = domain or rand_zone_name()
    return 'example@%s' % domain.rstrip('.')
3653319c77b7e304ea03b7bb06888d115f45dc1e
3,658,895
def wordcount_for_reddit(data, search_word):
    """Return the number of times a word has been used."""
    count = 0
    for result in data:
        # do something with each result from scrape
        for key in result:
            stringed_list = str(result[key])
            text_list = stringed_list.split()
            for word in text_list:
                if search_word == 'Go':
                    if word == search_word:
                        count += 1
                elif word.lower() == search_word.lower():
                    count += 1
    return count
b0967aa896191a69cd1b969589b34522299ff415
3,658,896
def calc_precision(gnd_assignments, pred_assignments):
    """
    gnd_clusters should be a torch tensor of longs, containing
    the assignment to each cluster

    assumes that cluster assignments are 0-based, and no 'holes'
    """
    precision_sum = 0
    assert len(gnd_assignments.size()) == 1
    assert len(pred_assignments.size()) == 1
    assert pred_assignments.size(0) == gnd_assignments.size(0)
    N = gnd_assignments.size(0)
    K_gnd = gnd_assignments.max().item() + 1
    K_pred = pred_assignments.max().item() + 1
    for k_pred in range(K_pred):
        mask = pred_assignments == k_pred
        gnd = gnd_assignments[mask.nonzero().long().view(-1)]
        max_intersect = 0
        for k_gnd in range(K_gnd):
            intersect = (gnd == k_gnd).long().sum().item()
            max_intersect = max(max_intersect, intersect)
        precision_sum += max_intersect
    precision = precision_sum / N
    return precision
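A small worked example (assumes PyTorch): each predicted cluster is credited with its best-matching ground-truth cluster, so a pure label permutation still scores 1.0.

import torch

gnd = torch.tensor([0, 0, 1, 1])
pred = torch.tensor([1, 1, 0, 0])   # same grouping, ids swapped
print(calc_precision(gnd, pred))    # 1.0

pred2 = torch.tensor([0, 1, 0, 1])  # each predicted cluster is half right
print(calc_precision(gnd, pred2))   # 0.5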
536e25aa8e3b50e71beedaab3f2058c79d9957e3
3,658,898
def __get_app_package_path(package_type, app_or_model_class):
    """
    :param package_type: name of the package to substitute (e.g. 'models' or 'admin')
    :param app_or_model_class: dotted module path string, or a class with __module__
    :return: dotted package path
    """
    models_path = []
    found = False
    if isinstance(app_or_model_class, str):
        app_path_str = app_or_model_class
    elif hasattr(app_or_model_class, '__module__'):
        app_path_str = app_or_model_class.__module__
    else:
        raise RuntimeError('Unable to get module path.')

    for item in app_path_str.split('.'):
        if item in ['models', 'admin']:
            models_path.append(package_type)
            found = True
            break
        else:
            models_path.append(item)

    if not found:
        models_path.append(package_type)

    return '.'.join(models_path)
f08685ef47af65c3e74a76de1f64eb509ecc17b9
3,658,900
import base64


def dict_from_payload(base64_input: str, fport: int = None):
    """ Decodes a base64-encoded binary payload into JSON.

    Parameters
    ----------
    base64_input : str
        Base64-encoded binary payload
    fport: int
        FPort as provided in the metadata. Please note the fport is optional
        and can have value "None", if not provided by the LNS or invoking
        function. If fport is None and the binary decoder can not proceed
        because of that, it should raise an exception.

    Returns
    -------
    JSON object with key/value pairs of decoded attributes
    """
    decoded = base64.b64decode(base64_input)

    value = (decoded[0] << 8 | decoded[1]) & 0x3FFF
    battery = value / 1000

    # status flags live in the top bits of the first byte
    door_open_status = 0
    water_leak_status = 0
    if decoded[0] & 0x40:
        water_leak_status = 1
    if decoded[0] & 0x80:
        door_open_status = 1

    mod = decoded[2]

    if mod == 1:
        open_times = decoded[3] << 16 | decoded[4] << 8 | decoded[5]
        open_duration = decoded[6] << 16 | decoded[7] << 8 | decoded[8]
        result = {
            "mod": mod,
            "battery": battery,
            "door_open_status": door_open_status,
            "open_times": open_times,
            "open_duration": open_duration
        }
        return result

    if mod == 2:
        leak_times = decoded[3] << 16 | decoded[4] << 8 | decoded[5]
        leak_duration = decoded[6] << 16 | decoded[7] << 8 | decoded[8]
        result = {
            "mod": mod,
            "battery": battery,
            "leak_times": leak_times,
            "leak_duration": leak_duration
        }
        return result

    result = {
        "battery": battery,
        "mod": mod
    }
    return result
05fe484eef6c4376f0b6bafbde81c7cc4476b83e
3,658,901
from io import BytesIO

from PIL import Image, ImageOps


def handle(req):
    """POST"""
    im = Image.open(BytesIO(req.files[list(req.files.keys())[0]].body))
    w, h = im.size
    im2 = ImageOps.mirror(im.crop((0, 0, w / 2, h)))
    im.paste(im2, (int(w / 2), 0))
    io = BytesIO()
    im.save(io, format='PNG')
    return req.Response(
        body=io.getvalue(),
        mime_type='image/png',
        encoding='UTF-8')
d62afe253e331b4d7f037bdc56fa927bceb8bc03
3,658,902
import glob

import numpy as np
import pandas as pd


def read_sachs_all(folder_path):
    """Reads all the sachs data specified in the folder_path.

    Args:
        folder_path: str specifying the folder containing the sachs data

    Returns:
        An np.array containing all the sachs data
    """
    sachs_data = list()

    # Divides the Sachs dataset into environments.
    for _, file in enumerate(glob.glob(f'{folder_path}*.xls')):
        sachs_df = pd.read_excel(file)
        sachs_array = sachs_df.to_numpy()
        sachs_data.append(sachs_array)

    sachs_data_envs = np.vstack(sachs_data)
    return sachs_data_envs
151f1eec79251019d1a1c2b828531f6c1f01d605
3,658,903
def user_permitted_tree(user):
    """Generate a dictionary representing a folder tree composed of
    the elements the user is allowed to access.
    """
    # Init
    user_tree = {}

    # Dynamically collect permissions to avoid hardcoding
    # Note: Any permission to an element is the same as read permission so
    # they are all included.
    file_perm_list = [
        f'data_driven_acquisition.{perm}' for perm in
        get_perms_for_model('data_driven_acquisition.File').values_list(
            'codename', flat=True)
    ]
    folder_perm_list = [
        f'data_driven_acquisition.{perm}' for perm in
        get_perms_for_model('data_driven_acquisition.Folder').values_list(
            'codename', flat=True)
    ]

    # Collect all permitted elements
    permitted_folders = get_objects_for_user(
        user, folder_perm_list, any_perm=True).all()
    permitted_files = get_objects_for_user(
        user, file_perm_list, any_perm=True).all()

    # Add all permitted folders to the user tree with their content and parents.
    for folder_obj in permitted_folders:
        # Get the folder content as tree
        tree = get_folder_tree(folder_obj)

        # Try to place the tree in the user tree
        if not place_folder_in_tree(user_tree, folder_obj, tree):
            # The parent is not in the user tree.
            # Creating the parent folder at root level and then completing the
            # climb to the package level, merging as needed.
            user_tree[folder_obj] = tree
            user_tree = climb_to_package(user_tree, folder_obj)

    # Add all permitted files to the user tree with their parents.
    for file_obj in permitted_files:
        # Add to user tree if the parent folder is already there.
        if not place_file_in_tree(user_tree, file_obj):
            # Could not find the parent folder in the tree.
            # Creating a base tree with the parent folder and the file at
            # root level, then climbing up to the Package, merging when
            # required.
            tree = {
                "files": [file_obj, ]
            }
            user_tree[file_obj.parent] = tree
            user_tree = climb_to_package(user_tree, file_obj.parent)

    return user_tree
fd9b7d60da7e085e948d4def0ababc3d0cb8233f
3,658,904
def extract_failure(d):
    """
    Returns the failure object the given deferred was errback'ed with.

    If the deferred has a result, not a failure, a `ValueError` is raised.
    If the deferred has no result yet a :class:`NotCalledError` is raised.
    """
    if not has_result(d):
        raise NotCalledError()
    else:
        result = []

        def callback(value):
            result.append(value)

        d.addBoth(callback)
        result = result[0]
        if isinstance(result, failure.Failure):
            return result
        else:
            raise ValueError("Deferred was called back with a value: %r" % result)
7bc160a8ebd1c5cdeab1a91a556576c750d342f8
3,658,905
def convert_where_clause(clause: dict) -> str:
    """
    Convert a dictionary of clauses to a string for use in a query

    Parameters
    ----------
    clause : dict
        Dictionary of clauses

    Returns
    -------
    str
        A string representation of the clauses
    """
    out = "{"
    for key in clause.keys():
        out += "{}: ".format(key)
        # If the type of the right hand side is string, add string quotes around it
        if type(clause[key]) == str:
            out += '"{}"'.format(clause[key])
        else:
            out += "{}".format(clause[key])
        out += ","
    out += "}"
    return out
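Usage sketch (hypothetical keys, in the style of a GraphQL where filter): string values get quoted, everything else is rendered inline.

clause = {"id": "0x123", "amount_gt": 1000}
print(convert_where_clause(clause))
# {id: "0x123",amount_gt: 1000,}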
8b135c799df8d16c116e6a5282679ba43a054684
3,658,906
from unittest.mock import call


def all_metadata_async():
    """Retrieves all available metadata for an instance async"""
    loop = trollius.get_event_loop()
    res = loop.run_until_complete(call())
    return res
9759331fbd72271820896bd2849139dc13fc9d39
3,658,907
def median_std_from_ma(data: np.ma.MaskedArray, axis=0):
    """On the assumption that there are bit-flips in the *data*, attempt
    to find a value that might represent the standard deviation of the
    'real' data.  The *data* object must be a numpy masked array.

    The value of *axis* determines which way the data are handled.
    The default is 0 to scan vertically to accumulate statistics for
    columns.  In this case, only those columns with the most unmasked
    data are evaluated.  For them, the standard deviation is found
    for each column, and the returned value is the median of those
    standard deviations.  If *axis* is 1, then this is applied to
    rows, not columns.
    """
    valid_points = data.count(axis=axis)
    std_devs = np.std(data, axis=axis)
    return median_std(valid_points, std_devs)
587702c52bb2000ebfe920202270610e4ed49d8c
3,658,908
def __check_value_range(x: int) -> bool:
    """
    Checks if integer is in valid value range to be a coordinate for Tic-Tac-Toe.
    """
    if x < 1 or x > 3:
        print(__standard_error_text +
              "Coordinates have to be between 1 and 3.\n")
        return False
    return True
45f21b3292097baea31846b8bcc51435ae15134c
3,658,909
def find_option(opt):
    """
    This function checks for an option defined with optcode; it could be
    implemented differently - by checking entries in world.cliopts
    """
    # received msg from client must not be changed - make a copy of it
    tmp = world.climsg[world.clntCounter].copy()
    # layers: 0 - ether, 1 - ipv6, 2 - udp, 3 - dhcpv6, 4 - opts
    if type(tmp) == Ether:
        tmp = tmp.getlayer(4)
    else:
        tmp = tmp.getlayer(3)
    while tmp:
        if tmp.optcode == int(opt):
            return True
        tmp = tmp.payload
    return False
23904c16f9206f9030a40e17be5f6b01cb0439cf
3,658,910
# Set and Expression are Pyomo model components here.
from pyomo.environ import Set, Expression


def generic_add_model_components(
    m,
    d,
    reserve_zone_param,
    reserve_zone_set,
    reserve_generator_set,
    generator_reserve_provision_variable,
    total_reserve_provision_expression,
):
    """
    Generic treatment of reserves. This function creates model components
    related to a particular reserve requirement, including
    1) an expression aggregating generator-level provision to total provision

    :param m:
    :param d:
    :param reserve_zone_param:
    :param reserve_zone_set:
    :param reserve_generator_set:
    :param generator_reserve_provision_variable:
    :param total_reserve_provision_expression:
    :return:
    """
    # Reserve generators operational in timepoint
    # This will be the intersection of the reserve generator set and the set of
    # generators operational in the timepoint
    op_set = str(reserve_generator_set) + "_OPERATIONAL_IN_TIMEPOINT"
    setattr(
        m,
        op_set,
        Set(
            m.TMPS,
            initialize=lambda mod, tmp: getattr(mod, reserve_generator_set)
            & mod.OPR_PRJS_IN_TMP[tmp],
        ),
    )

    # Reserve provision
    def total_reserve_rule(mod, ba, tmp):
        return sum(
            getattr(mod, generator_reserve_provision_variable)[g, tmp]
            for g in getattr(mod, op_set)[tmp]
            if getattr(mod, reserve_zone_param)[g] == ba
        )

    setattr(
        m,
        total_reserve_provision_expression,
        Expression(getattr(m, reserve_zone_set), m.TMPS, rule=total_reserve_rule),
    )
2c7eef877e0ba7744ba624205fcf590071a95b84
3,658,911
def return_all_content(content):
    """Helper function to return untruncated stripped content."""
    return mark_safe(str(content).replace('><', '> <')) if content else None
e24a1ee812a3a011cf6e369ba96bc2989ad7603d
3,658,912
def get_trailing_returns(uid):
    """ Get trailing return chart """
    connection = pymysql.connect(host=DB_SRV,
                                 user=DB_USR,
                                 password=DB_PWD,
                                 db=DB_NAME,
                                 charset='utf8mb4',
                                 cursorclass=pymysql.cursors.DictCursor)
    cursor = connection.cursor(pymysql.cursors.SSCursor)
    sql = "SELECT instruments.fullname, instruments.is_benchmark, " +\
          "instruments.market, instruments.symbol, instruments.asset_class " +\
          "FROM instruments JOIN symbol_list ON symbol_list.symbol = instruments.symbol " +\
          "WHERE symbol_list.uid=" + str(uid)
    cursor.execute(sql)
    res = cursor.fetchall()
    for row in res:
        fullname = row[0].replace("'", "")
        is_benchmark = row[1]
        market = row[2]
        symbol_is_portf = row[3]
        asset_class = row[4]

    if symbol_is_portf.find(get_portf_suffix()) > -1:
        sql = "SELECT date FROM chart_data WHERE uid=" + str(uid) +\
              " ORDER BY date DESC LIMIT 1"
    else:
        sql = "SELECT price_instruments_data.date FROM price_instruments_data JOIN symbol_list " +\
              "ON symbol_list.symbol = price_instruments_data.symbol " +\
              "WHERE symbol_list.uid=" + str(uid) + " ORDER BY date DESC LIMIT 1"
    cursor.execute(sql)
    res = cursor.fetchall()
    as_date = ''
    l_as_date = ''
    for row in res:
        as_date = row[0]
    if as_date != '':
        l_as_date = 'Trailing returns as of ' + as_date.strftime("%d-%b-%Y")

    font_size = 10
    l_y1 = '1-Year'
    l_m6 = '6-month'
    l_m3 = '3-month'
    l_m1 = '1-month'
    l_w1 = '1-week'
    minb = 0
    mini = 0
    maxb = 0
    maxi = 0
    benchmark_header = ''
    benchmark_data_y1 = ''
    benchmark_data_m6 = ''
    benchmark_data_m3 = ''
    benchmark_data_m1 = ''
    benchmark_data_w1 = ''

    if not is_benchmark:
        sql = "SELECT symbol_list.uid, instruments.fullname " +\
              "FROM symbol_list JOIN instruments " +\
              "ON symbol_list.symbol = instruments.symbol " +\
              "WHERE instruments.market='" +\
              str(market) + "' AND instruments.asset_class='" +\
              str(asset_class) + "' AND instruments.is_benchmark=1"
        cursor.execute(sql)
        res = cursor.fetchall()
        benchmark_uid = 0
        for row in res:
            benchmark_uid = row[0]
            benchmark_fullname = row[1].replace("'", "")
        if benchmark_uid != 0:
            benchmark_header = ", ' " +\
                benchmark_fullname +\
                " ', {type: 'string', role: 'annotation'}"
            benchmark_data_y1 = ',' + get_chart_data(benchmark_uid, 'y1')
            benchmark_data_m6 = ',' + get_chart_data(benchmark_uid, 'm6')
            benchmark_data_m3 = ',' + get_chart_data(benchmark_uid, 'm3')
            benchmark_data_m1 = ',' + get_chart_data(benchmark_uid, 'm1')
            benchmark_data_w1 = ',' + get_chart_data(benchmark_uid, 'w1')
            minb = get_minmax(benchmark_uid, 'min')
            maxb = get_minmax(benchmark_uid, 'max')

    data = '' +\
        '["' + l_y1 + '",' + get_chart_data(uid, 'y1') + benchmark_data_y1 + ']' + ',' +\
        '["' + l_m6 + '",' + get_chart_data(uid, 'm6') + benchmark_data_m6 + ']' + ',' +\
        '["' + l_m3 + '",' + get_chart_data(uid, 'm3') + benchmark_data_m3 + ']' + ',' +\
        '["' + l_m1 + '",' + get_chart_data(uid, 'm1') + benchmark_data_m1 + ']' + ',' +\
        '["' + l_w1 + '",' + get_chart_data(uid, 'w1') + benchmark_data_w1 + ']'

    mini = get_minmax(uid, 'min')
    maxi = get_minmax(uid, 'max')
    if minb < mini:
        mini = minb
    if maxb > maxi:
        maxi = maxb

    header = " ['x', ' " +\
        fullname + " ', {type: 'string', role: 'annotation'}" +\
        benchmark_header + " ],"

    chart_content = "" +\
        "<script>" +\
        "google.charts.load('current', {packages: ['corechart', 'bar']});" +\
        "google.charts.setOnLoadCallback(drawAnnotations);" +\
        "function drawAnnotations() {" +\
        " var data = google.visualization.arrayToDataTable([" +\
        header +\
        data +\
        " ]);" +\
        " var options = {" +\
        " fontSize: " + str(font_size) + "," +\
        " legend: {position:'top', textStyle: {color:" +\
        theme_return_this("'black'", "'white'") + "} }," +\
        " title: ''," +\
        " backgroundColor: 'transparent'," +\
        " chartArea: {width: '50%'}," +\
        " annotations: {" +\
        " alwaysOutside: true," +\
        " textStyle: {" +\
        " auraColor: 'none'," +\
        " color: '#555'" +\
        " }," +\
        " boxStyle: {" +\
        " stroke: '#ccc'," +\
        " strokeWidth: 1," +\
        " gradient: {" +\
        " color1: 'yellow'," +\
        " color2: 'white'," +\
        " x1: '0%', y1: '0%'," +\
        " x2: '100%', y2: '100%'" +\
        " }" +\
        " }" +\
        " }," +\
        " series: {0:{color: " +\
        theme_return_this("'blue'", "'orange'") + "}, 1:{color: '#c9d6ea'} }," +\
        " chartArea: {width:'80%',height:'80%'}," +\
        " hAxis: {" +\
        " title: '" + l_as_date + "', " +\
        " titleTextStyle:{ color:" +\
        theme_return_this("'black'", "'white'") + "}," +\
        " viewWindow:{min:" +\
        str(mini) + ",max:" +\
        str(maxi) + "}," +\
        " gridlines: { color: 'transparent' }," +\
        " textStyle: { color: " +\
        theme_return_this("'black'", "'white'") + " } " +\
        " }," +\
        " vAxis: {" +\
        " title: '', " +\
        " textStyle: { color: " +\
        theme_return_this("'black'", "'white'") + " } " +\
        " }" +\
        " };" +\
        " var chart = " +\
        "new google.visualization.BarChart(document.getElementById('trail_chart'));" +\
        " chart.draw(data, options);" +\
        " }" +\
        " </script>" +\
        " <div id='trail_chart' class='sa-chart-hw-290'></div>"

    cursor.close()
    connection.close()
    return chart_content
96e8ea67b1b91c3dfc6994b5ff56d9384aca6da5
3,658,913
def bitsNotSet(bitmask, maskbits):
    """
    Given a bitmask, returns True where any of maskbits are set
    and False otherwise.

    Parameters
    ----------
    bitmask : ndarray
        Input bitmask.
    maskbits : ndarray
        Bits to check if set in the bitmask
    """
    goodLocs_bool = np.zeros(bitmask.shape).astype(bool)
    for m in maskbits:
        bitind = bm.bit_set(m, bitmask)
        goodLocs_bool[bitind] = True
    return goodLocs_bool
746c054310ac06c58cc32e5635d270c64481a527
3,658,914
def plot(foo, x, y):
    """x, y are tuples of 3 values: xmin, xmax, xnum"""
    np_foo = np.vectorize(foo)
    x_space = np.linspace(*x)
    y_space = np.linspace(*y)
    xx, yy = np.meshgrid(x_space, y_space)
    xx = xx.flatten()
    yy = yy.flatten()
    zz = np_foo(xx, yy)
    num_x = x[-1]
    num_y = y[-1]
    points = np.array([xx, yy, zz]).T
    scale = coin.SoScale()
    scale.scaleFactor.setValue(1, 1, abs(x[1] - x[0]) / abs(max(zz) - min(zz)))
    return [scale, simple_quad_mesh(points, num_x, num_y)]
67bfcc70a71140efa3d08960487a69943d2acdc8
3,658,915
import struct


def _StructPackEncoder(wire_type, format):
    """Return a constructor for an encoder for a fixed-width field.

    Args:
        wire_type:  The field's wire type, for encoding tags.
        format:  The format string to pass to struct.pack().
    """
    value_size = struct.calcsize(format)

    def SpecificEncoder(field_number, is_repeated, is_packed):
        local_struct_pack = struct.pack
        if is_packed:
            tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
            local_EncodeVarint = _EncodeVarint

            def EncodePackedField(write, value):
                write(tag_bytes)
                local_EncodeVarint(write, len(value) * value_size)
                for element in value:
                    write(local_struct_pack(format, element))
            return EncodePackedField
        elif is_repeated:
            tag_bytes = TagBytes(field_number, wire_type)

            def EncodeRepeatedField(write, value):
                for element in value:
                    write(tag_bytes)
                    write(local_struct_pack(format, element))
            return EncodeRepeatedField
        else:
            tag_bytes = TagBytes(field_number, wire_type)

            def EncodeField(write, value):
                write(tag_bytes)
                return write(local_struct_pack(format, value))
            return EncodeField

    return SpecificEncoder
7c58d955a903bac423799c99183066268fb7711b
3,658,916
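The fixed-width encoders above bottom out in struct.pack; a standalone illustration of the packing they rely on, independent of the protobuf internals (TagBytes, _EncodeVarint) the function assumes:

import struct

packed = struct.pack('<I', 300)    # one little-endian fixed32 value
assert packed == b'\x2c\x01\x00\x00'
assert struct.calcsize('<I') == 4  # the per-element value_size computed above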
def transition_temperature(wavelength):
    """
    Return the temperature of the transition in K.
    Wavelength is given in microns.
    T = h*f / kB
    """
    w = u.Quantity(wavelength, u.um)
    l = w.to(u.m)
    c = _si.c.to(u.m / u.s)
    h = _si.h.to(u.eV * u.s)
    kb = _si.k_B.to(u.eV / u.K)
    f = c/l
    t = h*f/kb
    return t
dbec1ee2c1ad01cd257791624105ff0c4de6e708
3,658,917
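Assuming `u` and `_si` above are astropy.units and astropy's SI constants, a quick sanity check (hc/kB is about 1.439e-2 m K):

# transition_temperature(100.0)  # -> ~143.9 K for a 100-micron transition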
def truncate_string(string: str, max_length: int) -> str: """ Truncate a string to a specified maximum length. :param string: String to truncate. :param max_length: Maximum length of the output string. :return: Possibly shortened string. """ if len(string) <= max_length: return string else: return string[:max_length]
c7d159feadacae5a692b1f4d95da47a25dd67c16
3,658,918
import requests
import json
from ipaddress import ip_address

def geoinfo_from_ip(ip: str) -> dict:  # pylint: disable=invalid-name
    """Looks up the geolocation of an IP address using ipinfo.io

    Example ipinfo output:
    {
      "ip": "1.1.1.1",
      "hostname": "one.one.one.one",
      "anycast": true,
      "city": "Miami",
      "region": "Florida",
      "country": "US",
      "loc": "25.7867,-80.1800",
      "org": "AS13335 Cloudflare, Inc.",
      "postal": "33132",
      "timezone": "America/New_York",
      "readme": "https://ipinfo.io/missingauth"
    }
    """
    valid_ip = ip_address(ip)
    url = f"https://ipinfo.io/{valid_ip}/json"
    resp = requests.get(url)
    if resp.status_code != 200:
        raise Exception(f"Geo lookup failed: GET {url} returned {resp.status_code}")
    geoinfo = json.loads(resp.text)
    return geoinfo
956a9d12b6264dc283f64ee792144946e313627b
3,658,919
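A hypothetical call; this performs a live HTTP request to ipinfo.io:

# info = geoinfo_from_ip("1.1.1.1")
# info["ip"]  # -> "1.1.1.1"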
def mpileup2acgt(pileup, quality, depth, reference, qlimit=53,
                 noend=False, nostart=False):
    """
    This function was written by Francesco Favero,
    from: sequenza-utils pileup2acgt
    URL: https://bitbucket.org/sequenza_tools/sequenza-utils
    original code was protected under GPLv3 license.

    Parse the mpileup format and return the occurrence of
    each nucleotide in the given positions.

    pileup format:

    1    chr
    2    1-based coordinate
    3    reference base
    4    depth
    5    read bases
    6    base qualities
    7    mapping qualities

    # argument pileup = column 5 (read bases)
    """
    nucleot_dict = {'A': 0, 'C': 0, 'G': 0, 'T': 0}
    strand_dict = {'A': 0, 'C': 0, 'G': 0, 'T': 0}
    n = 0
    block = {'seq': '', 'length': 0}
    start = False
    del_ins = False
    l_del_ins = ''
    last_base = None
    ins_del_length = 0
    for base in pileup:
        if block['length'] == 0:
            if base == '$':
                if noend:
                    if last_base:
                        nucleot_dict[last_base.upper()] -= 1
                        if last_base.isupper():
                            strand_dict[last_base.upper()] -= 1
                    last_base = None
            elif base == '^':
                start = True
                block['length'] += 1
                block['seq'] = base
            elif base == '+' or base == '-':
                del_ins = True
                block['length'] += 1
                block['seq'] = base
            elif base == '.' or base == ',':  ## . on forward, , on reverse
                if ord(quality[n]) >= qlimit:
                    nucleot_dict[reference] += 1
                    if base == '.':
                        strand_dict[reference] += 1
                        last_base = reference
                    else:
                        last_base = reference.lower()
                else:
                    last_base = None
                n += 1
            elif base.upper() in nucleot_dict:
                if ord(quality[n]) >= qlimit:
                    nucleot_dict[base.upper()] += 1
                    if base.isupper():
                        strand_dict[base.upper()] += 1
                    last_base = base
                else:
                    last_base = None
                n += 1
            else:
                n += 1
        else:
            if start:
                block['length'] += 1
                block['seq'] += base
                if block['length'] == 3:
                    if not nostart:
                        if base == '.' or base == ',':
                            if ord(quality[n]) >= qlimit:
                                nucleot_dict[reference] += 1
                                if base == '.':
                                    strand_dict[reference] += 1
                        elif base.upper() in nucleot_dict:
                            if ord(quality[n]) >= qlimit:
                                nucleot_dict[base.upper()] += 1
                                if base.isupper():
                                    strand_dict[base.upper()] += 1
                    block['length'] = 0
                    block['seq'] = ''
                    start = False
                    last_base = None
                    n += 1
            elif del_ins:
                if base.isdigit():
                    l_del_ins += base
                    block['seq'] += base
                    block['length'] += 1
                else:
                    ins_del_length = int(l_del_ins) + 1 + len(l_del_ins)
                    block['seq'] += base
                    block['length'] += 1
                    if block['length'] == ins_del_length:
                        block['length'] = 0
                        block['seq'] = ''
                        l_del_ins = ''
                        # ins_del = False
                        ins_del_length = 0
    nucleot_dict['Z'] = [strand_dict['A'], strand_dict['C'],
                         strand_dict['G'], strand_dict['T']]
    return nucleot_dict
bf5a0c5e147ece6e9b3be5906ba81ed54593b257
3,658,920
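A toy pileup column, traced by hand; all base qualities are 'I' (ord 73), which passes the default qlimit of 53:

mpileup2acgt(".,.TT", "IIIII", 5, "A")
# -> {'A': 3, 'C': 0, 'G': 0, 'T': 2, 'Z': [2, 0, 0, 2]}
# three reference matches (two on the forward strand), two forward-strand Ts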
def normalize_missing(xs):
    """Normalize missing values to avoid string 'None' inputs.
    """
    if isinstance(xs, dict):
        for k, v in xs.items():
            xs[k] = normalize_missing(v)
    elif isinstance(xs, (list, tuple)):
        xs = [normalize_missing(x) for x in xs]
    elif isinstance(xs, str):  # basestring on Python 2
        if xs.lower() in ["none", "null"]:
            xs = None
        elif xs.lower() == "true":
            xs = True
        elif xs.lower() == "false":
            xs = False
    return xs
5d3fef8370e6a4e993eb06d96e5010c4b57907ba
3,658,921
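A short usage sketch; note that tuples come back as lists, a side effect of the list comprehension:

normalize_missing({"flag": "None", "opts": ["true", "false", "keep"]})
# -> {'flag': None, 'opts': [True, False, 'keep']}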
def ini_inventory(nhosts=10): """Return a .INI representation of inventory""" output = list() inv_list = generate_inventory(nhosts) for group in inv_list.keys(): if group == '_meta': continue # output host groups output.append('[%s]' % group) for host in inv_list[group].get('hosts', []): output.append(host) output.append('') # newline # output child groups output.append('[%s:children]' % group) for child in inv_list[group].get('children', []): output.append(child) output.append('') # newline # output group vars output.append('[%s:vars]' % group) for k, v in inv_list[group].get('vars', {}).items(): output.append('%s=%s' % (k, v)) output.append('') # newline return '\n'.join(output)
46182c727e9dbb844281842574bbb54d2530d42b
3,658,922
def get_crp_constrained_partition_counts(Z, Cd): """Compute effective counts at each table given dependence constraints. Z is a dictionary mapping customer to table, and Cd is a list of lists encoding the dependence constraints. """ # Compute the effective partition. counts = defaultdict(int) seen = set() # Table assignment of constrained customers. for block in Cd: seen.update(block) customer = block[0] table = Z[customer] counts[table] += 1 # Table assignment of unconstrained customers. for customer in Z: if customer in seen: continue table = Z[customer] counts[table] += 1 return counts
acda347ec904a7835c63afe5ec6efda5915df405
3,658,923
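A worked example; the function assumes `from collections import defaultdict` is in scope:

Z = {0: 0, 1: 0, 2: 1, 3: 1}   # customer -> table
Cd = [[0, 1]]                  # customers 0 and 1 are constrained together
get_crp_constrained_partition_counts(Z, Cd)
# -> {0: 1, 1: 2}: the constrained block contributes a single count at table 0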
import re
from re import T

def create_doc():
    """Test basic layer creation and node creation."""
    # Stupid tokenizer
    tokenizer = re.compile(r"[a-zA-Z]+|[0-9]+|[^\s]")
    doc = Document()
    main_text = doc.add_text("main", "This code was written in Lund, Sweden.")
    #                                 01234567890123456789012345678901234567
    #                                 0         1         2         3
    token = doc.add_layer("token", text=main_text.spantype)
    for m in tokenizer.finditer(str(main_text)):
        token.add(text=main_text[m.start():m.end()])
    # NOTE: `T.string` implies a schema-type helper (e.g. docria's T);
    # re.T is an int flag and has no `.string` attribute.
    named_entity = doc.add_layer("named_entity", text=main_text.spantype,
                                 cls=T.string)
    named_entity.add(text=main_text[25:29], cls="GPE")
    named_entity.add(text=main_text[31:37], cls="GPE")
    return doc
36171fd68712861370b6ea1a8ae49aa7ec0c139a
3,658,924
def jissue_get_chunked(jira_in, project, issue_max_count, chunks=100):
    """
    Get the issue list with references in chunks, for the case where
    the number of issues is more than 1000.
    """
    result = []
    # integer step and remainder (floor division keeps `range` happy on Python 3)
    step = issue_max_count // chunks
    rest = issue_max_count % chunks
    # iterate the issue gathering
    for i in range(step):
        result.extend(jissue_query(jira_in, project, chunks*i, chunks))
    result.extend(jissue_query(jira_in, project, issue_max_count-rest, rest))
    return result
1c32859b91f139f5b56ce00f38dba38b2297109e
3,658,925
def negative_height_check(height):
    """Reinterpret the height as a signed 32-bit value, converting it if it
    exceeds 0x7FFFFFFF."""
    if height > 0x7FFFFFFF:
        return height - 4294967296
    return height
4d319021f9e1839a17b861c92c7319ad199dfb42
3,658,926
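Two quick checks of the signed 32-bit reinterpretation:

negative_height_check(0xFFFFFFFF)  # -> -1 (unsigned wrap-around)
negative_height_check(100)         # -> 100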
def linked_gallery_view(request, obj_uuid): """ View For Permalinks """ gallery = get_object_or_404(Gallery, uuid=obj_uuid) images = gallery.images.all().order_by(*gallery.display_sort_string) paginator = Paginator(images, gallery.gallery_pagination_count) page = request.GET.get('page') try: imgs = paginator.page(page) except PageNotAnInteger: imgs = paginator.page(1) except EmptyPage: imgs = paginator.page(paginator.num_pages) context = { "images": imgs, "gallery": gallery, "gallery_name": gallery.title } return render(request, 'image_list.html', context)
c425c6678f2bfb3b76c039ef143d4e7cbc6ee922
3,658,928
def _gm_cluster_assign_id(gm_list, track_id, num_tracks, weight_threshold,
                          z_dim, max_id, max_iteration=1000):
    """The cluster algorithm that assigns a new ID to the track

    Args:
        gm_list (:obj:`list`): List of ``GaussianComponent`` representing
            current multi-target PHD density.
        track_id (:obj:`int`): Current track id.
        num_tracks (:obj:`int`): The number of tracks that this list of
            Gaussian components need to split into.
        weight_threshold (:obj:`float`): Initial weight threshold for each
            newly spawned track.
        z_dim (:obj:`int`): The dimensionality of measurement space.
        max_id (:obj:`int`): The next track ID number that can be assigned.
        max_iteration (:obj:`int`): Max number of iterations in case that
            the clustering algorithm does not converge and oscillates.

    Returns:
        A `list` of Gaussian components with updated track ID and the next
        track ID that can be assigned to new tracks in the future.
    """
    clusters_mean = np.random.uniform(0, 1, (num_tracks, z_dim))
    previous_clusters_mean = None
    cluster_gms = [[] for i in range(num_tracks)]
    count = 0
    while np.any(clusters_mean != previous_clusters_mean) and \
            count < max_iteration:
        previous_clusters_mean = np.copy(clusters_mean)
        # There are num_tracks means; calculate the distance between each
        # GM component and each mean, sorted from high to low
        gm_distance_matrix = _gm_cluster_distance(gm_list=gm_list,
                                                  clusters_mean=clusters_mean,
                                                  num_tracks=num_tracks,
                                                  z_dim=z_dim)
        # Assign GM to each mean where the weight of each cluster equals or
        # just higher than the weight threshold.
        cluster_gms = _gm_group_cluster(gm_list=gm_list,
                                        distance_matrix=gm_distance_matrix,
                                        weight_threshold=weight_threshold)
        # Update mean
        for i in range(num_tracks):
            new_mean = np.zeros((z_dim,), dtype=np.float32)
            new_weight = 0.
            for gm in cluster_gms[i]:
                new_mean += gm.mean.flatten()[0:z_dim] * gm.weight
                new_weight += gm.weight
            if new_weight == 0.:
                new_weight = 1
            clusters_mean[i, :] = new_mean / new_weight
        # Update count
        count += 1
    # Assign ID to each cluster
    for i in range(num_tracks):
        # For every new track, start counting with max_id
        if track_id == 0 and i == 0:
            for gm in cluster_gms[i]:
                gm.mean[-1, :] = max_id
            max_id += 1
        elif i != 0:
            for gm in cluster_gms[i]:
                gm.mean[-1, :] = max_id
            max_id += 1
    return gm_list, max_id
f079867333a9e66f7b48782b899f060c38694220
3,658,929
def get_bprop_scatter_nd(self): """Generate bprop for ScatterNd""" op = P.GatherNd() def bprop(indices, x, shape, out, dout): return zeros_like(indices), op(dout, indices), zeros_like(shape) return bprop
3f2f5247b03ba49918e34534894c9c1761d02f07
3,658,930
import requests def delete_policy_rule(policy_key, key, access_token): """ Deletes a policy rule with the given key. Returns the response JSON. See http://localhost:8080/docs#/Policy/delete_rule_api_v1_policy__policy_key__rule__rule_key__delete """ return requests.delete( f"{FIDESOPS_URL}/api/v1/policy/{policy_key}/rule/{key}", headers=oauth_headers(access_token=access_token), )
b53e52b2498707b82e3ceaf89be667886c75ca3c
3,658,932
def knn_search_parallel(data, K, qin=None, qout=None, tree=None, t0=None,
                        eps=None, leafsize=None, copy_data=False):
    """ find the K nearest neighbours for data points in data,
    using an O(n log n) kd-tree, exploiting all logical
    processors on the computer. If eps is None, the K nearest
    neighbours of each point are returned; if eps is given, a
    fixed-radius (ball) search with per-point radius eps is
    performed instead. """
    # print("starting the parallel search")
    if eps is not None:
        assert data.shape[0] == len(eps)
    # build kdtree
    if copy_data:
        dataCopy = data.copy()
        # print('copied data')
    else:
        dataCopy = data
    if tree is None and leafsize is None:
        tree = ss.cKDTree(dataCopy)
    elif tree is None:
        tree = ss.cKDTree(dataCopy, leafsize=leafsize)
    if t0 is not None:
        print('time to tree formation: %f' % (clock() - t0))
    ndata = data.shape[0]
    nproc = 20
    # print('made the tree')
    # compute chunk size
    chunk_size = int(data.shape[0] / (4*nproc))
    chunk_size = 100 if chunk_size < 100 else chunk_size
    if qin is None or qout is None:
        # set up a pool of processes
        qin = processing.Queue(maxsize=int(ndata/chunk_size))
        qout = processing.Queue(maxsize=int(ndata/chunk_size))
    if eps is None:
        pool = [processing.Process(target=__remote_process_query,
                                   args=(rank, qin, qout, tree, K, leafsize))
                for rank in range(nproc)]
    else:
        pool = [processing.Process(target=__remote_process_ball,
                                   args=(rank, qin, qout, tree, leafsize))
                for rank in range(nproc)]
    for p in pool:
        p.start()
    # put data chunks in input queue
    cur, nc = 0, 0
    while 1:
        _data = data[cur:cur+chunk_size, :]
        if _data.shape[0] == 0:
            break
        if eps is None:
            qin.put((nc, _data))
        else:
            _eps = eps[cur:cur+chunk_size]
            qin.put((nc, _data, _eps))
        cur += chunk_size
        nc += 1
    # read output queue
    knn = []
    while len(knn) < nc:
        knn += [qout.get()]
    # avoid race condition
    _knn = [n for i, n in sorted(knn)]
    knn = []
    for tmp in _knn:
        knn += [tmp]
    # terminate workers
    for p in pool:
        p.terminate()
    if eps is None:
        output = np.zeros((sum([x.shape[0] for x in knn]), knn[0].shape[1]))
    else:
        output = np.zeros(sum([len(x) for x in knn]))
    outputi = 0
    for x in knn:
        if eps is None:
            nextVal = x.shape[0]
        else:
            nextVal = len(x)
        output[outputi:(outputi+nextVal)] = x
        outputi += nextVal
    return output
cc0dfaee8d1990f1d336e6a5e71973e1b4702e25
3,658,933
from typing import Tuple

def compute_vectors_from_coordinates(
    x: np.ndarray, y: np.ndarray, fps: int = 1
) -> Tuple[Vector, Vector, Vector, Vector, np.array]:
    """
    Given the X and Y position at each frame

    - Compute vectors:
        i. velocity vector
        ii. unit tangent
        iii. unit norm
        iv. acceleration

    and scalar quantities:
        i. speed
        ii. curvature

    See: https://stackoverflow.com/questions/28269379/curve-curvature-in-numpy
    """
    # compute velocity vector
    dx_dt = np.gradient(x)
    dy_dt = np.gradient(y)
    velocity = (
        np.array([[dx_dt[i], dy_dt[i]] for i in range(dx_dt.size)]) * fps
    )

    # compute scalar speed
    ds_dt = np.sqrt(dx_dt * dx_dt + dy_dt * dy_dt)

    # get unit tangent vector
    tangent = np.array([1 / ds_dt] * 2).transpose() * velocity
    unit_tangent = tangent / np.apply_along_axis(
        np.linalg.norm, 1, tangent
    ).reshape(len(tangent), 1)

    # get unit normal vector
    tangent_x = tangent[:, 0]
    tangent_y = tangent[:, 1]

    deriv_tangent_x = np.gradient(tangent_x)
    deriv_tangent_y = np.gradient(tangent_y)

    dT_dt = np.array(
        [
            [deriv_tangent_x[i], deriv_tangent_y[i]]
            for i in range(deriv_tangent_x.size)
        ]
    )

    length_dT_dt = np.sqrt(
        deriv_tangent_x * deriv_tangent_x + deriv_tangent_y * deriv_tangent_y
    )

    normal = np.array([1 / length_dT_dt] * 2).transpose() * dT_dt

    # get acceleration and curvature
    d2s_dt2 = np.gradient(ds_dt)
    d2x_dt2 = np.gradient(dx_dt)
    d2y_dt2 = np.gradient(dy_dt)

    curvature = (
        np.abs(d2x_dt2 * dy_dt - dx_dt * d2y_dt2)
        / (dx_dt * dx_dt + dy_dt * dy_dt) ** 1.5
    )
    t_component = np.array([d2s_dt2] * 2).transpose()
    n_component = np.array([curvature * ds_dt * ds_dt] * 2).transpose()

    acceleration = t_component * tangent + n_component * normal

    return (
        Vector(velocity),
        Vector(tangent),
        Vector(
            -unit_tangent[:, 1], unit_tangent[:, 0]
        ),  # normal as rotated tangent
        Vector(acceleration),
        curvature,
    )
073262b521f3da79945674cc60ea26fee4c87529
3,658,935
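A quick check on a unit circle, whose curvature is 1 everywhere; the `Vector` container above is assumed to be available from the enclosing library:

import numpy as np

t = np.linspace(0, 2 * np.pi, 1000)
*_, curvature = compute_vectors_from_coordinates(np.cos(t), np.sin(t))
# curvature is ~1.0 away from the endpoints (np.gradient is less
# accurate at the array boundaries)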
import requests

def get_now(pair):
    """
    Return last info for crypto currency pair
    :param pair: ex: btc-ltc
    :return: JSON dict with the latest one-minute tick
    """
    info = {'marketName': pair, 'tickInterval': 'oneMin'}
    return requests.get('https://bittrex.com/Api/v2.0/pub/market/GetLatestTick',
                        params=info).json()
b5db7ba5c619f8369c052a37e010229db7f78186
3,658,936
def hsv(h: float, s: float, v: float) -> int:
    """Convert HSV to RGB.

    :param h: Hue (0.0 to 1.0)
    :param s: Saturation (0.0 to 1.0)
    :param v: Value (0.0 to 1.0)
    """
    # Minimal sketch via colorsys, replacing a stub that returned a constant;
    # packing the result as 0xRRGGBB is an assumption.
    import colorsys
    r, g, b = colorsys.hsv_to_rgb(h, s, v)
    return (int(r * 255) << 16) | (int(g * 255) << 8) | int(b * 255)
638c1784f54ee51a3b7439f15dab45053a8c3099
3,658,938
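Two checks against the sketch above, under its assumed 0xRRGGBB packing:

assert hsv(0.0, 1.0, 1.0) == 0xFF0000  # pure red
assert hsv(0.5, 1.0, 1.0) == 0x00FFFF  # cyan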
def make_exponential_mask(img, locations, radius, alpha, INbreast=False): """Creating exponential proximity function mask. Args: img (np.array, 2-dim): the image, only it's size is important locations (np.array, 2-dim): array should be (n_locs x 2) in size and each row should correspond to a location [x,y]. Don't need to be integer, truncation is applied. NOTICE [x,y] where x is row number (distance from top) and y column number (distance from left) radius (int): radius of the exponential pattern alpha (float): decay rate INbreast (bool, optional): Not needed anymore, handled when parsing INbreast dataset Returns: mask (np.array, 0.0-1.0): Exponential proximity function """ # create kernel which we will be adding at locations # Kernel has radial exponential decay form kernel = np.zeros((2*radius+1,2*radius+1)) for i in range(0, kernel.shape[0]): for j in range(0, kernel.shape[1]): d = np.sqrt((i-radius)**2+(j-radius)**2) if d<= radius: kernel[i,j]=(np.exp(alpha*(1-d/radius))-1)/(np.exp(alpha)-1) # pad original img to avoid out of bounds errors img = np.pad(img, radius+1, 'constant').astype(float) # update locations locations = np.array(locations)+radius+1 locations = np.round(locations).astype(int) # initialize mask mask = np.zeros_like(img) for location in locations: if INbreast: y, x = location else: x, y = location # add kernel mask[x-radius:x+radius+1, y-radius:y+radius+1] =np.maximum(mask[x-radius:x+radius+1, y-radius:y+radius+1],kernel) # unpad mask = mask[radius+1:-radius-1,radius+1:-radius-1] return mask
14be02cee27405c3a4abece7654b7bf902a43a47
3,658,939
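A small usage check: the mask peaks at exactly 1.0 at the given location, since the kernel's center term is (exp(alpha) - 1)/(exp(alpha) - 1):

import numpy as np

mask = make_exponential_mask(np.zeros((64, 64)), [[32, 32]], 10, 2.0)
# mask[32, 32] == 1.0, decaying exponentially to 0 at distance 10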
from typing import Tuple def delete(client, url: str, payload: dict) -> Tuple[dict, bool]: """Make DELETE requests to K8s (see `k8s_request`).""" resp, code = request(client, 'DELETE', url, payload, headers=None) err = (code not in (200, 202)) if err: logit.error(f"{code} - DELETE - {url} - {resp}") return (resp, err)
8ed463e063b06a48b410112f163830778f887551
3,658,940
def f(x):
    """The objective is defined as the cost + a per-demographic penalty for
    each demographic not reached."""
    n = len(x)
    assert n == n_venues
    reached = np.zeros(n_demographics, dtype=int)
    cost = 0.0
    for xi, ri, ci in zip(x, r, c):
        if xi:
            reached = reached | ri
            # cost += ci  (the per-venue cost term is disabled here, so only
            # the unreached-demographic penalties below accumulate)
    for ri, pi in zip(reached, p):
        if ri == 0:
            cost += pi
    return cost
d6724595086b0facccae84fd4f3460195bc84a1f
3,658,941
def clamp(minVal, val, maxVal): """Clamp a `val` to be no lower than `minVal`, and no higher than `maxVal`.""" return max(minVal, min(maxVal, val))
004b9a393e69ca30f925da4cb18a8f93f12aa4ef
3,658,942
def get_closest_spot( lat: float, lng: float, area: config.Area ) -> t.Optional[config.Spot]: """Return closest spot if image taken within 100 m""" if not area.spots: return None distances = [ (great_circle((spot.lat, spot.lng), (lat, lng)).meters, spot) for spot in area.spots ] distance, closest_spot = min(distances) return closest_spot if distance < 100 else None
55424c3b5148209e62d51cc7c6e5759353f5cb0a
3,658,943
def drawBezier( page: Page, p1: point_like, p2: point_like, p3: point_like, p4: point_like, color: OptSeq = None, fill: OptSeq = None, dashes: OptStr = None, width: float = 1, morph: OptStr = None, closePath: bool = False, lineCap: int = 0, lineJoin: int = 0, overlay: bool = True, stroke_opacity: float = 1, fill_opacity: float = 1, oc: int = 0, ) -> Point: """Draw a general cubic Bezier curve from p1 to p4 using control points p2 and p3.""" img = page.newShape() Q = img.drawBezier(Point(p1), Point(p2), Point(p3), Point(p4)) img.finish( color=color, fill=fill, dashes=dashes, width=width, lineCap=lineCap, lineJoin=lineJoin, morph=morph, closePath=closePath, stroke_opacity=stroke_opacity, fill_opacity=fill_opacity, oc=oc, ) img.commit(overlay) return Q
7bd3c0b8e3ca8717447213c6c2ac8bc94ea0f029
3,658,944
from functools import reduce import operator def product(numbers): """Return the product of the numbers. >>> product([1,2,3,4]) 24 """ return reduce(operator.mul, numbers, 1)
102ac352025ffff64a862c4c5ccbdbc89bdf807e
3,658,945
def load_ref_system(): """ Returns l-phenylalanine as found in the IQMol fragment library. All credit to https://github.com/nutjunkie/IQmol """ return psr.make_system(""" N 0.7060 -1.9967 -0.0757 C 1.1211 -0.6335 -0.4814 C 0.6291 0.4897 0.4485 C -0.8603 0.6071 0.4224 C -1.4999 1.1390 -0.6995 C -2.8840 1.2600 -0.7219 C -3.6384 0.8545 0.3747 C -3.0052 0.3278 1.4949 C -1.6202 0.2033 1.5209 C 2.6429 -0.5911 -0.5338 O 3.1604 -0.2029 -1.7213 O 3.4477 -0.8409 0.3447 H -0.2916 -2.0354 -0.0544 H 1.0653 -2.2124 0.8310 H 0.6990 -0.4698 -1.5067 H 1.0737 1.4535 0.1289 H 0.9896 0.3214 1.4846 H -0.9058 1.4624 -1.5623 H -3.3807 1.6765 -1.6044 H -4.7288 0.9516 0.3559 H -3.5968 0.0108 2.3601 H -1.1260 -0.2065 2.4095 H 4.1118 -0.2131 -1.6830 """)
724e0d37ae5d811da156ad09d4b48d43f3e20d6a
3,658,946
from math import isclose
from typing import List

def range_with_bounds(start: int, stop: int, interval: int) -> List[int]:
    """Return the integer range [start, stop], appending `stop` when the
    stride does not land on it exactly."""
    result = [int(val) for val in range(start, stop, interval)]
    if not isclose(result[-1], stop):
        result.append(stop)
    return result
1667657d75f918d9a7527048ad4207a497a20316
3,658,948
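Two usage checks; `stop` is always included even when the stride misses it:

range_with_bounds(0, 10, 3)  # -> [0, 3, 6, 9, 10]
range_with_bounds(0, 12, 3)  # -> [0, 3, 6, 9, 12]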
import warnings

def iou_score(box1, box2):
    """Returns the Intersection-over-Union score, defined as the area of the
    intersection divided by the area of the union of the two bounding boxes.
    This measure is symmetric.

    Args:
        box1: The coordinates for box 1 as a list of points
        box2: The coordinates for box 2 in same format as box1.
    """
    if len(box1) == 2:
        x1, y1 = box1[0]
        x2, y2 = box1[1]
        box1 = np.array([[x1, y1], [x2, y1], [x2, y2], [x1, y2]])
    if len(box2) == 2:
        x1, y1 = box2[0]
        x2, y2 = box2[1]
        box2 = np.array([[x1, y1], [x2, y1], [x2, y2], [x1, y2]])
    if any(cv2.contourArea(np.int32(box)[:, np.newaxis, :]) == 0
           for box in [box1, box2]):
        warnings.warn('A box with zero area was detected.')
        return 0
    pc = pyclipper.Pyclipper()
    pc.AddPath(np.int32(box1), pyclipper.PT_SUBJECT, closed=True)
    pc.AddPath(np.int32(box2), pyclipper.PT_CLIP, closed=True)
    intersection_solutions = pc.Execute(pyclipper.CT_INTERSECTION,
                                        pyclipper.PFT_EVENODD,
                                        pyclipper.PFT_EVENODD)
    union_solutions = pc.Execute(pyclipper.CT_UNION,
                                 pyclipper.PFT_EVENODD,
                                 pyclipper.PFT_EVENODD)
    union = sum(cv2.contourArea(np.int32(points)[:, np.newaxis, :])
                for points in union_solutions)
    intersection = sum(
        cv2.contourArea(np.int32(points)[:, np.newaxis, :])
        for points in intersection_solutions)
    return intersection / union
746129ac390e045887ca44095af02370abb71d81
3,658,949
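A hypothetical call, assuming opencv-python and pyclipper are installed; two 2x2 squares overlapping in a 1x1 region give IoU = 1/7:

# iou_score([(0, 0), (2, 2)], [(1, 1), (3, 3)])  # -> ~0.1429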
def _actually_on_chip(ra, dec, obs_md): """ Take a numpy array of RA in degrees, a numpy array of Decin degrees and an ObservationMetaData and return a boolean array indicating which of the objects are actually on a chip and which are not """ out_arr = np.array([False]*len(ra)) d_ang = 2.11 good_radii = np.where(angularSeparation(ra, dec, obs_md.pointingRA, obs_md.pointingDec)<d_ang) if len(good_radii[0]) > 0: chip_names = chipNameFromRaDecLSST(ra[good_radii], dec[good_radii], obs_metadata=obs_md).astype(str) vals = np.where(np.char.find(chip_names, 'None')==0, False, True) out_arr[good_radii] = vals return out_arr
809bfb59f63a62ab236fb2a3199e28b4f0ee93fd
3,658,950
from typing import Tuple def outlier_dataset(seed=None) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]: """Generates Outliers dataset, containing 10'000 inliers and 50 outliers Args: seed: random seed for generating points Returns: Tuple containing the inlier features, inlier labels, outlier features and outlier labels """ if seed is not None: np.random.seed(seed) inlier_feats = np.concatenate( [np.random.normal(1, 1, 5000), np.random.normal(-1, 1, 5000)] ) inlier_labels = np.concatenate( [ np.ones((5000,)), -1 * np.ones((5000,)), ] ) outlier_feats = np.concatenate( [np.random.normal(-200, 1, 25), np.random.normal(200, 1, 25)] ) outlier_labels = np.concatenate( [ np.ones((25,)), -1 * np.ones((25,)), ] ) return inlier_feats, inlier_labels, outlier_feats, outlier_labels
1b19e66f151290047017bce76e581ae0e725626c
3,658,951
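A short usage check of the generated shapes:

X_in, y_in, X_out, y_out = outlier_dataset(seed=0)
# X_in.shape == (10000,), X_out.shape == (50,); labels are +/-1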
def posts(request, payload={}, short_id=None): """ Posts endpoint of the example.com public api Request with an id parameter: /public_api/posts/1qkx8 POST JSON in the following format: POST /public_api/posts/ {"ids":["1qkx8","ma6fz"]} """ Metrics.api_comment.record(request) ids = payload.get('ids') if short_id and not ids: try: comment = Comment.details_by_id(long_id(short_id), promoter=PublicAPICommentDetails) (comment,) = CachedCall.multicall([comment]) return comment.to_client() except (ObjectDoesNotExist, util.Base36DecodeException): raise ServiceError("Post not found") elif ids: ids = [long_id(x) for x in set(ids)] calls = [Comment.details_by_id(id, ignore_not_found=True, promoter=PublicAPICommentDetails) for id in ids] comments = CachedCall.multicall(calls, skip_decorator=True) return {'posts': [x.to_client() for x in comments if x]}
c8ec491638417fe972fe75c0cf9f26fe1cf877ae
3,658,952