Dataset columns:
    query             string, length 9 to 9.05k
    document          string, length 10 to 222k
    metadata          dict
    negatives         sequence, length 30
    negative_scores   sequence, length 30
    document_score    string, length 4 to 10
    document_rank     string, 2 classes
Write the concordance entries to the output file (filename). See the sample output files for the format.
def write_concordance(self, filename):
    # Build one line per key in the form "key: value1 value2 ..."
    all_keys = self.concordance_table.get_all_keys()
    lines = []
    for key in all_keys:
        line = key + ":"
        values = self.concordance_table.get_value(key)
        if values is not None:
            for s in values:
                line += " " + str(s)
        line += "\n"
        lines.append(line)
    # Write the concordance entries to the output file
    out_file = open(filename, "w+")
    for line in lines:
        out_file.write(line)
    out_file.close()
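A minimal usage sketch for the method above (not part of the original source). FakeTable and the SimpleNamespace holder are hypothetical stand-ins for whatever hash table backs concordance_table; they supply only the two calls the method relies on, get_all_keys() and get_value(key), and the method is invoked as a plain function.

import types

class FakeTable:
    """Hypothetical stand-in for the concordance hash table."""
    def __init__(self, data):
        self.data = data                      # e.g. {"apple": [1, 4], "pear": [2]}
    def get_all_keys(self):
        return sorted(self.data)
    def get_value(self, key):
        return self.data.get(key)

holder = types.SimpleNamespace(
    concordance_table=FakeTable({"apple": [1, 4], "pear": [2]}))
write_concordance(holder, "concordance_demo.txt")
# concordance_demo.txt now contains:
# apple: 1 4
# pear: 2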
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_concordance(self, filename):\n out = ''\n values = [x for x in self.concordance_table.hash_table if x is not None]\n values.sort(key=lambda x: x[0])\n for v in values:\n out += f'{v[0]}: {\" \".join(str(x) for x in sorted(set(v[1])))}\\n' \n with open(filename, 'w') as f:\n f.write(out.rstrip())", "def write_cando_file(self, file_name):\n cando_writer = CandoWriter(self.dna_structure)\n cando_writer.write(file_name)", "def _write_conductances(self, cond_file_name):\n cond_file_path = os.path.join(OM_STORAGE_DIR, cond_file_name)\n\n #TODO: Check that the file doesn't already exist.\n LOG.info(\"Writing head conductance file: %s\" % cond_file_path)\n file_handle = file(cond_file_path, \"a\")\n\n file_handle.write(\"# Properties Description 1.0 (Conductivities)\\n\\n\")\n file_handle.write(\"Air %4.2f\\n\" % self.conductances[\"air\"])\n file_handle.write(\"Scalp %4.2f\\n\" % self.conductances[\"skin\"])\n file_handle.write(\"Brain %4.2f\\n\" % self.conductances[\"brain\"])\n file_handle.write(\"Skull %4.2f\\n\" % self.conductances[\"skull\"])\n\n file_handle.close()\n LOG.info(\"%s written successfully.\" % cond_file_path)\n\n return cond_file_path", "def write_output(self):\n with open(self.filename, 'a', newline='', encoding='utf-8') as \\\n csv_file:\n csv_writer = csv.writer(csv_file)\n if os.stat(self.filename).st_size == 0:\n # if the csv file needs a headers\n csv_writer.writerow(Configurations.header)\n for quote in self.quotes_objects:\n csv_writer.writerow(quote.info)", "def write_conll(conll_file, sents):\n with codecs.open(conll_file, mode = 'w', errors = 'ignore', encoding = 'utf-8') as ofile:\n for sent in sents:\n if sent:\n for element in sent:\n word = element[0]\n tag = element[1]\n ofile.write(str(tag) + '\\t' + str(word) + '\\n')\n ofile.write('\\n')", "def write_CA_atoms():\n \n import os\n choice = input('Enter the name of the file: ')\n filepath = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Data', choice)\n ca_list = []\n with open(filepath, 'r') as pdb:\n for line in pdb:\n if line[:4] == 'ATOM' and line[12:16] == \" CA \":\n line_split = line.split()[6:9]\n ca_list.append(line_split)\n choice1 = input('Enter name of the outfile: ')\n filepath1 = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Results', choice1)\n with open(filepath1, 'w') as outfile:\n for i in ca_list:\n outfile.writelines(i)\n print('Done!')\n print(i)", "def write_output():\n f = open(OUTPUT_FILE, 'w')\n for case_index, words in get_output():\n f.write('Case #%d: %s\\n' % (case_index, ' '.join(words)))\n f.close()", "def file_output(matches: list, output_file_name: str = 'matches.txt'):\n with open(\"test/Matches/\" + output_file_name, 'w') as f:\n for match in matches:\n for event in match.events:\n f.write(\"%s\\n\" % event.payload)\n f.write(\"\\n\")", "def write_output_file(filename, actions, log):\n f = open(filename, 'w')\n\n for i in range(len(actions)):\n f.write(str(actions[i]))\n if i < len(actions) - 1:\n f.write(',')\n f.write('\\n')\n\n for k in log.keys():\n f.write(str(k) + ' = ' + str(log.get(k)))\n f.write('\\n')\n\n f.close()", "def write_output_file(filename, actions):\n f = open(filename, 'w')\n for i in range(len(actions)):\n f.write(str(actions[i]))\n if i < len(actions) - 1:\n f.write(',')\n f.write('\\n')\n f.close()", "def write_output_file(filename, actions):\n f = open(filename, 'w')\n for i in range(len(actions)):\n f.write(str(actions[i]))\n if i < len(actions) - 1:\n f.write(',')\n f.write('\\n')\n f.close()", "def 
write_output_file(filename, actions):\n f = open(filename, 'w')\n for i in range(len(actions)):\n f.write(str(actions[i]))\n if i < len(actions) - 1:\n f.write(',')\n f.write('\\n')\n f.close()", "def write_corpus_to_file(output_file, corpus): \n \n file = open(output_file, 'w')\n for line in corpus: \n file.write(line)\n print ('Corpus has been writted in file')\n file.close()", "def write_file(self, filename):\n\n with open(filename, 'w', newline = '') as csvfile:\n langwriter = csv.writer(csvfile, delimiter=' ',\n quotechar='|', quoting=csv.QUOTE_MINIMAL)\n for key in self.features:\n value = self.features[key]\n l = []\n for val in value:\n l.append(str(val))\n langwriter.writerow([l])\n return", "def write_cn_cards(bc_file, bc_class):\n cn = bc_class.constituent_properties\n bc_file.write('! Constituent Properties\\n')\n if not cn.general_constituents.empty:\n # bc_file.write(cn.general_constituents.to_csv(sep=' ', index=False, header=False).replace('\\r\\n', '\\n'))\n for index, row in bc_class.constituent_properties.general_constituents.iterrows():\n bc_file.write(\n 'CN CON {} {}\\n'.format(row['ID'].astype('int'), row['CONC']))\n if not cn.sand.empty:\n # bc_file.write(cn.sand.to_csv(sep=' ', index=False, header=False).replace('\\r\\n', '\\n'))\n for index, row in bc_class.constituent_properties.sand.iterrows():\n bc_file.write(\n 'CN SND {} {} {} {} {}\\n'.format(row['ID'].astype('int'), *row[['C_0', 'C_1', 'C_2', 'C_3']].values))\n if not cn.clay.empty:\n # bc_file.write(cn.clay.to_csv(sep=' ', index=False, header=False).replace('\\r\\n', '\\n'))\n for index, row in bc_class.constituent_properties.clay.iterrows():\n bc_file.write(\n 'CN CLA {} {} {} {} {}\\n'.format(row['ID'].astype('int'), *row[['C_0', 'C_1', 'C_2', 'C_3']].values))\n if cn.salinity:\n bc_file.write('CN SAL {} {}\\n'.format(cn.salinity_id, cn.reference_concentration))\n if cn.temperature:\n bc_file.write('CN TMP {} {}\\n'.format(cn.temperature_id, cn.reference_temperature))\n if cn.vorticity:\n bc_file.write('CN VOR {} {} {} {}\\n'.format(cn.vorticity_id, cn.vorticity_normalization,\n cn.vorticity_as_term, cn.vorticity_ds_term))\n\n bc_file.write('\\n') # blank line at the end of the Constituent Properties", "def write_to_file_ann(self) -> None:\n with open(self.output_file_path, mode='w', newline='') as csv_file:\n tweet = ['id', 'created_time', 'text']\n writer = csv.DictWriter(csv_file, fieldnames=tweet)\n writer.writeheader()\n for tweet in self.unique_tweets:\n try:\n writer.writerow(tweet)\n except:\n pass\n print(\"Tweets written to a file\")", "def write_conformers(self, filename): # ccids):\n cnt = 0\n for confId in range(self.nconf): #ccids:\n w = Chem.SDWriter('%s_c%03d.sdf'%(filename,cnt+1))\n w.write(self.mol, confId=confId)\n w.flush()\n w.close()\n cnt += 1", "def result_file(accession_list):\n with open(\"../accessions_list.txt\", 'w') as file:\n file.write(accession_list)", "def writeCC(self, fileName, allSCC):\n f = open(fileName,'w')\n\n for compNumber in range(0,len(allSCC)):\n f.write(\"Component number %s: \" % (compNumber))\n f.write(\"%s\\n\" % (str(allSCC[compNumber])))\n f.close()", "def write_output(arr, filename):\n print('Started writing the output..')\n f = open(filename, 'w')\n for a in arr:\n f.write(str(a) + '\\n')\n f.close()\n print('Done!, Open the file to see the approved loans.')", "def write_crf_input(out_file, sentences, poss, lemmas, concepts):\n\n print '\\n\\tWrite out data in crf compliant format'\n f = open(out_file, 'w+')\n for position_i in range(len(sentences)):\n 
for position_j in range(len(sentences[position_i])):\n f.write(\n sentences[ position_i ][ position_j ] + '\\t' +\n poss[ position_i ][ position_j ] + '\\t' +\n lemmas[ position_i ][ position_j ] + '\\t' +\n concepts[ position_i ][ position_j ]\n + '\\n'\n )\n f.write('\\n')\n f.close()\n print '\\t--done'", "def write_output_file(ad_models):\n\n with open('output-data-utf8.csv', 'w', newline='', encoding='UTF-8') as output_file:\n csv_writer = csv.writer(output_file, delimiter=',')\n for ad in ad_models:\n csv_writer.writerow((ad.date.strftime('%Y/%m/%d'), ad.country_code, ad.impression, ad.clicks))", "def writeChronListToFile(self):\n ## write header\n for header_line in self.outData['header']:\n self.outFile.write(header_line + '\\n')\n ##loop through each msg list\n for msg_list in self.outData_temp:\n ## create line\n msg_line = reconstructLine(msg_list)\n ## write to file\n self.outFile.write(msg_line + '\\n')", "def write_dialogue_to_file(utterances, dialogue_index, filename):\n with open(filename, 'a') as file:\n for sentence_index in range(len(utterances[dialogue_index][0])):\n file.write('{0} {1}\\n'.format(utterances[dialogue_index][0][sentence_index],\n utterances[dialogue_index][1][sentence_index]))", "def write(self, filename):\n pass", "def write(self, filename):\n pass", "def write_to_file(info, mode='w', file=\"output4.txt\"):\n with open(file, mode, encoding='utf-8') as f:\n for line in info:\n f.write(' '.join(map(str, line)) + '\\n')", "def export(self, fname):\n f = open(fname, 'w')\n for ue in self.ue_list:\n line_components = list()\n line_components.append(ue.expression)\n line_components.append(ue.meaning)\n print >>f, '\\t'.join(line_components).encode('utf-8')", "def write_conll(cls, filename, writer, document_id, sentences):\n with open(filename, 'w') as fd:\n writer.write(fd, document_id, sentences)", "def write_output(basis, filename):\n\n logging.info('Writing output to {}'.format(filename))\n\n basis.to_csv(filename)" ]
[ "0.7794726", "0.66742295", "0.64932483", "0.64526165", "0.6379942", "0.63655496", "0.63634735", "0.62910575", "0.6240714", "0.6233921", "0.6233921", "0.6233921", "0.61785156", "0.61412483", "0.61257005", "0.610843", "0.6082861", "0.60720426", "0.6064205", "0.60603034", "0.59847915", "0.5953382", "0.5949586", "0.59256744", "0.59232116", "0.59232116", "0.5918855", "0.5918259", "0.591524", "0.59104925" ]
0.7876976
0
Builds a k-factor circulant matrix (a matrix with the structure of a circulant matrix, but with the entries above the diagonal multiplied by the same factor k). The matrix is stored in memory.
def factor_circulant_matrix(x, k):
    n = len(x)
    return circulant(x) * (tri(n, n, 0) + k*np.transpose(tri(n, n, -1)))
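A small usage sketch for the function above. The imports are assumptions, since the snippet uses circulant, tri, and np as bare names without showing where they come from (scipy.linalg.circulant and numpy.tri fit the call signatures).

import numpy as np
from numpy import tri
from scipy.linalg import circulant

x = np.array([1.0, 2.0, 3.0])
C_k = factor_circulant_matrix(x, k=5.0)
# The lower triangle (diagonal included) keeps the circulant entries,
# while the entries above the diagonal are multiplied by k:
# [[ 1. 15. 10.]
#  [ 2.  1. 15.]
#  [ 3.  2.  1.]]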
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_k_matrix(self):\r\n K = self.uv_vol + self.Epsilon * self.guv_vol + \\\r\n (self.Epsilon / self.Beta) * self.uv_bound\r\n return K", "def _K(m):\n M = m*(m - 1)/2\n K = np.zeros((M, m**2), dtype=np.int64)\n row = 0\n for j in range(1, m):\n col = (j - 1)*m + j\n s = m - j\n K[row:(row+s), col:(col+s)] = np.eye(s)\n row += s\n return K", "def K(self):\n\n # Calculate and return the stiffness matrix in global coordinates\n return matmul(matmul(inv(self.T()), self.k()), self.T())", "def cofactorMatrix(self):\n returnvalue = Matrix()\n for i in range(self._height):\n newRow = list()\n for j in range(self._width):\n newRow.append(self.cofactor(i, j))\n returnvalue.addRow(*newRow)\n return returnvalue", "def nCk(n, k):\n return factorial(n)//factorial(k)//factorial(n-k)", "def calc_big_K(T, n_factors, tau, var_n, out=None):\n if out is None:\n K = np.zeros((T * n_factors, T * n_factors))\n else:\n K = out\n for delta_t in range(T):\n diag = calc_K(tau, delta_t, var_n)\n diag = np.tile(diag, T - delta_t)\n idxs_0 = np.arange(0, (T - delta_t) * n_factors)\n idxs_1 = np.arange(delta_t * n_factors, T * n_factors)\n K[idxs_0, idxs_1] = diag\n K[idxs_1, idxs_0] = diag\n return K", "def nCr(n, k):\n if n < k:\n return 0\n f = math.factorial\n return f(n) / f(k) / f(n - k)", "def jordan_wigner_ladder_sparse(n_qubits, tensor_factor, ladder_type):\n parities = tensor_factor * [pauli_z_csc]\n identities = [\n scipy.sparse.identity(2**(n_qubits - tensor_factor - 1),\n dtype=complex,\n format='csc')\n ]\n if ladder_type:\n operator = kronecker_operators(parities + [q_raise_csc] + identities)\n else:\n operator = kronecker_operators(parities + [q_lower_csc] + identities)\n return operator", "def ckm(i,j):\n if i >= 1 and i <= 3 and j >= 1 and j <= 3:\n return _ckm_abs[i-1, j-1]\n else:\n raise(ValueError('Wrong generation index in CKM matrix: ({},{}).'.format(i,j)))", "def power_matrix(A, k):\n nrow = np.shape(A)[0]\n A0 = np.identity(nrow) \n for k in range(q):\n A0 = np.dot(A0, A)\n \n return A0", "def factor_circulant_multiplication(u, x, k=1):\n n = len(u) \n D_k = (k**(1/n))**np.arange(0,n)\n Lambda = fft(D_k*x)\n return (1/D_k)*real(ifft(Lambda*fft(D_k*u))) # y", "def Cijkl(C):\n c = np.zeros(shape=(3, 3, 3, 3))\n CC = np.zeros(shape=(9, 9))\n CC[0:6, 0:6] = C[0:6, 0:6]\n CC[6:9, 6:9] = C[3:6, 3:6]\n CC[0:6, 6:9] = C[0:6, 3:6]\n CC[6:9, 0:6] = C[3:6, 0:6]\n\n c[0, 0, 0, 0] = CC[0, 0]\n c[0, 0, 1, 1] = CC[0, 1]\n c[0, 0, 2, 2] = CC[0, 2]\n c[0, 0, 1, 2] = CC[0, 3]\n c[0, 0, 2, 0] = CC[0, 4]\n c[0, 0, 0, 1] = CC[0, 5]\n c[0, 0, 2, 1] = CC[0, 6]\n c[0, 0, 0, 2] = CC[0, 7]\n c[0, 0, 1, 0] = CC[0, 8]\n\n c[1, 1, 0, 0] = CC[1, 0]\n c[1, 1, 1, 1] = CC[1, 1]\n c[1, 1, 2, 2] = CC[1, 2]\n c[1, 1, 1, 2] = CC[1, 3]\n c[1, 1, 2, 0] = CC[1, 4]\n c[1, 1, 0, 1] = CC[1, 5]\n c[1, 1, 2, 1] = CC[1, 6]\n c[1, 1, 0, 2] = CC[1, 7]\n c[1, 1, 1, 0] = CC[1, 8]\n\n c[2, 2, 0, 0] = CC[2, 0]\n c[2, 2, 1, 1] = CC[2, 1]\n c[2, 2, 2, 2] = CC[2, 2]\n c[2, 2, 1, 2] = CC[2, 3]\n c[2, 2, 2, 0] = CC[2, 4]\n c[2, 2, 0, 1] = CC[2, 5]\n c[2, 2, 2, 1] = CC[2, 6]\n c[2, 2, 0, 2] = CC[2, 7]\n c[2, 2, 1, 0] = CC[2, 8]\n\n c[1, 2, 0, 0] = CC[3, 0]\n c[1, 2, 1, 1] = CC[3, 1]\n c[1, 2, 2, 2] = CC[3, 2]\n c[1, 2, 1, 2] = CC[3, 3]\n c[1, 2, 2, 0] = CC[3, 4]\n c[1, 2, 0, 1] = CC[3, 5]\n c[1, 2, 2, 1] = CC[3, 6]\n c[1, 2, 0, 2] = CC[3, 7]\n c[1, 2, 1, 0] = CC[3, 8]\n\n c[2, 0, 0, 0] = CC[4, 0]\n c[2, 0, 1, 1] = CC[4, 1]\n c[2, 0, 2, 2] = CC[4, 2]\n c[2, 0, 1, 2] = CC[4, 3]\n c[2, 0, 2, 0] = CC[4, 4]\n c[2, 0, 0, 1] = CC[4, 5]\n c[2, 0, 2, 
1] = CC[4, 6]\n c[2, 0, 0, 2] = CC[4, 7]\n c[2, 0, 1, 0] = CC[4, 8]\n\n c[0, 1, 0, 0] = CC[5, 0]\n c[0, 1, 1, 1] = CC[5, 1]\n c[0, 1, 2, 2] = CC[5, 2]\n c[0, 1, 1, 2] = CC[5, 3]\n c[0, 1, 2, 0] = CC[5, 4]\n c[0, 1, 0, 1] = CC[5, 5]\n c[0, 1, 2, 1] = CC[5, 6]\n c[0, 1, 0, 2] = CC[5, 7]\n c[0, 1, 1, 0] = CC[5, 8]\n\n c[2, 1, 0, 0] = CC[6, 0]\n c[2, 1, 1, 1] = CC[6, 1]\n c[2, 1, 2, 2] = CC[6, 2]\n c[2, 1, 1, 2] = CC[6, 3]\n c[2, 1, 2, 0] = CC[6, 4]\n c[2, 1, 0, 1] = CC[6, 5]\n c[2, 1, 2, 1] = CC[6, 6]\n c[2, 1, 0, 2] = CC[6, 7]\n c[2, 1, 1, 0] = CC[6, 8]\n\n c[0, 2, 0, 0] = CC[7, 0]\n c[0, 2, 1, 1] = CC[7, 1]\n c[0, 2, 2, 2] = CC[7, 2]\n c[0, 2, 1, 2] = CC[7, 3]\n c[0, 2, 2, 0] = CC[7, 4]\n c[0, 2, 0, 1] = CC[7, 5]\n c[0, 2, 2, 1] = CC[7, 6]\n c[0, 2, 0, 2] = CC[7, 7]\n c[0, 2, 1, 0] = CC[7, 8]\n\n c[1, 0, 0, 0] = CC[8, 0]\n c[1, 0, 1, 1] = CC[8, 1]\n c[1, 0, 2, 2] = CC[8, 2]\n c[1, 0, 1, 2] = CC[8, 3]\n c[1, 0, 2, 0] = CC[8, 4]\n c[1, 0, 0, 1] = CC[8, 5]\n c[1, 0, 2, 1] = CC[8, 6]\n c[1, 0, 0, 2] = CC[8, 7]\n c[1, 0, 1, 0] = CC[8, 8]\n return c", "def expansion_matrix_c(self):\n row = np.zeros(0)\n nnz = 0\n col = np.arange(nnz, dtype=np.int)\n data = np.zeros(nnz)\n return csr_matrix((data, (row, col)), shape=(self.ng, nnz))", "def _Kdiag(self, X):\r\n return self.mapping.f(X).flatten()**2", "def matrix_K1(l, omega, S, cn, csn, rhos, rho):\n zt = omega * S / cn['t']\n xt = omega * S / csn['t']\n row1 = np.array((- d21(l, zt), d23(l, xt)))\n row2 = np.array((- d41(l, zt), d43(l, xt, zt, rhos, rho)))\n return np.array((row1, row2))", "def k(self):\n return add(self.k_b(), self.k_m())", "def _compute_kTable(self, expand=False, factor=False, simplify=False):\n if self._has(\"k\"):\n return\n if self._has(\"p\"):\n k = tuple(self._.p[0, i, i] for i in range(self._.d + 1))\n else:\n if not self._has(\"P\"):\n self.eigenmatrix(expand=expand, factor=factor,\n simplify=simplify)\n k = tuple(integralize(x) for x in self._.P[0])\n assert k[0] == 1, \\\n \"the valency of the first relation is not 1\"\n self._.k = k", "def kronecker_graph(g, k, add_self_edges=True, strip_self_edges=True):\n\n adj = nx.adjacency_matrix(g).todense()\n if add_self_edges:\n for i in range(len(adj)):\n adj[i, i] = 1\n mat = adj\n for i in range(k - 1):\n mat = np.kron(mat, adj)\n if strip_self_edges:\n for i in range(len(mat)):\n mat[i, i] = 0\n name = \"kronecker(%s, %s, %s, %s)\" % (\n g.name if g.name else hash(g), k, add_self_edges, strip_self_edges)\n return nx.Graph(mat, name=name)", "def nCkarray(*k_values):\n result = 1\n for i, j in enumerate((m for k in k_values for m in range(1, k+1)), 1):\n result = (result * i) // j\n return result", "def calc_k(self):\n\t\n\tself.k = -np.array([self.sth*self.cphi, self.sth*self.sphi, self.cth])\n\n\treturn", "def cdf(self, k):\n\n if k < 0 or k > self.n:\n return 0\n\n k = int(k)\n ans = 0\n for i in range(0, k + 1):\n ans += self.pmf(i)\n return ans", "def _knn_matrix(x, k=16, self_loop=True):\n x = x.transpose(2, 1).squeeze(-1)\n batch_size, n_points, n_dims = x.shape\n if self_loop:\n _, nn_idx = torch.topk(-_pairwise_distance(x.detach()), k=k)\n else:\n _, nn_idx = torch.topk(-_pairwise_distance(x.detach()), k=k+1)\n nn_idx = nn_idx[:, :, 1:]\n center_idx = torch.arange(0, n_points).repeat(batch_size, k, 1).transpose(2, 1)\n center_idx = center_idx.to(x.device)\n return torch.stack((nn_idx, center_idx), dim=0)", "def matrices(self):\n # Creating L\n L = scipy.sparse.diags((self.inv_dx2, -2*self.inv_dx2, self.inv_dx2, 1),\n (-(self.N+1), -self.N, -(self.N-1), self.N),\n 
shape=(2*self.N, 2*self.N), dtype=np.complex128)\n self.L = scipy.sparse.csr_matrix(L)\n self.L[-(self.N+1), 0], self.L[-1, -self.N] = 0, 0\n\n # Computing largest eigenvalue of L explicitely:\n self.mu_max = self.inv_dx*np.sqrt(2*(1 + np.cos(np.pi/(self.N+1))))\n\n # Creating K\n self.K = scipy.sparse.diags((-self.inv_dx2, 2*self.inv_dx2, -self.inv_dx2),\n (-1, 0, 1), # Diagonals\n shape=(self.N, self.N), # Size of matrix\n dtype=np.complex128)", "def kronecker(self, value):\n if not (type(self) == type(value)):\n raise TypeError(\"Inappropriate argument type for kronecker product\")\n returnvalue = Matrix()\n for i in range(self._height):\n for j in range(value._height):\n newRow = list()\n for k in range(self._width):\n for l in range(value._width):\n newRow.append(self[i][k] * value[j][l])\n returnvalue.addRow(*newRow)\n return returnvalue", "def __factor_matrix(self, R, K, alpha, steps, beta, error_limit):\n # Transform regular array to numpy array\n R = numpy.array(R)\n\n # Generate P - N x K\n # Use random values to start. Best performance\n N = len(R)\n M = len(R[0])\n P = numpy.random.rand(N, K)\n\n # Generate Q - M x K\n # Use random values to start. Best performance\n Q = numpy.random.rand(M, K)\n Q = Q.T\n\n error = 0\n\n # iterate through max # of steps\n for step in xrange(steps):\n\n # iterate each cell in r\n for i in xrange(len(R)):\n for j in xrange(len(R[i])):\n if R[i][j] > 0:\n\n # get the eij (error) side of the equation\n eij = R[i][j] - numpy.dot(P[i, :], Q[:, j])\n\n for k in xrange(K):\n # (*update_rule) update pik_hat\n P[i][k] = P[i][k] + alpha * (2 * eij * Q[k][j] - beta * P[i][k])\n\n # (*update_rule) update qkj_hat\n Q[k][j] = Q[k][j] + alpha * ( 2 * eij * P[i][k] - beta * Q[k][j] )\n\n # Measure error\n error = self.__error(R, P, Q, K, beta)\n\n # Terminate when we converge\n if error < error_limit:\n break\n\n # track Q, P (learned params)\n # Q = Products x feature strength\n # P = Users x feature strength\n self.Q = Q.T\n self.P = P\n\n self.__print_fit_stats(error, N, M)", "def C(n,k):\n if 0 <= k <= n:\n ntok = 1\n ktok = 1\n for t in xrange(1, min(k, n - k) + 1):\n ntok *= n\n ktok *= t\n n -= 1\n return ntok // ktok\n else:\n return 0", "def make_mat_cp_le(cons_pot_mesh, lin_geo_mesh):\n pot_faces = cons_pot_mesh.get_faces()\n assert pot_faces.shape[0] == lin_geo_mesh.get_faces().shape[0]\n num_faces = pot_faces.shape[0]\n K = np.zeros((3 * num_faces, 3 * num_faces))\n add_cp_le_DL_terms(K, cons_pot_mesh, lin_geo_mesh)\n add_cp_le_RBM_terms(K, cons_pot_mesh, lin_geo_mesh)\n return K", "def fastdiag_solver(KM):\n dim = len(KM)\n n = tuple(K.shape[0] for (K,_) in KM)\n EV = [scipy.linalg.eigh(_asdense(K), _asdense(M)) for (K,M) in KM]\n\n diags = []\n for d in range(dim):\n D = [np.ones(n[j]) for j in range(dim)]\n D[d] = EV[d][0] # eigenvalues\n diags.append(reduce(np.kron, D))\n diag = sum(diags)\n\n l_op = KroneckerOperator(*tuple(U for (_,U) in EV))\n r_op = KroneckerOperator(*tuple(U.T for (_,U) in EV))\n\n return l_op * DiagonalOperator(1.0 / diag) * r_op", "def make_mat_cp_qe(cons_pot_mesh, quad_geo_mesh):\n pot_faces = cons_pot_mesh.get_faces()\n assert pot_faces.shape[0] == quad_geo_mesh.get_faces().shape[0]\n num_faces = pot_faces.shape[0]\n K = np.zeros((3 * num_faces, 3 * num_faces))\n add_cp_qe_DL_terms(K, cons_pot_mesh, quad_geo_mesh)\n add_cp_qe_RBM_terms(K, cons_pot_mesh, quad_geo_mesh)\n return K", "def bc_outgoing_mat(n, h, k):\n \n d = [1.0, 2.0j*k*h]\n i = [n-1, n-1]\n j = [n-2, n-1]\n return scipy.sparse.coo_matrix((d, (i, j)))" ]
[ "0.6495986", "0.6089255", "0.6045119", "0.59890914", "0.5949488", "0.59035623", "0.5859298", "0.58462423", "0.57634705", "0.574443", "0.5730508", "0.5717386", "0.56819576", "0.566873", "0.5568253", "0.55545205", "0.5523086", "0.55172205", "0.5492196", "0.5491694", "0.5478032", "0.545727", "0.54372895", "0.5429208", "0.54242074", "0.54238397", "0.5373548", "0.5370893", "0.5370422", "0.5327783" ]
0.78092545
0
Compute the matrix-vector product y = Cu, where C is a k-factor circulant matrix. All matrices are real.
def factor_circulant_multiplication(u, x, k=1):
    n = len(u)
    D_k = (k**(1/n))**np.arange(0, n)
    Lambda = fft(D_k*x)
    return (1/D_k)*real(ifft(Lambda*fft(D_k*u)))  # y
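A usage sketch checking the FFT-based product against an explicitly built k-factor circulant matrix. The imports are assumptions (the snippet uses fft, ifft, real, and np as bare names), and the reference matrix is rebuilt inline with numpy rather than reusing factor_circulant_matrix.

import numpy as np
from numpy import real
from numpy.fft import fft, ifft
from scipy.linalg import circulant

rng = np.random.default_rng(0)
n, k = 6, 3.0
x = rng.standard_normal(n)      # first column of the k-factor circulant matrix C
u = rng.standard_normal(n)

y_fft = factor_circulant_multiplication(u, x, k)          # O(n log n) product
C = circulant(x) * (np.tri(n) + k * np.tri(n, k=-1).T)    # explicit C, O(n^2) reference
print(np.allclose(y_fft, C @ u))                          # expected: True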
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def updateC(A, U, B):\n \n m_dim = A.shape[1] \n q_dim = B.shape[0]\n \n C_tensor = np.zeros((m_dim, m_dim, q_dim), dtype=np.complex)\n \n for k in range(q_dim):\n A_k = A[:, :, k]\n b_k = B[k]\n \n x_hat = U @ b_k\n y_hat = A_k.conj().T @ x_hat\n \n phase_y = np.exp(1j*np.angle(y_hat))\n #phase_y = np.sign(y_hat)\n C_k = np.diag(phase_y)\n C_tensor[:, :, k] = C_k\n \n \n return C_tensor", "def matrix_vector_prod(m,u):\n each_product = []\n for v in m:\n each_product.append(dot_prod(v, u))\n return each_product", "def factor_circulant_matrix(x, k):\n n=len(x)\n return circulant(x) * (tri(n,n, 0) + k*np.transpose(tri(n,n, -1)))", "def covar(fx,cx):\n \n fx = np.array(fx)\n cx = np.array(cx)\n \n shape_fx = fx.shape\n shape_cx = cx.shape\n \n \n if shape_fx[1] != shape_cx[0]:\n print('-----------------------------------------')\n print(\"Shapes of fx and cx cannot be multiplied:\")\n print(shape_fx,\"x\",shape_cx)\n print('-----------------------------------------')\n raise ValueError('Input matrices are not compliant')\n \n cy = np.dot(np.dot(fx,cx),fx.T)\n \n print(\"Size of Cy matrix: \",np.shape(cy))\n \n return cy", "def __matmul__(self, q: np.ndarray) -> np.ndarray:\n return self.product(q)", "def cofactorMatrix(self):\n returnvalue = Matrix()\n for i in range(self._height):\n newRow = list()\n for j in range(self._width):\n newRow.append(self.cofactor(i, j))\n returnvalue.addRow(*newRow)\n return returnvalue", "def c_matrix(x1,x2,x3):\n\tC = np.array([\t[\t2*(x2-x1), \t\t(x2-x1), \t\t\t0\t\t\t], \\\n\t\t\t\t\t[\t(x2-x1), \t\t2*(x3-x1), \t\t(x3-x2)\t\t], \\\n\t\t\t\t\t[\t0,\t\t\t\t(x3-x2),\t\t2*(x3-x2)\t] \t], \\\n\t\t\t\t\tfloat)\n\treturn(C)", "def __matmul__(self, csys):\n self._transform(csys)\n return self", "def method3(self):\n cres=0.\n Ux_aloc=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n Uy_aloc=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n for ix in range(self.kS.Nx+1):\n for iy in range(self.kS.Ny+1):\n mat1=self.ALDM[ix ,iy, : , : ]\n mat2=self.ALDM[(ix%self.kS.Nx)+1, iy, : , : ]\n mat3=self.ALDM[ix ,(iy%self.kS.Ny)+1, : , : ]\n \n Ux_aloc[ix,iy]=np.linalg.det(np.dot(np.conj(mat1.T),mat2)[self.NL-1:,self.NL-1:])\n Uy_aloc[ix,iy]=np.linalg.det(np.dot(np.conj(mat1.T),mat3)[self.NL-1:,self.NL-1:])\n\n for ix in range(self.kS.Nx):\n for iy in range(self.kS.Ny):\n ftemp=np.log(Ux_aloc[ix,iy]*Uy_aloc[ix+1,iy]/Ux_aloc[ix,iy+1]/Uy_aloc[ix,iy])\n cres+=ftemp/2./pi/1j\n \n return cres.real\n #End of method3", "def circulant_multiplication(u, a):\n \n return real(ifft(fft(a)*fft(u)))", "def toeplitz_multiplication(u, c, r=None):\n n = len(u)\n if r is None:\n r = c\n u1 = zeros((2*n))\n u1[0:n] = u\n \n c = np.concatenate((c, [0], r[-1:0:-1])) \n \n y1 = circulant_multiplication(u1, c)\n \n return y1[0:n]", "def compute_factor(X, v, c1, c2):\n\n assert np.shape(v)[1] == 1,\"v is not a column vector\"\n\n v = normalize_l2(v)\n\n sz_u = np.shape(X)[0]\n sz_v = np.shape(X)[1]\n\n assert sz_v == np.size(v)\n\n u = update_with_delta(X @ v, c1)\n v = update_with_delta(X.T @ u, c2)\n\n delta_u = 1000\n delta_v = 1000\n\n while delta_u > 1e-5 or delta_v > 1e-5:\n oldU = u\n oldV = v\n\n u = update_with_delta(X @ v, c1)\n v = update_with_delta(X.T @ u, c2)\n\n delta_u = npla.norm(u - oldU) / sz_u\n delta_v = npla.norm(v - oldV) / sz_v\n\n d = u.T @ X @ v\n\n return (d,u,v)", "def kronecker_prod(x, y):\n if len(list(x.size())) != 3 or len(list(y.size())) != 3:\n raise ValueError(\"An input is not of the right dimension.\")\n\n z = torch.zeros(\n 2,\n x.size()[1] * 
y.size()[1],\n x.size()[2] * y.size()[2],\n dtype=torch.double,\n device=x.device,\n )\n\n row_count = 0\n\n for i in range(x.size()[1]):\n for k in range(y.size()[1]):\n column_count = 0\n for j in range(x.size()[2]):\n for l in range(y.size()[2]):\n\n z[0][row_count][column_count] = (x[0][i][j] * y[0][k][l]) - (\n x[1][i][j] * y[1][k][l]\n )\n z[1][row_count][column_count] = (x[0][i][j] * y[1][k][l]) + (\n x[1][i][j] * y[0][k][l]\n )\n\n column_count += 1\n row_count += 1\n\n return z", "def simple_doct_product(u, v):\n v = [i / (sum(v)) for i in v]\n\n return np.dot(u, v)", "def cofactor_matrix(self):\n resp = []\n len_b = len(self.take_vec())\n for i in range(self.order):\n _matrix = aux.cofactor(self.take_matrix(),\n (i, self.order-1)\n )\n _resp = math.pow(-1, len_b-1)\n _resp = _resp * np.linalg.det(_matrix)\n _resp = _resp * math.pow(-1, i * (self.order-1))\n resp.append(int(round(_resp)))\n\n return resp", "def _C(self):\n\n # Find the local x and y coordinates at each node\n xi = 0\n yi = 0\n xj = self.width()\n yj = 0\n xm = xj\n ym = self.height()\n xn = 0\n yn = ym\n\n # Calculate the [C] coefficient matrix\n C = array([[1, xi, yi, xi**2, xi*yi, yi**2, xi**3, xi**2*yi, xi*yi**2, yi**3, xi**3*yi, xi*yi**3],\n [0, 0, 1, 0, xi, 2*yi, 0, xi**2, 2*xi*yi, 3*yi**2, xi**3, 3*xi*yi**2],\n [0, -1, 0, -2*xi, -yi, 0, -3*xi**2, -2*xi*yi, -yi**2, 0, -3*xi**2*yi, -yi**3],\n \n [1, xj, yj, xj**2, xj*yj, yj**2, xj**3, xj**2*yj, xj*yj**2, yj**3, xj**3*yj, xj*yj**3],\n [0, 0, 1, 0, xj, 2*yj, 0, xj**2, 2*xj*yj, 3*yj**2, xj**3, 3*xj*yj**2],\n [0, -1, 0, -2*xj, -yj, 0, -3*xj**2, -2*xj*yj, -yj**2, 0, -3*xj**2*yj, -yj**3],\n\n [1, xm, ym, xm**2, xm*ym, ym**2, xm**3, xm**2*ym, xm*ym**2, ym**3, xm**3*ym, xm*ym**3],\n [0, 0, 1, 0, xm, 2*ym, 0, xm**2, 2*xm*ym, 3*ym**2, xm**3, 3*xm*ym**2],\n [0, -1, 0, -2*xm, -ym, 0, -3*xm**2, -2*xm*ym, -ym**2, 0, -3*xm**2*ym, -ym**3],\n\n [1, xn, yn, xn**2, xn*yn, yn**2, xn**3, xn**2*yn, xn*yn**2, yn**3, xn**3*yn, xn*yn**3],\n [0, 0, 1, 0, xn, 2*yn, 0, xn**2, 2*xn*yn, 3*yn**2, xn**3, 3*xn*yn**2],\n [0, -1, 0, -2*xn, -yn, 0, -3*xn**2, -2*xn*yn, -yn**2, 0, -3*xn**2*yn, -yn**3]])\n \n # Return the coefficient matrix\n return C", "def m_c(self) -> np.ndarray:\n assert self._k is not None, \"camera must be calibrated\"\n return forge_projective_matrix(self._k)", "def matmul(x, y):\n return np.matmul(x, y)", "def test_two_qubit_weyl_decomposition_cnot(self):\n for k1l, k1r, k2l, k2r in K1K2S:\n k1 = np.kron(k1l.data, k1r.data)\n k2 = np.kron(k2l.data, k2r.data)\n a = Ud(np.pi / 4, 0, 0)\n self.check_two_qubit_weyl_decomposition(k1 @ a @ k2)", "def cofiCostFunc(self,params, *args):\n\t\tY, R, num_users, num_products, num_features,l = args[0], args[1],args[2], args[3],args[4],args[5]\n\n\t\taux = params.reshape((num_products + num_users, num_features))\n\n\t\tX = aux[0:num_products , :]\n\n\t\tTheta = aux[num_products:, :] \n\n\t\ttest = np.dot(X,Theta.transpose())\n\t\ttest = test - Y\n\t\ttest = np.multiply(test , R)\n\t\ttest = np.power(test,2)\n\t\ttest = test.sum()\n\t\ttest = 0.5 * test\n\n\t\tJ = 0;\n\t\tregularization = (l * 0.5) * np.power(X,2).sum() + np.power(Theta,2).sum()\n\n\t\tJ = test# + regularization\n\n\t\treturn J", "def zzX_mul_term(f, c, k):\n if poly_univariate_p(f):\n return zzx_mul_term(f, c, k)\n elif zzX_zero_p(f):\n return f\n elif zzX_zero_p(c):\n return zzX_zero_of(f)\n else:\n return [ zzX_mul(c, coeff) for coeff in f ] + zzX_zeros_of(f, k, 1)", "def form_matrix_yt(w):\r\n M = np.zeros((len(w),len(w)))\r\n for i in range(len(w)):\r\n for j in 
range(len(w)):\r\n M[i,j] = YoungTableaux(w[i],w[j]).CMNR()\r\n return M", "def method1(self):\n cres=0. # Variable for storing Chern number.\n # The U matrices from Fukui's method; storage...\n Ux=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n Uy=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n \n # ... and calculation of U matrices\n for ix in range(self.kS.Nx+1):\n for iy in range(self.kS.Ny+1):\n mat1=self.alleigvecs[:,:,ix ,iy ]\n if ix<self.kS.Nx:\n mat2=self.alleigvecs[:,:,ix+1,iy ]\n else:\n mat2=self.alleigvecs[:,:,1 ,iy ]\n if iy<self.kS.Ny:\n mat3=self.alleigvecs[:,:,ix ,iy+1]\n else:\n mat3=self.alleigvecs[:,:,ix ,1 ]\n Ux[ix,iy]=np.linalg.det(np.dot(np.conj(mat1.T),mat2)[:self.NL,:self.NL])\n Uy[ix,iy]=np.linalg.det(np.dot(np.conj(mat1.T),mat3)[:self.NL,:self.NL])\n \n # Local estimates of Berry curvature; storage ...\n ftempall=np.zeros((self.kS.Nx,self.kS.Ny),complex)\n # ... and calculation\n for ix in range(self.kS.Nx):\n for iy in range(self.kS.Ny):\n ftemp=np.log(Ux[ix,iy]*Uy[ix+1,iy]/Ux[ix,iy+1]/Uy[ix,iy])\n ftempall[ix,iy]=ftemp # ... of local Berry curvature ...\n cres+=ftemp/2./pi/1j # ... and of Berry phase (Chern number).\n\n return cres.real, ftempall", "def dot_kf(u, v):\n # TODO: implement the kernel function\n\n counter = 0\n if len(u)==len(v):\n for i in range(len(u)):\n counter = counter + (u[i]*v[i])\n return counter", "def _set_u_matirx(self):\n c_matrix = self.get_c_matrix()\n u_matrix, d_matrix, _ = np.linalg.svd(c_matrix)\n self.u_matrix = np.matrix(u_matrix)", "def reduce_C(self, C_on_basis_vecs):\n self.C_reduced = np.mat(np.array(C_on_basis_vecs, ndmin=2))\n return self.C_reduced", "def reduce_C(self, C_on_basis_vecs):\n self.C_reduced = np.mat(np.array(C_on_basis_vecs, ndmin=2).T)\n return self.C_reduced", "def p_ym_c(pm,px,py,pyx_c,pmx_c):\n pym_c = np.zeros((py.size,pm.size))\n for yi in range(py.size):\n for mi in range(pm.size):\n for xi in range(px.size):\n pym_c[yi,mi] += (1./pm[mi])*pyx_c[yi,xi]*pmx_c[mi,xi]*px[xi]\n return pym_c", "def zzx_mul_term(f, c, k):\n if not c or not f:\n return []\n else:\n return [ c * coeff for coeff in f ] + [INT_ZERO]*k", "def _build_c_phi_matrices(self, t: tf.Tensor) -> tf.Tensor:\n c_phi_matrices = self.kernel.compute_c_phi(t, t)\\\n + tf.expand_dims(tf.eye(self.n_points_int, dtype=tf.float64), 0)\\\n * self.likelihood_variances\n return c_phi_matrices" ]
[ "0.6325033", "0.6273725", "0.6251581", "0.62479377", "0.6177961", "0.6087597", "0.6022537", "0.60215706", "0.6020421", "0.60090333", "0.6000697", "0.5998053", "0.59429264", "0.59204763", "0.58713275", "0.5850264", "0.5813686", "0.57964927", "0.57901424", "0.57262236", "0.57260317", "0.5713855", "0.571201", "0.5704799", "0.57028663", "0.5689596", "0.5675992", "0.56757015", "0.5666318", "0.5655894" ]
0.693636
0
Solves Tx = b using the Levinson algorithm, where T is a positive-definite symmetric Toeplitz matrix and b is a real vector.
def levinson(r, b):
    n = len(b)
    y = zeros((n,))
    x = zeros((n,))

    # normalize the system so that the T matrix has diagonal of ones
    r_0 = r/r[0]
    b_0 = b/r[0]

    if n == 1:
        return b_0

    y[0] = -r_0[1]
    x[0] = b_0[0]
    beta = 1
    alpha = -r_0[1]

    for k in range(0, n-1):
        beta = (1 - alpha*alpha)*beta
        mu = (b_0[k+1] - dot(r_0[1:k+2], x[k::-1])) / beta
        x[0:k+1] = x[0:k+1] + mu*y[k::-1]
        x[k+1] = mu

        if k < n-2:
            alpha = -(r_0[k+2] + dot(r_0[1:k+2], y[k::-1]))/beta
            y[0:k+1] = y[0:k+1] + alpha * y[k::-1]
            y[k+1] = alpha

    return x
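A usage sketch for the solver above on a small positive-definite symmetric Toeplitz system. The imports are assumptions (the function uses zeros and dot as bare names); scipy.linalg.toeplitz is only used to build the dense matrix for the check.

import numpy as np
from numpy import zeros, dot
from scipy.linalg import toeplitz

r = np.array([4.0, 2.0, 1.0, 0.5])   # first column of T (diagonally dominant, hence PD)
b = np.array([1.0, 2.0, 3.0, 4.0])

x = levinson(r, b)
print(np.allclose(toeplitz(r) @ x, b))   # expected: True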
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _tridisolve(d, e, b, overwrite_b=True):\n\t\tN = len(b)\n\t\t# work vectors\n\t\tdw = d.copy()\n\t\tew = e.copy()\n\t\tif overwrite_b:\n\t\t\tx = b\n\t\telse:\n\t\t\tx = b.copy()\n\t\tfor k in range(1, N):\n\t\t\t# e^(k-1) = e(k-1) / d(k-1)\n\t\t\t# d(k) = d(k) - e^(k-1)e(k-1) / d(k-1)\n\t\t\tt = ew[ k - 1 ]\n\t\t\tew[ k - 1 ] = t / dw[ k - 1 ]\n\t\t\tdw[ k ] = dw[ k ] - t * ew[ k - 1 ]\n\t\tfor k in range(1, N):\n\t\t\tx[ k ] = x[ k ] - ew[ k - 1 ] * x[ k - 1 ]\n\t\tx[ N - 1 ] = x[ N - 1 ] / dw[ N - 1 ]\n\t\tfor k in range(N - 2, -1, -1):\n\t\t\tx[ k ] = x[ k ] / dw[ k ] - ew[ k ] * x[ k + 1 ]\n\n\t\tif not overwrite_b:\n\t\t\treturn x", "def tridisolve(d, e, b, overwrite_b=True):\r\n N = len(b)\r\n # work vectors\r\n dw = d.copy()\r\n ew = e.copy()\r\n if overwrite_b:\r\n x = b\r\n else:\r\n x = b.copy()\r\n for k in range(1, N):\r\n # e^(k-1) = e(k-1) / d(k-1)\r\n # d(k) = d(k) - e^(k-1)e(k-1) / d(k-1)\r\n t = ew[k - 1]\r\n ew[k - 1] = t / dw[k - 1]\r\n dw[k] = dw[k] - t * ew[k - 1]\r\n for k in range(1, N):\r\n x[k] = x[k] - ew[k - 1] * x[k - 1]\r\n x[N - 1] = x[N - 1] / dw[N - 1]\r\n for k in range(N - 2, -1, -1):\r\n x[k] = x[k] / dw[k] - ew[k] * x[k + 1]\r\n\r\n if not overwrite_b:\r\n return x", "def housetriang_solve(A, b):\n\n n, _ = A.shape\n b = np.reshape(b.copy(), (n, 1))\n R, c = housetriang(A, b)\n x = np.reshape(rbackwardsolve(R, c, n), (n,))\n\n\n return x", "def trisolve(l, u, c, b):\n n = shape(b)[0]\n for k in range(1, n):\n b[k] -= l[k-1]*b[k - 1]\n b[n-1] /= u[n-1]\n for k in range(n-2,-1,-1):\n b[k] -= c[k]*b[k + 1]\n b[k] /= u[k]", "def ftlan_E1c(hop, v0, T, m=50, Min_b=10e-10, Min_m=5, kB=1, norm = np.linalg.norm):\n# def Tri_diag(a1, b1):\n# mat = np.diag(b1, -1) + np.diag(a1, 0) + np.diag(b1, 1)\n# e, w = np.linalg.eigh(mat)\n# return e, w\n\n beta = 1./(T * kB)\n E = 0.\n a, b = [], []\n v0 = v0/norm(v0)\n Hv = hop(v0)\n a.append(v0.dot(Hv))\n v1 = Hv - a[0] * v0\n b.append(norm(v1))\n if b[0] < Min_b:\n return 0\n\n v1 = v1/b[0]\n Hv = hop(v1)\n a.append(v1.dot(Hv))\n\n for i in range(1, m - 1):\n v2 = Hv - b[i - 1] * v0 - a[i] * v1\n b.append(norm(v2))\n if abs(b[i]) < Min_b:\n b.pop()\n break\n\n v2 = v2/b[i]\n Hv = hop(v2)\n a.append(v2.dot(Hv))\n v0 = v1.copy()\n v1 = v2.copy()\n \n a = np.asarray(a)\n b = np.asarray(b)\n\n eps, phi = Tri_diag(a, b)\n l = len(eps)\n# Eo = eps[0]\n# eps = eps-Eo\n exp_eps = np.exp(-beta * eps)\n E = np.sum(exp_eps * eps * phi[0, :]**2.)\n Z = np.sum(exp_eps * phi[0, :]**2.)\n# for i in range(len(eps)):\n# E += exp_eps[i] * eps[i] * phi[0, i]**2\n\n# E = E + Eo\n# de = eps[:, np.newaxis] - eps\n# for i in range(l):\n# E += eps[i] * phi[0, i]**2./np.sum(np.exp(-beta*de[:l, i])*(phi[0, :l]**2.))\n return E, Z", "def SelfDualNewtonSystem(A, b, c, e):\n \n n = A.shape[1]\n m = A.shape[0]\n \n b_bar = b - np.matmul(A,e)\n c_bar = c - e\n alpha = 1 + np.dot(c, e)\n beta = n + 2\n \n A_star = np.c_[A,-b,b_bar]\n C = np.zeros((n+2,n+2))\n C[0:n,n] = c\n C[n,0:n] = -C[0:n,n].T\n C[0:n,n+1] = -c_bar\n C[n+1,0:n] = -C[0:n,n+1].T\n C[n,n+1] = alpha\n C[n+1,n] = -C[n,n+1].T\n \n yA = np.r_[np.zeros((m,m)), -A_star.T, np.zeros((n+2, m))]\n xA = np.r_[A_star, C, np.eye(n+2)]\n sA = np.r_[np.zeros((m, n+2)), -np.eye(n+2), np.eye(n+2)]\n \n return np.c_[yA, xA, sA]", "def stbinv(A, B, C, D, y, t):\n # Description to help the user\n\n # calculate the number of samples of the output\n N = np.shape(y)[\n 1\n ] # the number of samples is the number of columns of the data matrix y\n\n # calculate system's dimensions: number of states and 
number of inputs\n m = B.shape[1] # number of inputs\n n = A.shape[0] # number of states\n\n # initialize the variable v (additional input)\n v = np.zeros((n, N)) # it will be important later\n\n # initializing the flag variable\n flag = 0\n # initializing the flag variable for the vrft method\n flag_vr = 0\n # initializing the counter of reduction steps done by the algorithm\n kround = 0\n\n # starting the loop of the reduction procedure\n while flag == 0:\n # run a step of the reduction order algorithm\n Ahat, Bhat, Chat, Dhat, yhat, vhat, nhat, phat, rhat = invredc(A, B, C, D, y, v)\n # increase the counter of reductions\n kround = kround + 1\n\n # preallocating the state vector of the inverse system\n xhat = np.zeros((nhat, N - kround)) # it must have N-kround samples\n # preallocating the calculated input\n uhat = np.zeros((m, N - kround))\n\n # defining the reduced time vector\n tt = t[:, 0 : N - kround]\n\n # test the conditions of invertibility\n if phat < m:\n # if this condition is true, then the algorithm has failed and it is not possible to find the inverse\n flag = 1\n flag_vr = 1\n # if this is the case, we print a message and end the execution\n # print('The inversion algorithm has failed')\n return uhat, tt, flag_vr\n else:\n if rhat == m:\n # ((rhat==m)&(rhat==phat)):\n # if this condition is true, then the algorithm is done. We can calculate the signal u\n flag = 2\n # calculating the inverse of the feedforward matrix\n # E=np.linalg.inv(Dhat)\n E = np.linalg.pinv(Dhat)\n else:\n # if none of the conditions above is true, then we need to proceed to another round of the reduction step of the algorithm\n A = Ahat\n B = Bhat\n C = Chat\n D = Dhat\n y = yhat\n v = vhat\n # after the reduction procedure is done, then the system can be inverted\n\n # calculating the dynamic matrix of the inverse system\n Ainv = Ahat - Bhat @ E @ Chat\n # eigenvalues of the inverse system's dynamic matrix\n wv, v = np.linalg.eig(Ainv) # w=eigenvalues, v=eigenvectors\n # calculating the input matrix of the inverse system\n Binv = Bhat @ E\n # calculating the output matrix of the inverse system\n Cinv = -E @ Chat\n # calculating the feedforward matrix of the inverse system\n Dinv = E\n\n # test if the inverse dynamic system is stable\n wbool = wv > 1\n wsum = np.sum(wbool)\n # test if wsum is greater than 1\n if wsum > 0:\n # if wsum is greater than 1, then, the inverse system is unstable, so we end the execution of the algorithm\n # print('The inverse system is unstable')\n flag_vr = 2\n return uhat, tt, flag_vr\n else:\n # if wsum=0, then the inverse system is stable, and we can calculate the input signal\n # calculate the first value for the output (t=0)\n uhat[:, 0] = Cinv @ xhat[:, 0] + Dinv @ yhat[:, 0]\n # calculate the states and the output of the inverse system\n for k in range(0, N - 1 - kround):\n xhat[:, k + 1] = Ainv @ xhat[:, k] + Binv @ yhat[:, k] + vhat[:, k]\n uhat[:, k + 1] = Cinv @ xhat[:, k + 1] + Dinv @ yhat[:, k + 1]\n\n return uhat, tt, flag_vr", "def toeplitz_inverse_multiplication_prep(T_column):\n \n phi=1\n psi=2\n assert phi != 0\n assert psi != 0\n assert phi != psi\n \n n = len(T_column)\n \n x = levinson(T_column, np.concatenate( (np.array([1]), np.zeros((n-1,))) ) )\n y = levinson(T_column, np.concatenate( (np.zeros((n-1,)), np.array([1])) ) )\n\n \n \n x_0 = x[0]\n \n D_phi = (phi**(1/n))**np.arange(0,n)\n D_psi = (psi**(1/n))**np.arange(0,n)\n\n Lambda_1 = fft(D_psi*x)\n Lambda_2 = fft(D_phi*np.concatenate(([phi*y[-1]], y[0:-1])))\n Lambda_3 = 
fft(D_psi*np.concatenate(([psi*y[-1]], y[0:-1])))\n Lambda_4 = fft(D_phi*x)\n \n return (x_0, phi, psi, D_phi, D_psi, Lambda_1, Lambda_2, Lambda_3, Lambda_4)", "def __solve(self, tsnMat, vecB):\n A_d = np.linalg.inv(np.dot(tsnMat.T, tsnMat))\n return np.dot(np.dot(A_d, tsnMat.T), vecB)", "def least_squares(y, tx):\n a = tx.T.dot(tx)\n b = tx.T.dot(y)\n w = np.linalg.solve(a,b)\n loss =compute_loss_LS(y,tx,w)\n return loss, w", "def solveForModeB1(X, M, n, maxInner, epsilon, tol,sita,Y1, lambta2):\n # Pi(n) = [A(N) kr A(N-1) kr ... A(n+1) kr A(n-1) kr .. A(1)]^T\n Pi = tensorTools.calculatePi(X, M, n)\n #print 'Pi size', Pi.shape\n #print 'pi='+str(Pi)\n #print(M.U[n])\n for iter in range(maxInner):\n # Phi = (X(n) elem-div (B Pi)) Pi^T\n #print X.vals.shape,X.shape\n #print X.vals.flatten().shape\n Phi = tensorTools.calculatePhi(X, M.U[n], Pi, n, epsilon=epsilon)\n #print('phi'+str(Phi))\n #print(Phi)\n # check for convergence that min(B(n), E - Phi(n)) = 0 [or close]\n kktModeViolation = np.max(np.abs(np.minimum(M.U[n], 1-Phi).flatten()))\n if (kktModeViolation < tol):\n break\n\n B=M.U[n]\n #print B.shape\n colNorm = np.apply_along_axis(np.linalg.norm, 0, B, 1)\n zeroNorm = np.where(colNorm == 0)[0]\n colNorm[zeroNorm] = 1\n B = B / colNorm[np.newaxis, :]\n tm=np.hstack((np.ones((B.shape[0],1)),B))\n Y1=Y1.reshape((Y1.shape[0],1))\n\n derive=-1.0*lambta2/B.shape[0]*np.dot((Y1-np.dot(tm,sita)),sita.T)\n #print derive.shape\n #print np.multiply(M.U[n],derive[:,1:]).shape\n #print np.multiply(M.U[n],Phi).shape\n M.U[n] = np.array(np.multiply(M.U[n],Phi))-np.array((np.multiply(M.U[n],derive[:,1:])))\n\n #print 'after'\n #print M.U[n][0]\n #print(\" Mode={0}, Inner Iter={1}, KKT violation={2}\".format(n, iter, kktModeViolation))\n return M, Phi, iter, kktModeViolation", "def solve(matrix, b):\n lu_matrix = decompose_to_LU(matrix)\n # get supporting vector y\n y = np.matrix(np.zeros([lu_matrix.shape[0], 1]), dtype=np.float64)\n for i in range(y.shape[0]):\n y[i, 0] = b[i] - lu_matrix[i, :i] * y[:i]\n\n # get vector of answers x\n x = np.matrix(np.zeros([lu_matrix.shape[0], 1]))\n for i in range(1, x.shape[0] + 1):\n x[-i, 0] = (y[-i] - lu_matrix[-i, -i:] * x[-i:, 0]) / lu_matrix[-i, -i]\n\n return np.array(x.transpose()[0], dtype=np.float64)[0]", "def forward_committor_sensitivity(T, A, B, index):\n\n n = len(T)\n set_X = numpy.arange(n) # set(range(n))\n set_A = numpy.unique(A) # set(A)\n set_B = numpy.unique(B) # set(B)\n set_AB = numpy.union1d(set_A, set_B) # set_A | set_B\n notAB = numpy.setdiff1d(set_X, set_AB, True) # list(set_X - set_AB)\n m = len(notAB)\n\n K = T - numpy.diag(numpy.ones(n))\n\n U = K[numpy.ix_(notAB.tolist(), notAB.tolist())]\n\n v = numpy.zeros(m)\n\n # for i in xrange(0, m):\n # for k in xrange(0, len(set_B)):\n # v[i] = v[i] - K[notAB[i], B[k]]\n v[:] = v[:] - K[notAB[:], B[:]]\n\n qI = numpy.linalg.solve(U, v)\n\n q_forward = numpy.zeros(n)\n #q_forward[set_A] = 0 # double assignment.\n q_forward[set_B] = 1\n #for i in range(len(notAB)):\n q_forward[notAB[:]] = qI[:]\n\n target = numpy.eye(1, n, index)\n target = target[0, notAB]\n\n UinvVec = numpy.linalg.solve(U.T, target)\n Siab = numpy.zeros((n, n))\n\n for i in range(m):\n Siab[notAB[i]] = - UinvVec[i] * q_forward\n\n return Siab", "def nnls(A, b, maxiter=None, eps=1e-11):\n m, n = A.shape\n x = np.zeros(n)\n P = []\n Z = list(range(n))\n k = 0\n\n if maxiter is None:\n maxiter = 3 * m\n\n while True:\n if k == maxiter:\n return x\n\n w = np.matmul(A.T, (b - np.matmul(A, x)))\n if Z == [] or np.all(w[Z] <= 
eps):\n return x\n\n while True:\n\n t = np.argmax(ma.masked_array(w, mask=[not i in Z for i in range(n)]))\n P.append(t)\n Z.remove(t)\n Ap = A.copy()\n Ap[:, Z] = 0\n\n z = np.linalg.lstsq(Ap, b, rcond=None)[0]\n\n if np.all(z[P] > 0):\n x = z\n break\n\n alpha = np.min(ma.masked_array(x / (x - z), mask=[not i in P or z[i] > 0 for i in range(n)]))\n x = x + alpha * (z - x)\n\n T = np.where(x == 0.0)[0]\n Z = [z for z in set(Z + P) if z in Z or z in P and z in T]\n P = [p for p in P if not p in T]\n\n k = k + 1", "def least_squares(y, tx):\n a = tx.T.dot(tx)\n b = tx.T.dot(y)\n w = np.linalg.solve(a, b)\n loss = compute_cost(y, tx, w)\n return w, loss", "def ridge_regression(y, tx, lambda_):\n N = tx.shape[0]\n a = tx.T.dot(tx) + 2 * N * lambda_ * np.identity(tx.shape[1])\n b = tx.T.dot(y)\n w = np.linalg.solve(a, b)\n loss = compute_loss_LS(y, tx, w) \n return loss, w", "def solve_fwd_bkwd(matrix_a, b):\n _L = cholesky(matrix_a) \n _U = transpose_matrix(_L) \n \n n = len(b)\n x = [0 for i in xrange(n)] \n y = [0 for i in xrange(n)] \n\n #forward solve _Ly = b\n for i in xrange(n):\n y[i] = b[i]\n for j in xrange(i):\n\t y[i] -= _L[i][j] * y[j]\n\ty[i] /= _L[i][i]\n\n #backward solve _Ux = y\n for i in xrange(n-1, -1, -1):\n\tx[i] = y[i]\n for j in xrange(i+1, n):\n x[i] -= _U[i][j] * x[j]\n x[i] /= _U[i][i]\n\n return x", "def lp_acent(A,b,c,x_0):\n #Parameters\n b = b.flatten()\n c = c.flatten()\n ALPHA = 0.01\n BETA = 0.5\n EPSILON = 1e-6\n MAXITERS = 100\n if (np.min(x_0)<=0) and (np.linalg.norm>1e-3):\n print 'failed' \n return 0\n #m = len(b)\n #n = len(x_0)\n lambda_hist = []\n x = x_0\n for iter in range(MAXITERS):\n # H = np.diag(1/np.power(x,3))\n g = c-np.power(x,-1)\n #print g.shape\n #solving KKT system\n w = np.linalg.solve(np.dot(np.dot(A,np.diag(np.power(x,2))),A.T),\n np.dot(np.dot(-A,np.diag(np.power(x,2))),g))\n dx = np.dot(-np.diag(np.power(x,2)),np.dot(A.T,w)+g)\n lambdasqr = np.dot(-g.T,dx) #dx'*T*dx: newton incremental\n lambda_hist.append(lambdasqr/2)\n if lambdasqr/2 <= EPSILON:\n break\n # backtracking line search\n t = 1\n # brin the point inside the domain\n while np.min(x+t*dx)<=0:\n t =BETA*t\n while np.dot(c.T,np.dot(t,dx))-np.sum(np.log(x+t*dx))+np.sum(np.log(x))-ALPHA*t*np.dot(g.T,dx)>0:\n t = BETA*t\n x = x+t*dx\n if iter == MAXITERS:\n print 'ERROR: MAXITERS reached'\n else:\n #plt.figure()\n #plt.plot(range(len(lambda_hist)),lambda_hist,'b-',range(len(lambda_hist)),lambda_hist,'bo')\n return x,w,lambda_hist", "def least_squares(y, tx):\n a = tx.T.dot(tx)\n b = tx.T.dot(y)\n\n w = np.linalg.solve(a, b)\n loss = compute_loss(y, tx, w)\n return w, loss", "def SOR_Solve_Opt(A,b,tol=1.0e-6,max_iterations=100,LOUD=False):\n [Nrow, Ncol] = A.shape\n assert Nrow == Ncol\n N = Nrow\n converged = False\n iteration = 1\n omega = 1\n l = 5\n p = 2\n x = np.random.rand(N) #random initial guess \n x_new = np.zeros(N)\n while not(converged):\n x = x_new.copy() #replace old value\n for row in range(N):\n x_new[row] = b[row]\n for column in range(N):\n if column != row:\n x_new[row] -= A[row,column]*x_new[column]\n x_new[row] /= A[row,row]\n x_new[row] = (1.0-omega) * x[row] + omega*x_new[row]\n relative_change = np.linalg.norm(x_new-x)/np.linalg.norm(x_new)\n #record change after iteration k\n if (l==iteration):\n dxl = np.linalg.norm(x_new-x)\n if (l + p == iteration):\n dxlp = np.linalg.norm(x_new-x)\n omega = 2.0/(1.0+np.sqrt(1-(dxlp/dxl)**(1.0/p)))\n if (LOUD):\n print(\"Iteration\",iteration,\": Relative Change =\",relative_change)\n if (relative_change < tol) or 
(iteration >= max_iterations):\n converged = True\n iteration += 1\n return x_new", "def _solveX(L, U, b):\n m, n = L.shape\n # Forward Substitution\n y = list()\n y.insert(0, b[0]/L[0][0])\n for i in range(1, m):\n summ = 0\n for k in range(0, i):\n summ += L[i][k]*y[k]\n y.insert(i, (b[i]-summ)/(L[i][i]))\n\n # Backwards Substitution\n x = [0]*m\n x[m-1] = y[m-1] / U[m-1][m-1]\n for i in range(m - 2, -1, -1):\n summ = 0\n for k in range(i+1, n):\n summ += U[i][k]*x[k]\n x[i] = (y[i] - summ)/U[i][i]\n\n return x", "def f(self,un,tn):\n return -self.a(tn)*un + self.b(tn)", "def project_L1_ball(x: \"fasta.linalg.Vector\", t: float) -> \"fasta.linalg.Vector\":\n # By Moreau's identity, we convert to proximal of dual problem (L-inf norm)\n return x - project_Linf_ball(x, t)", "def wasserstein(X,t,p,lam=10,its=10,sq=False,backpropT=False):\n\n it = torch.where(t > 0)[0] # getting the positions\n ic = torch.where(t < 1)[0]\n\n Xt = torch.index_select(X, 0, it) # Getting the nx100 for each value\n Xc = torch.index_select(X, 0, ic)\n\n nc = Xc.shape[0]\n nt = Xt.shape[0]\n\n ''' Compute distance matrix'''\n if sq:\n M = pdist2sq(Xt,Xc)\n else:\n M = safe_sqrt(pdist2sq(Xt,Xc))\n\n ''' Estimate lambda and delta '''\n M_mean = torch.mean(M)\n M_drop = torch.nn.Dropout(10/(nc*nt))(M)\n delta = torch.max(M)\n eff_lam = lam/M_mean\n\n ''' Compute new distance matrix '''\n Mt = M\n row = delta*torch.ones(M.shape[1])\n col = torch.cat((delta*torch.ones(M.shape[0]),torch.zeros((1))),0)\n Mt = torch.cat((M, torch.unsqueeze(row, 0)), 0)\n Mt = torch.cat((Mt, torch.unsqueeze(col, 1)), 1)\n\n ''' Compute marginal vectors '''\n temp = torch.where(t > 0)[0].shape\n a = torch.cat((p * torch.ones((torch.where(t > 0)[0].shape[0],1)) / nt, (1 - p) * torch.ones((1,1))), 0)\n b = torch.cat(((1-p) * torch.ones((torch.where(t < 1)[0].shape[0],1)) / nc, p * torch.ones((1,1))), 0)\n\n ''' Compute kernel matrix'''\n Mlam = eff_lam*Mt\n K = torch.exp(-Mlam) + 1e-6 # added constant to avoid nan\n U = K*Mt\n ainvK = K/a\n\n u = a\n for i in range(0,its):\n temp = torch.transpose(torch.matmul(torch.transpose(u,0,1),K),0,1)\n u = 1.0/(torch.matmul(ainvK,( b / temp)))\n temp = torch.transpose(torch.matmul(torch.transpose(u,0,1),K),0,1)\n v = b/(temp)\n\n T = u*(torch.transpose(v,0,1)*K)\n\n E = T*Mt\n D = 2*torch.sum(E)\n\n return D, Mlam", "def solve_L(L, b):\n n = b.size\n assert L.shape == (n,n)\n x = zeros(n)\n for i in range(n):\n x[i] = (b[i] - dot(x[:i], L[i,:i])) / L[i,i]\n if not numpy.isfinite(x[i]):\n x[i] = 0.0\n return x", "def least_squares(y, tx):\n a = tx.T.dot(tx)\n b = tx.T.dot(y)\n\n w = np.linalg.solve(a, b)\n return w, compute_mse(y, tx, w)", "def rawsolve(self,):\n m = self.m\n n = self.n\n z = self.z\n mark = self.mark\n kAAt = self.kAAt\n iAAt = self.iAAt\n AAt = self.AAt\n diag = self.diag\n consistent = True\n eps = 0.0\n m2 = m+n\n\n if self.ndep:\n eps = self.epssol * np.abs(z).max()\n\n #/*------------------------------------------------------+\n #| |\n #| -1 |\n #| z <- L z |\n #| */\n\n for i in range(m2):\n if mark[i]:\n beta = z[i]\n for k in range(kAAt[i], kAAt[i+1]):\n row = iAAt[k]\n z[row] -= AAt[k]*beta\n elif abs(z[i]) > eps:\n consistent = False\n else:\n z[i] = 0.0\n\n #/*------------------------------------------------------+\n #| |\n #| -1 |\n #| z <- D z |\n #| */\n\n for i in range(m2-1, -1, -1):\n if mark[i]:\n z[i] = z[i]/diag[i]\n elif abs(z[i]) > eps:\n consistent = False\n else:\n z[i] = 0.0\n\n #/*------------------------------------------------------+\n #| |\n #| t -1 |\n #| z 
<- (L ) z |\n #| */\n\n for i in range(m2-1, -1, -1):\n if mark[i]:\n beta = z[i]\n for k in range(kAAt[i], kAAt[i+1]):\n beta -= AAt[k]*z[iAAt[k]]\n z[i] = beta\n elif abs(z[i]) > eps:\n consistent = False\n else:\n z[i] = 0.0\n\n return consistent", "def solve_LF(self):\n self.u = zeros(self.N)\n self.u[0] = self.u0\n self.u[1] = self.u1\n u = self.u\n f= self.f\n dt = self.dt\n t = self.t\n N = self.N\n for n in xrange(1,N-1):\n u[n+1] = 2*dt*f(u[n],t[n]) + u[n-1]\n #return t,u", "def SYR_forward(b, alpha, V, s0, y0, T=100):\n n = len(y0)\n\n du = np.zeros(n+1)\n u0 = np.zeros(n+1)\n u0[0] = s0\n u0[1:] = y0\n \n def f(t,u):\n s = u[0]\n y = u[1:]\n force = np.dot(y,b) # Force of infection\n du[0] = - s*force\n du[1:] = s*force*alpha - np.dot(V,y)\n return du\n\n times = np.linspace(0,T,10000)\n solution = solve_ivp(f,[0,T],u0,t_eval=times,method='RK23',max_step=0.1)\n s = solution.y[0,:]\n y = solution.y[1:,:]\n t = solution.t\n \n return s, y, t", "def newton_decent_directions(function, func_derivative, func_hessian, xk, A, P, b, q, t):\r\n # calculate steepest decent direction\r\n newton_dir = -np.dot(np.linalg.inv(func_hessian(x=xk, A=A, P=P, b=b, q=q, t=t)), func_derivative(x=xk, A=A, P=P, b=b, q=q, t=t))\r\n\r\n return newton_dir" ]
[ "0.63466734", "0.61827254", "0.61033237", "0.6093494", "0.60769826", "0.5885008", "0.58844715", "0.5877297", "0.58737326", "0.58588946", "0.5838278", "0.5794063", "0.57753825", "0.5773156", "0.5763559", "0.57562786", "0.574674", "0.57452273", "0.57390094", "0.57179475", "0.5697003", "0.5690629", "0.5688454", "0.56821567", "0.567632", "0.56759006", "0.56697446", "0.56637865", "0.5655258", "0.5654495" ]
0.7257071
0
Compute the log determinant of a positive-definite symmetric Toeplitz matrix. The determinant is computed recursively, exploiting the intermediate solutions of the Levinson recursion.
def toeplitz_slogdet(r):
    n = len(r)
    r_0 = r[0]

    r = np.concatenate((r, np.array([r_0])))
    r /= r_0  # normalize the system so that the T matrix has diagonal of ones

    logdet = n*np.log(np.abs(r_0))
    sign = np.sign(r_0)**n

    if n == 1:
        return (sign, logdet)

    # now on is a modification of Levinson algorithm
    y = zeros((n,))
    x = zeros((n,))

    b = -r[1:n+1]
    r = r[:n]

    y[0] = -r[1]
    x[0] = b[0]
    beta = 1
    alpha = -r[1]

    d = 1 + dot(-b[0], x[0])
    sign *= np.sign(d)
    logdet += np.log(np.abs(d))

    for k in range(0, n-2):
        beta = (1 - alpha*alpha)*beta
        mu = (b[k+1] - dot(r[1:k+2], x[k::-1])) / beta
        x[0:k+1] = x[0:k+1] + mu*y[k::-1]
        x[k+1] = mu

        d = 1 + dot(-b[0:k+2], x[0:k+2])
        sign *= np.sign(d)
        logdet += np.log(np.abs(d))

        if k < n-2:
            alpha = -(r[k+2] + dot(r[1:k+2], y[k::-1]))/beta
            y[0:k+1] = y[0:k+1] + alpha * y[k::-1]
            y[k+1] = alpha

    return (sign, logdet)
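A usage sketch comparing the recursive log-determinant with a dense computation. The imports are assumptions (the function uses zeros and dot as bare names alongside np); numpy.linalg.slogdet on the dense Toeplitz matrix serves as the reference.

import numpy as np
from numpy import zeros, dot
from scipy.linalg import toeplitz

r = np.array([4.0, 2.0, 1.0, 0.5])            # first column/row of T
sign, logdet = toeplitz_slogdet(r)

sign_ref, logdet_ref = np.linalg.slogdet(toeplitz(r))
print(np.isclose(sign, sign_ref), np.isclose(logdet, logdet_ref))   # expected: True True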
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fast_logdet(matrix):\n sign, ld = np.linalg.slogdet(matrix)\n if not sign > 0:\n return -np.inf\n return ld", "def pddet(A):\r\n L = jitchol(A)\r\n logdetA = 2*sum(np.log(np.diag(L)))\r\n return logdetA", "def log_abs_det_jacobian(self, z):\n pre_u = self.u_ + self.u\n pre_w = self.w_ + self.w\n a = F.softplus(self.a + self.inv)\n w = F.softmax(pre_w, dim=3)\n u = F.softmax(pre_u, dim=3)\n # Perform computation\n pre_sigm = torch.sum(u * a * z, 3) + self.b\n sigm = torch.sigmoid(pre_sigm)\n x_pre = torch.sum(w * sigm, dim=3)\n x_pre_clipped = x_pre * (1 - self.eps) + self.eps * 0.5\n logj = F.log_softmax(pre_w, dim=3) + logsigmoid(pre_sigm) + logsigmoid(-pre_sigm) + torch.log(a)\n # n, d, d2, dh\n logj = logj + F.log_softmax(pre_u, dim=3)\n # n, d, d2, dh, d1\n logj = torch.log(torch.sum(torch.exp(logj),3))\n # n, d, d2, d1\n logdet_ = logj + np.log(1 - self.eps) - (torch.log(x_pre_clipped) + torch.log(-x_pre_clipped + 1))\n return logdet_", "def _compute_log_det_cholesky(matrix_chol, covariance_type, n_features):\n if covariance_type == 'full':\n n_components, _, _ = matrix_chol.shape\n log_det_chol = (np.sum(np.log(\n matrix_chol.reshape(\n n_components, -1)[:, ::n_features + 1]), 1))\n\n elif covariance_type == 'tied':\n log_det_chol = (np.sum(np.log(np.diag(matrix_chol))))\n\n elif covariance_type == 'diag':\n log_det_chol = (np.sum(np.log(matrix_chol), axis=1))\n\n else:\n log_det_chol = n_features * (np.log(matrix_chol))\n\n return log_det_chol", "def log_abs_det_jacobian(self, z):\n self.a = F.softplus(self.a)\n self.w = F.softmax(self.w, dim=1)\n pre_sigm = self.a * z + self.b\n sigm = torch.sigmoid(pre_sigm)\n x_pre = self.w * sigm\n if (len(z.shape) > 2):\n x_pre = torch.sum(self.w * sigm, dim=1)\n x_pre_clipped = x_pre * (1 - self.eps) + self.eps * 0.5\n logj = F.log_softmax(self.w, dim=1) + logsigmoid(pre_sigm) + logsigmoid(-pre_sigm) + torch.log(self.a)\n logj = torch.log(torch.sum(torch.exp(logj)))#,2).sum(2)\n logdet = logj + np.log(1 - self.eps) - (torch.log(x_pre_clipped) + torch.log(-x_pre_clipped + 1))\n return sum_dims(logdet)", "def log_abs_det_jacobian(self, x, y, intermediates=None):\n if intermediates is None:\n logdet = self.bn_arn(x)[1]\n return logdet.sum(-1)\n else:\n logdet = intermediates\n return logdet.sum(-1)", "def _forward_log_det_jacobian(self, x):\n d = self._compute_shared(x=x)\n relx = (x - d.x_k) / d.w_k\n relx = relx # tf.where(d.out_of_bounds, 0.5*tf.ones_like(x), relx)\n grad = (\n 2 * tf.math.log(d.s_k) +\n tf.math.log(d.d_kp1 * relx**2 + 2 * d.s_k * relx * (1 - relx) + # newln\n d.d_k * (1 - relx)**2) -\n 2 * tf.math.log((d.d_kp1 + d.d_k - 2 * d.s_k) * relx *\n (1 - relx) + d.s_k))\n return grad # tf.where(d.out_of_bounds, tf.zeros_like(grad), grad)", "def determinant(self):\n if self.cols != self.rows:\n raise Exception ('Matrix is not square!')\n for i in range(self.rows):\n if self.values[i][i] == 0:\n raise Exception ('There is zero on the main diagonal')\n #TODO: Rearrange the lines, that the main diagonal don't have a zero values \n\n arr = self.values[:]\n for i in range(self.rows):\n for j in range(self.cols):\n diag = [arr[l][p] for p in range(self.cols) for l in range(self.rows) if l == p ]\n if i > j :\n arr2 = arr[i][j]/diag[j]\n arr1 = [round(x * arr2, 4) for x in arr[i-i+j]]\n arr[i] = map(lambda x,y: round(x - y, 4) , arr[i], arr1 )\n\n diag = [arr[l][p] for p in range(self.cols) for l in range(self.rows) if l == p ]\n det = 1\n for i in range(len(diag)):\n det *= diag[i]\n if det != 0 :\n return True\n else:\n return 
False", "def _inverse_log_det_jacobian(self, x):\n alpha, beta = self._get_alpha_beta()\n diff = x - self.x0\n r = tf.linalg.norm(diff, axis=-1, keepdims=True)\n h = 1. / (alpha + r)\n h_prime = -(h ** 2)\n beta_h = beta * h\n log_det_jacobian = tf.reduce_sum(\n (self.dim - 1) * tf.math.log1p(beta_h)\n + tf.math.log1p(beta_h + beta * h_prime * r), axis=-1)\n return log_det_jacobian", "def determinant(self):\n if self.n_rows != self.n_cols:\n raise Exception('Matrix is not square')\n if self.n_rows == 2:\n return (self.data[0][0] * self.data[1][1]) - (self.data[1][0] * self.data[0][1])\n else:\n echelon, ops = reduce_to_echelon(self.data.copy(), True)\n swaps = sum([1 if row[0] == 'swap' else 0 for row in ops])\n return math.prod([echelon[i][i] for i in range(len(echelon))]) * (-1) ** swaps", "def log_abs_det_jacobian(self, x, y, intermediates=None):\n if intermediates is None:\n log_scale = self.arn(x)[1]\n log_scale = _clamp_preserve_gradients(\n log_scale, self.log_scale_min_clip, self.log_scale_max_clip\n )\n return log_scale.sum(-1)\n else:\n log_scale = intermediates\n return log_scale.sum(-1)", "def determinant_fast(A):\n # Section 1: Establish n parameter and copy A\n n = len(A)\n AM = copy_matrix(A)\n\n # Section 2: Row manipulate A into an upper triangle matrix\n for fd in range(n): # fd stands for focus diagonal\n if AM[fd][fd] == 0: \n AM[fd][fd] = 1.0e-18 # Cheating by adding zero + ~zero\n for i in range(fd+1,n): # skip row with fd in it.\n crScaler = AM[i][fd] / AM[fd][fd] # cr stands for \"current row\".\n for j in range(n): # cr - crScaler * fdRow, but one element at a time.\n AM[i][j] = AM[i][j] - crScaler * AM[fd][j]\n \n # Section 3: Once AM is in upper triangle form ...\n product = 1.0\n for i in range(n):\n product *= AM[i][i] # ... 
product of diagonals is determinant\n\n return product", "def determinant(A):\n \n total = 0\n\n if len(A) == 1:\n return A[0][0]\n\n for col in range(len(A)):\n Asub = A[1:]\n for j in range(len(A)-1):\n Asub[j] = Asub[j][:col] + Asub[j][col+1:]\n subdet = determinant(Asub)\n sign = (-1) ** (col % 2)\n total += sign * A[0][col] * subdet\n\n return total", "def determinant(self) -> float:\n num_R, num_C = self.shape()\n assert num_R == num_C, f\"Determinant must be for a square matrix; this one is {self.shape()}.\"\n # -------------------------------------------------------\n # TODO: You write this one.\n # Note: this one should be recursive....\n if num_R == 1:\n return self.mat[0][0]\n det =0\n for i in range(num_R):\n det += self.mat[0][i] * self.get_minor(0,i).determinant() * (-1)**i\n return det\n pass # remove this when you add your code.\n # -------------------------------------------------------", "def compute_det(self, log_progress=False):\n if not self.is_square():\n raise Exception(u\"Not a square matrix\")\n\n mat = clone_matrix(self.coefficients)\n size = self.get_size()[0]\n\n for i in range(size - 1):\n for j in range(i + 1, size):\n for k in range(i + 1, size):\n mat[j][k] = (mat[j][k] * mat[i][i]) - (mat[j][i] * mat[i][k])\n if i > 0:\n mat[j][k] //= mat[i - 1][i - 1]\n if log_progress:\n print(i)\n if i > 0:\n for j in range(size):\n mat[j][i - 1] = 0\n mat[i - 1][j] = 0\n\n return mat[size - 1][size - 1]", "def determinant(matrix):\n if type(matrix) is not list or len(matrix) == 0:\n raise TypeError(\"matrix must be a list of lists\")\n\n if len(matrix) == 1 and len(matrix[0]) == 0:\n return 1\n\n for i in matrix:\n if type(i) is not list:\n raise TypeError(\"matrix must be a list of lists\")\n\n if len(i) != len(matrix):\n raise ValueError(\"matrix must be a square matrix\")\n\n if len(matrix) == 1:\n return matrix[0][0]\n\n if len(matrix) == 2:\n return (matrix[0][0] * matrix[1][1]) - (matrix[0][1]\n * matrix[1][0])\n deter = 0\n\n for j, k in enumerate(matrix[0]):\n rows = [r for r in matrix[1:]]\n sub = []\n for r in rows:\n sub.append([r[a] for a in range(len(matrix)) if a != j])\n deter += k * (-1) ** j * determinant(sub)\n return deter", "def det(A):\n # Section 1: Establish n parameter and copy A\n n = len(A)\n AM = A[:]\n\n # Section 2: Row manipulate A into an upper triangle matrix\n for fd in range(n): # fd stands for focus diagonal\n if AM[fd][fd] == 0:\n AM[fd][fd] = 1.0e-18 # Cheating by adding zero + ~zero\n for i in range(fd+1, n): # skip row with fd in it.\n crScaler = AM[i][fd] / AM[fd][fd] # cr stands for \"current row\".\n for j in range(n): # cr - crScaler * fdRow, one element at a time.\n AM[i][j] = AM[i][j] - crScaler * AM[fd][j]\n\n # Section 3: Once AM is in upper triangle form ...\n product = 1.0\n for i in range(n):\n product *= AM[i][i] # ... 
product of diagonals is determinant\n\n return product", "def local_det_chol(node):\r\n if node.op == det:\r\n x, = node.inputs\r\n for (cl, xpos) in x.clients:\r\n if isinstance(cl.op, Cholesky):\r\n L = cl.outputs[0]\r\n return [tensor.prod(extract_diag(L) ** 2)]", "def logp(value, mu, rowchol, colchol):\n\n if value.ndim != 2:\n raise ValueError(\"Value must be two dimensional.\")\n\n # Compute Tr[colcov^-1 @ (x - mu).T @ rowcov^-1 @ (x - mu)] and\n # the logdet of colcov and rowcov.\n delta = value - mu\n\n # Find exponent piece by piece\n right_quaddist = solve_lower(rowchol, delta)\n quaddist = pt.nlinalg.matrix_dot(right_quaddist.T, right_quaddist)\n quaddist = solve_lower(colchol, quaddist)\n quaddist = solve_upper(colchol.T, quaddist)\n trquaddist = pt.nlinalg.trace(quaddist)\n\n coldiag = pt.diag(colchol)\n rowdiag = pt.diag(rowchol)\n half_collogdet = pt.sum(pt.log(coldiag)) # logdet(M) = 2*Tr(log(L))\n half_rowlogdet = pt.sum(pt.log(rowdiag)) # Using Cholesky: M = L L^T\n\n m = rowchol.shape[0]\n n = colchol.shape[0]\n\n norm = -0.5 * m * n * pm.floatX(np.log(2 * np.pi))\n return norm - 0.5 * trquaddist - m * half_collogdet - n * half_rowlogdet", "def determinant(self):\n if self.m != self.n:\n raise exc.LinearAlgebraError(\"cannot calculate the determinant of\"\n \"a non-square matrix\")\n if self.m == 1:\n return self[0, 0]\n # TODO: can we choose a better row/column to improve efficiency\n return functools.reduce(\n lambda x, y: x ^ y,\n [self[0, j] and\n self.subset([i for i in range(1, self.m)],\n [k for k in range(self.n) if k != j]).determinant\n for j in range(self.n)],\n )", "def Determinant(matrix, mul):\r\n width = len(matrix)\r\n # Stop Conditions\r\n if width == 1:\r\n return mul * matrix[0][0]\r\n else:\r\n sign = -1\r\n det = 0\r\n for i in range(width):\r\n m = []\r\n for j in range(1, width):\r\n buff = []\r\n for k in range(width):\r\n if k != i:\r\n buff.append(matrix[j][k])\r\n m.append(buff)\r\n # Change the sign of the multiply number\r\n sign *= -1\r\n # Recursive call for determinant calculation\r\n det = det + mul * Determinant(m, sign * matrix[0][i])\r\n return det", "def MvNormalLogp():\n cov = pt.matrix(\"cov\")\n cov.tag.test_value = floatX(np.eye(3))\n delta = pt.matrix(\"delta\")\n delta.tag.test_value = floatX(np.zeros((2, 3)))\n\n cholesky = Cholesky(lower=True, on_error=\"nan\")\n\n n, k = delta.shape\n n, k = f(n), f(k)\n chol_cov = cholesky(cov)\n diag = pt.diag(chol_cov)\n ok = pt.all(diag > 0)\n\n chol_cov = pt.switch(ok, chol_cov, pt.fill(chol_cov, 1))\n delta_trans = solve_lower(chol_cov, delta.T).T\n\n result = n * k * pt.log(f(2) * np.pi)\n result += f(2) * n * pt.sum(pt.log(diag))\n result += (delta_trans ** f(2)).sum()\n result = f(-0.5) * result\n logp = pt.switch(ok, result, -np.inf)\n\n def dlogp(inputs, gradients):\n (g_logp,) = gradients\n cov, delta = inputs\n\n g_logp.tag.test_value = floatX(1.0)\n n, k = delta.shape\n\n chol_cov = cholesky(cov)\n diag = pt.diag(chol_cov)\n ok = pt.all(diag > 0)\n\n chol_cov = pt.switch(ok, chol_cov, pt.fill(chol_cov, 1))\n delta_trans = solve_lower(chol_cov, delta.T).T\n\n inner = n * pt.eye(k) - pt.dot(delta_trans.T, delta_trans)\n g_cov = solve_upper(chol_cov.T, inner)\n g_cov = solve_upper(chol_cov.T, g_cov.T)\n\n tau_delta = solve_upper(chol_cov.T, delta_trans.T)\n g_delta = tau_delta.T\n\n g_cov = pt.switch(ok, g_cov, -np.nan)\n g_delta = pt.switch(ok, g_delta, -np.nan)\n\n return [-0.5 * g_cov * g_logp, -g_delta * g_logp]\n\n return OpFromGraph([cov, delta], [logp], grad_overrides=dlogp, 
inline=True)", "def determinant(self):\n if not self.is_square():\n raise(ValueError, \"Cannot calculate determinant of non-square matrix.\")\n if self.h > 2:\n raise(NotImplementedError, \"Calculating determinant not implemented for matrices largerer than 2x2.\")\n\n # TODO - your code here\n if self.h == 1:\n return self.g[0][0] # a 1x1 matrix\n else:\n return ((self.g[0][0] * self.g[1][1]) - (self.g[0][1] * self.g[1][0])) # a 2x2 matrix\n # TODO - your code here", "def det(a):\n a = copy.deepcopy(a)\n n = len(a)\n det = 1\n com_k = 1\n for k in range(n-1):\n step = 1\n\n while a[k][k] == 0:\n a[k+step], a[k] = a[k], a[k+step]\n det = -det\n step += 1\n mul = a[k][k]\n\n for i in range(k+1, n):\n for j in range(k+1, n):\n a[i][j] *= mul\n a[i][j] -= a[i][k] * a[k][j]\n a[i][j] /= com_k\n\n com_k = mul\n\n det = det * a[-1][-1]\n\n return det", "def determinant(self):\n if self.m != self.n:\n raise exc.LinearAlgebraError(\"cannot calculate the determinant of\"\n \"a non-square matrix\")\n if self.m == 1:\n return self[0, 0]\n # TODO: can we choose a better row/column to improve efficiency\n return sum([self[0, j] * (-1 if j % 2 else 1) *\n self.subset([i for i in range(1, self.m)],\n [k for k in range(self.n) if k != j]).determinant\n for j in range(self.n)])", "def log_det_K(self, Ks=None):\n log_det = 0.\n for K in self.Ks:\n rank_d = self.n / K.shape[0]\n det = np.linalg.slogdet(K)[1]\n log_det += rank_d * det\n return log_det", "def logit_deriv(y):\n# if y.any() < 0.0 or y.any() > 1.0:\n# raise Exception\n\n return y*(1-y)", "def log_det_K(self, Ks=None):\n Ks = self.Ks if Ks is None else Ks\n log_det = 0.\n for K in Ks:\n rank_d = self.m / K.shape[0]\n det = np.linalg.slogdet(K)[1]\n log_det += rank_d * det\n return log_det", "def det(self):\n\n if self.rows != self.columns:\n raise ValueError(\"Matrix must be square\")\n\n if self.rows == 1:\n return self.row(1)[0]\n\n if self.rows == 2:\n return self.entry(1,1) * self.entry(2,2) - self.entry(1,2) * self.entry(2,1)\n\n det = 0\n row_to_expand = 1\n\n for i in range(1, self.columns + 1):\n det += self.entry(row_to_expand, i) * self._cofactor(row_to_expand, i)\n\n return det", "def determinant(matrix):\n if matrix == [[]]:\n return 1\n if type(matrix) is not list or len(matrix) < 1 or\\\n not all(isinstance(x, list) for x in matrix):\n raise TypeError(\"matrix must be a list of lists\")\n if not all(len(matrix) == len(x) for x in matrix):\n raise ValueError(\"matrix must be a square matrix\")\n copy = list(map(list, matrix))\n dim = len(matrix)\n if dim == 1:\n return matrix[0][0]\n elif dim == 2:\n return matrix[0][0] * matrix[1][1] - matrix[1][0] * matrix[0][1]\n else:\n for cur in range(dim):\n for i in range(cur + 1, dim):\n if copy[cur][cur] == 0:\n copy[cur][cur] = 1.0e-10\n curScaler = copy[i][cur] / copy[cur][cur]\n for j in range(dim):\n copy[i][j] = copy[i][j] - curScaler * copy[cur][j]\n det = 1\n for i in range(dim):\n det *= copy[i][i]\n return round(det)" ]
[ "0.7205463", "0.69225436", "0.6803772", "0.6577487", "0.65662503", "0.6258033", "0.6235449", "0.6192166", "0.61640286", "0.60718197", "0.602648", "0.5906651", "0.5904567", "0.58784807", "0.58522433", "0.5850299", "0.58452636", "0.5838441", "0.5796368", "0.57808894", "0.5778876", "0.57782644", "0.5773432", "0.5766508", "0.57584566", "0.5755279", "0.5697666", "0.5686144", "0.5684613", "0.56827873" ]
0.6977162
1
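The negative examples listed above center on determinant and log-determinant routines (np.linalg.slogdet, Cholesky-based log-determinants, cofactor expansion). As a quick, self-contained illustration of why the slogdet-based variant is the numerically safe choice, here is a minimal sketch; the test matrix is made up for the example, and fast_logdet is copied from the first negative above.

import numpy as np

def fast_logdet(matrix):
    # slogdet works in log space, so it does not overflow the way a plain
    # determinant (a product of n pivots) can, even when the entries are modest.
    sign, ld = np.linalg.slogdet(matrix)
    if not sign > 0:
        return -np.inf
    return ld

a = np.diag(np.full(400, 10.0))  # det(a) = 10**400, far beyond float64 range
print(np.linalg.det(a))          # inf: the product of pivots overflows
print(fast_logdet(a))            # ~921.034 = 400 * ln(10), computed stably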
Preprocessing needed for toeplitz_inverse_multiplication()
def toeplitz_inverse_multiplication_prep(T_column):

    phi=1
    psi=2
    assert phi != 0
    assert psi != 0
    assert phi != psi

    n = len(T_column)

    x = levinson(T_column, np.concatenate( (np.array([1]), np.zeros((n-1,))) ) )
    y = levinson(T_column, np.concatenate( (np.zeros((n-1,)), np.array([1])) ) )

    x_0 = x[0]

    D_phi = (phi**(1/n))**np.arange(0,n)
    D_psi = (psi**(1/n))**np.arange(0,n)

    Lambda_1 = fft(D_psi*x)
    Lambda_2 = fft(D_phi*np.concatenate(([phi*y[-1]], y[0:-1])))
    Lambda_3 = fft(D_psi*np.concatenate(([psi*y[-1]], y[0:-1])))
    Lambda_4 = fft(D_phi*x)

    return (x_0, phi, psi, D_phi, D_psi, Lambda_1, Lambda_2, Lambda_3, Lambda_4)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bd_toeplitz_inverse_multiplication_prep(*arrs):\n \n t = []\n for c in arrs: # loop over each block\n t.append(toeplitz_inverse_multiplication_prep(c))\n return tuple(t)", "def toeplitz_inverse_multiplication(u, x_0, phi, psi, D_phi, D_psi, Lambda_1, Lambda_2, Lambda_3, Lambda_4):\n\n y = fft(D_phi*u)\n a = Lambda_1*fft(D_psi*(1/D_phi)*ifft(Lambda_2*y))\n b = Lambda_3*fft(D_psi*(1/D_phi)*ifft(Lambda_4*y))\n y = (1/D_psi)*real(ifft(a-b))/(x_0*(phi-psi))\n \n return y", "def bd_toeplitz_inverse_multiplication(u, *arrs):\n \n y = zeros(shape(u))\n n_start = 0\n n_end = 0\n for t in arrs:\n n_start = n_end\n n_end += len(t[3]) # len(t[3]) is the length of the block\n y[n_start:n_end] = toeplitz_inverse_multiplication(u[n_start:n_end], *t)\n assert len(y) == n_end\n return y", "def toeplitz_multiplication(u, c, r=None):\n n = len(u)\n if r is None:\n r = c\n u1 = zeros((2*n))\n u1[0:n] = u\n \n c = np.concatenate((c, [0], r[-1:0:-1])) \n \n y1 = circulant_multiplication(u1, c)\n \n return y1[0:n]", "def de_mult(self,z):\n if isinstance(z,np.ndarray) and z.size>1:\n assert np.all(np.diff(z)>0.)\n return (z+1.)**(3.*(1.+self.w))", "def back_substitution(U, z):\n n = len(U[0])\n x = [0] * n\n for i in range(n - 1, -1, -1):\n if U[i][i] != 0:\n accum = 0\n for j in range(i, n):\n accum += U[i][j] * x[j]\n x[i] = (z[i] - accum) / U[i][i]\n return x", "def mul(Z,X,Y):", "def reconstruct(A, B, z):\n f = factorint(igcd(A, B))\n for p, e in f.items():\n if e != 1:\n raise ValueError('a and b should be square-free')\n z *= p\n return z", "def preprocessing(ct):\n return value_preprocessing(ct, False)", "def test_inverse_transform(self):", "def _z2matmul(self, left, right):\n prod = np.mod(np.dot(left, right), 2)\n return prod", "def test__inverse_transform_continuous(self):", "def complex_inverse(c1,cr):", "def preprocess_features(features):\r\n rowsum = np.array(features.sum(1))\r\n r_inv = np.power(rowsum, -1).flatten()\r\n r_inv[np.isinf(r_inv)] = 0.\r\n r_mat_inv = np.diag(r_inv)\r\n features = r_mat_inv.dot(features)\r\n return features", "def inverse_fisher_z_transform(z):\r\n return ((e ** (2 * z)) - 1.) / ((e ** (2 * z)) + 1.)", "def preprocess_features(features):\n rowsum = np.array(features.sum(1))\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n features = r_mat_inv.dot(features)\n return features", "def preprocess_features(features):\n rowsum = np.array(features.sum(1))\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n features = r_mat_inv.dot(features)\n return features", "def mul_inplace(a, b):", "def modular_inverse(self):\n i = gmpy2.invert(self.c2, self.n)\n mx = pow(self.c1, self.a, self.n)\n my = pow(i, int(-self.b), self.n)\n self.m= mx * my % self.n", "def multInverse(a, m):\n x0 = 1\n x1 = 0\n y0 = 0\n y1 = 1\n\n while m != 0:\n p = a // m\n z = a % m\n a = m\n m = z\n\n w = x1\n x1 = x0 - p * x1\n x0 = w\n \n v = y1\n y1 = y0 - p * y1\n y0 = v\n if(x0):\n return(x0)\n else:\n print(\"multiplicative inverse does not exist\")\n return 0", "def inv_inplace(a):", "def de_mult(self,z):\n z = np.asanyarray(z)\n if not (np.any(z<0) or np.any(z>=9.)):\n return self.de_true_interp(z)\n result = np.zeros_like(z)\n result[z<0.] = (z[z<0.]+1.)**(3.*(1.+self.w))\n result[(z>=0.)*(z<9.)] = self.de_true_interp(z[(z>=0.)*(z<9.)])\n result[z>=9.] 
= np.exp(3.*(_de_exp_const_w(z[z>=9.],self.w)-_de_exp_const_w(9.,self.w)+np.log(self.de_true_interp(9.))/3.))\n if isinstance(z,np.ndarray) and z.size>1:\n assert np.all(np.diff(z)>0.)\n return result", "def __invert__(self):\n return Factorization([(p,-e) for p,e in reversed(self)],\n cr=self._cr(), unit=self.unit()**(-1))", "def exp2_inplace(a):", "def inverse_cayley_transform(z: torch.Tensor) -> torch.Tensor:\n identity = identity_like(z)\n i_identity = multiply_by_i(identity)\n\n z_minus_id = z - i_identity\n inv_z_plus_id = inverse(z + i_identity)\n return z_minus_id @ inv_z_plus_id", "def local_mul_specialize(node):\r\n # here, we are past the point of canonicalization, so we don't\r\n # want to put in un-necessary fills.\r\n #\r\n # at this point [post canonicalize], mul() may have many inputs.\r\n if node.op == T.mul:\r\n #the idea here is that we have pow(x, y)\r\n neg = False\r\n new_inputs = []\r\n nb_neg_node = 0\r\n nb_cst = 0\r\n for input in node.inputs:\r\n # remove any neg arguments\r\n while input.owner and input.owner.op == T.neg:\r\n neg ^= True\r\n input = input.owner.inputs[0]\r\n nb_neg_node += 1\r\n\r\n # remove special case arguments of 1, -1 or 0\r\n y = local_mul_canonizer.get_constant(input)\r\n if y == 1.0:\r\n nb_cst += 1\r\n elif y == -1.0:\r\n nb_cst += 1\r\n neg ^= True # toggles\r\n elif y == 0.0:\r\n # if we find any zero, we just return right away\r\n return [broadcast_like(0, node.outputs[0], node.fgraph)]\r\n else:\r\n new_inputs.append(input)\r\n\r\n if new_inputs != node.inputs:\r\n if new_inputs:\r\n if len(new_inputs) == 1:\r\n if neg:\r\n rval = -new_inputs[0]\r\n else:\r\n rval = new_inputs[0]\r\n else:\r\n # The next case would cause a replace by an equivalent case.\r\n if (neg and\r\n nb_neg_node == 0 and\r\n nb_cst == 1):\r\n return\r\n elif neg:\r\n # Don't add an extra neg node as we can't\r\n # fully replace this mul by a neg.\r\n m1 = numpy.asarray(-1, dtype=node.outputs[0].dtype)\r\n new_inputs = [m1] + new_inputs\r\n rval = T.mul(*new_inputs)\r\n\r\n return [broadcast_like(rval, node.outputs[0], node.fgraph)]\r\n else:\r\n # there are no variable inputs to mul\r\n # N.B. 
this could have been constant-folded...\r\n if neg:\r\n return [broadcast_like(-1, node.outputs[0], node.fgraph)]\r\n else:\r\n return [broadcast_like(1, node.outputs[0], node.fgraph)]", "def inv(z: int) -> int:\n # Adapted from curve25519_athlon.c in djb's Curve25519.\n z2 = z * z % q # 2\n z9 = pow2(z2, 2) * z % q # 9\n z11 = z9 * z2 % q # 11\n z2_5_0 = (z11 * z11) % q * z9 % q # 31 == 2^5 - 2^0\n z2_10_0 = pow2(z2_5_0, 5) * z2_5_0 % q # 2^10 - 2^0\n z2_20_0 = pow2(z2_10_0, 10) * z2_10_0 % q # ...\n z2_40_0 = pow2(z2_20_0, 20) * z2_20_0 % q\n z2_50_0 = pow2(z2_40_0, 10) * z2_10_0 % q\n z2_100_0 = pow2(z2_50_0, 50) * z2_50_0 % q\n z2_200_0 = pow2(z2_100_0, 100) * z2_100_0 % q\n z2_250_0 = pow2(z2_200_0, 50) * z2_50_0 % q # 2^250 - 2^0\n return pow2(z2_250_0, 5) * z11 % q # 2^255 - 2^5 + 11 = q - 2", "def multiply_by_i(z: torch.Tensor):\n return to_complex(-z.imag, z.real)", "def preprocess_features(features):\r\n rowsum = np.array(features.sum(1),dtype='float')\r\n r_inv = np.power(rowsum, -1).flatten()\r\n r_inv[np.isinf(r_inv)] = 0.\r\n r_mat_inv = sp.diags(r_inv)\r\n features = r_mat_inv.dot(features)\r\n # return sparse_to_tuple(features)\r\n return features\r\n # print(features)\r\n # rowsum = np.array(features.sum(1),dtype='float')\r\n #\r\n # r_inv = np.power(rowsum, -1).flatten()\r\n # r_inv[np.isinf(r_inv)] = 0.\r\n # r_mat_inv = np.diag(r_inv)\r\n # features = r_mat_inv.dot(features)\r\n # # return sparse_to_tuple(features)\r\n # return features\r", "def calculate_compressibility_factor(p_in, p_out, temp_in, temp_out):\n temp = np.transpose([200, 300, 400, 500, 600, 800, 1000, 2000])\n\n p = [1, 10, 20, 40, 60, 80, 100, 200, 400, 600, 800, 1000]\n\n z = [\n [1.0007, 1.0066, 1.0134, 1.0275, 1.0422, 1.0575, 1.0734, 1.163, 1.355, 1.555, 1.753, 1.936],\n [1.0005, 1.0059, 1.0117, 1.0236, 1.0357, 1.0479, 1.0603, 1.124, 1.253, 1.383, 1.510, 1.636],\n [1.0004, 1.0048, 1.0096, 1.0192, 1.0289, 1.0386, 1.0484, 1.098, 1.196, 1.293, 1.388, 1.481],\n [1.0004, 1.0040, 1.0080, 1.0160, 1.0240, 1.0320, 1.0400, 1.080, 1.159, 1.236, 1.311, 1.385],\n [1.0003, 1.0034, 1.0068, 1.0136, 1.0204, 1.0272, 1.0340, 1.068, 1.133, 1.197, 1.259, 1.320],\n [1.0002, 1.0026, 1.0052, 1.0104, 1.0156, 1.0208, 1.0259, 1.051, 1.100, 1.147, 1.193, 1.237],\n [1.0002, 1.0021, 1.0042, 1.0084, 1.0126, 1.0168, 1.0209, 1.041, 1.080, 1.117, 1.153, 1.187],\n [1.0009, 1.0013, 1.0023, 1.0044, 1.0065, 1.0086, 1.0107, 1.021, 1.040, 1.057, 1.073, 1.088],\n ]\n\n interp_func = interpolate.interp2d(p, temp, z)\n\n z_in = interp_func(p_in, temp_in)\n z_out = interp_func(p_out, temp_out)\n\n return [z_in, z_out]" ]
[ "0.65743506", "0.63173485", "0.60780877", "0.60345995", "0.5920918", "0.5710167", "0.5684219", "0.56176597", "0.56087387", "0.5590726", "0.5568226", "0.556281", "0.5558012", "0.5548983", "0.5540906", "0.5426001", "0.5426001", "0.5406237", "0.53970987", "0.5395093", "0.53894615", "0.53726643", "0.53536415", "0.5352041", "0.5330332", "0.53212094", "0.5295059", "0.52926826", "0.5283007", "0.5263422" ]
0.65871215
0
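The record above prepares FFT-domain factors (via a levinson() helper and fft() that are not included in the snippet) so that a symmetric Toeplitz system given by its first column can later be applied as y = T^{-1} u. Since those helpers are not available here, the sketch below only illustrates the target operation itself, using SciPy as a reference implementation; the test column and right-hand side are made up for the example.

import numpy as np
from scipy.linalg import toeplitz, solve_toeplitz

rng = np.random.default_rng(0)
n = 8
c = np.r_[2.0 * n, rng.standard_normal(n - 1)]  # first column of a symmetric Toeplitz T
u = rng.standard_normal(n)

y_ref = solve_toeplitz(c, u)                 # reference answer: solve T y = u
assert np.allclose(toeplitz(c) @ y_ref, u)   # i.e. y = T^{-1} u, the operation the prep enables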
Matrix multiplication with the inverse of a block-diagonal matrix having Toeplitz blocks: y = T^{-1} u. Analogous to toeplitz_inverse_multiplication().
def bd_toeplitz_inverse_multiplication(u, *arrs):

    y = zeros(shape(u))
    n_start = 0
    n_end = 0
    for t in arrs:
        n_start = n_end
        n_end += len(t[3])  # len(t[3]) is the length of the block
        y[n_start:n_end] = toeplitz_inverse_multiplication(u[n_start:n_end], *t)
    assert len(y) == n_end
    return y
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def toeplitz_inverse_multiplication_prep(T_column):\n \n phi=1\n psi=2\n assert phi != 0\n assert psi != 0\n assert phi != psi\n \n n = len(T_column)\n \n x = levinson(T_column, np.concatenate( (np.array([1]), np.zeros((n-1,))) ) )\n y = levinson(T_column, np.concatenate( (np.zeros((n-1,)), np.array([1])) ) )\n\n \n \n x_0 = x[0]\n \n D_phi = (phi**(1/n))**np.arange(0,n)\n D_psi = (psi**(1/n))**np.arange(0,n)\n\n Lambda_1 = fft(D_psi*x)\n Lambda_2 = fft(D_phi*np.concatenate(([phi*y[-1]], y[0:-1])))\n Lambda_3 = fft(D_psi*np.concatenate(([psi*y[-1]], y[0:-1])))\n Lambda_4 = fft(D_phi*x)\n \n return (x_0, phi, psi, D_phi, D_psi, Lambda_1, Lambda_2, Lambda_3, Lambda_4)", "def inv(T):\n K, L = T.shape[1:3]\n squ_matrix = np.einsum('ijkl->ikjl', T).reshape((K*L, K*L),order='F')\n t = np.linalg.inv(squ_matrix)\n return np.einsum('ijkl->ikjl', t.reshape((K,L,K,L), order='F'))", "def inverse_matrice(T):\n a,b,c,d = T[0][0],T[0][1],T[1][0],T[1][1]\n det = a*d-b*c\n aa,bb,cc,dd = d/det,-b/det,-c/det,a/det\n Tinv = [[aa,bb],[cc,dd]]\n return Tinv", "def inverse(self):\n if not self.is_square():\n raise(ValueError, \"Non-square Matrix does not have an inverse.\")\n if self.h > 2:\n raise(NotImplementedError, \"inversion not implemented for matrices larger than 2x2.\")\n # TODO - your code here\n inverse = []\n if self.h == 1:\n temp = []\n temp.append(1/self.g[0][0])\n inverse.append(temp)\n else:\n identity_matrix = identity(self.h)\n det_term = 1/self.determinant()\n trace_term = self.trace()\n # implement intermediate scaling step locally\n # trace_x_I = trace_term * identity_matrix\n trace_x_I = []\n for i in range(len(self.g)):\n temp_row = []\n for j in range(len(self.g[i])):\n temp_row.append(trace_term * identity_matrix[i][j])\n trace_x_I.append(temp_row)\n # implement sub-traction locally\n # sub_term = trace_x_I - self.g\n sub_term = []\n for i in range(len(trace_x_I)):\n temp_row = []\n for j in range(len(trace_x_I[i])):\n temp_row.append(trace_x_I[i][j] - self.g[i][j])\n sub_term.append(temp_row)\n # implement final scaling step locally\n # inverse = det_term * sub_term\n inverse = []\n for i in range(len(sub_term)):\n temp_row = []\n for j in range(len(sub_term[i])):\n temp_row.append(det_term * sub_term[i][j])\n inverse.append(temp_row)\n return Matrix(inverse)\n # TODO - your code here", "def getInverseMatrix(self) -> CMatrix4:\n ...", "def inverse(self):\r\n \r\n Mi=mat4()\r\n d=self.determinant()\r\n for i in range(4):\r\n for j in range(4):\r\n sign=1-((i+j)%2)*2\r\n m3=self._submat(i,j)\r\n Mi[j,i]=sign*m3.determinant()/d\r\n return Mi", "def inverse_basis(T, dimensions, t):\n B = basis(T, dimensions, t)\n return inv(B.T.dot(B)).dot(B.T)", "def toeplitz_inverse_multiplication(u, x_0, phi, psi, D_phi, D_psi, Lambda_1, Lambda_2, Lambda_3, Lambda_4):\n\n y = fft(D_phi*u)\n a = Lambda_1*fft(D_psi*(1/D_phi)*ifft(Lambda_2*y))\n b = Lambda_3*fft(D_psi*(1/D_phi)*ifft(Lambda_4*y))\n y = (1/D_psi)*real(ifft(a-b))/(x_0*(phi-psi))\n \n return y", "def inverse(self, y):\n device = y.device\n return t.einsum('ij,k,kj->ik', y, 1. / t.sqrt(self.eig).to(device), self.rot.to(device))", "def inverse(self) -> 'Matrix':\n num_R, num_C = self.shape()\n assert num_R == num_C, f\"Must be a square matrix. This one is {self.shape()}.\"\n # -------------------------------------------------------\n # TODO: You write this one.\n\n # 1) Construct the minor_matrix. 
Feel free to make this a separate method.\n minor_matrix_times_cofactor = Matrix.zeros(self.shape())\n\n for i in range (num_R):\n for j in range(num_C):\n minor_matrix_times_cofactor.mat[i][j] = self.get_minor(i,j).determinant() * (-1)**(i+j)\n\n minor_matrix_times_cofactor.display(message=\"minor\")\n # 2) Calculate the determinant, either by calling the determinant() method or by using the minor_matrix (faster)\n det = 0\n for i in range (num_R):\n det += self.mat[i][0] * minor_matrix_times_cofactor.mat[i][0]\n #print (f\"determinant: {self.determinant()}\")\n # 3) The inverse is the transpose of the minor matrix, divided by the determinant. Make sure that the determinant\n # isn't zero!\n if det == 0:\n return None\n return minor_matrix_times_cofactor.transpose().times(1/det)\n\n return Matrix([[\"Not yet written\"]]) # remove this when you add your code.\n # -------------------------------------------------------", "def _get_inv(self):\n m,d = self.B.shape\n Im = np.eye(m)\n Id = np.eye(d)\n BBt = self.B@self.B.T\n I_BBt_inv = np.linalg.pinv(Im + BBt)\n \n return (1/self.alpha)*(Id - self.B.T@( I_BBt_inv@self.B/self.alpha))", "def invert(self):\n if self.m != self.n:\n raise exc.LinearAlgebraError(\"cannot invert a non-square matrix\")\n if self.determinant == 0:\n raise exc.LinearAlgebraError(\"cannot invert a singular matrix\")\n # TODO: implement block matrices in their own method\n block_rows = [r1 + r2 for r1, r2 in\n zip(self.data, self.makeIdentity(self.m).data)]\n inverse_block = Matrix.fromRows(block_rows).row_reduce()\n return inverse_block.subset([i for i in range(self.m)],\n [j + self.n for j in range(self.n)])", "def _inverse(self, y):\n d = self._compute_shared(y=y)\n rely = y - d.y_k # tf.where(d.out_of_bounds, tf.zeros_like(y), y - d.y_k)\n term2 = rely * (d.d_kp1 + d.d_k - 2 * d.s_k)\n # These terms are the a, b, c terms of the quadratic formula.\n a = d.h_k * (d.s_k - d.d_k) + term2\n b = d.h_k * d.d_k - term2\n c = -d.s_k * rely\n # The expression used here has better numerical behavior for small 4*a*c.\n relx = tf.where(\n tf.equal(rely, 0), tf.zeros_like(a),\n (2 * c) / (-b - tf.sqrt(b**2 - 4 * a * c)))\n return relx * d.w_k + d.x_k #tf.where(d.out_of_bounds, y, relx * d.w_k + d.x_k)", "def inverse(self):\n self.check_square()\n\n\n N = self.rows\n\n inverse = make_matrix(N, N)\n\n # Solve on a per-column basis using Ax = b formalism\n for j in range(N):\n b = make_matrix(N, 1)\n b[j, 0] = 1\n\n x = self.solve_linear_system(b)\n\n for i in range(N):\n inverse[i, j] = x[i, 0]\n\n return inverse", "def inv(transform_matrix):\n\n r = transform_matrix[0:3, 0:3]\n t = transform_matrix[0:3, 3]\n t_inv = -1 * r.T.dot(t)\n transform_inv = np.eye(4)\n transform_inv[0:3, 0:3] = r.T\n transform_inv[0:3, 3] = t_inv\n\n return transform_inv", "def invertMatrixZN(M, N):\n n = M.shape[0] # shape = (nzeilen, nspalten), also shape[0] = nzeilen\n M = M.copy() # nicht an der Originalmatrix rumspielen\n I = np.identity(n, int) # Einheitsmatrix -> wird später das Ergebnis\n for row in range(n):\n if not invertierbar(M[row, row], N):\n # müssen Zeilen tauschen\n for j in range(row+1, n):\n if invertierbar(M[j, row], N):\n tmp = M[row, :].copy()\n M[row, :] = M[j, :]\n M[j, :] = tmp\n tmp = I[row, :].copy()\n I[row, :] = I[j, :]\n I[j, :] = tmp\n break\n else:\n # hier kommen wir hin wenn die for-Schleife nicht durch ein\n # break beendet wurde, also keine geeignete Zeile zum Tauschen\n # existiert\n raise ValueError(\"Matrix nicht invertierbar\")\n # Zeile mit dem Inversen des 
Pivot-Elements multiplizieren, um eine 1\n # auf der Diagonalen zu erreichen\n faktor = invertZN(M[row, row], N)\n M[row, :] = (M[row, :] * faktor) % N\n I[row, :] = (I[row, :] * faktor) % N\n \n # Nullen unterhalb des aktuellen Pivots erzeugen\n for j in range(row + 1, n):\n if invertierbar(M[j, row], N):\n faktor = invertZN(M[j, row], N)\n M[j, :] = (M[j, :] * faktor - M[row, :]) % N\n I[j, :] = (I[j, :] * faktor - I[row, :]) % N\n elif M[j, row] != 0:\n # In Z_N können Nullteiler auftreten, z.B. die 8 in Z_{12}.\n # Um dort eine 0 zu erzeugen, müssen wir mit dem kgV der beiden\n # Zahlen multiplizieren. Da ggt*kgv = mn gilt, können wir dazu\n # den bereits implementierten ggt-Algorithmus nehmen.\n faktor = N * M[j, row] // krypto1.ggT(N, M[j, row])\n M[j, :] = (M[j, :] * faktor) % N\n I[j, :] = (I[j, :] * faktor) % N\n # jetzt haben wir eine obere Dreiecksmatrix. Um daraus eine Diagonalmatrix\n # zu machen, müssen wir nun noch einmal von unten nach oben durchgehen\n # um die Einträge oberhalb der Diagonalen zu Nullen zu machen.\n for row in range(n-1, -1, -1):\n for j in range(row + 1, n):\n faktor = M[row, j]\n M[row, :] = (M[row, :] - faktor*M[j, :]) % N\n I[row, :] = (I[row, :] - faktor*I[j, :]) % N\n return I", "def _z2matmul(self, left, right):\n prod = np.mod(np.dot(left, right), 2)\n return prod", "def inverse(self):\n if self.determinant() != 0:\n ops = reduce_to_red_echelon(self.data.copy(), True)[1]\n matrix = identity_matrix(self.n_rows).data\n \n if ops:\n if isinstance(ops[0], str):\n ops = [ops]\n \n for op in ops:\n if op[0] == 'swap':\n matrix = row_swap(matrix, op[1], op[2])\n elif op[0] == 'multiplication':\n matrix = row_multiply(matrix, op[1], op[2])\n elif op[0] == 'subtract':\n matrix = row_subtract(matrix, op[1], op[2], op[3])\n else:\n raise ValueError('Row operation not recognized')\n else:\n raise ValueError('Matrix has a determinant of 0 and is not invertible')\n return Matrix(matrix)", "def inverse_cayley_transform(z: torch.Tensor) -> torch.Tensor:\n identity = identity_like(z)\n i_identity = multiply_by_i(identity)\n\n z_minus_id = z - i_identity\n inv_z_plus_id = inverse(z + i_identity)\n return z_minus_id @ inv_z_plus_id", "def inverse(self, ys):\n with torch.no_grad():\n xs = torch.matmul(ys, torch.diag(torch.reciprocal(torch.exp(self.scaling_diag))))\n xs = self.layer4.inverse(xs)\n xs = self.layer3.inverse(xs)\n xs = self.layer2.inverse(xs)\n xs = self.layer1.inverse(xs)\n return xs", "def inverse(self):\n # TODO\n # detA\n if not self.is_square():\n raise(\n ValueError, \"Non-square Matrix does not have an inverse.\")\n if self.h > 2:\n raise(\n NotImplementedError, \"inversion not implemented for matrices larger than 2x2.\")\n\n mD = self.determinant()\n if self.h == 1:\n if self.g[0][0] = 0:\n raise(NotImplementedError,\n \"The 1x1 Matrix contains 0 can't inverse\")\n else:\n return [[1 / self.g[0][0]]] \n for i in range(self.h): # Calculates the inverse of a 2x2 Matrix.\n my_Matrix = zeroes(2, 2)\n my_Matrix.g[1][1] = self.g[0][0] / mD\n my_Matrix.g[0][0] = self.g[1][1] / mD\n my_Matrix.g[0][1] = - self.g[0][1] / mD\n my_Matrix.g[1][0] = - self.g[1][0] / mD\n return my_Matrix\n\n # trace A\n # 与矩阵TraceA * I identity 单位矩阵", "def ComputeInverseInnerOrientation(self):\n a0 = self.innerOrientationParameters[0]\n b0 = self.innerOrientationParameters[1]\n a1 = self.innerOrientationParameters[2]\n a2 = self.innerOrientationParameters[3]\n b1 = self.innerOrientationParameters[4]\n b2 = self.innerOrientationParameters[5]\n\n mat = np.array([[a1[0], a2[0]], 
[b1[0], b2[0]]])\n mat = la.inv(mat)\n\n return np.array([a0[0], b0[0], mat[0, 0], mat[0, 1], mat[1, 0], mat[1, 1]]).T", "def inv(self, Am):\r\n # Section 1: MAmke sure Am cAmn be inverted.\r\n self.check_squareness(Am)\r\n self.check_non_singular(Am)\r\n \r\n # Section 2: MAmke copies of Am & I, AmM & IM, to use for row ops\r\n n = len(Am)\r\n AmM = self.copy_matrix(Am)\r\n I = self.identity_matrix(n)\r\n IM = self.copy_matrix(I)\r\n \r\n # Section 3: Perform row operAmtions\r\n indices = list(range(n)) # to Amllow flexible row referencing ***\r\n for fd in range(n): # fd stAmnds for focus diAmgonAml\r\n fdScAmler = 1.0 / AmM[fd][fd]\r\n # FIRST: scAmle fd row with fd inverse. \r\n for j in range(n): # Use j to indicAmte column looping.\r\n AmM[fd][j] *= fdScAmler\r\n IM[fd][j] *= fdScAmler\r\n # SECOND: operAmte on Amll rows except fd row Ams follows:\r\n for i in indices[0:fd] + indices[fd+1:]: \r\n # *** skip row with fd in it.\r\n crScAmler = AmM[i][fd] # cr stAmnds for \"current row\".\r\n for j in range(n): \r\n # cr - crScAmler * fdRow, but one element Amt Am time.\r\n AmM[i][j] = AmM[i][j] - crScAmler * AmM[fd][j]\r\n IM[i][j] = IM[i][j] - crScAmler * IM[fd][j]\r\n \r\n return IM", "def inv(self):\n inv = np.linalg.inv(self._mat)\n return MoebTr(inv[0][0], inv[0][1], inv[1][0], inv[1][1])", "def modular_inverse(self):\n i = gmpy2.invert(self.c2, self.n)\n mx = pow(self.c1, self.a, self.n)\n my = pow(i, int(-self.b), self.n)\n self.m= mx * my % self.n", "def get_invntt_operator(self):\n\n\n Operator = []\n invntt_qubic = self.qubic.get_invntt_operator()\n R_qubic = ReshapeOperator(invntt_qubic.shapeout, invntt_qubic.shape[0])\n Operator.append(R_qubic(invntt_qubic(R_qubic.T)))\n\n invntt_planck = self.planck.get_invntt_operator()\n R_planck = ReshapeOperator(invntt_planck.shapeout, invntt_planck.shape[0])\n Operator.append(R_planck(invntt_planck(R_planck.T)))\n\n return BlockDiagonalOperator(Operator, axisout=0)", "def inverse(self):\n if not self.is_square():\n raise(ValueError, \"Non-square Matrix does not have an inverse.\")\n if self.h > 2:\n raise(NotImplementedError, \"inversion not implemented for matrices larger than 2x2.\")\n\n # TODO - your code here\n if self.h == 1:\n inverse = [[1/self.g[0][0]]];\n else:\n a = self.g[0][0];\n b = self.g[0][1];\n c = self.g[1][0];\n d = self.g[1][1];\n if(a*d==b*c):\n raise ValueError('matrix does not have a inverse!');\n else:\n weigh = 1/(a*d-b*c);\n inverse = [[weigh*d,weigh*-1*b],[weigh*-1*c,weigh*a]];\n return Matrix(inverse);", "def inverse(self: Float[LinearOperator, \"*batch N N\"]) -> Float[LinearOperator, \"*batch N N\"]:\n return self.__class__(self._diag.reciprocal())", "def invert(self):\n\n if self.rows != self.columns:\n raise ValueError(\"Matrix must be square to invert\")\n\n A, operations = self.to_reduced_row_echelon()\n if not A.is_identity():\n return 0\n\n # If A was reduced to the identity matrix, then the same set of operations will take I to the inverse of A.\n # [A I] -> [I A^(-1)]\n\n I = IdentityMatrix(size = self.rows)\n for operation in operations:\n func = I.__getattribute__(operation[0])\n args = operation[1:]\n func(*args)\n\n return I", "def inverse(self: Float[LinearOperator, \"*batch N N\"]) -> Float[LinearOperator, \"*batch N N\"]:\n return ConstantDiagLinearOperator(self.diag_values.reciprocal(), diag_shape=self.diag_shape)" ]
[ "0.65371925", "0.6473114", "0.639856", "0.6361315", "0.6302969", "0.6292023", "0.6192051", "0.61344135", "0.61059606", "0.60929507", "0.6069136", "0.6021487", "0.60205114", "0.6011188", "0.5997013", "0.5966648", "0.5926399", "0.5926365", "0.5916658", "0.5888663", "0.5883227", "0.5874907", "0.5866973", "0.58164996", "0.5813204", "0.5803478", "0.58029234", "0.5792404", "0.5783036", "0.57659465" ]
0.7164876
0
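bd_toeplitz_inverse_multiplication above simply walks the blocks and delegates each slice to the per-block solver, using the prep tuples from the previous record. Because those tuples require helpers not shown here, the following is a self-contained sketch of the same block-by-block idea, with scipy.linalg.solve_toeplitz standing in for the per-block solve and made-up block columns.

import numpy as np
from scipy.linalg import block_diag, toeplitz, solve_toeplitz

rng = np.random.default_rng(1)
cols = [np.r_[10.0, rng.standard_normal(3)], np.r_[12.0, rng.standard_normal(5)]]  # one first column per block
u = rng.standard_normal(sum(len(c) for c in cols))

y, start = np.empty_like(u), 0
for c in cols:                                   # per-block solve, mirroring the loop in the record
    end = start + len(c)
    y[start:end] = solve_toeplitz(c, u[start:end])
    start = end

T = block_diag(*[toeplitz(c) for c in cols])     # dense block-diagonal reference
assert np.allclose(T @ y, u)                     # so y = T^{-1} u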
Parse a single line of csv-to-arrow output. Raise RuntimeError if a line cannot be parsed. (We can't recover from that because we don't know what's happening.)
def _parse_csv_to_arrow_warning(line: str) -> I18nMessage:
    for pattern, builder in _ERROR_PATTERNS:
        match = pattern.match(line)
        if match:
            return builder(**match.groupdict())
    raise RuntimeError("Could not parse csv-to-arrow output line: %r" % line)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_line(self, line):\n raise NotImplementedError", "def test_parseLine2(mocker):\n \n # given: setup test framework\n worker = Worker()\n testString = \"11/11/19,Brighter Futures,12000\"\n \n # when:\n result = worker.parseLineCSV(testString)\n \n # then: (Using PyTruth assertions)\n AssertThat(result).IsNone()", "def test_parseLine1(mocker):\n \n # given: setup test framework\n worker = Worker()\n testString = \"12Nov2019,Teacher,Brighter Futures,12000\"\n expectedResult = {\n 'date': '2019-11-12',\n 'job_title': 'Teacher',\n 'company_name': 'Brighter Futures',\n 'salary': 12000\n }\n \n # when:\n result = worker.parseLineCSV(testString)\n \n # then:\n assert result == expectedResult", "def _parse_csv(\n path: Path,\n *,\n settings: Settings = DEFAULT_SETTINGS,\n encoding: Optional[str],\n delimiter: Optional[str],\n has_header: bool,\n autoconvert_text_to_numbers: bool,\n) -> ParseCsvResult:\n warnings = []\n\n with contextlib.ExitStack() as ctx:\n n_bytes = path.stat().st_size\n if n_bytes > settings.MAX_CSV_BYTES:\n # We can't simply os.truncate() the input file, because sandboxed code\n # can't modify input files.\n truncated_path = ctx.enter_context(tempfile_context(prefix=\"truncated-\"))\n with path.open(\"rb\") as src, truncated_path.open(\"wb\") as dest:\n os.sendfile(dest.fileno(), src.fileno(), 0, settings.MAX_CSV_BYTES)\n path = truncated_path\n warnings.append(\n _trans_cjwparse(\n \"csv.truncated_file\",\n \"{n_bytes_truncated, one{Truncated # byte} other{Truncated # bytes}} from file (maximum is {max_n_bytes} bytes)\",\n dict(\n n_bytes_truncated=(n_bytes - settings.MAX_CSV_BYTES),\n max_n_bytes=settings.MAX_CSV_BYTES,\n ),\n )\n )\n\n utf8_path = ctx.enter_context(tempfile_context(prefix=\"utf8-\", suffix=\".txt\"))\n # raises LookupError, UnicodeError\n warnings.extend(\n transcode_to_utf8_and_warn(path, utf8_path, encoding, settings=settings)\n )\n\n # Sniff delimiter\n if not delimiter:\n delimiter = detect_delimiter(utf8_path, settings)\n\n with tempfile_context(suffix=\".arrow\") as arrow_path:\n # raise subprocess.CalledProcessError on error ... 
but there is no\n # error csv-to-arrow will throw that we can recover from.\n child = subprocess.run(\n [\n \"/usr/bin/csv-to-arrow\",\n \"--delimiter\",\n delimiter,\n \"--max-rows\",\n str(settings.MAX_ROWS_PER_TABLE),\n \"--max-columns\",\n str(settings.MAX_COLUMNS_PER_TABLE),\n \"--max-bytes-per-value\",\n str(settings.MAX_BYTES_PER_VALUE),\n utf8_path.as_posix(),\n arrow_path.as_posix(),\n ],\n capture_output=True,\n check=True,\n )\n warnings.extend(_parse_csv_to_arrow_warnings(child.stdout.decode(\"utf-8\")))\n\n reader = pyarrow.ipc.open_file(arrow_path.as_posix())\n raw_table = reader.read_all() # efficient -- RAM is mmapped\n\n table, more_warnings = _postprocess_table(\n raw_table, has_header, autoconvert_text_to_numbers, settings\n )\n return ParseCsvResult(table, warnings + more_warnings)", "def _parse_tuple(self, line):\n elements = line[1:-1].split(\",\\t\")\n if len(elements) == len(self.description):\n return tuple(\n [\n pythonize.convert(element.strip(), description[1])\n for (element, description) in zip(elements, self.description)\n ]\n )\n else:\n self._exception_handler(\n InterfaceError, \"length of row doesn't match header\"\n )", "def doomed_parser(line):\n raise exceptions.LineParseException('Error occurred')", "def parse(cls, line):\r\n raise NotImplementedError", "def from_csv_line(line):\r\n return line.strip().split(',')", "def csv_line(value_parser):\n def convert(string):\n return list(map(value_parser, string.split(',')))\n return convert", "def ParseRow(self, parser_mediator, row_offset, row):\n try:\n date_time = self._CreateDateTime(row['date'], row['time'])\n except errors.ParseError as exception:\n parser_mediator.ProduceExtractionWarning(\n 'Unable to create date time with error: {0!s}'.format(exception))\n date_time = None\n\n status = row['status']\n if status:\n status = status.rstrip()\n\n event_data = McafeeAVEventData()\n event_data.action = row['action']\n event_data.filename = row['filename']\n event_data.offset = row_offset\n event_data.rule = row['rule']\n event_data.status = status\n event_data.trigger_location = row['trigger_location']\n event_data.username = row['username']\n event_data.written_time = date_time\n\n parser_mediator.ProduceEventData(event_data)", "def parse_line(line: str) -> str:\n return line", "def parse_line(line: str) -> str:\n return line", "def parse_line(line: str) -> str:\n return line", "def parse_line(line: str) -> str:\n return line", "def csv_readline(line):\n for row in csv.reader([line]):\n return row", "def csv_readline(line):\n for row in csv.reader([line]):\n return row", "def parse_line(self, line):\n if self.signal_eof:\n return \"\"\n\n match = re.search(\"^([\\w\\s]+from) ([^:]+):(\\d+)(:|,)$\", line)\n if match:\n return self.parse_line_from(match)\n\n match = re.search(\"^([^:]+):(?:((?:\\d+:)?\\d+):)?(?:(error|warning|note):)?(.+)$\", line)\n if match:\n return self.parse_line_err(match)\n\n return line", "def process_line(line):\n\n name_comp_list = []\n givenname_comp_list = []\n surname_comp_list = []\n geocode_comp_list = []\n locality_comp_list = []\n date1_comp_list = []\n date2_comp_list = []\n\n # Split the line into the basic fields - - - - - - - - - - - - - - - - - - -\n #\n if (config.in_file_type in ['CSV','CSVQ','TAB','TABQ']):\n # Comma or tabulator separated\n try:\n line_list = config.line_parser.parse(line)\n except:\n log_message('CSV line parsing failed with inout: '+line,'err')\n\n if (len(line_list) < config.input_len):\n log_message('Input line does not contain enough fields,' 
+\\\n 'fill up with empty fields','warn')\n while (len(line_list) < config.input_len):\n line_list.append('')\n\n config.curr_line_list = line_list # Save current line list\n\n # Extract fields into different component lists - - - - - - - - - - - - - -\n #\n if (config.input_component['name'] != []): # Extract name fields\n for i in config.input_component['name']:\n name_comp_list.append(line_list[i])\n\n else: # Extract givenname and surname into separate components - - - - - -\n if (config.input_component['givenname'] != []): # Extract g-name fields\n for i in config.input_component['givenname']:\n givenname_comp_list.append(line_list[i])\n\n if (config.input_component['surname'] != []): # Extract surname fields\n for i in config.input_component['surname']:\n surname_comp_list.append(line_list[i])\n\n if (config.input_component['geocode'] != []): # Extract geocode fields\n for i in config.input_component['geocode']:\n geocode_comp_list.append(line_list[i])\n\n if (config.input_component['locality'] != []): # Extract locality fields\n for i in config.input_component['locality']:\n locality_comp_list.append(line_list[i])\n\n if (config.input_component['date1'] != []): # Extract date1 fields\n for i in config.input_component['date1']:\n date1_comp_list.append(line_list[i])\n\n if (config.input_component['date2'] != []): # Extract date2 fields\n for i in config.input_component['date2']:\n date2_comp_list.append(line_list[i])\n\n elif (config.in_file_type == 'COL'): # Column based input file - - - - - - -\n\n if (len(line) < config.input_len):\n log_message('Input line is not long enough, fill up with spaces','warn')\n line += ' '*(config.input_len-len(line))\n\n if (config.input_component['name'] != []): # Extract name fields\n for (col_start,length) in config.input_component['name']:\n name_comp_list.append(line[col_start,col_start+length])\n\n else: # Extract givenname and surname into separate components - - - - - -\n if (config.input_component['givenname'] != []): # Extract g-name fields\n for (col_start,length) in config.input_component['givenname']:\n givenname_comp_list.append(line[col_start,col_start+length])\n\n if (config.input_component['surname'] != []): # Extract surname fields\n for (col_start,length) in config.input_component['surname']:\n surname_comp_list.append(line[col_start,col_start+length])\n\n if (config.input_component['geocode'] != []): # Extract geocode fields\n for (col_start,length) in config.input_component['geocode']:\n geocode_comp_list.append(line[col_start,col_start+length])\n\n if (config.input_component['locality'] != []): # Extract locality fields\n for (col_start,length) in config.input_component['locality']:\n locality_comp_list.append(line[col_start,col_start+length])\n\n if (config.input_component['date1'] != []): # Extract date1 fields\n for (col_start,length) in config.input_component['date1']:\n date1_comp_list.append(line[col_start,col_start+length])\n\n if (config.input_component['date2'] != []): # Extract date2 fields\n for (col_start,length) in config.input_component['date2']:\n date2_comp_list.append(line[col_start,col_start+length])\n\n # elif (config.in_file_type == 'SQL'): # - - - - - - - - - - - - - - - - - -\n\n ################################\n # Add later: SQL database access\n ################################\n\n msg = [' Component basic field lists:', \\\n ' Name: '+str(name_comp_list), \\\n ' Given name: '+str(givenname_comp_list), \\\n ' Surname: '+str(surname_comp_list), \\\n ' Geocode: '+str(geocode_comp_list), \\\n ' Locality: 
'+str(locality_comp_list), \\\n ' Date1: '+str(date1_comp_list), \\\n ' Date2: '+str(date2_comp_list)]\n log_message(msg,'v2')\n\n name_comp = ''\n givenname_comp = ''\n surname_comp = ''\n geocode_comp = ''\n locality_comp = ''\n date1_comp = ''\n date2_comp = ''\n\n # Now clean and then concatenate component lists into strings - - - - - - - -\n #\n if (name_comp_list != []): # Name component\n name_comp = name_comp_list[0] # Start with first field in list\n\n for f in name_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['name'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['name'] == 1):\n sep = check_field_spill(name_comp, f)\n\n name_comp = name_comp+sep+f # Append separator and field\n\n if (givenname_comp_list != []): # Givenname component - - - - - - - - - - -\n givenname_comp = givenname_comp_list[0] # Start with first field in list\n\n for f in givenname_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['givenname'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['givenname'] == 1):\n sep = check_field_spill(givenname_comp, f)\n\n givenname_comp = givenname_comp+sep+f # Append separator and field\n\n if (surname_comp_list != []): # Surname component - - - - - - - - - - - - -\n surname_comp = surname_comp_list[0] # Start with first field in list\n\n for f in surname_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['surname'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['surname'] == 1):\n sep = check_field_spill(surname_comp, f)\n\n surname_comp = surname_comp+sep+f # Append separator and field\n\n if (geocode_comp_list != []): # Geocode component - - - - - - - - - - - - -\n geocode_comp = geocode_comp_list[0] # Start with first field in list\n\n for f in geocode_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['geocode'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['geocode'] == 1):\n sep = check_field_spill(geocode_comp, f)\n\n geocode_comp = geocode_comp+sep+f # Append separator and field\n\n if (locality_comp_list != []): # Locality component - - - - - - - - - - - -\n locality_comp = locality_comp_list[0] # Start with first field in list\n\n for f in locality_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['locality'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['locality'] == 1):\n sep = check_field_spill(locality_comp, f)\n\n locality_comp = locality_comp+sep+f # Append separator and field\n\n if (date1_comp_list != []): # Date1 component - - - - - - - - - 
- - - - - -\n date1_comp = date1_comp_list[0] # Start with first field in list\n\n for f in date1_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['date1'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['date1'] == 1):\n if (date1_comp[-1] != ' ') and (f[0] != ' '):\n tmp_list0 = date1_comp.split()\n tmp_list1 = f.split()\n check_word = tmp_list0[-1]+tmp_list1[0]\n\n if (check_word in ['jan','feb','mar','apr','may','jun','jul','aug', \\\n 'sep','oct','nov','dec','january','february','march','april', \\\n 'may','june','july','august','september','october','november', \\\n 'december']):\n\n sep = '' # Set separator to no space\n msg = ' Correct date1 word spilling: \"'+date1_comp+'\",\"'+f+'\"'\n log_message(msg,'v1')\n\n date1_comp = date1_comp+sep+f # Append separator and field\n\n if (date2_comp_list != []): # Date2 component - - - - - - - - - - - - - - -\n date2_comp = date2_comp_list[0] # Start with first field in list\n\n for f in date2_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['date2'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['date2'] == 1):\n if (date2_comp[-1] != ' ') and (f[0] != ' '):\n tmp_list0 = date1_comp.split()\n tmp_list1 = f.split()\n check_word = tmp_list0[-1]+tmp_list1[0]\n\n if (check_word in ['jan','feb','mar','apr','may','jun','jul','aug', \\\n 'sep','oct','nov','dec','january','february','march','april', \\\n 'may','june','july','august','september','october','november', \\\n 'december']):\n\n sep = '' # Set separator to no space\n msg = ' Correct date1 word spilling: \"'+date1_comp+'\",\"'+f+'\"'\n log_message(msg,'v1')\n\n date2_comp = date2_comp+sep+f # Append separator and field\n\n # Check if name component is given or givenname and surname separately - - -\n #\n if (config.input_component['givenname'] != []) or \\\n (config.input_component['surname'] != []):\n name_comp = [givenname_comp, surname_comp]\n\n msg = [' Components:', \\\n ' Name: \"'+str(name_comp)+'\"', \\\n ' Geocode: \"'+geocode_comp+'\"', \\\n ' Locality: \"'+locality_comp+'\"', \\\n ' Date1: \"'+date1_comp+'\"', \\\n ' Date2: \"'+date2_comp+'\"']\n log_message(msg,'v1')\n\n return [name_comp, geocode_comp, locality_comp, date1_comp, date2_comp]", "def parse(self, line):\n try:\n (year, month, day, hour, minute, second, microseconds, offset_hour, offset_minute, source, process, logentry) = re.match('^(\\d\\d\\d\\d)-(\\d\\d)-(\\d\\d)T(\\d\\d):(\\d\\d):(\\d\\d)\\.([\\d]+)\\+(\\d\\d):(\\d\\d) ([a-z]+)\\[([a-zA-Z0-9_.]+)\\]: ([0-9a-z-A-Z\\-_\\.\\[\\]:\\?\\#\\\",/\\ ={}\\'\\(\\)<>]+)$', line).groups()\n except:\n pass\n \n try:\n parsed_data = dict()\n parsed_data['timestamp'] = \" \".join([\"-\".join([year, month, day]), \":\".join([hour, minute, second])])\n parsed_data['log_time'] = datetime.datetime(int(year), int(month), int(day), int(hour), int(minute), int(second))\n parsed_data['log_source'] = source\n parsed_data['log_type'] = process\n except (AttributeError, UnboundLocalError):\n PARSE_ERRORS.append(line)\n return False\n\n #TODO: This still needs work on spaces in values surrounded by \" \" \n if parsed_data['log_source'] == 
\"heroku\":\n if logentry.__len__() > 1:\n logentry = re.sub(', ', ',', logentry)\n line_chunks = re.split(' ', logentry)\n for chunk in line_chunks:\n line_chunks = re.split('=', chunk)\n if line_chunks.__len__() > 2:\n #fwd and path are a little clunky to parse\n pass\n elif line_chunks.__len__() > 1:\n parsed_data[line_chunks[0]] = line_chunks[1]\n else:\n pass\n else:\n return False\n else:\n # TODO: [app] \n # Needs parsing. Do that here.\n return False\n\n return parsed_data", "def next_line(self, context, line):\n if \"\\t\" in line:\n next_index = line.find(\"\\t\", 0)\n while next_index != -1:\n extra_data = f\"Column: {next_index + 1}\"\n self.report_next_line_error(\n context, next_index + 1, extra_error_information=extra_data\n )\n next_index = line.find(\"\\t\", next_index + 1)", "def process_line(line: str):\n \n comment_start = line.find(';')\n\n # Remove comments, one comment per line allowed\n if comment_start != -1:\n line = line[:comment_start]\n\n line = line.strip()\n \n # Splits commands such that the command and all details are seperated\n # \"command ...\" -> [command, ...]\n try:\n command, contents = line.split(maxsplit = 1)\n # Deals with function names, two special commands, and empty lines\n except ValueError:\n if line == '':\n return None\n elif line[-1] == ':' or line == 'end' or line == 'ret':\n return (line,)\n\n # Splits depending on command type, some requiring one argument, others two\n try:\n one, two = contents.split(',')\n return command, one.strip(), two.strip()\n except ValueError:\n return command, contents.strip()", "def read(self, line):\n data = []\n if six.PY3 and type(line) == six.binary_type:\n line = line.decode('utf-8')\n\n csv_reader = csv.reader(six.StringIO(line),\n delimiter=self.delimiter,\n quotechar=self.quotechar,\n skipinitialspace=True)\n for cr in csv_reader:\n data = [decode_string(f).strip() for f in cr]\n break\n\n return None, data", "def parse_row(self, response, row):\n raise NotImplementedError", "def __read_csv(self) -> tuple:\n with open(self.csv_file) as f:\n reader = csv.reader(f)\n for row in reader:\n if row[0].isspace():\n raise StopIteration\n yield row", "def parse_csv(csv_file):\n if os.path.isfile(csv_file) == True:\n num_lines = sum(1 for line in open(csv_file))\n if num_lines > 1:\n try:\n data = pd.read_csv(csv_file, index_col=False)\n data.insert(0, 'id', range(1, 1 + len(data)))\n return(data)\n except pd.parser.CParserError, err:\n message = \"Can't parse REDCap data. 
Check CSV file: \" + csv_file\n print(message)\n logging.critical(message)\n exit(3)\n else:\n message = \"CSV file does not contain data: \" + csv_file\n print(message)\n logging.warning(message)\n return(None)\n else:\n message = \"Can't read CSV file: \" + csv_file\n print(message)\n logging.critical(message)\n exit(4)", "def process_line(self, line):\n columns = line.split('|')\n\n if len(line) == 0 or len(columns) < 16:\n return None # empty line or malformed line\n\n cmte_id, name, zip_code = columns[0], columns[7], columns[10][:5]\n transaction_dt, transaction_amt = columns[13], columns[14]\n other_id = columns[15]\n\n if len(other_id) > 0 or len(transaction_amt) == 0 or len(cmte_id) == 0 or len(name) == 0 or len(zip_code) < 5:\n return None # malformed data fields, ignore this line\n transaction_date = string_to_date(transaction_dt)\n if transaction_date is None:\n return None # 'TRANSACTION_DT' is an invalid date\n\n try:\n if self.repeat_donor(name, zip_code, transaction_date.year):\n # this record is from a repeat donor in any prior calendar year\n amount = float(transaction_amt)\n key = RecipientZipYear(cmte_id, zip_code, transaction_date.year)\n if key not in self.running_percentile:\n self.running_percentile[key] = RunningPercentile(self.percentile)\n self.running_percentile[key].add(amount)\n return self.print_record(key)\n else:\n return None # this record is not from a repeat donor\n except:\n return None # exception may comes from malformed line, so just ignore this line", "def parse_line(self, line):\n success = self.parser.handle_line(line)\n if success:\n self.data.update()\n else:\n self.bot.log(\"didn't handle line: '{}'\".format(line))", "def _process_text_line(self, line, columns, format, lower_case, num_line,\n fill_missing=0, filter_case=None,\n strict_separator=False):\n if not isinstance(line, list) and not isinstance(\n line, tuple) and not isinstance(line, numpy.ndarray):\n if format != \"tsv\":\n raise Exception(\"unable to process format \" + format)\n line = line.strip(\"\\r\\n \").replace(\"\\n\", \" \")\n line = DatabaseCore2._split_expr.split(line)\n\n if filter_case is not None:\n line = [filter_case(s) for s in line]\n\n try:\n if fill_missing > 0:\n m = max(columns.keys())\n if m >= len(line):\n line = copy.copy(line)\n add = 0\n while m >= len(line) and add < fill_missing:\n a, b = columns[len(line)]\n if b is int:\n line.append(\"0\")\n elif b is float:\n line.append(\"0.0\")\n elif b is decimal.Decimal:\n line.append(\"0\")\n elif b is str:\n line.append(\"\")\n else:\n line.append(\"\")\n add += 1\n\n res = {}\n for c, v in columns.items():\n if \"AUTOFILL\" in v:\n res[v[0]] = \"NULL\"\n elif \"AUTOINCREMENT\" in v:\n continue\n else:\n if c >= len(line):\n self.LOG(\n \"(a)line number \",\n num_line,\n \"*unable to process a line columns \",\n c,\n \"#\",\n line,\n \" columns \",\n columns)\n return None\n\n val = line[c]\n if len(v) > 2 and v[2].lower() not in [\n \"primarykey\", \"autofill\"]:\n val = v[2](val)\n\n try:\n if isinstance(v[1], tuple):\n val = v[1][0](val)\n elif v[1] is datetime.datetime:\n if isinstance(val, datetime.datetime):\n pass\n elif isinstance(val, str):\n val = datetime.datetime.parse(val)\n else:\n raise TypeError(\n \"unable to convert %s into datetime\" % str(\n type(val)))\n else:\n val = v[1](val)\n except ValueError: # as e :\n self.LOG(\n \"(b)line number \",\n num_line,\n \"**unable to process a line columns \",\n c,\n \"#\",\n v[0],\n \" type \",\n v[1],\n \" value \",\n repr(\n line[c]))\n return None\n\n 
if isinstance(val, str):\n val = val.replace(\"'\", \"''\")\n if lower_case:\n val = val.lower()\n res[v[0]] = val\n\n return res\n except Exception:\n self.LOG(\"(c)line number\", num_line,\n \"***unable to process a line columns:\", line)\n return None", "def parse_row(self, row):\n \n self.metadata = row", "def mapper(self, line_no, line):\n cell = csv_readline(line)\n if cell[0] == 'V':\n yield cell[4],1" ]
[ "0.6595832", "0.6529445", "0.62704617", "0.61401874", "0.61335003", "0.61316746", "0.61252147", "0.61061907", "0.5982218", "0.5961737", "0.5809438", "0.5809438", "0.5809438", "0.5809438", "0.5806658", "0.5806658", "0.5729117", "0.5704075", "0.5667828", "0.56519485", "0.5627262", "0.5607163", "0.55858445", "0.5569104", "0.55405027", "0.552808", "0.5510118", "0.5474966", "0.5470403", "0.54466015" ]
0.7278119
0
Return true if we should fastskip converting a pa.Array. The _true_ reason for this function is to test whether an Array contains "Inf" or "NaN". A numberconversion library will parse those. But _this_ library is for Workbench, and Workbench doesn't support NaN/Inf. So this function helps us decide _not_ to autoconvert a column when the intent isn't perfectly clear. Assume `arr` is of type `utf8` or a dictionary of `utf8`. Assume there are no gaps hidden in null values in the buffer. (It's up to the caller to prove this.)
def _utf8_chunk_may_contain_inf_or_nan(chunk: pyarrow.Array) -> bool: _, offsets_buf, data_buf = chunk.buffers() offsets = array.array("i") assert offsets.itemsize == 4 offsets.frombytes(offsets_buf) if sys.byteorder != "little": offsets.byteswap() # pyarrow is little-endian offset0 = offsets[chunk.offset] offsetN = offsets[chunk.offset + len(chunk)] # len(offsets) == 1 + len(chunk) b = data_buf[offset0:offsetN].to_pybytes() return SCARY_BYTE_REGEX.search(b) is not None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def asarray_chkfinite(a):\n a = asarray(a)\n if (a.dtype.char in typecodes['AllFloat']) \\\n and (_nx.isnan(a).any() or _nx.isinf(a).any()):\n raise ValueError, \"array must not contain infs or NaNs\"\n return a", "def is_array(self, arr):\n return isinstance(arr, np.ndarray)", "def pyarrow_array(arr, nan_to_null=False):\n import numpy as np\n import pyarrow as pa\n if nan_to_null and issubclass(arr.dtype.type,\n (np.floating, np.complexfloating)):\n isnan = np.isnan(arr)\n if isnan.any():\n pa_nul = pa.py_buffer(get_bitmap(isnan))\n return pa.Array.from_buffers(pa.from_numpy_dtype(arr.dtype),\n arr.size,\n [pa_nul, pa.py_buffer(arr)])\n return pa.Array.from_buffers(pa.from_numpy_dtype(arr.dtype),\n arr.size,\n [None, pa.py_buffer(arr)])", "def is_array(self):\n return False", "def isfloatarray(cell):\n try:\n cell.astype(float)\n return True\n except ValueError:\n return False", "def sanitize_array(array):\n a = np.ravel(array)\n maxi = np.nanmax(a[np.isfinite(a)])\n mini = np.nanmin(a[np.isfinite(a)])\n array[array == float('inf')] = maxi\n array[array == float('-inf')] = mini\n mid = (maxi + mini) / 2\n array[np.isnan(array)] = mid\n return array", "def nonans(array):\n return array[~np.isnan(array)]", "def check_array(arr: Arrayable) -> np.ndarray:\n if isinstance(arr, np.ndarray):\n return arr\n return np.array(arr)", "def _is_double(arr):\n\n # Figure out which dtype for data\n if arr.dtype == np.float32:\n return False\n elif arr.dtype == np.float64:\n return True\n else:\n raise ValueError(\"Only float32 or float64 dtypes are supported\")", "def isscalar(array):\n arr = ma.array(array)\n if not hasattr(arr, '__len__') or arr.shape == () or len(arr) == 1:\n return True\n return False", "def isfillvalue(a):\n a = numpy.asarray(a)\n if a.dtype.kind == 'i':\n mask = a == -999999999\n elif a.dtype.kind == 'f':\n mask = numpy.isnan(a)\n elif a.dtype.kind == 'S':\n mask = a == ''\n else:\n raise ValueError('Fill value not known for dtype %s' % a.dtype)\n return mask", "def _is_unicode(arr):\n if (isinstance(arr, str) or\n issubclass(numpy.asarray(arr).dtype.type, str)):\n return True\n return False", "def filter_nans(seq):\n return np.array([x for x in seq if not isinstance(x, float)])", "def isscalar(x):\n arrayed_x = asarray(x)\n return asarray(x).ndim == 0 and arrayed_x.dtype != 'object'", "def is_sorted_array(arr, increasing=True):\n # If only 1\n if len(arr) == 0:\n return True\n # If multiple values\n if increasing:\n return np.all(np.diff(arr) >= 0)\n else:\n return np.all(np.diff(arr) <= 0)", "def is_array(space, w_obj):\n return space.wrap(w_obj.tp == space.tp_array)", "def __isZeroEverywhere(self, array):\n epsilon = numpy.finfo( type(array[0]) ).eps\n boolList = numpy.less_equal(numpy.abs(array), epsilon)\n\n for b in boolList:\n if not b:\n return False\n return True", "def isna(self):\n # type: () -> np.ndarray\n return extract_isnull_bytemap(self.data)", "def isna(self):\n # type: () -> np.ndarray\n return extract_isnull_bytemap(self.data)", "def test_dtype_None(self):\n array = np.array([[0, 1, 2], [2, 1, 0]]).T\n self.assertTrue(to_ndarray(array, None, safe=True).flags.contiguous,\n msg='to_ndarray: Non contiguous arrays are not being consolidated when dtype is None')", "def is_array(t):\n return isinstance(t, ast.Array)", "def is_array(self):\n return len(self.descriptor) > 1", "def _is_number(data):\n return len(data) and np.issubdtype(_to_ndarray(data).dtype, np.number)", "def convert_non_monotonic_to_nan(array):\n keep = np.arange(0, len(array))\n is_monotonic = 
False\n while not is_monotonic:\n is_monotonic_array = np.hstack(\n (array[keep][1:] >= array[keep][:-1], np.array(True))\n )\n is_monotonic = is_monotonic_array.all()\n keep = keep[is_monotonic_array]\n out_array = np.full_like(array.astype(np.float), np.nan)\n out_array[keep] = array[keep]\n return out_array", "def __check_flat_array__(self):\n if self.flat_array is not None:\n return True\n else:\n return False", "def _autocast_column(data: pyarrow.ChunkedArray) -> pyarrow.ChunkedArray:\n # All-empty (and all-null) columns stay text\n for chunk in data.iterchunks():\n # https://arrow.apache.org/docs/format/Columnar.html#variable-size-binary-layout\n _, offsets_buf, _ = chunk.buffers()\n # If data has an offset, ignore what comes before\n #\n # We don't need to grab the _int_ offset: we can just look at the\n # byte-representation of it.\n offset_0_buf = offsets_buf[chunk.offset * 4 : (chunk.offset + 1) * 4]\n # last offset isn't always the last 4 bytes: there can be padding\n offset_n_buf = offsets_buf[\n (chunk.offset + len(chunk)) * 4 : (chunk.offset + len(chunk) + 1) * 4\n ]\n if offset_0_buf.to_pybytes() != offset_n_buf.to_pybytes():\n # there's at least 1 byte of text. (Assumes the CSV reader doesn't\n # pad the buffer with gibberish.)\n break\n else:\n # there are 0 bytes of text\n return data\n\n # Convert \"\" => null, so pyarrow cast() won't balk at it.\n sane = pyarrow.chunked_array(\n [_nix_utf8_chunk_empty_strings(chunk) for chunk in data.iterchunks()]\n )\n\n for chunk in sane.iterchunks():\n # pyarrow cast() uses double-conversion, so it parses \"NaN\" and \"Inf\"\n # as doubles. Workbench doesn't support NaN or Inf, so don't convert to\n # them.\n if _utf8_chunk_may_contain_inf_or_nan(chunk):\n return data\n\n try:\n numbers = sane.cast(pyarrow.float64())\n except pyarrow.ArrowInvalid:\n # Some string somewhere wasn't a number\n return data\n\n # Test that there's no infinity. We'll use numpy. .to_numpy() with\n # zero_copy_only=False will convert nulls to NaN. That's fine, since we\n # know `numbers` has no NaN values (because `cast()` would have raised\n # rather than return a NaN.)\n for chunk in numbers.iterchunks():\n npchunk = chunk.to_numpy(zero_copy_only=False)\n if np.inf in npchunk or -np.inf in npchunk:\n # Numbers too large\n return data\n\n # Downcast integers, when possible.\n #\n # We even downcast float to int. Workbench semantics say a Number is a\n # Number; so we might as well store it efficiently.\n try:\n # Shrink as far as we can, until pyarrow complains.\n #\n # pyarrow will error \"Floating point value truncated\" if a conversion\n # from float to int would be lossy.\n #\n # We'll return the last _successful_ `numbers` result.\n numbers = numbers.cast(pyarrow.int32())\n numbers = numbers.cast(pyarrow.int16())\n numbers = numbers.cast(pyarrow.int8())\n except pyarrow.ArrowInvalid:\n pass\n\n return numbers", "def _numba_not_in_array(vector: np.ndarray, array: np.ndarray, delta: float = 1e-4) -> bool:\n diff = np.abs(array - vector)\n for idx in range(array.shape[0]):\n localdiff = np.max(diff[idx, :])\n if localdiff < delta:\n return False\n\n return True", "def remove_nans(arr):\n not_nan = [i for i in range(len(arr)) if not np.isnan(arr[i])]\n\n return not_nan, arr[not_nan]", "def is_array_type(an_array, atype):\n tmp = [i for i in an_array if not isinstance(i, atype)]\n return len(tmp) == 0", "def isinf(data):\n return _make.isinf(data)" ]
[ "0.63142204", "0.59511065", "0.59251046", "0.5863669", "0.5700599", "0.5661153", "0.5581066", "0.54970616", "0.54685277", "0.54147017", "0.53897524", "0.5384138", "0.53668594", "0.5293467", "0.52856606", "0.527953", "0.5257239", "0.5248469", "0.5248469", "0.5215622", "0.5214552", "0.5208751", "0.5203614", "0.5202484", "0.5198838", "0.5193919", "0.5189366", "0.5188746", "0.5166378", "0.5151065" ]
0.60420185
1
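A minimal usage sketch for the _utf8_chunk_may_contain_inf_or_nan helper in the record above, assuming the function is in scope together with its module-level imports. SCARY_BYTE_REGEX is not included in the record, so the pattern below is an illustrative assumption, not the library's actual definition.

import array  # needed by the helper itself
import re
import sys    # needed by the helper itself

import pyarrow

# Assumed stand-in for the module-level regex the helper references.
SCARY_BYTE_REGEX = re.compile(rb"(?i)(nan|inf)")

chunk = pyarrow.array(["1", "2.5", "NaN"])           # utf8 array, no nulls, offset 0
print(_utf8_chunk_may_contain_inf_or_nan(chunk))     # True: the data buffer holds b"12.5NaN"

clean = pyarrow.array(["1", "2.5", "3"])
print(_utf8_chunk_may_contain_inf_or_nan(clean))     # False: nothing scary in b"12.53"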
Update the config information with new dropout values.
def update_dropout(info, dropout, dropout_type, prop_name): if dropout_type == "schnet_dropout": info["model_params"]["schnet_dropout"] = dropout elif dropout_type == "chemprop_dropout": info["model_params"]["cp_dropout"] = dropout elif dropout_type == "readout_dropout": # if it's in the readout layers, find the dropout # layers in the readout dictionary and update them readout = info["model_params"]["readoutdict"] layer_dics = readout[prop_name] for layer_dic in layer_dics: if layer_dic["name"] == "Dropout": layer_dic["param"]["p"] = dropout info["model_params"]["readoutdict"] = {prop_name: layer_dics} elif dropout_type == "attention_dropout": info["model_params"]["boltzmann_dict"]["dropout_rate"] = dropout else: info["model_params"][dropout_type] = dropout
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def conf_update(self):\n pass", "def update(self):\n self.save_config_file()", "def updateConfig(self):\n # Make sure to keep the default values in place.\n if self.newConfig['sensor'] == 0:\n self.newConfig['sensor'] = self.config['sensor']\n if self.newConfig['camera'] == 0:\n self.newConfig['camera'] = self.config['camera']\n if not self.newConfig['auto']['times']:\n self.newConfig['auto']['times'] = self.config['auto']['times']\n if not self.newConfig['auto']['days']:\n self.newConfig['auto']['days'] = self.config['auto']['days']\n\n # Show the changes.\n if self.verbosity >= 1:\n print('%s: Updating configuration file...' % self.feederName)\n try:\n for key in self.config.keys():\n if type(self.config[key]) is dict:\n for subkey in self.config[key].keys():\n if self.config[key][subkey] != self.newConfig[key][subkey]:\n print('%s: Updating %s from %s to %s.' % (self.feederName, subkey, self.config[key][subkey], self.newConfig[key][subkey]))\n elif self.config[key] != self.newConfig[key]:\n print('%s: Updating %s from %s to %s.' % (self.feederName, key, self.config[key], self.newConfig[key]))\n except ValueError:\n if self.verbosity >= 1:\n print('%s: Configuration file does not contain a valid JSON object.' % self.feederName)\n if self.verbosity == 2:\n print('%s: Overwriting configuration file to: %s.' % (self.feederName, self.config))\n\n # Change the configuration file.\n self.config = self.newConfig\n self.writeConfig()", "def changeDropout(self,dropout):\n self.dropout = dropout", "def with_config_update(self):\n original_config = self.load_config()\n\n config_data = original_config.json\n if str(self.ITEM_PUBLIC_ID) in config_data[f\"{self.ITEM_TYPE}s\"]:\n config_data[f\"{self.ITEM_TYPE}s\"].remove(str(self.ITEM_PUBLIC_ID))\n config_data[f\"{self.ITEM_TYPE}s\"].append(\n f\"{self.ITEM_PUBLIC_ID.author}/{self.ITEM_PUBLIC_ID.name}:0.0.1\"\n )\n self.dump_config(AgentConfig.from_json(config_data))\n try:\n yield\n finally:\n self.dump_config(original_config)", "def update_global_config(self, config, **kwargs):\n pass", "def update_config(self, data):\n self.config.data = dict_merge(self.config.data, data)\n self.config.save()", "def refresh_configuration(self):\n pass", "def updateconfig(self):\n\n # Initialize the yaml data\n ydata = {\"metadata\": self._metadata, \"nodes\": self._nodes}\n\n # Write the system config file\n filename = self._rootdir + self._metadata[\"system_config_file\"]\n with open(filename, \"w\") as yamlfile:\n yaml.dump(ydata, yamlfile)", "def _overwrite_with_config(self, new_cfg):\n for section in new_cfg.sections():\n for key, val in new_cfg.items(section):\n self.config.set(section, key, val)", "def update_settings(self):\n\n self.sim.account.set_balance(int(self.balance_str.get()))\n\n self.sim.config.set_base_bet(int(self.base_bet_str.get()))\n self.sim.config.set_payout(float(self.payout_str.get()))\n self.sim.config.set_iterations(int(self.iterations_str.get()))\n self.sim.config.set_loss_adder(int(self.loss_adder_str.get()))", "def update_ranges(self):\n new_ranges = self.get_z_ranges()\n self.config.update_ranges(new_ranges)", "def update_config(self):\n self.channel_count = self.config_global['channel_count']\n self.pixel_count = self.config_global['pixel_count']\n self.pixel_index_max = self.pixel_count - 1\n self.repeat_count = self.config_global['repeat_count']\n self.repeat_snake = self.config_global['repeat_snake']\n\n self.update_interval = self.config_global['update_interval']\n self.mode_16bit = self.config_global['mode_16bit']\n\n 
self.color_channels = self.config_global['color_channels']\n # self.color_channels = collections.namedtuple(\n # 'color_channels',\n # **self.color_channels_dict\n # )\n self.color_channels_count = len(self.color_channels)\n if self.mode_16bit:\n self.color_channels_count = self.color_channels_count * 2\n\n self.total_channel_count = (\n self.pixel_count *\n self.color_channels_count\n )\n if self.repeat_count > 0:\n self.total_channel_count *= self.repeat_count", "def _on_config_changed(self, _):\n self._configure_pod()", "def config_update(cls, **options) -> None:\n cls._logger.debug(\"[%s]: Update config from kwargs.\", cls.__name__)\n\n config_update: Dict = {k: options[k] for k in options.keys() if \"graph_\" in k}\n\n cls._config.update(config_update)\n\n cls._logger.debug(\"[%s]: Final config: %s\", cls.__name__, cls._config)", "def config_updated(self):\n if callable(self.on_config_updated):\n self.on_config_updated(self.config())", "def _update_params(self):\n log.debug(\"Updating parameter dict\")\n old_config = self._param_dict.get_config()\n self._get_config()\n new_config = self._param_dict.get_config() \n if (new_config != old_config):\n self._driver_event(DriverAsyncEvent.CONFIG_CHANGE)", "def apply_user_configuration(self, config):\n self.logDisplay.set_logging_level(config['log'].get('logging_level', fallback='Verbose'))\n\n # MIDI\n self.winchMidiInputCombo.select_item(config['midi'].get('winch_midi_input', fallback='<no selection>'))\n self.midiOutputCombo.select_item(config['midi'].get('midi_output', fallback='<no selection>'))\n\n # OSC\n oscdef = config['osc']\n self.oscListenerConfig.set_OSC_port(oscdef.get('listener_addr', fallback='localhost'),\n oscdef.getint('listener_port', fallback=3751))\n\n self.oscSenderConfig.set_OSC_port(oscdef.get('sender_addr', fallback='localhost'),\n oscdef.getint('sender_port', fallback=3752))\n\n # DMX\n self.dmxSelect.select_item(config['dmx'].get('dmx_output_serial_port', fallback='<no selection>'))\n\n # winches\n for i, winchSelect in enumerate(self.winchSelects):\n key = \"winch_%d_output_serial_port\" % (i+1)\n winchSelect.select_item(config['winches'].get(key, fallback = '<no selection>'))\n return", "def _config_options(self):\n self._config_sortable(self._sortable)\n self._config_drag_cols(self._drag_cols)", "def _update(self):\n # clear group before rebuild\n self.clear()\n\n # build configuration groups\n self._config_names = []\n for i in range(self._n_configs):\n config_name = f\"config{i+1:02}\"\n self._config_names.append(config_name)\n self._build_config_group(config_name)\n\n # reset active configuration if necessary\n if not all(cname in self._config_names for cname in self._active_config):\n self._active_config = (self._config_names[0],)\n\n # build datasets\n self._build_datasets()", "def _auto_update_configuration(self) -> None:\n self.config = rasa.utils.train_utils.update_confidence_type(self.config)\n rasa.utils.train_utils.validate_configuration_settings(self.config)\n self.config = rasa.utils.train_utils.update_similarity_type(self.config)\n self.config = rasa.utils.train_utils.update_evaluation_parameters(self.config)", "def set_config(self, config):\n if 'symbols' in config:\n self.symbols = self.config['symbols'] = config['symbols']\n if 'update_frequency_milliseconds' in config:\n self.update_frequency_milliseconds = self.config['update_frequency_milliseconds'] = int(\n config['update_frequency_milliseconds']\n )\n if 'elements_per_update' in config:\n self.elements_per_update = 
self.config['elements_per_update'] = int(config['elements_per_update'])", "async def async_update_config(self, config: ConfigType) -> None:\n self._config = config\n # just in case min/max values changed\n if self._current_value is None:\n return\n self._current_value = min(self._current_value, self._maximum)\n self._current_value = max(self._current_value, self._minimum)\n self.async_write_ha_state()", "def _save_config(self, data):\n curr_conf = self.config_entry.options.copy()\n curr_conf.update(data)\n curr_conf.update(self._conf_devs_option)\n\n return self.async_create_entry(title=\"\", data=curr_conf)", "def update_configuration(self, config):\n\n config[\"data_transformation\"][\"n_classification_bins\"] = config[\"n_classification_bins\"]\n config[\"data_transformation\"][\"nassets\"] = config[\"nassets\"]\n config[\"data_transformation\"][\"classify_per_series\"] = config[\"classify_per_series\"]\n config[\"data_transformation\"][\"normalise_per_series\"] = config[\"normalise_per_series\"]\n\n return config", "def update_config(self, config):\n return self._update_config(\"config\", config)", "def config(self, config_dict):\r\n self._cfg.config = config_dict", "def configure(self, config: dict):\n self.config.update(config)", "def update(self, obj):\n\n self.cfg.update(obj)", "def _update_params(self, *args, **kwargs):\n\n \n # Get old param dict config.\n old_config = self._param_dict.get_config()\n \n # Issue display commands and parse results.\n timeout = kwargs.get('timeout', SBE37_TIMEOUT)\n self._do_cmd_resp('ds',timeout=timeout)\n self._do_cmd_resp('dc',timeout=timeout)\n \n # Get new param dict config. If it differs from the old config,\n # tell driver superclass to publish a config change event.\n new_config = self._param_dict.get_config()\n if new_config != old_config:\n self._driver_event(DriverAsyncEvent.CONFIG_CHANGE)" ]
[ "0.6544299", "0.63342535", "0.60116196", "0.59151256", "0.5909534", "0.57759255", "0.57704425", "0.5765275", "0.5730661", "0.56408286", "0.5635697", "0.558882", "0.55770063", "0.5571904", "0.5553866", "0.5534613", "0.5478377", "0.546527", "0.5463798", "0.5436312", "0.5427711", "0.53996444", "0.5395192", "0.538703", "0.5386605", "0.53815395", "0.5366553", "0.5358142", "0.5354595", "0.5339216" ]
0.63966775
1
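A short usage example for the update_dropout helper in the record above; the layout of info and the property name "energy" are hypothetical, chosen only to exercise the two most common branches.

info = {
    "model_params": {
        "schnet_dropout": 0.0,
        "readoutdict": {
            "energy": [
                {"name": "Linear", "param": {"in_features": 128, "out_features": 1}},
                {"name": "Dropout", "param": {"p": 0.0}},
            ]
        },
    }
}

# Readout branch: every layer named "Dropout" under prop_name gets the new rate.
update_dropout(info, dropout=0.2, dropout_type="readout_dropout", prop_name="energy")
assert info["model_params"]["readoutdict"]["energy"][1]["param"]["p"] == 0.2

# Named branch: prop_name is accepted but ignored.
update_dropout(info, dropout=0.1, dropout_type="schnet_dropout", prop_name="energy")
assert info["model_params"]["schnet_dropout"] == 0.1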
Update the config information with the number of attention heads.
def update_heads(info, heads): info["model_params"]["boltzmann_dict"]["num_heads"] = heads # Concatenate the fingerprints produced by the different heads info["model_params"]["boltzmann_dict"]["head_pool"] = "concatenate" readoutdict = info["model_params"]["readoutdict"] feat_dim = info["model_params"]["mol_basis"] for key, lst in readoutdict.items(): for i, dic in enumerate(lst): if "param" in dic and "in_features" in dic.get("param", {}): # make sure that the input dimension to the readout is equal to # `heads * feat_dim`, where `feat_dim` is the feature dimension # produced by each head readoutdict[key][i]["param"]["in_features"] = feat_dim * heads break info["model_params"]["readoutdict"] = readoutdict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_config(self):\n self.channel_count = self.config_global['channel_count']\n self.pixel_count = self.config_global['pixel_count']\n self.pixel_index_max = self.pixel_count - 1\n self.repeat_count = self.config_global['repeat_count']\n self.repeat_snake = self.config_global['repeat_snake']\n\n self.update_interval = self.config_global['update_interval']\n self.mode_16bit = self.config_global['mode_16bit']\n\n self.color_channels = self.config_global['color_channels']\n # self.color_channels = collections.namedtuple(\n # 'color_channels',\n # **self.color_channels_dict\n # )\n self.color_channels_count = len(self.color_channels)\n if self.mode_16bit:\n self.color_channels_count = self.color_channels_count * 2\n\n self.total_channel_count = (\n self.pixel_count *\n self.color_channels_count\n )\n if self.repeat_count > 0:\n self.total_channel_count *= self.repeat_count", "def update_configuration(self, config):\n\n config[\"data_transformation\"][\"n_classification_bins\"] = config[\"n_classification_bins\"]\n config[\"data_transformation\"][\"nassets\"] = config[\"nassets\"]\n config[\"data_transformation\"][\"classify_per_series\"] = config[\"classify_per_series\"]\n config[\"data_transformation\"][\"normalise_per_series\"] = config[\"normalise_per_series\"]\n\n return config", "def update(self, num_of_updates=25) -> None:\n\t\tfor _ in range(num_of_updates):\n\t\t\tself.__find_joint_configurations()", "def conf_update(self):\n pass", "def get_config(self):\n config = {\n 'F_': self.F_,\n 'attn_heads': self.attn_heads,\n 'attn_heads_reduction': self.attn_heads_reduction,\n 'edge_type_reduction': self.edge_type_reduction,\n 'attention_type': self.attention_type,\n 'attn_dropout': self.attn_dropout,\n 'feature_dropout': self.feature_dropout,\n 'activation': self.activation,\n 'use_value_bias': self.use_value_bias,\n 'use_key_bias': self.use_key_bias,\n 'kernel_initializer': self.kernel_initializer,\n 'bias_initializer': self.bias_initializer,\n 'attn_kernel_initializer': self.attn_kernel_initializer,\n 'attn_bias_initalizer': self.attn_bias_initializer,\n 'kernel_regularizer': self.kernel_regularizer,\n 'bias_regularizer': self.bias_regularizer,\n 'attn_kernel_regularizer': self.attn_kernel_regularizer,\n 'attn_bias_regularizer': self.attn_bias_regularizer,\n 'activity_regularizer': self.activity_regularizer,\n 'kernel_constraint': self.kernel_constraint,\n 'bias_constraint': self.bias_constraint,\n 'attn_kernel_constraint': self.attn_kernel_constraint,\n 'attn_bias_constraint': self.attn_bias_constraint\n }\n base_config = super(BatchShawMultigraphAttention, self).get_config()\n return dict(list(base_config.items())) + list(config.items())", "def update(self, config):\n self.n_topics = config['n_topics'] \n self.n_passes = config['n_passes'] \n self.min_docfreq = config['min_docfreq'] \n self.max_docfreq = config['max_docfreq']\n self.ngrams = config['ngrams'] \n self.n_words = config['n_words'] \n self.topic_range = config['topic_range'] \n self.ext_stop_words = config['ext_stop_words']", "def __init__(self, **config):\n super(CNN, self).__init__()\n in_channel = [26] + config['cnn_target_filters']\n kernels = config['cnn_target_kernels']\n self.layer_size = len(config['cnn_target_filters'])\n self.visual_attention=config['visual_attention']\n self.concatenation=config['concatenation']\n self.convs = nn.ModuleList([nn.Conv1d(in_channels=in_channel[i],\n out_channels=in_channel[i + 1],\n kernel_size=kernels[i]) for i in range(self.layer_size)])\n self.convs = self.convs.float()\n 
self.attention = config['attention']\n protein_size = self.simulate_output((26, 1000))\n self.fc = nn.Linear(protein_size, config['hidden_dim_protein'])\n self.Attention=Attention(**config)", "def n_configs(self, val):\n if val >= 1 and isinstance(val, int):\n if val != self._faux._n_configs:\n self._faux._n_configs = val\n self._faux._update()\n else:\n warn(\"`val` not valid, no update performed\")", "def update(self, rxn_probs):\n pass", "def _InitAttentionParams(self, atten_tpl):\n p = self.params\n\n if isinstance(p.num_heads, list) != isinstance(atten_tpl, list):\n raise ValueError('p.num_heads and p.atten_tpl should both be lists '\n f'or both scalars for {p.name} num_heads={p.num_heads}.')\n if isinstance(p.num_heads, list) and (len(p.num_heads) != len(atten_tpl)):\n raise ValueError('num_heads and atten_tpl should both be lists '\n 'of the equal sizes: '\n f'{len(p.num_heads)} vs {len(atten_tpl)}')\n\n def _SetCommonParams(params, name, num_heads):\n # Raise warning if self.params override params from atten_tpl\n for key in ['input_dim', 'hidden_dim', 'num_heads', 'atten_dropout_prob']:\n if params.Get(key) is not p.Get(key):\n tf.logging.warning('attention param {} overriding: {} -> {}'.format(\n key, params.Get(key), p.Get(key)))\n if params.name is not name:\n tf.logging.warning('attention param name overriding: {} -> {}'.format(\n params.name, name))\n params.name = name\n params.input_dim = p.input_dim\n params.hidden_dim = p.hidden_dim\n params.num_heads = num_heads\n params.atten_dropout_prob = p.atten_dropout_prob\n if isinstance(p.num_heads, list):\n params.proj_tpl.make_output_proj_no_op = True\n # Each dim per head is now divided among all heads\n dim_per_head = p.hidden_dim // sum(p.num_heads)\n params.proj_tpl.dim_per_head = dim_per_head\n params.dim_per_head = dim_per_head\n params.hidden_dim = p.hidden_dim // len(p.num_heads)\n return params\n\n if isinstance(p.num_heads, list):\n params_list = []\n for i in range(len(atten_tpl)):\n params = atten_tpl[i].Copy()\n params = _SetCommonParams(params, 'mixed_atten_{}'.format(i),\n p.num_heads[i])\n params_list.append(params)\n params = params_list\n else:\n params = atten_tpl.Copy()\n params = _SetCommonParams(params, 'multihead_atten', p.num_heads)\n return params", "def update_count(self):\n pass", "def n_configs(self):\n return self._faux._n_configs", "def set_config(self, config):\n if 'symbols' in config:\n self.symbols = self.config['symbols'] = config['symbols']\n if 'update_frequency_milliseconds' in config:\n self.update_frequency_milliseconds = self.config['update_frequency_milliseconds'] = int(\n config['update_frequency_milliseconds']\n )\n if 'elements_per_update' in config:\n self.elements_per_update = self.config['elements_per_update'] = int(config['elements_per_update'])", "def __init__(self, nheads, d_model):\n super(MultiheadAttention, self).__init__()\n assert d_model % nheads == 0\n self.d_head = d_model // nheads\n self.nheads = nheads\n self.Q_fc = nn.Linear(d_model, d_model, bias=False)\n self.K_fc = nn.Linear(d_model, d_model, bias=False)\n self.V_fc = nn.Linear(d_model, d_model, bias=False)\n self.output_fc = nn.Linear(d_model, d_model, bias=False)\n self.attn = None", "def update_config(self, config):\n # add follower public folder to the CKAN's list of public folders\n here = os.path.dirname(__file__)\n public_dir = os.path.join(here, 'public')\n if config.get('extra_public_paths'):\n config['extra_public_paths'] += ',' + public_dir\n else:\n config['extra_public_paths'] = public_dir\n # add 
follower template folder to the CKAN's list of template folders\n template_dir = os.path.join(here, 'templates')\n if config.get('extra_template_paths'):\n config['extra_template_paths'] += ',' + template_dir\n else:\n config['extra_template_paths'] = template_dir", "def updateSizeHead(self, size): \n self.avatarConfiguration[\"headSize\"] = size\n self.paintHead()\n self.paintHair()\n if (self.avatarConfiguration[\"mask\"]):\n self.generateMask(\"imgUpload.png\")\n self.paintMask()", "def onConfigureMessage(self, config):\n for adaptor in config[\"adaptors\"]:\n adtID = adaptor[\"id\"]\n if adtID not in self.devices:\n # Because configure may be re-called if devices are added\n name = adaptor[\"name\"]\n friendly_name = adaptor[\"friendly_name\"]\n logging.debug(\"%s Configure app. Adaptor name: %s\", ModuleName, name)\n self.idToName[adtID] = friendly_name.replace(\" \", \"_\")\n self.devices.append(adtID)\n self.dm = DataManager(self.bridge_id)\n self.setState(\"starting\")", "def _update_count(self):\n self._count = len(self._items)", "def updateBotCounts(self, nextCard):\n nextVal = dnUtil.getValue(nextCard)\n state = self.getState()\n counts = self.getCounts(state)\n newCount = counts.copy()\n for value in dnUtil.valuesList:\n if counts[value][2] == 0:\n continue\n update = self.updateCount(value, nextVal, counts[value])\n newCount[value] = update\n self.setCounts(newCount)", "def set_number_of_sentences(self):\n self.number_of_sentences = int(self.num_sentences.get())", "def update_count(self):\n pass # Do nothing", "def _InitAttentionParams(self, atten_tpl):\n p = self.params\n source_atten_tpls = []\n # Set up each source attention.\n for i in range(p.num_source):\n src_key = 'source_%d' % i\n src_atten = atten_tpl.Copy()\n src_atten = super()._InitAttentionParams(src_atten)\n if isinstance(src_atten, list):\n raise ValueError(\n 'TransformerMultiSourceAttentionLayer does not support '\n 'num_heads > 1.')\n src_atten.name = 'multihead_atten_%s' % src_key\n source_atten_tpls.append((src_key, src_atten))\n\n # Initialize multi-source attention.\n msa = p.multi_source_atten.Copy()\n msa.name = 'multi_source_atten'\n msa.input_dim = p.input_dim\n msa.hidden_dim = p.hidden_dim\n msa.source_atten_tpls = source_atten_tpls\n msa.primary_source_key = 'source_%d' % p.primary_source_index\n return msa", "def update_config(self, config) -> InferredConfig:\n categorical_dim = len(config.categorical_cols)\n continuous_dim = len(config.continuous_cols)\n if config.task == \"regression\":\n output_dim = len(config.target)\n elif config.task == \"classification\":\n output_dim = len(self.train[config.target[0]].unique())\n else:\n output_dim = None\n categorical_cardinality = None\n embedding_dims = None\n if not self.do_leave_one_out_encoder():\n categorical_cardinality = [\n int(self.train[col].fillna(\"NA\").nunique()) + 1 for col in config.categorical_cols\n ]\n embedding_dims = [(x, min(50, (x + 1) // 2)) for x in categorical_cardinality]\n if hasattr(config, \"embedding_dims\"):\n if config.embedding_dims is not None:\n embedding_dims = config.embedding_dims\n return InferredConfig(\n categorical_dim=categorical_dim,\n continuous_dim=continuous_dim,\n output_dim=output_dim,\n categorical_cardinality=categorical_cardinality,\n embedding_dims=embedding_dims,\n )", "def config_count(self) -> int:\n return pulumi.get(self, \"config_count\")", "def find_n(self):\n metadata_files = [\n file for file in self.cfg[\"input_files\"]\n if \"tas/metadata.yml\" in file\n ]\n self.cfg[\"N\"] = {}\n for 
meta_file in metadata_files:\n n_identifyer = meta_file.split(\"/tas/\")[0].split(\"/tas_\")[-1]\n metadata = group_metadata(get_cfg(meta_file).values(), \"dataset\")\n self.cfg[\"N\"][n_identifyer] = len(metadata.keys()) - 1", "def setMancount(self, cnt):\n self.__mancount=cnt", "def num_of_adaptors(self, num_of_adaptors):\n\n self._num_of_adaptors = num_of_adaptors", "def config_connection_matrix(self):\n for leg in self.legs.values():\n for m in leg[\"muscles\"]:\n if \"brain_sig\" and \"name\" in m:\n self.connection_matrix[m[\"name\"]] = [0] * self.brain[\"n_osc\"]\n self.connection_matrix[m[\"name\"]][m[\"brain_sig\"] - 1] = 1.", "def get_base_config():\n return dict(\n dim=768,\n ff_dim=3072,\n num_heads=12,\n num_layers=12,\n attention_dropout_rate=0.0,\n dropout_rate=0.1,\n representation_size=768,\n classifier='token'\n )", "def updateInfo(self):\n\t\tif ( self.errorCount == 2 ):\n\t\t\tself.pitchText.text = \"Unclear microphone input...\"\n\n\t\tcurNote = self.listener.pitch.note\n\t\tcurFreq = self.listener.pitch.freq\n\t\tself.tuneDelta, self.tuneNeighbor = self.listener.pitch.inTune()\n\t\ttuneText = \"%0.2f Hz off from %s (%0.1f Hz)\" % (abs(self.tuneDelta), \n\t\t\t\t\t\t\t\t\t\t\t\tself.tuneNeighbor.note, \n\t\t\t\t\t\t\t\t\t\t\t\tcurFreq)\n\t\tself.pitchText.text = tuneText" ]
[ "0.5661511", "0.5599164", "0.54210174", "0.53882116", "0.5338775", "0.5247799", "0.5247248", "0.5225227", "0.51431704", "0.5058479", "0.49841285", "0.49445143", "0.49379683", "0.48532596", "0.4848556", "0.48481622", "0.4835506", "0.48258802", "0.48030823", "0.48024145", "0.47915727", "0.47881028", "0.4777855", "0.4774145", "0.47700423", "0.47676536", "0.4764091", "0.47598007", "0.47409284", "0.4735868" ]
0.5935313
0
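A sketch of the effect of update_heads from the record above; the info layout and the "energy" readout key are assumptions for illustration.

info = {
    "model_params": {
        "mol_basis": 64,
        "boltzmann_dict": {},
        "readoutdict": {
            "energy": [
                {"name": "Linear", "param": {"in_features": 64, "out_features": 1}},
            ]
        },
    }
}

update_heads(info, heads=4)

boltz = info["model_params"]["boltzmann_dict"]
assert boltz["num_heads"] == 4 and boltz["head_pool"] == "concatenate"
# The first readout layer now expects the concatenated output of all heads: 4 * 64 = 256.
assert info["model_params"]["readoutdict"]["energy"][0]["param"]["in_features"] == 256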
Update a general parameter that's in the main info dictionary.
def update_general(info, key, val): info["model_params"][key] = val
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def change_general_param(self, param, val):\n assert param in self.params, '%s is not recognized as a valid parameter' % param\n self.params[param].change_value(val)", "def _paramUpdate(self):\n\n # Update the database attributes accordingly.\n dt.utilities.DB_attrs_save(self.Database, self.newParam)", "def update_parameter(self, param, val, force=False):\n self._update_dict[param] = val\n if force:\n self._cur_val[param] = None", "def updateParameters(self, parameters):", "def update_parameter(self, name, freq, value):\n if name not in self._parameters.keys():\n self.add_parameter(name, [freq], [value])\n else:\n param = self.get_parameter(name)\n param.update_value(freq, value)", "def update_params(self):\n pass", "def update_param(self, update_param):\n\n self._update_param = update_param", "def updateParameters(self):\n\n return", "def update_param_info(param_info, config, is_user_config=False):\n if 'parameters' not in config:\n return\n params = config['parameters']\n for name in params:\n val = params[name]\n if not is_user_config:\n # If this is not a user-provided configuration, we disallow parameter redefinition.\n if name in param_info:\n raise ConfigurationError(\n \"Parameter info update error.\"\n \" Parameter redefinition is not allowed for non-user configuration.\"\n \" This is a system configuration error that must not happen.\"\n \" Parameter %s=%s, new parameter definition (value) is %s\" % (name, str(param_info[name]), val)\n )\n if isinstance(val, dict):\n # This is a complete parameter definition with name, value and description.\n if 'val' not in val:\n raise ConfigurationError(\n \"Parameter info update error.\"\n \" Parameter that is defined by a dictionary must contain 'val' field that\"\n \" defines its default value. Found this definition: %s=%s\" % (name, val)\n )\n if name not in param_info:\n param_info[name] = copy.deepcopy(val) # New parameter, set it info object.\n # TODO what about parameter type and description?\n else:\n logging.warn(\n \" Parameter (%s) entirely redefines existing parameter (%s).\"\n \" Normally, only value needs to be provided.\"\n \" We will proceed but you may want to fix this.\",\n json.dumps(val),\n json.dumps(param_info[name])\n )\n param_info[name]['val'] = val['val'] # Existing parameter from user configuration, update its value\n else:\n # Just parameter value\n val_type = 'str' if isinstance(val, basestring) or isinstance(val, list) else type(val).__name__\n if name not in param_info:\n param_info[name] = {\n 'val': val,\n 'type': val_type,\n 'desc': \"No description for this parameter provided (it was automatically converted from its value).\"\n }\n else:\n param_info[name]['val'] = val\n # Do final validations\n if 'type' in param_info[name] and param_info[name]['type'] not in ('int', 'str', 'float', 'bool'):\n raise ConfigurationError(\n \"Parameter info update error.\"\n \" Parameter has invalid type = '%s'.\"\n \" Parameter definition is %s = %s\" % (param_info[name]['type'], name, param_info[name])\n )\n if 'type' not in param_info[name] or 'desc' not in param_info[name]:\n logging.warn(\n \"Parameter definition does not contain type ('type') and/or description ('desc').\"\n \" You should fix this. 
Parameter definition is\"\n \" %s = %s\", name, param_info[name]\n )", "def update_param(param, param_dict, alg=\"IID_LINEAR\", prefix=\"\"):\n default_len = len(param.defaults)\n if param.defaults:\n for index, value in enumerate(reversed(param.args)):\n if value not in [\"self\", \"W\", \"method\", \"causal_matrix\", \"topology_matrix\"]:\n if index < default_len:\n p_value = list(reversed(param.defaults))[index]\n else:\n p_value = None\n if value is \"sem_type\":\n p_value = sem_type_set(\"sem_type\", alg)[0]\n param_dict.update({prefix + value: p_value})", "def update_settings(self, param):\n if param.name() == '':\n pass", "def update(self, **params):\n self.parameters.update(params)", "def _update_params(self):\n pass", "def update(self, params):", "def __adjust_param(self, option):\n # Get the name of the parameter.\n name = self.__option_params[option]\n\n # Ask the user for a new value.\n value = float(input(\"Enter value for {}: \".format(name)))\n self._params.update(name, value)\n\n # Update the description with the new value.\n desc = self.__make_description(name)\n self.update_description(option, desc)\n\n # Stay on the same menu.\n return self.get_name()", "def _update_params(self):\n raise NotImplementedException()", "def _update_parameter(self, dWxh, dbh, dWhy, dby):\n # Add code to update all the weights and biases here", "def __updateParameter(self, currentParam, newParam):\n for i in xrange(len(currentParam)):\n for np in newParam:\n if np['name'] == currentParam[i]['name']:\n currentParam[i] = np", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return", "def updateParameters(self, parameters):\r\n return", "def update_parameters(self):\n # We update gamma, gamma0, lambda and nu in turn (Bottolo et al, 2011)\n self.update_gamma()\n self.update_gamma0()\n self.update_lambda()\n self.update_nu()\n if self.sample_xi:\n self.update_xi()", "def update_values(self, to_update):\n for key, value in kwargs.iteritems():\n self.params[key] = value\n # update the possibly dependent parameters\n self.set_filenames()", "def edit_parameter(request, parameter, **_kwargs):\n pass", "def setParam(self,param,value):\n if param in self.params.keys():\n self.params[param] = value" ]
[ "0.7279819", "0.71316004", "0.70896465", "0.68731415", "0.6845889", "0.68180555", "0.6810109", "0.67108864", "0.6680052", "0.6631445", "0.6597182", "0.6568276", "0.65336627", "0.65146816", "0.64628476", "0.64187586", "0.64153326", "0.63640064", "0.63570213", "0.63570213", "0.63570213", "0.63570213", "0.63570213", "0.63570213", "0.63570213", "0.63570213", "0.6326515", "0.632127", "0.631701", "0.62342215" ]
0.7829526
0
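update_general in the record above is a one-line setter on model_params; a minimal illustration with a hypothetical key:

info = {"model_params": {"activation": "swish"}}
update_general(info, key="n_convolutions", val=4)
assert info["model_params"]["n_convolutions"] == 4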
Construct generalized extreme value distribution. The parameters `loc`, `scale`, and `concentration` must be shaped in a way that supports broadcasting (e.g. `loc + scale` + `concentration` is valid).
def __init__(self, loc, scale, concentration, validate_args=False, allow_nan_stats=True, name='GeneralizedExtremeValue'): parameters = dict(locals()) with tf.name_scope(name) as name: dtype = dtype_util.common_dtype([loc, scale, concentration], dtype_hint=tf.float32) loc = tensor_util.convert_nonref_to_tensor( loc, name='loc', dtype=dtype) scale = tensor_util.convert_nonref_to_tensor( scale, name='scale', dtype=dtype) concentration = tensor_util.convert_nonref_to_tensor( concentration, name='concentration', dtype=dtype) dtype_util.assert_same_float_dtype([loc, scale, concentration]) # Positive scale is asserted by the incorporated GEV bijector. self._gev_bijector = gev_cdf_bijector.GeneralizedExtremeValueCDF( loc=loc, scale=scale, concentration=concentration, validate_args=validate_args) # Because the uniform sampler generates samples in `[0, 1)` this would # cause samples to lie in `(inf, -inf]` instead of `(inf, -inf)`. To fix # this, we use `np.finfo(dtype_util.as_numpy_dtype(self.dtype).tiny` # because it is the smallest, positive, 'normal' number. super(GeneralizedExtremeValue, self).__init__( distribution=uniform.Uniform( low=np.finfo(dtype_util.as_numpy_dtype(dtype)).tiny, high=tf.ones([], dtype=dtype), allow_nan_stats=allow_nan_stats), # The GEV bijector encodes the CDF function as the forward, # and hence needs to be inverted. bijector=invert_bijector.Invert( self._gev_bijector, validate_args=validate_args), parameters=parameters, name=name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __call__(self, loc=0, scale=1, size=None, **kwargs):\n return super().__call__(loc, scale, size=size, **kwargs)", "def __call__(self, loc=0.0, scale=1.0, size=None, **kwargs):\n return super().__call__(loc, scale, size=size, **kwargs)", "def __call__(self, loc=0.0, scale=1.0, size=None, **kwargs):\n return super().__call__(loc, scale, size=size, **kwargs)", "def __call__(self, loc=0.0, scale=1.0, size=None, **kwargs):\n return super().__call__(loc, scale, size=size, **kwargs)", "def __call__(self, loc=0.0, scale=1.0, size=None, **kwargs):\n return super().__call__(loc, scale, size=size, **kwargs)", "def __call__(self, loc=0.0, scale=1.0, size=None, **kwargs):\n return super().__call__(loc, scale, size=size, **kwargs)", "def maxabs_scale(X, *, axis=..., copy=...):\n ...", "def default_loc_scale_fn(\n is_singular=False,\n loc_initializer=init_ops.random_normal_initializer(stddev=0.1),\n untransformed_scale_initializer=init_ops.random_normal_initializer(\n mean=-3., stddev=0.1),\n loc_regularizer=None,\n untransformed_scale_regularizer=None,\n loc_constraint=None,\n untransformed_scale_constraint=None):\n def _fn(dtype, shape, name, trainable, add_variable_fn):\n \"\"\"Creates `loc`, `scale` parameters.\"\"\"\n loc = add_variable_fn(\n name=name + \"_loc\",\n shape=shape,\n initializer=loc_initializer,\n regularizer=loc_regularizer,\n constraint=loc_constraint,\n dtype=dtype,\n trainable=trainable)\n if is_singular:\n return loc, None\n untransformed_scale = add_variable_fn(\n name=name + \"_untransformed_scale\",\n shape=shape,\n initializer=untransformed_scale_initializer,\n regularizer=untransformed_scale_regularizer,\n constraint=untransformed_scale_constraint,\n dtype=dtype,\n trainable=trainable)\n scale = (np.finfo(dtype.as_numpy_dtype).eps +\n nn_ops.softplus(untransformed_scale))\n return loc, scale\n return _fn", "def gaussian_many(\n x: float,\n values: np.array,\n uncertainties: np.array\n) -> np.array:\n center = np.array(values)\n width = np.maximum(np.array(uncertainties), 1e-6)\n coefficient = 1 / np.sqrt(2.0 * math.pi * width * width)\n exponent = -0.5 * ((float(x) - center) ** 2) / (width * width)\n return coefficient * np.exp(exponent)", "def _ScatterXUniformlyExtendedRange(self, num_points, lattice_sizes,\n input_dims):\n x = []\n for _ in range(num_points):\n point = [\n np.random.random() * (lattice_sizes + 1.0) - 1.0\n for _ in range(input_dims)\n ]\n x.append(np.asarray(point))\n if input_dims == 1:\n x.sort()\n return x", "def gaussian(x, amp, wid, cen):\n return amp*np.exp(-(x-cen)**2/(2*wid**2))", "def gaussian(x, amp, cen, wid):\n return amp * exp (-(x-cen)**2/(2*wid**2))", "def gaussian(mu, wid, x):\n return np.exp(-((x - mu) / (0.6005612 * wid))**2)", "def coordExtrema(a):\n # Extreme values of longitude and latitude in the survey.\n longiMin = sp.inf\n latMin = sp.inf\n longiMax = -sp.inf\n latMax = -sp.inf\n for t in range(len(a)):\n if a[t].pktCount > 0:\n arraMin = sp.amin(a[t].longi)\n if arraMin < longiMin:\n longiMin = sp.amin(a[t].longi)\n arraMin = sp.amin(a[t].lat)\n if arraMin < latMin:\n latMin = arraMin\n arraMax = sp.amax(a[t].longi)\n if arraMax > longiMax:\n longiMax = arraMax\n arraMax = sp.amax(a[t].lat)\n if arraMax > latMax:\n latMax = arraMax\n\n ext = cs.emptyClass()\n ext.longiMin = longiMin\n ext.longiMax = longiMax\n ext.latMin = latMin\n ext.latMax = latMax\n return ext", "def MinX(*args, **kwargs):\n return _gdi_.DC_MinX(*args, **kwargs)", "def minmax_scale(X, feature_range=..., *, axis=..., copy=...):\n ...", "def gauss_func(x, 
wid, cen, amp):\n\n return np.exp(-((x-cen)**2.)/(2.*wid**2)) * amp", "def _gaussian(self, c, sigma):\n d = 2*sigma*sigma\n ax = exp(-power(self._xx-self._xx.T[c], 2)/d)\n ay = exp(-power(self._yy-self._yy.T[c], 2)/d)\n return (ax * ay).T # the external product gives a matrix", "def gev_ll(loc,c,scale):\n \n def gev_logp(value):\n scaled = (value - loc) / scale\n logp = -(scale\n + ((c + 1) / c) * tt.log1p(c * scaled)\n + (1 + c * scaled) ** (-1/c))\n alpha = loc - scale / c\n \n # If the value is greater than zero, then check to see if \n # it is greater than alpha. Otherwise, check to see if it \n # is less than alpha.\n bounds = tt.switch(value > 0, value > alpha, value < alpha)\n \n # The returned array will have entries of -inf if the bound\n # is not satisfied. This condition says \"if c is zero or\n # value is less than alpha, return -inf and blow up \n # the log-likelihood.\n return bound(logp, bounds, c != 0)\n return gev_logp", "def xscale(value):\n impl.xscale(**locals())", "def robust_scale(X, *, axis=..., with_centering=..., with_scaling=..., quantile_range=..., copy=..., unit_variance=...):\n ...", "def gen_gaussian_low(img, c_res, c=0.5, vx_size=1):\n\n # Input parsing\n assert (c_res > 0) and (c > 0) and (vx_size > 0)\n assert isinstance(img, np.ndarray) and (len(img.shape) == 3)\n\n # Initialization\n f_vx = c_res / vx_size\n ff_vx = min(img.shape) / (2. * np.pi * f_vx)\n sf_vx = ff_vx / math.sqrt(2. * math.log(1. / c))\n\n # Meshgrid generation\n nx, ny, nz = (img.shape[0] - 1) * .5, (img.shape[1] - 1) * .5, (img.shape[2] - 1) * .5\n if (nx % 1) == 0:\n arr_x = np.concatenate((np.arange(-nx, 0, 1), np.arange(0, nx + 1, 1)))\n else:\n if nx < 1:\n arr_x = np.arange(0, 1)\n else:\n nx = math.ceil(nx)\n arr_x = np.concatenate((np.arange(-nx, 0, 1), np.arange(0, nx, 1)))\n if (ny % 1) == 0:\n arr_y = np.concatenate((np.arange(-ny, 0, 1), np.arange(0, ny + 1, 1)))\n else:\n if ny < 1:\n arr_y = np.arange(0, 1)\n else:\n ny = math.ceil(ny)\n arr_y = np.concatenate((np.arange(-ny, 0, 1), np.arange(0, ny, 1)))\n if (nz % 1) == 0:\n arr_z = np.concatenate((np.arange(-nz, 0, 1), np.arange(0, nz + 1, 1)))\n else:\n if nz < 1:\n arr_z = np.arange(0, 1)\n else:\n nz = math.ceil(nz)\n arr_z = np.concatenate((np.arange(-nz, 0, 1), np.arange(0, nz, 1)))\n [X, Y, Z] = np.meshgrid(arr_x, arr_y, arr_z, indexing='ij')\n X = X.astype(np.float32, copy=False)\n Y = Y.astype(np.float32, copy=False)\n Z = Z.astype(np.float32, copy=False)\n R = np.sqrt(X * X + Y * Y + Z * Z)\n\n # Building\n return np.exp(-R / (2.*sf_vx*sf_vx))", "def test_genextreme_fit(self):\n p = generic.fit(self.genextreme, \"genextreme\")\n np.testing.assert_allclose(p, (0.20949, 297.954091, 75.7911863), 1e-5)", "def _fspecial_gauss_1d(self, size, sigma):\n coords = torch.arange(size).to(dtype=torch.float)\n coords -= size // 2\n g = torch.exp(-(coords ** 2) / (2 * sigma ** 2))\n g /= g.sum()\n return g.reshape(-1)", "def _fspecial_gauss_1d(self, size, sigma):\n coords = torch.arange(size).to(dtype=torch.float)\n coords -= size // 2\n\n g = torch.exp(-(coords ** 2) / (2 * sigma ** 2))\n g /= g.sum()\n\n return g.unsqueeze(0).unsqueeze(0)", "def __new__(cls, minx, miny, minz, maxx, maxy, maxz):\n # Coerce bounds to floats, and nones to infs\n kwargs = locals()\n for b, inf in zip(('min', 'max'),\n (-np.inf, np.inf)):\n for axis in 'xyz':\n bound = b + axis\n value = kwargs[bound]\n kwargs[bound] = inf if value is None else float(value)\n \n kwargs.pop('cls') # must be passed positionally\n return super(cls, cls).__new__(cls, 
**kwargs)", "def __call__(\n self,\n loc: Union[np.ndarray, float],\n scale: Union[np.ndarray, float] = 1.0,\n size: Optional[Union[List[int], int]] = None,\n **kwargs,\n ) -> RandomVariable:\n return super().__call__(loc, scale, size=size, **kwargs)", "def extreme_jacobian_entries(\n m=None, scaled=True, large=1e4, small=1e-4, zero=1e-10, jac=None, nlp=None\n):\n if jac is None or nlp is None:\n jac, nlp = get_jacobian(m, scaled)\n el = []\n for i, c in enumerate(nlp.clist):\n for j in jac[i].indices:\n v = nlp.vlist[j]\n e = abs(jac[i, j])\n if (e <= small and e > zero) or e >= large:\n el.append((e, c, v))\n return el", "def apply_short1(y, A, c, scale=1):\n m = A.nrows()\n y = vector(ZZ, 1/ZZ(scale) * y[-m:])\n a = balanced_lift(y*A)\n e = balanced_lift(y*c)\n return a, e", "def pl_exp_cut(x, mask=None, **params):\n energy_scale = np.ones(x.shape)\n if mask is not None:\n # E -> E * (1 + s)\n energy_scale[mask] += params['Energy_Scale']\n else:\n # apply to all energies\n energy_scale += params['Energy_Scale']\n\n if isinstance(x, u.Quantity):\n energy_scale *= u.dimensionless_unscaled\n\n x_scaled = x * energy_scale\n result = params[\"Prefactor\"] * np.power(x_scaled / params[\"Scale\"], params[\"Index\"])\n result *= np.exp(-x_scaled / params[\"Cutoff\"])\n return result" ]
[ "0.5140067", "0.51391375", "0.51391375", "0.51391375", "0.51391375", "0.51391375", "0.51385653", "0.512212", "0.51002985", "0.5092538", "0.5085862", "0.5006036", "0.49219593", "0.49213806", "0.4891641", "0.48677793", "0.48438725", "0.4842382", "0.48329198", "0.48204932", "0.48114428", "0.4772086", "0.47648063", "0.47611213", "0.47515526", "0.47369727", "0.47274134", "0.4711493", "0.47055256", "0.47015047" ]
0.6448586
0
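A hedged usage sketch for the constructor in the record above, assuming it is exposed as tfp.distributions.GeneralizedExtremeValue in TensorFlow Probability; the parameter values are arbitrary.

import tensorflow_probability as tfp

tfd = tfp.distributions

# loc broadcasts against the scalar scale and concentration, giving batch_shape [2].
gev = tfd.GeneralizedExtremeValue(loc=[0.0, 1.0], scale=1.0, concentration=0.1)

samples = gev.sample(5)            # shape [5, 2]
log_probs = gev.log_prob(samples)  # elementwise log density, shape [5, 2]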
Construct Artillery YAML configuration
def set_yaml_config(self) -> None: # LT-248: We can pick Artillery Phase configuration from conf file self.yaml_config = { "config": { "target": self.get_swagger_url(), "processor": f"./{self.OUT_FILE}", "phases": [ { "duration": settings.DURATION or 1, "arrivalRate": settings.SPAWN_RATE or 1 } ] }, "scenarios": self.task_set.yaml_flow }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def yamlConfigForParsingPlugins():\n parameters = \"\"\"\njoinPaths: !joinPaths\n - a\n - b\n - \"c\"\nrunPageTemplates: !findRunPageTemplates\n - \"templates\"\nbcrypt: !bcrypt\n bcryptLogRounds: 12\n user: \"pass\"\nbcryptNoUser: !bcrypt\n bcryptLogRounds: 12\n null: null\nsecretKey: !secretKey 12345\nsecretKeyGen: !secretKey null\n \"\"\"\n # Load parameters\n parameters = yaml.load(parameters, Loader = yaml.SafeLoader)\n return parameters", "def create_yaml(self):\n if self._language == PYTHON:\n language_str = 'python'\n package_route = '$(System.DefaultWorkingDirectory)'\n dependencies = self._python_dependencies()\n elif self._language == NODE:\n language_str = 'node'\n package_route = '$(System.DefaultWorkingDirectory)'\n dependencies = self._node_dependencies()\n elif self._language == DOTNET:\n language_str = 'dotnet'\n package_route = '$(System.DefaultWorkingDirectory)/publish_output/s'\n dependencies = self._dotnet_dependencies()\n elif self._language == POWERSHELL:\n language_str = 'powershell'\n package_route = '$(System.DefaultWorkingDirectory)'\n dependencies = self._powershell_dependencies()\n else:\n raise LanguageNotSupportException(self._language)\n\n if self._app_type == WINDOWS:\n platform_str = 'windows'\n yaml = self._generate_yaml(dependencies, 'VS2017-Win2016', language_str, platform_str, package_route)\n else:\n platform_str = 'linux'\n yaml = self._generate_yaml(dependencies, 'ubuntu-16.04', language_str, platform_str, package_route)\n\n with open('azure-pipelines.yml', 'w') as f:\n f.write(yaml)", "def generate_config():\n\n return {\n \"email_subject\": DEFAULT_EMAIL_SUBJECT,\n \"from_email\": DEFAULT_FROM_EMAIL,\n \"to_email\": DEFAULT_TO_EMAIL,\n \"url\": DEFAULT_URL,\n \"start_value\": DEFAULT_START_VALUE,\n \"look_ahead\": DEFAULT_LOOK_AHEAD,\n \"slide_window\": DEFAULT_SLIDE_WINDOW,\n }", "def config():\n if app.args.ui_mode == \"jinja\":\n ui_config = {\n \"p1\": {\n \"options\": {\n \"lineNumbers\": True,\n \"theme\":\"material\",\n \"lineWrapping\" : True,\n \"mode\": \"yaml\",\n \"indentUnit\": 2,\n \"tabSize\": 2\n },\n \"title\": \"DATA\",\n \"inventory\": bool(app.args.inventory_source),\n \"b1\": {\n \"icon\": None,\n \"show\": False,\n \"text\": None,\n \"url\": None\n }\n },\n \"p2\": {\n \"options\": {\n \"lineNumbers\": True,\n \"theme\": \"material\",\n \"lineWrapping\" : True,\n \"mode\": \"jinja2\"\n },\n \"title\": \"RENDER\",\n \"b1\": {\n \"icon\": \"create\",\n \"show\": True,\n \"text\": \"Render\",\n \"url\": \"/render\"\n }\n },\n \"p3\": {\n \"options\": {\n \"lineNumbers\": True,\n \"theme\": \"material\",\n \"lineWrapping\" : True,\n \"mode\": 'text'\n },\n \"title\": \"RESULT\",\n \"b1\": {\n \"icon\": \"link\",\n \"show\": bool(app.args.url),\n \"text\": \"link\"\n }\n }\n }\n elif app.args.ui_mode == \"schema\":\n ui_config = {\n \"p1\": {\n \"options\": {\n \"lineNumbers\": True,\n \"theme\":\"material\",\n \"lineWrapping\" : True,\n \"mode\": \"yaml\",\n \"indentUnit\": 2,\n \"tabSize\": 2\n },\n \"title\": \"DATA\",\n \"inventory\": bool(app.args.inventory_source),\n \"b1\": {\n \"icon\": \"create\",\n \"show\": True,\n \"text\": \"schema\",\n \"url\": \"/schema\"\n }\n },\n \"p2\": {\n \"options\": {\n \"lineNumbers\": True,\n \"theme\": \"material\",\n \"lineWrapping\" : True,\n \"mode\": \"yaml\"\n },\n \"title\": \"SCHEMA\",\n \"b1\": {\n \"icon\": \"check\",\n \"show\": True,\n \"text\": \"Validate\",\n \"url\": \"/validate\"\n }\n },\n \"p3\": {\n \"options\": {\n \"lineNumbers\": True,\n \"theme\": \"material\",\n 
\"lineWrapping\" : True,\n \"mode\": \"yaml\"\n },\n \"title\": \"VALIDATION SUCCESS/ERRORS\",\n \"b1\": {\n \"icon\": \"link\",\n \"show\": bool(app.args.url),\n \"text\": \"link\"\n }\n }\n }\n return jsonify(ui_config)", "def build_configs():", "def user_create_yaml(self):\n pass", "def minimal_config():\n return yaml.round_trip_load(\n textwrap.dedent(\n r\"\"\"\n static_data_config:\n reference:\n path: /path/to/ref.fa\n\n step_config:\n ngs_mapping:\n tools:\n rna: ['star']\n star:\n path_index: /path/to/star/index\n\n data_sets:\n first_batch:\n file: sheet.tsv\n search_patterns:\n - {'left': '*/*/*_R1.fastq.gz', 'right': '*/*/*_R2.fastq.gz'}\n search_paths: ['/path']\n type: matched_cancer\n naming_scheme: only_secondary_id\n \"\"\"\n ).lstrip()\n )", "def get_configured_yaml() -> ModuleType:\n import yaml\n\n from manubot.cite.csl_item import CSL_Item\n\n yaml.add_representer(str, _yaml_str_representer)\n # CSL_Item: pyyaml chokes on dict subclass\n # https://github.com/yaml/pyyaml/issues/142\n # https://stackoverflow.com/a/50181505/4651668\n yaml.add_representer(\n CSL_Item,\n lambda dumper, data: dumper.represent_mapping(\n tag=\"tag:yaml.org,2002:map\", mapping=data.items()\n ),\n )\n return yaml", "def yaml(self):\n raise NotImplementedError", "def configs(self):\n yield \"singleimage\", build_config.BuildConfig()", "def exp_config():\n with open(\n os.path.join(os.path.dirname(os.path.abspath(__file__)), \"experiment.yaml\")\n ) as f:\n exp_config = list(yaml.safe_load_all(f))\n\n for config in exp_config[0]:\n backward.populate_space(config)\n\n return exp_config", "def test_yaml_creation():\n ligand_path = examples_paths()['p-xylene']\n toluene_path = examples_paths()['toluene']\n with mmtools.utils.temporary_directory() as tmp_dir:\n molecules = \"\"\"\n T4lysozyme:\n filepath: {}\n leap: {{parameters: oldff/leaprc.ff14SB}}\"\"\".format(examples_paths()['lysozyme'])\n solvent = \"\"\"\n vacuum:\n nonbonded_method: NoCutoff\"\"\"\n protocol = indent(standard_protocol)\n system = \"\"\"\n system:\n ligand: p-xylene\n receptor: T4lysozyme\n solvent: vacuum\"\"\"\n experiment = \"\"\"\n protocol: absolute-binding\n system: system\"\"\"\n\n yaml_content = \"\"\"\n ---\n options:\n output_dir: {}\n molecules:{}\n p-xylene:\n filepath: {}\n antechamber: {{charge_method: bcc}}\n leap: {{parameters: leaprc.gaff}}\n benzene:\n filepath: {}\n antechamber: {{charge_method: bcc}}\n leap: {{parameters: leaprc.gaff}}\n solvents:{}\n GBSA-OBC2:\n nonbonded_method: NoCutoff\n implicit_solvent: OBC2\n systems:{}\n protocols:{}\n experiments:{}\n \"\"\".format(os.path.relpath(tmp_dir), molecules,\n os.path.relpath(ligand_path), toluene_path,\n solvent, system, protocol, experiment)\n\n # We need to check whether the relative paths to the output directory and\n # for p-xylene are handled correctly while absolute paths (T4lysozyme) are\n # left untouched\n expected_yaml_content = textwrap.dedent(\"\"\"\n ---\n version: '{}'\n options:\n experiments_dir: .\n output_dir: .\n molecules:{}\n p-xylene:\n filepath: {}\n antechamber: {{charge_method: bcc}}\n leap: {{parameters: leaprc.gaff}}\n solvents:{}\n systems:{}\n protocols:{}\n experiments:{}\n \"\"\".format(HIGHEST_VERSION, molecules, os.path.relpath(ligand_path, tmp_dir),\n solvent, system, protocol, experiment))\n expected_yaml_content = expected_yaml_content[1:] # remove first '\\n'\n\n exp_builder = ExperimentBuilder(textwrap.dedent(yaml_content))\n\n # during setup we can modify molecule's fields, so we need\n # to check that it doesn't 
affect the YAML file exported\n experiment_dict = yaml.load(experiment, Loader=yaml.FullLoader)\n exp_builder._db.get_system(experiment_dict['system'])\n\n generated_yaml_path = os.path.join(tmp_dir, 'experiment.yaml')\n exp_builder._generate_yaml(experiment_dict, generated_yaml_path)\n with open(generated_yaml_path, 'r') as f:\n assert yaml.load(f, Loader=yaml.FullLoader) == yank_load(expected_yaml_content)", "def celery_config() -> Dict:\n with open(script_dir + 'config.yml', 'r') as yamlfile:\n cfg = yaml.load(yamlfile, Loader=yaml.SafeLoader)\n celery_cfg = cfg['celery']\n result = {\n 'main': celery_cfg['main'],\n 'broker': celery_cfg['broker_url'],\n 'backend': celery_cfg['backend_url'],\n }\n return result", "def _separate(self):\n s = self.as_yamlstr()\n self._config = yaml.load(s, Loader=yaml.Loader)\n self._comments = self._extract_comments(self._yaml_config)", "def _parse_yaml_configs(args, anon_component_prefix=\"anon_app\"):\n # Configuration files are basically nested dictionaries and the command-line arguments\n # are a list with each element being a dictionary. If the dict in the args has the key\n # 'class', then it is anonymous and we should just give it a sequential unique name to\n # ensure it is run. If, however, it does not, then we should assume that it's a NAMED\n # configuration and so we can actually use that to overwrite/modify the configurations\n # pulled in from a file.\n\n new_configs = {}\n for arg in args:\n try:\n arg = yaml.load(arg)\n except (yaml.parser.ParserError, yaml.scanner.ScannerError) as e:\n raise ValueError(\"error parsing manual configuration: %s\\nError:%s\" % (arg, e))\n\n # If this config is anonymous, give it a unique name and add it to configs\n # since it couldn't possibly overwrite another config entry.\n # NOTE: if user specified a 'name' entry directly, we will still take that later on...\n if 'class' in arg:\n # TODO: perhaps register these names somewhere to ensure uniqueness?\n global __scale_client_n_anon_apps_added__\n unique_key = anon_component_prefix + str(__scale_client_n_anon_apps_added__)\n __scale_client_n_anon_apps_added__ += 1\n new_configs[unique_key] = arg\n else:\n try:\n new_configs.update(arg)\n except TypeError as e:\n raise ValueError(\"error in your manual configuration: %s\\n\"\n \"couldn't be interpreted as a dict due to error: %s\" % (arg, e))\n\n return new_configs", "def configuration():", "def manage_config() -> dict:\n required_args = {\"embedding_size\", \"hidden_size\", \"num_layers\", \"corpus_dir\"}\n arg_groups = {\n \"general\": {\"recoding_type\"},\n \"model\": {\"embedding_size\", \"hidden_size\", \"num_layers\", \"dropout\"},\n \"train\": {\"weight_decay\", \"learning_rate\", \"batch_size\", \"num_epochs\", \"clip\", \"print_every\", \"eval_every\",\n \"model_save_path\", \"device\", \"model_name\"},\n \"logging\": {\"log_dir\"},\n \"corpus\": {\"corpus_dir\", \"max_seq_len\"},\n \"recoding\": {\"step_type\", \"num_samples\", \"mc_dropout\", \"prior_scale\", \"hidden_size\", \"weight_decay\",\n \"data_noise\", \"share_anchor\", \"use_cross_entropy\"},\n \"step\": {\"predictor_layers\", \"window_size\", \"step_size\", \"hidden_size\"}\n }\n argparser = init_argparser()\n config_object = ConfigSetup(argparser, required_args, arg_groups)\n config_dict = config_object.config_dict\n\n return config_dict", "def get_valid_config(args):\n source = confuse.YamlSource(args.config)\n config = confuse.RootView([source])\n\n job_template = {\n \"job\": {\n \"name\": str,\n \"dir\": confuse.Optional(\n 
FilenameValidate(\n cwd=str(pathlib.Path(__file__).parent.absolute())),\n default=str(pathlib.Path(__file__).parent.absolute())\n ),\n }\n }\n job_config = config.get(job_template)\n\n logging_template = confuse.Optional(\n confuse.MappingTemplate({\n 'ids': confuse.StrSeq(),\n 'data': confuse.Sequence(\n confuse.Choice(['objectives', 'state', 'variables'])),\n 'timestamped': confuse.Optional(bool, default=True),\n \"to_file\": confuse.Optional(bool, default=True),\n \"to_console\": confuse.Optional(bool, default=False)\n })\n )\n\n sumo_template = {\n \"dir\": FilenameValidate(\n cwd=job_config.job.dir),\n \"gui\": confuse.Optional(bool, default=True),\n \"max_steps\": confuse.Optional(int, default=10e5),\n \"network\": FilenameValidate(relative_to=\"dir\"),\n }\n sumo_config = config.get({\"sumo\": sumo_template})\n sumo_template[\"additional\"] = confuse.Sequence(\n FilenameValidate(cwd=sumo_config.sumo.dir))\n sumo_template[\"route\"] = confuse.Sequence(\n FilenameValidate(cwd=sumo_config.sumo.dir))\n\n tls_template = confuse.Sequence({\n \"id\": str,\n \"controller\": confuse.Choice(\n TLSFactory.get_registered_keys()),\n \"constants\": confuse.MappingValues(\n confuse.OneOf([\n confuse.Number(),\n AllowedContainers(list),\n AllowedContainers(dict),\n FilenameValidate(cwd=job_config.job.dir),\n ExecutableValidate()\n ])\n ),\n \"variables\": confuse.MappingValues(\n confuse.OneOf([\n confuse.Number(),\n AllowedContainers(list)\n ])\n ),\n \"extract\": {\n \"user_data\": confuse.Sequence({\n \"feature\": confuse.Choice(\n [\"count\", \"speed\", \"eta\", \"delay\", \"waiting_time\"]),\n \"user_class\": confuse.Choice(\n [\"bicycle\", \"passenger\", \"pedestrian\", \"bus\", \"truck\", \"moped\"]),\n \"at\": confuse.Choice(\n [\"lane\", \"detector\", \"phase\"]),\n \"mapping\": AllowedContainers(dict)\n }),\n \"tls_data\": confuse.Sequence({\n \"feature\": confuse.Choice(\n [\"elapsed_time\", \"integer_phase\", \"binary_phase\"]),\n \"to_variable\": str\n })\n }\n })\n\n full_template = {\n \"logging\": logging_template,\n \"sumo\": sumo_template,\n \"tls\": tls_template,\n }\n job_template.update(full_template)\n valid_config = config.get(job_template)\n\n # second round of sumo validation\n assert len(valid_config.sumo.route) > 0, \\\n \"No demand definition: sumo.route is an empty list, expected at least one *.rou.xml\"\n \n # second round of logger validation, look if ids are given\n if valid_config.logging:\n if valid_config.logging.ids and valid_config.logging.data:\n output_dir = os.path.join(valid_config.job.dir, \"output\")\n os.makedirs(output_dir, exist_ok=True)\n valid_config.logging.update({\"dir\": output_dir})\n else:\n del valid_config['logging']\n\n return valid_config", "def config():", "def config():", "def _get_yaml_parser():\n # using a function here so settings are always the same\n parser = YAML(typ=\"jinja2\")\n parser.indent(mapping=2, sequence=4, offset=2)\n parser.width = 320\n parser.preserve_quotes = True\n return parser", "def setup_config():\n\n config = configparser.ConfigParser()\n config.read(CONFIG_PATH)\n\n return config", "def __init__(self):\n # Read configuration into dictionary\n self.directories = general.config_directories()\n self.config = general.read_yaml_files(self.directories)", "def configure(self, yaml_file):\n with open(yaml_file, \"r\") as f:\n panorama_conf = yaml.load(f)\n\n # Configuring factories to:\n # - get only title, date and category from article metadata\n # - rename the first 4 tags with the names defined below\n\n 
self.data_factory = DataFactory(\n metadata_columns=panorama_conf[\"metadata_columns\"],\n tag_columns=panorama_conf[\"tag_columns\"],\n )\n self.chart_factory = ChartFactory()\n\n # Configuring the charts if a chart configuration information is available in the conf file\n if \"chart_conf\" in panorama_conf:\n self.chart_factory.chart_conf = panorama_conf[\"chart_conf\"]\n\n # Creating the configurations\n for yaml_conf in panorama_conf[\"confs\"]:\n chart_id = yaml_conf[\"chart_id\"]\n try:\n producer = self._create_producer(yaml_conf[\"producer\"])\n renderer = self._create_renderer(yaml_conf[\"renderer\"], chart_id)\n self.append_conf(\n chart_id=chart_id, producer=producer, renderer=renderer\n )\n except ValueError as err:\n logger.exception(\n \"Error while initializing [%s] conf. -> chart not available.\",\n chart_id,\n )", "def create_boot_config(configuration_manager, credential, storage_uri, password):\n\n config = ConfigParser.SafeConfigParser()\n \n rabbit_dict = {'rabbit_host': 'localhost', \n 'rabbit_port': '5672', \n 'rabbit_use_ssl': 'False',\n 'rabbit_userid': 'user',\n 'rabbit_password': 'password',\n 'rabbit_virtual_host': '/',\n 'amqp_connection_uri': None }\n \n section = 'messaging'\n config.add_section(section) \n for k in rabbit_dict.keys():\n v = configuration_manager.get(k, rabbit_dict[k])\n if v:\n config.set(section, k, v)\n\n section = 'database'\n config.add_section(section)\n config.set(section, 'initial_password', password)\n\n if storage_uri and len(storage_uri) > 0:\n section = 'snapshot'\n config.add_section(section)\n config.set(section, 'snapshot_uri', storage_uri)\n config.set(section, 'swift_auth_url', configuration_manager.get('reddwarf_proxy_swift_auth_url', 'http://0.0.0.0:5000/v2.0'))\n config.set(section, 'swift_auth_user', \"%s:%s\" % (credential['tenant_id'], credential['user_name']))\n config.set(section, 'swift_auth_key', credential['password'])\n config.set(section, 'snapshot_key', configuration_manager.get('snapshot_key',\"changeme\"))\n \n mem_file = StringIO.StringIO()\n config.write(mem_file)\n \n return mem_file.getvalue()", "def create_config(self) -> None:\n pass", "def create_config(self) -> None:\n pass", "def load_args(self):\n\n # retrieve module path\n dir_path = os.path.dirname(os.path.abspath(__file__))\n dir_path = os.path.split(dir_path)[0]\n # get all the default yaml configs with glob\n dir_path = os.path.join(dir_path, 'configs', '*.yml')\n\n # -- From default yapt configuration\n self._defaults_path = {}\n self._defaults_yapt = OmegaConf.create(dict())\n for file in glob.glob(dir_path):\n # split filename from path to create key and val\n key = os.path.splitext(os.path.split(file)[1])[0]\n self._defaults_path[key] = file\n # parse default args\n self._defaults_yapt = OmegaConf.merge(\n self._defaults_yapt, OmegaConf.load(file))\n\n # -- From command line\n self._cli_args = OmegaConf.from_cli()\n if self._cli_args.config is not None:\n self.default_config = self._cli_args.config\n del self._cli_args['config']\n self.console_log.warning(\"override default config with: %s\", self.default_config)\n\n # -- From experiment default config file\n self._default_config_args = OmegaConf.create(dict())\n if self.default_config is not None:\n self._default_config_args = OmegaConf.load(self.default_config)\n\n # -- Merge default args\n self._args = OmegaConf.merge(\n self._defaults_yapt,\n self._default_config_args)\n\n # -- Resolve interpolations to be sure all nodes are explicit\n # self._args = OmegaConf.to_container(self._args, 
resolve=True)\n # self._args = OmegaConf.create(self._args)\n\n # -- make args structured: it fails if accessing a missing key\n OmegaConf.set_struct(self._args, True)", "def configs(self):\n raise NotImplementedError()", "def test_load_config_with_aliases(self):\n with open(\".scuba.yml\", \"w\") as f:\n f.write(\"image: bosybux\\n\")\n f.write(\"aliases:\\n\")\n f.write(\" foo: bar\\n\")\n f.write(\" snap: crackle pop\\n\")\n\n config = scuba.config.load_config(\".scuba.yml\")\n assert config.image == \"bosybux\"\n assert len(config.aliases) == 2\n assert config.aliases[\"foo\"].script == [\"bar\"]\n assert config.aliases[\"snap\"].script == [\"crackle pop\"]" ]
[ "0.6141873", "0.6054223", "0.6019494", "0.5994903", "0.59923637", "0.5968164", "0.59273314", "0.57849234", "0.5761136", "0.57510424", "0.57198846", "0.5717234", "0.57150394", "0.5667676", "0.5636936", "0.56257606", "0.55977577", "0.55945677", "0.55880404", "0.55880404", "0.55735403", "0.5540175", "0.551129", "0.5499784", "0.5492602", "0.54755855", "0.54755855", "0.5462785", "0.5429664", "0.541362" ]
0.69550043
0
Tell if a person is allergic to the given allergen.
def is_allergic_to(self, allergen):
    return allergen in self.list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_allergen(self, is_allergen):\n\n self._is_allergen = is_allergen", "def in_garden(obj):\n print(\"Searching the garden's random objects\")\n return obj in _random_objects", "def allergies(self, allergies):\n\n self.logger.debug(\"In 'allergies' setter.\")\n\n self._allergies = allergies", "def eligiblePresident(age,bornInHomeland):\n return (age>=35) and bornInHomeland", "def is_alld(self):\n g = self.get_gene().get_seq()\n for i in range(1, len(g)):\n if g[i] is 'c':\n return False\n return True", "def is_ligand(self):\n if any(LigandComponentAdaptor().fetch_by_residue_id(r.residue_id) for r in self.Residues):\n return True\n else:\n return False", "def _bot_assigned_bell(self, bell: Bell) -> bool:\n return self._tower.is_bell_assigned_to(bell, self._user_name)", "def is_bothell_student():\n return _is_member('uw_affiliation_bothell-student')", "def satisfies(self, reg):\n ### If no value, there is no need for filtering\n if self.getValues()==['']:\n return True\n affiliation = self.getValues()[0]\n return True if (affiliation == reg.getRepresentationType()[\"organizationRepresentative\"]) else False", "async def get_guardian_email(guardian_id: UUID, angel_name: str) -> str:\n try:\n user = await User.get(id=guardian_id)\n except DoesNotExist:\n return False\n\n angels = await user.fetch_related(\"angels\")\n for angel in angels:\n if angel.name == angel_name:\n return user.email\n return False", "def is_any_mentor_became_human(self):\n for mentor in self.mentors:\n if mentor.humanity_level >= 10:\n print(\"\\033[44m\"+mentor.first_name, mentor.last_name+\" called \"+ mentor.nickname+\" has become human \"\n \"Is ready to deliver to new Codecool facility!\", mentor.first_name, mentor.last_name,\n \"may the Force be with You!\\033[0m\")\n time.sleep(3)\n return True\n return False", "def is_allergic_to(self, item):\n if item in self.list:\n return True\n else:\n return False", "def _user_assigned_bell(self, bell: Bell) -> bool:\n return not self._bot_assigned_bell(bell)", "def is_allc(self):\n g = self.get_gene().get_seq()\n for i in range(1, len(g)):\n if g[i] is 'd':\n return False\n return True", "def all(x) -> bool:\n pass", "def addAllergies(self):\n if int(self.pid)%100 < 85: # no allergies for ~ 85%\n exclusion = NO_ALLERGY.sub({\n 'exclusion':\"no known allergies\",\n 'exclusion_id':\"160244002\",\n }).done()\n self.data.append(SDMX.sub({'models':exclusion}, escape=False).done())\n else: # Sprinkle in some sulfa allergies\n al = DRUG_CLASS_ALLERGY.sub({\n 'reaction': \"skin rash\",\n 'reaction_id': \"271807003\",\n 'category': \"drug allergy\",\n 'category_id': \"416098002\",\n 'allergen': \"sulfonamide antibacterial\",\n 'allergen_id': \"N0000175503\",\n 'severity': \"mild\",\n 'severity_id': \"255604002\",\n }).done()\n self.data.append(SDMX.sub({'models':al}, escape=False).done())\n \n if int(self.pid)%2: # and throw in peanut allergies for every other patient\n al = FOOD_ALLERGY.sub({\n 'reaction': \"anaphylaxis\",\n 'reaction_id': \"39579001\",\n 'category': \"food allergy\",\n 'category_id': \"414285001\",\n 'allergen': \"peanut\",\n 'allergen_id': \"QE1QX6B99R\",\n 'severity': \"severe\",\n 'severity_id': \"24484000\",\n }).done()\n self.data.append(SDMX.sub({'models':al}, escape=False).done())", "def feasible(individual):\n val=0;\n for i in individual:\n if viajes[val][6]==True and False==vehiculos_esp[i]:\n return False\n val+=1\n return True", "def check_any_light_on(bridge):\n for i,group in bridge.get_group().items():\n if group['state']['any_on']:\n return 
True\n return False", "def check_if_group_member(self, organism):\n for key, item in self.phen_dict.items():\n if organism in item:\n self.declare(Organism(name=key))", "def allele_semblable(self, mere):\n Similarite = 0\n for Allele in range(3):\n if self.allele[Allele] in mere.allele and self.allele[Allele] != 0.0:\n Similarite = Similarite + 1\n if Similarite == 2:\n self.informatif = 2", "def is_grad_student():\n return _is_member('uw_affiliation_graduate')", "def is_ligand(cls):\n return LigandComponent.residue_id == cls.residue_id", "def bust(person):\n if person.total > GOAL_TOTAL() and person.aceCount == 0:\n return True\n elif person.total > GOAL_TOTAL() and person.aceCount > 0:\n adjust_ace(person)\n return person.total > GOAL_TOTAL()\n else: # person.total <= GOAL_TOTAL()\n return False", "def enter_night_club(individual):\n if individual.age > LEGAL_DRINKING_AGE:\n print(\"Allowed to enter.\")\n else:\n print(\"Enterance of minors is denited.\")", "def is_monster_lord(self):\n return True", "def wife(backpack):\n print(\"\\nYour wife says: \")\n if \"corn\" in backpack:\n if backpack['corn'][0] < 20:\n print(\"-You need to gather 20 corn cob so get back to work! \")\n enter()\n else:\n print(\"-Ahh you are a bastard but I know your dream...\\nNow go to city and buy your ticket my love :* \")\n enter()\n return True # because of this we can change lvl\n if \"corn\" not in backpack:\n print(\"-Where have u been u f...... drunkard, \\nget back to work and collect 20 corn cobs! \")\n enter()", "def yell():\n ground_description_int = GROUND_FEATURES_LIST[ZERO_BASE_PLYR_POS]\n if ground_description_int != 12:\n printmessage(\"You yell, but nobody hears you.\", 5, CYAN, 1)\n else:\n printmessage(\"You have found the ranger, amd won the game!\", 5, GREEN, 3)\n die(\"ranger\")", "def any(self) -> bool:", "def is_all(variable):\n\n if isinstance(variable, str):\n return variable in ['all', 'All', 'ALL']\n\n return False", "def __contains__(self, ngram):\n return ngram in self.root" ]
[ "0.6099033", "0.5558612", "0.5521234", "0.5301362", "0.5294011", "0.5216652", "0.5088434", "0.507583", "0.5070816", "0.50542706", "0.5041537", "0.50312674", "0.49993923", "0.49899283", "0.49749395", "0.49660623", "0.49616873", "0.49227342", "0.49170405", "0.49064264", "0.48977208", "0.48425022", "0.48294932", "0.48152772", "0.48003718", "0.47859085", "0.47855812", "0.4783256", "0.478226", "0.4775519" ]
0.77161974
0
This returns a single entry corresponding to the Directory Entity referred to by FolderEntityData.
The returned string is given below (between Start and End)

Start
def getFolderEntry(FolderEntityData):
    if FolderEntityData.Type not in ['IntermediateDir', 'ExperimentDir']:
        errprint('\nThe given EntityData does not represent the data of a directory')
        raise ValueError
    OutputLines = []
    OutputLines.append("FolderID : {UID}".format(UID=FolderEntityData.ID))
    OutputLines.append("ParentFolderID : {UID}".format(UID=FolderEntityData.ParentID))
    OutputLines.append("FolderType : {Type}".format(Type=FolderEntityData.Type))
    OutputLines.append("FolderTitle : {Title}".format(Title=FolderEntityData.Title))
    OutputLines.append("FolderDescription: |-2")
    OutputLines += [" "+Line for Line in FolderEntityData.Description.splitlines()]
    OutputLines.append("")
    return "\n".join(OutputLines)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getFolderItemName(self) -> unicode:\n ...", "def getFolderPath(self) -> unicode:\n ...", "def directory_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"directory_id\")", "def get(self):\n return self.directory_name", "def metadataDirectory(self):\n guid_hash = utils.sha1hash(self.parentGuid)\n return str(Path('Metadata') / 'TV Shows' / guid_hash[0] / f'{guid_hash[1:]}.bundle')", "def metadataDirectory(self):\n guid_hash = utils.sha1hash(self.grandparentGuid)\n return str(Path('Metadata') / 'TV Shows' / guid_hash[0] / f'{guid_hash[1:]}.bundle')", "def path(self):\n return self._dir_entry.path", "def get_entries(self):\n for irde in self.Entries:\n if irde != None:\n if irde.Name & 0x80000000:\n # Points to a Name object\n name = obj.Object(\"_IMAGE_RESOURCE_DIR_STRING_U\", (irde.Name & 0x7FFFFFFF) + self.sectoffset, vm = self.obj_vm, parent = irde)\n else:\n name = int(irde.Name)\n if irde.DataOffset & 0x80000000:\n # We're another DIRECTORY\n retobj = obj.Object(\"_IMAGE_RESOURCE_DIRECTORY\", (irde.DataOffset & 0x7FFFFFFF) + self.sectoffset, vm = self.obj_vm, parent = irde)\n retobj.sectoffset = self.sectoffset\n else:\n # We're a DATA_ENTRY\n retobj = obj.Object(\"_IMAGE_RESOURCE_DATA_ENTRY\", irde.DataOffset + self.sectoffset, vm = self.obj_vm, parent = irde)\n yield (name, bool(irde.DataOffset & 0x80000000), retobj)", "def get_relative_name(self):\n\t\treturn call_sdk_function('PrlFsEntry_GetRelativeName', self.handle)", "def Dirname(self):\n result = self.Copy()\n\n while 1:\n last_directory = posixpath.dirname(result.last.path)\n if last_directory != \"/\" or len(result) <= 1:\n result.last.path = last_directory\n # Make sure to clear the inode information.\n result.last.inode = None\n\n break\n\n result.Pop(-1)\n\n return result", "def get_path(self):\n definition = self.client._perform_json(\"GET\", \"/project-folders/%s\" % self.project_folder_id)\n parent_id = definition.get(\"parentId\", None)\n if parent_id is not None:\n parent = DSSProjectFolder(self.client, parent_id)\n path = parent.get_path()\n return (\"\" if path == \"/\" else path) + \"/\" + definition.get(\"name\", \"\")\n else:\n return \"/\"", "def folder(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"folder\")", "def _get_folder(self):\n # type: () -> str\n headers = Headers({\"content-type\": \"application/json\", \"accept\": \"application/json\"})\n response = self.connection.api_call(\n \"GET\", [\"v1\", \"resources\", self.id, \"folderpath\"], headers=headers\n )\n\n return response.json().get(\"path\")", "def metadataDirectory(self):\n guid_hash = utils.sha1hash(self.guid)\n return str(Path('Metadata') / 'TV Shows' / guid_hash[0] / f'{guid_hash[1:]}.bundle')", "def folder(self):\n return self._folder", "def entity_path(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"entity_path\")", "def Directory(self) -> str:", "def directory_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"directory_id\")", "def get_folder_short_name_for_location(self, location):\n _method_name = 'get_folder_short_name_for_location'\n _logger.entering(location.get_folder_path(), class_name=_class_name, method_name=_method_name)\n folder_dict = self.__get_dictionary_for_location(location, False)\n result = ''\n if SHORT_NAME in folder_dict:\n result = folder_dict[SHORT_NAME]\n _logger.exiting(class_name=_class_name, method_name=_method_name, result=result)\n return result", "def media_folder_name(self):\n raise NotImplementedError", "def entity_prefix(self):", "def 
get_name(self):\n return self.client._perform_json(\"GET\", \"/project-folders/%s\" % self.project_folder_id).get(\"name\", None)", "def FullDirItems():\n return fulldiritems", "def _get_full_entity(entity: spacy.tokens.Token) -> str:\n entity_string = SpacyEventExtractor._get_chunk(entity)\n\n word = entity\n while True:\n prep, word = SpacyEventExtractor._get_prep_with_word(word)\n if word is None:\n break\n entity_string += \" \" + prep\n return entity_string", "def _parse_dir(self):\r\n type_char = self._grab_type()\r\n user_name = self._grab_unascii() #This gets the user_name field for the DirEntity\r\n self._match(\"\\t\")\r\n selector = self._grab_unascii() #This gets the selector.\r\n self._match(\"\\t\")\r\n host = self._grab_host()\r\n self._match(\"\\t\")\r\n port = self._grab_port()\r\n self._match(\"\\r\\n\")\r\n return DirEntity(type_char, user_name, selector, host, port)", "def GetNTFSFileEntry(self):\n return self._fsntfs_file_entry", "def _getEntityStartKey(entityId):\n return \"%s\\x1D\" % entityId", "def getGroupFolder(self):\n if platform.system()==\"Windows\":\n groupFolder = os.path.join(\"\\\\\\\\ursa\",\"AQOGroupFolder\")\n if platform.system()==\"Linux\":\n groupFolder = os.path.join(\"/media\",\"ursa\",\"AQOGroupFolder\")\n return groupFolder", "def getPath(self):\n uid = str(self._result.uid)\n if not uid.startswith('/zport/dmd'):\n uid = '/zport/dmd/' + uid\n return uid", "def main_entity_of_page(self) -> str:\n return self._main_entity_of_page" ]
[ "0.5994919", "0.56029546", "0.5287925", "0.52766377", "0.5237696", "0.52345103", "0.5213454", "0.5185664", "0.517336", "0.51614946", "0.51094973", "0.51090777", "0.50988936", "0.50865364", "0.50664777", "0.5054286", "0.50433457", "0.5036197", "0.50105923", "0.4974974", "0.49746954", "0.49649084", "0.49560356", "0.4953408", "0.4924727", "0.49246567", "0.49189046", "0.4891939", "0.48900315", "0.48747793" ]
0.7627963
0
This returns a single entry corresponding to the Experiment Entity referred to by ExpEntityData.
The returned string is given below (between Start and End)

Start
def getExperimentEntry(ExpEntityData):
    # Validate that ExpEntityData actually corresponds to an Experiment Entity
    if ExpEntityData.Type != 'Experiment':
        errprint("\nThe Entity Data does not represent the data of an experiment")
        raise ValueError
    OutputLines = []
    OutputLines.append("")
    OutputLines.append("- ID : {ID}".format(ID=ExpEntityData.ID))
    OutputLines.append(" Title : {Title}".format(Title=ExpEntityData.Title))
    OutputLines.append(" Description: |-2")
    OutputLines += [" "+Line for Line in ExpEntityData.Description.splitlines()]
    OutputLines.append("")
    OutputLines.append("{0:#<100}".format("## End of Experiment {UID} ".format(UID=ExpEntityData.ID)))
    return "\n".join(OutputLines)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_full_entity(entity: spacy.tokens.Token) -> str:\n entity_string = SpacyEventExtractor._get_chunk(entity)\n\n word = entity\n while True:\n prep, word = SpacyEventExtractor._get_prep_with_word(word)\n if word is None:\n break\n entity_string += \" \" + prep\n return entity_string", "def entity_description(self, eid):\n entities = self._load_entities()\n return entities[eid][\"description\"]", "def entity_extract(self, eid):\n fname = os.path.join(\n self.data_dir_base, \"entities\", self.code, \"extracts\", f\"{eid}.txt\"\n )\n if os.path.exists(fname):\n with open(fname) as f:\n return \"\".join(f.readlines())\n return \"\"", "def getEntity(self):\n\n fid = file(self.filename)\n entityre = re.compile(\"entity (\\w+) is\", re.IGNORECASE)\n\n matches = entityre.search(fid.read())\n self.entityname = matches.groups()[0]\n return self.entityname", "def getexperimentinfo(expid):\n rdata = {}\n rdata['expId'] = expid\n res = requests.get(scbd_server_address + '/experiments/get_details', json=rdata)\n if res.status_code == 200:\n outstr = ''\n for cres in res.json()['details']:\n outstr += cres[0] + ':' + cres[1] + '<br>'\n # details=res.json()['details']\n return outstr\n return []", "def _getEntityEndKey(entityId):\n return \"%s\\x1E\" % entityId", "def current_entity(self):\n return self.entities[len(self.device_data[CONF_ENTITIES])]", "def _getEntityStartKey(entityId):\n return \"%s\\x1D\" % entityId", "def extent(obj):\n return obj.get('startOffset', -1), obj.get('endOffset', -1)", "def extent(obj):\n return obj.get('startOffset', -1), obj.get('endOffset', -1)", "def main_entity_of_page(self) -> str:\n return self._main_entity_of_page", "def entity_guid(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"entity_guid\")", "def test_entity(self):\n self.request.log(\"Hello World\", entities=(Entity(1337)(12, \"Demo\"),))\n self.request.end()\n entry = self.get_entry()\n assert 'entities' in entry\n assert len(entry['entities']) == 1\n assert entry['entities'][0] == dict(entity=1337, id=12, name=\"Demo\")", "def __str__(self) -> str:\n st = \"<Entity>: \\n{\\n\"\n for k, v in self._keys.items():\n if not isinstance(v, list):\n st += f\"\\t {k} = \\\"{v}\\\"\\n\"\n if self._fixup is not None:\n for k, v in self.fixup.items():\n st += f\"\\t ${k} = \\\"{v}\\\"\\n\"\n\n for out in self.outputs:\n st += f'\\t{out!s}\\n'\n st += \"}\\n\"\n return st", "def _entity_as_text(self):\n return str(self.value)", "def get_description(self):\n return \"It is an Entity.\"", "def exp_metadata(self) -> LabExperiment:\n\n return self._exp_metadata", "def GetEntity(self):\n\t\treturn self.acad.ActiveDocument.Utility.GetEntity()", "def entity_name(self):\n return self.__entity_name", "def eid(self):\n return self._json['coredata']['eid']", "def entity_key(mention):\n return mention.get('entityId')", "def entity_id(self) -> str:\n return self._entity_id", "def entity(self):\n return self._entity", "def entity(self):\n return self._entity", "def __repr__(self) -> str:\n desc: List[str] = []\n if classname := self['classname']:\n desc.append(classname)\n desc.append('Entity')\n if name := self['targetname']:\n desc.append(f'\"{name}\"({classname})')\n else:\n desc.append(classname)\n if hammerid := self['hammerid']:\n desc.append(f'#{hammerid}')\n if origin := self['origin']:\n desc.append(f'@ ({origin})')\n return f'<{\" \".join(desc)}>'", "def entity_path(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"entity_path\")", "def entity_search():\n data = {'EntityType': entity_type}\n 
parameters = data_to_json(data)\n url = base_url + 'ams/entity/search'\n response = make_request(url, parameters)\n r_value = ''\n if response['Status'] == 0:\n r_value = response['Value']['Records']\n return r_value", "def GetEntity(self):\n return self.__entity", "def entity_snippet(response):\n for result in response.results:\n e_set = extract_entities(result.summary)\n result.summary = ' '.join(e_set)\n return response", "def get_entity_name() -> str:\n return \"NewsItemEntity\"" ]
[ "0.6376407", "0.5688458", "0.56678385", "0.5637462", "0.5474638", "0.5404834", "0.5358794", "0.53286546", "0.5274664", "0.5274664", "0.52555805", "0.5246036", "0.51556396", "0.51428646", "0.51405764", "0.51387566", "0.51039517", "0.5103533", "0.5092884", "0.5080785", "0.50753295", "0.5066855", "0.5056109", "0.5056109", "0.5042214", "0.5036803", "0.5013337", "0.5006676", "0.5000942", "0.500042" ]
0.81062454
0
get all the employees out of the database
def get_employees(self):
    from Employee import Employee
    cursor = self.dbconnect.get_cursor()
    cursor.execute('select * from employee')
    employees = list()
    for row in cursor:
        employee = Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8])
        employees.append(employee)
    return employees
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(self):\n resultado = EmployeeModel.query.all()\n return resultado", "def get_employees(self, active_only):\n cursor = self.dbconnect.get_cursor()\n\n if active_only:\n cursor.execute(\n 'SELECT id, name, email, office, extra_info, picture_location, research_group, title, is_external, '\n 'is_admin, is_active FROM employee WHERE is_active = TRUE')\n else:\n cursor.execute(\n 'SELECT id, name, email, office, extra_info, picture_location, research_group, title, is_external, '\n 'is_admin, is_active FROM employee')\n\n employees = list()\n for row in cursor:\n obj = Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10])\n employees.append(obj)\n return employees", "def getEmployees(self):\n return self.employees", "def get(self):\n employees = self.service.get_employees(strategy=selectinload)\n return self.schema.dump(employees, many=True), 200", "def get_employees(self):\n return self.employees", "def employees():\n # gather data from db about all employees\n return render_template(\"employees.html\")", "def show_all_employees(self):\n try:\n employees = self.admin_repository.show_all_employees()\n if employees:\n for employee in employees:\n print(\"Employee Id : {}\".format(employee[0]))\n print(\"Name : {}\".format(employee[1]))\n print(\"Email : {}\".format(employee[2]))\n print(\"----------------------------\")\n return True\n else:\n print(\"No records found.\")\n return False\n except Exception as e:\n print(\"Some Error occurred.Please try again\")\n return False", "def employees(self) -> object:\n return self._employees", "def get_employees(cls, strategy=lazyload):\n cls._check_strategy(strategy)\n\n return db.session.query(Employee).options(\n strategy(Employee.department)\n ).all()", "def show_all_employees():\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n employees = Employee.query.all()\n \n \n ## right now this is ALL users... 
\n \n return render_template(\"employee_display.html\", employees = employees)", "def list(self, request):\n employee = self.controller.retrieve_all_employees()\n serializer = data_serializers.PresentEmployeeDataSerializer(employee, many=True)\n return Response(serializer.data)", "def get_emp_list(self):\n\t\tcondition= ''\n\t\temp_list=[]\n\t\tif self.is_for_all==0:\n\t\t\tif not self.selected_employees:\n\t\t\t\tfrappe.throw(_(\"No employees for the mentioned criteria\"))\n\t\t\t#emp_list = [cstr(d.employee) for d in self.selected_employees]\n\t\t\temp_list = frappe.db.sql_list(\"\"\"\n\t\t\t\tselect\n\t\t\t\t\temployee from `tabAttendance Salary Tool Employee`\n\t\t\t\twhere\n\t\t\t\t\tparent = '%(parent)s' \n\t\t\t\"\"\"%{\"parent\": self.name})\n\t\t\tcondition+= \"\"\" and t1.employee IN %(employees)s \"\"\"\n\t\tif self.is_open_period==0:\n\t\t\tif not self.start_date or not self.end_date:\n\t\t\t\tfrappe.throw(_(\"Satart Date and End Date are Mandatories\"))\n\t\t\tcondition= \"\"\" and attendance_date >= %(start_date)s and attendance_date <= %(end_date)s\"\"\"\n\t\temp_list = frappe.db.sql(\"\"\"\n\t\t\tselect\n\t\t\t\tt1.employee as employee, count(*) as attendance_days\n\t\t\tfrom\n\t\t\t\t`tabAttendance` t1\n\t\t\twhere\n\t\t\t\tt1.attendance_salary_tool is null\n\t\t\t\tand t1.docstatus = 1 and t1.status='Present'\n\t\t\t\t{condition} group by t1.employee order by t1.employee asc\n\t\t\"\"\".format(condition=condition),{\"employees\": tuple(emp_list),\"start_date\": self.start_date,\"end_date\": self.end_date}, as_dict=True)\n\t\treturn emp_list", "def employees(employee_id=None):\n\tif not employee_id:\n\t\temployee_data = _serialize_list(Employee.query.all())\n\telse:\n\t\temployee_data = _serialize_model(Employee.query.filter_by(id=employee_id).first())\n\n\tresp = jsonify(employee_data)\n\treturn resp", "def get_employees_in_department(department_name: str) -> list:\n\n conn = database_connect()\n if(conn is None):\n return None\n cur = conn.cursor()\n\n try:\n # SQL statement and execute\n sql = \"\"\"SELECT Employee.empid, Employee.name\n FROM Employee JOIN EmployeeDepartments USING(empid)\n WHERE EmployeeDepartments.department = %s\"\"\"\n cur.execute(sql, (department_name,))\n\n # Attempt to fetch all rows\n result = cur.fetchall()\n\n if result == None:\n cur.close()\n conn.close()\n return []\n\n employees = []\n for row in result:\n employees.append(\n [row[0], row[1]]\n )\n cur.close()\n conn.close()\n return employees\n except Exception as e:\n print(\"ooo\")\n print(e)\n # If nothing was returned, return empty list\n cur.close()\n conn.close()\n return []\n\n # TODO Dummy Data - Change to be useful!\n # Return the employees in the department.\n # Each \"row\" has: [ empid, name ]\n\n # employees = [\n # [15905, 'Rea Fibbings'],\n # [9438, 'Julia Norville'],\n # [36020, 'Adora Lansdowne'],\n # [98809, 'Nathanial Farfoot'],\n # [58407, 'Lynne Smorthit'],\n # ]\n #\n # return employees", "def employee_works_in(employee_id: int) -> List[str]:\n\n conn = database_connect()\n if(conn is None):\n return None\n cur = conn.cursor()\n\n try:\n # SQL statement and execute\n sql = \"\"\"SELECT department\n FROM EmployeeDepartments\n WHERE EmployeeDepartments.empid = %s\"\"\"\n cur.execute(sql, (employee_id,));\n\n # Attempt to fetch all\n result = cur.fetchall()\n\n if result == None:\n cur.close()\n conn.close()\n return []\n\n departments = []\n for row in result:\n departments.append(\n row[0]\n )\n\n cur.close()\n conn.close()\n return departments\n except Exception as 
e:\n print(\"ddd\")\n print(e)\n # If login failed, return None\n cur.close()\n conn.close()\n return []", "def employee(employee_id):\n # gather data from db about all employees\n return render_template(\"employee.html\",\n employee_id=employee_id)", "def get_employees_directory(self):\n response = requests.get(self._base_url + \"employees/directory\",\n auth=(self._api_key, \"pass\"),\n headers={'Accept': 'application/json'})\n if response.status_code != 200:\n response.raise_for_status()\n emps_json = json.loads(response.text)['employees']\n return {int(e['id']): Employee(e['displayName'],\n e['firstName'],\n e['lastName'],\n e['nickname']) for e in emps_json}", "def list_employees(order_by=\"id\"):\n ret = {}\n status, result = _query(action=\"employees\", command=\"directory\")\n root = ET.fromstring(result)\n for cat in root:\n if cat.tag != \"employees\":\n continue\n for item in cat:\n emp_id = next(iter(item.values()))\n emp_ret = {\"id\": emp_id}\n for details in item:\n emp_ret[next(iter(details.values()))] = details.text\n ret[emp_ret[order_by]] = emp_ret\n return ret", "def query_employee(self, employee_inputs):\n\n query = \"select * from employee where \"\n row_names = [\n \"emp_ID\", \"Region_ID\", \"Emp_Lname\", \"Emp_Mi\", \"Emp_Fname\",\n \"Emp_Hiredate\"\n ]\n filled_attributes = []\n\n row_index = 0\n row_options = []\n for item in employee_inputs:\n if item is not None:\n row_options.append(row_index)\n filled_attributes.append(item)\n row_index += 1\n\n j = 0\n for i in row_options:\n if j == 0:\n query += \"{}='{}' \".format(row_names[i], filled_attributes[j])\n else:\n query += \"and {}='{}' \".format(row_names[i],\n filled_attributes[j])\n j += 1\n\n try:\n self.dbCursor.execute(query)\n return self.dbCursor.fetchall()\n except mysql.connector.Error as err:\n ErrorMessageWindow(err)", "def populate_employees():\n employees = get_employees()\n\n db.session.bulk_save_objects(employees)\n db.session.commit()", "def lookup_employee():\n unique_names = get_unique_employees()\n while True:\n if len(unique_names) > 1:\n print('Entries found by {} and {}.'.format(\n ', '.join(unique_names[:-1]),\n unique_names[-1]))\n elif len(unique_names) == 1:\n print('Entries found by {}.'.format(unique_names[0]))\n\n search_query = input('Show entries by: ')\n if validate_lookup_employee_format(search_query):\n break\n print('** Please enter a name of alphabetic characters and spaces **')\n return Entry.select().where(Entry.employee_name == search_query)", "def all_employees(request, company_id=None):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n company_super_user = current_employee.isCompanySuperUserOrHigher()\n if company_id:\n company = Company.objects.get(pk=company_id)\n else:\n company = current_employee.company\n if not current_employee.isEnsoUser() and current_employee.company.pk != company.pk:\n raise PermissionDenied()\n change_company_form = ChangeCompanyForm(initial=dict(company=company))\n return TemplateResponse(\n request,\n 'all_employees.html',\n {\n 'user': request.user,\n 'company_super_user': company_super_user,\n 'company': company,\n 'change_company_form': change_company_form,\n }\n )", "def employee_list(request):\n response_data = []\n for emp in Employee.objects.all().values(\n 'id', 'first_name', 'last_name', 'age', 'address', 'city',\n 'state', 'country'):\n response_data.append(emp)\n return JsonResponse(response_data, safe=False)", "def gather_employee_entries(self):\n user_inputs = [\n self.emp_lname.get(), self.emp_mi.get(), 
self.emp_fname.get(),\n self.emp_hiredate.get()\n ]\n\n return self.check_input_empty(user_inputs)", "def list(self, request):\n varemployee = employee.objects.all()\n serializer = employeeSerializer(varemployee, many=True)\n return Response(serializer.data)", "def get(self):\n args = self.parser.parse_args()\n date = get_date_or_none(args['date'])\n start_date = get_date_or_none(args['start_date'])\n end_date = get_date_or_none(args['end_date'])\n\n if date:\n employees = self.service.get_employees_by_date_of_birth(\n date, strategy=selectinload\n )\n elif start_date and end_date:\n employees = self.service.get_employees_born_in_period(\n start_date, end_date, strategy=selectinload\n )\n else:\n return self.BAD_DATE_MESSAGE, 400\n\n return self.schema.dump(employees, many=True), 200", "def get_employee():\n\n employee_id = get_employee_input_int('Enter employee ID to get the data ')\n employee = db.get_employee(employee_id)\n if not employee:\n print(\"No employee found with id \", employee_id)\n else:\n payscale = db.get_payScale(employee.grade)\n print('DATA:-> {} {} has grade = {} which gives {} per hours\\n'\n .format(employee.first_name, employee.last_name, employee.grade, payscale.salary))", "def get_birthday_employees(self):\n birthday_employees = []\n\n employees = self.search([\n ('birthday_reminders', '=', True),\n ('birthday', '!=', False),\n ])\n if not employees:\n return birthday_employees\n\n return employees.filtered(lambda x: self.check_emp_birthday(x.birthday))", "def test_api_can_get_all_employees(self):\n res = self.client().get(service_url_emp)\n self.assertEqual(res.status_code, 200)\n self.assertIn('name1', str(res.data))\n self.assertIn('name2', str(res.data))", "def get_employee(self, id):\n from Employee import Employee\n cursor = self.dbconnect.get_cursor()\n cursor.execute('SELECT * FROM employee WHERE employeeID=%s ', (id,))\n row = cursor.fetchone()\n return Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8])" ]
[ "0.7986589", "0.78798616", "0.7810305", "0.7761145", "0.771737", "0.7529015", "0.7422921", "0.722801", "0.7137951", "0.71137774", "0.7091648", "0.7014968", "0.6984071", "0.6906787", "0.6850874", "0.67577934", "0.6755647", "0.67153805", "0.66789347", "0.6564769", "0.65336215", "0.64868057", "0.6434089", "0.6367211", "0.63359034", "0.6327923", "0.62664163", "0.6265159", "0.61977154", "0.6197423" ]
0.8586788
0
this function gets all the admins from the database
def get_admins(self):
    from Employee import Employee
    admins = list()
    cursorRoles = self.dbconnect.get_cursor()
    cursorRoles.execute('select * from employeeRoles where role=\'admin\'')
    for row in cursorRoles:
        admins.append(self.get_employee(row[0]))
    return admins
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_admins():\n users = get_users()\n admins = []\n for user in users:\n if user[\"approval_level\"] == \"admin\":\n admins.append(user)\n\n return admins", "def get_admins(name):\n obj = DataService.objects(name=name).first()\n if obj is None:\n return []\n return list(obj.admins)", "def get_admins(self):\n return self.admins_group.user_set.all()", "async def _ad_list(self, ctx):\n admin_list = self.database.get_admins(ctx.guild.id)\n if len(admin_list) > 0:\n out = \"```\"\n for admin in admin_list:\n admin_name = self.bot.get_user(admin.user_id)\n admin_name = str(admin_name) if admin_name is not None else admin.user_id\n out += f\"{admin_name}\\n\"\n out += \"```\"\n await ctx.send(out)\n else:\n await ctx.send(\"This guild currently has no administrators.\")", "def get_admins(self):\n admins = User.objects.filter(Q(groups__name=self.admin_group_name()) | Q(is_superuser=True)).distinct()\n return admins", "async def _ad_all(self, ctx):\n all_admins = self.database.get_all_admins()\n consumed = []\n out = \"```\"\n for admin in all_admins:\n if admin.guild_id not in consumed:\n out += f\"Guild: {self.bot.get_guild(admin.guild_id)}\\n\"\n consumed.append(admin.guild_id)\n admin = self.bot.get_user(admin.user_id)\n admin = str(admin) if admin is not None else admin.user_id\n out += f\" {admin}\\n\"\n if out != \"```\":\n out += \"```\"\n await ctx.send(out)\n else:\n await ctx.send(\"No admins currently\")", "def get_list_of_admins() -> List[User]:\n return DBDiscussionSession.query(User).filter(User.group == Group.ADMIN).all()", "def return_admin_list(request):\n del request\n return return_user_list(Administrador)", "def __update_admin_cache(self):\n\n header = connect(self.__path)\n curs = header.cursor()\n curs.execute(\"SELECT * FROM admins WHERE id IS NOT NULL\")\n data = curs.fetchall()\n newlist = []\n for item in data:\n newlist.append(item[0])\n self.__admins = newlist", "def get_local_admins():\n admin_list = get_users_config()\n response = []\n\n if \"users\" not in admin_list[\"result\"]:\n return response\n\n if isinstance(admin_list[\"result\"][\"users\"][\"entry\"], list):\n for entry in admin_list[\"result\"][\"users\"][\"entry\"]:\n response.append(entry[\"name\"])\n else:\n response.append(admin_list[\"result\"][\"users\"][\"entry\"][\"name\"])\n\n return response", "def admins_index(_):\n return {\"admin_users\": [u.username for u in models.User.admins()]}", "def get_users_admins_list(self, session):\n\n users = session.query(User.chat_id).all()\n return users", "def get_admin_users(self):\r\n try:\r\n users = self.list_all(\"users\")\r\n users_admin = [user for user in users if user[\"role\"] == \"admin\"]\r\n return users_admin\r\n except PDClientError as e:\r\n raise e", "def get_org_admins(self, dataset: Dict) -> List[User]:\n organization_id = dataset[\"organization_id\"]\n orgadmins = list()\n organization = self.organizations[organization_id]\n if \"admin\" in organization:\n for userid in self.organizations[organization_id][\"admin\"]:\n user = self.users.get(userid)\n if user:\n orgadmins.append(user)\n return orgadmins", "def get_admins(self, uid):\n admin_data = self.list_admin_roles(uid)\n admins = []\n for admin in admin_data:\n admins.append(\n ZenossDeviceManagementAdmin(\n self.api_url,\n self.api_headers,\n self.ssl_verify,\n admin\n )\n )\n\n return admins", "def get_drink_admins(self):\n admins = self.group('drink')\n return admins", "def get_admin_users() -> User:\n return User.objects.filter(group__name__contains=\"admin\")", "def 
getPermsOfAdmin(self,request):\n request.needAuthType(request.ADMIN)\n request.checkArgs(\"admin_username\")\n admin_perms=admin_main.getLoader().getAdminByName(request[\"admin_username\"]).getPerms()\n perms_list=self.__getPermsListFromAdminPerms(admin_perms)\n sorted=SortedList(perms_list)\n sorted.sortByPostText('[\"name\"]',0)\n return sorted.getList()", "def admins(message):\n hf.query_users(message, hf.get_users(), \"admin\")", "def get_all_npf_admins(self):\n npf_admins = []\n for user in OrgUser.objects.all():\n u = OcAuth(user.id)\n if u.is_admin_org():\n npf_admins.append(user.user)\n return npf_admins", "def admin_list(message):\n load_users(message._client.users)\n names = list_to_names(user_list.admin_list)\n message.reply('My admins are: {}'.format(\", \".join(names)))", "def get_administrators(self, *args, **kwargs):\n return self.bot.get_chat_administrators(self.id, *args, **kwargs)", "def get_all_biz_admins(self):\n biz_admins = []\n for user in OrgUser.objects.all():\n u = OcAuth(user.id)\n if u.is_admin_biz():\n biz_admins.append(user.user)\n\n return biz_admins", "def admin_edit_admins():\n return user_management_handler(\"show_admin_edit_admins\", \"new_admins\", True)", "def get_all_users_for_admin_purposes(connection):\r\n with connection:\r\n return connection.execute(GET_ALL_USERS).fetchall()[1]", "def get_all_users():", "def getAdmin():", "def organization_get_admins_no_login(self, client, id):\n assert client.get('/organizations/' + id + '/admins',\n headers={}).status == '400 BAD REQUEST'", "def get_for_admin(self, admin):\n if admin.is_superuser:\n return self.get_query_set()\n return self.get_query_set().filter(owners__user=admin)", "def __reloadAdmins(self, admin_id):\n for admin_username in admin_main.getLoader().getAllUsernames():\n try:\n admin_obj=admin_main.getLoader().getAdminByName(admin_username)\n if admin_obj.creator_id == admin_id:\n admin_main.getLoader().loadAdmin(admin_obj.getAdminID())\n else:\n for lock_obj in admin_obj.getLocks():\n if lock_obj.getLockerID()==admin_id:\n admin_main.getLoader().loadAdmin(admin_obj.getAdminID())\n break\n except:\n logException(LOG_DEBUG)" ]
[ "0.77166426", "0.76271695", "0.76095897", "0.7580871", "0.75705355", "0.7568923", "0.74572515", "0.7428086", "0.7204757", "0.7203195", "0.7175435", "0.70761865", "0.70348865", "0.70129657", "0.69840354", "0.6977274", "0.69340014", "0.6931243", "0.6885563", "0.6853746", "0.6849007", "0.68253785", "0.67971444", "0.6790082", "0.66530603", "0.6632572", "0.66026497", "0.65907925", "0.6582403", "0.65572435" ]
0.81468296
0
gets a single employee out of the database by id
def get_employee(self, id):
    from Employee import Employee
    cursor = self.dbconnect.get_cursor()
    cursor.execute('SELECT * FROM employee WHERE employeeID=%s ', (id,))
    row = cursor.fetchone()
    return Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(self, id):\n resultado = EmployeeModel.query.filter_by(employee_id=id).first()\n if resultado:\n return resultado\n api.abort(404)", "def get(id_: int):\n logger.debug('Retrieving employee by id %i.', id_)\n try:\n query = db.session.query(Employee)\n employee = query.filter(\n Employee.id == id_\n ).scalar()\n if not employee:\n raise Exception(f\"Can't get employee with id {id_}\", )\n except Exception as exception:\n logger.error('An error occurred while retrieving employee with id %i.'\n ' Exception: %s', id_, str(exception))\n db.session.rollback()\n raise\n db.session.commit()\n logger.info('Successfully retrieved employee by id %i.', id_)\n return employee", "def get_employee_by_id(employee_id):\n where = Employee.id == employee_id\n query = get_employee_query(where)\n return query.one()", "def get_employee(self, employee_id):\n cursor = self.dbconnect.get_cursor()\n\n try:\n cursor.execute('SELECT id, name, email, office, extra_info, picture_location, research_group, title, is_external,'\n ' is_admin, is_active FROM employee WHERE LOWER(id)=LOWER(%s)', (employee_id,))\n row = cursor.fetchone()\n return Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10])\n\n except:\n self.dbconnect.rollback()\n raise", "def get_employee_by_id(self, employee_id):\n employee = self.admin_repository.get_employee_by_id(employee_id)\n if employee:\n print('''Name: {}\\nEmail: {}\\n\n '''.format(employee[0], employee[1]))\n return employee\n else:\n print(\"Invalid Id\")\n return False", "def find_employee_by_id(self,id):\n self.employee_id()\n if id in self.emp_id:\n print(self.emp_id[id])\n return self.emp_id[id]\n else:\n print(\"Employee not found\")", "def get(cls, employee_id):\n employee = EmployeeModel.find_by_id(employee_id)\n if not employee:\n return {'message': 'Employee not found, or you do not have the access'}, 404\n\n return employee.json()", "def employee_get(emp_id):\n try:\n emp = Employee.objects.get(id=emp_id)\n except Employee.DoesNotExist:\n return JsonResponse({\n 'status': False,\n 'message': 'Employee does not exists in database'\n }, status=404)\n _data = {\n 'id': emp.id,\n 'first_name': emp.first_name,\n 'last_name': emp.last_name,\n 'age': emp.age,\n 'city': emp.city.name,\n 'state': emp.state.name,\n 'country': emp.country.name\n }\n return JsonResponse(_data, safe=False)", "def get_employee(employee_id):\n\n employee = Employee.objects.get_or_404(id=employee_id)\n\n return jsonify({\n 'employee': employee\n })", "def employee(employee_id):\n # gather data from db about all employees\n return render_template(\"employee.html\",\n employee_id=employee_id)", "def get_employee():\n\n employee_id = get_employee_input_int('Enter employee ID to get the data ')\n employee = db.get_employee(employee_id)\n if not employee:\n print(\"No employee found with id \", employee_id)\n else:\n payscale = db.get_payScale(employee.grade)\n print('DATA:-> {} {} has grade = {} which gives {} per hours\\n'\n .format(employee.first_name, employee.last_name, employee.grade, payscale.salary))", "def get_employee_by_name(self, name):\n cursor = self.dbconnect.get_cursor()\n cursor.execute('SELECT id, name, email, office, extra_info, picture_location, research_group, title, is_external,'\n ' is_admin, is_active FROM employee WHERE name=%s', (name,))\n row = cursor.fetchone()\n return Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10])", "def get_employee(self, name):\n name = name.upper()\n if name in 
EMPLOYEE_MAP:\n name = EMPLOYEE_MAP[name]\n try:\n int(name)\n emps = Employee.objects.filter(id=name)\n except ValueError:\n if name == 'NN':\n emps = Employee.objects.filter(user__first_name='Nieznany')\n elif Employee.objects.filter(user__username__iexact=name).exists():\n emps = Employee.objects.filter(user__username__iexact=name)\n elif len(name) == 3:\n emps = Employee.objects.filter(user__first_name__istartswith=name[0],\n user__last_name__istartswith=name[1:3],\n status=0)\n else:\n emps = Employee.objects.filter(user__first_name__istartswith=name[0],\n user__last_name__istartswith=name[1:],\n status=0)\n if not emps:\n emps = Employee.objects.filter(user__username__istartswith=name)\n if len(emps) == 1:\n return emps[0]\n elif len(emps) > 1:\n self.stdout.write(self.style.ERROR('Multiple employee matches for {}. Choices are:'\n .format(name)))\n for e in emps:\n self.stdout.write(self.style.ERROR(' -{}'.format(e.user.get_full_name())))\n else:\n raise CommandError('Employee {} does not exists! Fix your input file.'.format(name))\n\n return None", "def get_employeeOnName(self, name):\n from Employee import Employee\n cursor = self.dbconnect.get_cursor()\n cursor.execute('SELECT * FROM employee WHERE name=%s ', (name,))\n if (cursor.rowcount != 0):\n row = cursor.fetchone()\n return Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8])\n else:\n return None", "def employers_id_get(id): # noqa: E501\n\n\n return query_manager.get_resource(id=id,\n rdf_type_uri=EMPLOYER_TYPE_URI,\n rdf_type_name=EMPLOYER_TYPE_NAME, \n kls=Employer)", "def lookup(cls, id: int):\n record = query_db(\n \"select id, amount, description, user_id from expenses where id = ?\",\n [id],\n one=True,\n )\n if record is None:\n raise NotFound()\n return cls(**record)", "def get(self,id):\r\n person = get_one(id=id)\r\n if not person:\r\n api.abort(404)\r\n else:\r\n return person", "def test_api_can_get_employee_by_id(self):\n res = self.client().get(service_url_emp+'/1')\n self.assertEqual(res.status_code, 200)\n self.assertIn('name1', str(res.data))", "def retrieve(self, request, pk=None):\n employee = self.get_employee_object(pk)\n print(F\"Employee: {employee}\")\n serializer = data_serializers.PresentEmployeeDataSerializer(employee)\n return Response(serializer.data)", "def get_examen(self, id_examen):\n\n self.logger.info(\"\\t[+] get_examen [+]\")\n self.logger.info(f\"\\t[+] id_examen {id_examen} [+]\")\n try:\n return self.examens.select().where(self.examens.columns.id_examen == id_examen).execute()\n except Exception as e:\n self.logger.critical(\"\\t[-] Exception occured [-]\")\n self.logger.critical(\"\\t\" + str(e))\n self.logger.critical(\"\\t[-] Exception occured [-]\")", "def get_employee_training(employee_id):\n with sqlite3.connect(Connection.db_path) as conn:\n conn.row_factory = model_factory(TrainingProgramEmployee)\n\n db_cursor = conn.cursor()\n\n db_cursor.execute(\"\"\"\n SELECT\n *\n FROM\n hrapp_trainingprogramemployee te\n WHERE\n te.employee_id = ?\n \"\"\", (employee_id, ))\n\n return db_cursor.fetchall()", "def getbyid(self, id):\n\n return esd.retrieve(id)", "def get_by_name(name: str):\n logger.debug('Retrieving employee by name %s.', name)\n try:\n query = db.session.query(Employee)\n employee = query.filter(\n Employee.name == name\n ).scalar()\n except Exception as exception:\n logger.error('An error occurred while retrieving employee by name %s.'\n ' Exception: %s', name, str(exception))\n db.session.rollback()\n raise\n db.session.commit()\n 
logger.info('Successfully retrieved employee by name %s.', name)\n return employee", "def get(self,id):\r\n person = get_one_by_persons_id(id=id)\r\n if not person:\r\n api.abort(404)\r\n else:\r\n return person", "def find_by_id(id: int):\n exercise = Exercise.try_find_by_id(id)\n if not exercise:\n raise NotFound(EXERCISE_NOT_FOUND_MSG)\n return exercise", "def get(self, id):\n tmp = userDao.get_one_entry(id)\n return tmp", "def get(self, uuid: str):\n try:\n employee = self.service.get_employee_by_uuid(uuid)\n except ValueError:\n return self.NOT_FOUND_MESSAGE, 404\n return self.schema.dump(employee), 200", "def get_entry_by_id(model, id):\n print(model, id)\n return db.session.query(model).filter_by(id=id).first()", "def is_manager(employee_id: int) -> Optional[str]:\n\n conn = database_connect()\n if(conn is None):\n return None\n cur = conn.cursor()\n\n try:\n # SQL statement and execute\n sql = \"\"\"SELECT Department.name\n FROM Employee JOIN Department ON(Employee.empid = Department.manager)\n WHERE Employee.empid = %s\"\"\"\n cur.execute(sql, (employee_id,))\n\n # Attempt to fetch first row\n result = cur.fetchone()\n\n # If nothing is fetched\n if result == None:\n cur.close()\n conn.close()\n return result\n\n\n cur.close()\n conn.close()\n return result[0]\n except Exception as e:\n # If something went really wrong\n print(\"bbb\")\n print(e)\n cur.close()\n conn.close()\n return None", "def show_employee(emp_id, fields=None):\n ret = {}\n if fields is None:\n fields = \",\".join(\n (\n \"canUploadPhoto\",\n \"department\",\n \"displayName\",\n \"firstName\",\n \"id\",\n \"jobTitle\",\n \"lastName\",\n \"location\",\n \"mobilePhone\",\n \"nickname\",\n \"photoUploaded\",\n \"photoUrl\",\n \"workEmail\",\n \"workPhone\",\n \"workPhoneExtension\",\n )\n )\n\n status, result = _query(action=\"employees\", command=emp_id, args={\"fields\": fields})\n\n root = ET.fromstring(result)\n\n ret = {\"id\": emp_id}\n for item in root:\n ret[next(iter(item.values()))] = item.text\n return ret" ]
[ "0.8462909", "0.8436909", "0.8286135", "0.8277989", "0.795276", "0.7786417", "0.75786275", "0.7489491", "0.7484681", "0.73151207", "0.7233033", "0.6973724", "0.6964095", "0.6891466", "0.68717116", "0.6806852", "0.66826135", "0.6661457", "0.6648924", "0.6645056", "0.66137886", "0.6545444", "0.6531044", "0.6497339", "0.64534837", "0.64507073", "0.6445847", "0.63906133", "0.635961", "0.63405216" ]
0.8603124
0
gets a single employee out of the database by name
def get_employeeOnName(self, name): from Employee import Employee cursor = self.dbconnect.get_cursor() cursor.execute('SELECT * FROM employee WHERE name=%s ', (name,)) if (cursor.rowcount != 0): row = cursor.fetchone() return Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8]) else: return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_employee_by_name(self, name):\n cursor = self.dbconnect.get_cursor()\n cursor.execute('SELECT id, name, email, office, extra_info, picture_location, research_group, title, is_external,'\n ' is_admin, is_active FROM employee WHERE name=%s', (name,))\n row = cursor.fetchone()\n return Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10])", "def get_by_name(name: str):\n logger.debug('Retrieving employee by name %s.', name)\n try:\n query = db.session.query(Employee)\n employee = query.filter(\n Employee.name == name\n ).scalar()\n except Exception as exception:\n logger.error('An error occurred while retrieving employee by name %s.'\n ' Exception: %s', name, str(exception))\n db.session.rollback()\n raise\n db.session.commit()\n logger.info('Successfully retrieved employee by name %s.', name)\n return employee", "def get_employee(self, id):\n from Employee import Employee\n cursor = self.dbconnect.get_cursor()\n cursor.execute('SELECT * FROM employee WHERE employeeID=%s ', (id,))\n row = cursor.fetchone()\n return Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8])", "def get_employee(self, name):\n name = name.upper()\n if name in EMPLOYEE_MAP:\n name = EMPLOYEE_MAP[name]\n try:\n int(name)\n emps = Employee.objects.filter(id=name)\n except ValueError:\n if name == 'NN':\n emps = Employee.objects.filter(user__first_name='Nieznany')\n elif Employee.objects.filter(user__username__iexact=name).exists():\n emps = Employee.objects.filter(user__username__iexact=name)\n elif len(name) == 3:\n emps = Employee.objects.filter(user__first_name__istartswith=name[0],\n user__last_name__istartswith=name[1:3],\n status=0)\n else:\n emps = Employee.objects.filter(user__first_name__istartswith=name[0],\n user__last_name__istartswith=name[1:],\n status=0)\n if not emps:\n emps = Employee.objects.filter(user__username__istartswith=name)\n if len(emps) == 1:\n return emps[0]\n elif len(emps) > 1:\n self.stdout.write(self.style.ERROR('Multiple employee matches for {}. Choices are:'\n .format(name)))\n for e in emps:\n self.stdout.write(self.style.ERROR(' -{}'.format(e.user.get_full_name())))\n else:\n raise CommandError('Employee {} does not exists! 
Fix your input file.'.format(name))\n\n return None", "def get_employee():\n\n employee_id = get_employee_input_int('Enter employee ID to get the data ')\n employee = db.get_employee(employee_id)\n if not employee:\n print(\"No employee found with id \", employee_id)\n else:\n payscale = db.get_payScale(employee.grade)\n print('DATA:-> {} {} has grade = {} which gives {} per hours\\n'\n .format(employee.first_name, employee.last_name, employee.grade, payscale.salary))", "def get_employee(self, employee_id):\n cursor = self.dbconnect.get_cursor()\n\n try:\n cursor.execute('SELECT id, name, email, office, extra_info, picture_location, research_group, title, is_external,'\n ' is_admin, is_active FROM employee WHERE LOWER(id)=LOWER(%s)', (employee_id,))\n row = cursor.fetchone()\n return Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10])\n\n except:\n self.dbconnect.rollback()\n raise", "def get(id_: int):\n logger.debug('Retrieving employee by id %i.', id_)\n try:\n query = db.session.query(Employee)\n employee = query.filter(\n Employee.id == id_\n ).scalar()\n if not employee:\n raise Exception(f\"Can't get employee with id {id_}\", )\n except Exception as exception:\n logger.error('An error occurred while retrieving employee with id %i.'\n ' Exception: %s', id_, str(exception))\n db.session.rollback()\n raise\n db.session.commit()\n logger.info('Successfully retrieved employee by id %i.', id_)\n return employee", "def lookup_employee():\n unique_names = get_unique_employees()\n while True:\n if len(unique_names) > 1:\n print('Entries found by {} and {}.'.format(\n ', '.join(unique_names[:-1]),\n unique_names[-1]))\n elif len(unique_names) == 1:\n print('Entries found by {}.'.format(unique_names[0]))\n\n search_query = input('Show entries by: ')\n if validate_lookup_employee_format(search_query):\n break\n print('** Please enter a name of alphabetic characters and spaces **')\n return Entry.select().where(Entry.employee_name == search_query)", "def get_employee_by_id(employee_id):\n where = Employee.id == employee_id\n query = get_employee_query(where)\n return query.one()", "def get_employee_by_name(self, name):\n self.lock.acquire()\n for employee in self.__Session.query(Employee).all():\n if (employee.fname+' '+employee.lname == name):\n result = employee\n self.lock.release()\n return result", "def get(self, id):\n resultado = EmployeeModel.query.filter_by(employee_id=id).first()\n if resultado:\n return resultado\n api.abort(404)", "def is_manager(employee_id: int) -> Optional[str]:\n\n conn = database_connect()\n if(conn is None):\n return None\n cur = conn.cursor()\n\n try:\n # SQL statement and execute\n sql = \"\"\"SELECT Department.name\n FROM Employee JOIN Department ON(Employee.empid = Department.manager)\n WHERE Employee.empid = %s\"\"\"\n cur.execute(sql, (employee_id,))\n\n # Attempt to fetch first row\n result = cur.fetchone()\n\n # If nothing is fetched\n if result == None:\n cur.close()\n conn.close()\n return result\n\n\n cur.close()\n conn.close()\n return result[0]\n except Exception as e:\n # If something went really wrong\n print(\"bbb\")\n print(e)\n cur.close()\n conn.close()\n return None", "def get_employee_by_id(self, employee_id):\n employee = self.admin_repository.get_employee_by_id(employee_id)\n if employee:\n print('''Name: {}\\nEmail: {}\\n\n '''.format(employee[0], employee[1]))\n return employee\n else:\n print(\"Invalid Id\")\n return False", "def employee(employee_id):\n # gather data from db about all 
employees\n return render_template(\"employee.html\",\n employee_id=employee_id)", "def get(self, name, user):\n connection = self.connect()\n cursor = connection.cursor()\n cursor.execute(self.sql[\"get\"], {\"name\": name, \"user\": user})\n result = cursor.fetchone()\n if result is not None:\n return result[0].split()\n else:\n raise DoesNotExistException(\n \"Could not find an applicable saved roll with that name.\"\n )", "def get_exercise(name):\n # Get db object and exercises table\n db = get_db()\n exercises = db.exercises\n \n # Search database for exercises with matching name\n cursor = exercises.find({\"name\": str(name)})\n if cursor.count() is 0:\n raise APIException(status_code=404, message='exercise with specified name not found')\n \n context = {}\n for document in cursor:\n temp = document\n temp['exercise_id'] = str(document['_id'])\n del temp['_id']\n context = temp\n \n context['url'] = \"/api/v1/exercises/\" + name + \"/\"\n return flask.jsonify(**context)", "def find_employee_by_id(self,id):\n self.employee_id()\n if id in self.emp_id:\n print(self.emp_id[id])\n return self.emp_id[id]\n else:\n print(\"Employee not found\")", "def get(self):\n resultado = EmployeeModel.query.all()\n return resultado", "def query_by_name(name):\n\tstudent = session.query(Student).filter_by(\n\t\tname=name).first()\n\treturn student", "def get_employee_training(employee_id):\n with sqlite3.connect(Connection.db_path) as conn:\n conn.row_factory = model_factory(TrainingProgramEmployee)\n\n db_cursor = conn.cursor()\n\n db_cursor.execute(\"\"\"\n SELECT\n *\n FROM\n hrapp_trainingprogramemployee te\n WHERE\n te.employee_id = ?\n \"\"\", (employee_id, ))\n\n return db_cursor.fetchall()", "def get_employee(employee_id):\n\n employee = Employee.objects.get_or_404(id=employee_id)\n\n return jsonify({\n 'employee': employee\n })", "def employee_get(emp_id):\n try:\n emp = Employee.objects.get(id=emp_id)\n except Employee.DoesNotExist:\n return JsonResponse({\n 'status': False,\n 'message': 'Employee does not exists in database'\n }, status=404)\n _data = {\n 'id': emp.id,\n 'first_name': emp.first_name,\n 'last_name': emp.last_name,\n 'age': emp.age,\n 'city': emp.city.name,\n 'state': emp.state.name,\n 'country': emp.country.name\n }\n return JsonResponse(_data, safe=False)", "def getByName( self, people_name ):\n qry = \"\"\"SELECT * FROM `%s`.`people` WHERE `name` = \"%s\"; \"\"\" % ( self.db_name, Mysql.escape_string( person_name ) )\n person = Mysql.ex( qry )\n if len( person ) == 0:\n return False\n return person[0]", "def find_employee_id(self,name):\n nam = list(self.emp_id.values())\n val = nam.index(name)\n ids = list(self.emp_id.keys())\n id = ids[val]\n return id", "def by_name(cls, name):\n return dbsession.query(cls).filter_by(_name=str(name)).first()", "def by_name(cls, name):\n return dbsession.query(cls).filter_by(_name=str(name)).first()", "def by_name(cls, name):\n return dbsession.query(cls).filter_by(_name=str(name)).first()", "def employees():\n # gather data from db about all employees\n return render_template(\"employees.html\")", "def get_examen(self, id_examen):\n\n self.logger.info(\"\\t[+] get_examen [+]\")\n self.logger.info(f\"\\t[+] id_examen {id_examen} [+]\")\n try:\n return self.examens.select().where(self.examens.columns.id_examen == id_examen).execute()\n except Exception as e:\n self.logger.critical(\"\\t[-] Exception occured [-]\")\n self.logger.critical(\"\\t\" + str(e))\n self.logger.critical(\"\\t[-] Exception occured [-]\")", "def __get_one_by_id(\n 
self, table_name: str, id_name: str, db_id: str\n ) -> Mapping[str, Any]:\n with self.table_access_condition:\n conn = self._get_connection()\n c = conn.cursor()\n c.execute(\n f\"\"\"\n SELECT * FROM {table_name}\n WHERE ({id_name} = ?)\n \"\"\",\n (int(db_id),),\n )\n results = c.fetchall()\n if len(results) != 1:\n raise EntryDoesNotExistException(\n f\"Table {table_name} has no {id_name} {db_id}\"\n )\n return results[0]" ]
[ "0.75500387", "0.7348031", "0.7300037", "0.72360134", "0.7058836", "0.7009213", "0.6654517", "0.66495013", "0.66377455", "0.65564954", "0.6541405", "0.6435797", "0.6376785", "0.6338932", "0.61743134", "0.61031044", "0.60764945", "0.60599095", "0.59950364", "0.5983809", "0.59781575", "0.59689844", "0.5962662", "0.595988", "0.5945464", "0.5945464", "0.5945464", "0.592619", "0.59190995", "0.58895534" ]
0.8025803
0
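For illustration only, a minimal usage sketch of the positive snippet above; the DAO class name, the dbconnect wrapper and the printed value are assumptions, not part of the dataset row.

# Hypothetical usage of get_employeeOnName; EmployeeDAO and dbconnect are assumed names.
dao = EmployeeDAO(dbconnect)
emp = dao.get_employeeOnName('Alice')   # parameterized query, so the name is safely escaped
if emp is None:
    print('no employee with that name')
else:
    print(emp)   # an Employee built from the first matching row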
adds an employee to the database
def add_employee(self, empl): cursor = self.dbconnect.get_cursor() try: cursor.execute('INSERT INTO employee values(default,%s,%s,%s,%s,%s,%s,%s,%s)', (empl.name, empl.email, empl.office, empl.research_group, empl.title, empl.internOrExtern, empl.active, empl.promotor)) cursor.execute('SELECT LASTVAL()') eid = cursor.fetchone()[0] empl.id = eid # get id and return updated object self.dbconnect.commit() except(Exception, self.dbconnect.get_error()) as error: self.dbconnect.rollback() raise Exception('\nUnable to save Employee!\n(%s)' % (error))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_employee(self, obj):\n cursor = self.dbconnect.get_cursor()\n try:\n cursor.execute('INSERT INTO employee(id, name, email, office, extra_info, picture_location, research_group, '\n 'title, is_external, is_admin, is_active) VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);',\n (obj.e_id, obj.name, obj.email, obj.office, obj.extra_info, obj.picture_location, obj.research_group,\n obj.title, obj.is_external, obj.is_admin, obj.is_active))\n\n self.dbconnect.commit()\n return obj\n except:\n self.dbconnect.rollback()\n raise", "def add_employee(self, employee):\n self.employees.add(employee)", "def post(self):\n try:\n employee = self.service.add_employee(self.schema, request.json)\n except ValidationError as error:\n return error.messages, 400\n return self.schema.dump(employee), 201", "def add_employee(schema, employee_json):\n employee = schema.load(employee_json, session=db.session)\n db.session.add(employee)\n db.session.commit()\n return employee", "def post(self):\n data = EmployeeRegister.parser.parse_args()\n new_employee_id = str(uuid.uuid4())\n\n while EmployeeModel.find_by_id(new_employee_id):\n # if this id is already in use\n new_employee_id = str(uuid.uuid4())\n\n employee = EmployeeModel(**data, employee_id=new_employee_id)\n employee.save_to_db()\n\n return {\"message\": \"Employee successfully added to the system\"}, 201 # 201 - Created", "def add_employee(self, emp):\n if emp not in self.employees: \n self.employees.append(emp)", "def add_employee():\n\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n \n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n form = SignUp_Form()\n \n\n if form.validate_on_submit():\n try: \n employee = Employee.register(\n username = form.username.data,\n password = form.password.data, \n email = form.email.data, \n first_name = form.first_name.data,\n last_name = form.last_name.data,\n hire_date = form.hire_date.data, \n is_admin = form.is_admin.data,\n )\n\n db.session.add(employee)\n\n db.session.commit()\n except IntegrityError:\n flash(\"Email already in use\", \"danger\")\n return render_template(\"/admin/add_user.html\", form = form)\n\n flash(\"Employee Added!\", \"success\")\n return redirect(\"/administrator\")\n else:\n\n return render_template(\"/admin/add_user.html\", form = form)", "def add_employee(self, first_name, last_name):\n self.switch_main_menu(\"PIM\")\n self.click_menu(\"Add Employee\")\n self.pim = AddEmployee(self.driver)\n self.pim.add_user_employee(first_name, last_name)", "def create_employee(self):\n try:\n name = input(\"Enter name: \")\n if not name.isalpha():\n print(\"Invalid data format. Name should contain only alphabets. 
\")\n return False\n email = input(\"Enter email: \")\n if not InputValidations.validate_email(email):\n return False\n employee = EmployeeModel(name=name, email=email)\n self.admin_repository.create_employee(employee)\n print(\"Employee created successfully!\")\n return True\n except Exception as e:\n print(\"Some Error occurred.Please try again\")\n return False", "def create_employee(self,personal_identity):\r\n new_emp = Employee(*personal_identity)\r\n registration_str = new_emp.get_registration_str()\r\n\r\n return_value = self.save_object_to_DB(\"employee\",registration_str)\r\n return return_value", "def add_employee():\n\n while True:\n first_name = get_user_string(\"Enter your first name\")\n last_name = get_user_string(\"Enter your last name\")\n grade = get_employee_input_int(\"Enter your grade\")\n db.add_employee(first_name, last_name, grade)\n print(\"New employee \" + first_name + \" \" + last_name + \" has been added to the employee table\")\n user_input = input(\"Do you want to add more employees to the table ? (Y/N)\")\n if(str(user_input).upper()) == 'Y':\n continue\n elif (str(user_input).upper()) == 'N':\n break\n else:\n print(\"Invalid Input\\nReturning to the main menu\")\n break", "def post(self):\n employee = Employee(**self.data)\n _dict = Employee.encode(employee)\n\n _id = DatabaseManager.insert(Collection.EMPLOYEES, _dict)\n employee_dict = DatabaseManager.find_document_by_id(\n Collection.EMPLOYEES, _id, True\n )\n return employee_dict", "def add_person():\n # get values from user\n responses = accept_inputs([\"Name\"])\n # insert into db\n query_no_results(\"insert into person (name) values(?)\", [responses[\"Name\"]])\n print(\"New person created\")", "def addUsertoDatabase(self):\r\n self.c.execute(\"\"\"INSERT INTO student_information VALUES (?,?,?)\"\"\",(self.name,self.password,self.budget,))\r\n self.con.commit()\r\n print(\"Added to Database Student..\")", "def add(self):\n\n db.session.add(self)\n db.session.commit()", "def add(self):\n\n db.session.add(self)\n db.session.commit()", "def add_employeeRole(self, id, role):\n cursor = self.dbconnect.get_cursor()\n try:\n cursor.execute('INSERT INTO employeeRoles values(%s,%s)',\n (id, role))\n # get id and return updated object\n self.dbconnect.commit()\n except(Exception, self.dbconnect.get_error()) as error:\n self.dbconnect.rollback()\n raise Exception('\\nUnable to save EmployeeRole!\\n(%s)' % (error))", "def test_new_employee_crud_methods(self):\n response = self.client.get(\n '/employees/', kwargs={'employer_id': self.employee.id})\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(Employee.objects.all()), 1)\n\n # Test that a new employee can be added\n response = self.client.post(\n '/employees/',\n {'name': 'MAdtraxx!!', 'employer': self.employer.id},\n kwargs={'pk': self.employer.id})\n self.assertEqual(response.status_code, 201)\n self.assertEqual(Employee.objects.count(), 2)\n\n # Test that employee info may be edited\n response = self.client.put('/employees/1/',\n {'name': 'Ashley',\n 'employer': self.employer.id},\n kwargs={'employer_id': self.employee.id,\n 'pk': self.employee.id})\n self.assertEqual(response.status_code, 200)", "def createEmployee():\n form = CreateEmployeeForm()\n if form.validate_on_submit():\n hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')\n staff = Staff(first_name=form.first_name.data, last_name=form.last_name.data, password=hashed_password, \n email=form.email.data, role=form.role.data, 
location=form.location.data)\n db.session.add(staff)\n db.session.commit()\n flash(f'Employee Added To Database', category='Success')\n return redirect(url_for('login'))\n return render_template('new_employee.html', title=\"Register\", form=form)", "def add_user(self, username, password, name, department):\n db = sqlite3.connect(self.name)\n cur = db.cursor()\n cur.execute('SELECT MAX(ID) FROM users')\n maxid = cur.fetchone()[0]\n usid = maxid + 1 if maxid is not None else 0\n date = time.strftime('%Y.%m.%d')\n cur.execute(\n 'INSERT INTO users VALUES (?, ?, ?, ?, ?, ?, ?)',\n (usid, username, password, \"user\", name, department, 28)\n )\n db.commit()\n db.close()", "def register():\n add_employee = True\n form = RegistrationForm()\n if form.validate_on_submit():\n employee = Employee(email=form.email.data,\n username=form.username.data,\n glad_id=form.glad_id.data,\n tel_no=form.tel_no.data,\n role_id=2 , ##form.role_id.data,\n password=form.password.data)\n\n # add employee to the database\n db.session.add(employee)\n db.session.commit()\n flash('You have successfully registered! You may now login.')\n\n # redirect to the login page\n return redirect(url_for('auth.login'))\n\n # load registration template\n return render_template('auth/register.html', form=form, title='Register')", "def action_add(request, employee_id=None):\n if employee_id:\n employee = Employee.objects.get(pk=employee_id)\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n if not current_employee.isEnsoUser() and current_employee.company.pk != employee.company.pk:\n raise PermissionDenied()\n else:\n employee = request.user.employee_user.first()\n if request.method == 'POST':\n form = ActionForm(request.POST)\n if form.is_valid():\n form.save(request.user, employee)\n return HttpResponseRedirect('/action/%d' % form.instance.pk)\n else:\n form = ActionForm()\n return TemplateResponse(\n request,\n 'mus/action_edit.html',\n dict(\n form=form\n )\n )", "def add_entry_to_db(entry):\n db.session.add(entry)\n db.session.commit()", "def post(self, request):\n data = request.data\n skill_data = data.pop('skills')\n Department_name = data.pop('department')\n department = Department.objects.get(name=Department_name)\n manager_name = data.pop('manager')\n manager = Manager.objects.get(name=manager_name)\n Employee = EmployeeDetail.objects.create(department=department, manager=manager, **data)\n Employee.save()\n for skill in skill_data:\n skill_add, create = Skill.objects.get_or_create(name=skill)\n Employee.skills.add(skill_add)\n return Response(\n data=request.data\n )", "def addRecord(self):\n\n ## Saving recorded entries to the CRM and Mailings Database\n print(\"Saving entries to the CRM and Mailings database...\")\n db_connection.executeQuery(\"INSERT INTO dbo.CRM (f_name, l_name, company, address, city, county, state, zip, primary_phone, secondary_phone, email_address) VALUES ('\" + self.first_name.replace(\"\\'\", \"\\'\\'\").title() + \"', '\" + self.last_name.replace(\"\\'\", \"\\'\\'\").title() + \"', '\" + self.crm_company_name.replace(\"\\'\", \"\\'\\'\").title() + \"', '\" + self.address.title() + \"', '\" + self.city.replace(\"\\'\", \"\\'\\'\").title() + \"', '\" + self.county.replace(\"\\'\", \"\\'\\'\").title() + \"', '\" + self.state_code.upper() + \"', '\" + str(self.zip_code) + \"', '\" + self.phone_number + \"', '\" + self.phone_number_2 + \"' , '\" + self.email_address + \"'); COMMIT\")\n db_connection.executeQuery(\"INSERT INTO dbo.Mailings (name, company, address) VALUES ('\" + 
self.first_name.replace(\"\\'\", \"\\'\\'\").title() + \" \" + self.last_name.replace(\"\\'\", \"\\'\\'\").title() + \"', '\" + self.company_name.replace(\"\\'\", \"\\'\\'\").title() + \"','\" + self.address + \" \" + self.city.title() + \" \" + self.county.title() + \" \" + self.state_code.upper() + \" \" + str(self.zip_code) + \"'); COMMIT\")", "def add_user(self):\n query = \"INSERT INTO users (first_name, last_name, email, password) VALUES (%s, %s, %s, %s)\"\n self.cursor.execute(query,(\n self.first_name, \n self.last_name, \n self.email, \n self.password))", "def insert_employee(self,\n region_name,\n last_name,\n first_name,\n hire_date,\n mi=None):\n\n if self.check_input_type(region_name, \"Region\"):\n if self.check_input_type(hire_date, \"Date\"):\n region_info = self.query_region(region_name)\n region_id = region_info[0][0]\n\n if mi != \"\":\n query_format = \"insert into employee(Region_ID, \" \\\n \"Emp_Lname, Emp_Mi, Emp_Fname, Emp_Hiredate) \" \\\n \"values ((select region_id from region where \" \\\n \"region_id='{}'), '{}', '{}', '{}', '{}')\"\n query = query_format.format(\n region_id, last_name, mi, first_name, hire_date\n )\n else:\n query_format = \"insert into employee(Region_ID, \" \\\n \"Emp_Lname, Emp_Fname, Emp_Hiredate) \" \\\n \"values ((select region_id from region where \" \\\n \"region_id='{}'), '{}', '{}', '{}')\"\n query = query_format.format(\n region_id, last_name, first_name, hire_date\n )\n\n try:\n self.dbCursor.execute(query)\n SuccessMessageWindow(\"Insert success!\")\n except mysql.connector.Error as err:\n ErrorMessageWindow(err)\n finally:\n self.dbConnection.commit()\n else:\n ErrorMessageWindow(\"Date format not valid!\")\n else:\n ErrorMessageWindow(\"Region input not valid!\")", "def save(self, db):\n db.query(\n \"INSERT INTO staff (name) VALUES(:name)\",\n name=self.name\n )", "def add_to_db(name, email_id):\n conn = None\n try:\n conn = connect_to_db()\n cur = conn.cursor()\n # This is the best way that I found to do an 'upsert' in a database agnostic way.\n # Try to update the data first, and if no records get updated, insert them.\n cur.execute(UPDATE_STMT.format(nm=name, em=email_id))\n if cur.rowcount == 0:\n cur.execute(INSERT_STMT.format(nm=name, em=email_id))\n conn.commit()\n print('Successfully added/updated record!')\n except Exception as e:\n print(str(e))\n disconnect_from_db(conn)\n raise e\n finally:\n disconnect_from_db(conn)", "def add():\n name = request.form['name']\n message = request.form['message']\n\n try:\n newcurs = g.conn.execute(\"\"\"INSERT INTO record\n VALUES (%s, %s );\"\"\", name, message)\n newcurs.close()\n except Exception:\n print \"can not write record to database\"\n return redirect('/error')\n\n return render_template(\"index.html\", **locals())" ]
[ "0.8205563", "0.7556254", "0.74963003", "0.74677837", "0.74195415", "0.71267223", "0.7027136", "0.68783706", "0.6761403", "0.6604914", "0.6410982", "0.63886535", "0.6380282", "0.6344615", "0.6239359", "0.6239359", "0.6229492", "0.6211725", "0.618129", "0.61335653", "0.61314315", "0.61312056", "0.61241335", "0.6122054", "0.606227", "0.6060016", "0.6027342", "0.5945643", "0.59397423", "0.59250766" ]
0.7959675
1
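As a hedged sketch of the insert-then-fetch-id pattern used above, written against a bare psycopg2 connection: PostgreSQL's RETURNING clause yields the same new id without the separate SELECT LASTVAL() round trip (employeeID follows the column name used elsewhere in this dataset; the value variables are placeholders).

# Sketch only: conn is an open psycopg2 connection; the tuple mirrors the columns in the insert above.
cur = conn.cursor()
cur.execute(
    'INSERT INTO employee VALUES (default, %s, %s, %s, %s, %s, %s, %s, %s) RETURNING employeeID',
    (name, email, office, research_group, title, intern_or_extern, active, promotor))
new_id = cur.fetchone()[0]   # id of the freshly inserted row
conn.commit()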
adds a role to an employee
def add_employeeRole(self, id, role): cursor = self.dbconnect.get_cursor() try: cursor.execute('INSERT INTO employeeRoles values(%s,%s)', (id, role)) # get id and return updated object self.dbconnect.commit() except(Exception, self.dbconnect.get_error()) as error: self.dbconnect.rollback() raise Exception('\nUnable to save EmployeeRole!\n(%s)' % (error))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_role():\n role = roles.find_or_create_role(request.values.get('role_name', ''))\n user = users.get_or_404(int(request.values.get('user_id', '')))\n if not users.add_role_to_user(user, role):\n return {}, 500\n return {}", "def test_add_role(self):\n pass", "def add_role(role):\n roleOfUser=Role.objects.create(type=role)\n return roleOfUser", "def add_role(email, role):\n from enferno.user.models import Role\n u = User.query.filter(User.email == email).first()\n\n if u is None:\n print('Sorry, this user does not exist!')\n else:\n r = Role.query.filter(Role.name == role).first()\n if r is None:\n print('Sorry, this role does not exist!')\n u = click.prompt('Would you like to create one? Y/N', default='N')\n if u.lower() == 'y':\n r = Role(name=role)\n try:\n db.session.add(r)\n db.session.commit()\n print('Role created successfully, you may add it now to the user')\n except Exception as e:\n db.session.rollback()\n # add role to user\n u.roles.append(r)", "async def add_role(\n client,\n event,\n user: ('user', 'User to add role to'),\n role: ('role', 'The role to give'),\n):\n # Check for permissions\n if not event.user_permissions.can_manage_roles:\n abort('You need `manage roles` permission to invoke this command.')\n \n if not event.guild.cached_permissions_for(client).can_manage_roles:\n abort('I need `manage roles` permission to execute this command.')\n \n if not event.user.has_higher_role_than(role):\n abort('You must have higher role than the role you are trying to give.')\n \n if not client.has_higher_role_than(role):\n abort('I must have higher role than the role you are trying to give.')\n \n # Using `.copy_to` on forms works as well.\n return ADD_ROLE_FORM.copy_with(\n title = f'Add role {role.name} to {user.full_name}',\n custom_id = f'add_role.{user.id}.{role.id}',\n )", "def add_role(self, role):\n if role.name not in [r.name for r in self.roles]:\n return db[self.colNam].find_and_modify(query=dict(_id=self.id), update={'$push': {'roles': role.to_python()}})", "def add_role(self, role, parents=[]):\r\n self._roles.setdefault(role, set())\r\n self._roles[role].update(parents)", "def test_add_role_simple(self):\n pass", "def manage_addRole(self, role_id, title, description, RESPONSE=None,\n REQUEST=None):\n if not role_id:\n message = 'Please+provide+a+Role+ID'\n else:\n self.addRole(role_id, title, description)\n message = 'Role+added'\n\n if RESPONSE is not None:\n RESPONSE.redirect('%s/manage_roles?manage_tabs_message=%s' %\n (self.absolute_url(), message))", "async def addrole(self, ctx, user: discord.Member=None, *, role=None):\r\n if user is None or role is None:\r\n return await ctx.send(\"Incorrect usage! 
*;addrole @user role*\")\r\n r = discord.utils.get(ctx.guild.roles, name=str(role))\r\n if r is None:\r\n return await ctx.send(f'{role} was not found')\r\n try:\r\n await user.add_roles(r)\r\n return await ctx.send(f\"**{str(user)}** has been given the role of **{role}** {self.bot.get_emoji(470063310386233344)}\")\r\n except discord.Forbidden:\r\n return await ctx.send(\"Bot does not have enough permissions to give roles.\")", "def add_role():\n check_admin()\n add_role = True\n\n form = RoleForm()\n if form.validate_on_submit():\n role = Role(title=form.title.data)\n\n try:\n db.session.add(role)\n db.session.commit()\n flash('New role successfully created')\n except:\n flash('Error: Role title already exist')\n\n return redirect(url_for('admin.get_roles'))\n\n return render_template('admin/roles/role.html', form=form, add_role=add_role, title='Add Role')", "async def addrole(self, ctx, rolename, user: discord.Member=None):\n author = ctx.message.author\n channel = ctx.message.channel\n server = ctx.message.server\n\n if user is None:\n user = author\n\n role = self._role_from_string(server, rolename)\n\n if role is None:\n await self.bot.say('That role cannot be found.')\n return\n\n if not channel.permissions_for(server.me).manage_roles:\n await self.bot.say('I don\\'t have manage_roles.')\n return\n\n if author.id == settings.owner:\n pass\n elif not channel.permissions_for(author).manage_roles:\n raise commands.CheckFailure\n\n await self.bot.add_roles(user, role)\n await self.bot.say('Added role {} to {}'.format(role.name, user.name))", "async def command_assign_role(self, context, role: str):\n try:\n await context.author.add_roles(discord.utils.get(\n context.guild.roles, name=role))\n await context.message.add_reaction('👍')\n except Exception as e:\n await context.message.add_reaction('👎')\n await context.send('Role could not be assigned')\n print(f'Errored in command_assign_role.', e)", "def add_role():\n\tcheck_admin()\n\tadd_role = True\n\n\tform = RoleForm()\n\tif form.validate_on_submit():\n\t\trole= Role(name= form.name.data,description=form.description.data)\n\n\t\ttry:\n\t\t\t#add role to the database \n\t\t\tdb.session.add(role)\n\t\t\tdb.session.commit()\n\t\t\tflash('You have successfully added a new role ')\n\t\texcept:\n\t\t\t#incase the role already exists\n\t\t flash(\"Error:the role already exists\")\n\n\t\t#redirect to the roles page\n\t\treturn redirect(url_for('admin.list_roles'))\n\n\t\t#load the role template\n\treturn render_template('admin/roles/role.html', add_role=add_role, form = form,title='Add Role')", "def add_role():\r\n check_admin()\r\n\r\n add_role = True\r\n\r\n form = RoleForm()\r\n if form.validate_on_submit():\r\n role = Role(name=form.name.data,\r\n description=form.description.data)\r\n\r\n try:\r\n # add role to the database\r\n db.session.add(role)\r\n db.session.commit()\r\n flash('You have successfully added a new role.')\r\n except:\r\n # in case role name already exists\r\n flash('Error: role name already exists.',category='error')\r\n\r\n # redirect to the roles page\r\n return redirect(url_for('admin.list_roles'))\r\n\r\n # load role template\r\n return render_template('admin/roles/role.html', add_role=add_role,\r\n form=form, title='Add Role')", "def addRole(self, role):\n self._client.addRole(role)", "def test_add_role_simple_post(self):\n pass", "def add_employee(self, employee):\n self.employees.add(employee)", "def add_role(self, name):\n role = Role.by_name(name)\n if not role:\n role = Role(name)\n db.add(role)\n if not role 
in self.roles:\n self.roles.append(role)", "async def role(ctx, role: discord.Role = None):\n if role is None:\n await ctx.send(\"List of assignable roles: \" + str(allowed_roles))\n if role.name in allowed_roles:\n if not role in ctx.message.author.roles:\n await ctx.message.author.add_roles(role)\n await ctx.send(\"Role added.\")\n else:\n await ctx.message.author.remove_roles(role)\n await ctx.send(\"Role removed.\") \n else:\n await ctx.send(\"That role doesn't exist, or you don't have permission to modify it.\")", "def define_role(self, role):\n\n self._db_manager.create_role(role)", "async def addrole(self, ctx: context.CustomContext):\n\n await ctx.send(\n f\"{config.USER_INTERACTION_REQUIRED} Reply with the name of the role you want to create.\"\n )\n\n role_name = await ctx.converted_input(converter=converter.CaseInsensitiveRole)\n\n if isinstance(role_name, str):\n await ctx.send(\n f\"{config.YES} I will **create a new role** on this server named `{role_name}` for this.\"\n )\n try:\n discord_role = await ctx.guild.create_role(name=role_name)\n except discord.Forbidden:\n raise exceptions.ForbiddenError(\n exceptions.ForbiddenTask.CREATE_ROLE, role_name\n )\n\n else:\n discord_role = role_name\n\n await ctx.send(\n f\"{config.YES} I'll use the **pre-existing role** named `{discord_role.name}` for this.\"\n )\n\n role_join_message = await ctx.input(\n f\"{config.USER_INTERACTION_REQUIRED} Reply with a short message the user should see when they get the role.\"\n )\n\n try:\n await self.bot.db.execute(\n \"INSERT INTO selfrole (guild_id, role_id, join_message) VALUES ($1, $2, $3) \"\n \"ON CONFLICT (guild_id, role_id) DO UPDATE SET join_message = $3\",\n ctx.guild.id,\n discord_role.id,\n role_join_message,\n )\n except asyncpg.UniqueViolationError:\n return await ctx.send(\n f\"{config.NO} `{discord_role.name}` is already a selfrole on this server.\"\n )\n\n await ctx.send(f\"{config.YES} `{discord_role.name}` was added as a selfrole.\")", "def add_role(self, role):\n try:\n self.db_proxy.nameCheck(role.theName, 'role')\n except ARM.ARMException as ex:\n self.close()\n raise ARMHTTPError(ex)\n\n role_params = RoleParameters(\n name=role.theName,\n rType=role.theType,\n sCode=role.theShortCode,\n desc=role.theDescription,\n cProperties=[]\n )\n\n role_id = self.db_proxy.addRole(role_params)\n\n return role_id", "def addRole(self, name, description=\"\"):\n params = {\n \"f\" : \"json\",\n \"rolename\" : name,\n \"description\" : description\n }\n aURL = self._url + \"/roles/add\"\n return self._con.post(path=aURL, postdata=params)", "def addRole(self, role=None, roleName=None, kvDict=None):\n return _modelActionBase(self, instance=role, instanceName=roleName, kvDict=kvDict,\n model=get_model('role'), db=db, action='add', modelType='role')", "def test_edit_role_add_new_role(self):\n # Add node with controller role\n Nodes().nodes_discovered[0].checkbox.click()\n RolesPanel().controller.click()\n Nodes().apply_changes.click()\n time.sleep(1)\n # Add cinder role\n with Nodes() as n:\n n.nodes[0].checkbox.click()\n n.edit_roles.click()\n RolesPanel().cinder.click()\n Nodes().apply_changes.click()\n time.sleep(1)\n with Nodes() as n:\n self.assertIn(ROLE_CONTROLLER, n.nodes[0].roles.text,\n 'Controller role')\n self.assertIn(ROLE_CINDER, n.nodes[0].roles.text,\n 'Cinder role')", "def set_role(userid, role, group, request=None):", "def test_add_role_to_project_member(self):\n pass", "async def add_role_admin(request, role_id):\n required_fields = [\"id\"]\n 
utils.validate_fields(required_fields, request.json)\n\n txn_key, txn_user_id = await utils.get_transactor_key(request)\n proposal_id = str(uuid4())\n batch_list = Role().admin.propose.batch_list(\n signer_keypair=txn_key,\n signer_user_id=txn_user_id,\n proposal_id=proposal_id,\n role_id=role_id,\n next_id=request.json.get(\"id\"),\n reason=request.json.get(\"reason\"),\n metadata=request.json.get(\"metadata\"),\n )\n await utils.send(\n request.app.config.VAL_CONN, batch_list, request.app.config.TIMEOUT\n )\n return json({\"proposal_id\": proposal_id})", "def addUserRole(self, name, role):\n self._client.addUserRole(name, role)" ]
[ "0.7416667", "0.73289865", "0.72949153", "0.72568375", "0.7244213", "0.71598387", "0.7032163", "0.699969", "0.6978381", "0.69690233", "0.6907972", "0.68849903", "0.687573", "0.6826044", "0.6823985", "0.6823678", "0.67929417", "0.67829245", "0.6731767", "0.6684479", "0.66809964", "0.66765994", "0.6669527", "0.6638014", "0.66187716", "0.66142064", "0.6590134", "0.65658325", "0.65430516", "0.65273905" ]
0.80732065
0
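For illustration, the role insert above as a standalone parameterized statement; conn and employee_id are placeholders, and the 'admin' role value is borrowed from the role queries that appear elsewhere in this dataset.

# Minimal standalone equivalent of the INSERT in add_employeeRole.
cur = conn.cursor()
cur.execute('INSERT INTO employeeRoles VALUES (%s, %s)', (employee_id, 'admin'))
conn.commit()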
gets all the roles of an employee
def get_employeeRoles(self, id): cursor = self.dbconnect.get_cursor() cursor.execute('select * from employeeRoles where employee=%s', (id,)) roles = list() for row in cursor: roles.append(row[1]) return roles
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_roles(role):", "def getRoles(self):", "def getRoles(self):\n return [self.getRole(), {\"roleName\":\"policajti\", \"roleTitle\":\"Svestky\"}]", "def roles(self):\n params = {\n \"f\" : \"json\"\n }\n uURL = self._url + \"/roles\"\n return self._con.get(path=uURL, params=params)", "def get_roles(self):\n\t\tif not self.roles:\n\t\t\tself.roles = get_roles(self.name)\n\t\treturn self.roles", "def get_roles(self, principal_id):", "def _get_roles(self):\n return api.tuskar.OvercloudRole.list(self.request)", "def get_roles():\n\n # Get instance of RolesOperations Class\n roles_operations = RolesOperations()\n\n # Call get_roles method\n response = roles_operations.get_roles()\n\n if response is not None:\n\n # Get the status code from response\n print('Status Code: ' + str(response.get_status_code()))\n\n if response.get_status_code() in [204, 304]:\n print('No Content' if response.get_status_code() == 204 else 'Not Modified')\n return\n\n # Get object from response\n response_object = response.get_object()\n\n if response_object is not None:\n\n # Check if expected ResponseWrapper instance is received.\n if isinstance(response_object, ResponseWrapper):\n\n # Get the list of obtained Role instances\n roles_list = response_object.get_roles()\n\n for role in roles_list:\n # Get the DisplayLabel of each Role\n print(\"Role DisplayLabel: \" + str(role.get_display_label()))\n\n # Get the forecastManager User instance of each Role\n forecast_manager = role.get_forecast_manager()\n\n # Check if forecastManager is not None\n if forecast_manager is not None:\n\n # Get the ID of the forecast Manager\n print(\"Role Forecast Manager User-ID: \" + str(forecast_manager.get_id()))\n\n # Get the name of the forecast Manager\n print(\"Role Forecast Manager User-Name: \" + str(forecast_manager.get_name()))\n\n # Get the ShareWithPeers of each Role\n print(\"Role ShareWithPeers: \" + str(role.get_share_with_peers()))\n\n # Get the Name of each Role\n print(\"Role Name: \" + role.get_name())\n\n # Get the Description of each Role\n print(\"Role Description: \" + str(role.get_description()))\n\n # Get the Id of each Role\n print(\"Role ID: \" + str(role.get_id()))\n\n # Get the reporting_to User instance of each Role\n reporting_to = role.get_reporting_to()\n\n # Check if reporting_to is not None\n if reporting_to is not None:\n # Get the ID of the reporting_to User\n print(\"Role ReportingTo User-ID: \" + str(reporting_to.get_id()))\n\n # Get the name of the reporting_to User\n print(\"Role ReportingTo User-Name: \" + str(reporting_to.get_name()))\n\n # Get the AdminUser of each Role\n print(\"Role AdminUser: \" + str(role.get_admin_user()))\n\n # Check if the request returned an exception\n elif isinstance(response_object, APIException):\n # Get the Status\n print(\"Status: \" + response_object.get_status().get_value())\n\n # Get the Code\n print(\"Code: \" + response_object.get_code().get_value())\n\n print(\"Details\")\n\n # Get the details dict\n details = response_object.get_details()\n\n for key, value in details.items():\n print(key + ' : ' + str(value))\n\n # Get the Message\n print(\"Message: \" + response_object.get_message().get_value())", "def get_roles(self):\n return [role.role_id for role in self.roles if role]", "def get_roles():\n check_admin()\n roles = Role.query.all()\n\n return render_template('admin/roles/roles.html', roles=roles, title=\"Roles\")", "def get_roles():\r\n global _roles\r\n return _roles", "def get_roles(self):\n path = \"%s/services/impala/roles\" % 
self.__base_path\n response = self.__session.get(path)\n self.__check_status_code(response.status_code)\n return response.json()", "def get_user_roles(self):\n url = 'userroles'\n result = self.get(url)\n return result.get('userroles', result)", "def get_roles(self, **search_args):\n return self.openbis.get_role_assignments(person=self, **search_args)", "def list(self):\n return self.client.find_all_roles()", "def get_admins(self):\n from Employee import Employee\n admins = list()\n cursorRoles = self.dbconnect.get_cursor()\n cursorRoles.execute('select * from employeeRoles where role=\\'admin\\'')\n for row in cursorRoles:\n admins.append(self.get_employee(row[0]))\n return admins", "def get_roles(self) -> requests.models.Response:\n return self.get('v1/roles')", "def get_roles_list(self):\n try:\n roles = self.db_handler.get_roles_list()\n self.logger.write_to_log('roles got', 'model')\n return roles\n except Exception as err:\n method_name = sys._getframe().f_code.co_name\n\n self.logger.write_to_log('exception', 'model')\n self.logger.write_to_err_log(f'exception in method {method_name} - {err}', 'model')", "def getRoles():\n return jsonify(listRoles(ROLES_DIR))", "def getAllRoles(self):\n\n # Learn URL of AllRoles service\n url = self.config.get(\"Authorization\",\"allroles\") # http://erra.ccss.cz/g4i-portlet/service/list/roles/en\n logging.debug(\"[LaymanAuthLiferay][getAllRoles] AllRoles url: %s\"% url)\n \n # Request all roles from LifeRay\n import httplib2\n h = httplib2.Http()\n header, content = h.request(url, \"GET\")\n logging.debug(\"[LaymanAuthLiferay][getAllRoles] response header: %s\"% header)\n logging.debug(\"[LaymanAuthLiferay][getAllRoles] response content: %s\"% content)\n\n # Parse the response\n try:\n allRolesJson = json.loads(content)\n logging.debug(\"[LaymanAuthLiferay][getAllRoles] AllRoles reply succesfully parsed\")\n except ValueError,e:\n logging.error(\"[LaymanAuthLiferay][getAllRoles] Cannot parse AllRoles reply: '%s'\"% content)\n raise AuthError(500, \"Cannot parse GET All Roles response [%s] as JSON:%s\"% (content,e)) \n \n roles = allRolesJson[\"roles\"]\n\n # lower() and spaces\n for rr in roles:\n rr[\"roleName\"] = rr[\"roleName\"].lower()\n rr[\"roleName\"] = \"_\".join(rr[\"roleName\"].split(' '))\n\n # Return roles\n logging.debug(\"[LaymanAuthLiferay][getAllRoles] Return roles: %s\"% str(roles))\n return roles", "def get_granted_roles(self):", "def getRoles(context):\n\n pmemb = getToolByName(getSite(), 'portal_membership')\n roles = [role for role in pmemb.getPortalRoles() if role != 'Owner']\n return SimpleVocabulary.fromValues(roles)", "def get(self):\n return self._roles.get(self._id)", "def getRoles(self):\n\t\tpayload = ''\n\t\tif self.Roles:\n\t\t\tif type(self.Roles) != int:\n\t\t\t\tfor x in range(0,len(self.Roles)):\n\t\t\t\t\tpayload += \"%s\" % (self.Roles[x])\n\t\t\t\treturn self.Roles\n\t\t\telse:\n\t\t\t\treturn None", "def list_roles():\n\tsession = get_session()\n\tresponse = session.get(\"{url}/api/roles\".format(url=get_registry_url()))\n\treturn response.json()[\"results\"]", "def test_list_roles(self):\n pass", "def get_roles():\n return config.get_cfg_storage(ID_ROLE)", "def get_all_roles():\n\n cnx,cur = connect_to_db() #get connection with db\n cur.execute(\"SELECT DISTINCT role FROM movie_crew\")\n lst = cur.fetchall()\n cur.close()\n cnx.close()\n return lst", "def get_roles_descriptions():\n\n cnx,cur = connect_to_db() #get connection with db\n cur.execute(\"select distinct role, job_description from department where 
job_description is not null \")\n lst = cur.fetchall()\n cur.close()\n cnx.close()\n return lst", "def list_roles(self, hints):\n raise exception.NotImplemented() # pragma: no cover" ]
[ "0.7949602", "0.77490324", "0.7219508", "0.7100693", "0.70895886", "0.70715743", "0.6987245", "0.6968574", "0.6919542", "0.6890405", "0.6882044", "0.6865154", "0.6842712", "0.67842984", "0.67706275", "0.67660475", "0.6756659", "0.673916", "0.6718372", "0.6637145", "0.66315967", "0.6626008", "0.6624409", "0.66179127", "0.6606336", "0.6598714", "0.6573045", "0.6572775", "0.6529231", "0.6503123" ]
0.81541693
0
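A compact variant of the roles lookup above: selecting only the role column and using fetchall() returns the same list; conn and employee_id are placeholders.

# Variant sketch: same result as get_employeeRoles in a single query plus a list comprehension.
cur = conn.cursor()
cur.execute('select role from employeeRoles where employee=%s', (employee_id,))
roles = [row[0] for row in cur.fetchall()]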
changes the data of an employee
def change_employee(self, employee): cursor = self.dbconnect.get_cursor() try: if employee.id == None: raise Exception('no id given') cursor.execute('select * from employee where employeeID=%s', (str(employee.id),)) if cursor.rowcount == 0: raise Exception('no employee found with that id') cursor.execute( 'update employee set name= %s,email= %s,office= %s,title= %s,INTernORextern= %s,active= %s,promotor= %s where employeeID=%s', (employee.name, employee.email, employee.office, employee.title, employee.internOrExtern, employee.active, employee.promotor, employee.id)) self.dbconnect.commit() except: self.dbconnect.rollback() raise Exception('unable to change employee')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_employee(self, obj):\n cursor = self.dbconnect.get_cursor()\n try:\n cursor.execute('UPDATE employee '\n 'SET name = %s, email = %s, office = %s, extra_info = %s, picture_location = %s, '\n 'research_group = %s, title = %s, is_external = %s, is_admin = %s, is_active = %s '\n 'WHERE id = %s;',\n (obj.name, obj.email, obj.office, obj.extra_info, obj.picture_location, obj.research_group,\n obj.title, obj.is_external, obj.is_admin, obj.is_active, obj.e_id))\n self.dbconnect.commit()\n except:\n self.dbconnect.rollback()\n raise", "def change_employee(self,changed_identity):\r\n\r\n changed_emp = Employee(*changed_identity)\r\n changed_str = changed_emp.get_changes_registration_str()\r\n\r\n return_value = self.change_object_in_DB(\"employee\", changed_str, changed_emp._id) # Bring 'id' seperately, so next function can find line number\r\n return return_value", "def update_employee(employee):\n employee_id = get_employee_input_int(\"Enter the employee id you want to update\")\n newGrade = get_employee_input_int(\"Enter the new grade for \")\n db.update_employee(employee_id, newGrade)\n print(employee.full_name + \"'s grade value has been updated to :-> \", newGrade)", "def setEmployees(self, employees):\n self.employees = employees", "def employee(self, employee: object):\n\n self._employee = employee", "def update(self, request, pk):\n serializer = data_serializers.UpdateEmployeeRequestSerializer(data=request.data)\n if serializer.is_valid(raise_exception=True):\n request_data = serializer.save()\n try:\n new_employee_entity = self.controller.update_employee(request_data=request_data)\n serializer = data_serializers.PresentEmployeeDataSerializer(new_employee_entity)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n except (domain_exceptions.EmployeeIDIsNotUnique,\n domain_exceptions.WorkArrangementPercentageOutOfRange,\n domain_exceptions.TeamHasALeader,\n domain_exceptions.ObjectEntityDoesNotExist\n ) as e:\n return Response(e.message, status=status.HTTP_400_BAD_REQUEST)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "def employees(self, employees: object):\n\n self._employees = employees", "def make_salaried(self,salary,name):\n id = self.find_employee_id(name)\n if id in self.clsf:\n self.emp_dict[id][5] = \"2\"\n print(\"{}{}\".format(name,\" was successfully changed to be a salaried employee\"))\n self.emp_dict[id][7] = salary\n self.classification()\n return self.emp_dict\n else:\n print(\"Error- employee not found\")\n self.employee_data()", "def put(self, request, pk):\n data = request.data\n data.pop('skills')\n Department_name = data.pop('department')\n department = Department.objects.get(name=Department_name)\n manager_name = data.pop('manager')\n manager = Manager.objects.get(name=manager_name)\n EmployeeDetail.objects.filter(pk=pk).update(department=department, manager=manager, **data)\n return Response(\n data=\"request.data\"\n )", "def update_employee(emp_id, key=None, value=None, items=None):\n if items is None:\n if key is None or value is None:\n return {\"Error\": \"At least one key/value pair is required\"}\n items = {key: value}\n elif isinstance(items, str):\n items = salt.utils.yaml.safe_load(items)\n\n xml_items = \"\"\n for pair in items:\n xml_items += '<field id=\"{}\">{}</field>'.format(pair, items[pair])\n xml_items = \"<employee>{}</employee>\".format(xml_items)\n\n status, result = _query(\n action=\"employees\",\n command=emp_id,\n data=xml_items,\n method=\"POST\",\n )\n\n return 
show_employee(emp_id, \",\".join(items.keys()))", "def put(self, id):\n empleadoactualizar = EmployeeModel.query.filter_by(employee_id=id).first()\n if empleadoactualizar:\n reg = api.payload\n empleadoactualizar.employee_id = reg['employee_id']\n empleadoactualizar.name = reg['name']\n empleadoactualizar.age = reg['age']\n empleadoactualizar.position = reg['position']\n empleadoactualizar.fechaingreso = datetime.date.fromisoformat(reg['fechaingreso'])\n db.session.merge(empleadoactualizar)\n db.session.commit()\n return 201\n api.abort(404)", "def replace_employee(employees, old_employee, new_employee):\n #getting index of the old employee and saving it\n index = employees.index(old_employee)\n #deleting the old employee\n del employees[index] #yes, I remember about \"pop\" built-in function from the lecture, just like this one better :)\n #inserting the new employee to the position of the old one\n employees.insert(index, new_employee)", "def UpdateEmployee():\n staff = current_user\n form = UpdateEmployeeForm()\n if form.validate_on_submit():\n staff.first_name=form.first_name.data.lower()\n staff.last_name=form.last_name.data.lower()\n staff.email=form.email.data\n staff.location=form.location.data\n db.session.commit()\n flash(f'Employee Updated', category='Success')\n elif request.method == 'GET':\n form.first_name.data=staff.first_name.capitalize()\n form.last_name.data=staff.last_name.capitalize()\n form.email.data=staff.email\n form.role.choices=[staff.role]\n form.location.data=staff.location\n return render_template('update_employee.html', title=\"Update Employee\", form=form)", "def make_commissioned(self,salary,commission,name):\n id = self.find_employee_id(name)\n if id in self.clsf:\n self.emp_dict[id][5] = \"3\"\n print(\"{}{}\".format(name,\" was successfully changed to be a commissioned employee\"))\n self.emp_dict[id][7] = salary\n self.emp_dict[id][9] = commission\n self.classification()\n return self.emp_dict\n else:\n print(\"Error- employee not found\")\n self.employee_data()", "def edit_employee(employee_id):\n\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n \n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n employee = Employee.query.get_or_404(employee_id)\n form = Edit_User_Form(obj = employee)\n \n #form.location.choices = db.session.query(Location.id, Location.site_name).all()\n \n #form.certs.choices = db.session.query(Certs.id , Certs.cert_name).all()\n\n if form.validate_on_submit():\n \n employee.email = form.email.data, \n employee.first_name = form.first_name.data,\n employee.last_name = form.last_name.data,\n employee.hire_date = form.hire_date.data, \n employee.is_admin = form.is_admin.data\n\n \n db.session.commit()\n \n flash(f\"{employee.first_name} {employee.last_name} has been saved\", \"success\")\n return redirect(\"/administrator\")\n else:\n\n return render_template(\"/admin/edit_user.html\", employee = employee, form = form)", "def update_employee(cls, schema, uuid, employee_json):\n employee = cls.get_employee_by_uuid(uuid)\n if employee is None:\n raise ValueError('Invalid employee uuid')\n employee = schema.load(\n employee_json, session=db.session, instance=employee\n )\n db.session.add(employee)\n db.session.commit()\n return employee", "def update_data():\n pass", "def put(self, uuid: str):\n try:\n employee = self.service.update_employee(\n self.schema, uuid, request.json\n )\n except ValidationError as error:\n return error.messages, 400\n 
except ValueError:\n return self.NOT_FOUND_MESSAGE, 404\n return self.schema.dump(employee), 200", "def put(self, employee_id):\n\n employee = EmployeeModel.find_by_id(employee_id)\n if employee is None:\n return {'message': \"There is no employee with this ID, or your access_token is invalid.\"}, 404\n else:\n \"\"\" check if employee entered the building today\"\"\"\n if WorkdayModel.find_latest_workday(employee.id):\n \"\"\"checking if employee already entered building today\"\"\"\n last_workday = WorkdayModel.find_latest_workday(employee.id)\n\n if last_workday.time_in.day == datetime.today().day:\n last_workday.time_out = datetime.today()\n # calculate hours_worked| .time converts to H:M\n duration = last_workday.time_out - last_workday.time_in\n # duration is a datetime.timedelta\n duration = (datetime.min + duration).time()\n last_workday.hours_worked = duration\n try:\n last_workday.save_to_db()\n except:\n return {'message': 'An error occurred updating worked hours'}, 500\n\n return last_workday.json()\n\n return {'message': 'First use of card, or employee did not start work today'}, 200", "def make_hourly(self,rate,name):\n id = self.find_employee_id(name)\n if id in self.clsf:\n self.emp_dict[id][5] = \"1\"\n print(\"{}{}\".format(name,\" was successfully changed to be an hourly employee\"))\n self.emp_dict[id][8] = rate\n self.classification()\n return self.emp_dict\n else:\n print(\"Error- employee not found\")\n self.employee_data()", "def update(\n self,\n email,\n company_name,\n location,\n job_profile,\n salary,\n username,\n password,\n security_question,\n security_answer,\n notes,\n date_applied,\n status,\n):", "def employee_data(self):\n self.paymethod()\n self.classification()\n for i in self.emp_id:\n if self.clsf[i] == \"Salaried\":\n if self.pymthd[i] == \"Direct Deposit\":\n self.emp_data[i] = [self.clsf[i],self.pymthd[i],self.emp_dict[i][7],self.emp_dict[i][10],\n self.emp_dict[i][11]]\n elif self.pymthd[i] == \"Mailed Check\":\n self.emp_data[i] = [self.clsf[i],self.pymthd[i],self.emp_dict[i][1],self.emp_dict[i][2],\n self.emp_dict[i][3], self.emp_dict[i][4],self.emp_dict[i][7]]\n elif self.clsf[i] == \"Hourly\":\n if self.pymthd[i] == \"Direct Deposit\":\n self.emp_data[i] = [self.clsf[i],self.pymthd[i],self.emp_dict[i][8],self.emp_dict[i][10],\n self.emp_dict[i][11]]\n elif self.pymthd[i] == \"Mailed Check\":\n self.emp_data[i] = [self.clsf[i],self.pymthd[i],self.emp_dict[i][1],self.emp_dict[i][2],\n self.emp_dict[i][3], self.emp_dict[i][4],self.emp_dict[i][8]]\n elif self.clsf[i] == \"Commissioned\":\n if self.pymthd[i] == \"Direct Deposit\":\n self.emp_data[i] = [self.clsf[i],self.pymthd[i],self.emp_dict[i][7],self.emp_dict[i][9],\n self.emp_dict[i][10],self.emp_dict[i][11]]\n elif self.pymthd[i] == \"Mailed Check\":\n self.emp_data[i] = [self.clsf[i],self.pymthd[i],self.emp_dict[i][1],self.emp_dict[i][2],\n self.emp_dict[i][3],self.emp_dict[i][4],self.emp_dict[i][7],self.emp_dict[i][9]]\n else:\n print(\"Error\")\n print(self.emp_data)\n return self.emp_data", "def write(self, vals):\n\n for record in self:\n employee_id = record.env['hr.employee'].browse(record.id)\n\n change_type = change_period = False\n\n if vals.get('contract_type'):\n change_type = True if vals['contract_type'] != employee_id.contract_type else False\n\n if vals.get('contract_period'):\n change_period = True if vals['contract_period'] != employee_id.contract_period else False\n\n if change_type or change_period:\n # _generate_nik parameter is vals\n new_vals = {\n 'company_id': 
record.company_id.id,\n # 'estate_id': record.estate_id.id, extend at estate module\n 'contract_type': vals['contract_type'] if 'contract_type' in vals else record.contract_type,\n 'contract_period': vals['contract_period'] if 'contract_period' in vals else record.contract_period,\n # 'nik_number': record.nik_number,\n 'internship': record.internship,\n 'outsource': record.outsource\n }\n\n vals['nik_number'] = self.generate_nik(new_vals)\n _logger.info(_('Employee %s has new Employee Identity Number %s: ' % (employee_id.name, vals['nik_number'])))\n return super(Employee, self).write(vals)", "def updateEMPStudyData(self, study_id, study_score, web_app_user_id):\n con = self.getMetadataDatabaseConnection()\n con.cursor().callproc('qiime_assets.update_emp_study_data', [study_id, study_score, web_app_user_id])", "def process_employees_salary(self, employees_info: List[List[str]]) -> None:\n pass", "def setData(self,newData):\r\n pass", "def edit_user(self, username, employee, role, status, change_pwd=False, *password):\n self.click(self.user_edit_save_btn)\n self.set_combox_value(role, self.user_role_select)\n self.input_text(employee, self.emp_name_input)\n self.input_text(username, self.user_name_input)\n self.set_combox_value(status, self.user_status_select)\n if change_pwd:\n self.click(self.change_password)\n self.input_text(password, self.user_password_input)\n self.input_text(password, self.user_confirm_password)\n self.click(self.user_edit_save_btn)\n self.wait_unit_el_present(self.user_table)\n Log.info(\"User is edited and saved.\")", "def profile_page(cls, employee_id, logger=None):\n if logger is None:\n logger = cls._logger\n\n database_connection = DatabaseConnection(f\"employees.csv\")\n table = database_connection.table\n employee = Employee(employee_id)\n\n view = table[(table['employee_id']==employee.get_employee_id())]\n logger.log(view)\n\n while True:\n\n choice = input(\n \"Please choose: \"\n \"(1) check data, \"\n \"(2) update first name, \"\n \"(3) update last name, \"\n \"(4) save changes, \"\n \"(5) exit without saving \"\n )\n if choice not in ('1', '2', '3', '4', '5'):\n logger.log(\"Please pick a valid choice\")\n elif choice=='1':\n view = table[(table['employee_id']==employee.get_employee_id())]\n logger.log(view)\n elif choice=='2':\n first_name = input(\"Enter your first name: \")\n employee.set_first_name(first_name)\n elif choice=='3':\n last_name = input(\"Enter your last name: \")\n employee.set_last_name(last_name)\n elif choice=='4':\n table[\n (table['employee_id']==employee.get_employee_id())\n ] = pd.Series(\n {'employee_id': employee.get_employee_id(),\n 'first_name': employee.get_first_name(),\n 'last_name': employee.get_last_name(),\n }\n )\n database_connection.overwrite()\n logger.log(\"Information saved!\")\n else:\n break", "def add_employee(self, emp):\n if emp not in self.employees: \n self.employees.append(emp)", "def test_changedata(self):\n p = model.Person(firstname=\"Tobias\", lastname=\"Thelen\",\n email=\"tthelen@uos.de\", hobbies=[\"singen\",\"springen\",\"fröhlichsein\"])\n id = p.store()\n\n p = model.Person(id=id)\n p['firstname'] = \"Walter\"\n p.store()\n\n p2 = model.Person(id=id)\n self.assertEqual(p2.firstname, \"Walter\")\n self.assertEqual(p2.lastname, \"Thelen\")" ]
[ "0.7302678", "0.718177", "0.7112663", "0.7027216", "0.7005247", "0.68823713", "0.6850561", "0.67905223", "0.6775437", "0.6432232", "0.6327525", "0.6219686", "0.61889017", "0.6159903", "0.61581737", "0.6071851", "0.60707927", "0.597288", "0.5945182", "0.58981615", "0.5867029", "0.5863868", "0.58178544", "0.5784019", "0.5756484", "0.57548106", "0.5739588", "0.5733408", "0.5715754", "0.57060283" ]
0.7703579
0
get all the projects of an employee. IMPORTANT: not all fields will be completed, only the fields in the project table and that of the activeYears
def get_employeeProjects(self, id):
    from Project import Project
    cursor = self.dbconnect.get_cursor()
    cursor.execute('select project from projectpromotor where employee=%s', (id,))
    projectsId = list()
    for row in cursor:
        projectsId.append(row[0])
    projects = list()
    for projId in projectsId:
        cursor.execute('select * from project where projectID=%s', (projId,))
        # returns exactly one row from the table
        row = cursor.fetchone()
        project = Project(row[0], row[1], row[2], row[3])
        cursor.execute('select year from projectYearConnection where projectID=%s', (projId,))
        years = list()
        for row in cursor:
            years.append(row[0])
        project.activeYear = years
        projects.append(project)
    return projects
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_projects():\n if current_user.get_id() is None:\n return\n with database.engine.begin() as connection:\n result = connection.execute(select(\n [models.projects.c.project_id, models.projects.c.name, models.projects.c.path, models.projects.c.creation_date, models.projects.c.user_id, func.count(models.objects.c.object_id).label('object_count')])\n .select_from(models.projects.outerjoin(models.objects))\n .where(and_(models.projects.c.active == True, models.projects.c.user_id == current_user.id))\n .group_by(models.projects.c.project_id)\n .order_by(models.projects.c.project_id))\n projects = [dict(row) for row in result]\n for project in projects:\n user = models.User.query.filter_by(\n id=project['user_id']).first()\n if user:\n project['email'] = user.email\n return projects", "def get_projects(self):\n rps = self.start_date\n\n return Project.objects.filter(\n Q(active=True)\n & Q(\n Q(start_date__lte=rps)\n | Q(\n Q(start_date__gte=rps)\n & Q(start_date__lte=datetime.datetime.now().date())\n )\n | Q(start_date__isnull=True)\n )\n & Q(\n Q(end_date__gte=rps)\n | Q(end_date__isnull=True)\n )\n )", "def get_projects(self):\n res = self.conn.cursor().execute(\"SELECT * FROM projects\")\n return res.fetchall()", "def get_projects(self, *criterion):\n from wkcdd.models.helpers import get_project_list\n return get_project_list([self.id], *criterion)", "def get_projects():\n return Project.query.all()", "def selectable_projects():\n\n db = current.db\n s3db = current.s3db\n\n # Lookup projects with provider self-registration\n ptable = s3db.project_project\n ttable = s3db.project_project_tag\n join = ttable.on((ttable.project_id == ptable.id) & \\\n (ttable.tag == \"APPLY\") & \\\n (ttable.value == \"Y\") & \\\n (ttable.deleted == False))\n query = (ptable.deleted == False)\n rows = db(query).select(ptable.id,\n ptable.name,\n join = join,\n )\n projects = {row.id: row.name for row in rows}\n return projects", "def get_projects(self, include_stats, is_active_val=None):\n\n # read all kinds of project info and computed counts from the db\n # into a pandas data frame\n projects_df = self._read_projects_df_from_db(\n include_stats=include_stats)\n\n # if an active value has been provided, look only at project records\n # that have that active value. NB this has to be a test against None,\n # not against \"false-ish\" (if not is_active_val)\n if is_active_val is not None:\n is_active_val_mask = projects_df[p.IS_ACTIVE_KEY] == is_active_val\n filtered_df = projects_df.loc[is_active_val_mask]\n projects_df = filtered_df\n\n if include_stats:\n # cut stats columns out into own df (w same index as projects one)\n stats_keys = p.get_computed_stats_keys()\n stats_df = projects_df[stats_keys].copy()\n projects_df = projects_df.drop(stats_keys, axis=1)\n\n # within computed stats columns (ONLY--does not apply to\n # descriptive columns from the project table, where None is\n # a real, non-numeric value), NaN and None (which pandas treats as\n # interchangeable :-| ) should be converted to zero. Everything\n # else should be cast to an integer; for some weird reason pandas\n # is pulling in counts as floats\n stats_df = stats_df.fillna(0).astype(int)\n\n stats_dict = stats_df.to_dict(orient='index')\n\n result = []\n # NB: *dataframe*'s to_dict automatically converts numpy data types\n # (e.g., numpy.bool_, numpy.int64) to appropriate python-native data\n # types, but *series* to_dict does NOT do this automatic conversion\n # (at least, as of this writing). 
Be cautious if refactoring the below\n projects_dict = projects_df.to_dict(orient='index')\n for k, v in projects_dict.items():\n if include_stats:\n v[p.COMPUTED_STATS_KEY] = stats_dict[k]\n result.append(p.Project.from_dict(v))\n\n return result", "def _get_open_projects_info():\n projects = Project.objects.filter(project_open=True).order_by(\"created_at\")\n projects_sum_hours = []\n for project in projects:\n time_entries_pro_project = TimeEntry.objects.filter(project=project)\n used_hours = _sum_hours(time_entries_pro_project)\n hours_percent = _calculate_hours_percent(used_hours, project.stimated_hours)\n projects_sum_hours.append(\n {\n \"hours_percent_number\": hours_percent,\n \"hours_percent\": f\"{hours_percent}%\",\n \"worked_hours\": used_hours,\n \"project\": project,\n }\n )\n return projects_sum_hours", "def db_projects():\n return [{\"name\": \"IT\"}, {\"name\": \"Financial\"}, {\"name\": \"Failed\"}]", "def get_all_projects(engine): \n # Query db\n# sql = (\"SELECT a.project_id, \"\n# \" b.o_number, \"\n# \" a.project_name, \"\n# \" a.project_description \"\n# \"FROM nivadatabase.projects a, \"\n# \" nivadatabase.projects_o_numbers b \"\n# \"WHERE a.project_id = b.project_id \"\n# \"ORDER BY a.project_id\")\n sql = (\"SELECT project_id, \"\n \" project_name, \"\n \" project_description \"\n \"FROM nivadatabase.projects \"\n \"ORDER BY project_id\")\n df = pd.read_sql(sql, engine)\n\n return df", "def get_all_projects(self, org):\n return [proj for proj in Project.objects.filter(org=org)]", "def projects(self, request, pk=None):\n\n obj = self.get_object()\n try:\n query = models.Project.objects.filter(\n subject=obj.subject,\n assign=obj\n )\n serializer = self.get_serializer(query, many=True)\n\n id = self.request.query_params.get('id')\n\n if id:\n query = get_object_or_404(\n models.Project,\n id=id,\n assign=obj\n )\n return self.filtering(request, query)\n\n return Response(serializer.data)\n except:\n raise except_handler.ActionDecor()", "def get_projects(session):\n cursuses = [1, 21] # cursus ids from which to get the projects\n project_names = []\n\n for cursus in cursuses:\n # Get all the projects from 1 cursus, very slow process because projects endpoint contains\n # a lot of information\n projects = get_all_pages(session, f'/cursus/{cursus}/projects', 100, {'filter[exam]': False})\n for project in projects:\n # Create dictionary containing project id and project name ans set in bigger dict\n project_names.append({'id': project['id'], 'name': project['name']})\n\n return project_names", "def active_projects(self):\n return self.projects.filter(active=True)", "def getprojects(self):\n resp = self.conn.request('GET', self.URLS['allprojects'], dict(api_key=self.api_key))\n data = resp.data.decode('utf-8')\n jdata = json.loads(data)['projects']\n # Convert nested JSON documents\n for project_index in range(len(jdata)):\n for field in ('options_json', 'templates_json'):\n jdata[project_index][field] = json.loads(jdata[project_index][field])\n # Pass project details dictionaries to constructors, return array\n return [PhProject(self, project) for project in jdata]", "def get_projects(self):\n session = self.session_factory()\n results = [row.project for row in session.query(PipelineRun.project.distinct().label('project')).all()]\n session.close()\n return results", "def test_get_projects_expanded(self):\n pass", "def _get_projects(current_project_name):\n projects = []\n\n unique_project_changes = Change.objects.order_by().values(\n 'project_name').distinct()\n for 
change in unique_project_changes:\n projects.append(change['project_name'])\n\n # sort alphabetically\n projects.sort()\n\n # insert 'all' option as it should be present always\n projects.insert(0, PROJECT_ALL)\n\n # if current_project_name is valid, make it the first element in list so\n # that it shows up as selected in project choice drop down\n if current_project_name != PROJECT_ALL and current_project_name in projects:\n projects.remove(current_project_name)\n projects.insert(0, current_project_name)\n elif current_project_name != PROJECT_ALL:\n logging.error(\"Currently selected project %s not found in any changes.\"\n \" Removing from list.\", current_project_name)\n logging.debug(\"Returning list of projects: %r\", projects)\n return projects", "def get_projects(self):\n return self.http_call(\"get\", url=f\"{self.base_url}/projects\").json()", "def projects(self):\r\n return p.Projects(self)", "def get_projects(selection):\n project=[]\n\n # project with keyname\n\n if 'project' in selection.facets:\n project+=selection.facets['project']\n\n\n # project without keyname\n\n # WARNING\n #\n # The code below uses sdinference and query the database to retrieve ESGF parameters.\n # Doing those things here may raise circular dependencies\n # as well as making the whole thing very complex.\n #\n # We do this to make this syntax work (i.e. project value without key)\n # synda search GeoMIP\n #\n # Note that this syntax always works (i.e. load the project level default file), even without this code.\n # synda search project=GeoMIP\n #\n #\n pending_projects=sdearlystreamutils.get_facet_values_early([selection.facets],'project',extract_item=True) # project without keyname or project as part of an identifier.\n\n li=pending_projects+project\n\n li=list(set(li)) # remove duplicate\n\n return li", "def get_all_project_records():\r\n records = flask.request.db_api.get_all_project_record()\r\n return flask.jsonify(records=records)", "def getProjectsQueryForEvalForOrgs(org_keys):\n query = getProjectsQueryForOrgs(org_keys)\n query.filter(\n 'status IN', [project_model.STATUS_ACCEPTED, 'failed', 'completed'])\n return query", "def get_projects(self):\n projects = []\n page = 1\n while not len(projects) % 100:\n projects += self._get('/projects?{0}'.format(urllib.urlencode({'per_page': 100, 'page': page})))\n if not projects:\n break\n page += 1\n return projects", "def get_projects_of_user(self, user_id):\n res = self.conn.cursor().execute(\"\"\"SELECT * FROM projects p JOIN users_projects up \n ON p.id = up.project_id \n WHERE owner=? 
OR up.user_id=?\n GROUP BY p.id\n ORDER BY last_update DESC\"\"\", (user_id, user_id,))\n return res.fetchall()", "def project_list(cursor):\n query = \"SELECT * FROM projects\"\n try:\n cursor.execute(query, {},)\n except Exception as e:\n on_error(e)\n else:\n projects = cursor.fetchall()\n raise Return((projects, None))", "def query_project(self, project_query_options):\n\n query = \"select * from project where \"\n row_names = [\"Proj_ID\", \"Cus_ID\", \"Emp_ID\", \"Proj_Date\",\n \"Proj_Descrpt\", \"Proj_EstDateSt\", \"Proj_EstDateEnd\",\n \"Proj_EstBudget\", \"Proj_ActDateSt\",\n \"Proj_ActDateEnd\", \"Proj_ActCost\"]\n\n entries = project_query_options\n options_index = []\n arguments = []\n\n index = 0\n for item in entries:\n if item is not None:\n arguments.append(item)\n options_index.append(index)\n index += 1\n\n count = 0\n for arg in arguments:\n if count == 0:\n query = query + \"{}='{}' \".format(\n row_names[options_index[count]],\n arg)\n else:\n query = query + \"and {}='{}' \".format(\n row_names[options_index[count]],\n arg)\n count += 1\n\n try:\n self.dbCursor.execute(query)\n return self.dbCursor.fetchall()\n except mysql.connector.Error as err:\n ErrorMessageWindow(err)", "def get_projects(cls):\n projects = []\n for project in Project.select():\n project_dict = {\n \"id\": project.id,\n \"title\": project.title,\n \"link\": project.link,\n \"description\": project.description\n }\n projects.append(project_dict)\n return projects", "def get_public_projects_query():\n return Q(access_policy=AccessPolicy.OPEN)", "def get_reanalysis_projects_by_accession(self, accession):\n request_url = self.api_base_url + \"projects/reanalysis/\" + accession\n headers = {\"Accept\": \"application/JSON\"}\n response = Util.get_api_call(request_url, headers)\n return response.json()" ]
[ "0.6504841", "0.6427155", "0.6327203", "0.6314313", "0.6288553", "0.62257266", "0.62068164", "0.6192607", "0.6165686", "0.6157085", "0.6130755", "0.61147606", "0.61008775", "0.60764706", "0.60260594", "0.60055494", "0.5962676", "0.5942284", "0.5927183", "0.58933854", "0.5890941", "0.58865345", "0.5879308", "0.5842099", "0.5814713", "0.58113974", "0.58000505", "0.5764027", "0.57557017", "0.5742416" ]
0.72116566
0
The Simple Moving Average (SMA) is calculated by adding the price of an instrument over a number of time periods and then dividing the sum by the number of time periods. The SMA is basically the average price of the given time period, with equal weighting given to the price of each period. Simple Moving Average: SMA = Sum(Price, n) / n
def SimpleMovingAverage(self, timeperiod = 14): return ta.SMA(self.data.close,timeperiod)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def SMA(serie, n):\r\n\r\n return serie.rolling(window=n).mean()", "def get_SMA(values, window=20):\n\treturn values.rolling(window, center=False).mean()", "def sma(matrix, interval):\n\n # declare empty SMA numpy array\n s = np.zeros((matrix.shape[0] - interval))\n\n # calculate the value of each point in the Simple Moving Average array\n for t in range(0, s.shape[0]):\n s[t] = np.sum(matrix[t:t + interval])/interval\n\n return s", "def sma(matrix, interval):\n\n # declare empty SMA numpy array\n s = np.zeros((matrix.shape[0] - interval))\n\n # calculate the value of each point in the Simple Moving Average array\n for t in range(0, s.shape[0]):\n s[t] = np.sum(matrix[t:t + interval])/interval\n\n return s", "def add_simple_moving_average(smas, n, data):\n total = sum([data[-1-i] for i in range(n)])\n smas.append(total/n)", "def SMA(self, n=PERIOD_7, **kwargs):\n\n prices = self.df.close\n\n sma = prices.rolling(n, min_periods=MIN_PERIOD).mean()\n\n self.df[\"sma_\" + str(n)] = sma\n\n return sma", "def get_sma(self,period):\n #df=pandas.DataFrame()\n sma=self.close.rolling(period).mean()\n return sma", "def sma(self) -> float:\n return self._sma", "def get_moving_average(close, span):\n i = SMAIndicator(close, window=span)\n return i.sma_indicator()", "def sma(self, normalize=False, window=20):\n adj_close = self.daily['Adj Close']\n if normalize: adj_close = self.normalize(adj_close)\n sma = adj_close.rolling(window).mean()\n return sma", "def SMA(df, base, target, period):\n\n df[target] = df[base].rolling(window=period).mean()\n df[target].fillna(0, inplace=True)\n\n return df", "def arima_sma(prices, signal, name):\n\n sma_window = signal['params']['sma_window']\n sma_close = talib.SMA(prices['close'], sma_window).to_numpy()[:, None]\n signal['data'] = arima(sma_close, signal['params']['arima_window'], name)", "def test_sma(self):\n periods = 200\n sma_qufilab = qufilab.sma(self.close, periods)\n sma_talib = talib.SMA(self.close, periods)\n np.testing.assert_allclose(sma_qufilab, sma_talib, rtol = self.tolerance)", "def sma(y, n):\n N = len(y) - n\n if n < 0:\n raise ValueError(\"Input doesn't contain enough data for moving average.\")\n\n out = [y[i:i+n].mean() for i in range(len(y) - n)]\n out = np.array(out)\n\n return out", "def get_sma(data):\n if data is None:\n raise EmptyDataError('[!] 
Invalid data value')\n\n result = TA.SMA(data)\n if result is None:\n raise IndicatorException\n return result", "def moving_average(data, beta):\n avg = 0\n maverages = []\n for i in range(len(data)):\n avg = avg * beta + (1 - beta) * data[i]\n maverages.append(avg / (1 - (beta ** (i + 1))))\n return maverages", "def moving_average(a, n=3) :\n ret = np.cumsum(a, dtype=float)\n ret[n:] = ret[n:] - ret[:-n]\n return ret[n - 1:] / n", "def ma(df, close_price_col_name=\"Close\", ma_col_name=\"MA\"):\r\n\r\n\t# Check N positive integer\r\n\twhile True:\r\n\r\n\t\tN = input(\"Please input period for moving average model (a positive integer (recommend: 10, 20, 50, 100, or 200 )): \")\r\n\r\n\t\ttry:\r\n\t\t\tif int(N) > 0:\r\n\t\t\t\tbreak\r\n\r\n\t\t\telif \".\" in N:\r\n\t\t\t\tprint(\"Please enter a positive integer, not a float \")\r\n\t\t\t\tcontinue\r\n\r\n\t\t\telif int(N) < 0:\r\n\t\t\t\tprint(\"Please enter a positive integer, not a negative one \")\r\n\t\t\t\tcontinue\r\n\r\n\t\texcept ValueError:\r\n\t\t\tprint(\"Please input a positive integer, not a string\")\r\n\t\t\tcontinue\r\n\r\n\t# Add column to store value of MA\r\n\tdf[ma_col_name] = df[close_price_col_name].rolling(window=int(N), min_periods=0).mean()\r\n\r\n\t# Plot\r\n\tplt.plot(df[close_price_col_name], label=\"Closing price\")\r\n\tplt.plot(df[ma_col_name], label=\"Moving average \" + N + \" days\")\r\n\tplt.title(\"Visualization of Moving Average \" + N + \" days\")\r\n\tplt.xlabel(\"Date\")\r\n\tplt.ylabel(\"Closing price\")\r\n\tplt.legend(loc='upper left')\r\n\tplt.show()\r\n\r\n\tdel df[ma_col_name] # delete the MA column for re-graphing\r", "def EMA_tick(n_periods, current_value, previous_ema):\n\n most_recent_weight = 2 / (n_periods + 1)\n return (current_value - previous_ema) * most_recent_weight + previous_ema", "def ema(s, n):\r\n\r\n ema = []\r\n j = 1\r\n\r\n #get n sma first and calculate the next n period ema\r\n sma = sum(s[:n]) / n\r\n multiplier = 2 / float(1 + n)\r\n ema.append(sma)\r\n\r\n #EMA(current) = ( (Price(current) - EMA(prev) ) x Multiplier) + EMA(prev)\r\n ema.append(( (s[n] - sma) * multiplier) + sma)\r\n\r\n #now calculate the rest of the values\r\n for i in s[n+1:]:\r\n tmp = ( (i - ema[j]) * multiplier) + ema[j]\r\n j = j + 1\r\n ema.append(tmp)\r\n\r\n return ema", "def moving_average(self, a, n=3):\n ret = np.nancumsum(a, dtype=float)\n ret[n:] = ret[n:] - ret[:-n]\n return ret[n - 1:] / n", "def moving_average(data, period, type='simple'):\n\ttry:\n\t\tx = np.asarray(data['Adj Close'])\n\texcept:\n\t\tx = np.asarray(data)\n\n\tif type == 'simple':\n\t\tweights = np.ones(period)\n\telse:\n\t\tweights = np.exp(np.linspace(-1., 0., period))\n\n\tweights /= weights.sum()\n\n\ta = np.convolve(x, weights, mode='full')[:len(x)]\n\ta[:period] = a[period]\n\treturn a", "def moving_average(a, n=3) :\r\n a = a.ravel()\r\n a = np.concatenate(([a[0]]*(n-1),a)) # repeating first values\r\n ret = np.cumsum(a, dtype = float)\r\n ret[n:] = ret[n:] - ret[:-n]\r\n ret=ret[n - 1:] / n\r\n return ret", "def simple_moving_average(n, data):\n result = []\n for m in range(n-1, len(data)):\n total = sum([data[m-i] for i in range(n)])\n result.append(total/n)\n return result", "def moving_average(a, n=5):\n ret = np.cumsum(a, dtype=float)\n ret[n:] = ret[n:] - ret[:-n]\n ret[n-1:] *= 1 / n\n ret[:n-1] *= 1 / np.arange(1, n)\n return ret", "def TAS(px, high, low, w=10, n=3):\r\n\r\n minn = low.rolling(window=w).min() # min de minimos\r\n maxx = high.rolling(window=w).max() # max de maximos\r\n\r\n k = 100 * (px 
- minn) / (maxx - minn)\r\n d = SMA(k, n)\r\n return k, d", "def compute_EMA(self, series, num_days=50):\n temp = series.copy().reset_index(drop=True) # DO NOT MODIFY THE ORIGINAL DATAFRAME!\n smoothing_factor = 2/(num_days+1)\n EMA_prev = 0.0\n for idx in range(len(temp)):\n EMA_current = (temp[idx]*smoothing_factor)+EMA_prev*(1-smoothing_factor)\n # update values for next iteration\n temp[idx] = EMA_current\n EMA_prev = EMA_current \n return temp", "def WMA(serie, n=10):\r\n wg = np.arange(1, n+1)\r\n wma = serie.rolling(n).apply(lambda x: np.dot(x, wg)/wg.sum(), raw=True)\r\n\r\n return wma", "def EMA(serie, n):\r\n\r\n ewm = serie.ewm(n, adjust=False).mean()\r\n ewm[0:n] = [np.nan]*n\r\n return ewm", "def SMA(A: pd.DataFrame, n) -> pd.DataFrame:\r\n At = pivot_table(A)\r\n for i in range(len(At.columns)):\r\n At.iloc[:, i] = talib.SMA(At.iloc[:, i], n)\r\n res = stack_table(At)\r\n return res" ]
[ "0.7370013", "0.73070514", "0.72123134", "0.72123134", "0.71889323", "0.71044785", "0.69947493", "0.6878451", "0.6826726", "0.67439926", "0.6694565", "0.6685296", "0.6657218", "0.6633062", "0.6631047", "0.6587248", "0.6556056", "0.6541206", "0.6492296", "0.64602447", "0.64139616", "0.64041024", "0.6383691", "0.6383059", "0.63778275", "0.63331485", "0.6326961", "0.63199466", "0.6312502", "0.63074315" ]
0.8110601
0
Average True Range is a lagging indicator, used to provide insights into volatility.
def AverageTrueRange(self, timeperiod = 14): return ta.ATR(self.data.high, self.data.low, self.data.close, timeperiod)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def average_true_range(self, period=14):\n tr = self._true_range_computation(period=period * 2)\n return pd.Series(tr.rolling(center=False, window=period,\n min_periods=period - 1).mean(),\n name='{} day ATR Ticker: {}'.format(period,\n self.ticker)).tail(\n period)", "def should_average(self):\n return self._should_average", "def rollingAvg( lag, oldSet ):\r\n\r\n newSet = []\r\n\r\n # insert lag-1 number of nans at beginning of list\r\n for i in range(0, lag - 1):\r\n newSet.append(Decimal('nan'))\r\n\r\n # calculate new values for list\r\n for i in range((lag - 1), len(oldSet)):\r\n sum = 0\r\n for j in range(lag):\r\n sum += oldSet[i - j]\r\n\r\n avg = sum / Decimal(lag)\r\n newSet.append(Decimal(avg))\r\n\r\n return newSet", "def atr(df, lag, normalize=False):\n\n def _true_range(window):\n divisor = (1.0 * float(not normalize)) + ((float(normalize) * window[-1][\"c\"]))\n\n tr1 = window[-1][\"h\"] - window[-1][\"l\"]\n tr2 = window[-1][\"h\"] - window[-2][\"c\"]\n tr3 = window[-1][\"l\"] - window[-2][\"c\"]\n return max(tr1, tr2, tr3) / divisor\n\n def _sma(window):\n avg = round(reduce(lambda a, b: a + b, window) / len(window), 2)\n return avg\n\n tr = [_true_range(df[i : i + 2]) for i in range(len(df) - 1)]\n return [_sma(tr[i : i + lag + 1]) for i in range(len(tr) - lag)]", "def get_avg_range(range_array):\n # Average the ranges\n range_count = 0\n range_accum = 0.0\n\n if range_array:\n # Accumulate the data\n for beam in range(len(range_array)):\n if range_array[beam] > 0.0 and not Ensemble.is_bad_velocity(range_array[beam]):\n range_count += 1\n range_accum += range_array[beam]\n\n if range_count > 0:\n return range_accum / range_count\n else:\n return 0.0", "def AverageTrueRangeStopLoss(self, timeperiod = 14, multiplier = 2):\r\n stopLoss = ta.ATR(self.data.high, self.data.low, self.data.close, timeperiod)\r\n \r\n plus_dm = ta.PLUS_DM(self.data.high,self.data.low, timeperiod)\r\n minus_dm = ta.MINUS_DM(self.data.high,self.data.low, timeperiod)\r\n \r\n if plus_dm > minus_dm:\r\n stopLoss = self.data.close - multiplier * stopLoss\r\n else:\r\n stopLoss = self.data.close + multiplier * stopLoss\r\n \r\n\r\n stopLoss.dropna(inplace=True) \r\n \r\n return stopLoss", "def conditional_mean(self, gp):\n raise NotImplementedError", "def indicator_logic(self, candle):\n # Initialize variables\n sma, upper, lower = 2, -1.0, -1.0 # 'sma' = 2 is clever way to generate 'a favor' e 'contra'\n\n # Append close to moving average\n self.ma.append(candle.close[self.up])\n\n # Check if there are enough candles to calculate moving average\n if len(self.ma) == self.period:\n\n # Initialize upper and lower values for when there is a valid moving average\n upper, lower = 0.0, 0.0\n\n # Calculates moving average\n avg = sum(self.ma) / self.period\n\n # Tells if current close is above moving average\n sma = 1 if candle.close[self.up] > avg else 0\n\n # Calculates standard deviation\n std = pstdev(self.ma)\n\n # Calculates difference between current candle and moving average\n diff = candle.close[self.up] - avg\n\n # Transform difference to standard deviations\n if diff > 0 and std != 0:\n # Value of above\n upper = diff / std\n elif diff < 0 and std != 0:\n # Value if below\n lower = -diff / std\n\n # Returns values\n return sma, upper, lower", "def __call__(self, x):\n return np.mean(self.observations <= x)", "def average_age(self, start=1, end=None):\n picks = self.pick_set.filter(number__gte=start)\n if end is not None:\n picks = picks.filter(number__lte=end)\n\n dt = datetime.date(self.year, 
1, 1)\n ages = [e.player.age(dt) for e in picks]\n ages = [e for e in ages if e]\n average = sum(ages) / len(ages)\n return average", "def step_change(data, span=10, lag=1):\n moving_average = data.ewm(span=span).mean()\n lagged = pd.Series(np.append(np.repeat(np.nan, lag), moving_average[:len(moving_average)-lag]))\n diffs = data[lag:] - lagged\n pct_diff = diffs/moving_average\n max_diff = max(pct_diff)\n mean_diff = np.mean(pct_diff)\n return moving_average, diffs, pct_diff, max_diff, avg_diff", "def average(self,start_window, end_window):\n query = f\"select avg(age) from `{self.table_id}` where timestamp between {start_window} and {end_window}\"\n query_job = self.client.query(query)\n return query_job.result", "def _get_average(self):\n norm = 1.0\n for pos, idx in enumerate(self.idx):\n norm *= (self.high[pos] - self.low[pos])\n return 1.0/norm", "def slg_average(df,start_year,end_year,bat_met,player_name):\n base_fields = ['AB','HR','X3B','X2B','SLG']\n emp_list = check_base_fields(df,base_fields)\n\n if not emp_list:\n df['X1B'] = round(df['SLG']*df['AB'] - (4*df['HR'] + 3*df['X3B'] + 2*df['X2B']),0)\n return round((df['X1B'].sum(axis = 0) + df['X2B'].sum(axis = 0) * 2 + df['X3B'].sum(axis = 0) * 3 + df['HR'].sum(axis = 0) * 4) / df['AB'].sum(axis = 0),3)\n\n else:\n df = original_dataframe(start_year,end_year,bat_met+emp_list,player_name)\n df['X1B'] = round(df['SLG']*df['AB'] - (4*df['HR'] + 3*df['X3B'] + 2*df['X2B']),0)\n SLG = round((df['X1B'].sum(axis = 0) + df['X2B'].sum(axis = 0) * 2 + df['X3B'].sum(axis = 0) * 3 + df['HR'].sum(axis = 0) * 4) / df['AB'].sum(axis = 0),3)\n del df['X1B']\n return SLG", "def mean_error_rate(y_true, y_interval):\n _check_interval_array(y_interval)\n\n wrong_intervals = ((y_true < y_interval[:, 0]) | (y_true > y_interval[:, 1])).sum()\n\n return wrong_intervals / y_true.shape[0]", "def average(self):\n return (self.current + self.last) / 2.0", "def _get_acc(logits: torch.Tensor, target: torch.Tensor) -> torch.Tensor:\n predictions = logits > 0\n target_bool = target > 0.5\n return (predictions == target_bool).float().mean()", "def mean_in_range(arr, args):\n mn = np.mean(arr)\n res = mn > args[0] and mn < args[1]\n return ct.Result(res, 'mean_in_range')", "def calc_meanadiff(sig):\n\n return np.mean(abs(np.diff(sig)))", "def load_average(self):\n return _favg(self.load_samples)", "def get_average_MAE(true_pred_df): \n age_group = true_pred_df.groupby('y_true')\n \n mae_average = []\n for age, age_data in age_group:\n mae_average.append(np.mean(age_data.mae))\n \n return mae_average", "def mean_vol(df):\n return df.tail(5)['volume'].mean(), df.tail(20)['volume'].mean()", "def moving_avg(df, key, lag):\n\n def _sma(key, window):\n values = list(map(lambda w: w[key], window))\n avg = round(reduce(lambda a, b: a + b, values) / len(values), 2)\n return avg\n\n return [_sma(key, df[i : i + lag + 1]) for i in range(len(df) - lag)]", "def soft_acc(self, y_true, y_pred):\n try:\n score= backend.mean(backend.abs(y_true - y_pred) <= self.prediction_tolerance)\n except Exception:\n pass\n return score", "def take_one_averaged(self):\n self.na.set_center_frequency(6.160574e9)\n self.na.set_span(10e6)\n self.na.set_power(-5, 1)\n self.na.set_ifbw(1e3)\n\n self.na.set_query_timeout(40e3)\n set_format = self.na.set_format('polar')\n print \"set_format returned: \", set_format\n self.na.set_trigger_source(\"manual\")\n self.na.set_averages(10)\n self.na.set_trigger_average_mode()\n\n self.na.clear_averages(channel=1)\n self.na.trigger_single(channel=1)\n fpts, 
xs, ys = self.na.read_data()\n #\n plt.figure()\n plt.plot(fpts, xs)\n plt.plot(fpts, ys)\n plt.show()", "def mean(vals):", "def test_avg_l(self):\n u_spec = leabra.UnitSpec(g_bar_e=0.3, g_bar_l=0.3, g_bar_i=1.0)\n u = leabra.Unit(spec=u_spec)\n\n for _ in range(20):\n u.add_excitatory(1.0)\n u.calculate_net_in()\n u.cycle('minus')\n\n self.assertEqual(u.avg_l, 0.40)\n u.spec.update_avg_l(u)\n self.assertTrue(np.allclose(0.52, u.avg_l, rtol=0.1, atol=0.1))\n #TODO: verify that 0.52 is the value of emergent\n\n for _ in range(100):\n u.spec.update_avg_l(u)\n self.assertTrue(np.allclose(1.64, u.avg_l, rtol=0.1, atol=0.1))\n #TODO: verify that 1.64 is the value of emergent", "def mean_average_position():\n pass", "def averaged_risk(self):\n return self._averaged_risk", "def averaged_risk(self):\n return self._averaged_risk" ]
[ "0.6600089", "0.6148675", "0.6051582", "0.59976155", "0.57290894", "0.5724756", "0.5678576", "0.56369007", "0.55565417", "0.5463608", "0.5455246", "0.5452944", "0.54398197", "0.5430366", "0.5412434", "0.53751826", "0.5327749", "0.5327182", "0.5326094", "0.52998155", "0.52757585", "0.52621", "0.5249347", "0.5246109", "0.5239227", "0.5238913", "0.52334976", "0.52284217", "0.52228266", "0.52228266" ]
0.6382062
1
Starting at the current column header, shift to the right col_shift times
def get_header(col_current, col_shift):
    header = col_current
    for i in range(col_shift):
        header = header.right
    return header
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def shift_column(self, coords, direction):\n self.shift_cells(self.get_column(coords, direction), direction)", "def rollback(self) -> None:\n for k in self._moved_cols:\n self._cols[k].move_back()", "def shift_right(self):\n self.pointer = (self.pointer + 1) % len(self.data)", "def new_column( self, delta = 1, ):\n self.ix_row = 0\n self.ix_col += delta", "def draw_next_column(self):\n self.xPos += self.XCOLUMNSKIP + self.XCOLUMNSEP\n self.yPos = self.YORIGIN + Blender.Window.GetAreaSize()[1]", "def cols(self, col):\n self.col += col", "def col_data_mover_at(row, col):\n if col == 0:\n return NAME_SCHEME[\"memory move\"].format(prefix=f\"l{row}\")\n else:\n return NAME_SCHEME[\"register move right\"].format(pe=f\"pe_{row}_{col - 1}\")", "def _reset_header(self):\n new_header = []\n for col_name in self.header:\n is_left = self.left_cols.get(col_name)\n if is_left:\n new_header.append(col_name)\n self.header = new_header", "def shift(row):\r\n new_lst = []\r\n for i in range(4):\r\n if row[i] != 0:\r\n new_lst.append(row[i])\r\n if len(new_lst) < len(row):\r\n new_lst.extend([0] * (len(row) - len(new_lst)))\r\n row = new_lst\r\n\r\n return row", "def _modify_columns(self, cols, X, y=None):", "def _drag_col(self, event):\n x = self._dx + event.x # get dragged column new left x coordinate\n self._visual_drag.place_configure(x=x) # update column preview position\n # if one border of the dragged column is beyon the middle of the\n # neighboring column, swap them\n if (self._dragged_col_neighbor_widths[0] is not None and\n x < self._dragged_col_x - self._dragged_col_neighbor_widths[0] / 2):\n self._swap_columns('left')\n elif (self._dragged_col_neighbor_widths[1] is not None and\n x > self._dragged_col_x + self._dragged_col_neighbor_widths[1] / 2):\n self._swap_columns('right')\n # horizontal scrolling if the cursor reaches the side of the table\n if x < 0 and self.xview()[0] > 0:\n # scroll left and update dragged column x coordinate\n self.xview_scroll(-10, 'units')\n self._dragged_col_x += 10\n elif x + self._dragged_col_width / 2 > self.winfo_width() and self.xview()[1] < 1:\n # scroll right and update dragged column x coordinate\n self.xview_scroll(10, 'units')\n self._dragged_col_x -= 10", "def _shift(self, s):\n start_pos = self._relative_head_pos()\n l = 1 + 2 * self.shift_length\n shift = int(s * l - 0.000000001) - int(l / 2)\n for s in range(abs(shift)):\n if shift > 0:\n if self.head_pos == len(self.memory) - 1 and len(self.memory) < self.max_memory:\n self.memory = np.concatenate((self.memory, np.zeros((1, self.memory_unit_size))), 0)\n self.head_pos += 1\n else:\n self.head_pos = (self.head_pos + 1) % self.max_memory\n else:\n if self.head_pos == 0 and len(self.memory) < self.max_memory:\n self.memory = np.concatenate((np.zeros((1, self.memory_unit_size)), self.memory), 0)\n self.left_expands += 1\n else:\n self.head_pos = (self.head_pos - 1) % self.max_memory\n if self.history is not None:\n self.history[\"loc\"][-1].append((start_pos, 0.1))\n return np.sign(shift)", "def _swapcolumns(self):\n return self.reindex_axis([self.columns[1], self.columns[0]], axis=1)", "def rel_shift(x, klen=-1):\n x_size = x.shape\n\n x = x.reshape(x_size[1], x_size[0], x_size[2], x_size[3])\n x = x[1:, ...]\n x = x.reshape(x_size[0], x_size[1] - 1, x_size[2], x_size[3])\n # x = x[:, 0:klen, :, :]\n x = torch.index_select(x, 1, torch.arange(klen, device=x.device, dtype=torch.long))\n\n return x", "def __rshift__(self, other: Any) -> ColumnOperators:\n return self.operate(rshift, other)", "def 
_shift(BD):\n bsz, n_head, max_len, _ = BD.size()\n zero_pad = layers.zeros(shape=(bsz, n_head, max_len, 1))\n BD = layers.reshape(x=layers.concat([BD, zero_pad], axis=-1),\n shape=(bsz, n_head, -1, max_len))\n BD = layers.reshape(x=BD[:, :, :-1], shape=(bsz, n_head, max_len, -1))\n BD = BD[:, :, :, max_len:]\n return BD", "def shift_column(code, n, s):\n def shift(s, n):\n if n == 0 or len(s) == 1:\n return s\n else:\n return shift(s[-1] + s[:-1], n-1)\n\n if type(code) is not list:\n return code\n else:\n n = int(n)\n s = int(s) % len(code)\n if s > 0 and n < len(code[0]):\n column = select_column(code, n)\n column = shift(column, s)\n for i in range(0, len(column)):\n new = list(code[i])\n new[n] = column[i]\n code[i] = ''.join(new)\n return code\n else:\n return code", "def rshift(self):\n self.lcd_byte(0x1C, LCD_CMD)", "def _rel_shift_legacy(self, xs):\n bs, qlen, klen, n_heads = xs.size()\n xs = xs.permute(1, 2, 0, 3).contiguous().view(qlen, klen, bs * n_heads)\n zero_pad = xs.new_zeros((qlen, 1, bs * n_heads))\n xs_shifted = torch.cat([zero_pad, xs], dim=1).view(klen + 1, qlen, bs * n_heads)[1:].view_as(xs)\n return xs_shifted.view(qlen, klen, bs, n_heads).permute(2, 0, 1, 3)", "def appforth(df, line):\n df.loc[-1]=line\n df.index = df.index + 1 # shifting index\n df = df.sort_index() # sorting by index\n return df", "def shift(self, direction):\n direct, pos = tuple(direction)\n\n board = {'L': self.rows, 'R': self.rows, 'D': self.cols, 'U': self.cols}[direct]\n board[int(pos)].shift(direction=self.direct[direct])", "def resize_last_column(self):\n\n total_width = 0\n last_width = 0\n last_column = -1\n for i in range(0, self.column_count):\n w = self.GetColumnWidth(i)\n total_width += w\n last_width = w\n if w > 0:\n last_column = i\n\n if total_width < (self.GetSize()[0] - 20) and last_column > -1:\n self.SetColumnWidth(last_column, last_width + self.GetSize()[0] - total_width)", "def shift(self, days):\n # Since to predict close price of day n we need the indicators\n # of day n-1 we move the above columns days_forward to the bottom\n self.df['MA'] = self.df['MA'].shift(days)\n self.df['WMA'] = self.df['WMA'].shift(days)\n self.df['MOM'] = self.df['MOM'].shift(days)\n self.df['STOCH'] = self.df['STOCH'].shift(days)\n self.df['STOCHD'] = self.df['STOCHD'].shift(days)\n self.df['MACD'] = self.df['MACD'].shift(days)\n self.df['WILLIAMS'] = self.df['WILLIAMS'].shift(days)\n self.df['ADL'] = self.df['ADL'].shift(days)\n self.df['CCI'] = self.df['CCI'].shift(days)\n if 'sent_trends' in self.df.columns:\n self.df['sent_trends'] = self.df['sent_trends'].shift(days)\n\n # Drop rows with nan\n self.df.dropna(inplace=True)", "def add_end_caps(self):\n\n # Far top dummy row (first row above array is NOT flipped if even number of rows)\n flip_dummy = (self.row_size + self.rbl[1]) % 2\n dummy_row_offset = self.bitcell_offset.scale(0, self.rbl[1] + flip_dummy) + self.bitcell_array_inst.ul()\n self.dummy_row_insts[1].place(offset=dummy_row_offset,\n mirror=\"MX\" if flip_dummy else \"R0\")\n\n # Far bottom dummy row (first row below array IS flipped)\n flip_dummy = (self.rbl[0] + 1) % 2\n dummy_row_offset = self.bitcell_offset.scale(0, -self.rbl[0] - 1 + flip_dummy) + self.unused_offset\n self.dummy_row_insts[0].place(offset=dummy_row_offset,\n mirror=\"MX\" if flip_dummy else \"R0\")\n # Far left dummy col\n # Shifted down by the number of left RBLs even if we aren't adding replica column to this bitcell array\n dummy_col_offset = self.bitcell_offset.scale(-len(self.left_rbl) - 1, -self.rbl[0] 
- 1) + self.unused_offset\n self.dummy_col_insts[0].place(offset=dummy_col_offset)\n\n # Far right dummy col\n # Shifted down by the number of left RBLs even if we aren't adding replica column to this bitcell array\n dummy_col_offset = self.bitcell_offset.scale(len(self.right_rbl), -self.rbl[0] - 1) + self.bitcell_array_inst.lr()\n self.dummy_col_insts[1].place(offset=dummy_col_offset)", "def adjustFrame(frame, shifts):\n if min(shifts)<0:\n botShifts = [colShift-min(shifts) for colShift in shifts]\n else:\n botShifts = [colShift for colShift in shifts]\n topShifts = [max(botShifts)-shift for shift in botShifts]\n newFrame=np.empty([frame.shape[1],frame.shape[0]+max(botShifts)])\n for i, col in enumerate(frame.T):\n newCol = np.concatenate((np.zeros(topShifts[i]),col,np.zeros(botShifts[i])))\n newFrame[i]=newCol\n newFrame=newFrame.T\n \n return newFrame", "def adjustFrame(frame, shifts):\n if min(shifts)<0:\n botShifts = [colShift-min(shifts) for colShift in shifts]\n else:\n botShifts = [colShift for colShift in shifts]\n topShifts = [max(botShifts)-shift for shift in botShifts]\n newFrame=np.empty([frame.shape[1],frame.shape[0]+max(botShifts)])\n for i, col in enumerate(frame.T):\n newCol = np.concatenate((np.zeros(topShifts[i]),col,np.zeros(botShifts[i])))\n newFrame[i]=newCol\n newFrame=newFrame.T\n \n return newFrame", "def shift(self, col_shift=0, row_shift=0):\n\n if (self.min_col + col_shift <= 0\n or self.min_row + row_shift <= 0):\n raise ValueError(\"Invalid shift value: col_shift={0}, row_shift={1}\".format(col_shift, row_shift))\n self.min_col += col_shift\n self.min_row += row_shift\n self.max_col += col_shift\n self.max_row += row_shift", "def width(self, width):\n self.col += width", "def __lshift__(self, other):\n for c in self.__table__.columns:\n self.__setattr__(c.name, other.__getattribute__(c.name))", "def __lshift__(self,fpath):\n raise NotImplemented" ]
[ "0.6121716", "0.59776706", "0.5851196", "0.5847812", "0.57968843", "0.5760453", "0.566838", "0.5658573", "0.558538", "0.5567467", "0.5556038", "0.5546835", "0.55436087", "0.5541556", "0.5532854", "0.5511011", "0.5500459", "0.5493521", "0.5485904", "0.54635084", "0.5462295", "0.54582185", "0.54580015", "0.5441772", "0.5436838", "0.5436838", "0.5415506", "0.54096705", "0.54043746", "0.5399213" ]
0.69813544
0
Remove the specified column header from the header chain. All rows that appear in this column are also removed.
def remove_col(self, col_header):
    # Remove the column header from the header chain
    col_header.right.left = col_header.left
    col_header.left.right = col_header.right
    # Loop down through the column and remove the rows
    cell = col_header.down
    while cell != col_header:
        row_cell = cell.right
        # Move through all cells in this row and update their up/down links
        while row_cell != cell:
            row_cell.down.up = row_cell.up
            row_cell.up.down = row_cell.down
            row_cell.header.sum -= 1
            # Move on to the next cell in the row
            row_cell = row_cell.right
        # Move on to the next row
        cell = cell.down
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unremove_col(self, col_header):\n # Add the column head back into the chain\n col_header.right.left = col_header\n col_header.left.right = col_header\n # Loop up through the column and add the rows back in\n # Doing this in exactly the reverse order of the removing ensures that we return\n # to the state we were in before the removal\n cell = col_header.up\n while cell != col_header:\n row_cell = cell.left\n # Move through all cells in this row and update their up/down links\n while row_cell != cell:\n row_cell.down.up = row_cell\n row_cell.up.down = row_cell\n row_cell.header.sum += 1\n # Move on to the next cell in the row\n row_cell = row_cell.left\n # Move on to the next row\n cell = cell.up", "def RemoveColumn(self, column):\r\n\r\n self._header_win.RemoveColumn(column)\r\n self._header_win.Refresh()", "def remove_columns(tx, header, columns_to_remove):\n print(\"\\nRemove columns...\")\n num_removed = 0\n for col in columns_to_remove:\n tx = np.delete(tx, col - num_removed, 1)\n header = np.delete(header, col - num_removed + 2)\n num_removed += 1\n print(\"\\n... finished.\")\n return tx, header", "def header_clean_row(row_of_data):\n header = row_of_data.get('header')[1]\n z = list(set(remove_filler_words([header])))\n return z", "def _remove_column(self, column: str) -> None:\n dtype, loc, order = self._column_info.pop(column).values\n self._data[dtype] = np.delete(self._data[dtype], loc, axis=1)\n if self._data[dtype].shape[1] == 0:\n del self._data[dtype]\n\n for col, col_obj in self._column_info.items():\n if col_obj.dtype == dtype and col_obj.loc > loc:\n col_obj.loc -= 1", "def delColumn(self,column):\n data = self.data\n for rowData in data.values():\n if column in rowData:\n del rowData[column]\n self.hasChanged = True", "def remove_header( self, *names ):\n for name in names:\n del self[ name.strip() ]", "def _reset_header(self):\n new_header = []\n for col_name in self.header:\n is_left = self.left_cols.get(col_name)\n if is_left:\n new_header.append(col_name)\n self.header = new_header", "def RemoveColumn(self, column):\r\n\r\n if column < 0 or column >= self.GetColumnCount():\r\n raise Exception(\"Invalid column\")\r\n \r\n self._total_col_width -= self._columns[column].GetWidth()\r\n self._columns.pop(column)\r\n self._owner.AdjustMyScrollbars()\r\n self._owner._dirty = True", "def _delcolumns(self, columnname, columndata=\"\"):\n\n del self[columnname]", "def removeMeta(self, row, column):\n filePath = self.filesList.selectedItems()[0].text(2)\n metaHeader = (self.metadataList.item(row, 0)).text()\n logging.debug(\"Removing metadata \" + metaHeader + \" from \" + str(filePath))\n self.filesList.removeMeta(filePath, metaHeader, row)", "def removekwd(header, kwd):\n if kwd in header.keys():\n header.remove(kwd)\n return", "def clear_header(self):\n\n if self.terminate:\n return\n\n self.windows['HEADER'].erase()\n # if not self.active_portfolio:\n self.windows['HEADER'].addstr(0, 0, 'Portfolio: None')", "def remove_column(self, name):\n if name not in self.column_names():\n raise KeyError('Cannot find column %s' % name)\n self.__is_dirty__ = True\n try:\n with cython_context():\n if self._is_vertex_frame():\n assert name != '__id', 'Cannot remove \\\"__id\\\" column'\n graph_proxy = self.__graph__.__proxy__.delete_vertex_field(name)\n self.__graph__.__proxy__ = graph_proxy\n elif self._is_edge_frame():\n assert name != '__src_id', 'Cannot remove \\\"__src_id\\\" column'\n assert name != '__dst_id', 'Cannot remove \\\"__dst_id\\\" column'\n graph_proxy = 
self.__graph__.__proxy__.delete_edge_field(name)\n self.__graph__.__proxy__ = graph_proxy\n except:\n self.__is_dirty__ = False\n raise", "def deleteColumn(self, column):\n if (column >= self._width or column <= -self._width):\n raise IndexError('Invalid index, row %d does not exist' % column)\n returnvalue = list()\n self._width -= 1\n for row in self._value:\n returnvalue.append(row.pop(column))\n return returnvalue", "def remove_head_line(self, gtfs_file, path):\n out_list = []\n header = GtfsHeader.return_header(self, gtfs_file).strip()\n in_file = os.path.join(os.path.expanduser(path), '{}.tmp'.format(gtfs_file))\n\n lines = open(in_file).readlines()\n cnt = 0\n for line in lines:\n if header in line:\n cnt += 1\n print('>>> Found header {} in {}.'.format(cnt, gtfs_file))\n lines.remove(line)\n # out_list.append(header.strip())\n\n for line in lines:\n out_list.append(line.strip())\n out_file = in_file\n\n f = open(out_file, 'w')\n for line in out_list:\n f.write('{}\\n'.format(line.strip()))\n f.close()", "def remove_attr(self, key):\n del self.header[key]", "def remove_header(self, name, value=None):\r\n\r\n found_it = 0\r\n\r\n # Remove things from the old dict as well\r\n if (name in self.reply_headers and\r\n (value is None or\r\n self.reply_headers[name] == value)):\r\n del self.reply_headers[name]\r\n found_it = 1\r\n\r\n\r\n removed_headers = []\r\n if not value is None:\r\n if (name, value) in self.__reply_header_list:\r\n removed_headers = [(name, value)]\r\n found_it = 1\r\n else:\r\n for h in self.__reply_header_list:\r\n if h[0] == name:\r\n removed_headers.append(h)\r\n found_it = 1\r\n\r\n if not found_it:\r\n if value is None:\r\n search_value = \"%s\" % name\r\n else:\r\n search_value = \"%s: %s\" % (name, value)\r\n\r\n raise LookupError(\"Header '%s' not found\" % search_value)\r\n\r\n for h in removed_headers:\r\n self.__reply_header_list.remove(h)", "def delcolumn(self, column, accept_small_names=True):\n if column in self.keys():\n self[column] = \"\"\n return\n elif accept_small_names:\n if self[\"__psvcolumnstracker__\"].get(column):\n self.__delattr__(column)\n return\n if not accept_small_names:\n raise ValueError(\"'{}'\".format(column))\n else:\n raise ValueError(\"'{}'. 
Make sure the shorterned columns name have no collisions\".format(column))", "def delete_headers(self, ):\n if self.AttributeNames.HEADERS in self.attrs:\n del self.attrs[self.AttributeNames.HEADERS]\n return self", "def remove_column(df,col_name):\n return df.drop(col_name)", "def remove_columns ( infilename, outfilename, cols_to_remove ):\n xcols = cols_to_remove\n xcols.sort()\n xcols.reverse()\n \n reader = csv.reader( open( infilename, 'rt' ), quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n writer = csv.writer( open( outfilename, 'wb' ), quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n\n for row in reader:\n vals = row\n for x in xcols :\n vals.pop( x )\n writer.writerow( vals )", "def _clean(header):\n # TODO: find a way to identify cubes containing time\n header['ctype1'] = 'HPLN-TAN' # Helioprojective longitude, TAN projection\n header['ctype2'] = 'HPLT-TAN' # Helioprojective latitude, TAN projection\n header['ctype3'] = 'WAVE ' # Wavelength axis, default (TAB) projection\n header['naxis'] = 3\n return header", "def depart_thead(self, node):\n self.Table.add_header_line(\"|\")", "def del_header(self, name):\n key = name.upper()\n if key not in _RESPONSE_HEADER_DICT:\n key = name\n if key in self._headers:\n del self._headers[key]", "def __delitem__(self, key):\n\n del self._headers[key.lower()]", "def removeRow(self, index: int) -> None:\n ...", "def test_remove_columns(self):\n table = Table('table1', key=['col1', 'col2'])[\n Column('col1'),\n Column('col2'),\n Column('col3'),\n Column('col4'),\n ]\n\n table.remove_columns(('col2', 'col3'))\n\n self.assertEqual(2, len(table.columns))\n self.assertEqual('col1', table.columns[0].name)\n self.assertEqual('col4', table.columns[1].name)\n self.assertEqual([], table.key)", "def remove_row(self, row_id):", "def del_header_value(old_rmap, new_rmap, key):\n mapping = rmap.load_mapping(old_rmap)\n del mapping.header[key]\n mapping.write(new_rmap)" ]
[ "0.7807759", "0.7441563", "0.71072763", "0.67735565", "0.6634205", "0.6594297", "0.6533182", "0.6480123", "0.6172724", "0.6143548", "0.6119587", "0.6100099", "0.60919523", "0.6055784", "0.60248214", "0.60123044", "0.60102904", "0.5948839", "0.5927248", "0.5872746", "0.5860796", "0.5850218", "0.58420604", "0.5832519", "0.5811461", "0.57704747", "0.5750552", "0.5731311", "0.5705728", "0.5679308" ]
0.80928975
0
Adds the specified column header back into the header chain. Also adds back all rows that this column removed.
def unremove_col(self, col_header):
    # Add the column head back into the chain
    col_header.right.left = col_header
    col_header.left.right = col_header
    # Loop up through the column and add the rows back in
    # Doing this in exactly the reverse order of the removing ensures that we return
    # to the state we were in before the removal
    cell = col_header.up
    while cell != col_header:
        row_cell = cell.left
        # Move through all cells in this row and update their up/down links
        while row_cell != cell:
            row_cell.down.up = row_cell
            row_cell.up.down = row_cell
            row_cell.header.sum += 1
            # Move on to the next cell in the row
            row_cell = row_cell.left
        # Move on to the next row
        cell = cell.up
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_col(self, col_header):\n # Remove the column header from the header chain\n col_header.right.left = col_header.left\n col_header.left.right = col_header.right\n # Loop down through the column and remove the rows\n cell = col_header.down\n while cell != col_header:\n row_cell = cell.right\n # Move through all cells in this row and update their up/down links\n while row_cell != cell:\n row_cell.down.up = row_cell.up\n row_cell.up.down = row_cell.down\n row_cell.header.sum -= 1\n # Move on to the next cell in the row\n row_cell = row_cell.right\n # Move on to the next row\n cell = cell.down", "def _reset_header(self):\n new_header = []\n for col_name in self.header:\n is_left = self.left_cols.get(col_name)\n if is_left:\n new_header.append(col_name)\n self.header = new_header", "def RemoveColumn(self, column):\r\n\r\n self._header_win.RemoveColumn(column)\r\n self._header_win.Refresh()", "def add_header(self, *column_headers):\n raise NotImplementedError", "def add_header(self, *column_headers):\n header = \"| \"\n header += \" | \".join(column_headers)\n header += \" |\\n\"\n header += '|'\n header += \"|\".join(\"-\" * (len(header) + 2) for header in column_headers)\n header += \"|\\n\"\n self.col_widths = [len(header) for header in column_headers]\n self.result += header", "def add_header(self, *column_headers):\n header = \"<tr>\"\n header += \" \".join(f\"<th>{header}</th> \" for header in column_headers)\n header += \"</tr>\\n\"\n self.result += header", "def remove_columns(tx, header, columns_to_remove):\n print(\"\\nRemove columns...\")\n num_removed = 0\n for col in columns_to_remove:\n tx = np.delete(tx, col - num_removed, 1)\n header = np.delete(header, col - num_removed + 2)\n num_removed += 1\n print(\"\\n... finished.\")\n return tx, header", "def depart_thead(self, node):\n self.Table.add_header_line(\"|\")", "def headers_processor(headers):\n def apply_headers(row_set, row):\n _row = []\n pairs = izip_longest(row, headers)\n for i, (cell, header) in enumerate(pairs):\n if cell is None:\n cell = Cell(None)\n cell.column = header\n if not cell.column:\n cell.column = \"column_%d\" % i\n cell.column_autogenerated = True\n _row.append(cell)\n return _row\n return apply_headers", "def set_column_headers(self, headers):\n if isinstance(self.columns.idx[0], int):\n self.data = [sorted(headers)] + self.data\n\n increment = [i + 1 for i in self.rows.idx]\n self.rows.idx = [0] + increment\n\n elif isinstance(self.columns.idx[0], str):\n datum = {}\n for i, key in enumerate(self.columns.idx):\n datum.update({key: headers[i]})\n self.data = [datum] + self.data\n\n increment = [i + 1 for i in self.rows.idx]\n self.rows.idx = [0] + increment", "def addcolumn(self, column):\n if column not in self.headersindex:\n database = managers.database_manager.get_database(self.owner_id, self.database_id)\n cur = database.get_connection().cursor()\n cur.execute(\"ALTER TABLE \\'%s\\' ADD COLUMN %s\" % (self.name, column.to_declaration()))", "def header_data_columns(head_line, data_cols, header):\n\n colnames = head_line.split(\",\")\n\n # Remove triling blancks and end of lines\n colnames = [x.strip() for x in colnames]\n\n # Difference between columns in the header and in the data\n diff = len(data_cols) - len(colnames)\n\n if diff > 0:\n # Add dum headers\n dums = \"\"\n for idiff in range(diff):\n dums = dums + \",dum\" + str(idiff)\n\n new_head = str(head_line.rstrip()) + dums + \" \\n\"\n header.append(new_head)\n\n elif diff < 0:\n sys.exit(\n \"STOP 
novonix_clean.header_data_columns \\n\"\n + \"REASON less data columns than header names \\n\"\n )\n else:\n header.append(head_line)\n\n return", "def header_clean_row(row_of_data):\n header = row_of_data.get('header')[1]\n z = list(set(remove_filler_words([header])))\n return z", "def _remove_column(self, column: str) -> None:\n dtype, loc, order = self._column_info.pop(column).values\n self._data[dtype] = np.delete(self._data[dtype], loc, axis=1)\n if self._data[dtype].shape[1] == 0:\n del self._data[dtype]\n\n for col, col_obj in self._column_info.items():\n if col_obj.dtype == dtype and col_obj.loc > loc:\n col_obj.loc -= 1", "def _augment_filter(self, header):\n return header", "def add_headers(dataset, headers):\n dataset.columns = headers\n return dataset", "def add_headers(dataset, headers):\n dataset.columns = headers\n return dataset", "def _configure_bintable_header(new_header, table_headers):\n\n # Using a single header to get the column descriptions\n column_info = {}\n for kwd in table_headers[0]:\n if \"TTYPE\" not in kwd:\n continue\n \n colname = table_headers[0][kwd]\n num = kwd.replace(\"TTYPE\", \"\")\n \n cards = []\n for att in ['TTYPE', 'TFORM', 'TUNIT', 'TDISP', 'TDIM']:\n try:\n cards.append(table_headers[0].cards[att+num])\n except KeyError:\n pass # if we don't have info for this keyword, just skip it\n \n column_info[colname] = (num, cards)\n\n # Adding column descriptions and additional info\n for kwd in new_header:\n if \"TTYPE\" not in kwd:\n continue\n \n colname = new_header[kwd]\n num = kwd.replace(\"TTYPE\", \"\")\n \n info_row = column_info.get(colname)\n if not info_row:\n new_header.comments[kwd] = 'column name'\n new_header.comments[kwd.replace(\"TTYPE\", \"TFORM\")] = 'column format'\n continue\n \n info_num = info_row[0]\n cards = info_row[1]\n \n for key, val, desc in cards:\n key_new = key.replace(info_num, num)\n try:\n ext_card = new_header.cards[key_new]\n \n if ext_card[1]:\n val = ext_card[1]\n if ext_card[2]:\n desc = ext_card[2]\n \n new_header[key_new] = (val, desc)\n except KeyError: # card does not already exist, just add new one\n new_header.set(key_new, val, desc, after=kwd)\n\n # Adding any additional keywords from the original cutout headers\n shared_keywords = _combine_headers(table_headers, constant_only=True)\n for kwd in shared_keywords:\n if kwd in new_header: # Don't overwrite anything already there\n continue\n\n if any(x in kwd for x in [\"WCA\", \"WCS\", \"CTY\", \"CRP\", \"CRV\", \"CUN\",\n \"CDL\", \"11PC\", \"12PC\", \"21PC\", \"22PC\"]): # Skipping column WCS keywords\n continue\n\n new_header.append(shared_keywords.cards[kwd])", "def _modify_columns(self, cols, X, y=None):", "def writeheader(writer):\n writer.writerow(dict((fn, fn) for fn in writer.fieldnames))", "def _change_header(self, add=False):\n if self.data['history_file'] is None:\n return\n good_heading = self.data['history_header'] % self.data\n # ^^^ history_header is a string with %(abc)s replacements.\n headings = self.data['headings']\n history_lines = self.data['history_lines']\n previous = ''\n underline_char = '-'\n empty = False\n if not history_lines:\n # Remember that we were empty to start with.\n empty = True\n # prepare header line\n history_lines.append('')\n if len(history_lines) <= 1:\n # prepare underline\n history_lines.append(underline_char)\n if not headings:\n # Mock a heading\n headings = [{'line': 0}]\n inject_location = 0\n first = headings[0]\n inject_location = first['line']\n underline_line = first['line'] + 1\n try:\n 
underline_char = history_lines[underline_line][0]\n except IndexError:\n logger.debug(\"No character on line below header.\")\n underline_char = '-'\n previous = history_lines[inject_location]\n if add:\n inject = [\n good_heading,\n underline_char * len(good_heading),\n '',\n self.data['nothing_changed_yet'],\n '',\n '',\n ]\n if empty:\n history_lines = []\n history_lines[inject_location:inject_location] = inject\n else:\n # edit current line\n history_lines[inject_location] = good_heading\n logger.debug(\"Set heading from %r to %r.\", previous, good_heading)\n history_lines[underline_line] = utils.fix_rst_heading(\n heading=good_heading,\n below=history_lines[underline_line])\n logger.debug(\"Set line below heading to %r\",\n history_lines[underline_line])\n # Setting history_lines is not needed, except when we have replaced the\n # original instead of changing it. So just set it.\n self.data['history_lines'] = history_lines", "def add_headers(headers, out):\r\n out.write(common.to_csv_line(headers, \"efficient\"))", "def delColumn(self,column):\n data = self.data\n for rowData in data.values():\n if column in rowData:\n del rowData[column]\n self.hasChanged = True", "def pop_header_name(\n row: list[Hashable], index_col: int | Sequence[int]\n) -> tuple[Hashable | None, list[Hashable]]:\n # Pop out header name and fill w/blank.\n if is_list_like(index_col):\n assert isinstance(index_col, Iterable)\n i = max(index_col)\n else:\n assert not isinstance(index_col, Iterable)\n i = index_col\n\n header_name = row[i]\n header_name = None if header_name == \"\" else header_name\n\n return header_name, row[:i] + [\"\"] + row[i + 1 :]", "def fix_header(infile, outfile, colnum):\n\n with open(infile, mode='r') as fid:\n colnum -= 1 # adj. colnum to account for zero-based indexing\n cread = csv.reader(fid)\n ctr = 0\n\n with open(outfile, mode='w') as new_file:\n cwrite = csv.writer(new_file)\n\n for row in cread:\n if ctr==0:\n # we're in the header\n outrow = row[:colnum] + [stamp2iso(elem) for elem in row[colnum:]]\n ctr += 1\n else:\n outrow = row\n cwrite.writerow(outrow)", "def _postprocess_name_columns(\n table: pyarrow.Table, has_header: bool, settings: Settings\n) -> Tuple[pyarrow.Table, List[I18nMessage]]:\n if has_header and table.num_rows > 0:\n names, warnings = gen_unique_clean_colnames_and_warn(\n list((c[0].as_py() if c[0].is_valid else \"\") for c in table.columns),\n settings=settings,\n )\n\n # Remove header (zero-copy: builds new pa.Table with same backing data)\n table = table.slice(1)\n else:\n names = [f\"Column {i + 1}\" for i in range(len(table.columns))]\n warnings = []\n\n return (\n pyarrow.table(dict(zip(names, table.columns))),\n warnings,\n )", "def rollback(self) -> None:\n for k in self._moved_cols:\n self._cols[k].move_back()", "def parseColHeader(self, i, j) :\n cell_content = self.processString(self.source_cell.value)\n if self.isEmpty(i,j):\n if self.insideMergeBox(i,j):\n k, l = self.getMergeBoxCoord(i,j)\n \n # If we are in a vertical merge box, skip adding the dimension\n if l == j:\n return\n\n # Update cell content \n cell_content = self.processString(self.r_sheet.cell(k,l).value)\n else:\n return\n\n # Add the value qname to the column_dimensions list for that column\n self.column_dimensions.setdefault(j,[self.sheet_qname]).append(cell_content)\n \n # Add the data to the graph\n resource = self.getColHeaderValueURI(self.column_dimensions[j])\n self.graph.add((resource, RDF.type, self.namespaces['tablink']['ColumnHeader']))\n self.graph.add((resource, 
self.namespaces['skos']['prefLabel'], Literal(cell_content)))\n self.graph.add((resource, self.namespaces['tablink']['cell'], Literal(self.source_cell_name)))\n return", "def addheader(datasets):\n header = get_header()\n for i in range(0, len(datasets)):\n datasets[i].columns = header\n return datasets", "def customize_headers(self,executer, tree, cursor, table,custom_headers):\n headers = executer.get_columns(table, cursor)\n tree[\"columns\"] = custom_headers\n\n\n set_width = int(self.column_length_configurator / len(headers))\n\n # Setting columns width and headers\n for column in custom_headers:\n tree.column(column, width=set_width, minwidth=self.min_width)\n tree.heading(column, text=column)" ]
[ "0.71142775", "0.6889686", "0.65644413", "0.6336859", "0.608312", "0.60671866", "0.6046899", "0.6015363", "0.5942229", "0.58050436", "0.5728618", "0.57080615", "0.5686853", "0.5644749", "0.55979675", "0.5594624", "0.5594624", "0.5585846", "0.5558042", "0.5540971", "0.55384934", "0.54837596", "0.5465062", "0.5460001", "0.5458509", "0.5451826", "0.5445517", "0.541178", "0.5411285", "0.5401584" ]
0.73550874
0
Find the column that has the minimum number of cells in it, to minimize branching. Returning a column with 0 cells in it is OK; this gets dealt with in the solving loop.
def get_minimum_column(self):
    min_col = self.root.right
    current_col = min_col.right
    while current_col != self.root:
        if current_col.sum < min_col.sum:
            min_col = current_col
        # Move on to the next column
        current_col = current_col.right
    return min_col
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def min_reduce_nb(col, a, *args):\n return np.nanmin(a)", "def find_smallest(self):\n # add max value to covered rows and columns to ignore the covered cells\n maxval = self.C.max()\n C = self.C + self.row_cover[:, np.newaxis]*maxval\n C += self.col_cover*maxval\n # return the smallest value\n return C.min()", "def minimize(self, grid):\n self.deep += 1\n cells = grid.getAvailableCells()\n if cells == [] or self.deep > self.maxDeep:\n self.deep -= 1\n return self.evaluate(grid)\n\n ab_value = MiniMaxAlgorithm.infinity\n for cell in cells:\n for cell_value in self.possibleNewTiles:\n next_grid = grid.clone()\n next_grid.setCellValue(cell, cell_value)\n next_value = self.maximize(next_grid)\n ab_value = min(ab_value, next_value)\n if ab_value <= next_value:\n self.deep -= 1\n return ab_value\n\n self.deep -= 1\n return ab_value", "def cell_cost(row_count, col_count):\r\n while row_count < n_rows:\r\n if col_count >= n_cols:\r\n row_count += 1\r\n col_count = 0\r\n else:\r\n cost = grid[row_count][col_count]\r\n if row_count != 0:\r\n values = []\r\n for i in range(-1, 2):\r\n if col_count + i > -1 and col_count + i < n_cols:\r\n values.append(grid[row_count - 1][col_count + i])\r\n cost += min(values)\r\n grid[row_count][col_count] = cost\r\n col_count += 1", "def find_min(self):\n\n\n min_x = 1000\n min_y = 1000\n k = len(self.__col_lista)\n for i in range(k):\n x, y = self.__col_lista[i]\n if x < min_x:\n min_x = x\n if y < min_y:\n min_y = y\n return min_x, min_y", "def find_first_free_cell(board, picked_column):\n for row in reversed(range(len(board))):\n if board[row][picked_column] == 0:\n return row", "def get_smallest_h_cost_unvisited_node(self):\n node_list = []\n for column in self.grid:\n for node in column:\n if node.pos in self.unvisited_pos:\n node_list.append(node)\n return min(node_list, key=lambda x: x.h_cost)", "def minimum_f_cell(self):\n return sorted(self.open_cells,key = lambda cell: cell.f)[0]", "def min_number(self, rows: List[Row], column: NumberColumn) -> Number:\n cell_values = [row.values[column.name] for row in rows if row.values[column.name] is not None]\n if not cell_values:\n return 0.0 # type: ignore\n if not all([isinstance(value, Number) for value in cell_values]):\n raise ExecutionError(f\"Invalid values for number selection function: {cell_values}\")\n return min(cell_values) # type: ignore", "def fit(self, col):\n self.find_max_min(col)", "def find_smallest(num_vars):\n for x in range(10):\n if num_vars <= 2**x:\n return x", "def choose_cell_to_assign(self):\r\n min_domain = 10\r\n max_degree = -1\r\n chosen_row = None\r\n chosen_col = None\r\n for row in range(9):\r\n for col in range(9):\r\n if self.puzzle[row][col] == 0:\r\n domain_size = len(self.grid[row][col].domain)\r\n if domain_size < min_domain:\r\n min_domain = domain_size\r\n chosen_row = row\r\n chosen_col = col\r\n elif domain_size == min_domain:\r\n degree = len(self.grid[row][col].neighbors)\r\n if degree > max_degree:\r\n max_degree = degree\r\n chosen_row = row\r\n chosen_col = col\r\n return self.grid[chosen_row][chosen_col]", "def get_min(self):\n min_value= self.df[self.col_name].min()\n return min_value", "def get_nearest_col(self):\n return (self.rect.left - (self.screen.get_width() // 5)) // self.maze.block_size", "def cell_cost(row, col):\r\n if row < 0 or row >= n_rows or col < 0 or col >= n_cols:\r\n return INFINITY # Off grid cells are treated as infinities\r\n elif cashe[row][col] is None:\r\n cost = grid[row][col]\r\n if row != 0:\r\n doom = [cell_cost(row - 1, col + 
delta_col) for delta_col in range(-1, 2)]\r\n cost += min(doom)\r\n cashe[row][col] = cost\r\n return cashe[row][col]\r\n else:\r\n return cashe[row][col]", "def get_column_with_min_value(data):\n if not isinstance(data, pd.DataFrame):\n raise TypeError('Invalid input type: type(data) = {}'.format(type(data)))\n min_col_name = pd.Series(index=data.index)\n for idx, row in data.iterrows():\n min_col_name[idx] = row.argmin()\n return min_col_name", "def customMin(x,mergedSegments, minValidData = 0.8):\n if mergedSegments.loc[x].nonNullProp >= minValidData : \n return np.inf\n\n idx = min(criteriaMatrix.get(x),\n key=lambda y : np.inf if y not in inversedIndex.values\n else criteriaMatrix.get(x).get(y)\n )\n return np.inf if idx not in inversedIndex.values else criteriaMatrix.get(x).get(idx)", "def get_smallest_f_cost_unvisited_node(self):\n node_list = []\n for column in self.grid:\n for node in column:\n if node.pos in self.unvisited_pos:\n node_list.append(node)\n min_f_cost_node = min(node_list, key=lambda x: x.g_cost)\n min_f_cost_list = []\n for column in self.grid:\n for node in column:\n if (\n node.f_cost == min_f_cost_node.f_cost\n and node.pos in self.unvisited_pos\n ):\n min_f_cost_list.append(node)\n return min_f_cost_node, len(min_f_cost_list)", "def find_job_smallest_colset():\r\n smallest_colset_value = None\r\n smallest_colset_key = \"\"\r\n smallest_colset_length = 99999\r\n\r\n # iterate over all tasks and find smallest\r\n for key in r.scan_iter():\r\n value = r.get(key).decode(\"utf-8\")\r\n task = json.loads(value)\r\n colset_length = len(task[\"columns\"])\r\n\r\n if colset_length < smallest_colset_length:\r\n smallest_colset_value = task\r\n smallest_colset_key = key\r\n smallest_colset_length = colset_length\r\n\r\n return smallest_colset_value", "def argmin_reduce_nb(col, a, *args):\n a = np.copy(a)\n mask = np.isnan(a)\n if np.all(mask):\n raise ValueError(\"All-NaN slice encountered\")\n a[mask] = np.inf\n return np.argmin(a)", "def known_mines(self):\n \n if len(self.cells) == self.count:\n return self.cells", "def minimum_spanning_arborescence(sol):", "def minim(self) -> int:\n\t\treturn 2", "def excel_min_col(self, sheet_name):\n return self.wb[sheet_name].min_column", "def expanding_min_nb(a, minp=1):\n out = np.empty_like(a, dtype=np.float_)\n for col in range(a.shape[1]):\n out[:, col] = expanding_min_1d_nb(a[:, col], minp=minp)\n return out", "def misplaced_heuristic(state):\n msp_h = 0\n size = len(state)\n for i in range (size):\n for j in range (size):\n if state[i][j] == 0:\n continue\n elif state[i][j] != i*size + j:\n msp_h += 1\n return msp_h", "def find_min_distance():\n return np.argmin(d)", "def localmin(x):\r\n return (np.diff(np.sign(np.diff(x))) > 0).nonzero()[0] + 1", "def get_min_filled_threshold(df):\n percentage = 0.1\n return df.shape[0] * percentage", "def localmin(x):\n return (np.diff(np.sign(np.diff(x))) > 0).nonzero()[0] + 1" ]
[ "0.6645709", "0.6558186", "0.64990723", "0.6368767", "0.63344336", "0.6301566", "0.6237791", "0.6110719", "0.60172206", "0.60031587", "0.59943664", "0.59906703", "0.5983194", "0.59770536", "0.5949309", "0.5948226", "0.59324056", "0.592747", "0.5884803", "0.58845806", "0.58685064", "0.58682936", "0.58529323", "0.5841027", "0.5836812", "0.5836258", "0.5804371", "0.5783265", "0.5777008", "0.5755377" ]
0.69948006
0
This method swaps out the numpy instance in the module, should it have one, for the one in the fake instance we have here.
def _swap_numpy(self, module):
    # Check to make sure this is not one of the string options from the YAML
    if not isinstance(module, str):
        if hasattr(module, 'numpy'):  # Check if it has a self.numpy object
            # TODO: Replace this with the correct variable
            module.numpy = self.fake.numpy  # Swap out with the class's instance of numpy
    return module  # Return out the mutated module
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_reference_to_array(self):\n arr = numpy.arange(0.0, 10.0, 0.1)\n arr = numpy.reshape(arr, (25, 4))\n vtk_arr = array_handler.array2vtk(arr)\n arr1 = array_handler.vtk2array(vtk_arr)\n # Now make sure these are using the same memory.\n arr[0][0] = 100.0\n self.assertEqual(arr[0][0], arr1[0][0])\n self.assertEqual(arr.shape, arr1.shape)", "def test_inplace_set_value(self):\r\n dtype = self.dtype\r\n if dtype is None:\r\n dtype = theano.config.floatX\r\n\r\n shp = (100/4,1024)#100KB\r\n\r\n x = numpy.zeros(shp, dtype=dtype)\r\n x = self.cast_value(x)\r\n x_shared = self.shared_constructor(x, borrow=True)\r\n\r\n old_data = x_shared.container.storage[0]\r\n nd = numpy.ones(shp, dtype=dtype)\r\n\r\n if x.__class__.__name__ != 'csr_matrix':\r\n #sparse matrix don't support inplace affectation\r\n x_shared.container.value[:] = nd\r\n assert (numpy.asarray(x_shared.get_value(borrow=True))==nd).all()\r\n #This should always share value!\r\n assert may_share_memory(old_data, x_shared.container.storage[0])\r\n assert may_share_memory(old_data, x_shared.get_value(borrow=True, return_internal_type=True))\r\n\r\n nd[0]+=1\r\n x_shared.container.value[0] = nd[0]\r\n assert (numpy.asarray(x_shared.get_value(borrow=True)[0])==nd[0]).all()\r\n assert (numpy.asarray(x_shared.get_value(borrow=True)[1:])==nd[1:]).all()\r\n #This should always share value!\r\n assert may_share_memory(old_data, x_shared.container.storage[0])\r\n assert may_share_memory(old_data, x_shared.get_value(borrow=True, return_internal_type=True))\r\n\r\n if x.__class__.__name__ != 'csr_matrix':\r\n #sparse matrix don't support inplace affectation\r\n nd += 1\r\n #THIS DON't DO WHAT WE EXPECT the contain of a is not updated for CudaNdarray, but it is for ndarray\r\n x_shared.get_value(borrow=True)[:] = nd\r\n #assert (numpy.asarray(x_shared.get_value(borrow=True))!=nd).all()\r\n assert may_share_memory(old_data, x_shared.container.storage[0])\r\n x_shared.get_value(borrow=True)\r\n\r\n # Test by set_value with borrow=False\r\n nd += 1\r\n old_data = x_shared.container.storage[0]\r\n x_shared.set_value(nd, borrow=False)\r\n assert numpy.allclose(self.ref_fct(x_shared.get_value(borrow=True)),\r\n self.ref_fct(self.cast_value(nd)))\r\n assert may_share_memory(old_data, x_shared.container.storage[0]) == self.set_value_inplace\r\n\r\n # Test by set_value with borrow=False when new data cast.\r\n # specificaly useful for gpu data\r\n nd += 1\r\n old_data = x_shared.container.storage[0]\r\n x_shared.set_value(self.cast_value(nd), borrow=False)\r\n assert numpy.allclose(self.ref_fct(x_shared.get_value(borrow=True)),\r\n self.ref_fct(self.cast_value(nd)))\r\n assert may_share_memory(old_data, x_shared.container.storage[0]) == self.set_cast_value_inplace\r\n\r\n # Test by set_value with borrow=True\r\n nd += 1\r\n old_data = x_shared.container.storage[0]\r\n x_shared.set_value(nd.copy(), borrow=True)\r\n assert numpy.allclose(self.ref_fct(x_shared.get_value(borrow=True)),\r\n self.ref_fct(self.cast_value(nd)))\r\n assert may_share_memory(old_data, x_shared.container.storage[0]) == self.set_value_inplace\r\n\r\n # Test by set_value with borrow=True when new data cast.\r\n nd += 1\r\n old_data = x_shared.container.storage[0]\r\n x_shared.set_value(self.cast_value(nd.copy()), borrow=True)\r\n assert numpy.allclose(self.ref_fct(x_shared.get_value(borrow=True)), self.ref_fct(self.cast_value(nd)))\r\n assert may_share_memory(old_data, x_shared.container.storage[0]) == self.set_cast_value_inplace", "def test_Numpy_import(benchmark):\n\n def 
Benchmark():\n import numpy as np\n a = np.ndarray(1)\n del a\n\n benchmark(Benchmark)", "def set_value(self, new_value, borrow=False):\n new_value = np.array(new_value, copy = not borrow)\n try:\n if self.shape != new_value.shape:\n self.resize(new_value.shape, refcheck=False)\n # refcheck is necessary to get this to work, but bypasses\n # the reference checks. Reference errors might occur if\n # a reference to this ShimmedTensorShared variable exists elsewhere,\n # and we try to access it after the resize. This is the kind\n # of thing you shouldn't do anyway with Theano variables.\n self[:] = new_value\n except IndexError:\n # Scalars will fail on the above\n assert(isscalar(new_value))\n # np.isscalar will fail on 0-dim arrays; isscalar works\n self = super(ShimmedTensorShared, self).__setitem__(None, new_value)", "def test_ndarray_copy(self):\r\n assert copy(numpy.ndarray) is numpy.ndarray\r\n assert deepcopy(numpy.ndarray) is numpy.ndarray", "def numpy(self):\n for key, value in self.__dict__.items():\n self.__dict__[key] = value.numpy()\n return self", "def test_numpy_arrays_not_copied(self):\n with PhysicsEngineHarness('tests/engineering-test.json') as physics_engine:\n state = physics_engine.get_state()\n\n engineering = state.engineering\n engineering.components[0].temperature = 777777.7\n self.assertEqual(engineering._array[2 * N_COMPONENTS], 777777.7)\n self.assertEqual(state.y0()[state.ENGINEERING_START_INDEX + 2 * N_COMPONENTS], 777777.7)", "def __setstate__(self, state):\n shape = state['_SharedNumpyArray__np_array'].shape\n dtype = state['_SharedNumpyArray__np_array'].dtype\n type_id = np_type_id_to_ctypes(dtype)\n self.__shared = RawArray(type_id, np.product(shape))\n self.__np_array = np.frombuffer(self.__shared, dtype=dtype).reshape(shape)\n np.copyto(self.__np_array, state['_SharedNumpyArray__np_array'])\n self.tag = None", "def reset(self) -> np.array:\n raise NotImplementedError", "def stub_out(self, old, new):\n self.useFixture(fixtures.MonkeyPatch(old, new))", "def stub_out(self, old, new):\n self.useFixture(fixtures.MonkeyPatch(old, new))", "def __array_wrap__(self, out_arr, context=None): #pylint: disable=no-self-use, unused-argument\n if out_arr.shape != (3,):\n out_arr = out_arr.view(np.ndarray)\n return out_arr", "def copy(self):\n obj = type(self)(self.a_n[:], domain=self.domain, name=self.name)\n if isinstance(obj.a_n, np.ndarray):\n obj.a_n = obj.a_n.copy()\n return obj", "def __setstate__(self, state):\n shape = state['_DoubleBufferedSharedNumpyArray__np_array1'].shape\n dtype = state['_DoubleBufferedSharedNumpyArray__np_array1'].dtype\n type_id = np_type_id_to_ctypes(dtype)\n self.__shared1 = RawArray(type_id, np.product(shape))\n self.__np_array1 = np.frombuffer(self.__shared1, dtype=dtype).reshape(shape)\n np.copyto(self.__np_array1, state['_DoubleBufferedSharedNumpyArray__np_array1'])\n self.__shared2 = RawArray(type_id, np.product(shape))\n self.__np_array2 = np.frombuffer(self.__shared2, dtype=dtype).reshape(shape)\n np.copyto(self.__np_array2, state['_DoubleBufferedSharedNumpyArray__np_array2'])\n self.__parity = state['_DoubleBufferedSharedNumpyArray__parity']", "def save(self, patch):\n internalSlices = self._get_internal_slices(patch.slices)\n self.array[internalSlices] = patch.array", "def tearDown(self):\n\n def reset_module(name, module):\n if module:\n sys.modules[name] = module\n else:\n sys.modules.pop(name, None)\n reset_module('simplejson', self.simplejson)\n reset_module('json', self.json)\n reload(protojson)", "def test_pickle():\r\n M = 
Module()\r\n M.x = (T.dmatrix())\r\n M.y = (T.dmatrix())\r\n a = T.dmatrix()\r\n M.f = Method([a], a + M.x + M.y)\r\n M.g = Method([a], a * M.x * M.y)\r\n\r\n mode = get_mode()\r\n m = M.make(x=numpy.zeros((4,5)), y=numpy.ones((2,3)), mode=mode)\r\n\r\n m_dup = cPickle.loads(cPickle.dumps(m, protocol=-1))\r\n\r\n assert numpy.all(m.x == m_dup.x) and numpy.all(m.y == m_dup.y)\r\n\r\n m_dup.x[0,0] = 3.142\r\n assert m_dup.f.input_storage[1].data[0,0] == 3.142\r\n assert m.x[0,0] == 0.0 #ensure that m is not aliased to m_dup\r\n\r\n #check that the unpickled version has the same argument/property aliasing\r\n assert m_dup.x is m_dup.f.input_storage[1].data\r\n assert m_dup.y is m_dup.f.input_storage[2].data\r\n assert m_dup.x is m_dup.g.input_storage[1].data\r\n assert m_dup.y is m_dup.g.input_storage[2].data", "def __init__(self, size, orig=None):\n if not cArray.cModule:\n cArray.cModule=ctypes.cdll.LoadLibrary(\"./arraylib.so\")\n #Arg & return types must be said explicitly, otherwise we are gonna get seg. faults when dealing with pointers.\n #pointers of 64 bit machines are longlong, if treated as int, they are truncated => seg. fault\n cArray.cModule.reserve_array.restype = ctypes.c_longlong\n cArray.cModule.reserve_array.argtypes = [ctypes.c_int]\n cArray.cModule.free_array.argtypes = [ctypes.c_longlong]\n cArray.cModule.and_array.argtypes = [ctypes.c_longlong,ctypes.c_longlong,ctypes.c_longlong,ctypes.c_int]\n cArray.cModule.or_array.argtypes = [ctypes.c_longlong,ctypes.c_longlong,ctypes.c_longlong,ctypes.c_int]\n cArray.cModule.not_array.argtypes = [ctypes.c_longlong,ctypes.c_int]\n cArray.cModule.get_element.argtypes = [ctypes.c_longlong,ctypes.c_int]\n cArray.cModule.set_element.argtypes = [ctypes.c_longlong,ctypes.c_int,ctypes.c_int]\n \n self.size=size\n self.arrayRef=cArray.cModule.reserve_array(ctypes.c_int(self.size))\n self.myCModule=cArray.cModule #on the destructor, cArray can not be accesed anymore, hence the object should store a ref to this.\n if orig != None:\n for i in range(size):\n self.__setitem__(i,orig[i])", "def load_build(self):\r\n Unpickler.load_build(self)\r\n if isinstance(self.stack[-1], NDArrayWrapper):\r\n if self.np is None:\r\n raise ImportError('Trying to unpickle an ndarray, '\r\n \"but numpy didn't import correctly\")\r\n nd_array_wrapper = self.stack.pop()\r\n array = nd_array_wrapper.read(self)\r\n self.stack.append(array)", "def _repackage_hidden(h: nd.NDArray):\n return h.detach()", "def from_numpy(self, a):\n raise NotImplementedError(\"from_numpy\")", "def _reset(self, env_id: np.ndarray) -> None:", "def restore_via_init(objt: _ty.Type[MyArray]) -> Restorer[BaseArray, MyArray]:\n return objt", "def numpy_extension():\n jsonpickle.ext.numpy.register_handlers()\n yield # control to the test function.\n jsonpickle.ext.numpy.unregister_handlers()", "def test_array_cache(self):\n cache = array_handler.ArrayCache()\n # Test if len works.\n self.assertEqual(len(cache), 0)\n arr = numpy.zeros(100, float)\n varr = vtk.vtkFloatArray()\n # test contains\n self.assertEqual(varr not in cache, True)\n cache.add(varr, arr)\n self.assertEqual(len(cache), 1)\n self.assertEqual(varr in cache, True)\n \n # Test the get method.\n self.assertEqual(cache.get(varr) is arr, True)\n\n # Test if the cache is cleared when the array is deleted.\n del varr\n self.assertEqual(len(cache), 0)", "def __array_wrap__(self, result, **kwargs):\n\n return self.__class__(result, self.shape)", "def set_value(self, value, borrow=False):\r\n if not borrow:\r\n #TODO: check for 
cuda_ndarray type\r\n if not isinstance(value, numpy.ndarray):\r\n # in case this is a cuda_ndarray, we copy it\r\n value = copy.deepcopy(value)\r\n self.container.value = value # this will copy a numpy ndarray\r", "def reset(self) -> List[int]:\n self.array = deepcopy(self.original)\n return self.array", "def copy(self, old):\n self.h = old.h\n self.L_h = old.L_h\n\n self.d = np.arange(1,self.L_h+1)\n\n self.it = old.it\n self.N_first = old.N_first\n self.la = old.la\n self.a = old.a\n self.e = np.copy(old.e)\n self.e2 = old.e2\n\n self.P = old.P\n self.alpha_g = np.copy(old.alpha_g)\n self.A = np.copy(old.A)\n self.sigma2 = old.sigma2\n self.mu = np.copy(old.mu)\n self.R = np.copy(old.R)\n\n self.b = np.copy(old.mu)\n self.w = np.copy(old.w)\n self.pie = np.copy(old.pie)\n self.pi = np.copy(old.pi)\n self.p = np.copy(old.p)\n\n self.mu_pad = np.copy(old.mu_pad)\n self.M_mu = np.copy(old.M_mu)\n self.R_pad = np.copy(old.R_pad)\n #self.M_R = np.copy(old.M_R)\n\n self.half_pie_var = np.copy(old.half_pie_var)\n self.half_pie_var_pad = np.copy(old.half_pie_var_pad)\n self.M_half_pie_var_pad = np.copy(old.M_half_pie_var_pad)\n self.pie_var = np.copy(old.pie_var)\n\n self.rev_A = np.copy(old.rev_A)\n\n self.LP = old.LP\n self.LP_list = old.LP_list\n self.la_list = old.la_list\n self.a_list = old.a_list\n self.sigma2_list = old.sigma2_list\n self.A_list = old.A_list", "def _numpy_transform(fqdn, value):\n import numpy\n return _package_transform(numpy, fqdn, value)" ]
[ "0.60902596", "0.58744705", "0.5683738", "0.5663783", "0.5609355", "0.5535437", "0.5494269", "0.54620075", "0.54172635", "0.5362005", "0.5362005", "0.5336442", "0.5305379", "0.530221", "0.5182793", "0.51734614", "0.5172819", "0.51510024", "0.51288235", "0.5113124", "0.50953704", "0.5092387", "0.5091938", "0.5086598", "0.5063424", "0.5052794", "0.50387406", "0.5032744", "0.5023723", "0.5018826" ]
0.765654
0
This method injects the providers into the faker instance.
def add_providers(self):
    str_providers = PROVIDERS[0]   # Providers, called by name
    live_providers = PROVIDERS[1]  # Providers, provided as a live module
    for providers in PROVIDERS:  # Iterate over the types of providers
        for provider in providers:  # Iterate over all the methods
            # Inject those into faker, and swap the numpy instance
            self.fake.add_faker(self._swap_numpy(provider[0]), provider[1])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, config: Config) -> None:\n self.config = config\n\n faker_config = self.config.faker\n self.faker = Faker(locale=faker_config.locale)\n\n self.fakes = {}", "def setup_provider(self):\n pass", "def add_providers_deped(self):\n # This gives direct access to the module's main class called Provider.\n klasses = [\n provider.Provider for provider in PROVIDERS] # Accessing the PROVIDERS. Check this method out, see how it operates.\n for k in klasses:\n self.fake.add_provider(k)", "def faker() -> Faker:\n\n return Faker()", "def fake_init():\n return Faker()", "def create_providers(cls) -> Iterable['BaseProvider']:\n return []", "def _load_providers(self, **kwargs):\n return super()._load_providers(providers=\"TIProviders\", **kwargs)", "def setUp(self):\n self.factory = PhoneFactory()", "def _fixture_setup(self):\n pass", "def addAllFactories(self) -> None:\n ...", "def setUp(self):\n patientgen = PatientsGenerator(0, 1, 0, 'a')\n self.record = patientgen.data.find('record')\n self.gender_sex = patientgen.gender_sex_list\n self.ethnicities = patientgen.ethnicity_list\n # self.female_names = patientgen.data_generator.first_names_female\n # self.male_names = patientgen.data_generator.first_names_male\n # self.last_names = patientgen.data_generator.last_names", "def register(self, provider):\n for entry in dir(provider):\n try:\n provider_function = type(provider).__dict__[entry]\n factory_provider = getattr(provider_function, 'factory_provider', None)\n if factory_provider:\n provided_type, singleton = factory_provider\n if callable(provider_function): # A function or member function\n # if it's a bound method, this will get the bound version\n provider_member = getattr(provider, entry)\n self.add_factory(provided_type, provider_member, singleton)\n elif hasattr(provider_function, '__get__'):\n # this is a property or non-callable descriptor:\n self.add_factory(\n provided_type,\n functools.partial(provider_function.__get__, provider, provider),\n singleton,\n )\n else:\n self.add_service(provided_type, provider_function)\n except KeyError:\n pass", "def run_providers(self, argv):\n\n for name, provider in self.providermanager:\n provider = provider(self)\n self.produce_output(provider.title,\n provider.location,\n provider.run(argv))", "def _build_observation_providers(self) -> Dict[str, ObservationProvider]:\n pass", "def setUp(self):\n self.users = [UserFactory.create() for i in range(5)]", "def setUpClass(cls):\n super(EmotionTest, cls).setUpClass()\n user = UserFactory(username='dan', email='dan@dan.net')\n user.set_password('password')\n user.first_name = 'Dan'\n user.last_name = 'Theman'\n user.save()\n cls.dan = user\n\n for _ in range(10):\n user = UserFactory.create()\n user.set_password(factory.Faker('password'))\n user.save()", "def initialize_client():\n logging.info('Initializing Sendgrid provider')\n sendgrid_authentication, sendgrid_username = get_provider_credentials('sendgrid') \n sendgrid_provider = SendGridProvider(sendgrid_authentication, sendgrid_username)\n\n logging.info('Initializing Mailgun provider')\n mailgun_authentication, mailgun_domain = get_provider_credentials('mailgun')\n mailgun_provider = MailGunProvider(mailgun_authentication, mailgun_domain)\n\n logging.info('Registering providers')\n client.register_provider(sendgrid_provider, 10)\n client.register_provider(mailgun_provider, 20)", "def _setup_random_gen(\n self,\n probabilities: List[float],\n random_nums: List[int]\n ) -> None:\n RandomGen._probabilities = probabilities\n 
RandomGen._random_nums = random_nums\n self._random_gen = RandomGen()", "def setup(self):\n for gen in self._generators:\n gen.setup()", "def _load_drivers(self):\n self.drivers, self.default_provider = service_base.load_drivers(\n taas_consts.TAAS, self)", "def provider_setup(cls, args, config):\n if len(args) < 1:\n print \"USAGE: molns provider setup name\"\n print \"\\tCreates a new provider with the given name.\"\n return\n # check if provider exists\n try:\n provider_obj = config.get_object(args[0], kind='Provider')\n except DatastoreException:\n # ask provider type\n print \"Select a provider type:\"\n for n, p in enumerate(VALID_PROVIDER_TYPES):\n print \"\\t[{0}] {1}\".format(n, p)\n while True:\n try:\n provider_ndx = int(raw_input_default(\"Enter the number of type:\", default='0'))\n provider_type = VALID_PROVIDER_TYPES[provider_ndx]\n break\n except (ValueError, IndexError):\n pass\n logging.debug(\"Provider type '{0}'\".format(provider_type))\n # Create provider\n try:\n provider_obj = config.create_object(name=args[0], ptype=provider_type, kind='Provider')\n except DatastoreException as e:\n logging.exception(e)\n print e\n return\n print \"Enter configuration for provider {0}:\".format(args[0])\n setup_object(provider_obj)\n config.save_object(provider_obj, kind='Provider')\n\n cls.provider_initialize(args[0], config)", "def install_providers():\n host = env.host_string\n providers = get_providers(host)\n for provider in providers.values():\n if getattr(provider, 'manager', None) is not None:\n provider.manager.install()\n\n provider.install()", "def populate_fixtures():\n languages()\n words()", "def setup(self):\n for gen in self._feature_stats_generators:\n gen.setup()", "def test__get_faker_anonymize_list(self):\n # Run\n transformer = Mock()\n transformer.anonymize = ['email']\n\n result = CategoricalTransformer._get_faker(transformer)\n\n # Asserts\n self.assertEqual(\n result.__name__,\n 'faker',\n \"Expected faker function\"\n )", "def setUp(self):\n UsuarioFactory.create()\n self.user = Usuario.objects.get(username='admin')\n ProyectoFactory.lider_proyecto = self.user\n ProyectoFactory.create()\n FaseFactory.proyecto = Proyecto.objects.get(nombre='Proyecto01')\n FaseFactory.create()\n TipoItemFactory.fase = Fase.objects.get(nombre='Fase01')\n TipoItemFactory.create()\n self.factory = RequestFactory()", "async def setup(self, context: InjectionContext):", "def setUp(self):\n self.factory = RequestFactory()\n StaffProfile.objects.rebuild()\n self.manager = mommy.make(\n \"auth.User\", first_name=\"Jane\", last_name=\"Ndoe\", email=\"jane@example.com\"\n )\n self.user = mommy.make(\n \"auth.User\", first_name=\"Bob\", last_name=\"Ndoe\", email=\"bob@example.com\"\n )\n manager_mommy = Recipe(StaffProfile, lft=None, rght=None, user=self.manager)\n staff_mommy = Recipe(StaffProfile, lft=None, rght=None, user=self.user)\n self.manager_profile = manager_mommy.make()\n self.staffprofile = staff_mommy.make()", "def setup(self, registers):\n \"\"\" tasks before any generation functions are called \"\"\"\n pass", "def setUp(self):\n password = factory.Faker('pystr', min_chars=8, max_chars=16)\n self.user = UserFactory.create(password=password)\n self.token = Token.objects.create(user=self.user)\n self.factory = APIRequestFactory()\n\n # set up the data\n store = StoreFactory(user=self.user)\n material = MaterialFactory()\n self.material_stock = MaterialStockFactory(\n store=store, material=material, current_capacity=20, max_capacity=100\n )" ]
[ "0.63692963", "0.6351762", "0.61550707", "0.6035626", "0.5997692", "0.5795709", "0.57771325", "0.57227266", "0.55226743", "0.54858613", "0.5434935", "0.54322845", "0.53593737", "0.5339375", "0.5336239", "0.53179175", "0.529026", "0.5267008", "0.52436227", "0.52424914", "0.5205812", "0.520212", "0.51944804", "0.51875806", "0.51826084", "0.51672924", "0.5128174", "0.5126979", "0.512609", "0.5119085" ]
0.77991426
0
Create a map of duplicate counts and probabilities according to a pdf, i.e. uniform, and store it for reuse on each original event. The current version is taken directly from FEBRL and needs review, because the number of duplicates stored starts at 2(?).
def generate_duplicate_pdf(self):
    # Start with a single duplicate at zero cumulative probability
    num_dup = 1
    prob_sum = 0.0
    prob_list = [(num_dup, prob_sum)]
    max_dups = self.duplicate_cfg["Max_duplicate"]
    uniform_val = 1.0 / float(max_dups)
    # Uniform pdf: each additional duplicate count adds an equal slice of cumulative probability
    for i in range(max_dups - 1):
        num_dup += 1
        prob_list.append((num_dup, uniform_val + prob_list[-1][1]))
    return prob_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def multinomial_pmf(sample, probabilities):\r\n # TODO\r\n a=[]\r\n b=[]\r\n i=0\r\n key_list=[]\r\n value_list=[]\r\n for key,value in sample.items():\r\n key_list.append(key)\r\n value_list.append(value)\r\n b=list(sample)\r\n while i< len(b):\r\n a.append(probabilities.keys()[probabilities.values().index(value_list[i])])\r\n\r\n\r\n return a", "def filna_dict(mes):\n key = [f'pdf_{count + 1}' for count in range(mes)]\n value = ['stans.pdf' for count in range(mes)]\n filna_tobe_inserted = dict(zip(key, value))\n return filna_tobe_inserted", "def filna_dict(mes):\n key = [f'pdf_{count+1}'for count in range(mes)]\n value = ['stans.pdf'for count in range(mes)]\n filna_tobe_inserted = dict(zip(key,value))\n return filna_tobe_inserted", "def calc_prob(data):\n total = len(data)\n frequencies = sorted(Counter(data).items())\n probabilities = OrderedDict()\n for (key, value) in frequencies:\n probabilities[key] = value / total\n return probabilities", "def prob_dist(line1, line2, model):\n vocab = set(counts_un.keys())\n probs = dict()\n for line3 in vocab:\n probs[line3] = model.get_trigram_prob(line1, line2, line3)\n return probs", "def create_probability_object(self):\n self.update_frequencies(self)\n prob_dict = {}\n for symbol in self.all_symbols.keys():\n prob_dict[symbol] = self.all_symbols[symbol] / self.total_symbols\n return prob_dict\n # self.prob_dict = prob_dict", "def get_probability(letters, n):\n return {l: c/n for l, c in letters.items()}", "def _compute_register_probs(cls, num_values, probability):\n bits = np.arange(1, num_values + 1)\n probs = scipy.stats.geom.pmf(bits, probability)\n\n return probs / sum(probs)", "def probability(prods, prod_dict_As, count_dict):\n for p in prods:\n if p not in prod_dict_As:\n raise Exception(\"Think we cannot make the product {}.\".format(p))\n # Argh, Python, this is a reference!\n #possible_As = prod_dict_As[prods[0]]\n possible_As = set( prod_dict_As[prods[0]] )\n for p in prods[1:]:\n possible_As &= prod_dict_As[p]\n ret = []\n for A in possible_As:\n count = 1\n for p in prods:\n count *= count_dict[(p,A)]\n ret.append((A,count))\n return ret", "def init_probability_dict(self):\n for x in xrange(0,10):\n self.class_probabilities[x] = self.init_probability_2d()", "def generate_pdf_training_data(cls):\n sz = cls.test_set_size\n _x = np.zeros((sz, cls.state_size))\n _y = np.zeros((sz, cls.action_size))\n u = dict()\n u[str(_x[0])] = True\n for _i in range(0, sz):\n _pdf = np.random.randint(100, size=cls.action_size)\n _pdf = _pdf / np.sum(_pdf)\n _x[_i] = np.random.randint(3, size=cls.action_size)\n while str(_x[_i]) in u:\n _x[_i] = np.random.randint(3, size=cls.action_size)\n u[str(_x[_i])] = True\n _y[_i] = _pdf\n return _x, _y", "def generate_transition_bigram_probabilities(transition_unigram_counts, transition_bigram_counts):\r\n\ttransition_bigram_probabilities = dict()\r\n\tfor tag_bigram in transition_bigram_counts:\r\n\t\ttransition_bigram_probabilities[tag_bigram] = float(transition_bigram_counts[tag_bigram])/transition_unigram_counts[tag_bigram[0]]\r\n\treturn transition_bigram_probabilities", "def _create_freq_dist(self):\r\n freq_dict = dict()\r\n\r\n for element in self.data:\r\n if element in freq_dict:\r\n freq_dict[element] += 1\r\n else:\r\n freq_dict[element] = 1\r\n\r\n return freq_dict", "def get_ngramlogprobs(freqdict):\n return", "def sample_pagerank(corpus, damping_factor, n):\n probabilities = dict()\n samples = []\n\n # Random first sample\n page = random.choice(list(corpus.keys()))\n samples.append(page)\n 
\n # Remaining samples after first\n for i in range(n-1):\n p = transition_model(corpus, page, damping_factor)\n page = random.choices(list(p.keys()), weights=list(p.values()), k=1)[0]\n samples.append(page)\n\n # Count\n for p in corpus.keys():\n probabilities[p] = samples.count(p) / n\n\n return probabilities", "def prob4():\n\n\n N = 500000\n random_draws = np.random.multivariate_normal(mean = [-1,1], cov =[[1,0],[0,1]], size = N)\n\n h = lambda x: x[0] < -1 and x[1] > 1\n f = lambda x: stats.multivariate_normal(mean = [ 0, 0]).pdf(x)\n g = lambda x: stats.multivariate_normal(mean = [-1, 1]).pdf(x)\n\n probability = [h(random_draws[i]) * f(random_draws[i]) / g(random_draws[i]) for i in range(N)]\n\n return 1./N * np.sum(probability)", "def unigram_model(list_of_words, unigram_count, N=count_token()):\n d = pd.read_csv(unigram_count)\n proba_dict = {list_of_words[i]: (d[el].values[0] / float(N)) if el in d.columns.values else 0.0 for i, el in enumerate(list_of_words) }\n return proba_dict", "def calcProbability(self):\n for attribute in self.attributes:\n index = self.F2I[attribute]\n features = set([self.train[i][0][index] for i in range(len(self.train))])\n for feature in features:\n #all the true and false\n result_t = list(filter(lambda x: x[1]== True, self.train))\n total_t = len(result_t)\n result_f = list(filter(lambda x: x[1]== False, self.train))\n total_f= len(result_f)\n #the probability for the feature if its true or false\n t = len(list(filter(lambda x: x[0][index] == feature, result_t)))\n f = len(list(filter(lambda x: x[0][index] == feature, result_f)))\n prob_yes= t/total_t\n prob_no = f/total_f\n #assign the probabilities to the dictionaries\n self.probs_yes[(index,feature)] = prob_yes\n self.probs_no[(index,feature)] = prob_no", "def probability(self):\r\n \r\n my_dict = dict()\r\n \r\n for i in self.__dtmc:\r\n \r\n sum_Pij = float(sum([self.__dtmc[i][j] for j in self.__dtmc[i]]))\r\n \r\n if sum_Pij == 0:\r\n \r\n my_dict[i] = dict()\r\n \r\n elif sum_Pij > 0:\r\n \r\n if i not in my_dict:\r\n \r\n my_dict[i] = dict()\r\n \r\n for j in self.__dtmc[i]:\r\n \r\n Pij = self.__dtmc[i][j] / sum_Pij\r\n \r\n my_dict[i][j] = Pij\r\n \r\n return my_dict", "def calculate_prior_probability(y):\n unique, counts = np.unique(y, return_counts=True)\n u_c = dict(zip(unique, counts))\n instances = len(y)\n for u in u_c:\n u_c[u] = float(u_c[u] / instances)\n return u_c", "def _calculate_measurement_probs(measurements):\n total_mes = len(measurements)\n unique_mes = [list(x) for x in {tuple(x) for x in measurements}]\n total_unique_mes = len(unique_mes)\n len_qubits = len(unique_mes[0])\n measurements_probabilities = {}\n for i in range(total_unique_mes):\n strqubits = ''\n for qubit_idx in range(len_qubits):\n strqubits += str(unique_mes[i][qubit_idx])\n prob = measurements.count(unique_mes[i]) / total_mes\n measurements_probabilities[strqubits] = prob\n\n return measurements_probabilities", "def probabilityGet(NS,NH,SList,HList):\n global PS\n global PH\n PS = NS/(NS+NH) #probability of Spam\n PH = NH/(NS+NH) #probability of Ham\n AllPSpam = {} \n AllPHam = {}\n\n lambd = input(\"Choose a value for your lambda: \\n(a) 0.05 \\n(b) 0.5 \\n(c) 1 \\n(d) 2 \\nEnter letter of your choice: \") #Changeable lambda\n if lambd == 'a':\n lam= 0.05\n elif lambd == 'b':\n lam = 0.5\n elif lambd == 'd':\n lam = 2\n else:\n lam = 1\n\n for every_word,count in SList.items(): #computes probability of words in spam \n print(every_word, count)\n L_Spam = (count+lam)/(NS+(5000*lam))\n 
AllPSpam[every_word] = L_Spam #contains all the probability of everyword in Spam\n for every_word,count in HList.items(): #computes probability of words in ham\n L_Ham = (count+lam)/(NH+(5000*lam))\n AllPHam[every_word] = L_Ham #contains all the probability of everyword in Ham\n print(\"Testing of emails now begins!\")\n testingPhase(AllPSpam, AllPHam)", "def entropy_permutation_test(ordered_pitch_types, single_pitch_pdf, conditional_joint_probabilities, total_transitions,\n n=1000):\n pitch_types, pitch_probabilities = zip(*single_pitch_pdf.items())\n permutation_entropies = []\n progress = progressbar.ProgressBar()\n\n for test_number in progress(xrange(n)):\n # create the new matrix\n permutation_counts = {}\n for first_pitch_type in ordered_pitch_types:\n permutation_counts[first_pitch_type] = {}\n for second_pitch_type in ordered_pitch_types:\n permutation_counts[first_pitch_type][second_pitch_type] = 0\n\n pitch_permutation = numpy.random.choice(pitch_types, total_transitions, p=pitch_probabilities)\n current_pitch = numpy.random.choice(pitch_types, p=pitch_probabilities)\n for next_pitch in pitch_permutation:\n permutation_counts[current_pitch][next_pitch] += 1\n current_pitch = next_pitch\n\n joint_probabilities, _, _ = joint_probabilities_from_transitions(ordered_pitch_types, permutation_counts)\n permutation_entropies.append(entropy_from_probability_matrix(joint_probabilities))\n\n joint_entropy = entropy_from_probability_matrix(conditional_joint_probabilities)\n # print 'Mean', numpy.mean(permutation_entropies)\n # print 'Standard deviation', numpy.std(permutation_entropies)\n # tdof, tloc, tscale = stats.t.fit(permutation_entropies)\n # print 'DF', tdof, 'Loc (mean)', tloc, 'Scale (SD)', tscale\n # t_score = (joint_entropy - tloc) / tscale\n # print stats.t.cdf(joint_entropy, df=tdof, loc=tloc, scale=tscale)\n\n mean, stddev = stats.norm.fit(permutation_entropies)\n print 'Mean = {mean}\\t StdDev = {stddev}'.format(mean=mean, stddev=stddev)\n z_score = (joint_entropy - mean) / stddev\n p_value = stats.norm.cdf(joint_entropy, mean, stddev)\n print 'The joint entropy has a Z-score of {z_score} which gives a P-value of {p_value}'.format(z_score=z_score,\n p_value=p_value)\n return z_score, p_value", "def frecuencia_abs(seq) -> dict:\n hist = {}\n for i in seq:\n hist[i] = hist.get(i, 0) + 1\n return hist", "def sdd(events,probs):\n \n import random\n nprobs=[x*1000 for x in probs] #so, here i multiply each float in 'probs' by 1000 and store the products in 'nprobs'\n newlist=[]\n for a in range(len(events)) : #then, in this loop, i create a list (newlist), in which each event appears 1000*its probability times\n b=nprobs[a]\n b=int(b)\n for c in range(b) :\n newlist.append(events[a]) \n return (random.choice(newlist)) #and finally, i ramdonly sample ", "def prime_error_rate_dic(aa_order):\n aa_error_rate_dic = {}\n for i in aa_order:\n #first element of definitions are the from mutation rate\n #and the second element is the to mutation rate\n aa_error_rate_dic[i] = [0.0, 0.0]\n return aa_error_rate_dic", "def distribution_probability(self, game):\n dist_probability = {}\n\n total_visits = sum(self.root.n_a.values())\n\n for action, visits in self.root.n_a.items():\n dist_probability[action] = visits/total_visits\n return dist_probability", "def counts_to_probs(some_dict, num):\n new_d = dict()\n for key in some_dict:\n value = some_dict[key]\n new_d[key] = value/num\n return new_d", "def compute_empirical_distribution(values):\n distribution = {}\n\n # 
-------------------------------------------------------------------------\n # YOUR CODE HERE\n #\n for value in values:\n if value not in distribution:\n distribution[value] = 1\n else:\n distribution[value] += 1\n \n total = len(values)\n for v in distribution.keys():\n distribution[v] /= total\n \n\n #\n # END OF YOUR CODE\n # -------------------------------------------------------------------------\n\n return distribution", "def question_1(patient):\n result = {}\n for disease, symptoms in patient.symptoms.iteritems():\n prob = calculate_probability(disease, symptoms)\n result[disease.name] = \"%.4f\" % prob\n return result" ]
[ "0.6266391", "0.6231039", "0.6222904", "0.6209188", "0.62075746", "0.60514987", "0.60154533", "0.59842813", "0.59767723", "0.5969421", "0.5939433", "0.58987904", "0.58945656", "0.58854735", "0.58664787", "0.5817961", "0.57828134", "0.5771365", "0.5759954", "0.5755793", "0.57386154", "0.57328147", "0.5705914", "0.5696651", "0.5670643", "0.5664535", "0.5656928", "0.56161195", "0.5610407", "0.5603261" ]
0.74976236
0
Determines whether the original record will be duplicated, and gets the maximum number of duplicated records to generate.
def expect_duplicate(self):
    # Reset everything for this record
    self._expect_duplicate = False
    self.__dupcntr = 0
    self.__maxdup = 0
    # Get the probability to generate duplicate for next record
    if self.fake.random.random() < self.duplicate_cfg["Prob_duplicate"]:
        self._expect_duplicate = True
        self.__maxdup = self.random_select_ndups()
    else:
        self._expect_duplicate = False
        self.__maxdup = 0
    self.__logger.debug("expect_duplicate ndups: %d", self.__maxdup)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_duplicate(self):\n return bool(self.duplicated)", "def isRepeated(self):\n return self._field.label == FieldDescriptor.LABEL_REPEATED", "def is_duplicate(self, **kwargs):\n return len(list(self.c.select(**kwargs))) > 0", "def process_duplicate_rows(self):\n pass", "def is_repetition(self):\n return self.id == 1", "def Get_dup_records(ds,key_var):\n temp = ds.groupby([key_var]).agg({key_var:'count'}).rename(columns={key_var:'Freq'}).reset_index()\n temp = temp[temp['Freq']>1]\n print(\"Total Duplicate records:: \" +str(temp.shape[0]))\n\n return temp", "def testDuplicate(self,permutations=True):\n # This algorithm is faster than encode,\n # but for nplex=2 enmagic2 would probably still be faster.\n if permutations:\n C = self.copy()\n C.sort(axis=1)\n else:\n C = self\n ind = sortByColumns(C)\n C = C.take(ind,axis=0)\n ok = (C != roll(C,1,axis=0)).any(axis=1)\n if not ok[0]: # all duplicates -> should result in one unique element\n ok[0] = True\n return ind,ok", "def test_add_dup(self):\n for i in range(3):\n self.datastore.save(self.trans)\n\n eq_(1, self.datastore._collection.count())", "def test_are_duplicates_length(self):\n rules = [\n pd.Series({\"A\": \"high\", \"B\": Bounds(lower=1, upper=1), \"C\": Bounds(lower=1, upper=1), \"Class\": \"apple\"},\n name=1),\n pd.Series({\"B\": Bounds(lower=1, upper=1), \"C\": Bounds(lower=1, upper=1),\n \"Class\": \"apple\"}, name=2)\n ]\n duplicate = _are_duplicates(rules[0], rules[1])\n self.assertTrue(duplicate is False)", "def test_duplicate_entries(self):", "def has_duplicates(l):\r\n return len(set(l)) < len(l)", "def duplicates_marked_reciprocally():\n ids = FRAMEWORKS_DF['CURATED-COFs ID'].str\n messages = []\n\n for _index, row in FRAMEWORKS_DF.iterrows():\n if row['Duplicate found'] != 'none':\n original_id = row['CURATED-COFs ID']\n duplicate_id = row['Duplicate found']\n duplicate_row = FRAMEWORKS_DF.loc[FRAMEWORKS_DF['CURATED-COFs ID'] == duplicate_id ]\n if not len(duplicate_row) == 1:\n messages.append(f'Found row without reciprocal duplicate mark:\\n{row}')\n\n duplicate_row_original_id = duplicate_row['Duplicate found'].values[0]\n if not duplicate_row['Duplicate found'].values[0] == original_id:\n messages.append(f'Duplicate row lists ID {duplicate_row_original_id}, expected {original_id}')\n\n if messages:\n print('\\n'.join(messages))\n sys.exit(1)\n\n print('Rows marked as duplicates go both ways.')", "def duplicate_record_check(cur):\n # get all created tables from db\n cur.execute(\"SELECT * FROM information_schema.tables WHERE table_schema='public'\")\n result = cur.fetchall()\n\n # create list of tables\n table_list = [table[2] for table in result]\n\n print('Checking tables for duplicate records...')\n\n # check each table for duplicates\n for table_name in table_list:\n cur.execute(f\"SELECT COUNT(*) FROM {table_name}\")\n row_count = cur.fetchall()\n cur.execute(f\"SELECT DISTINCT COUNT(*) FROM {table_name}\")\n distinct_count = cur.fetchall()\n if row_count[0][0] == distinct_count[0][0]:\n print(f\"GREAT, no duplicate records found in {table_name}!\")\n elif distinct_count[0][0] < row_count[0][0]:\n print(f\"WARNING, duplicate records found! 
{distinct_count[0][0]}\"\n f\"distinct record count is less than total record count of {row_count[0][0]}\")", "def is_unique(self, field):\n old_length = len(self.archive)\n self.archive.add(self.create_hash(field))\n return len(self.archive) > old_length", "def __numRecordsMoreThanMax(self, numRecords):\n return numRecords > self.maxRecordCount", "def check_no_duplicates(examples):\n return len(examples) == len(set(examples))", "def test_duplicated_gaitid(self):\n idaa_index = 6\n\n upload_program = program.ProgramUpload(idaa_program=self.idaa_json['value'][idaa_index]['fields'],\n msr_country_codes_list=msr_country_codes_list, msr_gaitid_list=msr_gaitid_list, duplicated_gaitids=self.duplicated_gaitids\n )\n\n self.assertFalse(upload_program.is_valid())\n self.assertTrue(upload_program.has_discrepancy('duplicate_gaitid'))", "def single_records(df,\n key_cols=['report_date', 'plant_id_eia', 'generator_id']):\n len_1 = len(df)\n len_2 = len(df.drop_duplicates(subset=key_cols))\n return bool(len_1 == len_2)", "def test_phonebook_with_duplicate_entries_is_inconsostent(self):\n self.phonebook.add(\"Bob\", \"12345\")\n self.phonebook.add(\"Mary\", \"12345\")\n self.assertFalse(self.phonebook.is_consistent())", "def dupable_matches_required(self):\n return 2", "def is_unique(self):\r\n return self._unique", "def get_duplicate_rows(df):\n\treturn df.duplicated().sum()", "def duplicate_and_unique_movies(dataset, index_):\r\n for row in dataset.values():\r\n \r\n key=row[index_]\r\n if key in review_max.keys():\r\n num=review_max[key]\r\n num+=1\r\n review_max[key]=num\r\n else:\r\n review_max[key]=1\r\n \r\n movies_clean=[num for num in review_max.values() if num>1]", "def check_duplicate(self, state):\n pass", "def _test_sampdup(t):\n return t.shape[1] != len(set(t.ids(axis='sample')))", "def test_identify_duplicates_4(self):\n\n ticket1 = ticket.ImportTicket()\n ticket1.id = 1\n ticket1.type = \"replace\"\n ticket1.phage_id = \"Trixie\"\n\n ticket2 = ticket.ImportTicket()\n ticket2.id = 2\n ticket2.type = \"replace\"\n ticket2.phage_id = \"Trixie\"\n\n null_set = set([\"none\"])\n list_of_tickets = [ticket1, ticket2]\n id_dupes, phage_id_dupes = \\\n tickets.identify_duplicates(list_of_tickets, null_set=null_set)\n with self.subTest():\n self.assertEqual(len(id_dupes), 0)\n with self.subTest():\n self.assertEqual(len(phage_id_dupes), 1)", "def _recalculate_ticket(self, ticket):\n ids = self._get_dups_recursively(ticket.id)\n\n dups = \", \".join([str(i) for i in ids])\n dup_count = len(ids)\n\n if ticket.values.get('dups', None) == dups \\\n and int(ticket.values.get('dup_count', '')) == dup_count:\n return False\n\n self.env.log.debug('Recalculated ticket %s with dups %s (%d)' % (\n ticket.id, dups, dup_count))\n\n ticket['dups'] = dups\n ticket['dup_count'] = str(dup_count)\n\n # delete fields if there are no dups\n if dup_count == 0:\n ticket['dups'] = None\n ticket['dup_count'] = None\n\n return True", "def _is_duplicated_rule(self, table_entry: TableEntry) -> bool:\n te_hash = _hash(table_entry)\n if te_hash in self.table_entries: # avoiding duplicated ipv4 forwarding rules\n return True", "def test_identify_duplicates_6(self):\n\n ticket1 = ticket.ImportTicket()\n ticket1.id = 1\n ticket1.type = \"replace\"\n ticket1.phage_id = \"Trixie\"\n\n ticket2 = ticket.ImportTicket()\n ticket2.id = 1\n ticket2.type = \"replace\"\n ticket2.phage_id = \"Trixie\"\n\n null_set = set([\"none\"])\n list_of_tickets = [ticket1, ticket2]\n id_dupes, phage_id_dupes = \\\n 
tickets.identify_duplicates(list_of_tickets, null_set=null_set)\n with self.subTest():\n self.assertEqual(len(id_dupes), 1)\n with self.subTest():\n self.assertEqual(len(phage_id_dupes), 1)", "def is_article_duplicate(cls, article):\n return cls.db.hkeys(\"article_map\").count(article.link) != 0" ]
[ "0.675923", "0.63455653", "0.6291391", "0.5996474", "0.59781253", "0.5973371", "0.59171003", "0.5903125", "0.5873092", "0.58724254", "0.5841418", "0.58190286", "0.5798888", "0.5789265", "0.5784065", "0.57638514", "0.5761442", "0.57307065", "0.5723805", "0.5719764", "0.57114965", "0.5710191", "0.569416", "0.5692535", "0.5682265", "0.5623792", "0.5603852", "0.55927265", "0.5586643", "0.558243" ]
0.7407912
0
Generate the predictions of the original model on training and validation datasets. The original model is also trained if train = True.
def generate_original_preds(train = True): x_train, y_train, x_val, y_val, id_to_word = load_data() model = create_original_model() if train: filepath="models/original.hdf5" checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max') callbacks_list = [checkpoint] model.fit(x_train, y_train, validation_data=(x_val, y_val),callbacks = callbacks_list, epochs=epochs, batch_size=batch_size) model.load_weights('./models/original.hdf5', by_name=True) pred_train = model.predict(x_train,verbose = 1, batch_size = 1000) pred_val = model.predict(x_val,verbose = 1, batch_size = 1000) if not train: print('The val accuracy is {}'.format(calculate_acc(pred_val,y_val))) print('The train accuracy is {}'.format(calculate_acc(pred_train,y_train))) np.save('data/pred_train.npy', pred_train) np.save('data/pred_val.npy', pred_val)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fit_predict(self):\n self.classifier = self.model\n self.classifier.fit(self.X_sample, self.y_sample)\n self.y_pred = self.classifier.predict(self.X_test)", "def get_predictions(fitted_model_filename):\n click.echo(\"Mode: predicting probabilities.\\n\")\n defaults = get_defaults()\n\n fitted_model_filename = add_extension(fitted_model_filename)\n fitted_model_path = os.path.join(defaults.OUTPUT.FITTED_MODELS_PATH, fitted_model_filename)\n new_options = [\"OUTPUT.FITTED_MODEL_PATH\", fitted_model_path]\n\n # boot_data = bootstrap(new_options, mode=\"internal_test\")\n # model = boot_data['model']\n #\n # X_test_int, y_test_int = boot_data['data']\n # internal_test_proba = model.predict_proba(X_test_int)\n # internal_test_proba = np.c_[y_test_int, internal_test_proba[:, 1]]\n\n boot_data = bootstrap(new_options, mode=\"external_test\")\n model = boot_data['model']\n X_test_ext, y_test_ext = boot_data['data']\n\n # fit scaler on train data and transform test data\n scaler = StandardScaler()\n X_train, y_train = load_data(defaults, which='train')\n\n numeric_cols = X_train.select_dtypes(include=np.float64).columns.tolist()\n scaler.fit(X_train[numeric_cols])\n X_test_ext.loc[:, numeric_cols] = scaler.transform(X_test_ext[numeric_cols])\n\n external_test_proba = model.predict_proba(X_test_ext)\n external_test_proba = np.c_[y_test_ext, external_test_proba[:, 1]]\n\n # internal_test_results_path = os.path.join(defaults.OUTPUT.PREDS_PATH, \"internal_test_preds.csv\")\n external_test_results_path = os.path.join(defaults.OUTPUT.PREDS_PATH,\n f\"external_test_preds_{fitted_model_filename.replace('.pkl', '')}.csv\")\n # pd.DataFrame(internal_test_proba, columns=['target', 'proba']).to_csv(internal_test_results_path, index=False)\n pd.DataFrame(external_test_proba, columns=['target', 'proba']).to_csv(external_test_results_path, index=False)", "def make_prediction(x_train, y_train, x_test, model):\n model.fit(x_train, y_train)\n y_predict = model.predict(x_test)\n return y_predict", "def predict(self):\n batch = get_predict_batch(1, num_rec_out=self.num_test_rec)\n self.g_model.test_batch(\n batch, self.global_step, num_rec_out=self.num_test_rec)", "def predict(self, test_batch_size=64, device='cuda', load=False, model_path=None, dataloader_num_workers=4, save_prediction=True):\n self.model.eval()\n self.device = device\n self.test_batch_size = test_batch_size\n if load:\n if model_path:\n self.load(model_path, device=self.device)\n else:\n model_path = os.path.join(path_checkpoints_dir, f\"{self.experiment_id}.pth\")\n print(f\"loaded model={model_path}\")\n self.load(model_path, device=self.device)\n if self.model is None:\n raise Exception(\"model cannot be None. 
Load or train the model before inference\")\n dataloader = self.data_module.get_test_dataloader(batch_size=self.test_batch_size, shuffle=False, num_workers=dataloader_num_workers)\n all_outputs = []\n tk0 = tqdm(enumerate(dataloader, 1), total=len(dataloader))\n for batch_id, data in tk0:\n for key, value in data.items():\n data[key] = value.to(self.device)\n # batch_outputs, batch_loss = self.model(**data)\n batch_outputs, batch_loss= self.validate_one_batch(data)\n all_outputs.append(batch_outputs.detach().cpu().numpy())\n predictions = np.concatenate(all_outputs, axis=0)\n if save_prediction:\n submission = pd.read_csv(path_sample_submission_file)\n assert submission.shape[0] == predictions.shape[0], \"unexpected behavior.code fix required\"\n submission.iloc[:, 1:] = predictions\n\n if not os.path.isdir(path_submissions_dir):\n os.mkdir(path_submissions_dir)\n submission.to_csv(os.path.join(path_submissions_dir, f\"{self.experiment_id}.csv\"), index=False)\n tk0.close()\n return predictions", "def _predict(self, test_dl: torch.utils.data.DataLoader) -> torch.Tensor:\n\n # Initialize an empty tensor to store the predicted output\n output = torch.tensor([]).to(cfg.training.device)\n # Set the model to evaluation mode (disables gradient computation and dropout)\n self.eval()\n # Disable gradient tracking for efficiency\n with torch.no_grad():\n # Iterate over the test data loader\n for x_batch in test_dl:\n # Move the batch to the appropriate device\n x_batch = x_batch.to(cfg.training.device)\n # Forward pass to obtain model predictions\n y_star = self.forward(x_batch)\n # Concatenate the predictions to the output tensor\n output = torch.cat((output, y_star), 0)\n\n # Return the tensor containing the predicted output\n return output", "def predict(model, X_testing):\n predictions = model.predict(X_testing)\n\n return predictions", "def predict(self):\n self.predicted_test_summary = []\n for step in xrange(0, self.test_size // self.test_batch_size):\n print 'Predicting Batch No.:', step\n offset = (step * self.test_batch_size) % self.test_size\n batch_data_fwd = self.X_tst_fwd[offset:(offset + self.test_batch_size), :].T\n batch_data_bwd = self.X_tst_bwd[offset:(offset + self.test_batch_size), :].T\n summary_test_out = self._predict_batch(batch_data_fwd, batch_data_bwd)\n self.predicted_test_summary.extend(summary_test_out)\n\n print 'Prediction Complete. 
Moving Forward..'\n\n # test answers\n self.test_review = self.X_tst_fwd\n self.predicted_test_summary = self.predicted_test_summary\n self.true_summary = self.Y_tst", "def predict(self): \n return self.model.predict(self.test_x)", "def test(self) -> None:\n\n self._predictions = self._lr.predict(self._X_test)", "def predict(self, x_test, y_test, model_path):\n tf.reset_default_graph()\n with tf.compat.v1.Session() as sess:\n saver = tf.compat.v1.train.import_meta_graph(model_path + \".meta\")\n saver.restore(sess, model_path)\n graph = tf.compat.v1.get_default_graph()\n x = graph.get_operation_by_name(\"x_input\").outputs[0]\n y = tf.compat.v1.get_collection(\"network_architecture\")[0]\n no_samples = x_test.shape[0]\n predictions = []\n n_iteration = no_samples // self.batch_size\n for step in range(n_iteration):\n x_batch, y_batch = get_batch_data(x_test, y_test, iter_step=step, batch_size=self.batch_size)\n preds = sess.run(y, feed_dict={x: x_batch})\n predictions.append(preds)\n return predictions", "def predict():\n\n predict_cfg = get_predict_args()\n device = get_device()\n print(device)\n\n # load checkpoint\n ckpt_path = find_ckpt_in_directory(predict_cfg.ckpt)\n ckpt = torch.load(ckpt_path, map_location=device)\n best_iter = ckpt[\"best_iter\"]\n cfg = ckpt[\"cfg\"]\n aspect = cfg[\"aspect\"]\n\n for k, v in cfg.items():\n print(\"{:20} : {:10}\".format(k, str(v)))\n\n eval_batch_size = 64\n\n print(\"Loading data\")\n dev_data = list(beer_reader(cfg[\"dev_path\"]))\n test_data = beer_annotations_reader(cfg[\"test_path\"], aspect=aspect)\n\n print(\"dev\", len(dev_data))\n print(\"test\", len(test_data))\n\n print(\"Loading pre-trained word embeddings\")\n vocab = Vocabulary()\n vectors = load_embeddings(cfg[\"embeddings\"], vocab) # required for vocab\n\n # build model\n model = build_model(cfg[\"model\"], vocab, cfg=cfg)\n\n # load parameters from checkpoint into model\n print(\"Loading saved model..\")\n model.load_state_dict(ckpt[\"state_dict\"])\n model.to(device)\n print(\"Done\")\n\n print(model)\n print_parameters(model)\n\n print(\"Evaluating\")\n dev_eval = evaluate_loss(\n model, dev_data, batch_size=eval_batch_size,\n device=device, cfg=cfg)\n test_eval = evaluate_loss(\n model, test_data, batch_size=eval_batch_size,\n device=device, cfg=cfg)\n\n if hasattr(model, \"z\"):\n path = os.path.join(\n cfg[\"save_path\"], \"final_rationales.txt\")\n test_precision, test_macro_prec = evaluate_rationale(\n model, test_data, aspect=aspect, device=device,\n batch_size=eval_batch_size, path=path)\n else:\n test_precision = 0.\n test_macro_prec = 0.\n test_eval[\"precision\"] = test_precision\n test_eval[\"macro_precision\"] = test_macro_prec\n\n dev_s = make_kv_string(dev_eval)\n test_s = make_kv_string(test_eval)\n\n print(\"best model iter {:d} dev {} test {}\".format(\n best_iter, dev_s, test_s))", "def fit_and_get_test_predictions(self, trace, tuning=True):\n pass", "def predict(self, data, verbose=0):\n\n if self.model is None:\n raise StandardError('Model is not built. Run build method or load model before fitting')\n return self.model.predict(data, 1, verbose) # ,steps)", "def predict(self, data, verbose=0):\n\n if self.model is None:\n raise StandardError('Model is not built. 
Run build method or load model before fitting')\n return self.model.predict(data, 1, verbose) # ,steps)", "def predict(self, test_dataset: Dataset) -> PredictionOutput:\n test_dataloader = self.get_test_dataloader(test_dataset)\n return self._prediction_loop(test_dataloader, description=\"Prediction\")", "def predict(self, model, x_test):\n pass", "def trainAndPredict(self):\r\n print(\"train\")\r\n filename= 'finalized_model.sav'\r\n # train the algorithm on training data and predict using the testing data\r\n model = self.svc_model.fit(self.X.T, self.Y)\r\n pickle.dump(model, open(filename, 'wb'))\r\n #model = pickle.load(open(filename, 'rb'))\r\n pred1 =model.predict(self.TestSet.T)\r\n # print the accuracy score of the model\r\n print(\"LinearSVC accuracy : \", accuracy_score(self.TestSetY, pred1, normalize=True))", "def predict(self, X_test):\n return self.model.predict(X_test)", "def _fit_predict(X_train, y_train, X_test):\n raise NotImplementedError()", "def predict(config: Config, device: torch.device, resume: Optional[ResumeInfo]) -> None:\n # pylint: disable=too-many-locals\n # Load datasets\n print(colored(\"loading training datasets:\", attrs=[\"bold\"]))\n dataset_factory = DatasetFactory()\n datasets, preprocessors = dataset_factory.create(config)\n print(f\"train: {len(datasets.train)}\")\n print(f\"val: {len(datasets.val)}\")\n print(f\"test: {len(datasets.test)}\")\n\n print(colored(\"saving question ids:\", attrs=[\"bold\"]))\n split_map = {\n \"train\": (config.training.data.train, datasets.train),\n \"val\": (config.training.data.val, datasets.val),\n \"test\": (config.training.data.test, datasets.test),\n }\n for split, (dataconfig, dataset) in split_map.items():\n root = Path(wandb.run.dir) / \"predictions\"\n if not root.exists():\n root.mkdir(parents=True)\n path = root / f\"{split}_ids.json\"\n start = int(dataconfig.subset[0] * len(dataset))\n end = int(dataconfig.subset[1] * len(dataset))\n subset = torch.utils.data.Subset(dataset, range(start, end))\n ids = [subset[i][\"question\"][\"questionId\"] for i in range(len(subset))]\n with open(path, \"w\") as file:\n json.dump(ids, file)\n\n # Create model runner\n print(colored(\"model:\", attrs=[\"bold\"]))\n runner_factory = RunnerFactory()\n runner = runner_factory.create(config, device, preprocessors, datasets, resume)\n print(f\"{runner.model=}\")\n\n print(colored(\"loading prediction datasets:\", attrs=[\"bold\"]))\n dataset_factory = DatasetFactory(training=False)\n datasets, pred_preprocessors = dataset_factory.create(config)\n print(f\"train: {len(datasets.train)}\")\n print(f\"val: {len(datasets.val)}\")\n print(f\"test: {len(datasets.test)}\")\n\n # Extend question embedding dictionary with pad vector for OOV.\n # The runner will check if a question token index is out of bounds and\n # set it to the padding index if so.\n runner.model.question_embeddings = torch.nn.Embedding.from_pretrained(\n torch.cat(\n (\n runner.model.question_embeddings.weight.data,\n torch.zeros(\n (\n len(pred_preprocessors.questions.index_to_word)\n - runner.model.question_embeddings.num_embeddings,\n runner.model.question_embeddings.embedding_dim,\n )\n ).to(device),\n ),\n dim=0,\n )\n )\n # Update datasets and preprocessors for prediction\n runner.datasets = datasets\n runner.preprocessors = pred_preprocessors\n\n print(colored(\"predicting:\", attrs=[\"bold\"]))\n runner.predict()", "def train_predict(descriptions_models,\n X_train, y_train,\n X_valid, y_valid,\n scoring=None):\n\n results = []\n for description, model in 
descriptions_models:\n\n scorer = check_scoring(model, scoring=scoring)\n result = {'description': description}\n\n # Train\n start = time.time()\n model.fit(X_train, y_train)\n result['time_train'] = time.time() - start\n\n # Predict train\n start = time.time()\n result['score_train'] = scorer(model, X_train, y_train)\n result['time_predict_train'] = time.time() - start\n\n # Predict validation\n start = time.time()\n result['score_valid'] = scorer(model, X_valid, y_valid)\n result['time_predict_valid'] = time.time() - start\n\n results.append(result)\n\n return pd.DataFrame(results)[[\n 'description', 'score_train', 'score_valid',\n 'time_train', 'time_predict_train', 'time_predict_valid']]", "def fit_and_predict(self, X_train, y_train, X_test, y_test):\n if self.feature_transform_func:\n X_train, X_test = self.feature_transform_func(X_train, X_test)\n\n self.fit(X_train, y_train)\n y_predict = self.predict(X_test)\n return self.Acu_eval(y_predict, y_test)", "def after_pred(self):\n # If training then skip\n if self.training:\n return\n\n # Get ground truths in epoch 0 i.e. start of training\n if self.epoch == 0:\n self.y_true.extend(self.y.cpu().flatten().numpy())\n\n # Get predictions from each batch and add them to prediction container\n y_pred = self.pred.detach().cpu()\n \n self.y_pred.extend(y_pred.flatten().numpy())", "def predict(self, test_data):\n return self.leader.predict(test_data)", "def make_predictions(self):\n if is_classification(self.model):\n if self.ct == None:\n prediction = self.model.predict(self.input_data.to_numpy())\n probabilities = self.model.predict_proba(self.input_data.to_numpy())\n return prediction, probabilities\n elif self.ct != None: \n prediction = self.model.predict(self.data_into_model())\n probabilities = self.model.predict_proba(self.data_into_model())\n return prediction, probabilities\n else:\n raise Exception((\"{} not supported. Please create an issue on Github\").format(self.model))\n \n else:\n if self.ct == None:\n prediction = self.model.predict(self.input_data)\n return prediction\n elif self.ct != None: \n prediction = self.model.predict(self.data_into_model())\n return prediction\n else:\n raise Exception((\"{} not supported. Please create an issue on Github\").format(self.self.model))", "def predict(self, test_data):\n random.seed(self.seed)\n preds = [{\"id\": instance['id'], \"prediction\": random.choice([0, 1])} for instance in test_data]\n return preds", "def make_predictions(self):\n \n self.Y = self.X.dot(self.w)", "def predict(self):\n path = self._artifact_repo.artifact_path(self._ARTIFACT_MODEL)\n model = tf.keras.models.load_model(path)\n\n _, _, x_test, y_test = self._load_data()\n x_test = tf.keras.utils.normalize(x_test, axis=1)\n\n preds = model.predict(x_test)\n self._show_cf_matrix(np.array([np.argmax(probas) for probas in preds]), y_test)", "def get_initial_predictions(tuner, input_data, output_path, model_save_name):\n\n best_model = tuner.best_estimator()\n batch_job = best_model.transformer(1, \"ml.m5.large\", output_path=output_path.as_uri(),\n model_name=model_save_name)\n batch_job.transform(input_data.as_uri())\n # TODO: Do an ls first so we can get any/all files\n output_file = output_path / 'validation.csv.out'\n with smart.open(output_file.as_uri(), 'r', transport_params={'session': boto_session}) as f:\n predictions = pd.read_csv(f, header=None)\n return predictions" ]
[ "0.6959041", "0.6904541", "0.6858206", "0.674883", "0.6679141", "0.65974444", "0.654767", "0.65204763", "0.64544606", "0.6438485", "0.6437152", "0.64298964", "0.6410104", "0.640449", "0.640449", "0.6369408", "0.63413376", "0.63313776", "0.62701774", "0.6262735", "0.6240748", "0.6231877", "0.62217706", "0.6215227", "0.62140906", "0.62039584", "0.6198473", "0.61929524", "0.6186982", "0.6182213" ]
0.7165115
0
The managed object reference ID of the root resource pool for the cluster.
def resource_pool_id(self) -> str: return pulumi.get(self, "resource_pool_id")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pool_id ( self ):\n return self._pool_id", "def managed_object_id(self):\n o = self._data[\"managed_object\"]\n if type(o) in (int, long):\n return o\n return o.id", "def identity_pool_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"identity_pool_id\")", "def elastic_pool_id(self) -> Optional[str]:\n return pulumi.get(self, \"elastic_pool_id\")", "def parent_cluster_resource_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"parent_cluster_resource_id\")", "def _pool_id(self, queue, project=None):\n return self._catalogue_ctrl.get(project, queue)['pool']", "def root_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"root_id\")", "def get_objectID(self):\n return self.resource.uuid", "def reference_id(self) -> str:\n return pulumi.get(self, \"reference_id\")", "def get_parentID(self):\n parent = Collection.find(self.resource.container)\n return parent.uuid", "def getId(self):\n return _libsbml.CompartmentReference_getId(self)", "def cluster_id(self) -> str:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> str:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> str:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> str:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> str:\n return pulumi.get(self, \"cluster_id\")", "def parent_cluster_resource_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"parent_cluster_resource_id\")", "def cluster_id(self):\n return self._cluster_id", "def obj_id(self) -> int:\n return int(self.index.split(\"/\")[-1]) if self.index else None", "def identity_pool_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"identity_pool_id\")", "def get_objectID(self):\n return self.collection.uuid", "def identity_pool_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"identity_pool_id\")", "def rootid(self):\n candidates = [nid for nid, attrs\n in self.graph.nodes.items()\n if attrs['type'] == 'root']\n \n if len(candidates) > 1:\n errmsg = self.name + ' has more than one root'\n raise ValueError(errmsg)\n\n if len(candidates) == 0:\n errmsg = self.name + ' has no root'\n raise ValueError(errmsg) \n \n return candidates[0]", "def central_node_id(self):\n if self._central_node_id is None:\n return self.nodes[0]\n else:\n return self._central_node_id", "def owner_id(self) -> int:\n return self.proto.owner", "def get_parentID(self):\n parent_path = self.collection.container\n if self.collection.is_root:\n parent_path = \"/\"\n parent = Collection.find(parent_path)\n return parent.uuid", "def resource_group_id(self) -> str:\n return pulumi.get(self, \"resource_group_id\")", "def resource_group_id(self) -> str:\n return pulumi.get(self, \"resource_group_id\")", "def resource_group_id(self) -> str:\n return pulumi.get(self, \"resource_group_id\")", "def resource_group_id(self) -> str:\n return pulumi.get(self, \"resource_group_id\")" ]
[ "0.71896243", "0.66001445", "0.65141094", "0.6471181", "0.6368429", "0.6354283", "0.634698", "0.6305942", "0.62388986", "0.62217504", "0.6184611", "0.61742735", "0.61742735", "0.61742735", "0.61742735", "0.61742735", "0.61529875", "0.6127469", "0.6123672", "0.61147434", "0.6111746", "0.6098793", "0.6090876", "0.60850084", "0.6024355", "0.60161644", "0.59966296", "0.59966296", "0.59966296", "0.59966296" ]
0.74713767
0
The `ComputeCluster` data source can be used to discover the ID of a cluster in vSphere. This is useful to fetch the ID of a cluster that you want to use for virtual machine placement via the `VirtualMachine` resource, allowing to specify the cluster's root resource pool directly versus using the alias available through the `ResourcePool` data source. > You may also wish to see the `ComputeCluster` resource for more information about clusters and how to managed the resource in this provider. Example Usage ```python import pulumi import pulumi_vsphere as vsphere datacenter = vsphere.get_datacenter(name="dc01") compute_cluster = vsphere.get_compute_cluster(name="cluster01", datacenter_id=datacenter.id) ```
def get_compute_cluster(datacenter_id: Optional[str] = None, name: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetComputeClusterResult: __args__ = dict() __args__['datacenterId'] = datacenter_id __args__['name'] = name opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts) __ret__ = pulumi.runtime.invoke('vsphere:index/getComputeCluster:getComputeCluster', __args__, opts=opts, typ=GetComputeClusterResult).value return AwaitableGetComputeClusterResult( datacenter_id=pulumi.get(__ret__, 'datacenter_id'), id=pulumi.get(__ret__, 'id'), name=pulumi.get(__ret__, 'name'), resource_pool_id=pulumi.get(__ret__, 'resource_pool_id'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_cluster_id(options):\n cluster = options.cluster\n datacenter = get_datacenter(options)\n for item in datacenter.hostFolder.childEntity:\n if (item.name == cluster):\n return item._GetMoId()", "def get_compute_cluster_output(datacenter_id: Optional[pulumi.Input[Optional[str]]] = None,\n name: Optional[pulumi.Input[str]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetComputeClusterResult]:\n ...", "def get_cluster(self,cluster_name,project_id=''):\n print( f'>>>>>>{self.project_id}')\n if project_id == '':\n project_id = self.project_id\n return self.get('{}/groups/{}/clusters/{}'.format(ApiVersion.A1.value,project_id,cluster_name))", "def get_cluster_id(self):\n cmd = \"svcinfo lscluster -delim :\"\n\n output = self._svc_command(cmd)[0]\n\n if len(output) != 2:\n return None\n\n header = output[0].split(':')\n values = output[1].split(':')\n index = header.index(SVC_CLUSTER_ID)\n cluster_id = values[index]\n return cluster_id", "def cluster_id(self) -> str:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> str:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> str:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> str:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> str:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"cluster_id\")", "def find_cluster(self, id: str) -> dto.Cluster:\n raise errors.UnsupportedOperationError(\n \"Operation not supported for provider '{}'\".format(self.provider_name)\n )", "def cluster_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cluster_id\")", "def clusters(self,project_id=os.environ.get(\"ATLAS_PROJECT\")):\n project_id = project_id if project_id != '' else self.__project_id\n return self.get('{}/groups/{}/clusters'.format(ApiVersion.A1.value,project_id))", "def cluster_id(self):\n return self._cluster_id", "def find_cluster(self, id):\n raise NotImplementedError", "def cluster_identifier(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cluster_identifier\")", "def find_kubernetes_cluster(self, id: str) -> dto.KubernetesCluster:\n raise errors.UnsupportedOperationError(\n \"Operation not supported for provider '{}'\".format(self.provider_name)\n )", "def show_cluster(name: str) -> Cluster:\n environment = EnvironmentProvider().environment\n return environment.clusters[name]", "def get_coe_cluster(self, name_or_id, filters=None):\n return _utils._get_entity(self, 'coe_cluster', name_or_id, filters)", "def lookup_cluster_by_name(cluster_name):\n cluster_root = get_cluster_root()\n if not cluster_root:\n print('Cannot get the root of the linked list of clusters')\n return\n cluster = None\n\n # lookup for the task associated with the id\n if cluster_root['cluster_']['name'].string() == cluster_name:\n cluster = 
cluster_root['cluster_'].address\n else:\n curr = cluster_root\n while True:\n curr = curr['next'].cast(uClusterDL_ptr_type)\n\n if curr['cluster_']['name'].string() == cluster_name:\n cluster = curr['cluster_'].address\n break\n\n if curr == cluster_root:\n break\n\n if not cluster:\n print(\n (\"Cannot find a cluster with the name: {}.\".format(cluster_name))\n )\n return cluster", "def cluster_myid(self, target_node: \"TargetNodesT\") -> ResponseT:\n return self.execute_command(\"CLUSTER MYID\", target_nodes=target_node)", "def show_vsan_cluster(self, cluster_id):\n url = \"clusters/%s\" % str(cluster_id)\n resp, body = self.get(url)\n body = json.loads(body)\n self.expected_success(200, resp.status)\n return service_client.ResponseBody(resp, body['cluster'])", "def data_center_id(self) -> str:\n return pulumi.get(self, \"data_center_id\")" ]
[ "0.70518404", "0.70052683", "0.67735595", "0.6768537", "0.6736824", "0.6736824", "0.6736824", "0.6736824", "0.6736824", "0.66624004", "0.66624004", "0.66624004", "0.66624004", "0.6613303", "0.660601", "0.660601", "0.6474703", "0.6474703", "0.6474703", "0.63924193", "0.63861185", "0.63270766", "0.6276351", "0.6269334", "0.62308496", "0.6221327", "0.62061673", "0.61876196", "0.6160735", "0.61367285" ]
0.72311264
0
Test addition for Complex with Complex, complex, int and float
def test_add(): z = Complex(1, -2) w = Complex(1, 1) assert (z + w) == Complex(2, -1) assert (z + (1+1j)) == Complex(2, -1) assert (z + 2) == Complex(3, -2) assert (z + 2.0) == Complex(3, -2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def complex_sum(c_1,c_2):\n return c_1 + c_2", "def _cmplx_add_ ( s , o ) :\n return o + complex ( s )", "def __add__(self, other):\n if isinstance(other, float) or isinstance(other, int):\n return Complex(self._reNum + other, self._imNum)\n if isinstance(other, complex):\n return Complex(self._reNum + other.real, self._imNum + other.imag)\n return Complex(self._reNum + other._reNum, self._imNum + other._imNum)", "def __add__(self,other):\n\t\treal = self.realPart + other.realPart\n\t\timaginary = self.imaginaryPart + other.imaginaryPart\n\n\t\t#create and return new complexnumber\n\t\treturn real,imaginary", "def __add__(self, other):\n self.sum_complex_num = Complex((self.real + other.real), (self.imaginary + other.imaginary))\n return self.sum_complex_num", "def test_op_add_scalar_complex(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1.0, 4711.0 * 1024, dtype=complex)\n s = complex(1.2, -1.3)\n\n old = numpy.empty_like(a)\n old[:] = a[:]\n expect = a + s\n\n offl_a = stream.bind(a)\n offl_r = offl_a + s\n offl_a.update_host()\n r = offl_r.update_host().array\n stream.sync()\n\n self.assertEqual(r.shape, a.shape)\n self.assertEqual(r.dtype, a.dtype)\n self.assertTrue((a == old).all(),\n \"Input array operand must not be modified: \"\n \"{0} should be {1}\".format(a, old))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))", "def test_op_iadd_scalar_complex(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1, 4711 * 1024, dtype=complex)\n s = complex(1.2, -1.3)\n\n old = numpy.empty_like(a)\n old[:] = a[:]\n expect = a + s\n\n offl_a = stream.bind(a)\n offl_a += s\n offl_a.update_host()\n r = offl_a.array\n stream.sync()\n\n self.assertTrue((r != old).all(),\n \"Input array operand must be modified: \"\n \"{0} should be {1}\".format(r, old))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))", "def _complex(real, imag):\n real = np.asarray(real)\n imag = np.asarray(imag)\n cplx = 1j * imag \n return cplx + real", "def complex(real, imag):", "def test_op_add_array_complex(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1.0, 4711.0 * 1024, dtype=complex)\n o = a + complex(1.2, -1.5)\n\n old_a = numpy.empty_like(a)\n old_o = numpy.empty_like(o)\n old_a[:] = a[:]\n old_o[:] = o[:]\n expect = a + o\n\n offl_a = stream.bind(a)\n offl_r = offl_a + o\n offl_a.update_host()\n r = offl_r.update_host().array\n stream.sync()\n\n self.assertEqual(r.shape, a.shape)\n self.assertEqual(r.dtype, a.dtype)\n self.assertTrue((a == old_a).all(),\n \"Input array operand 1 must not be modified: \"\n \"{0} should be {1}\".format(a, old_a))\n self.assertTrue((o == old_o).all(),\n \"Input array operand 2 must not be modified: \"\n \"{0} should be {1}\".format(o, old_o))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))", "def test_op_iadd_array_complex(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1, 4711 * 1024, dtype=complex)\n o = a + complex(1.2, -1.3)\n\n old_a = numpy.empty_like(a)\n old_o = numpy.empty_like(o)\n old_a[:] = a[:]\n old_o[:] = o[:]\n expect = a + o\n\n offl_a = stream.bind(a)\n offl_o = stream.bind(o)\n offl_a += o\n offl_a.update_host()\n r = offl_a.array\n stream.sync()\n\n 
self.assertTrue((r != old_a).all(),\n \"Array operand must be modified: \"\n \"{0} should be {1}\".format(a, old_a))\n self.assertTrue((o == old_o).all(),\n \"Input array operand 2 must not be modified: \"\n \"{0} should be {1}\".format(o, old_o))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))", "def test_complex():\n assert complex(Quantity(1, unit('m'))) == complex(1)", "def __radd__(self, other):\n other = _to_complex(other)\n return ComplexVal(other.r + self.r, other.i + self.i)", "def test_addition():\n assert calculator.add(7, 3) == 10\n assert calculator.add(7.0, 3.0) == 10.0\n assert calculator.add(7, -3) == 4\n assert calculator.add(7.0, -3.0) == 4.0", "def test_add_returns_correct_result(self):\n result = self.calc.add(2, 2)\n self.assertEqual(4, result)", "def __add__(self, value):\r\n if isinstance(value, (int, dec.Decimal)):\r\n return self.__class__(self._real + value, self._imag)\r\n elif isinstance(value, self.__class__):\r\n return self.__class__(self._real + value._real, self._imag + value._imag)\r\n raise TypeError(\r\n 'unsupported operand type(s) for +: {!r} and {!r}'.format(\r\n self.__class__.__name__, value.__class__.__name__\r\n )\r\n )", "def test_C_NotComplex(self):\n\t\tself.assertRaises(calc.NotComplexError, calc.it, M([[1 + 1j]]), 1, 10)", "def is_complex(num):\n try:\n complex(num)\n except Exception:\n return False\n return True", "def test_real(self):\n\n real = common_math.real\n\n self.assertTrue(real(3.75) + real(4.75) == real(8.5))\n self.assertTrue(real(2.5) * real(-1.5) == -real(3.75))\n\n pi_1 = to_real(real, Fraction(311, 99))\n pi_2 = to_real(real, Fraction(333, 106))\n pi_3 = to_real(real, Fraction(355, 113))\n\n self.assertTrue(pi_1 < pi_2)\n self.assertTrue(pi_2 < pi_3)", "def __iadd__(self, other):\n\n if isinstance(other, float):\n self.iadd_scalar(other)\n else:\n self.iadd(other)", "def add(self, number: float) -> float:\n if self.check_type_not_complex(number=number):\n self.__memory += number\n return self.__memory\n return self.__memory", "def test_add_float(self):\n self.assertAlmostEqual(cr.add(2.21, 4.7), 2.21 + 4.7, places=2)", "def test_iadd_with_float_argument(self):\n\n a = Vec3(2, 3, 4)\n b = 1.0\n\n a += b\n\n expected_result = Vec3(3, 4, 5)\n\n self.assertEqual(a, expected_result)", "def check_type_not_complex(cls, number: Number) -> None:\n if isinstance(number, complex):\n print(\"Calculator supports arithmetic only with integers\",\n \"and floats but not with complex numbers\")\n return False\n return True", "def __add__(self, other):\n if isinstance(other, complex):\n return Power(self.power + other, self.power_unit, self.freq, self.freq_unit)\n if self.power_unit != other.power_unit:\n raise ArithmeticError(f\"The objects' ohm units {self.power_unit} and {other.power_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n power_sum = self.power + other.power\n return Power(power_sum, self.power_unit, self.freq, self.freq_unit)", "def __mul__(self,other):\n\t\treal = (self.realPart * other.realPart) - (self.imaginaryPart * other.imaginaryPart)\n\t\timaginary = (self.realPart*other.imaginaryPart) + (self.imaginaryPart * other.realPart)\n\n\t\t# create and return 
complexNumber\n\t\treturn real,imaginary", "def add(self, x):\n if type(x) is int:\n self.real += x\n else:\n self.real = self.real + x.real\n self.imag = self.imag + x.imag", "def test_op_add_offload_array_complex(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1.0, 4711.0 * 1024, dtype=complex)\n o = a + complex(1.0, -1.3)\n\n old_a = numpy.empty_like(a)\n old_o = numpy.empty_like(o)\n old_a[:] = a[:]\n old_o[:] = o[:]\n expect = a + o\n\n offl_a = stream.bind(a)\n offl_o = stream.bind(o)\n offl_r = offl_a + offl_o\n offl_a.update_host()\n r = offl_r.update_host().array\n stream.sync()\n\n self.assertEqual(r.shape, a.shape)\n self.assertEqual(r.dtype, a.dtype)\n self.assertTrue((a == old_a).all(),\n \"Input array operand 1 must not be modified: \"\n \"{0} should be {1}\".format(a, old_a))\n self.assertTrue((o == old_o).all(),\n \"Input array operand 2 must not be modified: \"\n \"{0} should be {1}\".format(o, old_o))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))", "def __complex__(self) -> complex:\n return self._translate_in_type(complex, self.integer, self.float_num)", "def complex(real=0.0, imag=0.0):\n if imag == 0.0 and real == 0.0:\n return complex_zero" ]
[ "0.76475245", "0.7613455", "0.73216003", "0.7232131", "0.7204029", "0.7114866", "0.6915121", "0.69098693", "0.6900085", "0.6607692", "0.6509762", "0.6498399", "0.6357027", "0.6335295", "0.63045627", "0.6205945", "0.61870325", "0.6182034", "0.6174292", "0.6170315", "0.61585486", "0.6132917", "0.6125642", "0.6124113", "0.61066985", "0.60958374", "0.6086763", "0.6080302", "0.60616887", "0.6054502" ]
0.81614006
0
Test subtraction for Complex with Complex, complex, int and float
def test_sub(): z = Complex(1, -2) w = Complex(1, 1) assert (z - w) == Complex(0, -3) assert (z - (1+1j)) == Complex(0, -3) assert (z - 2) == Complex(-1, -2) assert (z - 2.0) == Complex(-1, -2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def complex_difference(c_1,c_2):\n return c_1 - c_2", "def _cmplx_sub_ ( s , o ) :\n return (-o ) + complex ( s )", "def complex(real, imag):", "def __sub__(self,other):\n\t\treal = self.realPart - other.realPart\n\t\timaginary = self.imaginaryPart - other.imaginaryPart\n\n\t\t#create and return complexNumber\n\t\treturn real,imaginary", "def test_op_sub_scalar_complex(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1.0, 4711.0 * 1024, dtype=complex)\n s = complex(1.3, 1.3)\n\n old = numpy.empty_like(a)\n old[:] = a[:]\n expect = a - s\n\n offl_a = stream.bind(a)\n offl_r = offl_a - s\n offl_a.update_host()\n r = offl_r.update_host().array\n stream.sync()\n\n self.assertEqual(r.shape, a.shape)\n self.assertEqual(r.dtype, a.dtype)\n self.assertTrue((a == old).all(),\n \"Input array operand must not be modified: \"\n \"{0} should be {1}\".format(a, old))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))", "def _cmplx_rsub_ ( s , o ) :\n return o - complex ( s )", "def __complex__(self) -> complex:\n return self._translate_in_type(complex, self.integer, self.float_num)", "def test_complex():\n assert complex(Quantity(1, unit('m'))) == complex(1)", "def test_add():\n z = Complex(1, -2)\n w = Complex(1, 1)\n assert (z + w) == Complex(2, -1)\n assert (z + (1+1j)) == Complex(2, -1)\n assert (z + 2) == Complex(3, -2)\n assert (z + 2.0) == Complex(3, -2)", "def test_op_isub_scalar_complex(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1, 4711 * 1024, dtype=complex)\n s = complex(1.2, -1.3)\n\n old = numpy.empty_like(a)\n old[:] = a[:]\n expect = a - s\n\n offl_a = stream.bind(a)\n offl_a -= s\n offl_a.update_host()\n r = offl_a.array\n stream.sync()\n\n self.assertTrue((r != old).all(),\n \"Input array operand must be modified: \"\n \"{0} should be {1}\".format(r, old))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))", "def test_subtraction():\n assert calculator.subtract(7, 3) == 4\n assert calculator.subtract(7.0, 3.0) == 4.0\n assert calculator.subtract(7, -3) == 10\n assert calculator.subtract(7.0, -3.0) == 10.0", "def complex_sum(c_1,c_2):\n return c_1 + c_2", "def test_C_NotComplex(self):\n\t\tself.assertRaises(calc.NotComplexError, calc.it, M([[1 + 1j]]), 1, 10)", "def complex_check(*args, func=None):\n func = func or inspect.stack()[2][3]\n for var in args:\n if not isinstance(var, numbers.Complex):\n name = type(var).__name__\n raise ComplexError(\n 'Function {} expected complex number, {} got instead.'.format(func, name))", "def test_op_sub_array_complex(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1.0, 4711.0 * 1024, dtype=complex)\n o = a + complex(1.3, -1.4)\n\n old_a = numpy.empty_like(a)\n old_o = numpy.empty_like(o)\n old_a[:] = a[:]\n old_o[:] = o[:]\n expect = a - o\n\n offl_a = stream.bind(a)\n offl_r = offl_a - o\n offl_a.update_host()\n r = offl_r.update_host().array\n stream.sync()\n\n self.assertEqual(r.shape, a.shape)\n self.assertEqual(r.dtype, a.dtype)\n self.assertTrue((a == old_a).all(),\n \"Input array operand 1 must not be modified: \"\n \"{0} should be {1}\".format(a, old_a))\n self.assertTrue((o == old_o).all(),\n \"Input array operand 2 must not be modified: \"\n \"{0} should be {1}\".format(o, old_o))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected 
values: \"\n \"{0} should be {1}\".format(r, expect))", "def check_type_not_complex(cls, number: Number) -> None:\n if isinstance(number, complex):\n print(\"Calculator supports arithmetic only with integers\",\n \"and floats but not with complex numbers\")\n return False\n return True", "def test_sub_with_float_arg(self):\n\n a = Vec3(7, 8, 9)\n b = 5.0\n\n result = a - b\n\n expected_result = Vec3(2, 3, 4)\n\n self.assertEqual(result, expected_result)", "def complex(real=0.0, imag=0.0):\n if imag == 0.0 and real == 0.0:\n return complex_zero", "def is_complex(num):\n try:\n complex(num)\n except Exception:\n return False\n return True", "def test_mul():\n z = Complex(1, -2)\n v = Complex(2, 2)\n assert z*v == Complex(6, -2)\n assert v*z == z*v\n assert z*2 == Complex(2, -4)\n assert z*2.0 == Complex(2, -4)\n assert z*(2+2j) == v*z", "def complex_magnitude(c):\n return (c * c.conjugate()) ** 0.5", "def complex(self, real=0.0, imag=0.0):\n if imag == 0.0 and real == 0.0:\n return complex_zero", "def test_Z_NotComplex(self):\n\t\tself.assertRaises(calc.NotComplexError, calc.it, M([[1]]), 0 + 0j, 10)", "def complex_derivative ( fun , z , h = 0 , I = 3 , err = False , real = True , imag = True ) :\n \n Z = complex ( z )\n \n X = Z.real\n Y = Z.imag\n\n ## few altenatives to calculate the real and imaginary part\n \n if real :\n UX = lambda x : complex ( fun ( complex ( x , Y ) ) ).real\n ## Real part \n re = derivative ( UX , X , h = h , I = I , err = err )\n else :\n VY = lambda y : complex ( fun ( complex ( X , y ) ) ).imag \n ## Real part \n re = derivative ( VY , Y , h = h , I = I , err = err )\n\n if imag : \n VX = lambda x : complex ( fun ( complex ( x , Y ) ) ).imag \n ## Imaginary part \n im = derivative ( VX , X , h = h , I = I , err = err )\n else :\n UY = lambda y : complex ( fun ( complex ( X , y ) ) ).real\n ## Imaginary part \n im = -derivative ( UY , Y , h = h , I = I , err = err )\n \n if not err : return complex ( re , im )\n \n result = complex ( re.value() , im.value() )\n error = ( re.cov2() + im.cov2() ) ** 0.5 \n \n return result , error", "def test_op_isub_array_complex(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1, 4711 * 1024, dtype=complex)\n o = a + complex(1.2, -1.3)\n\n old_a = numpy.empty_like(a)\n old_o = numpy.empty_like(o)\n old_a[:] = a[:]\n old_o[:] = o[:]\n expect = a - o\n\n offl_a = stream.bind(a)\n offl_o = stream.bind(o)\n offl_a -= o\n offl_a.update_host()\n r = offl_a.array\n stream.sync()\n\n self.assertTrue((r != old_a).all(),\n \"Array operand must be modified: \"\n \"{0} should be {1}\".format(a, old_a))\n self.assertTrue((o == old_o).all(),\n \"Input array operand 2 must not be modified: \"\n \"{0} should be {1}\".format(o, old_o))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))", "def _cmplx_to_complex_ ( s ) :\n return complex ( s.real , s.imag )", "def test_isub_with_float_argument(self):\n\n a = Vec3(2, 3, 4)\n b = 1.0\n\n a -= b\n\n expected_result = Vec3(1, 2, 3)\n\n self.assertEqual(a, expected_result)", "def _normalizeComplex(data):\n if hasattr(data, \"dtype\"):\n isComplex = numpy.issubdtype(data.dtype, numpy.complexfloating)\n else:\n isComplex = isinstance(data, numbers.Complex)\n if isComplex:\n data = numpy.absolute(data)\n return data", "def _cmplx_add_ ( s , o ) :\n return o + complex ( s )", "def __neg__(self):\n return Complex(-self._reNum, -self._imNum)" ]
[ "0.73002094", "0.69852185", "0.6779295", "0.6768346", "0.6679916", "0.6596779", "0.6549099", "0.64673406", "0.6422183", "0.6415886", "0.63986313", "0.628705", "0.6265425", "0.62318534", "0.61975026", "0.6182748", "0.6176905", "0.6081503", "0.60485506", "0.6047534", "0.6038145", "0.59815145", "0.5964033", "0.5960958", "0.5949088", "0.5947505", "0.5940247", "0.5926241", "0.5911554", "0.5905094" ]
0.75719965
0
Compute LDA model & find perplexity, save topics list for coherence calc
def lda_models(doc_term_matrix, n_topics, vectorizer, rand_start): perplexity_values = [] lda_time = [] topics_list = [] i = rand_start for num_topics in n_topics: # create model t1 = time.time() lda_model = LatentDirichletAllocation(n_components=num_topics, doc_topic_prior = 1/num_topics, topic_word_prior=0.1, n_jobs=39, random_state = i) lda_model.fit_transform(doc_term_matrix) t2 = time.time() lda_time.append(t2-t1) print(f" Model time: {t2-t1}", flush = True) # compute perplexity perplexity_values.append(lda_model.bound_) # create list of topics topics = list_topics(lda_model.components_, vectorizer, top_n=10) topics_list.append(topics) # output completion message i = i+1 print('Number of topics =', num_topics, "complete.", flush = True) return perplexity_values, lda_time, topics_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fit_lda_model(self):\n self.id2word = corpora.Dictionary(self.documents)\n self.id2word.filter_extremes(no_below=20, no_above=0.5)\n corpus = [self.id2word.doc2bow(text) for text in self.documents]\n coherence_c_v = []\n coherence_u_mass = []\n print(\"Fitting models\")\n for num_topics in range(self.min_topics, self.max_topics, self.step):\n lda_model = gensim.models.LdaMulticore(corpus=corpus, id2word=self.id2word, num_topics=num_topics,\n random_state=100, chunksize=100, passes=20,\n per_word_topics=True, minimum_probability=0)\n if not os.path.exists(f\"data/intermediate/optimal_testing\"):\n os.mkdir(f\"data/intermediate/optimal_testing\")\n with open(f\"data/intermediate/optimal_testing/lda_model_{num_topics}_topics.pkl\", \"wb\") as file_out:\n pickle.dump(lda_model, file_out)\n coherence_model_lda = CoherenceModel(model=lda_model, texts=self.documents, dictionary=self.id2word,\n coherence='c_v')\n coherence = coherence_model_lda.get_coherence()\n print(f\"Topic {num_topics} coherence: {coherence}\")\n coherence_c_v.append(coherence)\n coherence_model_lda = CoherenceModel(model=lda_model, texts=self.documents, dictionary=self.id2word,\n coherence='u_mass')\n coherence_u_mass.append(coherence_model_lda.get_coherence())\n return coherence_c_v, coherence_u_mass", "def fit_lda_model(self):\n self.id2word = corpora.Dictionary(self.documents)\n self.id2word.filter_extremes(no_below=20, no_above=0.5)\n corpus = [self.id2word.doc2bow(text) for text in self.documents]\n alpha = list(np.arange(0.1, 1, 0.3))\n alpha.append(\"symmetric\")\n beta = copy.deepcopy(alpha)\n alpha.append(\"asymmetric\")\n corpus_sets = [gensim.utils.ClippedCorpus(corpus, int(len(corpus) * 0.75)), corpus]\n corpus_titles = [\"75% corpus\", \"100% corpus\"]\n model_results = {\"Validation_set\": [], \"Topics\": [], \"Alpha\": [], \"Beta\": [], \"Coherence\": []}\n print(\"Fitting models\")\n for i, corpus_set in enumerate(corpus_sets):\n for num_topics in self.topics_to_test:\n for a in alpha:\n for b in beta:\n lda_model = gensim.models.LdaMulticore(corpus=corpus_set, id2word=self.id2word, alpha=a,\n random_state=100, chunksize=100, passes=20,\n num_topics=num_topics,\n per_word_topics=True, minimum_probability=0, eta=b)\n if i == 1: # we only want to save the model if it's a model on the whole corpus\n if not os.path.exists(f\"data/intermediate/hyperparameter_testing\"):\n os.mkdir(f\"data/intermediate/hyperparameter_testing\")\n with open(f\"data/intermediate/hyperparameter_testing/lda_{num_topics}_\"\n f\"topics{a}_alpha_{b}_eta.pkl\", \"wb\") as file_out:\n pickle.dump(lda_model, file_out)\n coherence_model_lda = CoherenceModel(model=lda_model, texts=self.documents,\n dictionary=self.id2word, coherence='c_v')\n coherence = coherence_model_lda.get_coherence()\n print(f\"Topic {num_topics}, alpha {a} eta {b} corpus {corpus_titles[i]} coherence: {coherence}\")\n model_results['Validation_set'].append(corpus_titles[i])\n model_results['Topics'].append(num_topics)\n model_results['Alpha'].append(a)\n model_results['Beta'].append(b)\n model_results['Coherence'].append(coherence)\n pd.DataFrame(model_results).to_csv(\"hyperparamter_tuning_results.csv\", index=False)", "def _lda(self):\n self.ldamodel = gensim.models.ldamodel.LdaModel(self.gensim_corpus, \n num_topics=self.n_topics, \n id2word=self.id_map, \n passes=self.n_passes,\n random_state=42)\n \n self.topic_matrix = self.ldamodel.print_topics(num_topics=self.n_topics, \n num_words=self.n_words)", "def train_lda_topic_model_with_mallet(texts, path_mallet,\n 
terms_to_remove=[], num_topics=50,\n no_below=10, no_above=0.9,\n scoring=False, start=2, step=3):\n preprocessed_corpus = []\n print ('training of gensim corpus began')\n for i, text in enumerate(texts):\n if i == 0:\n # todo filter here\n text = text.split()\n\n # Additional filtering steps #\n \"\"\"\n filtered_text = [word for word in text if (word[0] in\n string.ascii_uppercase + string.ascii_lowercase)]\n\n filtered_text = [word for word in filtered_text if\n (word not in set(stopwords.words('english')))]\n preprocessed_corpus.append(filtered_text)\n \"\"\"\n\n dct = initialize_gensim_dictionary([text])\n else:\n text = text.split()\n # Additional filtering steps\n\n \"\"\"\n filtered_text = [word for word in text if (word[0] in\n string.ascii_uppercase + string.ascii_lowercase)]\n\n filtered_text = [word for word in filtered_text if\n (word not in set(stopwords.words('english')))]\n preprocessed_corpus.append(filtered_text)\n \"\"\"\n add_documents_to_gensim_dictionary(dct, [text])\n # todo:this is to be integrated to the building process\n\n if len(terms_to_remove) > 0:\n for term in terms_to_remove:\n dct.filter_tokens(bad_ids=[dct.token2id[term]])\n\n dct.filter_extremes(no_below=no_below, no_above=no_above)\n\n gensim_corpus = [dct.doc2bow(bag_of_word.split()) for bag_of_word in texts]\n print ('gensim corpus done')\n if scoring:\n\n coherence_values = []\n\n for n in range(start, num_topics, step):\n\n lda = LdaMallet(constants.PATH_TO_MALLET,\n gensim_corpus, id2word=dct,\n num_topics=n)\n coherencemodel = CoherenceModel(model=lda,\n texts=preprocessed_corpus,\n dictionary=dct, coherence='c_v')\n coherence_values.append(coherencemodel.get_coherence())\n\n return coherence_values\n\n else:\n lda = LdaMallet(constants.PATH_TO_MALLET, gensim_corpus,\n id2word=dct, num_topics=num_topics)\n # Visualize LDA results, poor results obtained.\n # from gensim.models.wrappers import ldamallet\n # lda_model = ldamallet.malletmodel2ldamodel(lda)\n # vis = pyLDAvis.gensim.prepare(lda_model, gensim_corpus, dct)\n # pyLDAvis.save_html(vis , 'test.html')\n return {'model': lda, 'corpus': gensim_corpus}", "def build_model(num_topics=30):\n data = utils.read_wiki(\"wiki.train.tokens\")\n\n # preprocessing: remove too frequent words, stopwords ...\n logger.info(\"Start preprocessing, this will take quite some time ...\")\n list_of_tokens, bigrams = preprocess(data)\n\n id2word = corpora.Dictionary(list_of_tokens)\n id2word.filter_extremes(no_below=5, no_above=0.6, keep_n=VOCAB_SIZE)\n logger.info(f\"Done processing dataset len, vocab len {len(id2word.keys())}, {len(list_of_tokens)}\")\n \n # convert data into df vectors\n corpus = [id2word.doc2bow(tokens) for tokens in list_of_tokens]\n\n for num_topics in range(10, 100, 6):\n lda_model = LdaModel(corpus, num_topics=num_topics,\n id2word=id2word,\n passes=20,\n iterations=400,\n # alpha=[0.01]*num_topics,\n alpha=\"auto\",\n # eta=[0.01] * VOCAB_SIZE,\n eta=\"auto\")\n \n # save the model\n path = pathlib.Path(f\"{SAVING_DIR}/lda_topic_{num_topics}\")\n path.mkdir(parents=True, exist_ok=True)\n path = path / \"lda.model\"\n lda_model.save(str(path.absolute()))\n id2word.save(UNIGRAM_FILE)\n bigrams.save(BIGRAM_FILE)\n\n # visualize topics by LDAviz\n vis = gensimvis.prepare(topic_model=lda_model, corpus=corpus, dictionary=id2word)\n pathlib.Path(\"lda_vizs\").mkdir(parents=True, exist_ok=True)\n pyLDAvis.save_html(vis, f'lda_vizs/lda_visualization_{num_topics}.html')\n return id2word, bigrams, lda_model", "def modelOpti(corpus, dictionary, limit, 
start=2, step=2):\n cohVals = []\n modelList = []\n for num_topics in range(start, limit, step):\n model = gensim.models.LdaMulticore(corpus, num_topics = num_topics, id2word = dictionary, chunksize = 700, passes = 15, workers = 8, eval_every = None)\n modelList.append(model)\n cohLDA = CoherenceModel(model = model, corpus = corpus, dictionary = dictionary, coherence = 'u_mass', processes = 8)\n cohVals.append(cohLDA.get_coherence())\n \n return modelList, cohVals", "def f(DATA_LINK, DATA_COLUMN_NAME, STOPWORD_CHOICE, STOPWORD_LINK, NGRAM_CHOICE,NGRAM_NUM, TestData,topic_number_user,fetchArray):\r\n data = pd.read_csv(DATA_LINK)\r\n df=data[DATA_COLUMN_NAME]\r\n ######################################################################\r\n if (STOPWORD_CHOICE):\r\n stopwords=prepare_stopwords(STOPWORD_LINK)\r\n else:\r\n stopwords=prepare_stopwords(link='stopwords.csv')\r\n ######################################################################\r\n\r\n df=clean(df)\r\n\r\n processed_docs = []\r\n\r\n for doc in df:\r\n processed_docs.append(preprocess(doc,stopwords))\r\n ############################################################################\r\n if NGRAM_CHOICE:\r\n ngram=[]\r\n ngram_mod=[]\r\n for i in range(NGRAM_NUM):\r\n if(i==0):\r\n ngram.append(gensim.models.Phrases(processed_docs[0:10000], min_count=5, threshold=100)) # higher threshold fewer phrases\r\n else:\r\n ngram.append(gensim.models.Phrases(ngram[i-1][processed_docs[0:10000]], min_count=5, threshold=100)) # higher threshold fewer phrases\r\n ngram_mod.append(gensim.models.phrases.Phraser(ngram[i]))\r\n \r\n ###########################################################################\r\n\r\n ################################################################################\r\n if NGRAM_CHOICE:\r\n # Form Ngrams\r\n data_words_ngrams = make_ngrams(processed_docs,NGRAM_NUM,ngram_mod)\r\n\r\n # Do lemmatization keeping only noun, adj, vb, adv\r\n data_lemmatized=[]\r\n for i in range(len(data_words_ngrams)):\r\n data_lemmatized.append(lemmatization(data_words_ngrams[i]))\r\n else:\r\n data_lemmatized=processed_docs\r\n ################################################################################\r\n \r\n\r\n dictionary = gensim.corpora.Dictionary(data_lemmatized)\r\n\r\n dictionary.filter_extremes(no_below=15, no_above=0.1, keep_n= 100000)\r\n\r\n bow_corpus = [dictionary.doc2bow(doc) for doc in data_lemmatized]\r\n\r\n lda_model = gensim.models.LdaMulticore(bow_corpus, \r\n num_topics = topic_number_user, \r\n id2word = dictionary, \r\n passes = 10, workers = 2)\r\n\r\n for idx, topic in lda_model.print_topics(-1):\r\n print(\"Topic: {} \\nWords: {}\".format(idx, topic ))\r\n print(\"\\n\")\r\n lda_model.save('turk_lda.gensim')\r\n\r\n unseen_document = TestData\r\n\r\n rx = re.compile('\\W+')\r\n unseen_document = rx.sub(' ', unseen_document).strip()\r\n\r\n\r\n # Data preprocessing step for the unseen document\r\n bow_vector = dictionary.doc2bow(preprocess(unseen_document,stopwords))\r\n\r\n topics = []\r\n for index, score in sorted(lda_model[bow_vector], key=lambda tup: -1*tup[1]):\r\n print(\"Score: {}\\t Topic: {}\".format(score, lda_model.print_topic(index, 5)))\r\n # rslt = result(str(score), str(lda.print_topic(index,5)))\r\n rslt = result(str(score), str(re.findall('\"([^\"]*)\"', str(lda_model.print_topic(index,5)))))\r\n topics.append(rslt)\r\n\r\n fetchArray.put(topics)", "def main(self, words_docs, cleaned_sentences, lang, model_dir, number_of_clusters, embedding_model, model_id):\n\t\ttry:\n\t\t\tif 
embedding_model == \"tfidf\": text_vector = self.create_tfidf_vectors(cleaned_sentences)\n\t\t\telif embedding_model == \"word2vec\": text_vector = self.create_w2v_vectors(words_docs)\n\t\t\tmodel, pred_dict = self.train_model(cleaned_sentences, text_vector, number_of_clusters, lang, model_id, model_dir)\n\t\t\tdf_dominant_topic = self.evaulate_clusters(pred_dict, model_dir)\n\n\t\texcept Exception as e:\n\t\t\tprint(\"\\n Error in main : \",e)\n\t\t\tprint(\"\\n Error details : \", traceback.format_exc())\n\n\t\treturn df_dominant_topic", "def maximization_step(self, number_of_topics, verbose):\n if verbose:\n print(\"M step:\")\n\n self.topic_word_prob = np.zeros((number_of_topics, len(self.vocabulary)))\n self.topic_word_prob_collection_specific = []\n\n for k in range(self.number_of_collections):\n topic_word_prob_collection_specific = np.zeros((number_of_topics, len(self.vocabulary)))\n for i in range(self.number_of_documents):\n # update P(w | z)\n\n # ############################\n\n self.topic_word_prob = np.add(self.topic_word_prob,\n np.transpose(np.multiply(np.multiply(np.multiply(self.term_doc_matrix[k][i], 1 - self.topic_prob_B[k][i]), self.topic_prob_j[k][i]), self.topic_prob_C[k][i])))\n\n topic_word_prob_collection_specific = np.add(self.topic_word_prob,\n np.transpose(np.multiply(np.multiply(np.multiply(self.term_doc_matrix[k][i], 1 - self.topic_prob_B[k][i]), self.topic_prob_j[k][i]), 1 - self.topic_prob_C[k][i])))\n\n # update P(z | d)\n\n # ############################\n\n matrix = np.dot(np.transpose(self.term_doc_matrix[k][i]), self.topic_prob_j[k][i])\n self.document_topic_prob[k][i] = normalize_row(matrix)\n topic_word_prob_collection_specific = normalize_row(topic_word_prob_collection_specific)\n self.topic_word_prob_collection_specific.append(topic_word_prob_collection_specific)\n\n self.topic_word_prob = normalize_row(self.topic_word_prob)\n\n #print(\"pi:\")\n #print(self.document_topic_prob)\n #print(\"p(w|theta):\")\n #print(self.topic_word_prob)", "def optimize(self):\n scores = []\n n_topics = np.arange(self.topic_range[0], self.topic_range[1]+1)\n print('Running optimization with topic range from {0} to {1}'.format(\n self.topic_range[0],self.topic_range[1]))\n self._preproc()\n\n # Perform LDA for topic_range\n for n in n_topics:\n self.n_topics = n\n self._lda()\n if self.verbose:\n print('LDA completed for {0} topics.'.format(n))\n self._evaluate()\n scores.append(self.score)\n \n # Visualize results\n print('Optimization completed, plotting results...')\n fig1, ax1 = plt.subplots()\n ax1.plot(n_topics, np.asarray(scores))\n ax1.set_title('Coherence for topic range from {0} to {1}'.format(\n self.topic_range[0], self.topic_range[1]), fontsize= 16)\n ax1.set_xlabel('n_topics')\n ax1.set_ylabel('score')\n ax1.set_xticks(n_topics)\n plt.show()", "def model(self, doc_list=None):\r\n\r\n # eta => prior for the per-topic word distribution\r\n eta = torch.ones(self.V)\r\n\r\n with pyro.plate(\"topics\", self.K):\r\n\r\n # Beta => per topic word distribution\r\n Beta = pyro.sample(f\"beta\", dist.Dirichlet(eta))\r\n\r\n # alpha => prior for the per-doc topic vector\r\n alpha = torch.ones(self.K) / self.K\r\n\r\n X_List, Theta = [], []\r\n for d in pyro.plate(\"documents\", self.D, subsample_size=self.S):\r\n\r\n # theta => per-doc topic vector\r\n theta = pyro.sample(f\"theta_{d}\", dist.Dirichlet(alpha))\r\n\r\n doc = None if doc_list is None else doc_list[d]\r\n\r\n with pyro.plate(f\"words_{d}\", self.N[d]):\r\n\r\n # assign a topic\r\n z_assignment = 
pyro.sample(\r\n f\"z_assignment_{d}\",\r\n dist.Categorical(theta)\r\n )\r\n\r\n # from that topic vec, select a word\r\n X = pyro.sample(\r\n f\"w_{d}\",\r\n dist.Categorical(Beta[z_assignment]),\r\n obs=doc\r\n )\r\n\r\n X_List.append(X)\r\n Theta.append(theta)\r\n\r\n Theta = torch.stack(Theta)\r\n\r\n return X_List, Beta, Theta", "def generate_lda_model(self, B, topics, beta=0.01):\n # Suggestions from plda:\n # https://code.google.com/p/plda/wiki/PLDAQuickStart\n alpha = 50/float(topics)\n\n # Se o modelo LDA ainda nao existe para o fold, inferi-lo\n path_base = './exp/'\n path_base_lda = '%slda/' % path_base\n path_test_data = '%stest_data.txt' % (path_base_lda)\n\n m, n = B.shape\n try:\n os.makedirs(path_base_lda)\n except OSError:\n pass\n\n results = []\n for user in range(1, m):\n attractions = list(B[user].nonzero()[0])\n attractions.append(\"\") # para colocar o ultimo 1\n attractions = [ str(a) for a in attractions ]\n results.append(\" 1 \".join(attractions))\n\n a = open(path_test_data, 'w')\n a.write(\"\\n\".join(results))\n a.close()\n\n comando_modelo = (\n '../plda/lda --num_topics %(topics)s --alpha %(alpha)s --beta %(beta)s '\n '--training_data_file %(path)stest_data.txt '\n '--model_file %(path)slda_model.txt --burn_in_iterations 250 '\n '--total_iterations 300'\n ) % {'path': path_base_lda, 'beta': beta, 'alpha': alpha, 'topics': topics}\n\n print comando_modelo\n output = subprocess.check_output(shlex.split(comando_modelo),\n stderr=subprocess.STDOUT)\n print output\n\n comando_inferencia = (\n '../plda/infer --alpha %(alpha)s --beta %(beta)s '\n '--inference_data_file %(path)stest_data.txt '\n '--inference_result_file %(path)sinference_result.txt '\n '--model_file %(path)slda_model.txt --total_iterations 300 '\n '--burn_in_iterations 250'\n ) % {'path': path_base_lda, 'beta': beta, 'alpha': alpha}\n\n print comando_inferencia\n output = subprocess.check_output(shlex.split(comando_inferencia),\n stderr=subprocess.STDOUT)\n print output\n\n # Handling LDA for attractions\n lda_attractions = {}\n for l in open('%slda_model.txt' % path_base_lda):\n attraction, data = l.split('\\t')\n data = data.split(' ')\n data = [ Decimal(d) for d in data ]\n s = sum(data)\n data = [ d/s for d in data ]\n lda_attractions[attraction] = data\n\n # Handling LDA for each user\n lda_users = {}\n user = 0\n for l in open('%sinference_result.txt' % path_base_lda):\n user += 1\n data = l.split(' ')\n data = [ Decimal(d) for d in data ]\n s = sum(data)\n data = [ d/s for d in data ]\n lda_users[user] = data\n\n self.model = {\n 'users': lda_users,\n 'attractions': lda_attractions,\n }", "def post_process_result_of_lda_topic_model(lda_model, gensim_corpus,\n document_collection,\n document_collection_filtered,\n n_closest=25):\n # Prepare containers to store results\n # Container to keep the document topic matrix\n n_closest = - n_closest\n document_topic_matrix = []\n # Container to keep topics and the closest texts to each topic\n topic_closest_doc_with_topics_words = []\n # Container to keep topics\n all_topics = lda_model.show_topics(50)\n\n # Create an LDA corpus from the original gensim corpus\n lda_corpus = lda_model[gensim_corpus]\n\n # Iterate through the lda corpus and create the document topic matrix\n for i, documents in enumerate(lda_corpus):\n # Data returned is not proper numpy matrix\n document_topic_matrix.append(\n np.array([elements[1]for elements in documents]))\n\n # Create the proper numpy matrix\n document_topic_matrix = np.vstack(document_topic_matrix)\n\n # Find the 
closest texts to a given topic\n # Iterate through the transpose of the document topic matrix\n for i, element in enumerate(document_topic_matrix.T):\n # Identify the id of 15 closest texts of each topic\n closest = element.argsort(axis=0)[n_closest:][::-1]\n # Create a container to keep each text with the id above\n texts = []\n for element in closest:\n texts.append({'matched_text':\n document_collection_filtered[element],\n 'matched_text_words':\n document_collection[element]['match_word'],\n 'testimony_id': document_collection[element]\n ['testimony_id']})\n\n # Append them to container\n topic_closest_doc_with_topics_words.append({'texts': texts,\n 'topic_words':\n all_topics[i]})\n\n return {'topic_documents': topic_closest_doc_with_topics_words,\n 'document_topic_matrix': document_topic_matrix}", "def compute(self, topics, save_filename):\n texts = []\n\n tokenizer = RegexpTokenizer(r'\\w+')\n\n # create English stop words list\n en_stop = stopwords.words('english')\n\n # Create p_stemmer of class PorterStemmer\n p_stemmer = PorterStemmer()\n\n for i in self.__doc_set:\n # clean and tokenize document string\n raw = i.lower()\n tokens = tokenizer.tokenize(raw)\n\n # remove stop words from tokens\n stopped_tokens = [i for i in tokens if not i in en_stop]\n\n # stem tokens\n stemmed_tokens = [p_stemmer.stem(i) for i in stopped_tokens]\n\n # add tokens to list\n texts.append(stemmed_tokens)\n\n # turn our tokenized documents into a id <-> term dictionary\n dictionary = gensim.corpora.Dictionary(texts)\n\n # convert tokenized documents into a document-term matrix\n corpus = [dictionary.doc2bow(text) for text in texts]\n\n # generate LDA model\n lsi_model = gensim.models.LsiModel(corpus, num_topics=topics, id2word=dictionary)\n\n save_filename += \"_{}\".format(topics)\n\n dictionary.save(save_filename + \".dict\")\n gensim.corpora.MmCorpus.save_corpus(save_filename + \".mm\", corpus, id2word=dictionary)\n lsi_model.save(save_filename + \".model\")\n\n return lsi_model, corpus, dictionary", "def learn(self, docs, labels, alpha=1.0):\n assert len(docs)==len(labels)\n labelCounts = {l: 0 for l in self.CLASSES}\n wordCounts = {l: Counter() for l in self.CLASSES}\n totalWordCounts = {l: 0 for l in self.CLASSES}\n # iterate over documents in order to record\n for i in range(0, len(labels)):\n # count(y) in labelCounts\n l = labels[i]\n labelCounts[labels[i]] +=1\n # count(y,w) for all words in totalWordCounts\n totalWordCounts[labels[i]] += len(docs[i])\n words = docs[i]\n # count(y,word) in wordCounts,\n \n for word in words:\n wordCounts[labels[i]][word] += 1\n # and to store the training vocabulary in self.trainVocab\n self.trainVocab.add(word)\n # compute and store prior distribution over classes\n # (unsmoothed) in self.priorProbs\n print(\"Label,priorProbs,Label Count\", file=sys.stderr)\n for l in self.priorProbs:\n self.priorProbs[l] = np.divide(labelCounts[l], len(labels))\n print(l +\",\"+str(self.priorProbs[l])+\",\"+str(labelCounts[l]), file=sys.stderr) #This was for part one\n for word in self.trainVocab: \n self.likelihoodProbs[l][word] = np.divide(wordCounts[l][word]+self.ALPHA, totalWordCounts[l]+self.ALPHA*(len(self.trainVocab)+1))\n self.likelihoodProbs[l]['**OOV**'] = np.divide(self.ALPHA, totalWordCounts[l]+self.ALPHA*(len(self.trainVocab)+1))\n # Sanity checks--do not modify\n assert len(self.priorProbs)==len(self.likelihoodProbs)==len(self.CLASSES)>2\n assert .999 < sum(self.priorProbs.values()) < 1.001\n for y in self.CLASSES:\n assert .999 < 
sum(self.likelihoodProbs[y].values()) < 1.001,sum(self.likelihoodProbs[y].values())\n assert 0 <= self.likelihoodProbs[y]['**OOV**'] < 1.0,self.likelihoodProbs[y]['**OOV**']", "def learn_topic_model_activities(self):\n print \"\\nLearning a topic model with LDA:\"\n\n doc_topic, topic_word = tm.run_topic_model(self.accu_path, self.config['lda'])\n\n tm.dump_lda_output(self.lda_path, doc_topic, topic_word)\n print \"Topic Modelling - done.\\n\"\n return True", "def investigate_topics(model, loaded_data, labels, videos, prob_of_words, language_indices, _lambda, n_top_words = 30):\n\n topic_word = model.topic_word_\n doc_topic = model.doc_topic_\n code_book, graphlets_, uuids, miss_labels = loaded_data\n print \"1\"\n import pdb; pdb.set_trace()\n\n true_labels = labels\n vocab = [hash for hash in list(code_book)]\n graphs = loaded_data[1]\n # ****************************************************************************************************\n # Relevance\n # ****************************************************************************************************\n names_list = [i.lower() for i in ['Alan','Alex','Andy','Amy','Michael','Ben','Bruno','Chris','Colin','Collin','Ellie','Daniel','Dave','Eris','Emma','Helen','Holly','Jay','the_cleaner','Jo','Luke','Mark','Louis','Laura', 'Kat','Matt','Nick','Lucy','Rebecca','Jennifer','Ollie','Rob','Ryan','Rachel','Sarah','Stefan','Susan']]\n\n relevant_words = {}\n for i, phi_kw in enumerate(topic_word):\n\n phi_kw = threshold(np.asarray(phi_kw), 0.00001)\n log_ttd = [_lambda*math.log(y) if y!=0 else 0 for y in phi_kw]\n log_lift = [(1-_lambda)*math.log(y) if y!=0 else 0 for y in phi_kw / probability_of_words]\n relevance = np.add(log_ttd, log_lift)\n\n # cnt = 0\n # import pdb; pdb.set_trace()\n # for h, g in zip(np.asarray(vocab)[relevance >2.1], graphs[relevance >2.1]):\n # o, s, t = object_nodes(g)\n # if \"hand\" in o and \"object_14\" in o and len(s) == 2:\n # print h, s, t\n # cnt+=1\n # print cnt\n # genome_rel(relevance, i)\n\n inds = np.argsort(relevance)[::-1]\n # top_relevant_words_in_topic = np.array(vocab)[inds] #[:-(n_top_words+1):-1]\n # pdb.set_trace()\n relevant_language_words_in_topic = []\n\n for ind in inds:\n word = vocab[ind]\n\n #todo: somehting is wrong here.\n if relevance[ind] <= 1.0 and word.isalpha() and word not in names_list:\n relevant_language_words_in_topic.append(word)\n # pdb.set_trace()\n relevant_words[i] = relevant_language_words_in_topic[:10]\n\n # print(\"\\ntype(topic_word): {}\".format(type(topic_word)))\n # print(\"shape: {}\".format(topic_word.shape))\n print \"objects in each topic: \"\n topics = {}\n for i, topic_dist in enumerate(topic_word):\n objs = []\n top_words_in_topic = np.array(vocab)[np.argsort(topic_dist)][:-(n_top_words+1):-1]\n\n #print('Topic {}: {}'.format(i, ' '.join( [repr(i) for i in top_words_in_topic] )))\n # for j in [graphlets[k] for k in top_words_in_topic]:\n # objs.extend(object_nodes(j)[0])\n topics[i] = objs\n print('Topic {}: {}'.format(i, list(set(objs))))\n print top_words_in_topic\n\n # #Each document's most probable topic\n restricted_labels, restricted_videos = [], []\n pred_labels = []\n\n for n in xrange(doc_topic.shape[0]):\n #print [p for p in doc_topic[n] if p >= 0.0] # each document probabilities to each topic\n if max(doc_topic[n]) > class_thresh:\n # print true_labels[n]\n # print doc_topic[n]\n # print doc_topic[n].argmax()\n # doc_topic[n][doc_topic[n].argmax()] = 0\n restricted_labels.append(true_labels[n])\n restricted_videos.append(videos[n])\n topic_most_pr = 
doc_topic[n].argmax()\n pred_labels.append(topic_most_pr)\n\n #if dbg: print(\"doc: {} topic: {}\".format(n, topic_most_pr))\n true_labels = restricted_labels\n videos = restricted_videos\n print \"2\"\n import pdb; pdb.set_trace()\n\n return true_labels, pred_labels, videos, relevant_words", "def evaluate_lda(model, dictionary, corpus, texts, calculate_coherence=True, use_multicore=False):\n # perplexity = model.log_perplexity(corpus)\n coherence_lda = None\n if calculate_coherence:\n coherence_model_lda = CoherenceModel(model=model, texts=texts, dictionary=dictionary,\n coherence='c_v', processes=N_WORKERS if use_multicore else 1)\n coherence_lda = coherence_model_lda.get_coherence()\n return 0, coherence_lda", "def __getitem__(self, doc):\n lda_model = ldamodel.LdaModel(\n num_topics=self.num_topics, alpha=self.alphas, id2word=self.id2word, dtype=np.float64)\n lda_model.topics = np.zeros((self.vocab_len, self.num_topics))\n ldapost = LdaPost(num_topics=self.num_topics, max_doc_len=len(doc), lda=lda_model, doc=doc)\n\n time_lhoods = []\n for time in range(self.num_time_slices):\n lda_model = self.make_lda_seq_slice(lda_model, time) # create lda_seq slice\n lhood = LdaPost.fit_lda_post(ldapost, 0, time, self)\n time_lhoods.append(lhood)\n\n doc_topic = ldapost.gamma / ldapost.gamma.sum()\n # should even the likelihoods be returned?\n return doc_topic", "def elbow_lda(self, corpus, num_iter):\n coherence_values = []\n model_list = []\n for num_topics in range(self.start, self.stop, self.step):\n print('Topics Tested: ' + str(num_topics)) \n model = lda.LDA(corpus, num_topics, num_iter)\n model_list.append(model)\n coherence = model.get_coherence_score(corpus)\n coherence_values.append(coherence)\n return model_list, coherence_values", "def build_model_gensim(corpus, id2word, num_topics=20, validset=None):\n\n # Build LDA model\n lda_model = gensim.models.ldamulticore.LdaMulticore(corpus=corpus,\n id2word=id2word,\n num_topics=num_topics,\n random_state=100,\n eval_every=5,\n chunksize=10000, #nb of docs in each training chunk\n passes=50,\n iterations=500,\n alpha=0.001,\n per_word_topics=True,\n workers=4,)\n\n print(\"eta\",lda_model.eta)\n print(\"alpha\",lda_model.alpha)\n\n if validset:\n valid_corpus, valid_id2word, valid_data_lemmatized = validset\n print(lda_model.log_perplexity(valid_corpus, len(valid_corpus)))\n\n return lda_model", "def lda(documents_as_bag_of_words=[], topics_num=TOPICS_NUM, write_results_to=FILE_DEFAULT_GENSIM_LDA_RESULTS):\n\n\timport gensim\n\n\ttemp_files = generate_temp_files_for_lda_or_lsa(documents_as_bag_of_words)\n\n\tlda_model = gensim.models.ldamodel.LdaModel(corpus=temp_files['corpus'], id2word=temp_files['id2word'], num_topics=TOPICS_NUM, update_every=1, chunksize=10000, passes=1)\n\n\ttopics = lda_model.print_topics(TOPICS_NUM)\n\n\twith open(write_results_to, 'w', encoding='utf-8') as f:\n\t\tfor topic in topics:\n\t\t\tf.write('{}: {}\\n'.format(str(topic[0]), topic[1]))\n\t\tf.close()", "def investigate_topics(model, code_book, labels, videos, prob_of_words, _lambda, n_top_words = 30):\n\n topic_word = model.topic_word_\n doc_topic = model.doc_topic_\n # code_book, graphlets, uuids, miss_labels = loaded_data\n # print \"1\"\n # import pdb; pdb.set_trace()\n\n true_labels = labels\n vocab = [hash for hash in list(code_book)]\n\n # ****************************************************************************************************\n # Relevance\n # 
****************************************************************************************************\n # names_list = [i.lower() for i in ['Alan','Alex','Andy','Amy','Michael','Ben','Bruno','Chris','Colin','Collin','Ellie','Daniel','Dave','Eris','Emma','Helen','Holly','Jay','the_cleaner',\n # 'Jo','Luke','Mark','Louis','Laura', 'Kat','Matt','Nick','Lucy','Rebecca','Jennifer','Ollie','Rob','Ryan','Rachel','Sarah','Stefan','Susan']]\n\n relevant_words = {}\n for i, phi_kw in enumerate(topic_word):\n\n phi_kw = threshold(np.asarray(phi_kw), 0.00001)\n log_ttd = [_lambda*math.log(y) if y!=0 else 0 for y in phi_kw]\n log_lift = [(1-_lambda)*math.log(y) if y!=0 else 0 for y in phi_kw / prob_of_words]\n relevance = np.add(log_ttd, log_lift)\n\n # cnt = 0\n # import pdb; pdb.set_trace()\n # for h, g in zip(np.asarray(vocab)[relevance >2.1], graphs[relevance >2.1]):\n # o, s, t = object_nodes(g)\n # if \"hand\" in o and \"object_14\" in o and len(s) == 2:\n # print h, s, t\n # cnt+=1\n # print cnt\n # vis.genome_rel(relevance, i)\n\n inds = np.argsort(relevance)[::-1]\n # top_relevant_words_in_topic = np.array(vocab)[inds] #[:-(n_top_words+1):-1]\n # pdb.set_trace()\n relevant_language_words_in_topic = []\n\n for ind in inds:\n word = vocab[ind]\n\n #todo: somehting is wrong here.\n if relevance[ind] <= 1.0 and word.isalpha() and word not in names_list:\n relevant_language_words_in_topic.append(word)\n # pdb.set_trace()\n relevant_words[i] = relevant_language_words_in_topic[:10]\n\n # print(\"\\ntype(topic_word): {}\".format(type(topic_word)))\n # print(\"shape: {}\".format(topic_word.shape))\n # print \"objects in each topic: \"\n topics = {}\n for i, topic_dist in enumerate(topic_word):\n objs = []\n top_words_in_topic = np.array(vocab)[np.argsort(topic_dist)][:-(n_top_words+1):-1]\n\n #print('Topic {}: {}'.format(i, ' '.join( [repr(i) for i in top_words_in_topic] )))\n # for j in [graphlets[k] for k in top_words_in_topic]:\n # objs.extend(object_nodes(j)[0])\n topics[i] = objs\n # print('Topic {}: {}'.format(i, list(set(objs))))\n # print top_words_in_topic\n\n # #Each document's most probable topic\n restricted_labels, restricted_videos = [], []\n pred_labels = []\n\n for n in xrange(doc_topic.shape[0]):\n #print [p for p in doc_topic[n] if p >= 0.0] # each document probabilities to each topic\n if max(doc_topic[n]) > class_thresh:\n # print true_labels[n]\n # print doc_topic[n]\n # print doc_topic[n].argmax()\n # doc_topic[n][doc_topic[n].argmax()] = 0\n restricted_labels.append(true_labels[n])\n restricted_videos.append(videos[n])\n topic_most_pr = doc_topic[n].argmax()\n pred_labels.append(topic_most_pr)\n\n #if dbg: print(\"doc: {} topic: {}\".format(n, topic_most_pr))\n true_labels = restricted_labels\n videos = restricted_videos\n # print \"2\"\n # import pdb; pdb.set_trace()\n\n return true_labels, pred_labels, videos, relevant_words", "def lda_description(review_text, min_topic_freq=0.05,topic_model_file='lda_model_10'):\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n \n # parse the review text with spaCy\n parsed_review = nlp(review_text)\n \n # lemmatize the text and remove punctuation and whitespace\n unigram_review = [token.lemma_ for token in parsed_review\n if not punct_space(token)]\n \n # apply the first-order and secord-order phrase models\n bigram_review = bigram_model[unigram_review]\n trigram_review = trigram_model[bigram_review]\n \n # remove any remaining stopwords\n trigram_review = [term for term in trigram_review\n if not term in 
spacy.lang.en.STOP_WORDS]\n #print('bow:',trigram_review)\n \n # create a bag-of-words representation\n review_bow = sents_dict.doc2bow(trigram_review)\n \n \n # create an LDA representation\n lda = LdaMulticore.load(joinp(pilot_path, topic_model_file)) # my addition\n review_lda = lda[review_bow]\n \n \n # mine\n if topic_model_file=='lda_model_25':\n topic_names=topic_names_25\n elif topic_model_file=='lda_model_10':\n topic_names=topic_names_10\n #\n \n # sort with the most highly related topics first\n #review_lda = sorted(review_lda, key=lambda topic_number,freq: freq)\n listt=[]\n for topic_number, freq in review_lda:\n if freq < min_topic_freq:\n break\n \n # print the most highly related topic names and frequencies\n #print('{:10} {}'.format(topic_names[topic_number],round(freq, 3))) ## for now not putting yet topic names\n #print('{:25} {}'.format(topic_number,round(freq, 3))) \n x=[topic_number,topic_names[topic_number],np.round(freq, 3)]\n listt.append(x)\n return(listt)", "def run_lda(args, corpus, pre, dictionary=None, workers=None, docs=None, num_files=None):\n MALLET_PATH = os.environ.get(\"MALLET_PATH\", \"lda-tools/ext/mallet/bin/mallet\")\n if args.gensim:\n lda = gensim.models.wrappers.LdaMallet\n model = lda(MALLET_PATH, corpus, num_topics=args.num_topics,\n id2word=dictionary, optimize_interval=args.optimize_interval,\n workers=workers, iterations=args.num_iterations,\n prefix=pre)\n else:\n rand_prefix = hex(random.randint(0, 0xffffff))[2:] + '-'\n prefix = os.path.join(tempfile.gettempdir(), rand_prefix)\n mallet_corpus = prefix + 'corpus'\n\n print('Generating topic model.')\n form = 'tsv' if args.tsv_corpus else \"text\"\n tsv_corpus = None\n if not args.tsv_corpus:\n os.makedirs(mallet_corpus)\n corpus.export(mallet_corpus, abstract=False, form=form)\n elif args.year_split != -1:\n year, lines = docs\n os.makedirs(mallet_corpus)\n tsv_corpus = os.path.join(mallet_corpus, str(year) + \"-tmp.tsv\")\n with open(tsv_corpus, 'w') as f:\n f.write(\"\\n\".join(lines))\n else:\n tsv_corpus = args.tsv_corpus\n\n mallet_corpus = None if args.tsv_corpus else mallet_corpus\n model = Mallet(MALLET_PATH, mallet_corpus, num_topics=args.num_topics,\n iters=args.num_iterations, bigrams=args.bigrams_only,\n topical_n_grams=args.topical_n_grams,\n remove_stopwords=(not args.topical_n_grams), prefix=pre,\n print_output=True, file=tsv_corpus, min_df=args.min_df,\n max_df=args.max_df, num_files=num_files)\n return model", "def fit_lda_post(self, doc_number, time, ldaseq, LDA_INFERENCE_CONVERGED=1e-8,\n lda_inference_max_iter=25, g=None, g3_matrix=None, g4_matrix=None, g5_matrix=None):\n\n self.init_lda_post()\n # sum of counts in a doc\n total = sum(count for word_id, count in self.doc)\n\n model = \"DTM\"\n if model == \"DIM\":\n # if in DIM then we initialise some variables here\n pass\n\n lhood = self.compute_lda_lhood()\n lhood_old = 0\n converged = 0\n iter_ = 0\n\n # first iteration starts here\n iter_ += 1\n lhood_old = lhood\n self.gamma = self.update_gamma()\n\n model = \"DTM\"\n\n if model == \"DTM\" or sslm is None:\n self.phi, self.log_phi = self.update_phi(doc_number, time)\n elif model == \"DIM\" and sslm is not None:\n self.phi, self.log_phi = self.update_phi_fixed(doc_number, time, sslm, g3_matrix, g4_matrix, g5_matrix)\n\n lhood = self.compute_lda_lhood()\n converged = np.fabs((lhood_old - lhood) / (lhood_old * total))\n\n while converged > LDA_INFERENCE_CONVERGED and iter_ <= lda_inference_max_iter:\n\n iter_ += 1\n lhood_old = lhood\n self.gamma = 
self.update_gamma()\n model = \"DTM\"\n\n if model == \"DTM\" or sslm is None:\n self.phi, self.log_phi = self.update_phi(doc_number, time)\n elif model == \"DIM\" and sslm is not None:\n self.phi, self.log_phi = self.update_phi_fixed(doc_number, time, sslm, g3_matrix, g4_matrix, g5_matrix)\n\n lhood = self.compute_lda_lhood()\n converged = np.fabs((lhood_old - lhood) / (lhood_old * total))\n\n return lhood", "def model_topics(df):\n\n data = df.text.values.tolist()\n data_words = list(sent_to_words(data))\n\n # Build the bigram and trigram models\n bigram = gensim.models.Phrases(data_words, min_count=5, threshold=100)\n trigram = gensim.models.Phrases(bigram[data_words], threshold=100) \n\n # Faster way to get a sentence clubbed as a trigram/bigram\n bigram_mod = gensim.models.phrases.Phraser(bigram)\n trigram_mod = gensim.models.phrases.Phraser(trigram)\n\n # Remove Stop Words\n data_words_nostops = remove_stopwords(data_words)\n\n # Form Bigrams\n data_words_bigrams = make_bigrams(data_words_nostops,bigram_mod)\n\n # Initialize spacy 'en' model, keeping only tagger component (for efficiency)\n nlp = spacy.load('en', disable=['parser', 'ner'])\n\n # Do lemmatization keeping only noun, adj, vb, adv\n data_lemmatized = lemmatization(data_words_bigrams, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV'])\n\n # Create Dictionary\n id2word = corpora.Dictionary(data_lemmatized)\n\n # Create Corpus\n texts = data_lemmatized\n\n # Term Document Frequency\n corpus = [id2word.doc2bow(text) for text in texts]\n\n # Perform Topic Modeling for number of topics ranging from 5 to 50 in steps of 5\n model_list, coherence_values = compute_coherence_values(dictionary=id2word, corpus=corpus, texts=data_lemmatized, start=5, limit=50, step=5)\n\n return model_list,coherence_values,corpus,id2word", "def train(self, documents):\n ###DONE\n\n #entire vocab in document set D\n vocab_sod = set()\n vocab_pop = set()\n \n #Calcuates prior probabilities\n priorSOD = 0 #how many docs are spam\n priorPOP = 0 #how many docs are ham\n \n #Cacluates Tct\n term_freq_sod = {} #{term:occur, term:occur}\n term_freq_pop = {}\n \n #Tct'\n Tct_sod = 0 #Tct' = sum of (every term occurence in class c + 1)\n Tct_pop = 0\n \n for doc in documents: \n if 'sod' in doc.label:\n priorSOD += 1\n for token in doc.tokens:\n Tct_sod += 1\n if token in term_freq_sod.keys():\n term_freq_sod[token] = term_freq_sod[token] + 1\n else:\n term_freq_sod[token] = 1\n vocab_sod.add(token) \n else:\n priorPOP += 1\n for token in doc.tokens:\n Tct_pop += 1\n if token in term_freq_pop.keys():\n term_freq_pop[token] = term_freq_pop[token] + 1\n else:\n term_freq_pop[token] = 1\n vocab_pop.add(token)\n \n \n #endfor\n # | is for set join\n self.vocab = vocab_sod | vocab_pop #gets rid of duplicate words (those in both 'ham' and 'spam') \n \n #Tct Primes\n #tct' = term freq of all terms in class c + 1*(total terms)\n Tct_sod = Tct_sod + len(self.vocab) \n Tct_pop = Tct_pop + len(self.vocab) \n \n \n print(\"PriorSod: \" + str(priorSOD))\n print(\"PriorPop: \" + str(priorPOP))\n print(\"LEN Docum: \" + str(len(documents)))\n \n self.priorSOD = priorSOD / len(documents)\n self.priorPOP = priorPOP / len(documents)\n \n for term in self.vocab:\n if term in term_freq_pop.keys():\n self.cond_prob_pop[term] = (term_freq_pop[term] + 1) / Tct_pop\n else:\n self.cond_prob_pop[term] = 1 / Tct_pop\n \n if term in term_freq_sod.keys():\n self.cond_prob_sod[term] = (term_freq_sod[term] + 1) / Tct_sod\n else:\n self.cond_prob_sod[term] = 1 / Tct_sod\n \n \n pass", "def 
topic_extraction(df, col_name):\n tfidf_vectorizer = TfidfVectorizer(max_df=0.95, min_df=2,\n max_features=200,\n stop_words='english')\n tfidf = tfidf_vectorizer.fit_transform(df[col_name])\n\n tf_vectorizer = CountVectorizer(max_df=0.95, min_df=2,\n max_features=200,\n stop_words='english')\n tf = tf_vectorizer.fit_transform(df[col_name])\n nmf = NMF(n_components=20, random_state=1,\n alpha=.1, l1_ratio=.5)\n tfidf_feature_names = tfidf_vectorizer.get_feature_names()\n nmf_w = nmf.fit_transform(tfidf)\n nmf_h = nmf.components_\n df['labels'] = nmf_w.argmax(axis=1) # this was the right code to get labels/clusters\n\n\n print(\"\\nTopics in NMF model:\")\n print_top_words(nmf, tfidf_feature_names)\n\n\n lda = LatentDirichletAllocation(n_topics=20, max_iter=5,\n learning_method='online',\n learning_offset=50.,\n random_state=0,\n n_jobs=-1)\n lda.fit(tf)\n doc_topic_distrib = lda.transform(tf)\n lda_labels = doc_topic_distrib.argmax(axis=1)\n print lda_labels[:100]\n df['lda_labels'] = lda_labels\n print(\"\\nTopics in LDA model:\")\n tf_feature_names = tf_vectorizer.get_feature_names()\n print_top_words(lda, tf_feature_names)\n return df", "def topics_score_per_doc(lda_model, list_lemma):\n #Création d'un dictionnaire gensim\n array_lemma = np.array(list_lemma)\n dictionary = gensim.corpora.Dictionary(array_lemma)\n\n #Création d'un \"bag of words\" avec la fonction doc2bow\n bow_corpus = [dictionary.doc2bow(doc) for doc in array_lemma]\n\n for i in range(len(list_lemma)):\n print(\"\\nFor document {}\".format(i+1))\n for index, score in sorted(lda_model[bow_corpus[0]], key=lambda tup: -1*tup[1]):\n print(\"\\nScore: {}\\t \\nTopic: {}\".format(score, lda_model.print_topic(index, 10)))" ]
[ "0.7421184", "0.73544043", "0.72307", "0.7002529", "0.6943727", "0.673741", "0.6723259", "0.6671794", "0.6657565", "0.6648194", "0.6644296", "0.66000706", "0.6559571", "0.6551705", "0.65513426", "0.65454745", "0.6489818", "0.64809227", "0.6480784", "0.6443412", "0.63931346", "0.63751763", "0.63607985", "0.63553685", "0.6346438", "0.6317345", "0.62599593", "0.62528116", "0.62517923", "0.6201099" ]
0.744949
0
Workaround manage.py migrate complications: run syncdb in case it's our first run, so we make sure south_migrationhistory table is created; run migrate to apply latest migrations; run syncdb again to populate contrib.auth.models
def smart_syncdb_migrate(self): local('python manage.py syncdb') local('python manage.py migrate') local('python manage.py syncdb --all')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def migrate():\n puts(yellow(\"Run South migrations\"))\n django_manage('migrate')", "def post_migrations(self):", "def migrate(self):\n\tpass", "def syncdb():\n with virtualenv():\n run('python manage.py syncdb --noinput')\n run('python manage.py migrate')", "def update_db():\r\n settings = getattr(options, 'settings', 'dev')\r\n sh(django_cmd('lms', settings, 'syncdb', '--traceback', '--pythonpath=.'))\r\n sh(django_cmd('lms', settings, 'migrate', '--traceback', '--pythonpath=.'))", "def migrate_database(self):\n\n self.db.migrate_database()", "def sync_db():\n\n check_prompt = (\n not env.prompt or\n console.confirm(\n \"Create tables for models which have not yet been installed?\",\n default=True,\n )\n )\n\n if check_prompt:\n with cd(\"%s\" % env.work_path):\n with prefix(\"source %s/bin/activate\" % env.env_path):\n run(\n \"./manage.py syncdb\"\n \" --noinput\"\n )", "def migrate(where='local'):\n config = get_config(where)\n with settings(host_string=config['host_string']), cd(config['installation_dir']):\n\n run('bin/django syncdb')\n try:\n run('bin/django schemamigration dasa --auto')\n except:\n pass\n run('bin/django migrate dasa')", "def model_post_migrate(*args, **kwargs):\n global IN_MIGRATIONS\n IN_MIGRATIONS = False", "def southify(app):\n managepy('migrate %s 0001 --fake' % app)\n managepy('migrate %s' % app)", "def setup_before_migration(self, apps):", "def migrate_db():\n Base.metadata.create_all(ENGINE)", "def migration():", "def migrate(cr, version):\n pass", "def update_db_version():\n print(\"Checking Database states...\")\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"ADSM.settings\")\n try:\n call_command('migrate', database='scenario_db', interactive=False, fake_initial=True)\n call_command('migrate', database='default', interactive=False, fake_initial=True)\n except:\n print(\"Error: Migration failed.\")\n print('Done migrating databases.')", "def migrate():\n run('cd /home/indabom/web && source ./bin/activate && cd ./site && python manage.py migrate')", "def model_pre_migrate(*args, **kwargs):\n global IN_MIGRATIONS\n IN_MIGRATIONS = True", "def migrate_new_apps():\n new_apps = run('%s %s/fabfiles/django_scripts/get_apps_without_migration.py'\n % (env.PYTHON_BIN, env.SRC_PATH))\n # The script denotes the start of its output by \"{% output %}\" tag so we\n # only take whatever's after that\n new_apps = new_apps.split('{% output %}')[1].split()\n with cd(env.SRC_PATH):\n for app in new_apps:\n sudo(\"%s manage.py schemamigration %s --initial\" %\n (env.PYTHON_BIN, app.strip()))\n sudo(\"%s manage.py migrate %s --no-initial-data\" %\n (env.PYTHON_BIN, app.strip()))", "def ready(self):\n import django_better_migrations.migration_writer_patch # noqa", "def migrate(ctx):\n connecter = ScalingoInterface(ctx.obj)\n connecter.manage_py(\"migrate\")", "def migrate(args=''):\n run_commands('python manage.py migrate %s' % args)", "def db_migrate():\n when = str(int(time.time()))\n sql_file = os.path.join(MIGRATION_FOLDER, f\"{when}.sql\")\n\n with open(sql_file, 'w') as save_sql:\n up = MYSQL_UP.format(f\"upgrade-{when}\", when, MIGRATION_TABLE)\n down = MYSQL_DOWN.format(f\"downgrade-{when}\", when, MIGRATION_TABLE)\n\n save_sql.write(\"\\n\\n\".join([up, down]))\n LOGGER.info(f\"migration file: {os.path.join('migrations', sql_file)}\")", "def setup_database():\n from django.core.management import call_command\n from django import setup\n setup()\n call_command('migrate', verbosity=0, interactive=False)\n call_command('loaddata', data('initial_data.json'), 
verbosity=0, interactive=False)", "def upgrade():\n try:\n op.drop_table(\"ggrc_gdrive_integration_alembic_version\")\n except sa.exc.OperationalError as e:\n code, _ = e.orig.args\n if code == 1051: # doesn't exist\n # we're in a new DB with no trace of the removed chain\n pass\n else:\n raise\n\n # The following duplicates a part of a gdrive-related migration,\n # since a bunch of old migrations in ggrc refer to meetings table.\n # This part is relevant only for db_reset (new databases), so we\n # shouldn't recreate this table in downgrade.\n try:\n op.drop_table(\"meetings\")\n except sa.exc.OperationalError as e:\n code, _ = e.orig.args\n if code == 1051: # doesn't exist\n # we're in an old DB where meetings has been dropped in the removed chain\n pass\n else:\n raise", "def perform_migration():\n with cd(env.code_dir):\n with _virtualenv():\n sudo('python manage.py migrate --settings=prod_settings', pty=True)", "def migrate():\n if apply_migrations():\n click.echo(OK)\n else:\n sys.exit(1)", "def migratedb(rollback=False):\n\n require(\"virtualenv_path\", \"project_path\", \"sudo_user\")\n\n #\n # Some things need to be done first (i.e. if they need a different\n # database connection or some custom args)\n #\n if \"migratedb_first\" in env:\n\n for app, args in env.migratedb_first.iteritems():\n\n version = get_south_migrate_version(app, rollback)\n\n migrate_app_db(app, version, args)\n\n #\n # Do the rest afterwards\n #\n if has_version_info():\n\n apps = env.south_migrations.keys()\n\n for app in apps:\n\n print app\n\n version = get_south_migrate_version(app, rollback)\n\n migrate_app_db(app, version)\n\n #\n # If we know nothing, just migrate everything\n #\n else:\n migrate_app_db()", "def migrate(cls)->None:\n pass", "def migrate_fake():\n run('source /home/indabom/web/bin/activate && /home/indabom/web/site/manage.py migrate --fake')", "def migratedb_command():\n db = get_db()\n # This migration detects whether it needs to run before making changes.\n db.migrate_add_user_is_enabled()" ]
[ "0.7362598", "0.7175394", "0.6962745", "0.67925906", "0.66853064", "0.665665", "0.66325575", "0.65849835", "0.6555793", "0.65435", "0.6471299", "0.6459678", "0.6422288", "0.6384289", "0.63629246", "0.6273245", "0.615992", "0.61169046", "0.6091286", "0.60912824", "0.60731256", "0.6034795", "0.6017065", "0.6007843", "0.5996848", "0.5968128", "0.59622544", "0.5950757", "0.59288013", "0.59194374" ]
0.7330435
1
ssum([1,2,3]) 6 ssum([2,3]) 5 ssum([3]) 3 ssum([]) 0
def ssum(L: list) -> int: return 0 if not L else L[0]+ssum(L[1:])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def zero_sum(list):\n if not list:\n return 0\n else:\n return sum(list)", "def sum_unique(l):\n pass", "def sum_of_squares(seq):\n if len(seq) == 0:\n return 0\n else:\n result = 0\n for num in seq:\n result += num ** 2\n return result", "def zsum(s, *args, **kwargs):\n return 0 if s.empty else s.sum(*args, **kwargs)", "def sum(*nums): \n s=0\n for num in nums:\n s += num\n return s", "def lss(inlist):\r\n ss = 0\r\n for item in inlist:\r\n ss = ss + item*item\r\n return ss", "def sum3(nums):\n count = 0\n for num in nums:\n count += num\n return count", "def sum_multiples(num):\n pass", "def lsum (inlist):\r\n s = 0\r\n for item in inlist:\r\n s = s + item\r\n return s", "def sum_numbers(sequence):\r\n\r\n total = 0\r\n seq = get_numbers(sequence)\r\n for element in seq:\r\n total += element\r\n\r\n return total", "def U(xs):\n ret = 0\n for x in xs:\n ret += log(x)\n return ret", "def test_running_sum_multi_zeros(self):\n argument = [0,0,0,0]\n expected = [0,0,0,0]\n sums.running_sum(argument)\n self.assertEqual(expected,argument,\"the list contains only zeros\")", "def _ss(data):\n c = sum(data)/len(data)\n ss = sum((x-c)**2 for x in data)\n return ss", "def sum_list(numbers):\n\t\n\tif len(numbers) == 0:\n\t\treturn 0 \n\n\tsum = numbers[0] +sum_list(numbers[1:])\n\treturn sum", "def summed(L):\r\n result = 0\r\n for e in L:\r\n result = result + e # or result += e\r\n return result", "def lucas(n):\n lucval = sum_series(n, 2, 1)\n print(lucval)\n return lucval", "def add_list_numbers(incoming_list):\n # summation=0\n if incoming_list:\n summation = sum(incoming_list)\n else:\n summation = 0\n return summation", "def test_suite():\n test(sum_all_elements([1,3,1,4,3,8]) == 5)\n test(sum_all_elements([1,3,5,7]) == 16)\n test(sum_all_elements([1, -7, 10, 23]) == -6)\n test(sum_all_elements(range(1,555,2)) == 76729)", "def multiplication_total_of(num_list):", "def cum_sum(seq):\n s = 0\n cumult = [0]\n for n in seq:\n s += n\n cumult.append(s)\n return cumult", "def n_suma(**elementy):\n return sum(elementy)/len(elementy)", "def lsummult (list1,list2):\r\n if len(list1) <> len(list2):\r\n raise ValueError, \"Lists not equal length in summult.\"\r\n s = 0\r\n for item1,item2 in pstats.abut(list1,list2):\r\n s = s + item1*item2\r\n return s", "def add_list_numbers(incoming_list):\n if incoming_list: #if incoming_list is not None and len(incoming_list) > 0\n return_value = sum(incoming_list)\n else:\n return_value = 0\n return return_value", "def get_sum(lst):\n _sum=0\n for i in lst:\n _sum+=i\n return _sum", "def __init__(self, nums):\n self.sums,tmp =[],0\n for n in nums:\n tmp +=n\n self.sums.append(tmp)", "def sum_of_numbers(numbers):\r\n return sum(numbers)", "def magma_scasum(n, dx, incx, queue):\n\n return _libmagma.magma_scasum(n, int(dx), incx, queue)", "def sum_list(num_list):\n # return sum(num_list)\n sum_list = 0\n for number in num_list:\n sum_list += number\n print(sum_list)\n \n # code prints out the sum_list for each value, increasing by the value each time\n # final output is the sum of numbers\n # currently no output for '[]' as input ", "def sum(inputList):\n sum=0#the sum of the list starts from 0\n for num in inputList:\n sum=sum+num#add all number in the list\n print(\"the sum is\",sum)", "def sum(values):\n total = 0\n for i in values:\n total += i\n return total" ]
[ "0.6345257", "0.62525344", "0.6204409", "0.61191237", "0.6101727", "0.6077421", "0.60648704", "0.60515577", "0.60255706", "0.5993455", "0.5983791", "0.5964002", "0.5958428", "0.594942", "0.5904664", "0.5882846", "0.5862508", "0.5862386", "0.5833747", "0.582293", "0.58177805", "0.58089757", "0.58056796", "0.5765478", "0.5727781", "0.5726941", "0.57147306", "0.56974435", "0.5696405", "0.5691822" ]
0.7876537
0
print_stars(5) \n\n\n\n\n print_stars(4) \n\n\n\n print_stars(3) \n\n\n print_stars(2) \n\n print_stars(1) \n print_stars(0) ''
def print_stars(N: int) -> str: # if N: # return f'*\n{print_stars(N-1)}' # return '' return '' if not N else f'*\n{print_stars(N-1)}'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_stars():\n for i in range(2):\n for j in range(35):\n print(\"*\", end = '')\n print('')", "def star():\n print('*', end='')", "def starry_box(phrase):\n numStars = len(phrase) + 4\n print '*' * numStars\n print '*', phrase, '*'\n print '*' * numStars\n return", "def print_line(n):\n for i in range(1,n+1):\n str1 = ('*' * (i))\n print(str1)", "def draw_star(turtle, n):\n\n for i in range(n):\n turtle.forward(100)\n turtle.left(180 - 180/n)", "def sampleSquare():\n size = int(input('Enter the size: '))\n print('Sample Square of size', size)\n\n # display the first row of stars\n for i in range(size):\n star()\n newline()\n\n # display the \"middle\" rows. There are (size - 2) of them\n for i in range(size - 2):\n # for each row: star, spaces (size - 2 of them), star, newline\n star()\n for j in range(size - 2):\n space()\n star()\n newline()\n \n # display the last row of stars\n for i in range(size):\n star()\n newline()", "def newline():\n\n print('')", "def newline():\n print()", "def starbox(width, height):\n print(\"*\" * width) # print top edge of the box\n # print sides of the box\n for _ in range(height - 2):\n print(\"*\" + \" \" * (width - 2) + \"*\")\n print(\"*\" * width) # print bottom edge of the box", "def list(show=0):\n global stars_\n if len(stars_) == 0:\n print \"No stars have been selected, go use 'stars()'\"\n return\n if show == 0:\n i=0\n for s in stars_:\n i=i+1\n print i,s[0],s[1],s[2],s[3]\n else:\n if show > 0 and show <= len(stars_):\n s = stars_[show-1]\n print show,s[0],s[1],s[2],s[3]\n else:\n print \"Bad star index\"", "def space():\n print(' ', end='')", "def drawFivePointStar(turtle):\n\n for i in range(5):\n turtle.forward(100)\n turtle.left(216)", "def calculate_text_stars(word_counts) -> int:\n if word_counts == []:\n return 3\n words_per_slide = sum(word_counts) / len(word_counts)\n stars = 5 - abs(words_per_slide - 35) / 8\n # print(stars)\n return max(0, min(5, int(stars + 0.5)))", "def draw_1(n: int):\n \n for row in range(n):\n\n for col in range(n - row - 1):\n print(' ', end='')\n\n for col in range(2 * row + 1):\n print('*', end='')\n \n print()", "def while_X():\r\n i=0\r\n while i<6:\r\n j=0\r\n while j<6:\r\n if i-j==0 or i+j==5:\r\n print(\"*\",end=\" \")\r\n else:\r\n print(\" \",end=\" \")\r\n j+=1 \r\n print()\r\n i+=1", "def show_stacked_body(start=0, stack=3, show_calc=0):\n stack_sort = random.randint(0, 3)\n\n for n in range(start, stack):\n n_stars = 1 + (2*n)\n\n if show_calc:\n calc_result = str(n+1) + '...n_stars= ' + str(n_stars)\n else:\n calc_result = ''\n\n chance = random.randint(0, 10)\n star_body = (\"-\" * n_stars)\n\n if chance in [0,1,2,3,4,5,6,7,8,9,10]:\n for n in range(n_stars):\n pos = random.randint(0, n_stars-1)\n\n # chance%3 or stack_sort%3 or ORNAMENT_SORT%3\n if stack_sort%3 == 0:\n ornaments = random.choice('_.^:;\\'') # _.^:;\\'\n elif stack_sort%3 == 1:\n ornaments = random.choice(\"abcdefghijklmnopqrstuvwxyz\") # _.^:;\\'\n elif stack_sort%3 == 2:\n ornaments = random.choice(\"ABCDEFGHIJKLMNOPQRSTUVWXYZ\") # _.^:;\\'\n\n star_body = star_body[:pos] + ornaments + star_body[pos+1:]\n\n line_print = star_body.center(GROUND_WIDTH) + calc_result\n print(line_print)", "def nl():\n\tprint(\"\")", "def cool_print(self, text=str, newline=True, margin=21, rate=.02):\n print(\" \" * margin, end='')\n for letter in text:\n sleep(.02)\n stdout.write(letter)\n stdout.flush()\n if newline:\n print()", "def single_line():\n print (\"-------------------------------------------------------------\")", "def 
newline(lines=1):\n\n # Print the new line iterated by the amount of new lines\n print('\\n' * lines)", "def createStar(npoints):\n # START CODE HERE\n\n pass\n # END CODE HERE # (remove the pass statement)", "def print_line():\n print('+ - - - - + - - - - +'),", "def draw_2(n: int):\n\n for row in range(n):\n for col in range(n - row):\n print('*', end='')\n print()", "def draw_5(n: int):\n\n # Top half + middle\n for row in range(n // 2 + (n % 2)):\n cols = (row) * 2 + (n % 2)\n \n for col in range((n - cols) // 2):\n print(' ', end='')\n\n for col in range(cols):\n print('*', end='')\n \n print()\n\n # Bottom half\n for row in range(n // 2):\n cols = n - (row + 1) * 2\n \n for col in range((n - cols) // 2):\n print(' ', end='')\n\n for col in range(cols):\n print('*', end='')\n \n print()", "def square(n):\n\n my_CRLF = '\\n'\n return_value = ''\n for _ in range(n):\n return_value += line(n) + my_CRLF\n return return_value", "def drawStar(duration):\n # START CODE HERE #\n\n\n pass\n # END CODE HERE # (remove the pass statement)", "def display_ratings(ratings):\n # only attempt to display the ratings if any were found\n if ratings:\n print('\\n[RATINGS]\\n')\n\n for rating in ratings:\n print(f' {rating}', end=' ')\n # needed to get printing back to normal\n print()", "def for_five():\r\n\r\n for row in range(7):\r\n for col in range(5):\r\n if col==0 and row<6 and row!=4 or col>0 and col<3 and row%3==0 or col==3 and (row==0 or row>3) and row<6:\r\n print('*', end = ' ')\r\n else:\r\n print(' ', end = ' ')\r\n print()", "def draw_5_alt(n: int):\n \n n = n - (n + 1) % 2 # cut even number down to nearest odd\n for row in range(n):\n dist_from_half = (abs(n // 2 - row))\n cols = n - dist_from_half * 2\n\n for col in range((n - cols) // 2):\n print(' ', end='')\n \n for col in range(cols):\n print('*', end='')\n \n print()", "def create_star(rk_settings, screen, stars, star_number, row_number):\r\n\tstar = Star(rk_settings, screen)\r\n\tstar_width = star.rect.width\r\n\tstar.x = star_width + 2 * star_width * star_number\r\n\tstar.rect.x = star.x\r\n\tstar.rect.y = star.rect.height + 2 * star.rect.height * row_number\r\n\tstars.add(star)" ]
[ "0.78595215", "0.7326586", "0.6608579", "0.66084236", "0.64868563", "0.6461798", "0.62855875", "0.6244473", "0.6224193", "0.621459", "0.61932653", "0.61023444", "0.6071811", "0.59977406", "0.5921401", "0.5901315", "0.58597124", "0.5856981", "0.58483934", "0.5816308", "0.57875204", "0.5784535", "0.57806206", "0.574841", "0.5748191", "0.5731355", "0.5713836", "0.5659546", "0.56586206", "0.5626202" ]
0.8280417
0
Assert that the first (leftmost) protocol value is correctly fetched from the x-forwarded header.
def test_get_protocol_with_more_than_one_value(): request = Mock( headers={"X-Forwarded-Proto": "https,http,http"}, protocol="http", ) expected = "https" protocol = get_browser_protocol(request) assert expected == protocol
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_h2_header_ok(self):\n self.set_frang_config(frang_config=\"http_strict_host_checking true;\")\n client = self.get_client(\"deproxy-1\")\n client.start()\n client.parsing = False\n\n first_headers = [(\":authority\", \"localhost\"), (\":path\", \"/\")]\n second_headers = [(\":path\", \"/\"), (\"host\", \"localhost\")]\n third_headers = [(\":authority\", \"localhost\"), (\":path\", \"/\"), (\"host\", \"localhost\")]\n fourth_headers = [\n (\":authority\", \"tempesta-tech.com\"),\n (\":path\", \"/\"),\n (\"forwarded\", \"host=tempesta-tech.com\"),\n (\"forwarded\", \"for=tempesta.com\"),\n ]\n\n header_list = [\n first_headers,\n first_headers, # as byte\n second_headers,\n second_headers, # as byte\n third_headers,\n third_headers, # as byte\n fourth_headers,\n fourth_headers, # as byte\n ]\n for header in header_list:\n head = [\n (\":scheme\", \"https\"),\n (\":method\", \"HEAD\"),\n ]\n head.extend(header)\n client.make_request(head)\n self.assertTrue(client.wait_for_response(1))\n\n self.check_response(client, status_code=\"200\", warning_msg=\"frang: \")", "def test_host_header_set_ok(self):\n requests = [\n \"GET / HTTP/1.1\\r\\nHost: tempesta-tech.com:80\\r\\n\\r\\n\",\n \"GET / HTTP/1.1\\r\\nHost: tempesta-tech.com \\r\\n\\r\\n\",\n \"GET http://tempesta-tech.com/ HTTP/1.1\\r\\nHost: tempesta-tech.com\\r\\n\\r\\n\",\n \"GET http://user@tempesta-tech.com/ HTTP/1.1\\r\\nHost: tempesta-tech.com\\r\\n\\r\\n\",\n (\n \"GET http://user@tempesta-tech.com/ HTTP/1.1\\r\\n\"\n \"Host: tempesta-tech.com\\r\\n\"\n \"Forwarded: host=tempesta-tech.com\\r\\n\"\n \"Forwarded: host=tempesta1-tech.com\\r\\n\\r\\n\"\n ),\n ]\n client = self.base_scenario(\n frang_config=\"http_strict_host_checking true;\", requests=requests\n )\n self.check_response(client, status_code=\"200\", warning_msg=\"frang: \")", "def test_host_header_with_old_proto(self):\n client = self.base_scenario(\n frang_config=\"http_strict_host_checking true;\",\n requests=[\"GET / HTTP/1.0\\r\\nHost: tempesta-tech.com\\r\\n\\r\\n\"],\n )\n self.check_response(\n client,\n status_code=\"403\",\n warning_msg=\"frang: Host header field in protocol prior to HTTP/1.1\",\n )", "def test_host_header_mismatch(self):\n client = self.base_scenario(\n frang_config=\"http_strict_host_checking true;\",\n requests=[\"GET http://user@tempesta-tech.com/ HTTP/1.1\\r\\nHost: example.com\\r\\n\\r\\n\"],\n )\n self.check_response(client, status_code=\"403\", warning_msg=WARN_DIFFER)", "def test_host_header_mismatch_empty(self):\n client = self.base_scenario(\n frang_config=\"http_strict_host_checking true;\",\n requests=[\"GET http://user@tempesta-tech.com/ HTTP/1.1\\r\\nHost: \\r\\n\\r\\n\"],\n )\n self.check_response(client, status_code=\"403\", warning_msg=WARN_DIFFER)", "def test_host_header_no_port_in_uri(self):\n client = self.base_scenario(\n frang_config=\"http_strict_host_checking true;\",\n requests=[\n \"GET http://tempesta-tech.com/ HTTP/1.1\\r\\nHost: tempesta-tech.com:80\\r\\n\\r\\n\"\n ],\n )\n self.check_response(client, status_code=\"200\", warning_msg=WARN_DIFFER)", "def test_host_header_no_port_in_host(self):\n client = self.base_scenario(\n frang_config=\"http_strict_host_checking true;\",\n requests=[\n \"GET http://tempesta-tech.com:80/ HTTP/1.1\\r\\nHost: tempesta-tech.com\\r\\n\\r\\n\"\n ],\n )\n self.check_response(client, status_code=\"200\", warning_msg=WARN_DIFFER)", "def test_host_header_mismath_port(self):\n client = self.base_scenario(\n frang_config=\"http_strict_host_checking true;\",\n requests=[\n \"GET 
http://tempesta-tech.com:81/ HTTP/1.1\\r\\nHost: tempesta-tech.com:81\\r\\n\\r\\n\"\n ],\n )\n self.check_response(\n client, status_code=\"403\", warning_msg=\"port from host header doesn't match real port\"\n )", "def test_send_pp_header_v1_no_src_addr(self):\n socket = self.get_socket(PROXY_PROTOCOL.V1)\n socket.getsockname.return_value = ('1.1.1.1', 1000)\n socket.getpeername.return_value = ('2.2.2.2', 2000)\n\n socket._send_pp_header()\n\n expected_header = encode_v1('TCP4', '1.1.1.1', '2.2.2.2', 1000, 2000)\n socket.sendall.assert_called_once_with(expected_header)", "def assert_header(self):\r\n\r\n if self.length > self.owner.settings[SETTINGS_MAX_FRAME_SIZE]:\r\n raise netius.ParserError(\r\n \"Headers are greater than SETTINGS_MAX_FRAME_SIZE\",\r\n stream = self.stream,\r\n error_code = FRAME_SIZE_ERROR\r\n )\r\n if self.last_type in (HEADERS, CONTINUATION) and not\\\r\n self.last_end_headers and not self.last_stream == self.stream:\r\n raise netius.ParserError(\r\n \"Cannot send frame from a different stream in middle of headers\",\r\n error_code = PROTOCOL_ERROR\r\n )", "def test_host_header_as_ip6(self):\n client = self.base_scenario(\n frang_config=\"http_strict_host_checking true;\",\n requests=[\"GET / HTTP/1.1\\r\\nHost: [20:11:abb::1]:80\\r\\n\\r\\n\"],\n )\n self.check_response(client, status_code=\"403\", warning_msg=WARN_IP_ADDR)", "def test_host_header_as_ip(self):\n client = self.base_scenario(\n frang_config=\"http_strict_host_checking true;\",\n requests=[\"GET / HTTP/1.1\\r\\nHost: 127.0.0.1\\r\\n\\r\\n\"],\n )\n self.check_response(client, status_code=\"403\", warning_msg=WARN_IP_ADDR)", "def assert_has_valid_head(self, response, expected):\r\n assert 'head' in response\r\n head = response['head']\r\n assert isinstance(head, str)\r\n assert head == expected", "def test_host_header_mismath_port_in_host(self):\n client = self.base_scenario(\n frang_config=\"http_strict_host_checking true;\",\n requests=[\n \"GET http://tempesta-tech.com:81/ HTTP/1.1\\r\\nHost: tempesta-tech.com:80\\r\\n\\r\\n\"\n ],\n )\n self.check_response(client, status_code=\"403\", warning_msg=WARN_DIFFER)", "def testIP(self):\n self.assertEqual([\"http://234.234.234.234\"], grab('http://234.234.234.234', self.needScheme))", "def test_send_pp_header_v1_with_src_addr(self):\n socket = self.get_socket(PROXY_PROTOCOL.V1, src_addr=('6.6.6.6', 666))\n socket.getsockname.return_value = ('1.1.1.1', 1000)\n socket.getpeername.return_value = ('2.2.2.2', 2000)\n\n socket._send_pp_header()\n\n expected_header = encode_v1('TCP4', '6.6.6.6', '2.2.2.2', 666, 2000)\n socket.sendall.assert_called_once_with(expected_header)", "def test_headers(self):\n self.assert_expected_token_value()", "def test_server_should_be_http_1_1(httpbin):\n resp = get_raw_http_response(httpbin.host, httpbin.port, \"/get\")\n assert resp.startswith(b\"HTTP/1.1\")", "def test_h2_host_header_as_ipv6(self):\n self._test(\n headers=[\n (\":path\", \"/\"),\n (\"host\", \"[20:11:abb::1]:443\"),\n ],\n expected_warning=WARN_IP_ADDR,\n )", "def test_specific_headers_sent_with_request(self):\n req = self.httpbin.get_my_headers(dry_run=True)\n self.assertIn('All-Request-Headers', req.prepared_request.headers)\n request_data_headers = self.httpbin.client['get_my_headers']['headers']['All-Request-Headers']\n self.assertEqual(req.prepared_request.headers['All-Request-Headers'], request_data_headers)", "def test_normalize_xmlrpc_address_missing_protocol(self):\r\n input_val = 'google.com:1234'\r\n expected_val = 'http://google.com:1234'\r\n 
actual_val = normalize_xmlrpc_address(input_val, 1471)\r\n self.assertEqual(expected_val, actual_val)", "def test_h2_host_header_as_ip(self):\n self._test(\n headers=[\n (\":path\", \"/\"),\n (\"host\", \"127.0.0.1\"),\n ],\n expected_warning=WARN_IP_ADDR,\n )", "def testIPv6(self):\n self.assertEqual([\"http://[2001:a68:104:1337:250:daff:fe72:871c]/toimia\"], grab('foo http://[2001:a68:104:1337:250:daff:fe72:871c]/toimia', self.needScheme))", "def test_discard_first(self):\n test_length = random.randint(0,100)\n test_string = \"#\\t{0}\".format(\"\\t\".join(map(str, xrange(test_length))))\n expected = test_length\n computed = len(self.parser.parse_header(test_string, extract_mock))\n self.assertEquals(expected, computed)", "def test_host_header(self):\n hostname = b\"server_name_1\"\n\n def update_expected_server(expected):\n expected[3][\"attributes\"].update(\n {\"http.server_name\": hostname.decode(\"utf8\")}\n )\n return expected\n\n self.scope[\"headers\"].append([b\"host\", hostname])\n app = otel_asgi.OpenTelemetryMiddleware(simple_asgi)\n self.seed_app(app)\n self.send_default_request()\n outputs = self.get_all_output()\n self.validate_outputs(outputs, modifiers=[update_expected_server])", "def test_correct_sheme_host_sent_with_request(self):\n req = self.httpbin.get_my_ip(dry_run=True)\n self.assertIn(self.httpbin.client['host'], urlparse(req.prepared_request.url).netloc)\n self.assertIn(self.httpbin.client['scheme'], urlparse(req.prepared_request.url).scheme)\n self.assertIn(self.httpbin.client['get_my_ip']['path'], urlparse(req.prepared_request.url).path)", "def test_response_ok():\n from server import response_ok\n assert response_ok().split(b'\\r\\n')[0] == b'HTTP/1.1 %s' % OK_200", "def test_host_header(self):\n hostname = b\"server_name_1\"\n\n def update_expected_server(expected):\n expected[3][\"attributes\"].update(\n {SpanAttributes.HTTP_SERVER_NAME: hostname.decode(\"utf8\")}\n )\n return expected\n\n self.scope[\"headers\"].append([b\"host\", hostname])\n app = otel_asgi.OpenTelemetryMiddleware(simple_asgi)\n self.seed_app(app)\n self.send_default_request()\n outputs = self.get_all_output()\n self.validate_outputs(outputs, modifiers=[update_expected_server])", "def testProtocolReturn(self):\n self.assertEqual(\n self.protocol,\n self.mr.protocol\n )\n\n self.mr._protocol = 'burp'\n\n self.assertEqual(\n 'burp',\n self.mr.protocol\n )", "def test_specific_url_is_used_for_request(self):\n req = self.httpbin.get_my_headers(dry_run=True)\n\n url = self.httpbin.client[\"get_my_headers\"][\"url\"]\n self.assertIn(url, req.prepared_request.url)" ]
[ "0.6413071", "0.6368929", "0.63600814", "0.6126467", "0.6093351", "0.6008764", "0.597977", "0.59638256", "0.5935303", "0.5915244", "0.59103775", "0.588814", "0.58332074", "0.5819531", "0.58032525", "0.57846427", "0.5782095", "0.57428664", "0.5648308", "0.5636194", "0.5635823", "0.5607141", "0.55981106", "0.55842924", "0.55680764", "0.55322903", "0.5489046", "0.54722", "0.54717106", "0.54137605" ]
0.72213775
0
Extract metadata like original image name and crop position from the given file name. Change this function to use a different file name pattern.
def get_metadata_from_filename(file_name: str) -> namedtuple: if os.path.isabs(f): file_name = os.path.basename(file_name) original_image_name = file_name.split('-')[0] x_pos = int(file_name.split('.')[-2].split('+')[-2:][0]) Metadata = namedtuple('Metadata', ['original_image_name', 'x_pos']) return Metadata(original_image_name, x_pos)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parseFilename(fileName):\n # regex to match names like Axis-BaldCA_2018-05-29T16_02_30_129496.jpg\n # and bm-n-mobo-c__2017-06-25z11;53;33.jpg\n regexExpanded = '([A-Za-z0-9-_]+[^_])_+(\\d{4}-\\d\\d-\\d\\d)T(\\d\\d)[_;](\\d\\d)[_;](\\d\\d)'\n # regex to match diff minutes spec for subtracted images\n regexDiff = '(_Diff(\\d+))?'\n # regex to match optional crop information e.g., Axis-Cowles_2019-02-19T16;23;49_Crop_270x521x569x820.jpg\n regexOptionalCrop = '(_Crop_(-?\\d+)x(-?\\d+)x(\\d+)x(\\d+))?'\n matchesExp = re.findall(regexExpanded + regexDiff + regexOptionalCrop, fileName)\n # regex to match names like 1499546263.jpg\n regexUnixTime = '(1\\d{9})'\n matchesUnix = re.findall(regexUnixTime + regexDiff + regexOptionalCrop, fileName)\n cropInfo = None\n if len(matchesExp) == 1:\n match = matchesExp[0]\n parsed = {\n 'cameraID': match[0],\n 'date': match[1],\n 'hours': match[2],\n 'minutes': match[3],\n 'seconds': match[4]\n }\n isoStr = '{date}T{hour}:{min}:{sec}'.format(date=parsed['date'],hour=parsed['hours'],min=parsed['minutes'],sec=parsed['seconds'])\n dt = dateutil.parser.parse(isoStr)\n unixTime = int(dt.timestamp())\n parsed['diffMinutes'] = int(match[6] or 0)\n cropInfo = match[-4:]\n elif len(matchesUnix) == 1:\n match = matchesUnix[0]\n unixTime = int(match[0])\n dt = datetime.datetime.fromtimestamp(unixTime)\n isoStr = datetime.datetime.fromtimestamp(unixTime).isoformat()\n parsed = {\n 'cameraID': 'UNKNOWN_' + fileName,\n 'date': dt.date().isoformat(),\n 'hours': str(dt.hour),\n 'minutes': str(dt.minute),\n 'seconds': str(dt.second)\n }\n parsed['diffMinutes'] = int(match[2] or 0)\n cropInfo = match[-4:]\n else:\n logging.error('Failed to parse name %s', fileName)\n return None\n if cropInfo[0]:\n parsed['minX'] = int(cropInfo[0])\n parsed['minY'] = int(cropInfo[1])\n parsed['maxX'] = int(cropInfo[2])\n parsed['maxY'] = int(cropInfo[3])\n parsed['isoStr'] = isoStr\n parsed['unixTime'] = int(unixTime)\n return parsed", "def repackFileName(parsedName):\n cropCoords = None\n if 'minX' in parsedName:\n cropCoords=(parsedName['minX'], parsedName['minY'], parsedName['maxX'], parsedName['maxY'])\n return getImgPath('', parsedName['cameraID'], parsedName['unixTime'],\n cropCoords=cropCoords,\n diffMinutes=parsedName['diffMinutes'])", "def parse_crop_details(fn, crop_name, crop_parent):\n if crop_name is None:\n if fn is None:\n raise ValueError(\"Either `fn` or `crop_name` must be give.\")\n crop_name = _get_fn_name(fn)\n\n crop_parent = crop_parent if crop_parent is not None else os.getcwd()\n crop_location = os.path.join(crop_parent, \".xyz-{}\".format(crop_name))\n\n return crop_location, crop_name, crop_parent", "def extract_metadata(name):\n seps = name.count(\" - \")\n artist = title = None\n\n if seps == 1:\n\n pos = name.find(\" - \")\n artist = name[:pos].strip()\n title = name[pos + 3:].strip()\n\n else:\n title = name.strip()\n\n return dict(artist=artist, title=title)", "def file_info(file_name, file_pattern):\n match = re.compile(file_pattern).match(file_name)\n if match:\n basepath = match.group('basepath')\n sensor = match.group('sensor')\n ax = match.group('ax')\n freq = match.group('freq')\n date = match.group('date')\n return basepath, sensor, ax, freq, date\n else:\n return None # there is no file extension to file_name", "def getImageInformation(file_path):\n if os.path.isdir(file_path) == False:\n file_dir = os.path.basename(file_path)\n file_name = os.path.splitext(file_dir)[0]\n file_format = os.path.splitext(file_path)[1]\n return file_name, 
file_format", "def extract_file_name(self, input_file):\n self.file_name_with_ext, self.file_name = extract_file_name(input_file)", "def _get_info_from_filename(filename: str) -> dict:\n *parts, suffix = filename.split('.')\n dct = re.match(r'^(?P<name>[A-z0-9.]*)(-(?P<num_rows>[0-9]+))?$', '.'.join(parts)).groupdict()\n return {\n 'name': dct['name'],\n 'num_rows': int(dct['num_rows']) if dct['num_rows'] else None,\n 'format': suffix,\n }", "def parse_image_filename(filename):\n\n # regexes\n starts_with_six_digits = re.compile(r'^\\d{6}')\n capital_letter = re.compile(r'([A-Z]{1})')\n plus = re.compile(r'\\+')\n\n # split the filename and extention\n filename, extension = os.path.splitext(filename)\n try:\n style_number, color, description = filename.split('_')\n except Exception as e:\n print(e)\n print(filename, extension)\n\n style_number = int(style_number)\n\n # decode the color\n # intCaps -> int/caps\n color = capital_letter.sub(r'/\\1', color).lower()\n # plus+to+space -> plus to space\n color = plus.sub(r' ', color)\n\n # decode the description\n description = plus.sub(r' ', description)\n\n return style_number, color, description", "def _parse_h36m_imgname(imgname) -> Tuple[str, str, str]:\n subj, rest = osp.basename(imgname).split('_', 1)\n action, rest = rest.split('.', 1)\n camera, rest = rest.split('_', 1)\n return subj, action, camera", "def test_get_image_name(self):\n ssp = self._get_ssp_stor()\n\n def verify_image_name(name, checksum, expected):\n img_meta = image_meta.ImageMeta(name=name, checksum=checksum)\n self.assertEqual(expected, ssp._get_image_name(img_meta))\n self.assertTrue(len(expected) <= const.MaxLen.FILENAME_DEFAULT)\n\n verify_image_name('foo', 'bar', 'image_foo_bar')\n # Ensure a really long name gets truncated properly. Note also '-'\n # chars are sanitized.\n verify_image_name(\n 'Template_zw82enbix_PowerVM-CI-18y2385y9123785192364',\n 'b518a8ba2b152b5607aceb5703fac072',\n 'image_Template_zw82enbix_PowerVM_CI_18y2385y91'\n '_b518a8ba2b152b5607aceb5703fac072')", "def _parse_filename(filename, metadata):\n\n file_noext = os.path.splitext(filename)[0]\n fname = file_noext.split(\"_\")\n\n metadata[\"scene_id\"] = fname[1]\n metadata[\n \"beam_mode\"] = sat_properties.radarsat_product_characteristics[\n fname[2]]\n metadata[\"product_type\"] = fname[-1]\n try:\n metadata[\n \"product_description\"] = sat_properties.radarsat_1_data_products[\n fname[-1][:3]]['description']\n except Exception:\n metadata[\"product_description\"] = \"\"\n\n metadata[\"scene_mean_time\"] = datetime.datetime.strptime(\n fname[3] + fname[4], \"%Y%m%d%H%M%S\")\n\n return metadata", "def identify_filename_metadata(filename, file_format='CMIP6'):\n if file_format == 'CMIP5':\n components = ['cmor_name', 'table', 'climate_model', 'experiment',\n 'rip_code', 'date_string']\n elif file_format == 'CMIP6':\n components = ['cmor_name', 'table', 'climate_model', 'experiment',\n 'rip_code', 'grid', 'date_string']\n else:\n raise NotImplementedError('file_format must be CMIP5 or CMIP6')\n\n basename = os.path.basename(filename)\n directory = os.path.dirname(filename)\n metadata = {'basename': basename, 'directory': directory}\n\n # split the filename into sections\n if basename.endswith('-clim.nc'):\n filename_sects = basename.rpartition('-clim.nc')[0].split('_')\n else:\n filename_sects = basename.rpartition('.nc')[0].split('_')\n\n # but if experiment present_day was in the filename, join these sections\n # back together. 
This should only occur in pre-PRIMAVERA data.\n if filename_sects[3] == 'present' and filename_sects[4] == 'day':\n filename_sects[3] += '_' + filename_sects.pop(4)\n\n # deduce as much as possible from the filename\n try:\n for cmpt_name, cmpt in zip(components, filename_sects):\n if cmpt_name == 'date_string':\n frequency = _get_frequency(metadata['table'])\n start_date, end_date = cmpt.split('-')\n try:\n metadata['start_date'] = _make_partial_date_time(\n start_date, frequency)\n metadata['end_date'] = _make_partial_date_time(\n end_date, frequency)\n except ValueError:\n msg = 'Unknown date format in filename: {}'.format(\n filename)\n raise FileValidationError(msg)\n else:\n metadata[cmpt_name] = cmpt\n except ValueError:\n msg = 'Unknown filename format: {}'.format(filename)\n raise FileValidationError(msg)\n\n # fixed variables won't have a time range and so create blank values\n potential_missing_values = ['start_date', 'end_date']\n for missing_value in potential_missing_values:\n if missing_value not in metadata:\n metadata[missing_value] = None\n\n metadata['filesize'] = os.path.getsize(filename)\n\n for freq in FREQUENCY_VALUES:\n if freq in metadata['table'].lower():\n metadata['frequency'] = freq\n break\n if 'frequency' not in metadata:\n # set a blank frequency if one hasn't been found\n metadata['frequency'] = ''\n\n return metadata", "def _get_file_info(filename):\n filename = os.path.split(filename)[-1]\n filename = filename[:str.rfind(filename, '.jsonl.gz')]\n _, mode, idx = filename.split('_')\n return mode, idx", "def parse_file_name(file_name):\n\n elements = file_name.split(\"_\")\n if file_name.find(\"_VI_\") > 0:\n client = elements[0]\n capture_range = \"R1\"\n condition = elements[2]\n polarization = \"VIS\"\n shot = elements[4]\n modality = \"VIS\"\n else:\n client = elements[0]\n capture_range = elements[1]\n condition = elements[2]\n polarization = elements[3]\n shot = elements[4]\n modality = \"THERMAL\"\n \n return client, capture_range, condition, polarization, shot, modality", "def extract_date_metadata(fname):\n\n try:\n # check if file has creation date, exception if not\n date_metadata = fileops.get_video_creation_date_metadata(fname)\n\n # extract the date/time string from metadata, exception if\n # not the proper format\n datetimestr = metadata_to_datetimestr(date_metadata)\n\n logging.debug(\"Found creation date metadata %r for file %r\",\n datetimestr, os.path.basename(fname))\n\n return datetimestr\n\n except fileops.VideoMetadataError:\n logging.warning(\n \"%r does not have a proper creation date metadata\",\n os.path.basename(fname))\n\n return \"\"\n\n except DateStrError:\n logging.warning(\n \"%r creation data metadata not the right format\",\n os.path.basename(fname))\n \n return \"\"", "def _get_python_info_rename(path: str) -> str:\n if path.name.endswith(\".egg-info\"):\n f = \"PKG-INFO\"\n else:\n # Assume dist-info. 
Are there other options?\n f = \"METADATA\"\n pkgmetainfodata = path / f\n with pkgmetainfodata.open() as f:\n for line in f:\n match = re.match(r'^Name: ([A-Z-a-z].+)', line)\n if match:\n name = match.group(1)\n break\n if not line.strip():\n # First blank line; gone too far; give up\n return\n else:\n return\n return name + path.suffix", "def extract_filename(str):\n regex = r\"([0-9_-]+).jpg\"\n matches = re.search(regex, str)\n if matches:\n return matches.group(1)", "def get_preset_metadata(self, filename):\r\n\r\n raise NotImplementedError", "def get_data_from_name(image_name):\n nome = image_name.split(\".\")[0]\n nome_recebido = list(nome)\n ano = ''.join(nome_recebido[:4])\n mes = ''.join(nome_recebido[4:6])\n dia = ''.join(nome_recebido[6:8])\n hora = ''.join(nome_recebido[8:10])\n minuto = ''.join(nome_recebido[10:12])\n segundo = ''.join(nome_recebido[12:14])\n codigo = ''.join(nome_recebido[14:24])\n certeza = ''.join(nome_recebido[24:27])\n placa = ''.join(nome_recebido[27:34])\n posicao = ''.join(nome_recebido[34])\n classificao = ''.join(nome_recebido[35:37])\n velocidade = ''.join(nome_recebido[37:40])\n comprimento = ''.join(nome_recebido[40:43])\n sequencial = ''.join(nome_recebido[43:])\n\n return [ano, mes, dia, hora, minuto, segundo, codigo, certeza, placa, posicao, classificao, velocidade, comprimento,\n sequencial]", "def extract_description(path):\n return os.path.splitext(os.path.basename(path))[0]", "def test_get_original_file_name_match_regex(self):\n test_file_name = \"uploaded_file_name_%s_abcd123\" % settings.FILE_DUPLICATION_MARKER\n expected_file_name = \"uploaded_file_name\"\n cfs = CustomFileStorage()\n self.assertEqual(cfs.get_original_file_name(test_file_name), expected_file_name)", "def LoadMetadata(filename):\r\n## print filename\r\n globbed=glob.glob(os.path.join(os.path.dirname(filename),'*.zvi'))\r\n if globbed:\r\n return LoadZVIMetaData(globbed[0])\r\n globbed=glob.glob(os.path.join(os.path.dirname(filename),'*.xml'))\r\n if globbed:\r\n return LoadAxioVisionXMLMetaData(globbed[0])\r\n globbed=glob.glob(os.path.join(os.path.dirname(filename),'metadata.txt'))\r\n if globbed:\r\n return LoadMMMetaData(globbed[0])\r\n return None\r\n #no further valid options, crash horribly\r", "def read_photo_date(file_name):\n # Open image file for reading (binary mode)\n fd = open(file_name, 'rb')\n\n # Return Exif tags\n tags = exifread.process_file(fd)\n try:\n date_time = tags['EXIF DateTimeOriginal']\n except KeyError:\n date_time = get_timestamp_from_mp4(os.path.basename(file_name))\n if date_time == \"\":\n # date time info is not valid in exif, try to get file's create time\n date_time = get_file_modification_time(file_name)\n \n\n log(str(date_time) + \"--->\" + str(file_name))\n\n #parse date time string and returns tuple\n words = str(date_time).split(' ')[0].split(':') #2013:11:16 17:44:16\n if len(words) == 3:\n y = words[0]\n m = words[1]\n d = words[2]\n else:\n words = str(date_time).split(' ')[0].split('-') # 2015-01-08 16:05:13\n y = words[0]\n m = words[1]\n d = words[2]\n\n #returns a tuple\n return y, m, d", "def extractParticular(link):\n webpage = openWebsite(link).read()\n nameIndexStart = webpage.index('<title>') + 7\n nameIndexStop = webpage[nameIndexStart:].index('</title>') + nameIndexStart - 1\n name = webpage[nameIndexStart : nameIndexStop].split('-')[0]\n name = \" \".join(name.split())\n name = re.sub('/', '', name)\n\n avatarName = RESTAURANTPATH + '{}.png'.format(\"\".join(name.split()).lower())\n captureImage(link, 
avatarName)\n\n return name, avatarName", "def extract_metadata_videoname(basename):\n # basename could be a path to a bb video file or just the basename.\n # TODO(gitmirgut): Check if in data from 2015 is in the same string format.\n fn_wo_ext = os.path.splitext(os.path.basename(basename))[0]\n id_str, interval_str = fn_wo_ext.split('_')[1:]\n start_str, end_str = interval_str.split('--')\n id_int = int(id_str)\n start_ts = iso8601.parse_date(start_str)\n end_ts = iso8601.parse_date(end_str)\n series = pd.Series([id_int, start_ts, end_ts],\n index=['cam_id', 'start_ts', 'end_ts'])\n return series", "def reFileName(str_):\n rv = 'None', str_\n m = re.match(r'((?:[a-zA-Z0-9-]){4,})_(.*)$', str_)\n if m:\n rv = m.group(1), m.group(2)\n else:\n m = re.match(r'(\\d+-\\d+)\\.-\\.(.*)$', str_)\n if m:\n rv = m.group(1), m.group(2)\n return rv", "def _extract_metadata(self) -> None:\n self.log(\"Extracting metadata.\")\n image_paths: list[Path] = []\n for ext in (\"jpg\", \"jpeg\", \"png\"):\n image_paths.extend(self._base_dir.glob(f\"**/*.{ext}\"))\n image_paths_str = [str(image.relative_to(self._base_dir)) for image in image_paths]\n filepaths = pd.Series(image_paths_str)\n metadata = cast(\n pd.DataFrame,\n filepaths.str.split(\"/\", expand=True).rename( # type: ignore[attr-defined]\n columns={0: \"superclass\", 1: \"concept\", 2: \"context\", 3: \"filename\"}\n ),\n )\n metadata[\"filepath\"] = filepaths\n metadata.sort_index(axis=1, inplace=True)\n metadata.sort_values(by=[\"filepath\"], axis=0, inplace=True)\n metadata = self._label_encode_metadata(metadata)\n metadata.to_csv(self._metadata_path)", "def test_jpeg_exif(h, f):\n if h[6:10].lower() == 'exif':\n return 'jpeg'", "def img_in(filename):\n temp_img = Image.open(filename)\n img = np.array(temp_img)\n name = filename.split('.')[-2]\n return name, img" ]
[ "0.7071573", "0.66357124", "0.64707863", "0.6266383", "0.61719465", "0.6033898", "0.600292", "0.59774214", "0.59133536", "0.5879702", "0.5877382", "0.5843988", "0.5837125", "0.5832479", "0.58029586", "0.57413375", "0.5720259", "0.57090443", "0.5669945", "0.5668011", "0.5648759", "0.5648031", "0.5572095", "0.5543046", "0.55327857", "0.55228084", "0.5515169", "0.5507461", "0.55067086", "0.55063075" ]
0.6781023
1
Insert the crop represented by file_name into this image.
def insert(self, file_path: str, annot_type: str) -> None: if self._valid_file_name_regex.match(os.path.basename(file_path)) is None: raise ValueError(f'Illegal file name: {os.path.basename(file_path)}') x_pos = get_metadata_from_filename(file_path).x_pos if x_pos in self._x_positions: col = self._cols[x_pos] else: col = Column() self._x_positions.append(x_pos) self._x_positions.sort() col.insert(Crop(file_path, annot_type)) self._cols[x_pos] = col self.n_cols = len(self._cols)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_current(self, image_name):\n # Sets the position of the crop\n self.j ,self.i = 0, 0\n\n # loads the image\n self.image = convert2int(tifffile.imread(image_name)).astype(numpy.float32)\n\n # Computes the number of crops in x and y\n self.ny = numpy.ceil(self.image.shape[0] / self.step)\n self.nx = numpy.ceil(self.image.shape[1] / self.step)\n\n # rescale the image\n self.image -= self.image_min\n self.image /= (0.8 * (self.image_max - self.image_min))\n self.image = numpy.clip(self.image, 0, 1)", "def generateMask(self, nameFile): \n imgPath = os.path.join(GG.utils.PATH_PHOTO_MASK, nameFile)\n imgMask = Image.open(GG.genteguada.GenteGuada.getInstance().getDataPath(os.path.join(PATH_EDITOR_IMG, self.avatarConfiguration[\"gender\"], self.avatarConfiguration[\"headSize\"], \"mask.png\")))\n imgTemplate = Image.open(GG.genteguada.GenteGuada.getInstance().getDataPath(os.path.join(PATH_EDITOR_IMG, self.avatarConfiguration[\"gender\"], self.avatarConfiguration[\"headSize\"], \"template.png\")))\n imgUpload = Image.open(imgPath)\n size = MASK_SIZE[self.avatarConfiguration[\"headSize\"]]\n imgUploadResized = imgUpload.resize(size, Image.ANTIALIAS)\n imgMask.paste(imgUploadResized, MASK_COORD[self.avatarConfiguration[\"headSize\"]], imgTemplate)\n imgMask.save(MASK_UPLOAD)\n self.avatarConfiguration[\"mask\"] = \"imgUploadMask.png\"\n self.paintMask()", "def insert_in_tree(self, pic_name, pic_num, crop_num, is_crop=False):\n \n crop = self.communicator.image_store.get_crop(pic_num, crop_num)\n \n # insert the picture/crop name in column 0\n if (is_crop == False):\n myiter = self.tree_store.append(None, None)\n if crop.available == True:\n self.tree_store.set_value(myiter, \\\n 0, '<span foreground=\"#000000\"><b>' + pic_name + '</b></span>')\n else:\n self.tree_store.set_value(myiter, \\\n 0, '<span foreground=\"#A0A0A0\"><b>' + pic_name + '</b></span>')\n elif (is_crop == True):\n #determine iter that points to row containing pic_num\n # in column 1\n parent = None\n for i in range(0, len(self.tree_store)):\n if (pic_num == self.tree_store[i][1]):\n #found the parent, insert the child\n parent = self.tree_store[i].iter\n myiter = self.tree_store.append(parent, None)\n self.tree_store.set_value(myiter, 0, '<span foreground=\"#000000\"><b>' + pic_name + '</b></span>')\n break\n # expand the row to show the crop\n self.image_tree.expand_row(self.tree_store.get_path(parent), True)\n\n # fill in the remaining columns\n self.tree_store.set_value(myiter, 1, pic_num)\n self.tree_store.set_value(myiter, 2, crop_num)\n self.tree_store.set_value(myiter, 3, \"0%\")\n \n return myiter", "def _crop(self, fieldname, scale, box):\n croputils = IImageCroppingUtils(self.context)\n data = croputils.get_image_data(fieldname)\n\n original_file = StringIO(data)\n image = PIL.Image.open(original_file)\n image_format = image.format or self.DEFAULT_FORMAT\n\n cropped_image = image.crop(box)\n cropped_image_file = StringIO()\n cropped_image.save(cropped_image_file, image_format, quality=100)\n cropped_image_file.seek(0)\n\n croputils.save_cropped(fieldname, scale, cropped_image_file)\n\n # store crop information in annotations\n self._store(fieldname, scale, box)\n\n # Purge caches if needed\n notify(Purge(self.context))", "def crop_image (filename):\n from PIL import Image\n image = Image.open(filename)\n for edge in 'NSWE':\n image = _crop(image, edge)\n image.save(filename)", "def set_crop(self, crop):\n self.crop = crop", "def crop_image(inputimage, folder, newimgname, xtop=0, ytop=64, xbottom=512, 
ybottom=448):\n\timg = Image.open(folder + os.sep + inputimage)\n\timg = img.crop((xtop, ytop, xbottom, ybottom))\n\timg.save(folder + os.sep + newimgname, 'PNG')", "def _copy_image(self, name):\n image = self._get_image(name)\n QtGui.QApplication.clipboard().setImage(image)", "def add_transect_file(self, file_name: str):\n # Create a transect dict\n #transect = {\n # 'Path': file_path,\n # 'File': file_name,\n # 'Number': index,\n #}\n\n # Add the transect to the file\n self.Files.append(file_name)", "def _generate_crop(self):\n if self.box_drawn == True:\n if (self.cd_pic_num != -1) & (self.cd_crop_num == 1):\n self.communicator.generate_crop(picture_num=self.cd_pic_num, \\\n xa=self.xa, ya=self.ya, xb=self.xb, yb=self.yb)\n else:\n print \"ERROR: can only generate a new crop from a thumbnail\"\n else:\n print \"ERROR: please select an area to generate a crop from\"", "def set_cropping(self, crop=True):\n self._crop = crop\n self._final = None # Force rebuild", "def add_image(self, file_name, content):\n self.face_fs.put(content, filename=file_name)", "def process(file_name):\n img=Image.open(str(file_name))\n cim_resized = img.resize((40,40), resample=Image.LANCZOS)\n n = cim_resized.convert('L')\n cropped = np.array(n).astype(np.float64)\n im=Image.fromarray(cropped)\n im.show()\n normalized_cropped_image = cropped - np.mean(cropped)\n normalized_cropped_image = normalized_cropped_image.reshape((-1, image_size, image_size, num_channels)).astype(np.float32)\n predicted_arr = predict(normalized_cropped_image)\n label = ''.join(['' if int(x[0]) == 10 else str(x[0]) for x in list(predicted_arr)])\n print 'LABEL: ' + label", "def crop_image(filename, n):\n image = SimpleImage(filename)\n width = image.width\n new_width = width - (2 * n)\n height = image.height\n new_height = height - (2 * n)\n image_crop_width = SimpleImage.blank(new_width, height)\n for y in range(height):\n for x in range(new_width):\n pixel = image.get_pixel((x + n), y)\n image_crop_width.set_pixel(x, y, pixel)\n image_crop_width.show()\n\n image_crop_height = SimpleImage.blank(width, new_height)\n for y in range(new_height):\n for x in range(width):\n pixel = image.get_pixel(x, y + n)\n image_crop_height.set_pixel(x, y, pixel)\n image_crop_height.show()\n\n image_crop_width_height = SimpleImage.blank(new_width, new_height)\n for y in range(new_height):\n for x in range(new_width):\n pixel = image.get_pixel(x + n, y + n)\n image_crop_width_height.set_pixel(x, y, pixel)\n image_crop_width_height.show()", "def append(self, filename):\n\n self.db.single_insert_camera(filename)\n self.db.batch_insert_camera(filename)", "def crop(image, size):\n size = size.split('x')\n (root, name, ext) = split_filepath(image.path)\n filename = scale_image(image.path, (int(size[0]), int(size[1])), 'crop')\n return '/%s/%s' % (os.path.abspath(root).replace('%s/' % os.path.abspath(settings.BASE_PATH), ''), filename)", "def clip(self):\n \n subprocess.call(['gdaltindex', self.extent, self.referenceImagePath])\n dataNames = sorted(glob.glob(self.fullPath + '/full*.tif'))\n splitAt = len(self.fullPath) + 1\n\n for i in range(len(dataNames)):\n x = dataNames[i]\n y = dataNames[i][:splitAt] + dataNames[i][splitAt+4:]\n subprocess.call(['gdalwarp', '-r', 'near', '-cutline', self.extent, '-crop_to_cutline', x, y, '-dstnodata', '9999'])\n \n for n in dataNames:\n os.remove(n)\n dataNames = sorted(glob.glob(self.fullPath + '/*.tif'))\n test = gdal.Open(dataNames[0]).ReadAsArray()\n logger.log('SUCCESS', 'Clipping complete! 
%d %s files were successfully clipped to the size of %s with dimensions %d rows by %d columns' % (len(dataNames), str(self.outformat), str(self.referenceImagePath), test.shape[0], test.shape[1]))", "def crop_and_save_single(img,crop_height,crop_width,image_save_dir,name,with_label=False):\n\n assert np.mod(img.shape[0], crop_height) == 0\n assert np.mod(img.shape[1], crop_width) == 0\n\n num_row = img.shape[0] #// crop_height\n num_col = img.shape[1] #// crop_width\n crop_img = np.zeros((crop_height, crop_width, 4))\n\n for row in range(0,num_row,crop_height):\n for col in range(0,num_col,crop_width):\n # print(\"row:{}, row+crop height:{}, j: {}, row+cropwidth:{}\".format(row,row+crop_height,col,col+crop_width))\n crop_img = img[row:row+crop_height, col:col+crop_width, :]\n\n # out_name = img_name[:-4] + '_' + \\\n out_name = name + '_' + \\\n str(num_col) + '_' + str(row).zfill(2) + \\\n '_' + str(col).zfill(2)+'.png'\n\n # if with_label:\n # label_name = \"/\"+str(index) + \"_\" + date_time + \"_label\"\n # crop_3_ch = crop_img[:,:,:3] # if cropping a labeled image\n # crop_label = crop_img[:,:,-1] # if cropping a labeled image\n # PIL_crop_label = Image.fromarray(crop_label.astype(np.uint8))\n # # PIL_crop_label.save(save_dir[1]+\"_label_\"+out_name) # if cropping a labeled image\n\n PIL_crop = Image.fromarray(crop_img[:,:,:3].astype(np.uint8))\n # if with_label:\n # # return PIL_crop,PIL_crop_label\n # # return PIL_crop\n PIL_crop.save(image_save_dir+\"/\"+out_name)", "def add_image(self, image_file_name):\n # check\n if os.path.exists(image_file_name) is False:\n raise NotImplementedError(\"Image file %s does not exist.\" % image_file_name)\n\n self._myCanvas.add_image_file(image_file_name)\n\n return", "def Save_Image_Crop(img, x, y, width, height, filename = None, path = 'Predictions'):\n img = img[y:y+height, x:x+width,:]\n\n if filename is not None:\n try: \n os.mkdir(path)\n except OSError as error: \n print('') \n fig, ax = plt.subplots(figsize=(18, 20))\n ax.imshow(img)\n plt.tight_layout()\n plt.savefig(path + '/' + filename + '_Crop.png')", "def insert_file(wrd, doc, filename: str) -> None:\n doc.Content.Select()\n wrd.Selection.Collapse(0)\n wrd.Selection.InsertFile(filename)", "def display_cropped_img(i):\n image = PIL.Image.open(testing_img_paths[i])\n image = image.crop(box=(313,99,825,611))\n image = image.resize((256,256))\n display(image)", "def crop_image(self, img):\n img.crop_image(self._center, 1.1 * self._radius)", "def add_file_to_clean(self, filename):\n self.files_to_clean.add(filename)", "def _image_paste(self, image, dest_image, pos_x, pos_y):\n dest_image.paste(image, (pos_x, pos_y))", "def crop(self, *args, **kwargs):\n return _image.image_crop(self, *args, **kwargs)", "def update_movie(self, file_name):\n try:\n pix = QPixmap(file_name)\n self.cur_imageRect['width'] = pix.width()\n self.cur_imageRect['height'] = pix.height()\n if self.isFullScreen():\n width = self.screen_width\n height = self.screen_height\n padding_left = 0\n padding_top = 0\n else:\n width = 1000\n height = 450\n padding_left = 40\n padding_top = 50\n scale = min(width / pix.width(), height / pix.height())\n self.video_label.setGeometry(padding_left, padding_top, pix.width() * scale, pix.height() * scale)\n self.video_label.clear()\n self.video_label.setPixmap(pix)\n except:\n pass\n os.remove(file_name)", "def _pasteFile(self) -> None:\n if not self._fileClipboard:\n return\n cut = self._fileClipboard.pop()\n filenames = [x.name for x in self._fileClipboard]\n destPaths = 
[self._currPath.joinpath(x) for x in filenames]\n try:\n duplicates = []\n for src, dest in zip(self._fileClipboard, destPaths):\n if src == dest:\n raise shutil.SameFileError\n if dest in self._currPath.glob('*'):\n duplicates.append(dest)\n if duplicates:\n if self._overwriteFileMsgBox(duplicates) == QMessageBox.Cancel:\n self._fileClipboard.clear()\n self._pasteFileAction.setEnabled(False)\n return\n for src, dest in zip(self._fileClipboard, destPaths):\n if cut and src.is_file():\n shutil.move(str(src), str(dest))\n elif src.is_dir():\n dir_util.copy_tree(str(src), str(dest))\n if cut:\n shutil.rmtree(src)\n elif src.is_file():\n shutil.copy(str(src), str(dest))\n elif not src.exists():\n raise FileNotFoundError\n self._statusBar.showMessage('File pasted!', 3000)\n self._fileClipboard.clear()\n self._pasteFileAction.setEnabled(False)\n except shutil.SameFileError:\n self._statusBar.showMessage('You cannot overwrite the same file!', 3000)\n self._fileClipboard.clear()\n except PermissionError:\n self._statusBar.showMessage('No permission to copy the file!', 3000)\n self._fileClipboard.clear()\n except FileNotFoundError:\n self._statusBar.showMessage('Cannot find the source file!', 3000)\n self._fileClipboard.clear()\n finally:\n self._listDirectories()", "def crop(self, coords):\n pass", "def crop_image(image_to_crop, year):\r\n\timg = Image.open(image_to_crop)\r\n\t#The dimensions of just the US in the image\r\n\timg = img.crop((80, 240, 800, 615))\r\n\r\n\tfile_destination = \"images/cropped_images/\" + str(year) + \".png\"\r\n\r\n\timage_file = open(file_destination, 'wb')\r\n\timg.save(image_file, 'png')\r\n\timage_file.close()" ]
[ "0.60004956", "0.5452988", "0.53952855", "0.53464556", "0.53207326", "0.52505255", "0.5239172", "0.518678", "0.5163597", "0.5147501", "0.5141308", "0.513236", "0.5121384", "0.5045127", "0.5008329", "0.50027025", "0.49967998", "0.4968934", "0.49625248", "0.4922145", "0.4885444", "0.48595783", "0.48428452", "0.48388302", "0.47980636", "0.4796322", "0.4778058", "0.47633728", "0.47617763", "0.4761282" ]
0.56664383
1
Remove unlabelled columns in [start-col_width, end+col_width].
def _remove_overlaps(start, end) -> int: start = self._x_positions[start % self.n_cols] end = self._x_positions[int(end) % self.n_cols] n_removed = 0 for x, col in self._cols.items(): if start - self.col_width <= x <= start or end <= x <= end + self.col_width: if col.label is None: n_removed += col.mark_as('ignore') return n_removed
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_cols_drop():", "def CleanUp(self):\n blankColumnPattern = re.compile('^-*$')\n blankColumns = []\n for columnIndex in range(self.alignment.get_alignment_length() - 1):\n columnValues = self.alignment[:,columnIndex]\n match = blankColumnPattern.search(columnValues)\n if (match):\n blankColumns.append(str(columnIndex))\n for column in blankColumns[::-1]:\n self.DeleteRange(',' + str(column), True)\n self.Show(self.displayedColumn)\n self.BackupAlignment()", "def strip_left_cols(df, cols_to_strip):\n columnss = df.columns\n return df[columns[cols_to_strip:]]", "def remove_insertion_columns(self):\n cols = self.get_insertion_columns()\n s = []\n a = 0\n for b in cols:\n if b > a:\n s.append((a, b))\n a = b + 1\n s.append((a, len(self.col_labels)))\n for name, seq in list(self.items()):\n news = []\n for c in s:\n news.append(seq[c[0]:c[1]])\n self[name] = \"\".join(news)", "def _remove_redundant_columns(self):\n self.dataframe.drop(['letter', 'sentiment'], axis=1, inplace=True)", "def clean(df):", "def get_cols_dummy():", "def remove_below_lower_length_limit(self) -> None:\n for column_name in self.data:\n threshold_executor = TrimUtils.remove_text_below_lower_length_threshold(\n self.config[f'{column_name}_lower_length_limit']\n )\n self.data = self.data[self.data[column_name].map(threshold_executor)]\n self.data.reset_index(drop=True, inplace=True)", "def trimDf(df):\n cols = set(df.columns)\n\n cols.remove('exclamationCount') # bug in our feature extraction code\n cols.remove('price') # considered only free apps\n cols.remove('appName') # removing appNames\n\n # return df[list(cols)]\n\n\n\n return df[list(('revSent', 'appLabel'))]", "def remove_intermediate_columns(dataframe):\n\n combined_dataframe_dropped_cols = dataframe.drop(columns = ['measureland_qualifier_flag_speed',\n 'measureland_qualifier_flag_distance',\n 'measureland_qualifier_flag_acceleration',\n 'measureland_qualifier_flag_visual'])\n\n print(\"Dimensions of combined dataframe after dropping columns:\", combined_dataframe_dropped_cols.shape)\n print(\"Combined dataframe after dropping columns: \", combined_dataframe_dropped_cols.sample(10))\n\n return combined_dataframe_dropped_cols", "def delete_all_gap(self):\n # pdb.set_trace()\n\n rem = set(self.get_all_gap_cols())\n subset = [x for x in range(0, self.get_length()) if x not in rem]\n self.remove_columns(set(rem))\n #_LOG.debug(\"Alignment length reduced to %d\" % len(subset))\n return subset", "def truncate_labels(labels):\n def do_one_row(row):\n erase = False\n for i, _ in enumerate(row):\n if erase:\n row[i] = -1\n else:\n if row[i] == 10:\n erase = True\n return row\n\n ret = np.copy(labels)\n ret = repair_labels(ret)\n return np.apply_along_axis(do_one_row, axis=1, arr=ret)", "def delete_padded_rows(data, labels, n_dimensions):\n labels = np.repeat(labels, data.shape[1])\n data = data.reshape(-1, n_dimensions)\n added_rows = np.where(np.all(data == 0, axis=1))\n data = data[~added_rows[0]]\n labels = labels[~added_rows[0]]\n\n return data, labels", "def remove_empty_columns(aln, enforce_codon=False):\n\n ind = []\n seqs = aln.values()\n alnlen = aln.alignlen()\n\n if not enforce_codon:\n for i in range(alnlen):\n for seq in seqs:\n if seq[i] != \"-\":\n ind.append(i)\n break\n else:\n if alnlen % 3 != 0:\n raise Exception(\n \"cannot set enforce_codon if alignment length \"\n \"is not a multiple of three\")\n\n for i in range(0, alnlen, 3):\n for seq in seqs:\n if seq[i:i+3] != \"---\":\n ind.extend([i, i+1, i+2])\n break\n\n return subalign(aln, ind)", 
"def clear_columns(prefixlist,datas):\n\n ori_columns=datas.columns.tolist()\n ccc=rem_str(prefixlist,ori_columns)\n ccc=rem_str('_',ccc)\n ccc=[c.lower() for c in ccc]\n \n d = {key: value for (key, value) in zip(ori_columns,ccc)}\n datas.rename(columns=d,inplace=True)\n\n u, i = np.unique(datas.columns, return_index=True)\n y=u[np.argsort(i)] \n \n r=[datas.columns.tolist().index(rr)for rr in y]\n\n return datas.iloc[:, r]", "def remove_colspan(self, ):\n if self.AttributeNames.COLSPAN in self.attrs:\n del self.attrs[self.AttributeNames.COLSPAN]\n return self", "def _set_columns(self, start, end):\n if start <= end <= self.width:\n self._write(ST7789_CASET, _encode_pos(\n start+self.xstart, end + self.xstart))", "def remove_gapped_columns(aln):\n cols = zip(* aln.values())\n ind = util.find(lambda col: \"-\" not in col, cols)\n return subalign(aln, ind)", "def fitCols(self, col_start, col_end, sheet):\r\n col_n = col_start\r\n while col_n <= col_end:\r\n self.fitCol(col_n, sheet)\r\n col_n = col_n + 1", "def removeCols(self) -> List['StateNode']:\n cols = self.state[1]\n states: List[StateNode] = []\n for i in range(len(cols)):\n for j in range(i + 1, len(cols) + 1):\n # for j in range(i + 1, i + 2):\n new_cols = cols[:i] + cols[j:]\n if len(new_cols) == 0:\n continue\n states.append(StateNode(self.table, \n (self.state[0], new_cols),\n ([], cols[i:j]),\n self.cost + j - i + self.count_pairs(self.state[0], cols[i:j]),\n self))\n return states", "def get_empty_columns(\n dc_input: deepconsensus_pb2.DeepConsensusInput) -> List[int]:\n columns_to_remove = []\n for i in range(len(dc_input.subreads[0].bases)):\n all_internal_gaps = True\n for subread in dc_input.subreads:\n if subread.bases[i] != dc_constants.GAP_OR_PAD:\n all_internal_gaps = False\n break\n if all_internal_gaps:\n columns_to_remove.append(i)\n return columns_to_remove", "def test_structural_remove_columns_all_1_0(self):\n cp = Plotter.from_smiles(['CCCC', 'CCCC'], sim_type=\"structural\")\n self.assertTrue(cp._Plotter__df_descriptors.empty)", "def remove(dataframe, limit=250):\n logfile = open('logfile_removecolumns.txt', 'w') # Create a logfile\n logfile.write('=====> Time: %s <=====\\n' % time.asctime(time.localtime()))\n logfile.write('=====> Log from file %s.py <===== \\n\\n' % __name__)\n\n columns_overview = dataframe.columns.summary() # Create an overview of the dataframe\n cols_list = dataframe.columns.tolist()\n cols_to_be_deleted = list()\n logfile.write('Overview of the dataframe: \\n%s' % columns_overview)\n\n for stock in range(len(cols_list)): # Walk through all stocks\n if dataframe[cols_list[stock]].isnull().sum() > limit: # Check No. of null values in a column\n cols_to_be_deleted.append(cols_list[stock])\n \n logfile.write('\\nNo. of Columns with more that %d missing values: %s\\n'\n % (limit, len(cols_to_be_deleted)))\n logfile.write('Deleted columns:\\n')\n for col in cols_to_be_deleted:\n logfile.write('%s \\n' % str(col))\n logfile.close()\n \n # Return updated dataframe or list of columns. 
See test code below\n dataframe_updated = dataframe[dataframe.columns.drop(cols_to_be_deleted)]\n return dataframe_updated", "def exclude_cols(self, *_, **__) -> Tuple[str, ...]:", "def smooth_columns(input_frame):\n column_labels = list(input_frame.columns)\n input_frame.columns = [c.lower().replace('_','') for c in column_labels]\n return input_frame", "def eliminateRedundantInfo(self):\n\n allEliminated = False\n edep = self.energyDependentWidths\n for colId in range(edep.nColumns)[::-1]:\n column = edep.columns[colId]\n columnData = edep.getColumn( column.name, units='eV' )\n if len(set( columnData ) ) == 1:\n setattr( self.constantWidths, column.name, PQU.PQU( PQU.pqu_float.surmiseSignificantDigits( columnData[0] ), column.units ) )\n [d.pop(colId) for d in edep.data]\n edep.columns.pop(colId)\n for idx, col in enumerate( edep.columns ): col.index = idx #re-number\n #if edep.nColumns == 1 and edep.columns[0].name == 'energy':\n # edep.columns, edep.data = [],[] # all widths are constant\n # allEliminated = True\n return allEliminated", "def clear_columns(prefixlist,datas,style=0, inplace=False):\n func = {0: str.lower,\n 1: str.upper,\n 2: str.capitalize}\n\n ori_columns=datas.columns.tolist()\n ccc=rem_str(prefixlist,ori_columns)\n ccc=rem_str('_',ccc)\n# ccc=[c.lower() for c in ccc]\n ccc=[func[style](c) for c in ccc]\n\n d = {key: value for (key, value) in zip(ori_columns,ccc)}\n datas_renamed=datas.rename(columns=d,inplace=inplace)\n new_datas=datas if inplace else datas_renamed\n\n u, i = np.unique(new_datas.columns, return_index=True)\n y=u[np.argsort(i)]\n\n r=[new_datas.columns.tolist().index(rr)for rr in y]\n\n return new_datas.iloc[:, r]", "def remove_columns(df):\n avg = np.mean(df[df['sentiment'] != 'None']['sentiment'].astype('float'))\n df['sentiment'] = df['sentiment'].replace('None', avg).astype('float')\n\n to_remove = []\n print('column(s) removed: ')\n for column in df.columns:\n print(column)\n if(np.unique(df[column][df[column].notnull()]).shape[0] < 2):\n print(column)\n to_remove.append(column)\n \n return df.drop(columns = to_remove)", "def remove_columns(tx, header, columns_to_remove):\n print(\"\\nRemove columns...\")\n num_removed = 0\n for col in columns_to_remove:\n tx = np.delete(tx, col - num_removed, 1)\n header = np.delete(header, col - num_removed + 2)\n num_removed += 1\n print(\"\\n... finished.\")\n return tx, header", "def __clean_repeated_columns(self, df, column_type):\n for column in df.columns:\n if column_type in column.lower():\n # Fill main column with data from \"prefix + _\" type column names.\n df[column_type[:-1]].fillna(df[column], inplace=True)\n # Drop the \"prefix + _\" type column names.\n df.drop(column, axis=1, inplace=True)" ]
[ "0.6347648", "0.60383844", "0.5991607", "0.5967662", "0.57150346", "0.56798846", "0.5672258", "0.5656547", "0.56305486", "0.5617258", "0.55419534", "0.5483048", "0.54566985", "0.54525024", "0.5401275", "0.5390643", "0.5390326", "0.5388381", "0.53292286", "0.53068745", "0.52854407", "0.52827644", "0.52759993", "0.5208304", "0.51816946", "0.51556236", "0.51554507", "0.51490206", "0.5144577", "0.51425093" ]
0.67192227
0
Return index of first unlabelled column after x.
def _next_unlabelled_col(x): for i in range(self.n_cols): idx = (x + i) % self.n_cols x_current = self._x_positions[idx] if self._cols[x_current].label is None: return idx
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def XToCol(self, x):\r\n \r\n colLeft = 0\r\n numColumns = self.GetColumnCount()\r\n for col in xrange(numColumns):\r\n \r\n if not self.IsColumnShown(col):\r\n continue \r\n\r\n column = self.GetColumn(col)\r\n\r\n if x < (colLeft + column.GetWidth()):\r\n return col\r\n \r\n colLeft += column.GetWidth()\r\n \r\n return wx.NOT_FOUND", "def column_index(self, column_label):\n return self.column_labels.index(column_label)", "def _findIndex(self, x):\n if x< self[0][0] or x> self[-1][0]:\n return None\n\n idx = bisect.bisect_left(self.xproxy, x)\n if self[idx][0] == x:\n return idx\n else:\n return idx-1", "def getColumn(self, x):\n i = _getIndex(x, self.columnNames)\n return self.data[i]", "def _get_column(self, index):\n left, right = self._get_columns()\n return left if index < left.count else right", "def _get_col(self, idx):\n return self.line[self._fwf.column_slices[idx]]", "def get_undef_cols_idx(x, undef_val):\n undef_col_idx = []\n for col_idx in range(x.shape[1]):\n column = x[:, col_idx]\n if((column == undef_val).all()):\n undef_col_idx.append(col_idx)\n\n return undef_col_idx", "def __get_column(self, index: int) -> int:\n return index % self.columns", "def get_index(self, column):\r\n\r\n\t\treturn self.columns.index(column)", "def get_drop_row(self, x):\n for y in range(self.size_y):\n if self.get_piece_at_opening(x, y) == Piece.NONE:\n return y\n return -1", "def get_colnumber(self, header):\n for i in range(0, len(self.data)):\n if self.data[i][0] == header:\n return i\n return None", "def xy_to_index(x, y):\n index = y * columns + x\n return index", "def getColIdx(self, col):\n try: \n return int(col)\n except:\n return ord(col)-ord('a')", "def getColIdx(self, col):\n try:\n return int(col)\n except:\n return ord(col)-ord('a')", "def _get_col(self, idx):\n return self.text[self._fwf.column_slices[idx]]", "def canonicalize_column_index(self, line, col):\n if col < 0:\n col += self.col_lens[line] + 1\n assert col >= 0\n return col", "def cells_x(self):\n return self._cells[0]", "def _position_x_to_column(self, x, y):\n col = -1\n if y>self.padding_top and y<self.padding_top+self.len_y_cercles:\n for i in range(self.n_columns):\n if x>self.padding_left+i*63 and x<self.padding_left+i*63+self.diam_cercles:\n col = i+1\n break\n return col", "def get_nearest_index(self, x_value: float) -> int:\n return int(np.argmax(self.x >= x_value))", "def xAt(self, col):\n\n return self.bottomBoard.x + self.bottomBoard.xAt(col)", "def find_col(table, col):\n return table[0].index(col)", "def __column_height(self, x):\n\t\tcolumn = self.board[:, x]\n\t\treturn np.count_nonzero(column)", "def column(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"column\")", "def column(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"column\")", "def column(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"column\")", "def column(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"column\")", "def column(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"column\")", "def column(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"column\")", "def column(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"column\")", "def column(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"column\")" ]
[ "0.670367", "0.66707695", "0.65949285", "0.63676727", "0.6300657", "0.62851495", "0.6224062", "0.62041897", "0.61869335", "0.6082615", "0.6070392", "0.6063719", "0.60264426", "0.6016226", "0.59535724", "0.59106576", "0.58775485", "0.5842333", "0.5821943", "0.5775763", "0.57735604", "0.57628155", "0.5743636", "0.5743636", "0.5743636", "0.5743636", "0.5743636", "0.5743636", "0.5743636", "0.5743636" ]
0.84793603
0
Move the file associated with this crop to the directory path/annot_type, where annot_type is this crop's annotation type.
def move_to(self, path: str) -> None: self._new_path = os.path.join(path, self.annot_type, os.path.basename(self._file_path)) os.rename(self._file_path, self._new_path) self._file_was_moved = True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def moveFile(self, srcPath):\n # Gets the classification for the file type of the path moved\n classification = self.classifyFile(srcPath)\n\n if classification:\n # Gets the output path given the file type\n newPath = self.outPaths[classification][\"outPath\"] + srcPath.split(\"/\")[-1]\n\n # Execute instruction\n os.replace(srcPath, newPath)", "def move_file(self, ctx):\n pass", "def move(self, **kwargs):\n if os.path.exists(self.old_artifact_path):\n if os.path.exists(self.target):\n shutil.rmtree(self.target)\n log.info(\"Copying %s on the local filesystem\" % self.type)\n shutil.copytree(self.old_artifact_path, self.target)\n else:\n log.warning(\"Not moving docs, because the build dir is unknown.\")", "def move_file(self, path: PathLike, dest: PathLike, force: bool = False):", "def insert(self, file_path: str, annot_type: str) -> None:\n if self._valid_file_name_regex.match(os.path.basename(file_path)) is None:\n raise ValueError(f'Illegal file name: {os.path.basename(file_path)}')\n x_pos = get_metadata_from_filename(file_path).x_pos\n if x_pos in self._x_positions:\n col = self._cols[x_pos]\n else:\n col = Column()\n self._x_positions.append(x_pos)\n self._x_positions.sort()\n col.insert(Crop(file_path, annot_type))\n self._cols[x_pos] = col\n\n self.n_cols = len(self._cols)", "def _move_to_inserted_directory(file_path: str):\n parts = list(Path(file_path).parts)\n parts.insert(-1, 'inserted')\n move(file_path, str(Path(*parts)))", "def _move_image(self, label, ind):\r\n root, file_name = os.path.split(self.df.sorted_in_folder[ind])\r\n # two lines below check if the filepath contains as an ending a folder with the name of one of the labels\r\n # if so, this folder is being cut out of the path\r\n if os.path.split(root)[1] in labels:\r\n root = os.path.split(root)[0]\r\n# output_path = os.path.join(root, label, file_name)\r\n output_path = self.label_dir + '/' + label + '/' + file_name\r\n print(\"file_name =\",file_name)\r\n print(\" %s --> %s\" % (file_name, label))\r\n move(self.df.sorted_in_folder[ind], output_path)\r\n \r\n # keep track that the image location has been changed by putting the new location-path in sorted_in_folder \r\n self.df.loc[ind,'sorted_in_folder'] = output_path\r\n \r\n #####\r", "def move_file(path):\n new_path = os.path.join(TEST_DIR, TEST_FILE)\n command = ['mv', TEST_FILE, new_path]\n file_operation(path, command)", "def move_file(file, dest_path):\n if os.path.isdir(dest_path):\n shutil.move(file, dest_path)\n else:\n os.mkdir(dest_path)\n shutil.move(file, dest_path)", "def _move(self, in_file, dest):\n dest = os.path.abspath(dest)\n _, in_base_name = os.path.split(in_file)\n dest_parent_dir, _ = os.path.split(dest)\n if os.path.exists(dest):\n out_file = os.path.join(dest, in_base_name)\n else:\n if not os.path.exists(dest_parent_dir):\n os.makedirs(dest_parent_dir)\n out_file = dest\n shutil.move(in_file, dest)\n\n return out_file", "def mv(self, src_path, dst_path):\n try:\n postdata = codecs.encode(json.dumps({ 'src': src_path, 'dst': dst_path }), 'utf-8')\n self._urlopen('/api/fileops/move', postdata).read()\n except HTTPError as err:\n raise RuntimeError(\"Unable to move '{}' to '{}'\".format(src_path, dst_path))", "def on_moved(self, event):\n print(\"Moved\")\n time.sleep(5)\n self.moveFile(event.dest_path)", "def file_move(self, from_path, to_path):\n params = {'root': self.session.root,\n 'from_path': format_path(from_path),\n 'to_path': format_path(to_path)}\n\n url, params, headers = self.request(\"/fileops/move\", params)\n\n return 
self.rest_client.POST(url, params, headers)", "def move(self, newPath):\n\t\tif self.hasUdim:\n\t\t\tfor a in self.udimPaths:\n\t\t\t\ta.move(newPath)\n\t\telse:\n\t\t\tsuper( textureFile, self ).move( newPath )", "def convert_and_move_file (filename, origpath, wavpath, mp4path, mono):\n name, ext = path.splitext(filename)\n if ext == \".mp4\":\n print(filename)\n convert_to_wav (filename, name, origpath, wavpath, mono)\n\n if not path.exists(mp4path):\n makedirs(mp4path)\n oldlocation = path.join(origpath, filename)\n newlocation = path.join(mp4path, filename)\n shutil.move(oldlocation, newlocation)", "def move(self,fileName,destDir):\n self.unload(fileName)\n FileInfos.move(self,fileName,destDir)", "def move_file(source, destination):\n shutil.move(source, destination)", "def save(annotation, new_filename, original_path):\n \n destination = \"../../standardized-data/\"\n if os.path.isdir(destination + \"/\" + annotation) == False:\n os.mkdir(destination + \"/\" + annotation)\n print(annotation, \"FOLDER CREATED\")\n if os.path.exists(destination + \"/\" + annotation + \"/\" + new_filename):\n print('FILE EXISTS: DOUBLE CHECK FOR DUPLICATION :', new_filename)\n else:\n shutil.copyfile(original_path, destination + \"/\" + annotation + \"/\" + new_filename)\n return", "def convert_tmpfile(src_file_name:str, dest_path:str):\n src_path = os.path.join(\n current_app.config['UPLOAD_FOLDER'],\n src_file_name\n )\n if not os.path.exists(src_path):\n abort(http.HTTPStatus.BAD_REQUEST, message='raw file not exist')\n pathlib.Path(os.path.dirname(dest_path)).mkdir(parents=True, exist_ok=True)\n shutil.move(src_path, dest_path)", "def move_back(self) -> None:\n if self._file_was_moved:\n os.rename(self._new_path, self._file_path)\n pass", "def move_file(path_from, filename):\n finaldir = getormakedir(settings.UPLOAD_DEST_DIR, filename)\n\n path_to = os.path.join(finaldir, filename)\n\n if not os.path.exists(path_to):\n shutil.copyfile(path_from, path_to)\n if settings.REMOVE_UPLOAD_FILES:\n remove_file(path_from)\n\n return path_to", "def move_files(self, file_dict: Dict[str, List[str]]) -> NoReturn:\n\n for folder in file_dict:\n target_folder = os.path.join(self.out_folder, folder)\n mkdirr(target_folder)\n for file_path in file_dict[folder]:\n annotation_file_name = (\n os.path.basename(file_path)\n .replace(\"png\", \"json\")\n .replace(\"jpg\", \"json\")\n )\n annotation_file_path = os.path.join(\n self.annotation_folder, annotation_file_name\n )\n\n copy_file(file_path, os.path.join(target_folder, DATA_FOLDER))\n copy_file(\n annotation_file_path, os.path.join(target_folder, ANNOTATION_FOLDER)\n )", "def move(self, dry_run: bool) -> int:\n if self.label == 'ignore':\n return 0\n\n file_counter = 0\n for crop in self._content:\n if not dry_run:\n crop.move_to(self.label)\n file_counter += 1\n\n return file_counter", "def act_move_file(self, file_source, file_target):\n try:\n if not os.path.isfile(file_source):\n return\n path = os.path.dirname(file_target)\n if not os.path.exists(path):\n os.makedirs(path)\n shutil.move(file_source, file_target)\n #shutil.copy2(file_source, file_target)\n #os.remove(file_source)\n self.logger.debug('%s: Action: <move> %s -> %s', self.name, file_source, file_target)\n except:\n self.logger.exception('Error on file move: %s -> %s', file_source, file_target)", "def move(self, path):\n self.current_location = (path[1][1], path[1][0])", "def file_move(session, dc_ref, src_file, dst_file):\n LOG.debug(\"Moving file from %(src)s to %(dst)s.\",\n {'src': src_file, 
'dst': dst_file})\n vim = session._get_vim()\n move_task = session._call_method(\n session._get_vim(),\n \"MoveDatastoreFile_Task\",\n vim.get_service_content().fileManager,\n sourceName=src_file,\n sourceDatacenter=dc_ref,\n destinationName=dst_file,\n destinationDatacenter=dc_ref)\n session._wait_for_task(move_task)\n LOG.debug(\"File moved\")", "def move(self,fileName,destDir):\n if not os.path.exists(destDir): \n os.makedirs(destDir)\n srcPath = os.path.join(self.dir,fileName)\n destPath = os.path.join(destDir,fileName)\n renameFile(srcPath,destPath)\n self.refresh()", "def move_file():\n # print(\"\\n\".join(os.listdir(filepath)))\n # folders = [os.path.join(filepath, fld) for fld in os.listdir(filepath)]\n # print(filepath + \":\\n \" + \"\\n \".join(folders))\n folders = filter(os.path.isdir, os.listdir(u\".\"))\n # print(\"Sub-folders: \", u\"\\n\".join(folders))\n for folder in folders:\n files = [os.path.join(folder, fn) for fn in os.listdir(folder)]\n files = filter(os.path.isfile, files)\n for fn in files:\n _, filename = os.path.split(fn)\n shutil.move(fn, filename)\n assert 0 == len(os.listdir(folder))", "def moveFile(source, dest):\n try:\n shutil.move(source, dest) \n except IOError as e:\n print (\"Unable to move file. %s\" %(e))", "def movefile(destpath,filename,sourcepath):\n\n\tcommand = 'mv ' + filename + ' ' + destpath\n\t\n\ttry :\n\t\tst = commands.getstatusoutput(command)\n\texcept Exception:\n\t\traise" ]
[ "0.6297892", "0.5993658", "0.58924556", "0.58497065", "0.574657", "0.5685253", "0.5554716", "0.544296", "0.5296529", "0.5167809", "0.5167291", "0.51232123", "0.5116003", "0.51118386", "0.5106717", "0.50523245", "0.5047933", "0.50246215", "0.50095254", "0.5005804", "0.49968418", "0.49850872", "0.49801317", "0.49681452", "0.49163377", "0.4898899", "0.4884747", "0.4865526", "0.48339573", "0.48265603" ]
0.7432932
0
Undo a former file movement by moving the file back to its origin.
def move_back(self) -> None: if self._file_was_moved: os.rename(self._new_path, self._file_path) pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def undo(backup):\r\n backup.load_backup()\r\n backup.undo_moves()", "def undo():\n\n try:\n my_file.undo()\n except FileNotFoundError:\n print('No file has been read yet')\n except Exception:\n print('You must make an edit to undo')", "def undo():", "def undo_moves(self):\r\n logging.info(\"Undoing all moves held in records\")\r\n for move in self.record.keys():\r\n logging.debug('Moving {} to {}'.format(move, self.record[move]))\r\n try:\r\n os.rename(move, self.record[move])\r\n os.removedirs(os.path.dirname(move))\r\n except OSError as e:\r\n logging.error('There was an error moving the file {}'.format(move))\r\n logging.error('Error status: {}'.format(e))\r\n logging.info(\"Completed undoing moves\")\r\n try:\r\n os.remove(self.backup)\r\n except OSError as e:\r\n logging.error('There was an error removing the file {}'.format(self.backup))\r\n logging.error('Error status: {}'.format(e))", "def undo(self):\n\n if not self.can_undo():\n print(\"error: trying to undo\")\n return\n\n func = self.undo_gen(self.undo_act())\n func()\n self.position -= 1", "def undo(self) :\n \n raise NotImplementedError()", "def undo(self):\n if self.history:\n xy0, xy1, data_size = self.history.pop()\n x0, y0 = xy0\n x1, y1 = xy1\n self._used[y1][x1] -= data_size\n self._used[y0][x0] = data_size\n if self.goal == xy1:\n self.goal = xy0", "def undo(self):\n self.setIndex(self._index-1)", "def __undo(self):\n self.__undo_controller.undo()", "def undo(self):\n self._check_undo_prerequisites()\n self._decrement_history_pointer()\n self._replay_history()", "def undo(self):\n if self._history_position > 0:\n self._history_position -= 1\n self._commands[\n self._history[self._history_position][1]\n ].execute(self._history[self._history_position][2])\n else:\n print(\"nothing to undo\")", "def _restore_file(file):\n\n os.remove(file)\n os.rename(file + '.bak', file)", "def undo(self):\n self.cnvImgTest.undoLast()", "def restore_last_undo_point(self):\n self.unload()", "def rollback(self):\n self.stream.seek(0)", "def reset(self):\n self.source.seek(0)\n self.target.seek(0)", "def undoChanges(self):\n Objects.undoChanges(self)\n self.draw()", "def undo_settings(self):\r\n cF.undo_settings()", "def _undo_action(self):\n pass", "def undo_last_move(self):\n if self.last_move is None:\n return\n x, y, i, j = self.last_move\n self.boards[x][y].undo_last_move()\n if len(self.history) > 1:\n self.last_move = self.history[-2]\n else:\n self.last_move = None\n self.__on_turn = Square.X if self.__on_turn == Square.O else Square.O\n del self.history[-1]", "def undo_move(self, n=1):\n self.state = self.move_history[-n - 1]\n self.positions = self.copy_board(self.state[1])\n # delete all moves between the current state and the restored state\n del self.move_history[-n:]", "def onUndo(self, event):\r\n\t\tself.ActionHistory.Undo()", "def move_file(self, ctx):\n pass", "def undo(self):\n if self.__undo is None: # if we can not undo anymore we raise an error\n raise ControllerException(\"Error!!! 
Can't undo anymore!!!\\n\")\n else: # otherwise we simply do the swap from the undo list once more\n self.__scramble.swap(self.__undo[0], self.__undo[1], self.__undo[2], self.__undo[3])\n # self.__scramble.inc()\n self.__undo = None # undo becomes None because we don't want the user to do multiple undo operations", "def rewind(self):\n self.seek(0)", "def rewind(f):\n f.seek(0)", "def rewind(self):\n self.seek(0)", "def __editUndo(self):\n self.activeWindow().undo()", "def undo(self):\r\n\r\n if self.done.size() > 0:\r\n command = self.done.pop()\r\n if command[0] == 'add':\r\n uncommand = (('del'),\r\n command[1],\r\n command[2],\r\n command[3])\r\n self.delete(uncommand[1],\r\n False)\r\n if command[0] == 'del':\r\n uncommand = (('add'),\r\n command[1],\r\n command[2],\r\n command[3])\r\n self.addnew(uncommand[2],\r\n uncommand[3],\r\n False)\r\n if command[0] == 'move':\r\n uncommand = (('move'),\r\n command[2],\r\n command[1])\r\n self.move(uncommand[1],\r\n uncommand[2],\r\n False)\r\n self.undone.add(uncommand)", "def rewind(f):\n\tf.seek(0)" ]
[ "0.6898709", "0.6796202", "0.67440903", "0.66569364", "0.6606794", "0.6522146", "0.6483597", "0.6466735", "0.6456174", "0.6436866", "0.64312315", "0.6424864", "0.63239264", "0.62896913", "0.62687606", "0.61865735", "0.6161903", "0.614128", "0.6138943", "0.6126032", "0.6055876", "0.6041878", "0.6013321", "0.5997303", "0.59900224", "0.59493136", "0.5941243", "0.5939681", "0.5936037", "0.5912631" ]
0.7445615
0
Mark this column with the provided label. Returns number of labelled crops.
def mark_as(self, label: str) -> int: self.label = label return len(self._content) // len(ANNOTATIONS)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hit(self, label=None):\n self.labels[label] += 1", "def label_index(self, label: Text) -> int:\n count = 0\n for l in self.le.classes_:\n if(l == label):\n return count\n count += 1", "def get_count_by_label(self, label=None):\n if label is None:\n return len(self.data)\n else:\n return sum(1 for d in self.data if d.pred == label)", "def encode_label(self, label: str) -> int:\n return self.class_map[label]", "def label(self):\n return self._label_shape", "def GetCount(self, label: 'unsigned char') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIF2IUC2_GetCount(self, label)", "def get_label_num(self, *args):\n return _ida_hexrays.ctree_item_t_get_label_num(self, *args)", "def nr_labels(self):\n return None if self.pY is None else self.Y.shape[1]", "def num_labels(self) -> int:\n raise NotImplementedError", "def GetCount(self, label: 'unsigned char') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUC2IUC2_GetCount(self, label)", "def label(self, location, *args, **kwargs):\n\n if isinstance(location, fslimage.Image):\n return self.maskLabel(location, *args, **kwargs)\n else:\n return self.coordLabel(location, *args, **kwargs)", "def GetCount(self, label: 'unsigned char') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIF3IUC3_GetCount(self, label)", "def GetCount(self, label: 'unsigned char') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterISS2IUC2_GetCount(self, label)", "def get_gini(self, rows):\n label_count = defaultdict(int)\n total_count = 0\n for row in rows:\n label = row[self.target_attribute]\n label_count[label] += 1\n total_count += 1\n return 1 - sum([np.square(float(label_count[label])/total_count) for label in label_count.keys()])", "def GetCount(self, label: 'short') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIF2ISS2_GetCount(self, label)", "def _select(start, n, label) -> int:\n n_selected = 0\n for i in range(start, int(start + n)):\n x = self._x_positions[i]\n n_selected += self._cols[x].mark_as(label)\n return n_selected", "def GetCount(self, label: 'unsigned char') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUC3IUC3_GetCount(self, label)", "def GetCount(self, label: 'unsigned char') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterISS3IUC3_GetCount(self, label)", "def GetCount(self, label: 'short') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUC2ISS2_GetCount(self, label)", "def labelpos(self):\n return self._labelpos", "def get_label(self, label):\n\n return torch.from_numpy(np.array(label)).long()", "def cluster_obs_count(self):\n return(self.merged_data.groupby(\n 'labels').count().transpose().iloc[0, :])", "def GetCount(self, label: 'short') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIF3ISS3_GetCount(self, label)", "def GetCount(self, label: 'short') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterISS2ISS2_GetCount(self, label)", "def _alter(self, label):\n altered = np.full(self.n, -1)\n altered[np.where(self.y_train == label)] = +1\n return altered", "def label_counts(rows):\n counts = rows.iloc[:, 
-1].value_counts()\n return counts", "def label_pos_x_scaled(self):\n return self.label_pos_x * self.photo.aspect_ratio", "def setLabelColumn(self, value):\n return self._set(labelColumn=value)", "def setLabelColumn(self, value):\n return self._set(labelColumn=value)", "def setLabelColumn(self, value):\n return self._set(labelColumn=value)" ]
[ "0.6160278", "0.6008114", "0.5806169", "0.54976237", "0.5418939", "0.5353223", "0.5351463", "0.5350682", "0.52923065", "0.5270436", "0.52658045", "0.52610487", "0.52374464", "0.5227169", "0.5211575", "0.5198103", "0.51884943", "0.5185271", "0.51499337", "0.5137268", "0.51093817", "0.5106107", "0.50990295", "0.50824285", "0.50658983", "0.50600773", "0.5056616", "0.50445646", "0.50445646", "0.50445646" ]
0.6514316
0
Move all files of this column to the corresponding directory, if this column is not labeled to be ignored. Returns number of files moved.
def move(self, dry_run: bool) -> int: if self.label == 'ignore': return 0 file_counter = 0 for crop in self._content: if not dry_run: crop.move_to(self.label) file_counter += 1 return file_counter
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_disk(self, dry_run: bool) -> int:\n file_counter = 0\n for k, col in self._cols.items():\n self._moved_cols.append(k)\n file_counter += col.move(dry_run=dry_run)\n return file_counter", "def organizeDir(self):\n # Classify every file in dir\n for file in os.listdir(self.path):\n curPath = self.path + file\n self.moveFile(curPath)", "def _move_image(self, label, ind):\r\n root, file_name = os.path.split(self.df.sorted_in_folder[ind])\r\n # two lines below check if the filepath contains as an ending a folder with the name of one of the labels\r\n # if so, this folder is being cut out of the path\r\n if os.path.split(root)[1] in labels:\r\n root = os.path.split(root)[0]\r\n# output_path = os.path.join(root, label, file_name)\r\n output_path = self.label_dir + '/' + label + '/' + file_name\r\n print(\"file_name =\",file_name)\r\n print(\" %s --> %s\" % (file_name, label))\r\n move(self.df.sorted_in_folder[ind], output_path)\r\n \r\n # keep track that the image location has been changed by putting the new location-path in sorted_in_folder \r\n self.df.loc[ind,'sorted_in_folder'] = output_path\r\n \r\n #####\r", "def moveFiles(inputDir, inputFiles):\n\tfor file in inputFiles:\n\t\tlogger.debug('moveFiles: {0}'.format(file))\n\t\tshutil.move(join(inputDir, file), join(inputDir, 'processed', file))\n\n\treturn 0", "def move_file(self, ctx):\n pass", "def move_file():\n # print(\"\\n\".join(os.listdir(filepath)))\n # folders = [os.path.join(filepath, fld) for fld in os.listdir(filepath)]\n # print(filepath + \":\\n \" + \"\\n \".join(folders))\n folders = filter(os.path.isdir, os.listdir(u\".\"))\n # print(\"Sub-folders: \", u\"\\n\".join(folders))\n for folder in folders:\n files = [os.path.join(folder, fn) for fn in os.listdir(folder)]\n files = filter(os.path.isfile, files)\n for fn in files:\n _, filename = os.path.split(fn)\n shutil.move(fn, filename)\n assert 0 == len(os.listdir(folder))", "def moveprocessedfb2(self, input_folder_path, processed_folder_path, conn, logg):\n logg.writing_log(conn, 'Starting moving processed fb2 files')\n if os.listdir(input_folder_path):\n for file_name in os.listdir(input_folder_path):\n os.rename(os.path.join(input_folder_path, file_name), os.path.join(processed_folder_path, file_name))\n logg.writing_log(conn, 'All processed files are moved to processed folder')\n else:\n logg.writing_log(conn, 'The folder is empty, nothing to move')\n conn.commit()\n conn.close()", "def move_files(self, files: List[str], directory=\"\"):\n result = []\n for file in files:\n if directory == \"\":\n temp_file = File(file)\n new_directory = self._create_or_define(temp_file)\n origin_folder = \"\"\n else:\n new_directory = directory\n origin_folder = os.path.basename(os.path.dirname(file))\n temp_file = File(os.path.basename(file))\n\n if not file.startswith(new_directory):\n if temp_file.get_extension():\n temp_extension = \".\" + temp_file.get_extension()\n else:\n temp_extension = \"\"\n\n ordinal_number = self.check_same_objects(new_directory, temp_file)\n target_name = temp_file.get_just_name() + temp_extension\n if ordinal_number:\n formatted_ordinal_number = f\" ({ordinal_number - 1})\"\n target_name = (\n temp_file.get_just_name()\n + formatted_ordinal_number\n + temp_extension\n )\n\n if self.underscore_flag:\n target_name = target_name.replace(\" \", \"_\")\n\n new_position = os.path.join(self.directory, new_directory, target_name)\n\n file_position = os.path.join(\n self.directory, origin_folder, str(temp_file)\n )\n if file_position != os.path.join(\n 
self.directory,\n new_directory,\n temp_file.get_just_name() + temp_extension,\n ):\n result.append(os.path.join(origin_folder, str(temp_file)))\n self.possibilities[new_directory].files.append(temp_file)\n if not self.dry_run:\n os.rename(file_position, new_position)\n else:\n print(f\"{file_position} would be moved to {new_position}\")\n elif self.dry_run:\n print(\n f\"{file_position} won't be move since the location is the same\"\n )\n\n self.log_result(result, directory)", "def movedir(self):\n pass", "def moveDirectoryContents(self, source, target, force=False):\n if source.endswith('/') or source.endswith('\\\\'):\n source += '*'\n else:\n source += os.path.sep + '*'\n if force:\n command = 'mv -f %s %s'\n else:\n command = 'mv %s %s'\n self.communicate(command % (source, target))", "def _do_move(self, artist, album, song):\n try:\n move_to = \"{0}{1}/{2}/\".format(self.dupe_dir, \n artist, album)\n if not os.path.exists(move_to):\n os.makedirs(move_to)\n \n shutil.move(song['path'], move_to)\n self.moved.append(song)\n return 1\n except:\n self.logger.error(\"Could not move file: {0}\".format(str(song['path'])))\n return 0", "def move_from_temp_directory(self):", "def _move_files(topdatadir, startdate, model_forcing):\n\n curdate = startdate\n subdir = f\"{topdatadir}/cf_{model_forcing}\"\n subdir += f\"_{curdate.year:04d}{curdate.month:02d}\"\n files = glob.glob(f\"{subdir}/*.NC\")\n for filename in files:\n shutil.move(filename, os.path.join(topdatadir, os.path.basename(filename)))\n shutil.rmtree(subdir)", "def clean(self):\n clean_list = [\n position\n for position in os.listdir()\n if os.path.isfile(position) and not position.startswith(\".\")\n ]\n self.move_files(clean_list)", "def simple_move_files(selected_image_list, out_dir='/command/results/top_images_test_set/'):\n for file_no in range(len(selected_image_list)):\n shutil.move(selected_image_list[file_no], out_dir + selected_image_list[file_no].split('/')[-1])\n return", "def move_backups(self, name, source, destination, regex):\n files = os.listdir(source)\n pattern = re.compile(regex)\n for entry in files:\n match = pattern.match(entry)\n if match is None:\n continue\n if name == match.group(1):\n self.logger.debug('Archiving %s', entry)\n path = os.path.join(source, entry)\n result = self.os_rename(path, os.path.join(destination, entry))\n if result != 0:\n return result\n return 0", "def move_files(from_dir, to_dir, keyword):\n \n if not os.path.exists(to_dir):\n os.mkdir(to_dir)\n \n if keyword == None:\n # If keyword is left empty, from_dir is considered a list of files.\n to_move = from_dir\n else:\n to_move = glob.glob(os.path.join(from_dir, '*' + keyword + '*'))\n \n n_moved = 0 \n for f in to_move:\n if os.path.isfile(f):\n shutil.move(f, to_dir)\n n_moved += 1\n \n print \"Moved %i files to %s.\" % (n_moved, to_dir)", "def move_file(self, path: PathLike, dest: PathLike, force: bool = False):", "def sort_folder():\n for file in downloads_path.iterdir():\n if file.is_file():\n extension = file.suffix\n file = str(file)\n if extension in program_types:\n move_file(file, programs_path)\n elif extension in compressed_types:\n move_file(file, compressed_path)\n elif extension in doc_types:\n move_file(file, documents_path)\n elif extension in music_types:\n move_file(file, music_path)\n elif extension in video_types:\n move_file(file, video_path)\n elif extension in picture_types:\n move_file(file, pictures_path)\n else:\n move_file(file, other_path)", "def move(self, # pylint: 
disable=too-many-locals,too-many-branches,too-many-statements\n narg=None, **kw):\n cwd = self.thisdir\n kw.setdefault('cycle', self.fm.settings['wrap_scroll'])\n direction = Direction(kw)\n if 'left' in direction or direction.left() > 0:\n steps = direction.left()\n if narg is not None:\n steps *= narg\n directory = os.path.join(*(['..'] * steps))\n self.thistab.enter_dir(directory)\n self.change_mode('normal')\n\n if not cwd or not cwd.accessible or not cwd.content_loaded:\n return\n\n if 'right' in direction:\n mode = 0\n if narg is not None:\n mode = narg\n tfile = self.thisfile\n selection = self.thistab.get_selection()\n if not self.thistab.enter_dir(tfile) and selection:\n result = self.execute_file(selection, mode=mode)\n if result in (False, ASK_COMMAND):\n self.open_console('open_with ')\n elif direction.vertical() and cwd.files:\n pos_new = direction.move(\n direction=direction.down(),\n override=narg,\n maximum=len(cwd),\n current=cwd.pointer,\n pagesize=self.ui.browser.hei)\n cwd.move(to=pos_new)\n if self.mode == 'visual':\n pos_start = min(self._visual_pos_start, (len(cwd.files) - 1))\n self._visual_move_cycles += direction.move_cycles()\n\n # Haven't cycled\n if self._visual_move_cycles == 0:\n targets = set(cwd.files[min(pos_start, pos_new):(max(pos_start, pos_new) + 1)])\n # Cycled down once\n elif self._visual_move_cycles == 1:\n if pos_new < pos_start:\n targets = set(cwd.files[:(pos_new + 1)] + cwd.files[pos_start:])\n else:\n targets = set(cwd.files)\n # Cycled up once\n elif self._visual_move_cycles == -1:\n if pos_new > pos_start:\n targets = set(cwd.files[:(pos_start + 1)] + cwd.files[pos_new:])\n else:\n targets = set(cwd.files)\n # Cycled more than once\n else:\n targets = set(cwd.files)\n\n # The current selection\n current = set(cwd.marked_items)\n # Set theory anyone?\n if self._visual_reverse:\n for fobj in targets & current:\n cwd.mark_item(fobj, False)\n for fobj in self._previous_selection - current - targets:\n cwd.mark_item(fobj, True)\n else:\n for fobj in targets - current:\n cwd.mark_item(fobj, True)\n for fobj in current - self._previous_selection - targets:\n cwd.mark_item(fobj, False)\n if self.ui.pager.visible:\n self.display_file()", "def on_moved(self, event):\n super(myEventHandler,self).on_moved(event)\n #moveto events from external folders have no src_path\n source = event.src_path\n dest = event.dest_path\n if event.is_directory:\n splitpath = split(source)\n splitdest = split(dest)\n if splitpath[1] == splitdest[1]:\n try:\n #where are we moving from\n pass\n #file = splitpath[1]\n #pathtoonedir = self.onedir.getonedirrectory()\n #oldpath = splitpath[0].replace(pathtoonedir ,\"\")\n #calculate new path\n #newpath = splitdest[0].replace(pathtoonedir ,\"\")\n #if oldpath is \"\":\n # oldpath = os.path.sep\n #self.onedir.movefile(file,newpath,oldpath)\n except OSError as e:\n print \"Error copying file! \" + e\n exit(1)\n else:\n #rename!!!!!!!!\n oldname = source\n newname = dest\n pathtoonedir = self.onedir.getonedirrectory()\n oldname = oldname.replace(pathtoonedir ,\"\")\n newname = newname.replace(pathtoonedir ,\"\")\n self.onedir.renamedirectory(oldname,newname)\n else:\n #if it comes from outside the folder structure\n if source is None:\n try:\n #use os.path.split to get file name and path\n splitpath = split(dest)\n file = splitpath[1]\n pathtoonedir = self.onedir.getonedirrectory()\n relpath = splitpath[0].replace(pathtoonedir ,\"\")\n self.onedir.sendfile(file, relpath)\n except OSError as e:\n print \"Error copying file! 
\" + e.strerror\n exit(1)\n except IOError as e:\n print \"IOerror creating file \" + e.strerror\n else:\n #file was moved!\n #check if name stays the same i.e. it's a move not a rename!\n splitpath = split(source)\n splitdest = split(dest)\n if splitpath[1] == splitdest[1]:\n try:\n #where are we moving from\n file = splitpath[1]\n pathtoonedir = self.onedir.getonedirrectory()\n oldpath = splitpath[0].replace(pathtoonedir ,\"\")\n #calculate new path\n newpath = splitdest[0].replace(pathtoonedir ,\"\")\n if oldpath is \"\":\n oldpath = os.path.sep\n self.onedir.movefile(file,newpath,oldpath)\n except OSError as e:\n print \"Error copying file! \" + e\n exit(1)\n else:\n #rename!!!!!!!!\n file = splitpath[1]\n newname = splitdest[1]\n pathtoonedir = self.onedir.getonedirrectory()\n path = splitpath[0].replace(pathtoonedir ,\"\")\n if path is \"\":\n path = os.path.sep\n else:\n path = path[1:]\n self.onedir.rename(file,path,newname)", "def convert_dir(self) -> int:\n old_classifiers: List[Classifier] = self.get_entities_by_entity_type(\n self.pack.classifiers, FileType.OLD_CLASSIFIER\n )\n intersection_fields = self.get_classifiers_schema_intersection_fields()\n for old_classifier in old_classifiers:\n self.create_classifier_from_old_classifier(\n old_classifier, intersection_fields\n )\n self.create_mapper_from_old_classifier(old_classifier)\n\n return 0", "def moveFiles(self, fids, pid):\n\n f = self.getFileInfo(fids[0])\n if not f or f.package == pid:\n return False\n if not self.getPackageInfo(pid):\n raise PackageDoesNotExists(pid)\n\n # TODO move real files\n\n self.db.moveFiles(f.package, fids, pid)\n\n return True", "def _move_to_inserted_directory(file_path: str):\n parts = list(Path(file_path).parts)\n parts.insert(-1, 'inserted')\n move(file_path, str(Path(*parts)))", "def move_file(source, destination):\n #source = client_variables.output_folder\n #destination = client_variables.client_folder\n copyfiles = os.listdir(source)\n ext = (\".xlsx\", \".csv\", \".pdf\", \".png\")\n for copyfile in copyfiles:\n if copyfile.endswith(ext):\n copyfile = source + \"/\" + copyfile\n print \"copying\", copyfile\n shutil.move(copyfile, destination)\n elif copyfile.startswith('GetTotalByYearReport'):\n copyfile = source + \"/\" + copyfile\n print \"copying\", copyfile\n shutil.move(copyfile, destination)", "def moveFiles(outputDir, files):\n\tfor fn in files:\n\t\tshutil.move(fn, join(outputDir, getFilenameWithoutPath(fn)))", "def on_moved(self, event):\n print(\"Moved\")\n time.sleep(5)\n self.moveFile(event.dest_path)", "def on_moved(self, event):\n\n # build the relative source and destination paths\n source_path = event.src_path.replace(self.root_path, \".\")\n destination_path = event.dest_path.replace(self.root_path, '.')\n is_directory = event.is_directory\n\n # propagate the moved event if server connection is established\n if self.protocol.connected:\n self.protocol.send_move_event(is_directory, source_path, destination_path)\n else:\n logging.info(\"Connection with server has not been established, changes will not be propagated.\")", "def putDir(self, inlocaldir, inirodsdir):\n num=0\n utilities.log.info('putDir: Local tree {} into iRODS tree {}'.format(inlocaldir, inirodsdir))\n for root, dirnames, filenames in os.walk(inlocaldir):\n irodsdir = self.assembleIRODScollectionName(root, inlocaldir, inirodsdir)\n irodsColl = self.createSubCollection(newcollection=irodsdir)\n num += self.putFile(root, irodsColl, filenames)\n utilities.log.info('Copied a total of {} files to 
iRODS'.format(num))\n utilities.log.info('Finished copying dir {} to {} '.format(inlocaldir,inirodsdir))\n return num", "def wrap_move_file_folder(src, dst):\r\n try:\r\n if os.path.exists(dst):\r\n if os.path.isdir(dst):\r\n shutil.rmtree(dst)\r\n else:\r\n os.remove(dst)\r\n except Exception:\r\n pass\r\n for i in range(5):\r\n try:\r\n shutil.move(src, dst)\r\n break\r\n except Exception:\r\n time.sleep(10)" ]
[ "0.6540349", "0.6040318", "0.59826165", "0.5923412", "0.5619923", "0.55904424", "0.5581633", "0.54979753", "0.54583585", "0.5416308", "0.53824425", "0.53258795", "0.5291548", "0.52602667", "0.52368546", "0.5232204", "0.52239007", "0.52128977", "0.52018017", "0.5200375", "0.51892644", "0.51365745", "0.5114978", "0.51113755", "0.5096242", "0.50931275", "0.5090273", "0.5087816", "0.50804347", "0.5079122" ]
0.6531521
1
Create metrics of gauge type for filesystem replica link lag, with the local filesystem name, replication direction, remote array name, remote filesystem name and replication status as labels.
def _replica_links_lag(self): for f in self.fb.get_filesystem_replica_links(): self.replica_links_lag.add_metric([f.local_file_system.name, f.direction, f.remote.name, f.remote_file_system.name, f.status], -1 if f.lag is None else f.lag)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def collect_metrics():\n p = os.path.join(os.sep, \"mnt\", \"glusterfs\")\n mount_stats = os.statvfs(p)\n # block size * total blocks\n total_space = mount_stats.f_blocks * mount_stats.f_bsize\n free_space = mount_stats.f_bfree * mount_stats.f_bsize\n # capsize only operates on i64 values\n used_space = total_space - free_space\n gb_used = used_space / 1024 / 1024 / 1024\n\n # log!(format!(\"Collecting metric gb-used {}\", gb_used), Info)\n add_metric(\"gb-used\", \"{}\".format(gb_used))", "def recreate_metrics():\n all = monitor_client.list_metric_descriptors(\n project_path, filter_='metric.type=starts_with(\"custom.\")'\n )\n for a in all:\n if \"accumulator\" in str(a) or \"biquery\" in str(a):\n metric_name = monitor_client.metric_descriptor_path(\n settings.PROJECT_ID, a.type\n )\n\n try:\n monitor_client.delete_metric_descriptor(metric_name)\n except Exception as e:\n print(e)\n\n metric_descriptor = {\n \"type\": f\"custom.googleapis.com/{Monitoring.PING}\",\n \"labels\": [\n {\n \"key\": \"operation\",\n \"valueType\": \"STRING\",\n # \"description\": \"Performed operation name\"\n }\n ],\n \"metricKind\": \"GAUGE\",\n \"valueType\": \"DOUBLE\",\n \"unit\": \"items\",\n \"description\": \"Function performed in a loop with hard limit\",\n \"displayName\": \"Repeated Function Execution\",\n }\n\n return monitor_client.create_metric_descriptor(\n settings.PROJECT_ID, metric_descriptor\n )", "def test_gauge(self):\n # Create a metrics with no metric instances\n mf = pmp.utils.create_metric_family(\n self.gauge_metric_name, self.gauge_metric_help, self.gauge_metric_type, []\n )\n self.assertIsInstance(mf, pmp.MetricFamily)\n self.assertEqual(len(mf.metric), 0)\n\n # Create it with metrics\n mf = pmp.utils.create_metric_family(\n self.gauge_metric_name,\n self.gauge_metric_help,\n self.gauge_metric_type,\n self.gauge_metric_data,\n )\n self.assertIsInstance(mf, pmp.MetricFamily)\n self.assertEqual(mf.name, self.gauge_metric_name)\n self.assertEqual(mf.help, self.gauge_metric_help)\n self.assertEqual(mf.type, self.gauge_metric_type)\n\n # Create another and check equal\n mf_ = pmp.utils.create_metric_family(\n self.gauge_metric_name,\n self.gauge_metric_help,\n self.gauge_metric_type,\n self.gauge_metric_data,\n )\n self.assertIsInstance(mf_, pmp.MetricFamily)\n\n self.assertEqual(mf, mf_)\n\n for m in mf_.metric:\n self.assertEqual(m.timestamp_ms, 0)\n\n # Create another with timestamp\n mf_ = pmp.utils.create_metric_family(\n self.gauge_metric_name,\n self.gauge_metric_help,\n self.gauge_metric_type,\n self.gauge_metric_data,\n timestamp=True,\n )\n self.assertIsInstance(mf_, pmp.MetricFamily)\n\n for m in mf_.metric:\n self.assertNotEqual(m.timestamp_ms, 0)\n\n self.assertNotEqual(mf, mf_)\n\n # Create Gauge with const_labels\n mf_ = pmp.utils.create_metric_family(\n self.gauge_metric_name,\n self.gauge_metric_help,\n self.gauge_metric_type,\n self.gauge_metric_data,\n const_labels=self.const_labels,\n )\n self.assertIsInstance(mf_, pmp.MetricFamily)\n\n # Check that const_label is present in the LabelPair associated\n # with each metric instance.\n for m in mf_.metric:\n labels = [lp.name for lp in m.label]\n self.assertIn(\"app\", labels)\n\n self.assertNotEqual(mf, mf_)\n\n # Check Gauge can be round-tripped through encode and decode\n payload = pmp.encode(mf)\n self.assertIsInstance(payload, bytes)\n _mf = pmp.decode(payload)[0]\n self.assertEqual(mf, _mf)", "def _report_metrics(self, total_bytes, time_delta, num_files):\n # This recreates the gsutil throughput calculation so 
that metrics are 1:1.\n avg_speed = round(float(total_bytes) / float(time_delta))\n report(\n source_scheme=self._source_scheme,\n destination_scheme=self._destination_scheme,\n num_files=num_files,\n size=total_bytes,\n avg_speed=avg_speed,\n disk_io_time=self._calculate_disk_io())", "def collect_storage_metrics(sys):\n try:\n session = get_session()\n client = InfluxDBClient(host=INFLUXDB_HOSTNAME, port=INFLUXDB_PORT, database=INFLUXDB_DATABASE)\n\n sys_id = sys[\"id\"]\n sys_name = get_system_name(sys)\n\n json_body = list()\n\n # Get Drive statistics\n drive_stats_list = session.get((\"{}/{}/analysed-drive-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n drive_locations = get_drive_location(sys_id, session)\n if CMD.showDriveNames:\n for stats in drive_stats_list:\n location_send = drive_locations.get(stats[\"diskId\"])\n LOG.info((\"Tray{:02.0f}, Slot{:03.0f}\").format(location_send[0], location_send[1]))\n # Add Drive statistics to json body\n for stats in drive_stats_list:\n disk_location_info = drive_locations.get(stats[\"diskId\"])\n disk_item = dict(\n measurement = \"disks\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n sys_tray = (\"{:02.0f}\").format(disk_location_info[0]),\n sys_tray_slot = (\"{:03.0f}\").format(disk_location_info[1])\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in DRIVE_PARAMS\n )\n )\n if CMD.showDriveMetrics:\n LOG.info(\"Drive payload: %s\", disk_item)\n json_body.append(disk_item)\n\n # Get interface statistics\n interface_stats_list = session.get((\"{}/{}/analysed-interface-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n if CMD.showInterfaceNames:\n for stats in interface_stats_list:\n LOG.info(stats[\"interfaceId\"])\n # Add interface statistics to json body\n for stats in interface_stats_list:\n if_item = dict(\n measurement = \"interface\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n interface_id = stats[\"interfaceId\"],\n channel_type = stats[\"channelType\"]\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in INTERFACE_PARAMS\n )\n )\n if CMD.showInterfaceMetrics:\n LOG.info(\"Interface payload: %s\", if_item)\n json_body.append(if_item)\n\n # Get System statistics\n system_stats_list = session.get((\"{}/{}/analysed-system-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n # Add System statistics to json body\n sys_item = dict(\n measurement = \"systems\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name\n ),\n fields = dict(\n (metric, system_stats_list.get(metric)) for metric in SYSTEM_PARAMS\n )\n )\n if CMD.showSystemMetrics:\n LOG.info(\"System payload: %s\", sys_item)\n json_body.append(sys_item)\n \n # Get Volume statistics\n volume_stats_list = session.get((\"{}/{}/analysed-volume-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n if CMD.showVolumeNames:\n for stats in volume_stats_list:\n LOG.info(stats[\"volumeName\"])\n # Add Volume statistics to json body\n for stats in volume_stats_list:\n vol_item = dict(\n measurement = \"volumes\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n vol_name = stats[\"volumeName\"]\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in VOLUME_PARAMS\n )\n )\n if CMD.showVolumeMetrics:\n LOG.info(\"Volume payload: %s\", vol_item)\n json_body.append(vol_item)\n\n if not CMD.doNotPost:\n client.write_points(json_body, database=INFLUXDB_DATABASE, time_precision=\"s\")\n\n except RuntimeError:\n LOG.error((\"Error when attempting to post statistics for {}/{}\").format(sys[\"name\"], 
sys[\"id\"]))", "def _create_gauge(self, name: str, attributes: Attributes = None):\n otel_safe_name = _get_otel_safe_name(name)\n key = _generate_key_name(name, attributes)\n\n gauge = self.meter.create_observable_gauge(\n name=otel_safe_name,\n callbacks=[partial(self.read_gauge, _generate_key_name(name, attributes))],\n )\n self.map[key] = Observation(DEFAULT_GAUGE_VALUE, attributes)\n\n return gauge", "def check_gauge(params, match):\n gauge_no = match.group(1)\n stats_url = USGS_STATS_URL_TEMPLATE % gauge_no\n graph_url = USGS_GRAPH_URL_TEMPLATE % gauge_no\n\n response = requests.get(stats_url)\n last_measurement = response.text.strip().split(\"\\n\")[-1]\n _, _, _, mtime, tz, cfs, _ = re.split('\\s+', last_measurement)\n\n return lambda_response(None, {\n \"text\": \"Last measurement: %s cfs @ %s %s\" % (cfs, mtime, tz),\n \"attachments\": [{ \"image_url\": graph_url }]\n })", "def replication_info():\n\n def _get_last_packet_name(location, pattern):\n try:\n entries = [os.path.join(location, e) for e in os.listdir(location)]\n except OSError as e:\n logging.warning(e)\n return None\n pattern = re.compile(pattern)\n entries = filter(lambda x: pattern.search(x), entries)\n entries = filter(os.path.isfile, entries)\n entries = _sort_natural(entries, reverse=True) # latest first\n return os.path.split(entries[0])[-1] if entries else None\n\n # TODO(roman): Cache this response:\n return jsonify({\n 'last_packet': _get_last_packet_name(\n current_app.config['REPLICATION_PACKETS_DIR'],\n \"replication-[0-9]+.tar.bz2$\"\n ),\n })", "def update_gauge(self):\n gauge_metrics = self._fetch_gauge_metrics_and_clear()\n self._logger.info('update_gauge. gauge_metrics = %s',\n build_metrics_gauge_data(gauge_metrics))", "def _solaris_balloon_stat(label):", "def emit_metrics(self):\n parse_time = time.perf_counter() - self._parsing_start_time\n Stats.gauge(\"dag_processing.total_parse_time\", parse_time)\n Stats.gauge(\"dagbag_size\", sum(stat.num_dags for stat in self._file_stats.values()))\n Stats.gauge(\n \"dag_processing.import_errors\", sum(stat.import_errors for stat in self._file_stats.values())\n )", "def generate_latest(registry=Registry):\n\n def sample_line(line, metric_type):\n if line.labels:\n labelstr = '{{{0}}}'.format(','.join(\n ['{0}=\"{1}\"'.format(\n k, v.replace('\\\\', r'\\\\').replace('\\n', r'\\n').replace('\"', r'\\\"'))\n for k, v in sorted(line.labels.items())]))\n else:\n labelstr = ''\n timestamp = ''\n if line.timestamp is not None:\n # Convert to milliseconds.\n timestamp = ' {0:d}'.format(int(float(line.timestamp) * 1000))\n name = line.name\n if metric_type == 'counter' and name.endswith('_total'):\n name = name[:-6]\n return '{0}{1} {2}{3}\\n'.format(\n name, labelstr, int(line.value), timestamp)\n\n output = []\n for metric in registry.collect():\n try:\n mname = metric.name\n mtype = metric.type\n # Munging from OpenMetrics into Prometheus format.\n if mtype == 'counter':\n mname = mname\n elif mtype == 'info':\n mname = mname + '_info'\n mtype = 'gauge'\n elif mtype == 'stateset':\n mtype = 'gauge'\n elif mtype == 'gaugehistogram':\n # A gauge histogram is really a gauge,\n # but this captures the structure better.\n mtype = 'histogram'\n elif mtype == 'unknown':\n mtype = 'untyped'\n help_str = '# HELP {0} {1}\\n'.format(mname, metric.documentation.replace('\\\\', r'\\\\').replace('\\n', r'\\n'))\n if 'Multiprocess' not in help_str:\n continue\n output.append('# HELP {0} {1}\\n'.format(\n mname, metric.documentation.replace('\\\\', r'\\\\').replace('\\n', 
r'\\n')))\n output.append('# TYPE {0} {1}\\n'.format(mname, mtype))\n\n for s in metric.samples:\n for suffix in ['_created', '_gsum', '_gcount']:\n if s.name == metric.name + suffix:\n break\n else:\n line = sample_line(s, mtype)\n if not line:\n continue\n output.append(line)\n except Exception as exception:\n exception.args = (exception.args or ('',)) + (metric,)\n raise\n\n return ''.join(output).encode('utf-8')", "def __init__(self, replication_num, metric_name_array, metric_collection_types = None, detailed_metric_assembly = False):\n self.replication_num = replication_num\n self.metrics = metric_name_array\n self.metric_collection_types = metric_collection_types # can be a string array elements of which can be one of ('STRING_LIST', 'COUNT_MAX', 'MEAN_STD','MIN','MAX', 'MIN_MAX') \n self.detailed_metric_assembly = detailed_metric_assembly\n self.replication_counter = 0\n self.metric_final_results = {}\n # initialize results array for each metric\n for metric in metric_name_array:\n self.metric_final_results[metric] = []", "def main(self):\n debug(\"Using %s\" % (self.PROC_DISKSTATS))\n\n initial = self.get_status()\n time.sleep(self.interval)\n final = self.get_status()\n\n # Get bytes/sec\n for d in self.partitions:\n r_diff = ((final[d].r_sectors - initial[d].r_sectors) * self.sector_size) / self.interval\n w_diff = ((final[d].w_sectors - initial[d].w_sectors) * self.sector_size) / self.interval\n final[d].r_rate = r_diff\n final[d].w_rate = w_diff\n \n # Status string\n msg = \" \".join([ \"%s (r: %d KB/s, w: %d KB/s)\" % (i.dev, i.r_rate / 1024, i.w_rate / 1024) for i in sorted(final.values(), key=lambda x:x.dev) ])\n performance = \" \".join([ \"'%s read'=%d '%s write'=%d\" % (i.dev, i.r_rate, i.dev, i.w_rate) for i in sorted(final.values(), key=lambda x:x.dev) ])\n\n return (EX_OK, msg, performance)", "def _get_ganglia_metrics(hostname, port, file_):\n if file_:\n f = open(file_, 'r')\n return \"\".join(f.readlines())\n else:\n return netcat(hostname, port, '')", "def create_network_and_stats(\r\n dir_path, map_lines, otu_table_fp, prefs, data, background_color, label_color):\r\n cat_by_sample, sample_by_cat, num_meta, meta_dict, labels, node_labels,\\\r\n label_list = get_sample_info(map_lines)\r\n con_by_sample, node_file, edge_file, red_node_file,\\\r\n red_edge_file, otu_dc, degree_counts, sample_dc, \\\r\n = get_connection_info(otu_table_fp, num_meta, meta_dict)\r\n num_con_cat, num_con = get_num_con_cat(con_by_sample, cat_by_sample)\r\n num_cat = get_num_cat(sample_by_cat, con_by_sample.keys())\r\n dir_path = os.path.join(dir_path, \"otu_network\")\r\n make_table_file(edge_file, labels, dir_path, \"real_edge_table.txt\")\r\n make_table_file(node_file, node_labels, dir_path, \"real_node_table.txt\")\r\n make_table_file(red_edge_file, labels, dir_path,\r\n \"real_reduced_edge_table.txt\")\r\n make_table_file(red_node_file, node_labels, dir_path,\r\n \"real_reduced_node_table.txt\")\r\n make_stats_files(\r\n sample_dc,\r\n otu_dc,\r\n degree_counts,\r\n num_con_cat,\r\n num_con,\r\n num_cat,\r\n cat_by_sample,\r\n dir_path)\r\n if background_color == 'white':\r\n background_color = Color('white', (255, 255, 255))\r\n elif background_color == 'black':\r\n background_color = Color('black', (0, 0, 0))\r\n else:\r\n try:\r\n background_color = data_colors[background_color]\r\n except KeyError:\r\n raise KeyError(\"background_color unknown\")\r\n\r\n if label_color == 'white':\r\n label_color = Color('white', (255, 255, 255))\r\n elif label_color == 'black':\r\n label_color = 
Color('black', (0, 0, 0))\r\n else:\r\n try:\r\n label_color = data_colors[label_color]\r\n except KeyError:\r\n raise KeyError(\"label_color unknown\")\r\n\r\n make_props_files(\r\n labels,\r\n label_list,\r\n dir_path,\r\n data,\r\n background_color,\r\n label_color,\r\n prefs)", "def derive_newrelic_slaves(self):\n if self.has_slave_data is True:\n self.update_metric(\"newrelic/replication_lag\", self.sum_of([\"slave/seconds_behind_master\"]))\n\n # both need to be YES, which is 1\n running = self.sum_of([\"slave/slave_io_running\", \"slave/slave_sql_running\"])\n if running is not None:\n replication_status = 1.0\n if running == 2:\n replication_status = 0.0\n self.update_metric(\"newrelic/replication_status\", replication_status)\n self.update_metric(\"newrelic/slave_relay_log_bytes\", self.sum_of([\"slave/relay_log_pos\"]))\n self.update_metric(\"newrelic/master_log_lag_bytes\", self.diff_of([\"slave/read_master_log_pos\",\n \"slave/exec_master_log_pos\"]))\n else: # This is a hack because the NR UI can't handle it missing for graphs\n self.update_metric(\"newrelic/replication_lag\", 0.0)\n self.update_metric(\"newrelic/replication_status\", 0.0)\n self.update_metric(\"newrelic/slave_relay_log_bytes\", 0.0)\n self.update_metric(\"newrelic/master_log_lag_bytes\", 0.0)", "def update_gauge(self):\n pass # Do nothing", "def feature_dynamic_filesystem(self):\n def flatten_list(structured):\n \"\"\"Flatten nested list.\"\"\"\n flat = []\n for i in structured:\n flat += i\n return flat\n\n # Get file operations and their number\n self.features[\"file_read\"] = \\\n self.report.get(\"behavior\", {}).get(\"summary\", {})\\\n .get(\"file_read\", [])\n self.features[\"files_read\"] = len(self.features[\"file_read\"])\n self.features[\"file_written\"] = \\\n self.report.get(\"behavior\", {}).get(\"summary\", {})\\\n .get(\"file_written\", [])\n self.features[\"files_written\"] = len(self.features[\"file_written\"])\n self.features[\"file_deleted\"] = \\\n self.report.get(\"behavior\", {}).get(\"summary\", {})\\\n .get(\"file_deleted\", [])\n self.features[\"files_deleted\"] = len(self.features[\"file_deleted\"])\n self.features[\"file_copied\"] = flatten_list(\\\n self.report.get(\"behavior\", {}).get(\"summary\", {})\\\n .get(\"file_copied\", [])\n )\n self.features[\"files_copied\"] = len(\\\n self.report.get(\"behavior\", {}).get(\"summary\", {})\\\n .get(\"file_copied\", [])\n )\n self.features[\"file_renamed\"] = flatten_list(\\\n self.report.get(\"behavior\", {}).get(\"summary\", {})\\\n .get(\"file_moved\", [])\n )\n self.features[\"files_renamed\"] = len(self.features[\"file_renamed\"])\n\n # Get other file operations numbers\n self.features[\"files_opened\"] = len(\n self.report.get(\"behavior\", {}).get(\"summary\", {})\\\n .get(\"file_opened\", [])\n )\n self.features[\"files_exists\"] = len(\n self.report.get(\"behavior\", {}).get(\"summary\", {})\\\n .get(\"file_exists\", [])\n )\n self.features[\"files_failed\"] = len(\n self.report.get(\"behavior\", {}).get(\"summary\", {})\\\n .get(\"file_failed\", [])\n )\n\n # Get total number of unique touched files\n file_operations = \\\n self.report.get(\"behavior\", {}).get(\"summary\", {})\\\n .get(\"file_read\", []) + \\\n self.report.get(\"behavior\", {}).get(\"summary\", {})\\\n .get(\"file_written\", []) + \\\n self.report.get(\"behavior\", {}).get(\"summary\", {})\\\n .get(\"file_deleted\", []) + \\\n flatten_list(self.report.get(\"behavior\", {}).get(\"summary\", {})\\\n .get(\"file_copied\", [])) + \\\n 
flatten_list(self.report.get(\"behavior\", {}).get(\"summary\", {})\\\n .get(\"file_moved\", [])) + \\\n self.report.get(\"behavior\", {}).get(\"summary\", {})\\\n .get(\"file_recreated\", []) + \\\n self.report.get(\"behavior\", {}).get(\"summary\", {})\\\n .get(\"file_opened\", []) + \\\n self.report.get(\"behavior\", {}).get(\"summary\", {})\\\n .get(\"file_exists\", []) + \\\n self.report.get(\"behavior\", {}).get(\"summary\", {})\\\n .get(\"file_failed\", [])\n # remove duplicates\n self.features[\"files_operations\"] = len(list(set(file_operations)))", "def init_metric_definitions():\n metric_definitions = []\n\n # add info to list in memory, one by one, following signature values\n metric_def_ID = 1\n metric_def_name = \"Recovery Time\"\n metric_def_info = \"Measures time taken by ONAP to restore a VNF\"\n metric_definitions.append(RecoveryTimeDef(metric_def_ID, metric_def_name,\n metric_def_info))\n\n metric_def_ID = 2\n metric_def_name = \"Uptime Percentage\"\n metric_def_info = \"Measures ratio of uptime to reference time, not counting planned downtime\"\n metric_definitions.append(UptimePercentageDef(metric_def_ID, metric_def_name,\n metric_def_info))\n\n\n # write list to binary file\n write_list_bin(metric_definitions, FILE_METRIC_DEFINITIONS)\n\n return metric_definitions", "def mysql_status(self):\n stamp = int(time.time())\n\n # get data\n conn = self.object.connect()\n result = {}\n try:\n with conn.cursor() as cursor:\n for key in REQUIRED_STATUS_FIELDS:\n cursor.execute('SHOW GLOBAL STATUS LIKE \"%s\";' % key)\n row = cursor.fetchone()\n result[row[0]] = row[1]\n except Exception as e:\n exception_name = e.__class__.__name__\n context.log.debug('failed to collect MySQLd metrics due to %s' % exception_name)\n context.log.debug('additional info:', exc_info=True)\n finally:\n conn.close()\n\n # counters\n counted_vars = {}\n for metric, variable_name in METRICS['counters'].items():\n if variable_name in result:\n counted_vars[metric] = int(result[variable_name])\n\n # compound counter\n counted_vars['mysql.global.writes'] = \\\n counted_vars['mysql.global.insert'] + \\\n counted_vars['mysql.global.update'] + \\\n counted_vars['mysql.global.delete']\n\n self.aggregate_counters(counted_vars, stamp=stamp)\n\n # gauges\n tracked_gauges = {}\n for metric, variable_name in METRICS['gauges'].items():\n if variable_name in result:\n tracked_gauges[metric] = {\n self.object.definition_hash: int(result[variable_name])\n }\n\n # compound gauges\n pool_util = 0\n if ('mysql.global.innodb_buffer_pool_pages_total' in tracked_gauges and\n tracked_gauges['mysql.global.innodb_buffer_pool_pages_total'][self.object.definition_hash] > 0):\n pool_util = (\n (tracked_gauges['mysql.global.innodb_buffer_pool_pages_total'][self.object.definition_hash] -\n tracked_gauges['mysql.global.innodb_buffer_pool_pages_free'][self.object.definition_hash]) /\n tracked_gauges['mysql.global.innodb_buffer_pool_pages_total'][self.object.definition_hash] * 100\n )\n tracked_gauges['mysql.global.innodb_buffer_pool_util'] = {\n self.object.definition_hash: pool_util\n }\n\n hit_ratio = 0\n if ('mysql.global.innodb_buffer_pool_read_requests' in tracked_gauges and\n tracked_gauges['mysql.global.innodb_buffer_pool_read_requests'][self.object.definition_hash] > 0):\n hit_ratio = (\n (tracked_gauges['mysql.global.innodb_buffer_pool_read_requests'][self.object.definition_hash] /\n (tracked_gauges['mysql.global.innodb_buffer_pool_read_requests'][self.object.definition_hash] +\n 
tracked_gauges['mysql.global.innodb_buffer_pool_reads'][self.object.definition_hash])) * 100\n )\n\n tracked_gauges['mysql.global.innodb_buffer_pool.hit_ratio'] = {\n self.object.definition_hash: hit_ratio\n }\n\n self.aggregate_gauges(tracked_gauges, stamp=stamp)\n\n # finalize\n self.increment_counters()\n self.finalize_gauges()", "def add_gauge_server(self, data, feed_id, server_id, metric_enum):\n metric_id = self._metric_id_gauge_server(feed_id=feed_id, server_id=server_id,\n metric_enum=metric_enum)\n self.add_gauge(data=data, metric_id=metric_id)", "def process_data(base_path, gauge_data_dir, job_name): \n \n # Collect all gauge files and put them into one array\n storm_gauges_files = os.listdir(gauge_data_dir) \n g = numpy.zeros((len(gauges), len(storm_gauges_files))) \n \n #for storm_gauges in storm_gauges_files: \n # data_path = os.join(gauage_data_dir, storm_gauges)\n # for i in range(0, len(gauges)): \n \n for (index, storm_gauges) in enumerate(storm_gauges_file): \n with open(os.path.join(gauge_data_dir ,storm_gauges), 'r') as gauges_data: \n data = numpy.loadtxt(gauges_data, delimiter = ',', skiprows=1) \n g[:, index] = data[:, 1]\n\n return g", "def test_metric_namespace(self):\n self.statsd.namespace = \"foo\"\n self.statsd.gauge('gauge', 123.4)\n self.assert_equal_telemetry('foo.gauge:123.4|g\\n', self.recv(2))", "def mmo_replication_status_summary(self, mmo_connection):\n replication_summary = []\n primary_info = {}\n o = self.mmo_replication_status(mmo_connection)\n o = o + self.mmo_configsrv_replication_status(mmo_connection)\n replset_hosts_up_down = {}\n for shard in self.shards:\n replset_hosts_up_down[shard] = 0\n for replicaset in o:\n if \"Error\" not in replicaset[\"command_output\"].keys():\n for member in replicaset[\"command_output\"][\"members\"]:\n if member[\"stateStr\"] == \"PRIMARY\":\n primary_info[replicaset[\"command_output\"][\"set\"]] = member[\"optimeDate\"]\n\n replication_summary.append( { \"replicaset\": replicaset[\"command_output\"][\"set\"],\n \"hostname\": member[\"name\"],\n \"state\": member[\"stateStr\"],\n \"uptime\": member[\"uptime\"],\n \"configVersion\": member[\"configVersion\"],\n \"optimeDate\": member[\"optimeDate\"] } )\n for doc in replication_summary:\n if doc[\"state\"] == \"PRIMARY\":\n doc[\"lag\"] = \"NA\" # not relevant here\n else: # calculate the slave lag from the PRIMARY optimeDate\n if doc[\"replicaset\"] in primary_info.keys(): # is there a primary in the replset?\n try:\n if hasattr((doc[\"optimeDate\"] - primary_info[doc[\"replicaset\"]]), \"total_seconds\"): # Does not exist in python 2.6\n doc[\"lag\"] = abs((doc[\"optimeDate\"] - primary_info[doc[\"replicaset\"]]).total_seconds())\n else: # for python 2.6 that does not have total_seconds attribute\n # Will only be correct for delays of up to 24 hours\n doc[\"lag\"] = abs((doc[\"optimeDate\"] - primary_info[doc[\"replicaset\"]]).seconds) # Primary needs ot be first in this case\n except:\n doc[\"lag\"] = \"ERR\"\n else:\n doc[\"lag\"] = \"UNK\" # We cannot know what the delay is if there is no primary\n else:\n replset_hosts_up_down[replicaset[\"shard\"]] += 1\n\n #else: Probably redundant code now. 
Removed ot fix https://github.com/rhysmeister/mmo/issues/34\n # We cannot know the state of much of the replicaset at this point\n # replication_summary.append({\"replicaset\": replicaset[\"shard\"],\n # \"hostname\": \"UNK\",\n # \"state\": \"UNK\",\n # \"uptime\": \"UNK\",\n # \"configVersion\": \"UNK\",\n # \"optimeDate\": \"UNK\"})\n\n\n shard_server_count = {}\n # how many servers in each shard\n for shard in self.shards:\n shard_server_count[shard] = 0\n for s in self.shard_servers:\n shard_server_count[s['shard']] += 1\n # are all the hosts of any shard down?\n for shard in self.shards:\n if replset_hosts_up_down[shard] > 0:\n if replset_hosts_up_down[shard] == shard_server_count[shard]:\n replication_summary.append({\"replicaset\": shard,\n \"hostname\": \"UNK\",\n \"state\": \"UNK\",\n \"uptime\": \"UNK\",\n \"configVersion\": \"UNK\",\n \"optimeDate\": \"UNK\",\n \"lag\": \"UNK\"})\n deduped_replication_summary = []\n for d in replication_summary:\n if d not in deduped_replication_summary:\n deduped_replication_summary.append(d)\n return deduped_replication_summary", "def tracking(main_params, link_params, check_params, manual_log_path=''):\n \n vect_path, csv_path, dest_path, verbose = main_params\n createCSV, forced_matching, search_range, memory, adaptive_stop = link_params\n params = {'r':search_range, 'm':memory}\n check, img_path, size = check_params\n \n # Log path determination\n if manual_log_path:\n log_path = manual_log_path\n else: \n log_path = os.path.join(dest_path, 'log.txt')\n log_txt = ''\n \n # Creation of the dest directory if it doesn't exist\n if not os.path.exists(dest_path):\n os.mkdir(dest_path)\n \n start = time.time()\n previous_step = start\n txt = 'TRACK> Preparing the data...'\n log_txt = nu.printAndUpdateLog(txt, log_txt, verbose)\n time.sleep(1) # dirty hack to wait for the console output\n \n # Loading graphs\n files = nu.preloadFiles(vect_path, ['.gpickle'], debug=False) # finding all the gpickles\n graphs = [nx.read_gpickle(graph[0]) for graph in files] # creation of a list containing all the graphs, in order\n \n # Creation of the csv if necessary, and loading as DataFrame\n if createCSV:\n nu.createNodesCSVForTracking(files, csv_path, verbose)\n df = pd.read_csv(csv_path)\n \n timer = time.time()\n txt = 'TRACK> ...done in {:.4f} s.'.format(timer-previous_step)\n log_txt = nu.printAndUpdateLog(txt, log_txt, verbose)\n time.sleep(1) # dirty hack to wait for the console output\n previous_step = timer\n txt = 'TRACK> Tracking...'\n log_txt = nu.printAndUpdateLog(txt, log_txt, verbose)\n \n # Tracking \n df_track = tp.link(df, search_range, memory=memory, t_column='t', \n adaptive_stop=adaptive_stop)\n\n timer = time.time()\n txt = 'TRACK> ...done in {:.4f} s.'.format(timer-previous_step)\n log_txt += txt + '\\n'\n time.sleep(1) # dirty hack to wait for the console output\n previous_step = timer\n txt = 'TRACK> Updating graphs...'.format(timer-previous_step)\n log_txt += txt + '\\n'\n \n # Adding nodes ID to the graphs \n graphs = matchAndInsert(df_track, graphs, verbose, forced_matching, \n log_path, log_txt)\n \n timer = time.time()\n end = timer-previous_step\n txt = ('TRACK> ...done in {:.0f} min {:.4f} s.'\n .format(end // 60, end % 60))\n log_txt += txt + '\\n'\n time.sleep(1) # dirty hack to wait for the console output\n previous_step = timer\n txt = 'TRACK> Saving graphs...'.format(timer-previous_step)\n log_txt += txt + '\\n' \n \n # Saving the updated graphs\n desc = 'TRACK> Saving graphs'\n for i, graph in enumerate(tqdm(graphs, 
total=len(graphs), desc=desc,\n unit='graph', disable=not verbose)): \n \n txt = 'TRACK> graph {} / {}'.format(i+1, len(graphs))\n log_txt += txt + '\\n'\n \n # New graph name: [old name]_track_r[search_range]_m[memory].gpickle'\n graph_name = files[i][1] + '_track'\n for key, value in params.items():\n graph_name += '_' + key + str(value)\n \n path = os.path.join(dest_path, graph_name + '.gpickle') \n nx.write_gpickle(graph, path, protocol=2) \n\n timer = time.time()\n txt = 'TRACK> ...done in {:.4f} s.'.format(timer-previous_step)\n log_txt += txt + '\\n'\n time.sleep(1) # dirty hack to wait for the console output\n previous_step = timer\n txt = 'TRACK> Processing check image...'.format(timer-previous_step)\n log_txt = nu.printAndUpdateLog(txt, log_txt, verbose)\n time.sleep(1) # dirty hack to wait for the console output\n \n # Saving an image to check the results \n if check:\n img = color.gray2rgb(io.imread(img_path)) # loading the image as rgb\n colors = {} # dictionary in which we save the colors by node tag\n \n # Drawing each node with a random color for each node tag\n desc = 'TRACK> drawing graphs nodes'\n for i, graph in enumerate(tqdm(graphs, total=len(graphs), desc=desc,\n unit='graph', disable=not verbose)):\n txt = 'TRACK> drawing nodes from graph {}'.format(files[i][1])\n log_txt += txt + '\\n'\n nu.drawNodesRandomColors(graph, img, size, colors) \n \n txt = 'TRACK> writing image...'\n log_txt = nu.printAndUpdateLog(txt, log_txt, verbose)\n io.imsave(os.path.join(dest_path, 'tracking_check.png'), img)\n\n timer = time.time()\n end = timer-start\n txt = 'TRACK> ...done in {:.4f} s.'.format(timer-previous_step)\n log_txt += txt + '\\n'\n txt = ('TRACK> DONE in {:.0f} min {:.4f} s.'\n .format(end // 60, end % 60))\n log_txt = nu.printAndUpdateLog(txt, log_txt, verbose)\n \n # Writing the log \n with open(log_path, 'a+') as log:\n log.write(log_txt)", "def create_system_metrics(system):\n pass", "def gcp_create_metric_descriptor(project_id: str):\n client = monitoring_v3.MetricServiceClient()\n project_name = client.project_path(project_id)\n\n for desc_type, desc_desc in [\n [\"buildbots_percent_failed\", \"Percentage of failed builds\"],\n [\"buildbots_builds_successful\", \"Number of successful builds in the last 24h.\"],\n [\"buildbots_builds_failed\", \"Number of failed builds in the last 24h.\"],\n [\"buildbots_builds_total\", \"Total number of builds in the last 24h.\"],\n ]:\n\n descriptor = monitoring_v3.types.MetricDescriptor()\n descriptor.type = 'custom.googleapis.com/buildbots_{}'.format(desc_type)\n descriptor.metric_kind = (\n monitoring_v3.enums.MetricDescriptor.MetricKind.GAUGE)\n descriptor.value_type = (\n monitoring_v3.enums.MetricDescriptor.ValueType.DOUBLE)\n descriptor.description = desc_desc\n descriptor = client.create_metric_descriptor(project_name, descriptor)\n print('Created {}.'.format(descriptor.name))", "def _track_data_statistics(self, info_l, last_info, episode_len,\n all_stats, maxlen_stats):\n maxlen = get_max_episode_len(self.path)\n start = info_l[0]['extras']\n last_ex = last_info['extras']\n\n if 'cable-shape' in self.path or 'cable-line-notarget' in self.path:\n nb_sides = start['nb_sides']\n frac_beads = last_ex['nb_zone'] / last_ex['nb_beads']\n if episode_len == maxlen:\n maxlen_stats[f'done_{nb_sides}'].append( last_ex['task.done'] )\n maxlen_stats[f'frac_{nb_sides}'].append( frac_beads )\n all_stats[f'done_{nb_sides}'].append( last_ex['task.done'] )\n all_stats[f'frac_{nb_sides}'].append( frac_beads )\n 
all_stats[f'len_{nb_sides}'].append( episode_len )\n\n elif 'cable-ring' in self.path:\n delta = last_ex['fraction'] - start['fraction']\n percent = last_ex['convex_hull_area'] - start['convex_hull_area']\n percent = 100 * percent / start['convex_hull_area']\n if episode_len == maxlen:\n maxlen_stats['done'].append( last_ex['task.done'] )\n maxlen_stats['fraction'].append( last_ex['fraction'] )\n maxlen_stats['fraction_delta'].append( delta )\n maxlen_stats['percent_improve'].append( percent )\n all_stats['done'].append( last_ex['task.done'] )\n all_stats['fraction'].append( last_ex['fraction'] )\n all_stats['fraction_delta'].append( delta )\n all_stats['percent_improve'].append( percent )\n\n elif 'cloth-flat' in self.path:\n delta = last_ex['cloth_coverage'] - start['cloth_coverage']\n if episode_len == maxlen:\n maxlen_stats['done'].append( last_ex['task.done'] )\n maxlen_stats['coverage_delta'].append( delta )\n maxlen_stats['cloth_coverage'].append( last_ex['cloth_coverage'] )\n all_stats['done'].append( last_ex['task.done'] )\n all_stats['coverage_delta'].append( delta )\n all_stats['cloth_coverage'].append( last_ex['cloth_coverage'] )\n\n elif 'cloth-cover' in self.path:\n if episode_len == maxlen:\n maxlen_stats['done'].append( last_ex['task.done'] )\n\n elif 'bag-alone-open' in self.path:\n delta = last_ex['fraction'] - start['fraction']\n percent = last_ex['convex_hull_area'] - start['convex_hull_area']\n percent = 100 * percent / start['convex_hull_area']\n if episode_len == maxlen:\n maxlen_stats['done'].append( last_ex['task.done'] )\n maxlen_stats['fraction'].append( last_ex['fraction'] )\n maxlen_stats['fraction_delta'].append( delta )\n maxlen_stats['percent_improve'].append( percent )\n all_stats['done'].append( last_ex['task.done'] )\n all_stats['fraction'].append( last_ex['fraction'] )\n all_stats['fraction_delta'].append( delta )\n all_stats['percent_improve'].append( percent )\n\n elif 'bag-items-easy' in self.path or 'bag-items-hard' in self.path:\n # For this it'd be interesting to see what task stage we're at.\n if episode_len == maxlen:\n maxlen_stats['done'].append( last_ex['task.done'] )\n maxlen_stats['task_stage'].append( last_ex['task_stage'] )\n maxlen_stats['zone_items_rew'].append( last_ex['zone_items_rew'] )\n maxlen_stats['zone_beads_rew'].append( last_ex['zone_beads_rew'] )\n all_stats['done'].append( last_ex['task.done'] )\n all_stats['task_stage'].append( last_ex['task_stage'] )\n all_stats['zone_items_rew'].append( last_ex['zone_items_rew'] )\n all_stats['zone_beads_rew'].append( last_ex['zone_beads_rew'] )\n\n elif 'bag-color-goal' in self.path:\n if episode_len == maxlen:\n maxlen_stats['done'].append( last_ex['task.done'] )\n maxlen_stats['task_stage'].append( last_ex['task_stage'] )\n maxlen_stats['frac_in_target_bag'].append( last_ex['frac_in_target_bag'] )\n maxlen_stats['frac_in_distract_bag'].append( last_ex['frac_in_distract_bag'] )\n all_stats['done'].append( last_ex['task.done'] )\n all_stats['task_stage'].append( last_ex['task_stage'] )\n all_stats['frac_in_target_bag'].append( last_ex['frac_in_target_bag'] )\n all_stats['frac_in_distract_bag'].append( last_ex['frac_in_distract_bag'] )\n\n else:\n print(f'For: {self.path}, we are not tracking extra stats.')", "def gauge(self, gauge, value):\n pass" ]
[ "0.5539488", "0.5088195", "0.507494", "0.5027533", "0.49304163", "0.4900687", "0.4889911", "0.47771588", "0.47663313", "0.47630692", "0.47085527", "0.46948", "0.46515706", "0.45758998", "0.45718196", "0.4553225", "0.45504344", "0.45424002", "0.453815", "0.45122313", "0.45104492", "0.45063064", "0.4500496", "0.44973612", "0.44908875", "0.4483738", "0.44833496", "0.44704238", "0.44648114", "0.44597092" ]
0.5900812
0
Builds and sends an embed message with new commits information.
async def process_push_hook(push: models.PushHook): repository = push.repository project = push.project commit_str = "commit" if push.total_commits_count == 1 else "commits" # Show link to commit compare if there's more than one commit if push.total_commits_count > 1: embed_url = f"{repository.homepage}/compare/{push.before[:7]}...{push.after[:7]}" else: embed_url = f"{repository.homepage}/commit/{push.after[:7]}" if push.before == EMPTY_COMMIT: embed = discord.Embed(title=f"[{project.namespace}/{project.name}] New branch created {push.branch}", url=embed_url, colour=discord.Colour.light_grey()) embed.set_author(name=push.user_name, icon_url=push.user_avatar) await send_message(None, embed=embed, avatar_url=push.project.avatar_url) elif push.after == EMPTY_COMMIT: embed = discord.Embed(title=f"[{project.namespace}/{project.name}] Branch deleted {push.branch}", url=embed_url, colour=discord.Colour.light_grey()) embed.set_author(name=push.user_name, icon_url=push.user_avatar) await send_message(None, embed=embed, avatar_url=push.project.avatar_url) # If there are no commits, do not show a message if not push.total_commits_count: return embed = discord.Embed(title=f"[{project.namespace}/{project.name}:{push.branch}] " f"{push.total_commits_count} new {commit_str}", url=embed_url, colour=discord.Colour.blurple()) embed.set_author(name=push.user_name, icon_url=push.user_avatar) embed.description = "" for commit in push.commits: message = commit.message.splitlines()[0] embed.description += f"[`{commit.id[:7]}`]({commit.url}) {message} - {commit.author.name}\n" print("Sending push message") await send_message(None, embed=embed, avatar_url=push.project.avatar_url)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def command(self, bot, comm, groups):\n commit_message = self.plugin.get_commit_message()\n bot.reply(comm, u'{user}: {msg}', kwvars={'msg': commit_message})", "def _generate_commit(\n self, msg: Optional[str] = None, author: Optional[str] = None\n ) -> dict:\n if author:\n mes_author = author\n else:\n mes_author = self._author\n if not msg:\n msg = f\"Commit via python client {__version__}\"\n ci = {\"commit_info\": {\"author\": mes_author, \"message\": msg}}\n return ci", "def commit(self, msg=None):\n self.log.debug(\"committing in git: %s\" % msg)\n completemsg = \"EasyBuild-commit from %s (time: %s, user: %s) \\n%s\" % (socket.gethostname(),\n time.strftime(\"%Y-%m-%d_%H-%M-%S\"),\n getpass.getuser(),\n msg)\n self.log.debug(\"git status: %s\" % self.client.status())\n try:\n self.client.commit('-am \"%s\"' % completemsg)\n self.log.debug(\"succesfull commit\")\n except GitCommandError, err:\n self.log.warning(\"Commit from working copy %s (msg: %s) failed, empty commit?\\n%s\" % (self.wc, msg, err))\n try:\n info = self.client.push()\n self.log.debug(\"push info: %s \" % info)\n except GitCommandError, err:\n self.log.warning(\"Push from working copy %s to remote %s (msg: %s) failed: %s\" % (self.wc,\n self.repo,\n msg,\n err))", "def build_commit_msg(author, reviewers, source_branch, target_branch,\n commit_message, mp_web_link):\n return \"Merge {} into {} [a={}] [r={}]\\n\\n{}\\n\\nMP: {}\".format(\n source_branch, target_branch, author,\n reviewers, commit_message, mp_web_link)", "async def fetch_commits(self):\n for repo in self.config['repos'].split(','):\n since = datetime.min\n async for msg in self.channel.history(limit=None):\n if not msg.embeds:\n continue\n e = msg.embeds[0]\n if e.title == 'github commit' and e.timestamp and repo in e.description: # type: ignore\n since = e.timestamp\n break\n \n await self.update_commit_activity(repo, since)", "def commits() -> None:\n project = get_project(require=True)\n commits_data = request('get', f'/api/v0/projects/{project.id}/commits/').json()\n current_commit = None\n try:\n current_commit = get_current_commit(project.directory)\n except Exception:\n pass\n\n # Filter out ad-hoc executions (and remove the adhocness marker)\n commits_data = [commit for commit in commits_data if not commit.pop('adhoc', False)]\n\n # Mark the current commit\n for commit in commits_data:\n if commit['identifier'] == current_commit:\n commit['identifier'] += ' (current)'\n\n print_table(commits_data)", "def call_git_push():\n print(\"This will commit and push the git repo\")\n today = datetime.datetime.today()\n call([\"git\", \"add\", \".\"])\n call([\"git\", \"commit\", \"-m\", \"Updated notes. 
{:%Y-%m-%d %H:%M:%S}\".format(today)])\n call([\"git\", \"push\", \"origin\", \"master\"])", "async def changelog(self, ctx: commands.Context):\n status, commits = GitHub().repos.harkonenbade.yutu.commits.get(per_page=10)\n if status == 200:\n await ctx.send(content=\"```Changelog:\\n{}```\".format(\"\\n\".join([\"- {}\".format(c['commit']['message'])\n for c in commits])))\n else:\n await ctx.send(content=\"Error: Cannot reach github\")", "def commit(self):\n run('git', 'add', '.')\n run('git', 'commit', '-a', '-m', 'updates')", "def get_github_commits():\n utcnow = datetime.datetime.utcnow()\n yesterday = utcnow - datetime.timedelta(hours=24)\n yesterday = yesterday.replace(hour=12, minute=0, second=0)\n iso = yesterday.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n\n txt = [\"> IEM Code Pushes <to branch> on Github\\n\"]\n html = [\"<h3>IEM Code Pushes &lt;to branch&gt; on Github</h3>\"]\n\n # get branches, main is first!\n branches = [\"main\"]\n req = exponential_backoff(requests.get, IEM_BRANCHES, timeout=30)\n for branch in req.json():\n if branch[\"name\"] == \"main\":\n continue\n branches.append(branch[\"name\"])\n\n hashes = []\n links = []\n for branch in branches:\n uri = (\n f\"https://api.github.com/repos/akrherz/iem/commits?since={iso}&\"\n f\"sha={branch}\"\n )\n req2 = exponential_backoff(requests.get, uri, timeout=30)\n # commits are in reverse order\n for commit in req2.json()[::-1]:\n if commit[\"sha\"] in hashes:\n continue\n hashes.append(commit[\"sha\"])\n timestring = commit[\"commit\"][\"author\"][\"date\"]\n utcvalid = datetime.datetime.strptime(\n timestring, \"%Y-%m-%dT%H:%M:%SZ\"\n )\n valid = utcvalid.replace(tzinfo=pytz.utc).astimezone(\n pytz.timezone(\"America/Chicago\")\n )\n data = {\n \"stamp\": valid.strftime(\"%b %-d %-2I:%M %p\"),\n \"msg\": commit[\"commit\"][\"message\"],\n \"htmlmsg\": htmlize(commit[\"commit\"][\"message\"])\n .replace(\"\\n\\n\", \"\\n\")\n .replace(\"\\n\", \"<br />\\n\"),\n \"branch\": branch,\n \"url\": commit[\"html_url\"][:-20], # chomp to make shorter\n \"i\": len(links) + 1,\n }\n links.append(\"[%(i)s] %(url)s\" % data)\n txt.append(\n mywrap(\" %(stamp)s[%(i)s] <%(branch)s> %(msg)s\" % data)\n )\n html.append(\n (\n '<li><a href=\"%(url)s\">%(stamp)s</a> '\n \"&lt;%(branch)s&gt; %(htmlmsg)s</li>\\n\"\n )\n % data\n )\n\n if len(txt) == 1:\n txt = txt[0] + \" No code commits found in previous 24 Hours\"\n html = html[0] + (\n \"<strong>No code commits found \" \"in previous 24 Hours</strong>\"\n )\n else:\n txt = \"\\n\".join(txt) + \"\\n\\n\" + \"\\n\".join(links)\n html = html[0] + \"<ul>\" + \"\\n\".join(html[1:]) + \"</ul>\"\n\n return txt + \"\\n\\n\", html + \"<br /><br />\"", "def embed():", "def create_commit(self, event_data_yaml):\n os.chdir(str(self.repository_path))\n sh.git.checkout(self.branch)\n sh.git.add(self.event_dir)\n message_body = (\n '\\n\\nEvent config:\\n~~~yaml\\n{}\\n~~~\\n'.format(event_data_yaml)\n + '\\nScraped with [pyvideo_scrape]'\n + '(https://github.com/pyvideo/pyvideo_scrape)')\n if self.minimal_download:\n message = ('Minimal download: '\n + '{}\\n\\nMinimal download executed for #{}'.format(\n self.title, self.issue)\n + '\\n\\nOnly data that needs [no review](https://'\n + 'github.com/pyvideo/pyvideo_scrape#use-cases) was scraped.'\n + '\\nThis event needs further scraping and human '\n + 'reviewing for the description and other data to show.'\n + message_body)\n sh.git.commit('-m', message)\n sh.git.push('--set-upstream', 'origin', self.branch)\n # ~ sh.git.push('--set-upstream', '--force', 
'origin', self.branch)\n sh.git.checkout('master')\n else:\n message = (\n 'Scraped {}\\n\\nFixes #{}'.format(self.branch, self.issue)\n + message_body)\n sh.git.commit('-m', message)\n sh.git.checkout('master')\n logger.debug('Conference {} commited', self.branch)", "async def github(self, ctx):\n\n embed = discord.Embed(color=ctx.me.color)\n embed.set_thumbnail(url='https://cdn2.iconfinder.com/data/icons/black-' +\n 'white-social-media/64/social_media_logo_github-512.png')\n embed.add_field(name='🔗 Github Repo',\n value=f'[Klikk her]({self.bot.misc[\"source_code\"]}) for å se den dritt skrevne kildekoden min')\n await Defaults.set_footer(ctx, embed)\n await ctx.send(embed=embed)", "def commit(self, msg):\n self.runtime.logger.info('Commit config: {}'.format(msg))\n with Dir(self.runtime.metadata_dir):\n exectools.cmd_assert([\"git\", \"add\", \".\"])\n exectools.cmd_assert([\"git\", \"commit\", \"--allow-empty\", \"-m\", msg])", "async def update_embed(self) -> None:\n\n self.embed = build_actions_embed(LoggingActions.all_enabled_actions(self.bits))\n await self.message.edit(embed=self.embed)", "def git_commit(self, msg):\n self.git_repo.git.add(all=True)\n self.git_repo.git.commit(message='[dots] {}'.format(msg))", "async def version_command(self, ctx):\n member = ctx.message.server.get_member(self.bot.user.id)\n current_commit = get_current_commit()\n commit_url = member.game.url + '/commit/' + current_commit\n msg = await self.bot.send_message(ctx.message.channel, 'I am currently running on commit `{}`\\n\\n{}'.format(current_commit, commit_url))", "def update(repository, args, **_):\n _log(repository, 'INFO', \"Going to build commit %s\" % args[2][:7])", "async def about(self, ctx):\n embed = Embed(color=self.bot.main_color, timestamp=datetime.utcnow())\n embed.set_author(\n name=\"Modmail - About\",\n icon_url=self.bot.user.avatar_url,\n url=\"https://discord.gg/F34cRU8\",\n )\n embed.set_thumbnail(url=self.bot.user.avatar_url)\n\n desc = \"This is an open source Discord bot that serves as a means for \"\n desc += \"members to easily communicate with server administrators in \"\n desc += \"an organised manner.\"\n embed.description = desc\n\n embed.add_field(name=\"Uptime\", value=self.bot.uptime)\n embed.add_field(name=\"Latency\", value=f\"{self.bot.latency * 1000:.2f} ms\")\n embed.add_field(name=\"Version\", value=f\"`{self.bot.version}`\")\n embed.add_field(name=\"Author\", value=\"[`kyb3r`](https://github.com/kyb3r)\")\n\n changelog = await Changelog.from_url(self.bot)\n latest = changelog.latest_version\n\n if parse_version(self.bot.version) < parse_version(latest.version):\n footer = f\"A newer version is available v{latest.version}\"\n else:\n footer = \"You are up to date with the latest version.\"\n\n embed.add_field(\n name=\"GitHub\", value=\"https://github.com/kyb3r/modmail\", inline=False\n )\n\n embed.add_field(\n name=\"Discord Server\", value=\"https://discord.gg/F34cRU8\", inline=False\n )\n\n embed.add_field(\n name=\"Donate\",\n value=\"Support this bot on [`Patreon`](https://patreon.com/kyber).\",\n )\n\n embed.set_footer(text=footer)\n await ctx.send(embed=embed)", "async def discord(self, ctx):\n embed = discord.Embed(title='Join the discord today!', color=0x5643fd, description=\"This server is where \"\n \"all of \"\n \"NOVA's updates and \"\n \"important \"\n \"announcements will pass \"\n \"through. 
The creator of \"\n \"this \"\n \"bot, YeetVegetabales#5313, \"\n \"will also be there testing \"\n \"and letting the communtiy \"\n \"in \"\n \"on things first hand!\")\n embed.set_thumbnail(url='https://images-ext-2.discordapp.net/external/AQCEqCF4Yl_PWAfuA-GReZoDify6'\n '--y4hXOJVkqaDHo/%3Fsize%3D1024/https/cdn.discordapp.com/avatars/709922850953494598'\n '/f78ed19924e8c95abc30f406d47670d7.png')\n embed.add_field(name='Server Invite', value='<:news:730866149109137520> '\n '[Join here](https://discord.gg/Uqh9NXY)')\n await ctx.send(embed=embed)", "def ExecuteEmbed(self):\r\n \r\n Embed = DiscordEmbed(title=\"Test Title 123\", \r\n description=\"Test Description 321\",\r\n color=\"eb5e34\") \r\n Embed.set_timestamp()\r\n \r\n self.WEBHOOK.add_embed(Embed)\r\n Execute = self.WEBHOOK.execute()", "def cmd_commit(message):\n return ['git', 'commit', '-m', message]", "def commit(self, message, author, *args):\n return self.cmd('commit', '-m ' + message, '--author=', *args)", "def git_webhook():\n client = MongoClient(os.getenv('MONGODB_URI', 'mongodb://localhost:27017'))\n database = client.get_database()\n content = {\n \"event\": request.headers['X-GitHub-Event'],\n \"payload\" : request.json,\n \"date\": datetime.utcnow()\n }\n log.info(\"Content Received - \", request.headers['X-GitHub-Delivery'])\n inserted_id = database.events.insert_one(content).inserted_id\n log.info(\"Content Inserted - \", inserted_id)\n return jsonify({\n \"message\": \"Okay!\"\n })", "async def info(self, ctx):\n python = sys.version_info\n\n start = datetime.now()\n await ctx.trigger_typing()\n end = datetime.now()\n\n process = psutil.Process()\n\n embed = discord.Embed(title='Info',\n color=self.bot.color)\n embed.add_field(name='Latest Changelog',\n value='Restructured the project.',\n inline=False)\n embed.add_field(name='Creator',\n value='\\n'.join(self.bot.get_user(owner).mention for owner in self.bot.owner_ids))\n embed.add_field(name='Created on',\n value=f'{self.bot.created_on.strftime(\"%m/%d/%Y\")}\\n'\n f'(~{timeago.format(self.bot.created_on, datetime.utcnow())})')\n embed.add_field(name='Made With',\n value=f'[Python {python.major}.{python.minor}.{python.micro}](https://www.python.org/)\\n'\n f'[discord.py {discord.__version__}](https://discordpy.readthedocs.io/en/latest/)')\n embed.add_field(name='Status',\n value=f'Ping: {(end - start).total_seconds() * 1000:.2f}ms\\n'\n f'CPU: {process.cpu_percent()}%\\n'\n f'RAM: {process.memory_info().rss / 1048576:.2f}MB') # bits to bytes\n embed.add_field(name='Uptime',\n value='Online since:\\n'\n f'{self.bot.uptime.strftime(\"%m/%d/%Y %H:%M UTC\")}\\n'\n f'(~{timeago.format(self.bot.uptime, datetime.utcnow())})')\n embed.add_field(name='Statistics',\n value=f'Commands Run: {1003}\\n'\n f'Guilds: {len(list(self.bot.guilds))}\\n'\n f'Users: {len(list(self.bot.get_all_members()))} '\n f'(Unique: {len(set(self.bot.get_all_members()))})')\n embed.add_field(name='Acknowledgements',\n value='<@113104128783159296> - Answering a lot of questions I had, couldn\\'t have done it with you!\\n'\n '`[RKN]` - Testing! 
thanks guys :)',\n inline=False)\n\n await ctx.send(embed=embed)", "async def CoMLegendBuilder(self, ctx):\n me = CoachService.discord_user_to_coach(ctx.author)\n data = getattr(special_play, inspect.currentframe().f_code.co_name)(ctx.channel.name, me)\n await self.send_embed(data, ctx)", "def process(self):\n\n form = cgi.FieldStorage()\n commit = self.read_commit(form)\n\n print(\"Content-Type: text/plain; charset='utf-8'\\r\")\n print(\"Cache-Control: max-age=60\\r\")\n if form.getfirst(\"download\", \"false\") == \"true\":\n print(\"Content-Disposition: attachment; filename=\\\"patch.txt\\\"\\r\")\n\n print(\"\\r\")\n\n print((\"#\" + json.dumps(PostsaiCommitViewer.format_commit_header(commit), default=convert_to_builtin_type)))\n sys.stdout.flush()\n PostsaiCommitViewer.dump_commit_diff(commit)", "async def _create_embed(self, event, info):\n\n e = discord.Embed(url=info.get(\"url\"))\n e.title = \"%s %s!\" % (info.get(\"streamer\"), info.get(\"live_status\"))\n e.add_field(name=\"Stream title\", value=info.get(\"title\"), inline=False)\n e.add_field(name=\"Begin:\", value=event.begin.format(\"HH:mm:ss ZZZ\") + \" (\" + event.begin.humanize() + \")\", inline=False)\n e.add_field(name=\"Duration: \", value=str(event.duration), inline=False)\n #e.add_field(name=\"Link\", value=info.get(\"url\"), inline=False)\n e.set_image(url=info.get(\"thumbnail\") or e.Empty)\n return e", "def git_append(msg):\n pipe = Popen('git log -1 --pretty=%B', stdout=PIPE, shell=True)\n old_msg = pipe.stdout.read()\n new_msg = '%s\\n%s' % (old_msg.rstrip(), msg)\n\n pipe = Popen('git commit --amend --file=-', stdin=PIPE, shell=True)\n pipe.communicate(new_msg)", "def fetch():\n project = get_project(require=True)\n resp = request('post', '/api/v0/projects/{id}/fetch/'.format(id=project.id))\n data = resp.json()\n commits = data.get('commits', ())\n if commits:\n for commit in commits:\n success('Fetched: {ref} ({identifier})'.format(ref=commit['ref'], identifier=commit['identifier']))\n success('{n} new commits were fetched!'.format(n=len(commits)))\n else:\n info('No new commits.')\n errors = data.get('errors', ())\n for error in errors:\n warning(error)" ]
[ "0.5981543", "0.5900061", "0.58865404", "0.5829465", "0.5829347", "0.58225244", "0.5794675", "0.5761677", "0.5699984", "0.56958634", "0.5648896", "0.5571014", "0.5564511", "0.5560195", "0.5509813", "0.550364", "0.54569304", "0.54245734", "0.5418751", "0.5418156", "0.53992987", "0.53991514", "0.5396564", "0.5389076", "0.5362692", "0.53408855", "0.53322023", "0.5318748", "0.5304797", "0.53013146" ]
0.6393245
0
Builds and sends an embed message with notes information.
async def process_note_hook(data: models.NoteHook): note = data.note user = data.user project = data.project colour = discord.Colour.greyple() embed = discord.Embed(url=note.url, description=note.description, colour=colour) embed.set_author(name=user.username, icon_url=user.avatar_url) if data.issue: issue = data.issue embed.title = f"[{project.namespace}/{project.name}] New comment on issue #{issue.iid}: {issue.title}" if data.commit: commit = data.commit embed.title = f"[{project.namespace}/{project.name}] New comment on commit `{commit.id[:7]}`" if data.merge_request: merge = data.merge_request embed.title = f"[{project.namespace}/{project.name}] New comment on merge request !{merge.iid}: {merge.title}" await send_message(None, embed=embed)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def embed():", "async def note(self, ctx):\n note_embed = discord.Embed(color=discord.Color.blurple())\n note_embed.add_field(name=\"__**Please Note**__\", value=RULES_NOTE)\n await ctx.send(embed=note_embed)", "async def _create_embed(self, event, info):\n\n e = discord.Embed(url=info.get(\"url\"))\n e.title = \"%s %s!\" % (info.get(\"streamer\"), info.get(\"live_status\"))\n e.add_field(name=\"Stream title\", value=info.get(\"title\"), inline=False)\n e.add_field(name=\"Begin:\", value=event.begin.format(\"HH:mm:ss ZZZ\") + \" (\" + event.begin.humanize() + \")\", inline=False)\n e.add_field(name=\"Duration: \", value=str(event.duration), inline=False)\n #e.add_field(name=\"Link\", value=info.get(\"url\"), inline=False)\n e.set_image(url=info.get(\"thumbnail\") or e.Empty)\n return e", "async def _view_note(self, ctx: Context, number: int):\n\n author = ctx.author\n\n embed_links = ctx.channel.permissions_for(ctx.guild.me).embed_links\n\n author_str = f\"{author.name}'\"\n\n if author.name[-1].lower() != \"s\":\n author_str += \"s\"\n\n async with self.config.member(author).notes() as notes:\n try:\n note = notes[number-1]\n except IndexError:\n return await ctx.send(\n _(\"Note number {} not found.\").format(number)\n )\n\n msg_info = \"\"\n if note[\"author\"]:\n msg_info += _(\"**Author:** {}\").format(note[\"author\"])\n if note[\"channel\"]:\n msg_info += _(\"\\n**Channel:** {}\").format(note[\"channel\"])\n if note[\"jump_url\"]:\n if embed_links:\n msg_info += _(\n \"\\n[Click here to jump to message]({})\"\n ).format(note[\"jump_url\"])\n else:\n msg_info += _(\n \"\\n**Jump To Message:** {}\"\n ).format(note[\"jump_url\"])\n\n note_info = _(\n \"{}\\n\\n**Note:**\\n```{}```\\n**Reason:**\\n```{}```\"\n ).format(\n msg_info,\n note[\"note\"],\n note[\"reason\"]\n ).strip()\n\n if embed_links:\n page = discord.Embed(\n colour=0xff0000,\n description=note_info,\n title=_(\"{} TvM Note #{}\").format(author_str, number),\n timestamp=ctx.message.created_at\n )\n await ctx.send(embed=page)\n else:\n page = _(\n \"**{author} TvM Note #{number}**\"\n \"\\n\\n{note}\"\n ).format(\n author=author_str,\n number=number,\n note=note_info\n )\n await ctx.send(page)", "async def _notes(self, ctx: Context):\n pass", "async def discord(self, ctx):\n embed = discord.Embed(title='Join the discord today!', color=0x5643fd, description=\"This server is where \"\n \"all of \"\n \"NOVA's updates and \"\n \"important \"\n \"announcements will pass \"\n \"through. 
The creator of \"\n \"this \"\n \"bot, YeetVegetabales#5313, \"\n \"will also be there testing \"\n \"and letting the communtiy \"\n \"in \"\n \"on things first hand!\")\n embed.set_thumbnail(url='https://images-ext-2.discordapp.net/external/AQCEqCF4Yl_PWAfuA-GReZoDify6'\n '--y4hXOJVkqaDHo/%3Fsize%3D1024/https/cdn.discordapp.com/avatars/709922850953494598'\n '/f78ed19924e8c95abc30f406d47670d7.png')\n embed.add_field(name='Server Invite', value='<:news:730866149109137520> '\n '[Join here](https://discord.gg/Uqh9NXY)')\n await ctx.send(embed=embed)", "async def _view_all_notes(self, ctx: Context):\n\n author = ctx.author\n\n note_infos = []\n\n embed_links = ctx.channel.permissions_for(ctx.guild.me).embed_links\n\n author_str = f\"{author.name}'\"\n\n if author.name[-1].lower() != \"s\":\n author_str += \"s\"\n\n async with self.config.member(author).notes() as notes:\n total = len(notes)\n for page_num, note in enumerate(notes, start=1):\n msg_info = \"\"\n if note[\"author\"]:\n msg_info += _(\"**Author:** {}\").format(note[\"author\"])\n if note[\"channel\"]:\n msg_info += _(\"\\n**Channel:** {}\").format(note[\"channel\"])\n if note[\"jump_url\"]:\n if embed_links:\n msg_info += _(\n \"\\n[Click here to jump to message]({})\"\n ).format(note[\"jump_url\"])\n else:\n msg_info += _(\n \"\\n**Jump To Message:** {}\"\n ).format(note[\"jump_url\"])\n\n note_info = _(\n \"{}\\n\\n**Note:**\\n```{}```\\n**Reason:**\\n```{}```\"\n ).format(\n msg_info,\n note[\"note\"],\n note[\"reason\"]\n ).strip()\n\n if embed_links:\n page = discord.Embed(\n colour=0xff0000,\n description=note_info,\n title=_(\"{} TvM Notes\").format(author_str),\n timestamp=ctx.message.created_at\n )\n\n page.set_footer(\n text=_(\"Page {page_num}/{leng}\").format(\n page_num=page_num, leng=total\n )\n )\n else:\n page = _(\n \"**{author} TvM Notes**\"\n \"\\n\\n{note}\"\n \"\\n{footer}\"\n ).format(\n author=author_str,\n note=note_info,\n footer=_(\"*Page {page_num}/{leng}*\").format(\n page_num=page_num, leng=total\n )\n )\n\n note_infos.append(page)\n\n await menu(ctx, note_infos, DEFAULT_CONTROLS)", "def _get_body(self):\n\n bodyWrap = (\n u\"<?xml version=\\\"1.0\\\" encoding=\\\"UTF-8\\\"?>\"\n u\"<!DOCTYPE en-note SYSTEM \\\"http://xml.evernote.com/pub/enml2.dtd\\\">\"\n u\"<en-note>{body}</en-note>\"\n )\n att_enml = \"\\n\".join(self.embed_resources)\n\n return bodyWrap.format(body=att_enml)", "def note(self):\n content = sys.argv[2]\n self.record('NOTE %s' % content)\n print('Note added')", "def _build_about_embed(self) -> discord.Embed:\n with self.about_aoc_filepath.open(\"r\", encoding=\"utf8\") as f:\n embed_fields = json.load(f)\n\n about_embed = discord.Embed(title=self._base_url, colour=Colours.soft_green, url=self._base_url)\n about_embed.set_author(name=\"Advent of Code\", url=self._base_url)\n for field in embed_fields:\n about_embed.add_field(**field)\n\n about_embed.set_footer(text=f\"Last Updated (UTC): {datetime.utcnow()}\")\n\n return about_embed", "def note(self, irc, msg, args, user, id):\n try:\n note = self.db.get(id)\n except dbi.NoRecordError:\n irc.errorInvalid('note id')\n if user.id != note.frm and user.id != note.to:\n s = 'You may only retrieve notes you\\'ve sent or received.'\n irc.error(s)\n return\n newnote = self._formatNote(note, user.id)\n irc.reply(newnote, private=(not note.public))\n self.db.setRead(id)", "def createNote(self, authenticationToken, note):\r\n pass", "async def prepembed(ctx, channel:discord.TextChannel, *, jsonInput):\n jso = json.loads(jsonInput)\n title = 
jso['title'] if 'title' in jso else \"\"\n desc = jso['description'] if 'description' in jso else \"\"\n titleUrl = jso['titleUrl'] if 'titleUrl' in jso else \"\"\n hexcolor = jso['hexColor'] if 'hexColor' in jso else \"#2E66B6\"\n webcolor = jso['webColor'] if 'webColor' in jso else \"\"\n thumbnailUrl = jso['thumbnailUrl'] if 'thumbnailUrl' in jso else \"\"\n authorName = jso['authorName'] if 'authorName' in jso else \"\"\n authorUrl = jso['authorUrl'] if 'authorUrl' in jso else \"\"\n authorIcon = jso['authorIcon'] if 'authorIcon' in jso else \"\"\n if 'author' in jso:\n authorName = ctx.message.author.name\n authorIcon = ctx.message.author.avatar_url_as(format=\"jpg\")\n fields = jso['fields'] if 'fields' in jso else \"\"\n footerText = jso['footerText'] if 'footerText' in jso else \"\"\n footerUrl = jso['footerUrl'] if 'footerUrl' in jso else \"\"\n imageUrl = jso['imageUrl'] if 'imageUrl' in jso else \"\"\n embed = assemble_embed(\n title=title,\n desc=desc,\n titleUrl=titleUrl,\n hexcolor=hexcolor,\n webcolor=webcolor,\n thumbnailUrl=thumbnailUrl,\n authorName=authorName,\n authorUrl=authorUrl,\n authorIcon=authorIcon,\n fields=fields,\n footerText=footerText,\n footerUrl=footerUrl,\n imageUrl=imageUrl\n )\n await channel.send(embed=embed)", "async def sayembed(self, ctx, text_channel: typing.Union[discord.TextChannel, str] = None, *, embed_format=None):\n embed_creator_url = \"https://embedbuilder.nadekobot.me/\"\n if isinstance(text_channel, str):\n if isinstance(embed_format, str):\n embed_format = text_channel + embed_format\n text_channel = ctx.channel\n try:\n if not embed_format or not text_channel:\n return await ctx.send(f\"> **This command follows the format from {embed_creator_url}**\")\n else:\n author_name = None\n author_icon_url = None\n embed_footer_text = None\n embed_footer_url = None\n embed_format = json.loads(embed_format)\n embed_image = embed_format.get('image')\n embed_footer = embed_format.get('footer')\n embed_thumbnail = embed_format.get('thumbnail')\n embed_author = embed_format.get('author')\n if embed_author:\n author_name = embed_author.get(\"name\")\n author_icon_url = embed_author.get(\"icon_url\")\n if embed_footer:\n embed_footer_text = embed_footer.get('text')\n embed_footer_url = embed_footer.get('icon_url')\n author_url = embed_format.get('url')\n\n if author_icon_url or author_url:\n embed_format.pop('author')\n if embed_footer_url:\n embed_format.pop('footer')\n if embed_image:\n embed_format.pop('image')\n if embed_thumbnail:\n embed_format.pop('thumbnail')\n\n embed = discord.Embed.from_dict(embed_format)\n\n if embed_image:\n embed.set_image(url=embed_image)\n if embed_footer_url:\n embed.set_footer(text=embed_footer_text, icon_url=embed_footer_url)\n if embed_thumbnail:\n embed.set_thumbnail(url=embed_thumbnail)\n if author_url and author_icon_url:\n embed.set_author(name=author_name, url=author_url, icon_url=author_icon_url)\n elif not author_icon_url and author_url:\n embed.set_author(name=author_name, url=author_url)\n elif not author_url and author_icon_url:\n embed.set_author(name=author_name, icon_url=author_icon_url)\n\n plain_body = embed_format.get('plainText')\n if plain_body:\n return await text_channel.send(plain_body, embed=embed)\n else:\n return await text_channel.send(embed=embed)\n except Exception as e:\n await ctx.send(f\"ERROR - {e}.\\nFollow the format from {embed_creator_url}\")\n log.console(e)", "def generate_message(self) -> List[mido.Message]:\n # check for a None note (which is a \"pause\")\n if self.__note:\n 
note_value = self.__note + self.__info.octave\n note_velocity = self.__info.volume\n else:\n note_value = 0\n note_velocity = 0\n return [\n mido.Message(\n \"note_on\",\n note=note_value,\n velocity=note_velocity,\n time=NOTE_DURATION,\n ),\n mido.Message(\n \"note_off\",\n note=note_value,\n velocity=note_velocity,\n time=0,\n ),\n ]", "def reply_embed(self, message: str):\n embed = discord.Embed(color=discord.Color.blurple())\n embed.title = \"\"\n embed.description = message\n return embed", "def note():", "async def _info(self, ctx: Context):\n\n embed = discord.Embed(colour=await ctx.embed_colour())\n\n perm_int = discord.Permissions(268494928)\n\n data = await self.bot.application_info()\n invite_url = discord.utils.oauth_url(data.id, permissions=perm_int)\n\n embed.description = (\n \"TvM Assistant is a Discord bot with utility commands to make hosting TvMs easier.\"\n \"\\n\\nSome of the bot features include:\"\n \"\\n\\n- Setup roles and channel creation\"\n \"\\n- Management of sign-ups, sign-outs, spectators and replacements\"\n \"\\n- In-built logging to detect and ignore private channels\"\n \"\\n- Quick creation of player, mafia and spectator chats\"\n \"\\n- Vote counts and time since day/night started\"\n )\n\n links = (\n f\"\\n- [Invite to your server]({invite_url})\"\n f\"\\n- [Quickstart]({QUICKSTART})\"\n f\"\\n- [Commands Reference]({COMMANDS_REFERENCE})\"\n f\"\\n- [Source Code]({SOURCE_CODE})\"\n )\n\n embed.add_field(name=\"\\u200b\\nQuick Links\", value=links)\n embed.set_author(name=f\"About {ctx.me.name}\", icon_url=ctx.me.avatar_url)\n\n await ctx.send(embed=embed)", "async def dadjoke(self, ctx):\n author = ctx.message.author\n joke = await self.get_joke()\n data = Embed.create(self, ctx, title='Demaratus Dad Jokes :joy:',\n description=joke)\n image = (f\"https://media.discordapp.net/attachments/745608075670585344/770068453502877716/DADJOKES.png?width=1442&height=481\")\n data.set_author\n data.set_image(url=image)\n await ctx.send(embed=data)", "async def about(self, ctx):\n embed = Embed(color=self.bot.main_color, timestamp=datetime.utcnow())\n embed.set_author(\n name=\"Modmail - About\",\n icon_url=self.bot.user.avatar_url,\n url=\"https://discord.gg/F34cRU8\",\n )\n embed.set_thumbnail(url=self.bot.user.avatar_url)\n\n desc = \"This is an open source Discord bot that serves as a means for \"\n desc += \"members to easily communicate with server administrators in \"\n desc += \"an organised manner.\"\n embed.description = desc\n\n embed.add_field(name=\"Uptime\", value=self.bot.uptime)\n embed.add_field(name=\"Latency\", value=f\"{self.bot.latency * 1000:.2f} ms\")\n embed.add_field(name=\"Version\", value=f\"`{self.bot.version}`\")\n embed.add_field(name=\"Author\", value=\"[`kyb3r`](https://github.com/kyb3r)\")\n\n changelog = await Changelog.from_url(self.bot)\n latest = changelog.latest_version\n\n if parse_version(self.bot.version) < parse_version(latest.version):\n footer = f\"A newer version is available v{latest.version}\"\n else:\n footer = \"You are up to date with the latest version.\"\n\n embed.add_field(\n name=\"GitHub\", value=\"https://github.com/kyb3r/modmail\", inline=False\n )\n\n embed.add_field(\n name=\"Discord Server\", value=\"https://discord.gg/F34cRU8\", inline=False\n )\n\n embed.add_field(\n name=\"Donate\",\n value=\"Support this bot on [`Patreon`](https://patreon.com/kyber).\",\n )\n\n embed.set_footer(text=footer)\n await ctx.send(embed=embed)", "def push_note(self, device_iden, title, body):\n self.session.post(\n 
PUSH_URL,\n json={\n \"device_iden\": device_iden,\n \"type\": \"note\",\n \"title\": title,\n \"body\": body\n }).raise_for_status()", "def embed(ctx=None, title=None, description=None, fields=None, customFooter=False, customThumbnail=None, customColor=None, image=None):\n\n e = discord.Embed(title=title, description=description)\n if customColor is None:\n e.color = color()\n else:\n e.color = color(customColor)\n \n if fields != None:\n index = 0\n # Please fix the code below, There's nothing wrong with it, it's just messy and I'm sure that's not the right way to do it.\n for field in fields:\n session = []\n for key, value in field.items():\n session.append(key)\n\n if key == \"n\":\n name = value \n \n if key == \"v\":\n xValue = value \n \n if key == \"inline\":\n inline = value \n \n if not \"inline\" in session:\n inline = False\n \n e.add_field(name=f\"{name}\", value=xValue, inline=inline)\n \n if not customFooter:\n footer(e, ctx)\n \n if image is None:\n try:\n if customThumbnail is None:\n e.set_thumbnail(url=ctx.author.avatar_url)\n else:\n e.set_thumbnail(url=customThumbnail)\n except:\n pass \n else:\n e.set_image(url=image)\n return e", "def add_note():\n pass", "def ExecuteEmbed(self):\r\n \r\n Embed = DiscordEmbed(title=\"Test Title 123\", \r\n description=\"Test Description 321\",\r\n color=\"eb5e34\") \r\n Embed.set_timestamp()\r\n \r\n self.WEBHOOK.add_embed(Embed)\r\n Execute = self.WEBHOOK.execute()", "async def CoMLegendBuilder(self, ctx):\n me = CoachService.discord_user_to_coach(ctx.author)\n data = getattr(special_play, inspect.currentframe().f_code.co_name)(ctx.channel.name, me)\n await self.send_embed(data, ctx)", "def create_a_note(self, data):\n return self.client._post(\"/notes\", json=data)", "def release_notes(version, author, git_ref_target, git_ref_source, build_type):\n print('generating release notes')\n if git_ref_source:\n if git_ref_source != 'HEAD':\n git_ref_source = 'origin/{}'.format(git_ref_source)\n changelog = run('git log origin/{}..{}'.format(git_ref_target,\n git_ref_source))\n else:\n git_ref_source = 'origin/master'\n changelog = run('git log {}..origin/{}'.format(git_ref_source, git_ref_target))\n notes = {\n 'version': version,\n 'author': author,\n 'build_type': build_type,\n 'date': datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S'),\n 'changelog': changelog.stdout\n }\n return notes", "def build_note(text, level=1, limit=180, strip=True, keyword=\"NOTE\"):\n note = []\n key = int(level)\n tag = keyword\n data = text\n if strip:\n data = data.strip()\n while data != \"\":\n index = limit\n if len(data) < limit:\n index = len(data)\n else:\n while data[index - 1] == \" \" and index > 0:\n index = index - 1\n chunk = data[:index]\n data = data[index:]\n entry = \"{0} {1} {2}\".format(key, tag, chunk)\n note.append(entry)\n tag = \"CONC\"\n key = int(level) + 1\n return note", "def create_note(self, owner, title, text, note_type, important):\r\n note = self.create(owner=owner, title=title, text=text, note_type=note_type, important=important)\r\n return note", "async def helps(ctx):\n embed = discord.Embed(title='**Help....**', description=\"The prefix for the bot is 'qq'.\\\nYah cuz you know _less qq, more pew pew_ ...\", colour=discord.Color.purple())\n embed.set_footer(text='For full list of commands with complete functions do _cmds')\n embed.add_field(name='Core', value='ping, help, cmds, botinfo')\n embed.add_field(name='Economy', value='cry, vaultoftears, tear shop', inline=False)\n embed.add_field(name='Entertainment', 
value='roast, flirt, compliment, geek, nerdystuff, quote, fortune,\\\n8ball, coffee, wannagrabacoffee, book, dadjoke', inline=False)\n embed.add_field(name='Utility', value='purge, ban, kick, unban', inline=False)\n embed.add_field(name='Games', value='diceroll, guessing_game', inline=False)\n await ctx.send(embed=embed)" ]
[ "0.653712", "0.6195667", "0.5904277", "0.59017", "0.58894485", "0.58441114", "0.5828449", "0.5747832", "0.5732143", "0.57031065", "0.5664874", "0.56358755", "0.55843145", "0.5578333", "0.55621606", "0.5557521", "0.55531627", "0.5505942", "0.54942673", "0.54742163", "0.5460713", "0.5444641", "0.54375815", "0.540969", "0.5406481", "0.5381556", "0.5378305", "0.53701764", "0.53509796", "0.5317061" ]
0.63638
1
Builds and sends an embed message with merge request information.
async def process_merge_request_hook(data: models.MergeRequestHook): project = data.project merge = data.merge_request user = data.user description = "" action = "Issue updated" colour = discord.Colour.light_grey() if merge.action == "open": action = "Merge request opened" description = merge.description colour = discord.Colour.dark_green() elif merge.action == "close": action = "Merge request closed" colour = discord.Colour.dark_grey() embed = discord.Embed(title=f"[{project.namespace}/{project.name}] {action}: !{merge.iid} {merge.title}", url=merge.url, description=description, colour=colour) embed.set_author(name=user.username, icon_url=user.avatar_url) embed.set_footer(text=f"{merge.source_branch} → {merge.target_branch}") await send_message(None, embed=embed)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_commit_msg(author, reviewers, source_branch, target_branch,\n commit_message, mp_web_link):\n return \"Merge {} into {} [a={}] [r={}]\\n\\n{}\\n\\nMP: {}\".format(\n source_branch, target_branch, author,\n reviewers, commit_message, mp_web_link)", "def build_embed(self, source_object) -> discord.Embed:\n url, location, first_line = self.get_github_url(source_object)\n\n if isinstance(source_object, commands.HelpCommand):\n title = \"Help Command\"\n help_cmd = self.bot.get_command(\"help\")\n description = help_cmd.help\n elif isinstance(source_object, commands.Command):\n description = source_object.short_doc\n title = f\"Command: {source_object.qualified_name}\"\n elif isinstance(source_object, ModuleType):\n title = f\"Extension: {source_object.__name__}.py\"\n description = discord.Embed.Empty\n else:\n title = f\"Cog: {source_object.qualified_name}\"\n description = source_object.description.splitlines()[0]\n\n embed = discord.Embed(title=title, description=description, colour=0x87CEEB)\n embed.add_field(name=\"Source Code\", value=f\"[Here's the Github link!]({url})\")\n line_text = f\":{first_line}\" if first_line else \"\"\n embed.set_footer(text=f\"{location}{line_text}\")\n\n return embed", "def embed():", "async def update_embed(self) -> None:\n\n self.embed = build_actions_embed(LoggingActions.all_enabled_actions(self.bits))\n await self.message.edit(embed=self.embed)", "def slackbuild_pubsub(data, context):\n global config\n global slack\n\n print(data)\n print(context)\n\n build, template = BuildStatus.toMessage(data, config)\n\n msg = slack.render_message(build, template)\n\n return slack.post_message(msg)", "async def CoMLegendBuilder(self, ctx):\n me = CoachService.discord_user_to_coach(ctx.author)\n data = getattr(special_play, inspect.currentframe().f_code.co_name)(ctx.channel.name, me)\n await self.send_embed(data, ctx)", "async def _create_embed(self, event, info):\n\n e = discord.Embed(url=info.get(\"url\"))\n e.title = \"%s %s!\" % (info.get(\"streamer\"), info.get(\"live_status\"))\n e.add_field(name=\"Stream title\", value=info.get(\"title\"), inline=False)\n e.add_field(name=\"Begin:\", value=event.begin.format(\"HH:mm:ss ZZZ\") + \" (\" + event.begin.humanize() + \")\", inline=False)\n e.add_field(name=\"Duration: \", value=str(event.duration), inline=False)\n #e.add_field(name=\"Link\", value=info.get(\"url\"), inline=False)\n e.set_image(url=info.get(\"thumbnail\") or e.Empty)\n return e", "async def embed(self, context,message):\n\t\tembed = discord.Embed(\n\t\t\tdescription=message,\n\t\t\tcolor=config[\"success\"]\n\t\t)\n\t\tawait context.send(embed=embed)", "async def prepembed(ctx, channel:discord.TextChannel, *, jsonInput):\n jso = json.loads(jsonInput)\n title = jso['title'] if 'title' in jso else \"\"\n desc = jso['description'] if 'description' in jso else \"\"\n titleUrl = jso['titleUrl'] if 'titleUrl' in jso else \"\"\n hexcolor = jso['hexColor'] if 'hexColor' in jso else \"#2E66B6\"\n webcolor = jso['webColor'] if 'webColor' in jso else \"\"\n thumbnailUrl = jso['thumbnailUrl'] if 'thumbnailUrl' in jso else \"\"\n authorName = jso['authorName'] if 'authorName' in jso else \"\"\n authorUrl = jso['authorUrl'] if 'authorUrl' in jso else \"\"\n authorIcon = jso['authorIcon'] if 'authorIcon' in jso else \"\"\n if 'author' in jso:\n authorName = ctx.message.author.name\n authorIcon = ctx.message.author.avatar_url_as(format=\"jpg\")\n fields = jso['fields'] if 'fields' in jso else \"\"\n footerText = jso['footerText'] if 'footerText' in jso 
else \"\"\n footerUrl = jso['footerUrl'] if 'footerUrl' in jso else \"\"\n imageUrl = jso['imageUrl'] if 'imageUrl' in jso else \"\"\n embed = assemble_embed(\n title=title,\n desc=desc,\n titleUrl=titleUrl,\n hexcolor=hexcolor,\n webcolor=webcolor,\n thumbnailUrl=thumbnailUrl,\n authorName=authorName,\n authorUrl=authorUrl,\n authorIcon=authorIcon,\n fields=fields,\n footerText=footerText,\n footerUrl=footerUrl,\n imageUrl=imageUrl\n )\n await channel.send(embed=embed)", "def slackbuild_webhook(req: Request):\n global config\n global slack\n global cloudbuild\n\n # slack submits a POST\n if req.method != \"POST\":\n return abort(405)\n\n # not a true request from slack\n verified, err = slack.verify_webhook(req)\n if not verified:\n print(err)\n return abort(403)\n\n body = Slack.parse_request(req)\n argv = Slack.parse_command(body)\n msg = \"\"\n\n output, success = Command.run(argv, cloudbuild, config)\n\n if output is None:\n if success:\n # intentionaly not responding with a slack message\n return ('', 200)\n else:\n return abort(500)\n elif Slack.is_interactive_message(body):\n msg = slack.render_interactive_message(body, success, output)\n else:\n color = Colors.SUCCESS if success else Colors.FAILURE\n msg = slack.render_message({\"result\": output, \"color\": color}, \"command.json\")\n\n msg = json.dumps(msg)\n print(msg)\n return Response(response=msg, content_type=\"application/json\")", "def merge(self, merge_with_id):\r\n params = base.get_params(None, locals())\r\n url = '{0}/merge'.format(self.get_url())\r\n return http.Request('POST', url, params), parsers.parse_json", "def merge(self, merge_with_id):\r\n params = base.get_params(None, locals())\r\n url = '{0}/merge'.format(self.get_url())\r\n return http.Request('POST', url, params), parsers.parse_json", "def merge(self, merge_with_id):\r\n params = base.get_params(None, locals())\r\n url = '{0}/merge'.format(self.get_url())\r\n return http.Request('POST', url, params), parsers.parse_json", "async def sayembed(self, ctx, text_channel: typing.Union[discord.TextChannel, str] = None, *, embed_format=None):\n embed_creator_url = \"https://embedbuilder.nadekobot.me/\"\n if isinstance(text_channel, str):\n if isinstance(embed_format, str):\n embed_format = text_channel + embed_format\n text_channel = ctx.channel\n try:\n if not embed_format or not text_channel:\n return await ctx.send(f\"> **This command follows the format from {embed_creator_url}**\")\n else:\n author_name = None\n author_icon_url = None\n embed_footer_text = None\n embed_footer_url = None\n embed_format = json.loads(embed_format)\n embed_image = embed_format.get('image')\n embed_footer = embed_format.get('footer')\n embed_thumbnail = embed_format.get('thumbnail')\n embed_author = embed_format.get('author')\n if embed_author:\n author_name = embed_author.get(\"name\")\n author_icon_url = embed_author.get(\"icon_url\")\n if embed_footer:\n embed_footer_text = embed_footer.get('text')\n embed_footer_url = embed_footer.get('icon_url')\n author_url = embed_format.get('url')\n\n if author_icon_url or author_url:\n embed_format.pop('author')\n if embed_footer_url:\n embed_format.pop('footer')\n if embed_image:\n embed_format.pop('image')\n if embed_thumbnail:\n embed_format.pop('thumbnail')\n\n embed = discord.Embed.from_dict(embed_format)\n\n if embed_image:\n embed.set_image(url=embed_image)\n if embed_footer_url:\n embed.set_footer(text=embed_footer_text, icon_url=embed_footer_url)\n if embed_thumbnail:\n embed.set_thumbnail(url=embed_thumbnail)\n if author_url 
and author_icon_url:\n embed.set_author(name=author_name, url=author_url, icon_url=author_icon_url)\n elif not author_icon_url and author_url:\n embed.set_author(name=author_name, url=author_url)\n elif not author_url and author_icon_url:\n embed.set_author(name=author_name, icon_url=author_icon_url)\n\n plain_body = embed_format.get('plainText')\n if plain_body:\n return await text_channel.send(plain_body, embed=embed)\n else:\n return await text_channel.send(embed=embed)\n except Exception as e:\n await ctx.send(f\"ERROR - {e}.\\nFollow the format from {embed_creator_url}\")\n log.console(e)", "def notify_channel_on_merge(self):\n if self.pr.is_merged:\n LOG.debug(\"**** Repo=%s, new merge came to=%s, setting trace to=%s channel\"\n %(self.pr.repo, self.pr.base_branch, self.pr.config.codeChannelName))\n msg = MSG_CODE_CHANNEL.format(title=self.pr.title, desc=self.pr.description, pr=self.pr.link,\n head_branch=self.pr.head_branch, base_branch=self.pr.base_branch,\n pr_by=self.created_by, merge_by=self.merged_by)\n self.slack.postToSlack(self.pr.config.codeChannelName, msg)\n LOG.info(\"informed %s because pr=%s is merged into sensitive branch=%s\" %\n (self.pr.config.codeChannelName, self.pr.link_pretty, self.pr.base_branch))\n return {\"msg\":\"informed %s because pr=%s is merged into sensitive branch=%s\" %\n (self.pr.config.codeChannelName, self.pr.link_pretty, self.pr.base_branch)}\n return {\"msg\", \"Skipped posting to code channel because '%s' is not merge event\" %self.pr.action}", "def handle_new_oembed_details(embed_data):\n\n source = embed_data.get('oembed_source').strip()\n tweet_id = embed_data.get('tweet_id')\n\n assert tweet_id, \"Can only handle tweets\"\n assert embed_data.get('html'), \"Need HTML for embedding\"\n assert source, \"Need to know where this came from\"\n\n print 'new oembed details: %s %s' % (source, len(embed_data))\n\n # store all the data we received\n key = keys.tweet_embed_data(source,tweet_id)\n r = rc.hmset(key, embed_data)\n\n # we are giving preference to embedly data,\n # so also update the tweet's data w/ the embedly html\n if source == 'embedly':\n print 'embedly found, updating tweet data'\n key = keys.tweet_data(tweet_id)\n r = rc.hset(key, 'embed_html', embed_data.get('html'))\n\n # fire event that oembed has been saved\n revent.fire('new_oembed_details_saved', embed_data)\n\n return True", "def submitBuildRequest(ss, reason, props=None, now=False):", "def _update_mandrill_payload(self, payload, message):\n\n accepted_headers = {}\n if message.extra_headers:\n for k in message.extra_headers.keys():\n if k.startswith('X-') or k == 'Reply-To':\n accepted_headers[str(k)] = message.extra_headers[k]\n payload['message'].update({'headers': accepted_headers})\n\n payload['message'].update({\n 'tags': message.tags,\n 'track_opens': message.track_opens,\n 'track_clicks': message.track_clicks,\n 'headers': accepted_headers,\n })\n\n if message.global_merge_vars:\n payload['message']['global_merge_vars'] = [\n {'name': key, 'content': value}\n for key, value in message.global_merge_vars.iteritems()\n ]\n\n # sending html over to mandrill\n if getattr(message, 'alternatives', None):\n if len(message.alternatives) > 1:\n raise ImproperlyConfigured(\n \"Mandrill only accepts plain text and html emails. 
\"\n \"Please check the alternatives you have attached to \"\n \"your message.\")\n payload['message']['html'] = message.alternatives[0][0]\n\n # using a mandrill template message\n if message.content_subtype == 'mandrill.template':\n payload.update({\n 'template_name': message.template_name,\n 'template_content': message.template_content,\n })", "def embed(self, data, mime_type=\"text/plain\", encode_data_to_base64=True):\n if encode_data_to_base64:\n data = base64.standard_b64encode(data.encode()).decode()\n self.embeddings.append({\"data\": data, \"mime_type\": mime_type})", "async def process_push_hook(push: models.PushHook):\n repository = push.repository\n project = push.project\n commit_str = \"commit\" if push.total_commits_count == 1 else \"commits\"\n # Show link to commit compare if there's more than one commit\n if push.total_commits_count > 1:\n embed_url = f\"{repository.homepage}/compare/{push.before[:7]}...{push.after[:7]}\"\n else:\n embed_url = f\"{repository.homepage}/commit/{push.after[:7]}\"\n\n if push.before == EMPTY_COMMIT:\n embed = discord.Embed(title=f\"[{project.namespace}/{project.name}] New branch created {push.branch}\",\n url=embed_url, colour=discord.Colour.light_grey())\n embed.set_author(name=push.user_name, icon_url=push.user_avatar)\n await send_message(None, embed=embed, avatar_url=push.project.avatar_url)\n elif push.after == EMPTY_COMMIT:\n embed = discord.Embed(title=f\"[{project.namespace}/{project.name}] Branch deleted {push.branch}\",\n url=embed_url, colour=discord.Colour.light_grey())\n embed.set_author(name=push.user_name, icon_url=push.user_avatar)\n await send_message(None, embed=embed, avatar_url=push.project.avatar_url)\n\n # If there are no commits, do not show a message\n if not push.total_commits_count:\n return\n\n embed = discord.Embed(title=f\"[{project.namespace}/{project.name}:{push.branch}] \"\n f\"{push.total_commits_count} new {commit_str}\",\n url=embed_url, colour=discord.Colour.blurple())\n embed.set_author(name=push.user_name, icon_url=push.user_avatar)\n embed.description = \"\"\n for commit in push.commits:\n message = commit.message.splitlines()[0]\n embed.description += f\"[`{commit.id[:7]}`]({commit.url}) {message} - {commit.author.name}\\n\"\n print(\"Sending push message\")\n await send_message(None, embed=embed, avatar_url=push.project.avatar_url)", "def ExecuteEmbed(self):\r\n \r\n Embed = DiscordEmbed(title=\"Test Title 123\", \r\n description=\"Test Description 321\",\r\n color=\"eb5e34\") \r\n Embed.set_timestamp()\r\n \r\n self.WEBHOOK.add_embed(Embed)\r\n Execute = self.WEBHOOK.execute()", "def form_payload(build_number, job_name, build_url, status):\n message = \"Build #{} {} for {}\".format(build_number, status, job_name)\n description = \"Build #{} {} for {}. 
\\nPlease check detailed logs here: {}console\".format(build_number, status, job_name, build_url)\n \n branch_name = \"\"\n # Check optional env variable\n if \"BRANCH_NAME\" in os.environ:\n branch_name = os.environ['BRANCH_NAME']\n\n payload_rep = {\"message\" : message , \"description\" : description, \"branch_name\" : branch_name,\n \"build_url\": build_url, \"job_name\": job_name, \"build_number\": build_number, \"node_name\": os.environ['NODE_NAME'],\n \"status\" : status, \"event_id\" : job_name}\n return payload_rep", "def svn_client_mergeinfo_log_merged(char_path_or_url, svn_opt_revision_t_peg_revision, char_merge_source_path_or_url, svn_opt_revision_t_src_peg_revision, svn_log_entry_receiver_t_receiver, svn_boolean_t_discover_changed_paths, apr_array_header_t_revprops, svn_client_ctx_t_ctx, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass", "def reply_embed(self, message: str):\n embed = discord.Embed(color=discord.Color.blurple())\n embed.title = \"\"\n embed.description = message\n return embed", "def build(self, observation):\n raise NotImplementedError(\n 'Needs to be implemented as part of Embedder Interface')", "def _embed_result(self, embedding):\n # project original embedding\n project_weight = self.project.weight # (o, c)\n project_embedding = embedding.permute(0, 2, 1).unsqueeze(-1) \\\n * project_weight.permute(1, 0) # (n, e, c, 1) * (c, o) -> (n, e, c, o)\n project_embedding = project_embedding.permute(0, 3, 2, 1) # (n, o, c, e)\n # interaction\n square_of_sum = torch.sum(project_embedding, dim=2) ** 2\n sum_of_square = torch.sum(project_embedding ** 2, dim=2)\n embed_result = 0.5 * (square_of_sum - sum_of_square).sum(dim=2)\n return embed_result", "def queue_buildrequest(event):\n get().build_queue.put(event)", "async def bubblewrap(self, ctx):\n data = Embed.create(\n self, ctx, title=\"Bubblewrap!\",\n description=(\n \"||pop||||pop||||pop||||pop||||pop||||pop||||pop||||pop||||pop||||pop||\\n\"\n \"||pop||||pop||||pop||||pop||||pop||||pop||||pop||||pop||||pop||||pop||\\n\"\n \"||pop||||pop||||pop||||pop||||pop||||pop||||pop||||pop||||pop||||pop||\\n\"\n \"||pop||||pop||||pop||||pop||||pop||||pop||||pop||||pop||||pop||||pop||\\n\"\n \"||pop||||pop||||pop||||pop||||pop||||pop||||pop||||pop||||pop||||pop||\\n\"\n \"||pop||||pop||||pop||||pop||||pop||||pop||||pop||||pop||||pop||||pop||\\n\"\n )\n )\n await ctx.send(embed=data)", "def help(update: Update, context: CallbackContext):\n context.bot.send_message(\n chat_id=update.message.chat_id,\n text=PROMPTS[\"help\"],\n reply_markup=telegram.InlineKeyboardMarkup(\n [\n [\n telegram.InlineKeyboardButton(\n \"Contribute on GitHub!\", url=\"https://github.com/iugov/s4lbot\"\n )\n ]\n ]\n ),\n parse_mode=telegram.ParseMode.MARKDOWN,\n )", "def contact(update: Update) -> None:\n update.message.text(\"@New GEN\")" ]
[ "0.5688839", "0.51731527", "0.514443", "0.5102154", "0.50153357", "0.5012565", "0.4874328", "0.48101324", "0.47793704", "0.46545884", "0.46417007", "0.46417007", "0.46417007", "0.4641089", "0.46038324", "0.45967078", "0.45881125", "0.4585066", "0.45759517", "0.4558107", "0.45423108", "0.45390224", "0.45260185", "0.45243666", "0.4512736", "0.44850183", "0.4480135", "0.44765753", "0.4467348", "0.44617182" ]
0.6398354
0
Function that represents the window in which Character Mods can be applied.
def chars_window(): path_dir = r'Sor_Mods_Storage\chars' char_mods_dict = sor_module.list_char_mods(path_dir=path_dir) # Loading Images to screen chars = tk.Toplevel() mainTitleImg = ImageTk.PhotoImage(Image.open(r'img/axel_daniel2221.png')) imgRandom_label = tk.Label(chars, image=mainTitleImg) title = tk.Label(chars, text="Characters Mods") comboBox_chars = ttk.Combobox(chars, values=list(char_mods_dict.keys())) def apply_char_mod(): char_selected = comboBox_chars.get() result_window = tk.Toplevel() value = '' if char_selected == '': value = f'{value} Please Select an Mod to Apply!' else: sor_module.apply_mod(mod_dir=path_dir, mod=char_selected, type='chars') value = f'Character Mod {char_selected} applied!' result_label = tk.Label(result_window, text=value) result_label.pack() btn_apply = tk.Button(chars, text='Apply', command=apply_char_mod) title.grid(row=0, column=0) comboBox_chars.grid(row=1, column=0) imgRandom_label.grid(row=1, column=1) btn_apply.grid(row=2, column=0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def window_function(self):\n return self._wndfnc, self._wndfnc_norm", "def get_window(self): # real signature unknown; restored from __doc__\n pass", "def _get_window_width(self):", "def modifiers_coding_map_creator(self):\n self.mapCreatorWindow = map_creator.ModifiersMapCreatorWindow()\n self.mapCreatorWindow.move(self.pos())\n self.mapCreatorWindow.resize(CODING_MAP_RESIZE_W, CODING_MAP_RESIZE_H)\n self.mapCreatorWindow.show()", "def __window_print(self):\n pass", "def window(windowX, windowY, occurrency):\n\tdef window0(dx, dy, dz):\n\n\t\tresizeXY(windowX,windowY,occurrency, dx, dz)\n\n\t\tmodel = []\n\t\tfor xIndex in range(len(windowX)):\n\t\t\tyQuotes = []\n\t\t\txSum = sum(windowX[:xIndex])\n\t\t\tfor yIndex in range(len(windowY)):\n\t\t\t\tif(occurrency[xIndex][yIndex] == False):\n\t\t\t\t\tyQuotes.append(-windowY[yIndex])\n\t\t\t\telse:\n\t\t\t\t\tyQuotes.append(windowY[yIndex])\n\t\t\tmodel.append(PROD([QUOTE([-xSum, windowX[xIndex]]), QUOTE(yQuotes)]))\n\n\t\tresult = STRUCT(model)\n\t\tresult = MAP([S2,S3,S1])(PROD([result, Q(dy)]))\n\t\twindowFrame = STRUCT([result])\n\t\twindowFrame = TEXTURE([\"iron.jpg\"])(windowFrame)\n\n\t\tglass = CUBOID([SIZE([1])(result)[0]*0.98,0.001,SIZE([3])(result)[0]*0.95])\n\t\tglass = T([1,2,3])([dx*0.005, dy/2, 0.01])(glass)\n\t\tglass = TEXTURE([\"glass2.jpg\"])(glass) \n\n\t\twindow = STRUCT([windowFrame, glass])\n\t\twindow = S([1,2,3])([dx/SIZE([1])(window)[0], dy/SIZE([2])(window)[0], dz/SIZE([3])(window)[0]])(window)\n\t\t\n\t\treturn window\n\n\treturn window0", "def renderWindowEditor(*args, autoResize: bool=True, blendMode: Union[int, bool]=0, caption:\n Union[AnyStr, bool]=\"\", changeCommand: Union[List[AnyStr, AnyStr, AnyStr,\n AnyStr], bool]=None, clear: Union[List[int, int, float, float, float],\n bool]=None, cmEnabled: bool=True, colorManage: bool=True, compDisplay:\n Union[int, bool]=0, compImageFile: Union[AnyStr, bool]=\"\", control:\n bool=True, currentCamera: Union[AnyStr, bool]=\"\", currentCameraRig:\n Union[AnyStr, bool]=\"\", defineTemplate: AnyStr=\"\", displayImage:\n Union[int, bool]=0, displayImageViewCount: Union[int, bool]=0,\n displayStyle: Union[AnyStr, bool]=\"\", docTag: Union[AnyStr, bool]=\"\",\n doubleBuffer: bool=True, drawAxis: bool=True, editorName: bool=True,\n exists: bool=True, exposure: Union[float, bool]=0.0, filter:\n Union[AnyStr, bool]=\"\", forceMainConnection: Union[AnyStr, bool]=\"\",\n frameImage: bool=True, frameRegion: bool=True, gamma: Union[float,\n bool]=0.0, highlightConnection: Union[AnyStr, bool]=\"\", loadImage:\n AnyStr=\"\", lockMainConnection: bool=True, mainListConnection:\n Union[AnyStr, bool]=\"\", marquee: Union[List[float, float, float, float],\n bool]=None, nbImages: bool=True, nextViewImage: bool=True,\n outputColorManage: bool=True, panel: Union[AnyStr, bool]=\"\", parent:\n Union[AnyStr, bool]=\"\", pcaption: Union[AnyStr, bool]=\"\", realSize:\n bool=True, refresh: bool=True, removeAllImages: bool=True, removeImage:\n bool=True, resetRegion: bool=True, resetViewImage: bool=True, saveImage:\n bool=True, scaleBlue: Union[float, bool]=0.0, scaleGreen: Union[float,\n bool]=0.0, scaleRed: Union[float, bool]=0.0, selectionConnection:\n Union[AnyStr, bool]=\"\", showRegion: Union[List[int, int], bool]=None,\n singleBuffer: bool=True, snapshot: Union[List[AnyStr, int, int],\n bool]=None, snapshotMode: bool=True, stateString: bool=True, stereo:\n Union[int, bool]=0, stereoImageOrientation: Union[List[AnyStr, AnyStr],\n bool]=None, stereoMode: Union[AnyStr, bool]=\"\", toggle: 
bool=True,\n unParent: bool=True, unlockMainConnection: bool=True,\n updateMainConnection: bool=True, useTemplate: AnyStr=\"\", viewImageCount:\n Union[int, bool]=0, viewTransformName: Union[AnyStr, bool]=\"\",\n writeImage: AnyStr=\"\", q=True, query=True, e=True, edit=True,\n **kwargs)->Union[AnyStr, Any]:\n pass", "def _feature_window_function(window_type, window_size, blackman_coeff):\n if window_type == HANNING:\n return torch.hann_window(window_size, periodic=False)\n elif window_type == HAMMING:\n return torch.hamming_window(window_size, periodic=False, alpha=0.54, beta=0.46)\n elif window_type == POVEY:\n # like hanning but goes to zero at edges\n return torch.hann_window(window_size, periodic=False).pow(0.85)\n elif window_type == RECTANGULAR:\n return torch.ones(window_size, dtype=torch.get_default_dtype())\n elif window_type == BLACKMAN:\n a = 2 * math.pi / (window_size - 1)\n window_function = torch.arange(window_size, dtype=torch.get_default_dtype())\n # can't use torch.blackman_window as they use different coefficients\n return blackman_coeff - 0.5 * torch.cos(a * window_function) + \\\n (0.5 - blackman_coeff) * torch.cos(2 * a * window_function)\n else:\n raise Exception('Invalid window type ' + window_type)", "def getwinsize(self):", "def login_window(window):\n\n \"define my variables\"\n enter_name = Text(Point(130,150), \"Enter your Nickname:\")\n backround_Login = Image(Point(130,130),r'Login_Backround.gif')\n max_chr = Text(Point(130,110), \"Maximum character!\")\n name = Text(Point(130,130),\"\")\n illegal_name = Text(Point(130,110),\"Illegal Name!\")\n \"\"\"make my setting\"\"\"\n window.setCoords(0, 0, 256, 256)#sets the window coordinates ;bottom left is (0, 0) and top right is (256, 256)\n window.setBackground(\"White\")\n max_chr.setTextColor(\"Red\")\n illegal_name.setTextColor(\"Red\")\n\n backround_Login.draw(window)\n enter_name.draw(window)\n\n while not window.isClosed():\n new_chr = window.getKey()\n max_chr.undraw()\n illegal_name.undraw()\n if new_chr == \"Return\":\n if len(name.getText()) < 1:\n illegal_name.draw(window)\n else:\n break\n if new_chr == \"space\":\n name.setText(name.getText() + \" \")\n continue\n if new_chr == \"BackSpace\":\n name.setText(name.getText() + new_chr)\n name = delete_chr(name)\n else:\n if len(new_chr)>1:\n continue\n if (ord(new_chr) > 126 or ord(new_chr) < 33):\n continue\n else:\n name.setText(name.getText() + new_chr)\n if len(name.getText()) < 11:\n name.undraw()\n name.draw(window)\n else:\n max_chr.draw(window)\n name.setText(name.getText()[:-1])\n name.undraw()\n name.draw(window)\n enter_name.undraw()\n name.undraw()\n return name.getText()", "def getRenWin(self):\n return self.renWinInteract.GetRenderWindow()", "def win(self):\n return \"Win\"", "def window(main):\r\n main.title(\"BinCryptor 1.0\")\r\n main.update_idletasks()\r\n width = main.winfo_width() #Width of the current screen\r\n height = main.winfo_height() #Height of the current screen\r\n x = (main.winfo_screenwidth() // 2) - (width // 2)\r\n y = (main.winfo_screenheight() // 2) - (height // 2)\r\n main.geometry(f'{width}x{height}+{x}+{y}') #Adjusts the height and width\r", "def _get_code_command_windows():\n while 1:\n print('Use \\'E\\', \\'S\\', \\'W\\', \\'N\\'' +\\\n '[+ 1-9] to move. 
Or \\'q\\' to give up.')\n hitkeys = input()\n if len(hitkeys) > 0:\n char_ = hitkeys[0].upper()\n if char_ in 'ESNW':\n if len(hitkeys) == 2:\n num_ = hitkeys[1]\n if num_ in '123456789':\n return char_ + num_\n else:\n return char_ + '1'\n elif char_ == 'Q':\n return 'end'", "def enemy_window():\n path_dir = r'Sor_Mods_Storage\\enemies'\n enemy_mods_dict = sor_module.list_char_mods(path_dir=path_dir)\n\n # Loading Images to screen\n enemies = tk.Toplevel()\n mainTitleImg = ImageTk.PhotoImage(Image.open(r'img/axel_daniel2221.png'))\n\n imgRandom_label = tk.Label(enemies, image=mainTitleImg)\n title = tk.Label(enemies, text=\"Enemies Mods\")\n\n comboBox_enemies = ttk.Combobox(enemies, values=list(enemy_mods_dict.keys()))\n\n def apply_enemy_mod():\n char_selected = comboBox_enemies.get()\n result_window = tk.Toplevel()\n\n value = ''\n if char_selected == '':\n value = f'{value} Please Select an Mod to Apply!'\n else:\n sor_module.apply_mod(mod_dir=path_dir, mod=char_selected, type='enemies')\n value = f'Enemy Mod {char_selected} applied!'\n\n result_label = tk.Label(result_window, text=value)\n result_label.pack()\n\n btn_apply = tk.Button(enemies, text='Apply', command=apply_enemy_mod)\n\n title.grid(row=0, column=0)\n comboBox_enemies.grid(row=1, column=0)\n imgRandom_label.grid(row=1, column=1)\n btn_apply.grid(row=2, column=0)", "def get_main_window():\n\n pass", "def current_window(self):\n pass", "def gen_window(self, field):\n return random.choice(range(10, 200, 10))", "def stage_window():\n path_dir = r'Sor_Mods_Storage\\stages'\n stage_mods_dict = sor_module.list_char_mods(path_dir=path_dir)\n\n # Loading Images to screen\n stages = tk.Toplevel()\n mainTitleImg = ImageTk.PhotoImage(Image.open(r'img/axel_daniel2221.png'))\n imgRandom_label = tk.Label(stages, image=mainTitleImg)\n title = tk.Label(stages, text=\"Stage Mods\")\n\n comboBox_chars = ttk.Combobox(stages, values=list(stage_mods_dict.keys()))\n\n def apply_stage_mod():\n stage_selected = comboBox_chars.get()\n result_window = tk.Toplevel()\n\n value = ''\n if stage_selected == '':\n value = f'{value} Please Select an Stage Mod to Apply!'\n else:\n sor_module.apply_mod(mod_dir=path_dir, mod=stage_selected, type='stages')\n value = f'Enemy Mod {stage_selected} applied!'\n\n result_label = tk.Label(result_window, text=value)\n result_label.pack()\n\n btn_apply = tk.Button(stages, text='Apply', command=apply_stage_mod)\n\n title.grid(row=0, column=0)\n comboBox_chars.grid(row=1, column=0)\n imgRandom_label.grid(row=1, column=1)\n btn_apply.grid(row=2, column=0)", "def update_window_formatting(self):\n self.update_sequence_window()\n if self.pDB_open:\n self.pDB_open.refresh_primer()\n if self.show_comp_sequence.get==1:\n self.sequ_win.refresh_DNAseq()\n return", "def answer(window_string):\n window = Window(window_string)\n if(window.f==1):\n return window.w * window.h\n else:\n return -1", "def get_word_window(self, pattern, tokens, constraints):\n split_pattern = pattern.split()\n if len(split_pattern) > 1:\n textsnippets = self.__get_word_window_more_words_help(split_pattern, tokens, constraints)\n else:\n textsnippets = self.__get_word_window_one_word_help(pattern, tokens, constraints)\n print(textsnippets)\n return textsnippets", "def show_window_fields(self):\n self.analyze()\n items = []\n for ba in analyzer.window_actions:\n items.append(\n \"{0} : {1}\".format(\n ba.full_name(), layout_fields(ba)))\n\n return rstgen.ul(items)", "def __window_prompt(self, text):\n return True", "def wm(self):\n return 
self.position", "def alt_tab_win(number: int):\n _alt_tab(number)", "def menu_screen(win):\n\tpass", "def _window(self, get_lims=False):\n\t\timg_h, img_w = self.od_model.img_shape\n\t\th_llim = 0\n\t\tw_llim = img_w // 3\n\t\th_ulim = img_h - (img_h // 4)\n\t\tw_ulim = 1- wllim\n\n\t\tif get_lims:\n\t\t\treturn (h_llim, h_ulim), (w_llim, w_ulim)\n\n\t\twindow = slice(h_llim, h_ulim), slice(w_llim, w_ulim)\n\t\treturn window", "def pallete_window():\n path_dir = r'Sor_Mods_Storage\\palletes'\n char_mods_dict = sor_module.list_char_mods(path_dir=path_dir)\n\n # Loading Images to screen\n palletes = tk.Toplevel()\n mainTitleImg = ImageTk.PhotoImage(Image.open(r'img/axel_daniel2221.png'))\n imgRandom_label = tk.Label(palletes, image=mainTitleImg)\n title = tk.Label(palletes, text=\"Pallete Mods\")\n\n comboBox_palletes = ttk.Combobox(palletes, values=list(char_mods_dict.keys()))\n\n def apply_pallete_mod():\n pallete_selected = comboBox_palletes.get()\n result_window = tk.Toplevel()\n\n value = ''\n if pallete_selected == '':\n value = f'{value} Please Select an Pallete to Apply!'\n else:\n sor_module.apply_mod(mod_dir=path_dir, mod=pallete_selected, type='palletes')\n value = f'Enemy Mod {pallete_selected} applied!'\n\n result_label = tk.Label(result_window, text=value)\n result_label.pack()\n\n btn_apply = tk.Button(palletes, text='Apply', command=apply_pallete_mod)\n\n title.grid(row=0, column=0)\n comboBox_palletes.grid(row=1, column=0)\n imgRandom_label.grid(row=1, column=1)\n btn_apply.grid(row=2, column=0)", "def columnWin( self ):\n\n for x in list(range(0,3)):\n firstVal = self.__grid[x]\n secondVal = self.__grid[x+3]\n thirdVal = self.__grid[x+6]\n\n compiledVal = str(firstVal) + str(secondVal) + str(thirdVal)\n\n if compiledVal.lower() == 'xxx':\n return 'X'\n\n elif compiledVal.lower() == 'ooo':\n return 'O'\n\n return None" ]
[ "0.60196453", "0.5610308", "0.5482973", "0.5434321", "0.5421594", "0.5412486", "0.5364167", "0.5364138", "0.5340363", "0.5302406", "0.530197", "0.5284308", "0.5282597", "0.5280957", "0.52492845", "0.52104515", "0.517051", "0.5156331", "0.5152639", "0.51405567", "0.51250046", "0.5121234", "0.51160693", "0.5083318", "0.50644124", "0.50596154", "0.50479925", "0.50252306", "0.5023388", "0.50059193" ]
0.65728295
0
Function that represents the window in which Enemy Mods can be applied.
def enemy_window(): path_dir = r'Sor_Mods_Storage\enemies' enemy_mods_dict = sor_module.list_char_mods(path_dir=path_dir) # Loading Images to screen enemies = tk.Toplevel() mainTitleImg = ImageTk.PhotoImage(Image.open(r'img/axel_daniel2221.png')) imgRandom_label = tk.Label(enemies, image=mainTitleImg) title = tk.Label(enemies, text="Enemies Mods") comboBox_enemies = ttk.Combobox(enemies, values=list(enemy_mods_dict.keys())) def apply_enemy_mod(): char_selected = comboBox_enemies.get() result_window = tk.Toplevel() value = '' if char_selected == '': value = f'{value} Please Select an Mod to Apply!' else: sor_module.apply_mod(mod_dir=path_dir, mod=char_selected, type='enemies') value = f'Enemy Mod {char_selected} applied!' result_label = tk.Label(result_window, text=value) result_label.pack() btn_apply = tk.Button(enemies, text='Apply', command=apply_enemy_mod) title.grid(row=0, column=0) comboBox_enemies.grid(row=1, column=0) imgRandom_label.grid(row=1, column=1) btn_apply.grid(row=2, column=0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_window(self): # real signature unknown; restored from __doc__\n pass", "def get_main_window():\n\n pass", "def createGameWindow():\n gameWindow = g.GraphWin(\"game\", 450, 800) #Window to show game\n\n return gameWindow", "def maya_window():\n return to_qwidget(\"MayaWindow\")", "def stage_window():\n path_dir = r'Sor_Mods_Storage\\stages'\n stage_mods_dict = sor_module.list_char_mods(path_dir=path_dir)\n\n # Loading Images to screen\n stages = tk.Toplevel()\n mainTitleImg = ImageTk.PhotoImage(Image.open(r'img/axel_daniel2221.png'))\n imgRandom_label = tk.Label(stages, image=mainTitleImg)\n title = tk.Label(stages, text=\"Stage Mods\")\n\n comboBox_chars = ttk.Combobox(stages, values=list(stage_mods_dict.keys()))\n\n def apply_stage_mod():\n stage_selected = comboBox_chars.get()\n result_window = tk.Toplevel()\n\n value = ''\n if stage_selected == '':\n value = f'{value} Please Select an Stage Mod to Apply!'\n else:\n sor_module.apply_mod(mod_dir=path_dir, mod=stage_selected, type='stages')\n value = f'Enemy Mod {stage_selected} applied!'\n\n result_label = tk.Label(result_window, text=value)\n result_label.pack()\n\n btn_apply = tk.Button(stages, text='Apply', command=apply_stage_mod)\n\n title.grid(row=0, column=0)\n comboBox_chars.grid(row=1, column=0)\n imgRandom_label.grid(row=1, column=1)\n btn_apply.grid(row=2, column=0)", "def GetWindow(self):\r\n\r\n return self.window", "def on_activate(self, caller):\n self.window = GameWindow()\n self.add_window(self.window)", "def chars_window():\n path_dir = r'Sor_Mods_Storage\\chars'\n char_mods_dict = sor_module.list_char_mods(path_dir=path_dir)\n\n # Loading Images to screen\n chars = tk.Toplevel()\n mainTitleImg = ImageTk.PhotoImage(Image.open(r'img/axel_daniel2221.png'))\n imgRandom_label = tk.Label(chars, image=mainTitleImg)\n title = tk.Label(chars, text=\"Characters Mods\")\n\n comboBox_chars = ttk.Combobox(chars, values=list(char_mods_dict.keys()))\n\n def apply_char_mod():\n char_selected = comboBox_chars.get()\n result_window = tk.Toplevel()\n\n value = ''\n if char_selected == '':\n value = f'{value} Please Select an Mod to Apply!'\n else:\n sor_module.apply_mod(mod_dir=path_dir, mod=char_selected, type='chars')\n value = f'Character Mod {char_selected} applied!'\n\n result_label = tk.Label(result_window, text=value)\n result_label.pack()\n\n btn_apply = tk.Button(chars, text='Apply', command=apply_char_mod)\n\n title.grid(row=0, column=0)\n comboBox_chars.grid(row=1, column=0)\n imgRandom_label.grid(row=1, column=1)\n btn_apply.grid(row=2, column=0)", "def show(self, window):\r\n\r\n return", "def window(self):\n return self._window", "def window(self):\n return self._window", "def showWindow(*args, **kwargs)->None:\n pass", "def __init_window(self) -> pygame.Surface:\n pygame.display.set_caption(CAPTION)\n win = pygame.display.set_mode((WIDTH, HEIGHT))\n \n return win", "def current_window(self):\n pass", "def window(main):\r\n main.title(\"BinCryptor 1.0\")\r\n main.update_idletasks()\r\n width = main.winfo_width() #Width of the current screen\r\n height = main.winfo_height() #Height of the current screen\r\n x = (main.winfo_screenwidth() // 2) - (width // 2)\r\n y = (main.winfo_screenheight() // 2) - (height // 2)\r\n main.geometry(f'{width}x{height}+{x}+{y}') #Adjusts the height and width\r", "def draw(self): \n pygame.event.clear()\n self.window = ocempgui.widgets.Box(GG.utils.SCREEN_SZ[0], GG.utils.SCREEN_SZ[1])\n self.paintScreen()\n self.paintAvatar()\n self.paintTags()\n self.paintCustomizeZone()\n 
self.paintButtons()\n self.window.zOrder = 90000\n self.window.depth = 2\n return self.window", "def _create_example_window():\n return Window({\"warning\": False, \"state\": \"close\"})", "def automatic_window(self):\n \n #Create window and label\n automatic_window = tk.Toplevel(self)\n windowtext = self.translate('How many days do you want the simulation to run for?') \n automatic_window.title(windowtext)\n automatic_window.config(bg=self.default_background)\n lbl_text = tk.Label(automatic_window, text=windowtext,\n bg=self.default_background)\n lbl_text.grid(column=0, row=0)\n \n #Create input box\n self.auto_var = tk.IntVar()\n self.auto_var.set(1)\n auto_menu = tk.Entry(automatic_window)\n auto_menu.insert(0,0)\n auto_menu.configure(width=5)\n auto_menu.grid(column=0, row=1)\n\n #Create button to initate the simulation\n auto_run_button = tk.Button(automatic_window, text=self.translate('Run Simulation'), \n command = lambda: self.auto_run(automatic_window, int(auto_menu.get())),\n bg=self.button_color,\n highlightbackground=self.highlight_color)\n auto_run_button.grid(column=0, row=2)\n \n #Center the window on the screen\n automatic_window.withdraw()\n automatic_window.update_idletasks() # Update \"requested size\" from geometry manager\n x = (self.screenwidth - automatic_window.winfo_reqwidth()) / 2\n y = (self.screenheight - automatic_window.winfo_reqheight()) / 2\n automatic_window.geometry(\"+%d+%d\" % (x, y))\n automatic_window.deiconify()", "def create_board_window():\n wn = turtle.Screen()\n wn.setworldcoordinates(0, 0, WIDTH+1, HEIGHT+1)\n t = turtle.Turtle()\n t.pensize(1)\n t.speed(0)\n t.hideturtle()\n return (wn, t)", "def details_window(self, instance: Union[Nobleman, Location]):\n window = tk.Toplevel()\n window.title(instance.name)\n window.protocol(\"WM_DELETE_WINDOW\",\n partial(self.close_details_window, instance))\n self.register_extra_window(instance, window)\n self.generate_window_content(instance, window)", "def mayaMainWindow():\n OpenMayaUI.MQtUtil.mainWindow()\n ptr = OpenMayaUI.MQtUtil.mainWindow()\n\n return wrapInstance(long(ptr), QtWidgets.QWidget)", "def mayaMainWindow():\n OpenMayaUI.MQtUtil.mainWindow()\n ptr = OpenMayaUI.MQtUtil.mainWindow()\n\n return wrapInstance(long(ptr), QtWidgets.QWidget)", "def renderWindowEditor(*args, autoResize: bool=True, blendMode: Union[int, bool]=0, caption:\n Union[AnyStr, bool]=\"\", changeCommand: Union[List[AnyStr, AnyStr, AnyStr,\n AnyStr], bool]=None, clear: Union[List[int, int, float, float, float],\n bool]=None, cmEnabled: bool=True, colorManage: bool=True, compDisplay:\n Union[int, bool]=0, compImageFile: Union[AnyStr, bool]=\"\", control:\n bool=True, currentCamera: Union[AnyStr, bool]=\"\", currentCameraRig:\n Union[AnyStr, bool]=\"\", defineTemplate: AnyStr=\"\", displayImage:\n Union[int, bool]=0, displayImageViewCount: Union[int, bool]=0,\n displayStyle: Union[AnyStr, bool]=\"\", docTag: Union[AnyStr, bool]=\"\",\n doubleBuffer: bool=True, drawAxis: bool=True, editorName: bool=True,\n exists: bool=True, exposure: Union[float, bool]=0.0, filter:\n Union[AnyStr, bool]=\"\", forceMainConnection: Union[AnyStr, bool]=\"\",\n frameImage: bool=True, frameRegion: bool=True, gamma: Union[float,\n bool]=0.0, highlightConnection: Union[AnyStr, bool]=\"\", loadImage:\n AnyStr=\"\", lockMainConnection: bool=True, mainListConnection:\n Union[AnyStr, bool]=\"\", marquee: Union[List[float, float, float, float],\n bool]=None, nbImages: bool=True, nextViewImage: bool=True,\n outputColorManage: bool=True, panel: Union[AnyStr, 
bool]=\"\", parent:\n Union[AnyStr, bool]=\"\", pcaption: Union[AnyStr, bool]=\"\", realSize:\n bool=True, refresh: bool=True, removeAllImages: bool=True, removeImage:\n bool=True, resetRegion: bool=True, resetViewImage: bool=True, saveImage:\n bool=True, scaleBlue: Union[float, bool]=0.0, scaleGreen: Union[float,\n bool]=0.0, scaleRed: Union[float, bool]=0.0, selectionConnection:\n Union[AnyStr, bool]=\"\", showRegion: Union[List[int, int], bool]=None,\n singleBuffer: bool=True, snapshot: Union[List[AnyStr, int, int],\n bool]=None, snapshotMode: bool=True, stateString: bool=True, stereo:\n Union[int, bool]=0, stereoImageOrientation: Union[List[AnyStr, AnyStr],\n bool]=None, stereoMode: Union[AnyStr, bool]=\"\", toggle: bool=True,\n unParent: bool=True, unlockMainConnection: bool=True,\n updateMainConnection: bool=True, useTemplate: AnyStr=\"\", viewImageCount:\n Union[int, bool]=0, viewTransformName: Union[AnyStr, bool]=\"\",\n writeImage: AnyStr=\"\", q=True, query=True, e=True, edit=True,\n **kwargs)->Union[AnyStr, Any]:\n pass", "def get_window(self):\n if self.isWindow:\n return self\n else:\n return self.window", "def maya_main_window():\n main_window = omui.MQtUtil.mainWindow()\n return wrapInstance(long(main_window), QtWidgets.QWidget)", "def pallete_window():\n path_dir = r'Sor_Mods_Storage\\palletes'\n char_mods_dict = sor_module.list_char_mods(path_dir=path_dir)\n\n # Loading Images to screen\n palletes = tk.Toplevel()\n mainTitleImg = ImageTk.PhotoImage(Image.open(r'img/axel_daniel2221.png'))\n imgRandom_label = tk.Label(palletes, image=mainTitleImg)\n title = tk.Label(palletes, text=\"Pallete Mods\")\n\n comboBox_palletes = ttk.Combobox(palletes, values=list(char_mods_dict.keys()))\n\n def apply_pallete_mod():\n pallete_selected = comboBox_palletes.get()\n result_window = tk.Toplevel()\n\n value = ''\n if pallete_selected == '':\n value = f'{value} Please Select an Pallete to Apply!'\n else:\n sor_module.apply_mod(mod_dir=path_dir, mod=pallete_selected, type='palletes')\n value = f'Enemy Mod {pallete_selected} applied!'\n\n result_label = tk.Label(result_window, text=value)\n result_label.pack()\n\n btn_apply = tk.Button(palletes, text='Apply', command=apply_pallete_mod)\n\n title.grid(row=0, column=0)\n comboBox_palletes.grid(row=1, column=0)\n imgRandom_label.grid(row=1, column=1)\n btn_apply.grid(row=2, column=0)", "def _get_window_width(self):", "def window(self) -> pulumi.Input['AssetModelMetricWindowArgs']:\n return pulumi.get(self, \"window\")", "def maya_main_window():\n main_window_ptr = omui.MQtUtil.mainWindow()\n if sys.version_info.major >= 3:\n return wrapInstance(int(main_window_ptr), QtWidgets.QWidget)\n else:\n return wrapInstance(long(main_window_ptr), QtWidgets.QWidget)", "def get_parent_window(self): # real signature unknown; restored from __doc__\n pass" ]
[ "0.64761674", "0.63803315", "0.6314744", "0.6216543", "0.62010247", "0.6149478", "0.60766095", "0.60618967", "0.6037659", "0.60210836", "0.60210836", "0.600711", "0.6003741", "0.5992173", "0.59492654", "0.58889663", "0.58503634", "0.57341456", "0.56976503", "0.56692207", "0.56683326", "0.56683326", "0.5647913", "0.5622526", "0.5618607", "0.56168467", "0.5615513", "0.5606547", "0.5605087", "0.5602058" ]
0.678502
0
Function that represents the window to which Stage Mods can be applied.
def stage_window():
    path_dir = r'Sor_Mods_Storage\stages'
    stage_mods_dict = sor_module.list_char_mods(path_dir=path_dir)

    # Loading Images to screen
    stages = tk.Toplevel()
    mainTitleImg = ImageTk.PhotoImage(Image.open(r'img/axel_daniel2221.png'))
    imgRandom_label = tk.Label(stages, image=mainTitleImg)
    title = tk.Label(stages, text="Stage Mods")

    comboBox_chars = ttk.Combobox(stages, values=list(stage_mods_dict.keys()))

    def apply_stage_mod():
        stage_selected = comboBox_chars.get()
        result_window = tk.Toplevel()

        value = ''
        if stage_selected == '':
            value = f'{value} Please Select a Stage Mod to Apply!'
        else:
            sor_module.apply_mod(mod_dir=path_dir, mod=stage_selected, type='stages')
            value = f'Stage Mod {stage_selected} applied!'

        result_label = tk.Label(result_window, text=value)
        result_label.pack()

    btn_apply = tk.Button(stages, text='Apply', command=apply_stage_mod)

    title.grid(row=0, column=0)
    comboBox_chars.grid(row=1, column=0)
    imgRandom_label.grid(row=1, column=1)
    btn_apply.grid(row=2, column=0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_window(self): # real signature unknown; restored from __doc__\n pass", "def get_main_window():\n\n pass", "def GetWindow(self):\r\n\r\n return self.window", "def window(self):\n return self._window", "def window(self):\n return self._window", "def showWindow(*args, **kwargs)->None:\n pass", "def current_window(self):\n pass", "def createGameWindow():\n gameWindow = g.GraphWin(\"game\", 450, 800) #Window to show game\n\n return gameWindow", "def maya_window():\n return to_qwidget(\"MayaWindow\")", "def show(self, window):\r\n\r\n return", "def window(main):\r\n main.title(\"BinCryptor 1.0\")\r\n main.update_idletasks()\r\n width = main.winfo_width() #Width of the current screen\r\n height = main.winfo_height() #Height of the current screen\r\n x = (main.winfo_screenwidth() // 2) - (width // 2)\r\n y = (main.winfo_screenheight() // 2) - (height // 2)\r\n main.geometry(f'{width}x{height}+{x}+{y}') #Adjusts the height and width\r", "def window(self) -> pulumi.Input['AssetModelMetricWindowArgs']:\n return pulumi.get(self, \"window\")", "def _get_window_width(self):", "def window(self) -> Optional[pulumi.Input['MaintenanceWindowArgs']]:\n return pulumi.get(self, \"window\")", "def get_window(self):\n if self.isWindow:\n return self\n else:\n return self.window", "def window(self):\n return self.attribute('VW')", "def window_function(self):\n return self._wndfnc, self._wndfnc_norm", "def __init_window(self) -> pygame.Surface:\n pygame.display.set_caption(CAPTION)\n win = pygame.display.set_mode((WIDTH, HEIGHT))\n \n return win", "def maya_main_window():\n main_window_ptr = omui.MQtUtil.mainWindow()\n if sys.version_info.major >= 3:\n return wrapInstance(int(main_window_ptr), QtWidgets.QWidget)\n else:\n return wrapInstance(long(main_window_ptr), QtWidgets.QWidget)", "def GetWindow(self):\r\n\r\n return self._wnd", "def show_window_fields(self):\n self.analyze()\n items = []\n for ba in analyzer.window_actions:\n items.append(\n \"{0} : {1}\".format(\n ba.full_name(), layout_fields(ba)))\n\n return rstgen.ul(items)", "def get_parent_window(self): # real signature unknown; restored from __doc__\n pass", "def chars_window():\n path_dir = r'Sor_Mods_Storage\\chars'\n char_mods_dict = sor_module.list_char_mods(path_dir=path_dir)\n\n # Loading Images to screen\n chars = tk.Toplevel()\n mainTitleImg = ImageTk.PhotoImage(Image.open(r'img/axel_daniel2221.png'))\n imgRandom_label = tk.Label(chars, image=mainTitleImg)\n title = tk.Label(chars, text=\"Characters Mods\")\n\n comboBox_chars = ttk.Combobox(chars, values=list(char_mods_dict.keys()))\n\n def apply_char_mod():\n char_selected = comboBox_chars.get()\n result_window = tk.Toplevel()\n\n value = ''\n if char_selected == '':\n value = f'{value} Please Select an Mod to Apply!'\n else:\n sor_module.apply_mod(mod_dir=path_dir, mod=char_selected, type='chars')\n value = f'Character Mod {char_selected} applied!'\n\n result_label = tk.Label(result_window, text=value)\n result_label.pack()\n\n btn_apply = tk.Button(chars, text='Apply', command=apply_char_mod)\n\n title.grid(row=0, column=0)\n comboBox_chars.grid(row=1, column=0)\n imgRandom_label.grid(row=1, column=1)\n btn_apply.grid(row=2, column=0)", "def _create_example_window():\n return Window({\"warning\": False, \"state\": \"close\"})", "def access_window(self):\n return self._access_window", "def mayaMainWindow():\n OpenMayaUI.MQtUtil.mainWindow()\n ptr = OpenMayaUI.MQtUtil.mainWindow()\n\n return wrapInstance(long(ptr), QtWidgets.QWidget)", "def mayaMainWindow():\n 
OpenMayaUI.MQtUtil.mainWindow()\n ptr = OpenMayaUI.MQtUtil.mainWindow()\n\n return wrapInstance(long(ptr), QtWidgets.QWidget)", "def on_activate(self, caller):\n self.window = GameWindow()\n self.add_window(self.window)", "def enemy_window():\n path_dir = r'Sor_Mods_Storage\\enemies'\n enemy_mods_dict = sor_module.list_char_mods(path_dir=path_dir)\n\n # Loading Images to screen\n enemies = tk.Toplevel()\n mainTitleImg = ImageTk.PhotoImage(Image.open(r'img/axel_daniel2221.png'))\n\n imgRandom_label = tk.Label(enemies, image=mainTitleImg)\n title = tk.Label(enemies, text=\"Enemies Mods\")\n\n comboBox_enemies = ttk.Combobox(enemies, values=list(enemy_mods_dict.keys()))\n\n def apply_enemy_mod():\n char_selected = comboBox_enemies.get()\n result_window = tk.Toplevel()\n\n value = ''\n if char_selected == '':\n value = f'{value} Please Select an Mod to Apply!'\n else:\n sor_module.apply_mod(mod_dir=path_dir, mod=char_selected, type='enemies')\n value = f'Enemy Mod {char_selected} applied!'\n\n result_label = tk.Label(result_window, text=value)\n result_label.pack()\n\n btn_apply = tk.Button(enemies, text='Apply', command=apply_enemy_mod)\n\n title.grid(row=0, column=0)\n comboBox_enemies.grid(row=1, column=0)\n imgRandom_label.grid(row=1, column=1)\n btn_apply.grid(row=2, column=0)", "def get_active_window(self): # real signature unknown; restored from __doc__\n pass" ]
[ "0.6928395", "0.684634", "0.6601684", "0.6564466", "0.6564466", "0.64463425", "0.6378235", "0.6353312", "0.6351047", "0.629362", "0.62887776", "0.6145612", "0.6136457", "0.6110733", "0.60973674", "0.6074384", "0.6053072", "0.6018539", "0.59742665", "0.5963318", "0.59591293", "0.59559196", "0.59359986", "0.59145725", "0.5905236", "0.5902226", "0.5902226", "0.58956766", "0.58951306", "0.5869485" ]
0.6935589
0
delete the specified intent from your account.
def delete_intent(intent_name):
    try:
        client.get_intent(
            name=intent_name,
            versionOrAlias='$LATEST'
        )
        answer = raw_input("Do you want to delete %s from your account (Y/y for YES, other NO):" %(intent_name))
        if answer in ['Y', 'y']:
            client.delete_intent(
                name=intent_name
            )
            print "You chose to delete the intent %s, deleted..." %(intent_name)
        else:
            print "You chose not to delete the intent %s, exiting..." %(intent_name)
    except:
        print "There is no intent called %s, exiting..." %(intent_name)
    return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_account(self, account):\n \n pass", "def delete(self, args):\n try:\n db = get_db('intents')\n intents = db.delete_intent(args['intent'])\n resp = jsonify(intents=intents)\n resp.status_code = 200\n return resp\n except DatabaseError as error:\n resp = jsonify(error=error)\n resp.status_code = 500\n return resp\n except DatabaseInputError as error:\n resp = jsonify(error=error)\n resp.status_code = 400\n return resp", "def delete_activity():\n pass", "def delete_account(self):\n Credential.account_list.remove(self)", "def delete_account(self):\n signals.before_gameaccount_deleted.send(gameaccount=self.gameaccount)\n db.delete(self.gameaccount)", "def delete(self, accountId, reason, **kwargs):\n #put your code here to implement this method\n raise NotImplementedError (\"not implemented method delete\")", "def delete(account):\n account.stripe_account.delete()\n account.delete()", "def account_delete(request):\n fields = [\"email\", \"token\"]\n\n # serializes the quert string to a dict (neeto)\n args = request.args\n\n query_validation = validate_query_params(args, fields)\n # check that body validation succeeded\n if query_validation[1] != 200:\n return query_validation\n\n auth = azure_refresh_token(args[\"token\"])\n if not auth[0]:\n return http400(\"Not Authenticated\")\n\n account_db = Database(\"accounts\")\n storage = Storage(\"biit_profiles\")\n try:\n account_db.delete(args[\"email\"])\n storage.delete(args[\"email\"] + \".jpg\")\n return http200(\"Account deleted\")\n except:\n return http400(\"Error in account deletion\")", "def delete(self, data):\n url = self.base_url + '/v2/account/delete/'\n return self._call_vendasta(url, data)", "def delete_account(self):\n print('-=' * 12 + \" Delete Account \" + '-=' * 12)\n mob_num, password = self._input_mob_num('Mobile Number :'), input(\"Password: \")\n delete_flag = self.auth.delete_account(mob_num, password)\n if delete_flag:\n print(\"The account is permently deleted\")\n self.logging_page()\n else:\n print(\"Mobile Number or/and password is/are Invaild \\n\" + '-=' * 30)\n options = {1: self.delete_account, 2: self.logging_page, 3: self.exit}\n print_out = \"(1) Try Again \\n (2) Back to Logging Page \\n (3) Exit\"\n self._take_option(options, print_out)", "def delete(self, application_id):", "def delete_my_account():\n # Remove user ownerships\n for p in current_user.projects:\n p.user_id = None\n p.save()\n # Delete user posts\n [ a.delete() for a in current_user.activities ]\n # Delete user account\n current_user.delete()\n logout_user()\n flash('We are sorry to see you go. 
Your profile has been deleted.', 'info')\n return redirect(url_for('public.home'))", "def delete_account():\n\n username = current_user.get_id()\n app.delete_user(username)\n logger.info('Deleted account of user ' + username + '.')\n logout_user()\n logger.info('Logged ' + username + ' out after account deletion.')\n return Response('Account successfully deleted.')", "def delete_user():", "def delete(self, account_id):\n self.client.delete_account(account_id)", "def delete_account():\n print(\"\\n\")\n print(messages.delete_account)\n u_id = pyip.inputInt(\"User Id: \", greaterThan=0)\n\n credentials = {\"id\":u_id}\n result = BankOperationsBackend.delete_account(credentials)\n start_again() if result else BankOperationsUi.delete_account()", "def remove_address(intent, session):\n sess_data = session.setdefault('attributes', {})\n sess_data['remove_address'] = True\n\n # Retrieve stored data just to check if it exists or not.\n user_data = database.get_user_data(session['user']['userId'])\n if not user_data:\n return reply.build(\"I already don't remember any addresses for you.\",\n is_end=True)\n elif sess_data.get('awaiting_confirmation'):\n # The user has requested removal and\n # we requested confirmation\n if intent['name'] == 'AMAZON.NoIntent':\n return reply.build(\"Okay, keeping your stored addresses.\",\n is_end=True)\n elif intent['name'] == 'AMAZON.YesIntent':\n succ = database.delete_user(session['user']['userId'])\n if succ:\n return reply.build(\"Okay, I've forgotten all the addresses \"\n \"you told me.\", is_end=True)\n else:\n # Only get here if the database interaction fails somehow\n return reply.build(\"Huh. Something went wrong.\", is_end=True)\n else:\n # Shouldn't ever get here.\n return reply.build(\"Sorry, I don't know what you mean. 
\"\n \"Try again?\", persist=sess_data, is_end=False)\n else:\n # Prompt the user for confirmation of data removal.\n sess_data['awaiting_confirmation'] = True\n return reply.build(\"Do you really want me to forget the addresses \"\n \"you gave me?\",\n reprompt='Say \"yes\" to delete all stored addresses '\n 'or \"no\" to not change anything.',\n persist=sess_data,\n is_end=False)", "def delete(self, args, intent):\n if 'all' in args.keys() and args['all'] == True:\n try:\n db = get_db('expressions')\n db_results = db.delete_all_intent_expressions(intent)\n expressions = [x[1] for x in db_results]\n resp = jsonify(intent=intent, expressions=expressions)\n return resp\n except DatabaseError as error:\n resp = jsonify(error=error.value)\n resp.status_code = 500\n return resp\n except DatabaseInputError as error:\n resp = jsonify(error=error.value)\n resp.status_code = 400\n return resp \n elif args['expressions']:\n try:\n db = get_db('expressions')\n db_results = db.delete_expressions_from_intent(intent, args['expressions'])\n expressions = [x[1] for x in db_results]\n resp = jsonify(intent=intent, expressions=expressions, deleted_expressions=args['expressions'])\n return resp\n except DatabaseError as error:\n resp = jsonify(error=error.value)\n resp.status_code = 500\n return resp\n except DatabaseInputError as error:\n resp = jsonify(error=error.value)\n resp.status_code = 400\n return resp", "def delete(ctx, query, force, password, remember):\n\n _init_session(ctx, password, remember)\n session = ctx.obj[\"session\"]\n creds = session.list_credentials()\n hits = _search(creds, query, True)\n if len(hits) == 0:\n click.echo(\"No matches, nothing to be done.\")\n elif len(hits) == 1:\n cred = hits[0]\n if force or (\n click.confirm(\n f\"Delete account: {_string_id(cred)} ?\",\n default=False,\n err=True,\n )\n ):\n session.delete_credential(cred.id)\n click.echo(f\"Deleted {_string_id(cred)}.\")\n else:\n click.echo(\"Deletion aborted by user.\")\n\n else:\n _error_multiple_hits(ctx, hits)", "def delete_activity(recipe_id, activity_id):\n if 'name' in session:\n PLAN.users[session['name']].delete_activity(recipe_id, activity_id)\n return redirect(url_for('view_activities', recipe_id=recipe_id))\n return redirect(url_for('log_in'))", "def delete_credential(self, credential):\r\n return self.delete(self.credential_path % (credential))", "def delete(ctx):\n click.echo('deleting')\n ctx.delete()\n click.echo('done')", "def delete_account(request):\n ubanks = request.user.userbank.all()\n for ubank in ubanks:\n ubank.delete()\n user = request.user\n log_out(request)\n user.delete()\n return HttpResponse(\"Account succesfully deleted\")", "def delete_app(AppId=None):\n pass", "def delete_user_account(connection,user):\r\n with connection:\r\n connection.execute(DELETE_SPECIFIC_USER,(user,))", "def delete():", "def delete(cls, aws_cloud_account_id: str):\n\t\tpass", "def delete_account(self, accountid):\n payload = {'appkey': self._lr_object._get_api_key(), 'appsecret': self._lr_object._get_api_secret(),\n 'accountid': accountid}\n url = SECURE_API_URL + \"raas/v1/account/delete\"\n return self._lr_object._get_json(url, payload)", "def _delete_spam_action(act, session):\n if act is None:\n return\n act.item.spam_flag_counter -= 1\n session.delete(act)", "def destroy(self):\n\t\tos.remove(self.account_file)" ]
[ "0.7136622", "0.7109074", "0.6793135", "0.6618114", "0.6454273", "0.6342631", "0.63189256", "0.62924826", "0.6259348", "0.6142261", "0.6112738", "0.60232806", "0.5955118", "0.59354204", "0.5897123", "0.58737737", "0.5854009", "0.5853782", "0.5827274", "0.5824352", "0.57835615", "0.5770079", "0.57689846", "0.57591033", "0.57539934", "0.5744825", "0.57406825", "0.5740482", "0.5739148", "0.57342786" ]
0.7211831
0
demo function to get the intent's latest configuration
def get_intent_configuration(intent_name, version="$LATEST"):
    response = client.get_intent(
        name=intent_name,
        version=version
    )
    return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_config():\n return CONFIG", "def getConfig(self):\n pass", "def config(self) -> \"AutomationConfig\":", "def config(self) -> \"AutomationConfig\":", "def get_config(self,config):\n return self.parser.get(\"main\", config)", "def get_details(self):\n return self.__config_data", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config" ]
[ "0.6338923", "0.6251342", "0.61532223", "0.61532223", "0.6144978", "0.60848004", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894", "0.6024894" ]
0.73107255
0
a helper function to print the intent information in a formatted way
def format_print_jobs(intent):
    print "\nintentName: %s" %(intent['name'])
    for k,v in intent.iteritems():
        if k <> 'name':
            print "\t" + str(k) + ": " + str(v)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def printhelp():", "def info(self):", "def info(self):", "def details(self) -> str:\n return f\"- **language**: [{self.language}]\\n\" \\\n f\"- **opengame**: [{self.opengame}]\\n\" \\\n f\"- **system**: [{self.system}]\\n\" \\\n f\"- **mode**: [{self.mode}]\\n\" \\\n f\"- **attributes**: [{self.attributes}]\\n \" \\\n f\"- **score_threshold**: [{self.score_threshold}]\\n \" \\\n f\"- **monsters**: [{self.monsters}]\\n\"", "def info(self, *args, **kwargs):", "def cmd_info(self):\n self.cmd_author()\n self.cmd_date()\n log = self.get_log() or ''\n print(len(log))\n print(log)", "def printinfo(assign, question):\n print(\"Last Name: Bell\")\n print (\"First Name: Daniel\")\n print(\"Student ID: 282911\")\n print(\"Course: CPSC 231\")\n print(\"Tutorial Section: T02\")\n print(\"Assignment: %d\" %assign)\n print(\"Question: %s\" %question)\n print(\"\")", "def print_info(*args):\n print(CGREEN2 + str(*args) + CEND)", "def print_standout(info):\n sys.stdout.write(\"Info: %s\" % info)\n sys.stdout.write(\"\\n\")\n sys.stdout.flush()", "def info() -> None:", "def printDictIntents(self):\n result = \", \".join(str(value.tag) for key, value in self.dicIntents.items())\n self.ouput.exec('Las Intenciones del ChatBot \"'+self.name+'\" son:'+result)", "async def _info(self, ctx: Context):\n\n embed = discord.Embed(colour=await ctx.embed_colour())\n\n perm_int = discord.Permissions(268494928)\n\n data = await self.bot.application_info()\n invite_url = discord.utils.oauth_url(data.id, permissions=perm_int)\n\n embed.description = (\n \"TvM Assistant is a Discord bot with utility commands to make hosting TvMs easier.\"\n \"\\n\\nSome of the bot features include:\"\n \"\\n\\n- Setup roles and channel creation\"\n \"\\n- Management of sign-ups, sign-outs, spectators and replacements\"\n \"\\n- In-built logging to detect and ignore private channels\"\n \"\\n- Quick creation of player, mafia and spectator chats\"\n \"\\n- Vote counts and time since day/night started\"\n )\n\n links = (\n f\"\\n- [Invite to your server]({invite_url})\"\n f\"\\n- [Quickstart]({QUICKSTART})\"\n f\"\\n- [Commands Reference]({COMMANDS_REFERENCE})\"\n f\"\\n- [Source Code]({SOURCE_CODE})\"\n )\n\n embed.add_field(name=\"\\u200b\\nQuick Links\", value=links)\n embed.set_author(name=f\"About {ctx.me.name}\", icon_url=ctx.me.avatar_url)\n\n await ctx.send(embed=embed)", "def summary(self):\n name = 'name : ' + self.get_name()\n damage = 'damage : ' + str(self.get_damage())\n ammos = 'ammo : ' + str(self.get_ammos())\n owner = 'owner : ' + str(self.get_owner())\n return '\\n'.join([name, damage, ammos, owner])", "def describe(self) -> str:", "def print_actions_help():\n print(\\\n'''\\n\nTools for handling SELAFIN files and TELEMAC binary related in python\\n\nP ossible actions:\\n\n scan will print information about the SELAFIN, such as variables,\n their vales etc.\n spec will print information about a spectral file (also SELAFIN),\n such as frequencies, periodes, etc.\n chop will chop a SELAFIN given a new set of time range and step (but\n alter is better)\n alter will alter a SELAFIN file, choping or modifying time,\n converting its coordinates, extracting variables, etc.\n merge will merge two files together, whether they are continuous\n simulations (same variables) or putting variables together\n (same time definition)\n subdivide will subdivide a mesh by one iteration (splitting all triangles\n in four others)\n ''')", "def print_info(self):\n print(\"Experiment key: \" + self.key)\n print(\"Experiment name: \" + 
self.name)\n print(\"Experiment path: \" + self.output_path)\n print(\"Auto-sync activated: \" + str(self.auto_sync))\n print(\"\")\n print(\"Experiment metadata: \")\n print(self.exp_metadata.to_str())", "def printInfoDoc():\n global _modinfo\n print _modinfo\n help(\"ProcUtils\")", "def info(capsys, format_str, format_args=None):\n\n if format_args is not None:\n msg = (format_str % format_args)\n else:\n msg = format_str\n\n with capsys.disabled():\n print(msg)", "def run(self):\n logging.debug('Displaying Info: ' + self.recipe.name)\n\n msg = PREFIX[1:] + PREFIX.join(self.recipe.info().split('\\n'))\n print(msg)\n return msg", "def info(msg, *args):\n if args:\n msg %= args\n click.echo(msg, file=sys.stdout)", "def process_info(process):\n\thelp(process)", "def info(self):\n import tc\n ## enumerate all options\n opts = self.to_list()\n res = \"\"\n fmt = \"%20s = %5s ## %s\\n\"\n\n for k, v in opts:\n res += fmt % (k, str(self.__getattribute__(k)),\n str(v.doc()).split('\\n')[0])\n\n return res", "def help(self):\n res = \"\"", "def print_info(c, timestamp):\r\n print(f\"\\n[{timestamp}] [{id(c)}] [Fitness: {c.fitness()}]\\n \" +\r\n f\"Age: {c.age} seconds, F.Eaten: {c.food_eaten}, P.Eaten: {c.poison_eaten}\\n\" +\r\n f\"currHP: {c.health}, Gen: {c.gen}, Childs: {c.childs}\\n\" +\r\n f\"DNA: {c.dna}\\n\" +\r\n f\"FoodAttr: {c.food_attraction}, PoisonAttr: {c.poison_attraction}\\n\" +\r\n f\"FoodDist: {c.food_dist}, PoisonDist: {c.poison_dist}\\n\" +\r\n f\"MaxHealth: {c.max_health}, MaxVel: {c.max_vel}, Size: {c.size}\\n\" +\r\n f\"MaxSteer: {c.max_steer_force}, DirAngleMult: {c.dir_angle_mult}\\n\")", "def description():", "def getInfo():", "def print_info(msg):\n print(msg)", "def printHelp():\n print(\"amqWorkApiMass.py -n <msgcnt> -b <body> -m <headers> -s <path/to/bodyandheaders>\")", "def print_params():\n\n help_out = convert_phil_to_text(master_phil, att_level=1)\n txt_out = convert_phil_to_text(master_phil)\n\n return help_out, txt_out", "def print_me(self):\n return \"ID: %s Title: %s\" % (self.ID, self.title)" ]
[ "0.64891857", "0.64813703", "0.64813703", "0.6405768", "0.6331518", "0.6306247", "0.62967855", "0.6288361", "0.6245361", "0.6203289", "0.61881757", "0.617994", "0.6178175", "0.6174316", "0.6146131", "0.614331", "0.6139111", "0.61244285", "0.6111058", "0.60988563", "0.60816693", "0.6077465", "0.60719025", "0.60661864", "0.60510826", "0.60261244", "0.60195094", "0.60188246", "0.60168207", "0.60155016" ]
0.6709204
0
Crawls all requested bug data and bug IDs. Saves them in files (bugIDListP.pickle, bugIDList.csv, bugsData.txt) and/or MongoDB collections (BugIDs, BugsData), depending on whether they are given at initialization.
def get_all_bugs(self) -> List:
    #starting point
    offset = 0
    #list for all bugs
    resultBugList = []
    #list for bug IDs
    bugIDList = []
    #checks if there are still results returned
    notEmpty = True

    #queries in 500 bug steps until the result list is empty
    while notEmpty:
        print("entered")
        #interpretation of result as list plus formatting for eval errors
        result = ast.literal_eval(self.session.get(self.bugURL + "&offset=" + str(offset)).text.
                                  replace('true', 'True').replace('false', 'False').replace('null', 'None'))["bugs"]
        #checks if the query needs to be set again with a new offset
        if result:
            resultBugList += result
        else:
            notEmpty = False

        #gets the ID out of all comments
        partList = [bug["id"] for bug in result]
        bugIDList += partList
        #sets new starting point
        offset += 500

    #inserts bug ids and bugs into db if given one
    if self.mongoDB:
        for id in bugIDList:
            self.mongoDB["BugIDs"].insert_one({"ID": id})
        self.mongoDB["BugsData"].insert_many(resultBugList)

    #creates files for bug ids and bugs if given a folder
    if self.folder:
        #saves bug list as python object
        with open(self.folderpath + "bugIDListP.pickle", "wb") as a:
            pickle.dump(bugIDList, a)
        #saves bug list as csv
        with open(self.folderpath + "bugIDList.csv", "w") as b:
            for id in bugIDList:
                b.write(str(id) + "\n")
        with open(self.folderpath + "bugsData.txt", "w") as c:
            for bug in resultBugList:
                c.write(str(bug) + "\n")

    #returns List Object for further processing
    return(bugIDList)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all_comments(self, idList: Union[List, str]) -> None:\n\n #loads pickle list if it is one\n if type(idList) == str and \".pickle\" in idList:\n print(\"pickle load\")\n with open(idList, \"rb\") as f:\n idList = pickle.load(f)\n elif type(idList) == str:\n print(\"Error: Buglist parameter seems to be neither a List object or the name of a pickle file \"\n \"(needs to contain .pickle).\")\n\n #goes through idList\n for id in tqdm(idList):\n #performs request and replaces trouble some parts\n commentsString = self.session.get(self.commentURL.format(id)).text.\\\n replace('true', 'True').replace('false', 'False').replace('null', 'None')\n #gets only the comments\n commentsDict = ast.literal_eval(commentsString)[\"bugs\"][str(id)][\"comments\"]\n\n #enters comments into db or file if there are any comments for the id\n if commentsDict:\n if self.mongoDB:\n self.mongoDB[\"Comments\"].insert_many(commentsDict)\n if self.folder:\n with open(self.folderpath + \"Bugzilla_Comments.txt\", 'a') as f:\n f.write(str(commentsDict) + \"\\n\")", "def get_bug_data(self, current_date=None):\n start_time = time.time()\n bug_data = self.web_connection.get_async_data_using_asyncio_paginated(self.bug_url, self.web_constants, 5)\n end_time = time.time()\n # print(f\"Commit data using Parallel (asyncio)\\n {commit_data}\\n\\n\")\n print(f\"Time Taken to Fetch Bug Details {end_time - start_time}\")\n\t\t\n bugs_parser = BugsJsonParser()\n bug_list_df = bugs_parser.parse_json(bug_data)\n\n if current_date is None:\n current_date = datetime.today().strftime('%Y-%m-%d')\n directory = f\"{CDPConfigValues.schedule_file_path}/{self.project_name}/{current_date}\"\n CDPConfigValues.create_directory(directory)\n bug_list_df.to_csv(\n f\"{CDPConfigValues.schedule_file_path}/{self.project_name}/{current_date}/\"\n f\"{CDPConfigValues.project_issue_list_file_name}\",\n index=False)\n else:\n bug_list_df.to_csv(f\"{self.cdp_dump_path}/{CDPConfigValues.project_issue_list_file_name}\", index=False)\n\n return bug_list_df", "def getIssues(db, data):\n start = datetime.utcnow() # Time this and log how long refreshing took.\n try:\n cur = getRelevantIssues(db, data)\n except pymongo.errors.PyMongoError as e:\n return {\"error\": \"Error querying the Mongo database: \" +\n e.message}\n\n count = 0\n dbd_data = {\n # TODO: make sets of these to make the lookups below faster\n \"SLA\": data.get(\"SLA\", []),\n \"FTS\": data.get(\"FTS\", []),\n \"REV\": [], # Just refresh these every time\n \"UNA\": data.get(\"UNA\", []),\n \"active\": data.get(\"active\", {}),\n \"waiting\": data.get(\"waiting\", {})\n }\n\n try:\n revIssues = getREVIssues(db)\n except pymongo.errors.PyMongoError as e:\n return {\"error\": \"Error querying the Mongo database: \" +\n e.message}\n\n updated_data = {\n \"SLA\": [],\n \"FTS\": [],\n \"REV\": revIssues,\n \"UNA\": []\n }\n for i in cur:\n count += 1\n issue = SupportIssue().fromDoc(i)\n\n # Keep track of the totals:\n # --- Active issue count ---\n if issue.isActive():\n dbd_data['active'][issue.key] = 1\n elif issue.key in dbd_data['active']:\n del dbd_data['active'][issue.key]\n # --- Waiting For Customer issue count ---\n if issue.isWFC() and not issue.doc['deleted']:\n dbd_data['waiting'][issue.key] = 1\n elif issue.key in dbd_data['waiting']:\n del dbd_data['waiting'][issue.key]\n\n # For each category, see if the issue belongs, and if not, remove it\n # from the dashboard issues if it was there.\n if isSLA(issue):\n updated_data[\"SLA\"].append(trimmedSLAIssue(issue))\n else:\n 
removeCompressedIssueIfPresent(issue, dbd_data[\"SLA\"])\n if isFTS(issue):\n updated_data[\"FTS\"].append(trimmedFTSIssue(issue))\n else:\n removeCompressedIssueIfPresent(issue, dbd_data[\"FTS\"])\n if isUNA(issue):\n updated_data[\"UNA\"].append(trimmedUNAIssue(issue))\n else:\n removeCompressedIssueIfPresent(issue, dbd_data[\"UNA\"])\n\n mergeAndSortIssues(dbd_data, updated_data)\n\n duration = datetime.utcnow() - start\n logger.info(\"getIssues took {0}, count: {1}\".format(duration, count))\n return dbd_data", "def get_bugs(self, year):\n directory = self.get_bugs_path(year)\n for path in self._get_files(directory, pattern='bugs.*.json'):\n for bug in helpers.load_json(path):\n yield bug", "def get_event_data(self, ):\n \n if os.path.exists(f\"{self.cdp_dump_path}/{CDPConfigValues.project_issue_list_file_name}\"):\n self.bug_data_frame = pd.read_csv(f\"{self.cdp_dump_path}/{CDPConfigValues.project_issue_list_file_name}\")\n else:\n self.bug_data_frame = self.get_bug_data()\n self.closed_bug_data_frame = self.bug_data_frame[self.bug_data_frame['STATE'] == 'closed']\n self.closed_bug_data_frame = self.closed_bug_data_frame.reset_index()\n\n self.event_data_frame = self.closed_bug_data_frame[[\"ISSUE_ID\", \"CREATED_TIMESTAMP\", \"UPDATED_TIMESTAMP\"]]\n\n \"\"\"Fetch the Bug Id's from the data frame\"\"\"\n list_of_issues = self.closed_bug_data_frame['ISSUE_ID'].tolist()\n\n \"\"\"using the Bugs Id list create event url list\"\"\"\n url_list = Utilities.format_url(self.event_url, list_of_issues)\n start_time = time.time()\n\n results = self.web_connection.get_async_data_using_asyncio(url_list, self.web_constants,\n batch_size=CDPConfigValues.git_api_batch_size)\n\n list_of_buggy_commits = results[0]\n failed_urls = results[1]\n loop_counter = 1\n\n while len(failed_urls) > 0:\n time.sleep(60 * loop_counter)\n print(f\"Total Failed URL's re-trying {len(failed_urls)}\")\n results = self.web_connection.get_async_data_using_asyncio(failed_urls, self.web_constants,\n batch_size=CDPConfigValues.git_api_batch_size // 2)\n failed_urls = results[1]\n list_of_buggy_commits = list_of_buggy_commits + results[0]\n end_time = time.time()\n print(\"Parallel time taken to get all event data using (asyncio) =\", end_time - start_time)\n\n list_of_buggy_commits = pd.DataFrame(list_of_buggy_commits, columns=[\"ISSUE_ID\", \"JSON_RESPONSE\"])\n list_of_buggy_commits['ISSUE_ID'] = list_of_buggy_commits['ISSUE_ID'].astype(str)\n self.event_data_frame['ISSUE_ID'] = self.event_data_frame['ISSUE_ID'].astype(str)\n self.event_data_frame = pd.merge(self.event_data_frame, list_of_buggy_commits, how=\"left\",\n left_on=[\"ISSUE_ID\"],\n right_on=[\"ISSUE_ID\"])\n\n self.event_data_frame.to_csv(f\"{self.cdp_dump_path}/github_events_cdp_dump.csv\", encoding='utf-8-sig',\n index=False)\n event_parser = EventsJsonParser()\n event_parser.find_buggy_commits_based_on_repository_fixes(self.web_constants, self.event_data_frame,\n f\"{self.cdp_dump_path}/\"\n f\"{CDPConfigValues.closed_events_list_file_name}\")", "def __init__(self, restUrl: str,\n mode: CrawlMode = CrawlMode.NO,\n loginUrl: str = None,\n loginName: str = None,\n loginPW: str = None,\n furtherparams: str = None,\n workers: int = 10,\n mongoDB: Database = None,\n foldername: str = None,\n bugList: Union[List, str] = None) -> None:\n\n self.session = requests.session()\n\n self.workers = workers\n\n if loginUrl:\n #bugzilla user data\n user = loginName\n pw = loginPW\n\n #login process\n loginURL = loginUrl\n self.session.post(loginURL, {'Bugzilla_login': 
user, 'Bugzilla_password': pw})\n\n #checks for the right ending of restUrl\n if restUrl[-1] != '/':\n restUrl += '/'\n\n #prepares URLs for crawling of bugs and comments\n self.bugURL = restUrl + 'bug?limit=500' + furtherparams\n self.commentURL = restUrl + 'bug/{}/comment'\n\n #database if given one\n self.mongoDB = mongoDB\n\n #foldername if given one\n self.folder = foldername\n if foldername:\n #creates directory\n self.createFolder(foldername)\n self.folderpath = foldername + '/'\n\n #checks on which crawl operation to execute\n self.decide_action(mode, bugList)", "def run(self):\n if self.parsed_args.fetch_cache:\n issues = self.backend.fetch_from_cache()\n else:\n issues = self.backend.fetch(from_date=self.from_date)\n\n try:\n for issue in issues:\n obj = json.dumps(issue, indent=4, sort_keys=True)\n # self.outfile.write(issue['url']+\"\\n\")\n self.outfile.write(obj)\n self.outfile.write('\\n')\n except requests.exceptions.HTTPError as e:\n raise requests.exceptions.HTTPError(str(e.response.json()))\n except IOError as e:\n raise RuntimeError(str(e))\n except Exception as e:\n if self.backend.cache:\n self.backend.cache.recover()\n raise RuntimeError(str(e))", "def save_bugs(self, year, chunk, bugs, errors=None):\n directory = self.get_bugs_path(year)\n return self._save(directory, chunk, bugs, errors, switch='bugs')", "def crawl(self):\n if os.path.exists(self.__work_path):\n shutil.rmtree(self.__work_path)\n print '\\nOld Data Was Found And Removed.\\n'\n\n initial_first_run = True\n initial_recursion_depth = 0\n initial_prev_link_size = 0\n for url in self.__urls:\n self.__start_recursion(url, initial_first_run,\n initial_recursion_depth, initial_prev_link_size)\n\n Crawler.mission_report(self.__work_path)", "def parse_log_and_populate_db(self,start_issue,stop_issue):\n \n for issue_num in range(start_issue,stop_issue+1):\n try:\n police_log=PoliceLog.objects.get(issue_number__exact=issue_num)\n except PoliceLog.DoesNotExist:\n #\n # if issue doesn't exist in db, then go to next issue\n pass\n else:\n if len(police_log.filename)>0:\n #\n # in order to parse log file, must have filename\n L=self.parse_log(police_log.filename,\n police_log.pub_date.year)\n\n else:\n L=[]\n \n #\n # add each report to db\n for report in L:\n #\n # hash string is digest of (issue_number, crime category, original text)\n # this should ensure a unique hash\n hasher = hashlib.md5()\n hasher.update(str(police_log.issue_number))\n hasher.update(report['category'])\n hasher.update(report['original_text'])\n\n crime=CrimeReport(hash=hasher.hexdigest(),\n policelog=police_log, # foreign key: specify police log object\n category=report['category'],\n original_text=report['original_text'],\n line_num=report['line_num'],\n address=report['address'],\n map_scale=report['map_scale'],\n date=datetime.date(report['date_year'],\n report['date_month'],\n report['date_day']))\n \n\n # add lat-long coordinates to crime report\n (lat,long)=self.geocoder.geocode(crime.address,crime.map_scale)\n crime.lat=lat\n crime.long=long\n\n crime.save()", "def do_the_issues(user_id, repo_id):\n with tempfile.TemporaryDirectory() as tmp:\n path = os.path.join(tmp, \"{}_{}_issues.txt\".format(repo_id, user_id))\n issues_initial_url = get_initial_url_issues(user_id, repo_id)\n resp_obj = requests.get(issues_initial_url, headers=headers)\n # prase the initial request. 
for Issue\n all_issues = json.loads(resp_obj.text)\n with open(path, \"w\") as out_stream:\n for an_issue in all_issues:\n print(an_issue, file=out_stream)\n print(\"the len of resp is {}\".format(len(all_issues)))\n LINK_HEADER = \"Link\"\n next_url = None\n if LINK_HEADER in resp_obj.headers:\n # parse next page (if present)\n next_url = parse_next_url(resp_obj.headers[LINK_HEADER])\n # subsequent page\n while next_url:\n resp_obj = requests.get(next_url, headers=headers)\n all_issues = json.loads(resp_obj.text)\n with open(path, \"a\") as out_stream:\n for an_issue in all_issues:\n print(an_issue, file=out_stream)\n if LINK_HEADER in resp_obj.headers:\n next_url = parse_next_url(resp_obj.headers[LINK_HEADER])\n print(next_url)\n else:\n next_url = None\n GsUpload.upload_blob(GS_BUCKET_NAME, path, basename(path))\n print(\"the issues path is \" + str(path))", "def main():\n\n database = MongoDbUtil('ro').database()\n\n tag = 'Px1id'\n daemons = ['daq_files_watcher', 'jobs_validator', 'submitter']\n colls = ['%s_%s'%(coll, tag) for coll in daemons]\n\n datas = []\n for daemon, coll in zip(daemons, colls):\n last_doc = database[coll].find().skip(database[coll].count()-1)[0]\n accum_stats = last_doc['accum_stats']\n\n vals = {}\n timestamps = []\n for key in accum_stats.keys():\n vals[key] = []\n\n for doc in database[coll].find():\n timestamps.append(doc['date'])\n for key in vals:\n vals[key].append(doc['accum_stats'][key])\n\n urls = []\n for key in vals:\n urls.append(draw(timestamps, vals[key], daemon, key))\n\n datas.append({'title': daemon, 'urls': urls})\n\n make_index_file(tag, datas)", "def scrape_issues(self, url):\n try:\n self.driver.get(url)\n except common.exceptions.InvalidSessionIdException:\n self.driver.close()\n error_message = \"ERROR: Failed to reach URL, check \"\\\n \"specified URL in constants.py\\n\"\n self.logger.log(error_message)\n return []\n except Exception:\n self.driver.close()\n error_message = \"ERROR: Failed to reach URL, check \"\\\n \"specified URL in constants.py\\n\"\n self.logger.log(error_message)\n return []\n\n source_html = self.driver.page_source\n soup = BeautifulSoup(source_html, \"html.parser\")\n page_title = soup.title.string\n buganizer_issues = []\n\n if \"Buganizer\" not in page_title or \"componentid\" not in page_title:\n if \"MOMA Single Sign On\" in page_title:\n error_message = \"ERROR: You must log into your MOMA account \"\\\n \"first. 
Select the 'Use Security Code' option and generate a security code at go/sc.\\n\"\n self.logger.log(error_message)\n\n while \"Buganizer\" not in page_title:\n source_html = self.driver.page_source\n soup = BeautifulSoup(source_html, \"html.parser\")\n page_title = soup.title.string\n time.sleep(1)\n\n return buganizer_issues\n error_message = \"ERROR: URL does not link to a Buganizer \"\\\n \"componentid, check specified URL \"\\\n \"in constants.py\\n\"\n self.logger.log(error_message)\n return buganizer_issues\n\n for tbody in soup.find_all('tbody'):\n for _tr in tbody.find_all('tr'):\n issue_link = \"https://b.corp.google.com/issues/\" + _tr.get(\n 'data-row-id')\n buganizer_issues.append(issue_link)\n return buganizer_issues", "def bugs_to_csv(promptArgs=False):\n username = os.environ['USER']\n cachedir = \"/tmp/\" + username + \"/.cache/.launchpadlib\"\n anon_or_auth = 'anon_'\n\n if promptArgs is False:\n launchpad = Launchpad.login_anonymously(\n 'anonymously', 'production', cachedir, version='devel')\n\n # Clear credentials if they already existed and check for X11 forwarding\n elif promptArgs is True:\n def no_credential():\n print(\"Can't proceed without Launchpad credential.\")\n sys.exit()\n\n if os.path.isfile(cachedir+'/auth.txt'):\n os.remove(cachedir+'/auth.txt')\n\n try:\n os.environ['DISPLAY']\n except KeyError:\n raise ValueError('X11 Disabled (or) and DISPLAY Variable unset')\n\n launchpad = Launchpad.login_with(\n 'authorize', 'production', cachedir,\n credentials_file=cachedir + '/auth.txt',\n credential_save_failed=no_credential, version='devel')\n anon_or_auth = 'authorized_'\n else:\n raise ValueError(\"Prompt argument was not a boolean\")\n\n # Try to get bugs and place to csv file, if stopped midway or finishes,\n # delete authentication credentials (if used)\n try:\n project = launchpad.projects['starlingx']\n\n bugs = project.searchTasks(status=ALL_STATUSES, omit_duplicates=False)\n currentDate = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M')\n \"\"\"\n file_n = 'launchpad-bugs-' + anon_or_auth + currentDate + '.csv'\n print('Destination file is: ' + WORK_DIR + '/' + file_n)\n\n if os.path.isfile(DEST_DIR + file_n):\n updateFileQuestion = \"File Exists, do you want to overwrite it?\"\n overWriteFile = query_yes_no(updateFileQuestion, \"no\")\n if overWriteFile is False:\n raise ValueError(\"Overwrite existing file is False\")\n with open(DEST_DIR + file_n, 'wb') as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow(fieldnames)\n for each_bug in bugs:\n bugInfoString = get_bug_info_tuple(each_bug)\n writer.writerow(bugInfoString)\n \"\"\"\n # create my workbook\n workbook_filename = 'stx_lp_workbook-' + anon_or_auth + currentDate + '.xlsx'\n workbook = xlsxwriter.Workbook(WORK_DIR + \"/\"+ workbook_filename)\n print \"Start writing LP data to worksheets according to the tag ......\"\n worksheet_dict = {}\n row_dict = {}\n worksheet_dict['all_open'] = workbook.add_worksheet('all_open')\n worksheet_dict['all_open'].write_row(0, 0, list(fieldnames))\n row_dict['all_open'] = 1\n for tag in targetTags:\n worksheet_dict[tag] = workbook.add_worksheet(tag)\n worksheet_dict[tag].write_row(0, 0, list(fieldnames))\n row_dict[tag] = 1\n for each_bug in bugs:\n bugInfoStringList = get_bug_info_tuple(each_bug)\n worksheet_dict['all_open'].write_row(row_dict['all_open'], 0, bugInfoStringList)\n row_dict['all_open'] += 1\n for tag in targetTags:\n if tag in each_bug.bug.tags:\n worksheet_dict[tag].write_row(row_dict[tag], 0, bugInfoStringList)\n row_dict[tag] += 
1\n bugId = str(each_bug.bug.id)\n row = row_dict[tag]\n print \"writting LP \" + bugId + \" at row #\" + str(row) + \" into sheet: \" + tag\n workbook.close()\n # old mehtod: totally loop number of tags * number of bugs\n # not efficient enough, so commented those lines out\n \"\"\"\n for tag in targetTags:\n worksheet = workbook.add_worksheet(tag)\n # with each of sheets (named under targeted tag),\n # loop all bugs to find the bug with such a tag,\n # and wite the bug one by one into this sheeet.\n row = 0;\n worksheet.write_row(row, 0, list(fieldnames))\n print worksheet\n #worksheet = stx_lp_workbook.get_worksheet_by_name(tag)\n for each_bug in bugs:\n if tag in each_bug.bug.tags:\n row += 1\n bugId = str(each_bug.bug.id)\n bugInfoString = get_bug_info_tuple(each_bug)\n worksheet.write_row(row, 0, bugInfoString)\n print \"writting LP \" + bugId + \" at row #\" + str(row+1) + \" into sheet: \" + tag\n\n print \"Complete writting worksheets and closed workbook!\"\n workbook.close()\n \"\"\"\n except BaseException as e:\n print e.message, e.args\n\n finally:\n workbook.close()\n if anon_or_auth == 'authorized_':\n os.remove(cachedir + '/auth.txt')", "def process_results_legacy(refresh_count, output_dir, ext_queue, result_queue,\\\n num_of_workers=8):\n bug_dict = {} # dict to keep track of how many duplicates of each bug, if\n # exists\n try:\n # separate the non-ads from the ads for ease of handchecking\n os.makedirs(output_dir)\n os.makedirs(os.path.join(output_dir, 'notad'))\n except OSError:\n pass\n\n # uses a pool of 'curl' workers\n curl_worker_pool = Pool(processes=num_of_workers)\n manager = Manager()\n curl_result_queue = manager.Queue()\n \n dl_counter = 0 # keep track of how many bugs downloaded\n while True:\n try:\n found_bugs = json.loads(ext_queue.get(block=True, timeout=2))\n except Exception:\n LOG.debug('Timing out on get from queue...')\n break\n for entry in found_bugs:\n bugname = entry['bug']['name'].replace(' ','').replace('/','_')\n bugsrc = entry['ent']['policyContentLocation']\n bugpattern = entry['bug']['pattern']\n try :\n bugaffiliation = entry['bug']['affiliation']\n except KeyError:\n bugaffiliation = \"\"\n bugtype = entry['bug']['type']\n bugpathname = entry['ent']['pathname']\n bug = WebBug(name=bugname, src=bugsrc, affiliation=bugaffiliation,\n bug_type=bugtype, matched_pattern=bugpattern, pathname=bugpathname)\n try:\n # matched an entry in the bugdict, incr count and continue\n bug_dict[bug] += 1\n continue\n except KeyError:\n bug_dict[bug] = 1 \n\n saved_location ='Visit%d_%s%d' % (refresh_count, bugname,\\\n dl_counter)\n dl_counter += 1\n save_to_path = os.path.join( output_dir, '%s' % saved_location)\n obj = curl_worker_pool.apply_async(curl_worker_legacy, \\\n ((output_dir, saved_location, save_to_path, bug, curl_result_queue),))\n try:\n sleep(0.5)\n curl_worker_pool.join()\n curl_worker_pool.close()\n curl_worker_pool.terminate()\n except Exception:\n LOG.debug('Closing pool')\n\n while not curl_result_queue.empty():\n cbug = curl_result_queue.get()\n # ugly code here\n bugcount = bug_dict[cbug]\n del bug_dict[cbug]\n bug_dict[cbug] = bugcount\n with open( os.path.join(output_dir, 'bug_dict%d.pkl' % refresh_count), 'w') as fwtr:\n cPickle.dump(bug_dict, fwtr)\n result_queue.put(bug_dict)", "def fetch_cases():\n logger.info(\"Start fetching cases\")\n fb = fogbugz.FogBugz(\n settings.AUTH_FOGBUGZ_SERVER,\n settings.FOGBUGZ_TOKEN)\n release_query = ' OR '.join('milestone:\"{0}\"'.format(release.number) for release in Release.objects.all())\n 
resp = fb.search(\n q='({0}) AND ({ciproject}:\"*\")'.format(release_query, ciproject=settings.FOGBUGZ_CI_PROJECT_FIELD_ID),\n cols='sTitle,sOriginalTitle,sFixFor,dtFixFor,sProject,sArea,dtLastUpdated,tags,' +\n settings.FOGBUGZ_CI_PROJECT_FIELD_ID\n )\n cases = resp.findAll('case')\n logger.info('Found %s cases to fetch from fogbugz', len(cases))\n for case_xml in cases:\n update_case_from_fogbugz.apply_async(kwargs=dict(case_id=int(case_xml.attrs['ixbug'])))\n logger.info(\"Task finished\")", "def run(self):\n comment_df_list = []\n post_df_list = []\n subreddit_df_list = []\n\n reddit = sr.reddit_interface()\n subreddits = reddit.subreddits.popular(limit = SUBREDDIT_LIMIT) # Lists the top 50 subreddits\n\n for subreddit in subreddits:\n top_posts = reddit.subreddit(str(subreddit)).top()\n for post in top_posts:\n if not post.stickied:\n post_list = [post.id, str(post.subreddit), post.title, post.num_comments]\n post.comments.replace_more(limit = 0)\n for comment in post.comments.list():\n comment_list = [str(comment.parent()), comment.id, comment.body, int(comment.score)]\n comment_df_list.append(comment_list)\n post_df_list.append(post_list)\n subreddit_df_list.append([str(subreddit)])\n\n comment_df_list = pd.DataFrame(comment_df_list, columns = COMMENTS_COLUMNS)\n post_df_list = pd.DataFrame(post_df_list, columns = POSTS_COLUMNS)\n subreddit_df_list = pd.DataFrame(subreddit_df_list, columns =['Subreddit'])\n reddit_df = [subreddit_df_list, post_df_list, comment_df_list]\n sr.save_xlsx(reddit_df, self.output().path)", "def bug_map(self):\n bug_map = defaultdict(list)\n # obtain bug_id/test_id mapping\n with MongoConnection(self.host, self.port) as mongo:\n collection = mongo.connection[\"kdetector\"][\"dataset\"]\n dataset = collection.find()\n for data in dataset:\n bug_id, test_id = data[\"bug_id\"], data[\"test_id\"]\n bug_map[bug_id].append(test_id)\n return bug_map", "def init():\n\n for subreddit in SUBREDDITS:\n\n writer = csv.writer(open(\"./{}-submissions.csv\".format(subreddit),\n \"w\", newline=\"\", encoding=\"utf-8\"))\n\n # Adding the header.\n writer.writerow([\"datetime\", \"author\", \"title\", \"url\", \"domain\"])\n\n print(\"Downloading:\", subreddit)\n download_submissions(subreddit=subreddit)\n writer.writerows(SUBMISSIONS_LIST)\n\n SUBMISSIONS_LIST.clear()", "def get_defectdojo_findings(filename):\n\n acunetix_scan_report = get_acunetix_scan_report(filename)\n defectdojo_findings = []\n for report_item in acunetix_scan_report.ReportItems:\n defectdojo_finding = dict()\n\n if \"Affects\" in report_item:\n affects = (\" ({})\".format(report_item['Affects']))\n else:\n affects = \"\"\n defectdojo_finding['title'] = \"{}{}\".format(report_item['Name'], affects)\n defectdojo_finding['date'] = acunetix_scan_report.StartTime\n defectdojo_finding['cwe'] = report_item['CWEId']\n defectdojo_finding['url'] = acunetix_scan_report.StartURL\n defectdojo_finding['severity'] = report_item['Severity']\n defectdojo_finding['description'] = get_html2text(report_item['Description'])\n if \"Details\" in report_item and len(report_item['Details'].strip()):\n defectdojo_finding['description'] += \"\\n**Details:**\\n{}\\n\".format(report_item['Details'])\n if \"TechnicalDetails\" in report_item and len(report_item['TechnicalDetails'].strip()):\n defectdojo_finding['description'] += \"\\n**Technical Details:**\\n{}\\n\".format(report_item['TechnicalDetails'])\n defectdojo_finding['mitigation'] = get_html2text(report_item['Recommendation'])\n defectdojo_finding['impact'] = 
get_html2text(report_item['Impact'])\n defectdojo_finding['references'] = ''\n for ref in report_item['ReferencesURLs']:\n defectdojo_finding['references'] += \"{}\\n\".format(ref)\n defectdojo_finding['false_p'] = report_item['IsFalsePositive']\n\n finding = DefectDojoFinding(**defectdojo_finding)\n defectdojo_findings.append(finding)\n\n return defectdojo_findings", "def __load_bugs(self):\n bugs = []\n with open(self.reffile(), 'rb') as reffile:\n reader = csv.reader(reffile, delimiter=';', quotechar='\\n')\n for line in reader:\n bugs.append(tuple(map(int, line)))\n return bugs", "def scrape_data(self):\n ## OPEN EMPTY CSV FILE\n self.write_into_csv()\n \n ## READ POSTCODES\n postcodes = self.read_postcodes()\n\n for postcode in postcodes:\n\n sleeptime = round(random.uniform(0.5, 1.0), 2)\n time.sleep(sleeptime)\n \n self.get_url_response(postcode)\n\n ## WRITE DATA INTO CSV FILES\n atms = [v for k, v in self.ATMS.items( )] \n if atms:\n self.write_into_csv(atms, 'atm')\n\n branches = [v for k, v in self.BRANCHES.items()]\n if branches:\n self.write_into_csv(branches, 'brc')", "def crawlDocuments(docIds, skipIssns):\n rootLog = logging.getLogger('')\n successCount = 0\n consecErrorCount = 0\n fileLogHandler = None\n for i, docIdTuple in enumerate(docIds):\n docId, srcDir = docIdTuple\n removeLocks()\n checkCreateLock(srcDir)\n if fileLogHandler is not None:\n rootLog.handlers.remove(fileLogHandler)\n fileLogHandler = pubGeneric.logToFile(join(srcDir, 'crawler.log'))\n todoCount = len(docIds) - i\n logging.info('--- Crawling document with ID %s, dir %s (%d IDs left)' % (docId, srcDir, todoCount))\n webCache.clear()\n try:\n artMeta = getArticleMeta(docId)\n except pubGetError:\n writeDocIdStatus(srcDir, docId, 'no meta', '')\n continue\n\n logging.info('Got Metadata: %s, %s, %s' % (artMeta['journal'], artMeta['year'], artMeta['title']))\n try:\n checkIssnErrorCounts(artMeta, skipIssns, srcDir)\n paperData = crawlOneDoc(artMeta, srcDir)\n writePaperData(docId, artMeta, paperData, srcDir)\n consecErrorCount = 0\n successCount += 1\n except pubGetError as e:\n consecErrorCount += 1\n docId = artMeta['pmid']\n writeDocIdStatus(srcDir, docId, e.logMsg, e.longMsg, e.detailMsg)\n issnYear = getIssnYear(artMeta)\n issnYearErrorCounts[issnYear] += 1\n if e.logMsg not in ('noOutlinkOrDoi', 'unknownHost', 'noLicense'):\n waitSec = ERRWAIT * consecErrorCount\n logging.debug('Sleeping for %d secs after error' % waitSec)\n time.sleep(waitSec)\n if consecErrorCount > MAXCONSECERR:\n logging.error('Too many consecutive errors, stopping crawl')\n e.longMsg = 'Crawl stopped after too many consecutive errors / ' + e.longMsg\n raise\n if DO_PAUSE:\n raw_input('Press Enter to process next paper...')\n except:\n raise\n\n logging.info('Downloaded %d articles' % successCount)\n removeLocks()\n if fileLogHandler != None:\n rootLog.handlers.remove(fileLogHandler)\n return", "def push_current_data(project):\n defects = []\n\n logger.info(\"Starting {}...\".format(project))\n jira_issues = get_jira_defects(project)\n now = datetime.datetime.utcnow().strftime(DATE_FORMAT)\n logger.debug(\"Fetched {} issues successfully for {}\".format(len(jira_issues), project))\n\n # Each issue fetched is being generated with our schema.\n for issue in jira_issues:\n try:\n jira_dict = jira_obj_to_dict(issue, now)\n defect = create_defect(jira_dict, issue)\n defects.append(defect)\n except Exception as e:\n logger.debug(\"Exception processing {} {}\".format(issue.key, e))\n logger.debug(\"Missing values 
{}\".format(str(jira_dict)))\n pass\n if len(defects) < len(jira_issues):\n logger.debug(\"{delta} defects not added in the {} report\".format(project, delta=len(jira_issues) - len(defects)))\n\n return post_defects(project, jira_issues, defects)", "def get_issues(): # pragma: no cover\n global issue_data\n team = {\n 'stevex196x': 0,\n 'TheSchaft': 0,\n 'melxtru': 0,\n 'aylish19': 0,\n 'connormlewis': 0,\n 'tsukkisuki': 0\n }\n all_issues = 0\n while all_issues == 0:\n url = ('https://api.github.com/repos/connormlewis/idb/'\n 'issues?state=all&filter=all&per_page=100')\n data = requests.get(\n url, headers={'Authorization': 'token ' + os.environ['API_TOKEN']})\n link = data.headers.get('Link', None)\n for i in range(1, int(find_last_page(link)) + 1):\n url = (\n 'https://api.github.com/repos/connormlewis/idb/'\n 'issues?state=all&filter=all&per_page=100' + '&page=' + str(i))\n data = requests.get(\n url,\n headers={'Authorization': 'token ' + os.environ['API_TOKEN']})\n json_list = data.json()\n for entry in json_list:\n if 'pull_request' not in entry:\n all_issues += 1\n if entry['user']['login'] in team:\n team[entry['user']['login']] += 1\n return team, all_issues", "def get_data():\n log = common.LogFile('', LOGFILE)\n settings = load_settings()\n keywords = settings[\"keywords\"]\n api_key = settings[\"api_key\"]\n for keyword in keywords:\n print(\"[{}] : fetching data.\".format(keyword))\n filename = \"results_{0}.json\".format(keyword)\n results = {}\n hits_limit = 500\n start_at = 1\n counter = 0\n while True:\n url = create_url(keyword, hits_limit, start_at, api_key)\n records = get_records_from_url(url)\n total_results = get_total_hits(records)\n records = split_records(records)\n records_on_page = len(records)\n if records_on_page == 0:\n break\n else:\n for record in records:\n counter += 1\n id_no = extract_id_number(record)\n processed_dict = {'ID': id_no, 'problem': []}\n processed_record = parse_record(\n record, processed_dict, log)\n if id_no not in results:\n results[id_no] = processed_record\n if counter % 100 == 0:\n print(\"Processed {} out of {}\".format(\n counter, total_results))\n start_at += hits_limit\n time.sleep(THROTTLE)\n print(\"[{}] : fetched {} records to {}.\".format(\n keyword, len(results), filename))\n save_data(results, filename)", "def update_issue_tracker():\n # Only process flakes that happened at least MIN_REQUIRED_FLAKY_RUNS times in\n # the last 24 hours.\n for flake in Flake.query(Flake.count_day >= MIN_REQUIRED_FLAKY_RUNS,\n projection=[Flake.count_day]):\n logging.info('Created processing task for %s' % flake.key)\n taskqueue.add(queue_name='issue-updates',\n url='/issues/process/%s' % flake.key.urlsafe())", "def main():\n\n conn = psycopg2.connect(**env.DATABASE)\n cursor = conn.cursor()\n\n for file, city in env.supported_cities().items():\n try:\n data = add_metadata(parse_html(city, get_html(city)))\n save_data_to_db(cursor, data, file.title())\n except Exception as e:\n print(\"Failed to scrape '%s': %s\" %(city, e))\n print(traceback.format_exc())\n\n conn.commit()\n conn.close()", "def exportToDB(self, submissions):\n for p in range(len(submissions)):\n for x in range(len(submissions[p])):\n doc_ref = self.fs_db.collection(u'reddit').document(str(submissions[p][4]))\n doc_ref.set({\n u'content': str(submissions[p][0]),\n u'upvote_ratio': str(submissions[p][1]),\n u'score': submissions[p][2],\n u'title': submissions[p][3],\n u'id': submissions[p][4],\n u'total_awards_received': submissions[p][5],\n u'created_utc': submissions[p][6]\n 
})", "def _parse(self):\n with open(self._path, 'r') as file:\n try:\n line = file.readline()\n while line:\n if line.startswith(BUG_START):\n line = file.readline()\n if line:\n # Extract bug type\n bug_type = line.split(' ', 1)[0]\n if bug_type not in self._bug_list:\n self._bug_list[bug_type] = []\n # Get whether or not the bug was reproduced\n reproduced = 'Bug was reproduced' in line\n line = file.readline()\n if line.startswith('Attempted'):\n # Skip the 'Attempted to reproduce' line if exists\n line = file.readline()\n bug_hash = line.split(' ')[-1].rstrip()\n line = file.readline()\n seq = ParsedSequence([])\n # Populate the sequence of requests that made the bug\n while line and not line.startswith(BUG_START):\n seq += self._get_request(line)\n line = file.readline()\n # Add the bug sequence to the bug list\n self._bug_list[bug_type].append((seq, reproduced, bug_hash))\n else:\n line = file.readline()\n except Exception as err:\n print(\"Failed to read bug log. Log was not a complete test log.\\n\"\n f\"{err!s}\")\n raise TestFailedException" ]
[ "0.6098931", "0.581118", "0.58095926", "0.5599205", "0.55810404", "0.55514055", "0.5436911", "0.5393213", "0.5392565", "0.52618045", "0.5231558", "0.52075624", "0.519109", "0.514051", "0.5135371", "0.5125481", "0.5122285", "0.50928766", "0.50890225", "0.50798535", "0.50633335", "0.5046962", "0.50446755", "0.5044202", "0.5043592", "0.5025236", "0.5025231", "0.5024478", "0.50176877", "0.5013452" ]
0.7346313
0
Crawls for all comments belonging to the bugs in the BugIDList.
def get_all_comments(self, idList: Union[List, str]) -> None: #loads pickle list if it is one if type(idList) == str and ".pickle" in idList: print("pickle load") with open(idList, "rb") as f: idList = pickle.load(f) elif type(idList) == str: print("Error: Buglist parameter seems to be neither a List object or the name of a pickle file " "(needs to contain .pickle).") #goes through idList for id in tqdm(idList): #performs request and replaces trouble some parts commentsString = self.session.get(self.commentURL.format(id)).text.\ replace('true', 'True').replace('false', 'False').replace('null', 'None') #gets only the comments commentsDict = ast.literal_eval(commentsString)["bugs"][str(id)]["comments"] #enters comments into db or file if there are any comments for the id if commentsDict: if self.mongoDB: self.mongoDB["Comments"].insert_many(commentsDict) if self.folder: with open(self.folderpath + "Bugzilla_Comments.txt", 'a') as f: f.write(str(commentsDict) + "\n")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all_bugs(self) -> List:\n #starting point\n offset = 0\n #list for all bugs\n resultBugList = []\n #list for bug IDs\n bugIDList = []\n #checks if there are still results returned\n notEmpty = True\n\n #queries in 500 bug steps until the result list is empty\n while notEmpty:\n print(\"entered\")\n #interpretation of result as list plus formatting for eval errors\n result = ast.literal_eval(self.session.get(self.bugURL + \"&offset=\" + str(offset)).text.\n replace('true', 'True').replace('false', 'False').replace('null', 'None'))[\"bugs\"]\n #checks if the query needs to be set again with a new offset\n if result:\n resultBugList += result\n else:\n notEmpty = False\n\n #gets the ID out of all comments\n partList = [bug[\"id\"] for bug in result]\n bugIDList += partList\n #sets new starting point\n offset += 500\n\n #inserts bug ids and bugs into db if given one\n if self.mongoDB:\n for id in bugIDList:\n self.mongoDB[\"BugIDs\"].insert_one({\"ID\": id})\n self.mongoDB[\"BugsData\"].insert_many(resultBugList)\n\n #creates files for bug ids and bugs if given a folder\n if self.folder:\n #saves bug list as python object\n with open(self.folderpath + \"bugIDListP.pickle\", \"wb\") as a:\n pickle.dump(bugIDList, a)\n #saves bug list as csv\n with open(self.folderpath + \"bugIDList.csv\", \"w\") as b:\n for id in bugIDList:\n b.write(str(id) + \"\\n\")\n with open(self.folderpath + \"bugsData.txt\", \"w\") as c:\n for bug in resultBugList:\n c.write(str(bug) + \"\\n\")\n\n #returns List Object for further processing\n return(bugIDList)", "def _get_comments(self, issue_id):\n data = self._get(\"/issues/{}/comments\".format(issue_id))\n comments = []\n for item in data:\n comments.append(\n Comment(item['user']['login'], item['body'])\n )\n return comments", "async def scrape_comments(self):\n\n subreddit_origin = await self.reddit.subreddit(self.subreddit)\n\n comment_count = 0\n async for comment in subreddit_origin.comments(limit=self.limit):\n if self.memory.contains(comment.id):\n continue\n\n self.memory.add(comment.id)\n\n # Parse Comment\n comment = self.parse_comment(comment)\n\n # Save in Pub/Sub\n if self.enable_publish:\n self.publish(comment)\n\n comment_count += 1\n\n return comment_count", "def _get_comments(self):\n if not hasattr(self, 'id'):\n raise BadReference('No matching issue on disk')\n return filter(lambda x: len(x) == 40, os.listdir(self.paths['comments']))", "def get_comments(yt_id):\n\n client = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,developerKey=DEVELOPER_KEY)\n\n video_comments = client.commentThreads().list(\n videoId = yt_id,\n part=\"snippet,replies\").execute()\n\n comment_items = video_comments['items']\n\n class MLStripper(HTMLParser):\n def __init__(self):\n self.reset()\n self.strict = False\n self.convert_charrefs= True\n self.fed = []\n def handle_data(self, d):\n self.fed.append(d)\n def get_data(self):\n return ''.join(self.fed)\n\n def strip_tags(html):\n s = MLStripper()\n s.feed(html)\n return s.get_data()\n\n comments = []\n for sub_block in comment_items:\n comments.append(strip_tags(sub_block['snippet']['topLevelComment']['snippet']['textDisplay']))\n\n comments_all = ' '.join(comments)\n\n print(\"YouTube comments scanned\")\n return comments_all", "def _get_comments(**kwargs):\r\n\r\n # Log in to get cookies.\r\n cookies = _login(**kwargs)\r\n\r\n if 'r' not in kwargs:\r\n # This is the first comments request.\r\n # Make the comments request and set an empty list.\r\n kwargs['r'] = 
requests.get('https://news.ycombinator.com/threads?id=%s' % kwargs['args'].username,\r\n cookies=cookies)\r\n\r\n # Check to make sure we have a good response.\r\n if not _good_response(**kwargs):\r\n kwargs.pop('r')\r\n return _get_comments(**kwargs)\r\n\r\n kwargs['comments'] = []\r\n\r\n # Grab the comments.\r\n J = pq(kwargs['r'].content)\r\n comments = J('table table td.default')\r\n\r\n for c in comments:\r\n\r\n comment = _sanitize_comment(J, c)\r\n\r\n if kwargs['args'].no_owner and comment['user'] == kwargs['args'].username:\r\n continue\r\n\r\n # Add the comment to the saved list.\r\n kwargs['comments'].append({\r\n 'user': comment['user'],\r\n 'comment': comment['comment'],\r\n 'reply': comment['reply'],\r\n 'points': comment['points'],\r\n 'link': comment['link'],\r\n 'parent': comment['parent'],\r\n 'story': comment['story'],\r\n 'date': comment['date'],\r\n })\r\n\r\n # If we're getting all comments.\r\n if kwargs['args'].all:\r\n\r\n # Find the 'More' link and load it.\r\n last = J('a', J('table table tr td.title:last'))\r\n if last.text() == 'More':\r\n kwargs['r'] = requests.get('https://news.ycombinator.com%s' % last.attr('href'),\r\n cookies=cookies)\r\n\r\n # Check to make sure we have a good response.\r\n if not _good_response(**kwargs):\r\n kwargs.pop('r')\r\n return _get_comments(**kwargs)\r\n\r\n # Call this function again, this time with the new list.\r\n return _get_comments(**kwargs)\r\n\r\n return kwargs['comments']", "def main(u, o):\n click.echo(f\"Web crawling on {u} started successfully...\")\n\n comment_regex = re.compile('<!--(.*?-->)')\n\n with requests.Session() as session:\n resp = session.get(u)\n soup = BeautifulSoup(resp.text, 'lxml')\n #TODO: search for hidden attributes, may be useful\n comments = soup.find_all(text=comment_regex)\n print(comments)", "def test_issue_get_comments(self):\n pass", "def handle_comments(self):\r\n comments = Comment.objects.all()\r\n for c in comments:\r\n new = ThreadedComment(\r\n content_type = c.content_type,\r\n object_id = c.object_id,\r\n comment = c.comment,\r\n user = c.user,\r\n date_submitted = c.submit_date,\r\n date_modified = c.submit_date,\r\n date_approved = c.submit_date,\r\n is_public = c.is_public,\r\n ip_address = c.ip_address,\r\n is_approved = not c.is_removed\r\n )\r\n new.save()", "def test_print_comments():\n flat_comments, tree_comments = get_comments_from_submission_id('jrjn70')\n print(len(flat_comments))\n print(len(tree_comments))\n\n print('flat comments')\n for c in flat_comments[0:5]:\n comment_instance = REDDIT.comment(c)\n print(comment_instance.body)\n\n print()\n print('tree comments')\n for c in tree_comments[0:5]:\n comment_instance = REDDIT.comment(c)\n print(comment_instance.body)", "def iterateComments(db, post_id):\n c=db.cursor()\n c.execute(\"\"\"SELECT * FROM comments WHERE post_id=%d\"\"\" % post_id)\n for comment in c.fetchall():\n yield Comment(answer)\n c.close()", "def all_comments_by_docket_id(docket_id,\n sort_by='postedDate', sort_order='ASC'):\n # Determine total number of public submissions in docket.\n params = {'docket_id': docket_id, 'document_type': 'PS'}\n total_records = RegulationDocumentSearch.number_of_records(**params)\n\n # Use the maximum page size to download all public submissions.\n documents = []\n for page in range(total_records // 1000 + 1):\n parameters = {\n 'docket_id': docket_id,\n 'document_type': 'PS',\n 'results_per_page': 1000,\n 'offset': page * 1000,\n 'sort_by': sort_by,\n 'sort_order': sort_order\n }\n response = 
RegulationDocumentSearch.by_docket_id(**parameters)\n documents.extend(response['documents'])\n\n return documents", "def _commentsInThisFunction(self):\n show_unique_c = self.config.display_unique_comments\n\n msg = \"Searching comments within function '\" + misc.get_function_name() + \"'\"\n self._console_output(msg)\n\n comment_list = self.ba.comments_in_function()\n\n # Found any comment at all?\n nrows = len(comment_list)\n if not nrows:\n self._console_output(\"[!] No comments found\", err = True)\n return\n\n self.table.setColumnCount(2)\n self.table_label.setText(\"Comments within current function\")\n self.table.setHorizontalHeaderLabels((\"Address\", \"Comments\"))\n self.table.clearContents()\n self.table.setRowCount(0)\n\n # Fill with contents\n displayed_comments = []\n\n idx = 0\n for (addr, comment) in comment_list:\n if show_unique_c and comment in displayed_comments:\n continue\n\n displayed_comments.append(comment)\n\n self.table.insertRow(idx)\n addr_item = QTableWidgetItem(\"%08x\" % addr)\n addr_item.setFlags(addr_item.flags() ^ QtCore.Qt.ItemIsEditable)\n comment_item = QTableWidgetItem(comment)\n\n self.table.setItem(idx, 0, addr_item)\n self.table.setItem(idx, 1, comment_item)\n\n idx += 0", "def tick(self):\n\n # Get new comments from /r/all\n print('\\n\\nRetrieving comments...', end=\"\")\n comments = list(self.reddit.get_comments('all', limit=None))\n print('[DONE]')\n\n comment_count = comments.__len__()\n print('Comments to read: ' + str(comment_count))\n for i in range(0, comment_count):\n comment = comments[i]\n\n # Update percent counter\n pcent = i / float(comment_count) * 100\n print('\\rReading comments: [%d%%]' % pcent, end=\"\")\n time.sleep(0.1)\n\n # Parse words\n words = comment.body.split()\n permalink = None\n for word in words:\n if word.startswith('/u/'):\n\n # Get the redditor\n redditor = self.parse_redditor(word)\n if redditor is None:\n continue\n\n # Check to see if we've parsed this comment already\n permalink = comment.permalink\n if permalink in self.already_done:\n print('Comment was already read.')\n break\n\n # Notify the mentioned redditor\n self.notify('comment', redditor, permalink, comment.body, comment.author.name)\n self.record_mention(redditor.name, 'comment')\n\n # permalink will not be None if a user was notified\n if permalink is not None:\n self.already_done.append(permalink)\n\n # Wait 30 seconds\n print('')\n util.wait(30)", "def comments(self, q=None, sort=None):\n params = {}\n if sort is not None:\n params[\"sort\"] = sort\n if q is not None:\n params[\"q\"] = q\n for comment in self._get_paged(\"comments\", params=params):\n yield Comment(comment, **self._new_session_args)", "def get_all_comments_mp(self, list: Union[List, str], workers: int = 10) -> None:\n # loads pickle list if it is one\n if type(list) == str and \".pickle\" in list:\n print(\"wat\")\n with open(list, \"rb\") as f:\n list = pickle.load(f)\n elif type(list) == str:\n print(\"Error: Buglist parameter seems to be neither a List object or the name of a pickle file \"\n \"(needs to contain .pickle).\")\n\n #gets workers and splits list into chunks fitting the worker amount\n pool = Pool(workers)\n list = np.array(list)\n lists = np.array_split(list, workers)\n\n #each worker crawls for comments\n for sub_list in lists:\n print(sub_list)\n pool.apply_async(self.get_all_comments, (sub_list,))\n\n pool.close()\n pool.join()", "def fetch_comments(item):\n # pylint: disable=R0912\n # pylint: disable=R0914\n cw, ch, _ = getxy()\n ch = max(ch, 10)\n ytid, 
title = item.ytid, item.title\n dbg(\"Fetching comments for %s\", c.c(\"y\", ytid))\n writestatus(\"Fetching comments for %s\" % c.c(\"y\", title[:55]))\n qs = {'textFormat': 'plainText',\n 'videoId': ytid,\n 'maxResults': 50,\n 'part': 'snippet'}\n\n # XXX should comment threads be expanded? this would require\n # additional requests for comments responding on top level comments\n\n jsdata = call_gdata('commentThreads', qs)\n\n coms = jsdata.get('items', [])\n coms = [x.get('snippet', {}) for x in coms]\n coms = [x.get('topLevelComment', {}) for x in coms]\n # skip blanks\n coms = [x for x in coms if len(x.get('snippet', {}).get('textDisplay', '').strip())]\n if not len(coms):\n g.message = \"No comments for %s\" % item.title[:50]\n g.content = generate_songlist_display()\n return\n\n items = []\n\n for n, com in enumerate(coms, 1):\n snippet = com.get('snippet', {})\n poster = snippet.get('authorDisplayName')\n _, shortdate = yt_datetime(snippet.get('publishedAt', ''))\n text = snippet.get('textDisplay', '')\n cid = (\"%s/%s\" % (n, len(coms)))\n out = (\"%s %-35s %s\\n\" % (cid, c.c(\"g\", poster), shortdate))\n out += c.c(\"y\", text.strip())\n items.append(out)\n\n cw = Config.CONSOLE_WIDTH.get\n\n def plain(x):\n \"\"\" Remove formatting. \"\"\"\n return x.replace(c.y, \"\").replace(c.w, \"\").replace(c.g, \"\")\n\n def linecount(x):\n \"\"\" Return number of newlines. \"\"\"\n return sum(1 for char in x if char == \"\\n\")\n\n def longlines(x):\n \"\"\" Return number of oversized lines. \"\"\"\n return sum(len(plain(line)) // cw for line in x.split(\"\\n\"))\n\n def linecounter(x):\n \"\"\" Return amount of space required. \"\"\"\n return linecount(x) + longlines(x)\n\n pagenum = 0\n pages = paginate(items, pagesize=ch, delim_fn=linecounter)\n\n while 0 <= pagenum < len(pages):\n pagecounter = \"Page %s/%s\" % (pagenum + 1, len(pages))\n page = pages[pagenum]\n pagetext = (\"\\n\\n\".join(page)).strip()\n content_length = linecount(pagetext) + longlines(pagetext)\n blanks = \"\\n\" * (-2 + ch - content_length)\n g.content = pagetext + blanks\n screen_update(fill_blank=False)\n xprint(\"%s : Use [Enter] for next, [p] for previous, [q] to return:\"\n % pagecounter, end=\"\")\n v = input()\n\n if v == \"p\":\n pagenum -= 1\n\n elif not v:\n pagenum += 1\n\n else:\n break\n\n g.content = generate_songlist_display()", "def check_comments():\n\n # Get the id of the group track\n try:\n group_track = soundcloud.get('/me/tracks')[config.post_track_id]\n except HTTPError as e:\n if e.response.status_code == 404:\n logging.critical('Cannot find a track with id %d. 
Please, fix post_track_id in config.py', config.post_track_id)\n sys.exit(1)\n else:\n raise\n\n # Get the comment list for the group track\n comments = soundcloud.get('/tracks/%d/comments' % group_track.id)\n if not comments:\n logging.info('Nothing found...')\n return\n \n # Process each comment and delete it\n for comment in reversed(comments): \n logging.info('Processing a comment by user %d (%s): %s', comment.user_id, comment.user['username'], comment.body)\n response = None\n \n # Try to process the comment\n try:\n response = process_comment(comment)\n except HTTPError as e:\n if e.response.status_code == 429:\n logging.exception('Failed to repost track: too many requests:')\n return\n elif e.response.status_code // 100 == 4:\n logging.exception('Failed to process comment due to a client request error:')\n else:\n raise\n except Exception as e: # Program crash\n logging.exception('Failed to process comment:')\n else:\n if response:\n logging.info('The comment would have this response: %s', response) \n else:\n logging.info('Comment processed successfully')\n \n # Delete the processed comment\n try:\n soundcloud.delete('/tracks/' + str(group_track.id) + '/comments/' + str(comment.id))\n except HTTPError as e:\n if e.response.status_code == 404:\n logging.warning('Comment already deleted')\n else:\n raise\n\n if config.use_advanced_description and should_update_description:\n update_description()", "def problem_comments(self, identifier):\n return self._get(\"problems/%d/comments\" % identifier).json()", "def get_comments(self, project_id, forum_id, param=None):\n url = base_url + 'portal/' + str(self.portal_id) + '/projects/' + str(project_id) + '/forums/' + str(forum_id) + '/comments/'\n response = zoho_http_client.get(url, self.details, param)\n return parser.get_comments(response)", "def crawl(self):\n try:\n self.crawl_pages()\n self.crawl_posts()\n self.crawl_comments()\n except Exception as exception:\n self.handle_request_limit(exception)", "def test_get_comments_from_submission():\n # gets a test submission\n threads = list(get_submissions(TEST_SUBREDDIT, TEST_START_DATE, TEST_END_DATE, TEST_MAX))\n submission_id = threads[0].d_['id']\n\n # prints link to thread\n thread_full_link = threads[0].d_['full_link']\n print(thread_full_link)\n\n # prints submission title\n thread_title = threads[0].d_['title']\n print(thread_title)\n\n submission = get_comments_from_submission(submission_id)\n for top_level_comment in submission.comments:\n print(top_level_comment.body)", "def calc_comments(self):\n for comment in self.pull_request.get_comments():\n self._users.add(comment.user.login)\n lowercase_body = comment.body.lower()\n if \"protm\" in lowercase_body:\n self.num_protm += 1\n self.num_comments += 1\n if comment.body is not None:\n self.len_comments += len(comment.body)\n for reaction in comment.get_reactions():\n self._users.add(reaction.user.login)\n self.comment_reactions += 1", "def watch2():\n\tcomments = r.get_comments('all', limit=None)\n\tfor comment in comments:\n\t\tif comment in visited:\n\t\t\tcontinue\n\t\telse:\n\t\t\tvisited[comment] = 1\n\t\t\tif \"LexiconBot define\" in comment.body:\n\t\t\t\tprint comment, \"from\", comment.permalink, \" / \", comment.submission\n\t\t\t\tmsg = define(comment.body.split()[2])\n\t\t\t\tcomment.reply(msg)\n\n\tprint \"Sleeping...\"\n\tsleep(1)", "def get_comments_from_submission_id(submission_id):\n flat_comments = []\n tree_comments = []\n\n submission = (REDDIT.submission(id=submission_id))\n print(submission.num_comments)\n 
print(submission.shortlink)\n\n # sort comments by best and get the flattened list\n submission.comment_sort = 'confidence'\n\n # tree comments traversal\n submission.comments.replace_more(limit=1)\n for comm in submission.comments.list():\n tree_comments.append(comm)\n\n flat_comments = list(submission.comments)\n\n return flat_comments, tree_comments", "def all_user_comments(username):\n return commentslist", "def get_comments(self):\n raise NotImplementedError", "def comments_by_id(self, repository_id, access_token=None):\n return self._complete_request_by_id(\n repository_id, \"comments\", access_token)", "def get_comments(self, project, story):\n ret_val = []\n resource = \"projects/{0:d}/stories/{1:d}/comments\".format(project.id,\n story.id)\n params = {\"fields\": Comment.FIELDS}\n comments = self._request(\"get\", resource, params=params)\n\n for comment in comments:\n ret_val.append(Comment(comment))\n\n return ret_val", "def get_comments(convo_ID):\n # Make API request\n url = \"https://api2.frontapp.com/conversations/\" + convo_ID + \"/comments\"\n payload = {}\n headers = {\"Authorization\": BEARER_TOKEN}\n response = requests.request(\"GET\", url, headers=headers, data=payload)\n for comment in response.json()[\"_results\"]:\n # For each comment in Front, print out its message\n print_friendly_JSON_object(comment[\"body\"])" ]
[ "0.64830816", "0.6414829", "0.64109683", "0.6044062", "0.5997073", "0.5927429", "0.5870754", "0.58518946", "0.57999045", "0.5750483", "0.5748707", "0.5685098", "0.5681446", "0.5678411", "0.5673924", "0.56592536", "0.5652936", "0.5610223", "0.5589951", "0.55766493", "0.55682576", "0.5560317", "0.5557409", "0.5521636", "0.5514581", "0.5503788", "0.5498528", "0.5495697", "0.54768056", "0.5469559" ]
0.7094204
0
Crawls for all comments belonging to the bugs in the BugIDList utilizing parallelization.
def get_all_comments_mp(self, list: Union[List, str], workers: int = 10) -> None: # loads pickle list if it is one if type(list) == str and ".pickle" in list: print("wat") with open(list, "rb") as f: list = pickle.load(f) elif type(list) == str: print("Error: Buglist parameter seems to be neither a List object or the name of a pickle file " "(needs to contain .pickle).") #gets workers and splits list into chunks fitting the worker amount pool = Pool(workers) list = np.array(list) lists = np.array_split(list, workers) #each worker crawls for comments for sub_list in lists: print(sub_list) pool.apply_async(self.get_all_comments, (sub_list,)) pool.close() pool.join()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all_comments(self, idList: Union[List, str]) -> None:\n\n #loads pickle list if it is one\n if type(idList) == str and \".pickle\" in idList:\n print(\"pickle load\")\n with open(idList, \"rb\") as f:\n idList = pickle.load(f)\n elif type(idList) == str:\n print(\"Error: Buglist parameter seems to be neither a List object or the name of a pickle file \"\n \"(needs to contain .pickle).\")\n\n #goes through idList\n for id in tqdm(idList):\n #performs request and replaces trouble some parts\n commentsString = self.session.get(self.commentURL.format(id)).text.\\\n replace('true', 'True').replace('false', 'False').replace('null', 'None')\n #gets only the comments\n commentsDict = ast.literal_eval(commentsString)[\"bugs\"][str(id)][\"comments\"]\n\n #enters comments into db or file if there are any comments for the id\n if commentsDict:\n if self.mongoDB:\n self.mongoDB[\"Comments\"].insert_many(commentsDict)\n if self.folder:\n with open(self.folderpath + \"Bugzilla_Comments.txt\", 'a') as f:\n f.write(str(commentsDict) + \"\\n\")", "def get_all_bugs(self) -> List:\n #starting point\n offset = 0\n #list for all bugs\n resultBugList = []\n #list for bug IDs\n bugIDList = []\n #checks if there are still results returned\n notEmpty = True\n\n #queries in 500 bug steps until the result list is empty\n while notEmpty:\n print(\"entered\")\n #interpretation of result as list plus formatting for eval errors\n result = ast.literal_eval(self.session.get(self.bugURL + \"&offset=\" + str(offset)).text.\n replace('true', 'True').replace('false', 'False').replace('null', 'None'))[\"bugs\"]\n #checks if the query needs to be set again with a new offset\n if result:\n resultBugList += result\n else:\n notEmpty = False\n\n #gets the ID out of all comments\n partList = [bug[\"id\"] for bug in result]\n bugIDList += partList\n #sets new starting point\n offset += 500\n\n #inserts bug ids and bugs into db if given one\n if self.mongoDB:\n for id in bugIDList:\n self.mongoDB[\"BugIDs\"].insert_one({\"ID\": id})\n self.mongoDB[\"BugsData\"].insert_many(resultBugList)\n\n #creates files for bug ids and bugs if given a folder\n if self.folder:\n #saves bug list as python object\n with open(self.folderpath + \"bugIDListP.pickle\", \"wb\") as a:\n pickle.dump(bugIDList, a)\n #saves bug list as csv\n with open(self.folderpath + \"bugIDList.csv\", \"w\") as b:\n for id in bugIDList:\n b.write(str(id) + \"\\n\")\n with open(self.folderpath + \"bugsData.txt\", \"w\") as c:\n for bug in resultBugList:\n c.write(str(bug) + \"\\n\")\n\n #returns List Object for further processing\n return(bugIDList)", "async def scrape_comments(self):\n\n subreddit_origin = await self.reddit.subreddit(self.subreddit)\n\n comment_count = 0\n async for comment in subreddit_origin.comments(limit=self.limit):\n if self.memory.contains(comment.id):\n continue\n\n self.memory.add(comment.id)\n\n # Parse Comment\n comment = self.parse_comment(comment)\n\n # Save in Pub/Sub\n if self.enable_publish:\n self.publish(comment)\n\n comment_count += 1\n\n return comment_count", "def _get_comments(**kwargs):\r\n\r\n # Log in to get cookies.\r\n cookies = _login(**kwargs)\r\n\r\n if 'r' not in kwargs:\r\n # This is the first comments request.\r\n # Make the comments request and set an empty list.\r\n kwargs['r'] = requests.get('https://news.ycombinator.com/threads?id=%s' % kwargs['args'].username,\r\n cookies=cookies)\r\n\r\n # Check to make sure we have a good response.\r\n if not _good_response(**kwargs):\r\n kwargs.pop('r')\r\n return 
_get_comments(**kwargs)\r\n\r\n kwargs['comments'] = []\r\n\r\n # Grab the comments.\r\n J = pq(kwargs['r'].content)\r\n comments = J('table table td.default')\r\n\r\n for c in comments:\r\n\r\n comment = _sanitize_comment(J, c)\r\n\r\n if kwargs['args'].no_owner and comment['user'] == kwargs['args'].username:\r\n continue\r\n\r\n # Add the comment to the saved list.\r\n kwargs['comments'].append({\r\n 'user': comment['user'],\r\n 'comment': comment['comment'],\r\n 'reply': comment['reply'],\r\n 'points': comment['points'],\r\n 'link': comment['link'],\r\n 'parent': comment['parent'],\r\n 'story': comment['story'],\r\n 'date': comment['date'],\r\n })\r\n\r\n # If we're getting all comments.\r\n if kwargs['args'].all:\r\n\r\n # Find the 'More' link and load it.\r\n last = J('a', J('table table tr td.title:last'))\r\n if last.text() == 'More':\r\n kwargs['r'] = requests.get('https://news.ycombinator.com%s' % last.attr('href'),\r\n cookies=cookies)\r\n\r\n # Check to make sure we have a good response.\r\n if not _good_response(**kwargs):\r\n kwargs.pop('r')\r\n return _get_comments(**kwargs)\r\n\r\n # Call this function again, this time with the new list.\r\n return _get_comments(**kwargs)\r\n\r\n return kwargs['comments']", "def crawl(self):\n try:\n self.crawl_pages()\n self.crawl_posts()\n self.crawl_comments()\n except Exception as exception:\n self.handle_request_limit(exception)", "def handle_comments(self):\r\n comments = Comment.objects.all()\r\n for c in comments:\r\n new = ThreadedComment(\r\n content_type = c.content_type,\r\n object_id = c.object_id,\r\n comment = c.comment,\r\n user = c.user,\r\n date_submitted = c.submit_date,\r\n date_modified = c.submit_date,\r\n date_approved = c.submit_date,\r\n is_public = c.is_public,\r\n ip_address = c.ip_address,\r\n is_approved = not c.is_removed\r\n )\r\n new.save()", "def get_comments(yt_id):\n\n client = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,developerKey=DEVELOPER_KEY)\n\n video_comments = client.commentThreads().list(\n videoId = yt_id,\n part=\"snippet,replies\").execute()\n\n comment_items = video_comments['items']\n\n class MLStripper(HTMLParser):\n def __init__(self):\n self.reset()\n self.strict = False\n self.convert_charrefs= True\n self.fed = []\n def handle_data(self, d):\n self.fed.append(d)\n def get_data(self):\n return ''.join(self.fed)\n\n def strip_tags(html):\n s = MLStripper()\n s.feed(html)\n return s.get_data()\n\n comments = []\n for sub_block in comment_items:\n comments.append(strip_tags(sub_block['snippet']['topLevelComment']['snippet']['textDisplay']))\n\n comments_all = ' '.join(comments)\n\n print(\"YouTube comments scanned\")\n return comments_all", "def fetch_comments(item):\n # pylint: disable=R0912\n # pylint: disable=R0914\n cw, ch, _ = getxy()\n ch = max(ch, 10)\n ytid, title = item.ytid, item.title\n dbg(\"Fetching comments for %s\", c.c(\"y\", ytid))\n writestatus(\"Fetching comments for %s\" % c.c(\"y\", title[:55]))\n qs = {'textFormat': 'plainText',\n 'videoId': ytid,\n 'maxResults': 50,\n 'part': 'snippet'}\n\n # XXX should comment threads be expanded? 
this would require\n # additional requests for comments responding on top level comments\n\n jsdata = call_gdata('commentThreads', qs)\n\n coms = jsdata.get('items', [])\n coms = [x.get('snippet', {}) for x in coms]\n coms = [x.get('topLevelComment', {}) for x in coms]\n # skip blanks\n coms = [x for x in coms if len(x.get('snippet', {}).get('textDisplay', '').strip())]\n if not len(coms):\n g.message = \"No comments for %s\" % item.title[:50]\n g.content = generate_songlist_display()\n return\n\n items = []\n\n for n, com in enumerate(coms, 1):\n snippet = com.get('snippet', {})\n poster = snippet.get('authorDisplayName')\n _, shortdate = yt_datetime(snippet.get('publishedAt', ''))\n text = snippet.get('textDisplay', '')\n cid = (\"%s/%s\" % (n, len(coms)))\n out = (\"%s %-35s %s\\n\" % (cid, c.c(\"g\", poster), shortdate))\n out += c.c(\"y\", text.strip())\n items.append(out)\n\n cw = Config.CONSOLE_WIDTH.get\n\n def plain(x):\n \"\"\" Remove formatting. \"\"\"\n return x.replace(c.y, \"\").replace(c.w, \"\").replace(c.g, \"\")\n\n def linecount(x):\n \"\"\" Return number of newlines. \"\"\"\n return sum(1 for char in x if char == \"\\n\")\n\n def longlines(x):\n \"\"\" Return number of oversized lines. \"\"\"\n return sum(len(plain(line)) // cw for line in x.split(\"\\n\"))\n\n def linecounter(x):\n \"\"\" Return amount of space required. \"\"\"\n return linecount(x) + longlines(x)\n\n pagenum = 0\n pages = paginate(items, pagesize=ch, delim_fn=linecounter)\n\n while 0 <= pagenum < len(pages):\n pagecounter = \"Page %s/%s\" % (pagenum + 1, len(pages))\n page = pages[pagenum]\n pagetext = (\"\\n\\n\".join(page)).strip()\n content_length = linecount(pagetext) + longlines(pagetext)\n blanks = \"\\n\" * (-2 + ch - content_length)\n g.content = pagetext + blanks\n screen_update(fill_blank=False)\n xprint(\"%s : Use [Enter] for next, [p] for previous, [q] to return:\"\n % pagecounter, end=\"\")\n v = input()\n\n if v == \"p\":\n pagenum -= 1\n\n elif not v:\n pagenum += 1\n\n else:\n break\n\n g.content = generate_songlist_display()", "def ordered_crawling():\n queue.append(seed_url)\n visited.add(seed_url)\n while len(queue) >= 0:\n try:\n text = req_obj.get_html_text(queue[0])\n print queue[0]\n if text is None:\n raise requests.RequestException()\n add_links_to_queue(text, queue[0])\n # summary generated using summarizer1\n sum_obj.create_and_index_summary(\n req_obj.get_base_url(), text)\n\n # summary generated using summarizer2\n sum_obj2.create_and_index_summary(\n req_obj.get_base_url(), text)\n on_pg_sum.index_on_page_summary(text, queue[0])\n\n result_file.write(str(queue[0]) + \", \" + str(link_weights[queue[0]]))\n er_file.write(\"###########\" + str(link_weights) + \"\\n\\n\\n\\n\")\n update_weights(text)\n queue.sort(compare)\n result_file.write(\"\\n\")\n except requests.RequestException as trace:\n print str(trace) + '\\n'\n er_file.write(queue[0] + '\\n')\n er_file.write(str(trace) + '\\n\\n')\n del link_weights[queue[0]]\n queue.pop(0)", "def run(self):\n comment_df_list = []\n post_df_list = []\n subreddit_df_list = []\n\n reddit = sr.reddit_interface()\n subreddits = reddit.subreddits.popular(limit = SUBREDDIT_LIMIT) # Lists the top 50 subreddits\n\n for subreddit in subreddits:\n top_posts = reddit.subreddit(str(subreddit)).top()\n for post in top_posts:\n if not post.stickied:\n post_list = [post.id, str(post.subreddit), post.title, post.num_comments]\n post.comments.replace_more(limit = 0)\n for comment in post.comments.list():\n comment_list = [str(comment.parent()), 
comment.id, comment.body, int(comment.score)]\n comment_df_list.append(comment_list)\n post_df_list.append(post_list)\n subreddit_df_list.append([str(subreddit)])\n\n comment_df_list = pd.DataFrame(comment_df_list, columns = COMMENTS_COLUMNS)\n post_df_list = pd.DataFrame(post_df_list, columns = POSTS_COLUMNS)\n subreddit_df_list = pd.DataFrame(subreddit_df_list, columns =['Subreddit'])\n reddit_df = [subreddit_df_list, post_df_list, comment_df_list]\n sr.save_xlsx(reddit_df, self.output().path)", "def run(self):\n\n # The url is too deep, skip the url.. Work is done!\n if self.depth_ > self.depth:\n return\n\n # Get doc id corresponds to the url. Add a new entry into doc index if there is no entry.\n doc_id = self.crawler.document_id(self.curr_url)\n\n # Check if the doc_id has been visited/processed by any of crawler_threads. Add doc_id to seen if not so.\n if self.crawler.checkDocVisitedAndUpdate(doc_id):\n return\n\n # Process the document corresponds to the url\n socket = None\n try:\n socket = urllib2.urlopen(self.curr_url, timeout=self.timeout)\n soup = BeautifulSoup(socket.read())\n self._curr_depth = self.depth_ + 1\n self._curr_doc_id = doc_id\n # Traverse the document as deep as possible and add those newly discovered urls into url queue\n self._index_document(soup)\n # Store (wordId, docId) and (word, url) into inverted_index and resolved_inverted_index respectively.\n self.crawler._add_words_to_document(self._curr_words, self._curr_doc_id)\n except:\n pass\n finally:\n if socket:\n socket.close()", "def tick(self):\n\n # Get new comments from /r/all\n print('\\n\\nRetrieving comments...', end=\"\")\n comments = list(self.reddit.get_comments('all', limit=None))\n print('[DONE]')\n\n comment_count = comments.__len__()\n print('Comments to read: ' + str(comment_count))\n for i in range(0, comment_count):\n comment = comments[i]\n\n # Update percent counter\n pcent = i / float(comment_count) * 100\n print('\\rReading comments: [%d%%]' % pcent, end=\"\")\n time.sleep(0.1)\n\n # Parse words\n words = comment.body.split()\n permalink = None\n for word in words:\n if word.startswith('/u/'):\n\n # Get the redditor\n redditor = self.parse_redditor(word)\n if redditor is None:\n continue\n\n # Check to see if we've parsed this comment already\n permalink = comment.permalink\n if permalink in self.already_done:\n print('Comment was already read.')\n break\n\n # Notify the mentioned redditor\n self.notify('comment', redditor, permalink, comment.body, comment.author.name)\n self.record_mention(redditor.name, 'comment')\n\n # permalink will not be None if a user was notified\n if permalink is not None:\n self.already_done.append(permalink)\n\n # Wait 30 seconds\n print('')\n util.wait(30)", "def crawl(self):\n if os.path.exists(self.__work_path):\n shutil.rmtree(self.__work_path)\n print '\\nOld Data Was Found And Removed.\\n'\n\n initial_first_run = True\n initial_recursion_depth = 0\n initial_prev_link_size = 0\n for url in self.__urls:\n self.__start_recursion(url, initial_first_run,\n initial_recursion_depth, initial_prev_link_size)\n\n Crawler.mission_report(self.__work_path)", "def main(u, o):\n click.echo(f\"Web crawling on {u} started successfully...\")\n\n comment_regex = re.compile('<!--(.*?-->)')\n\n with requests.Session() as session:\n resp = session.get(u)\n soup = BeautifulSoup(resp.text, 'lxml')\n #TODO: search for hidden attributes, may be useful\n comments = soup.find_all(text=comment_regex)\n print(comments)", "def main():\n global collection\n #args = 
argparse.ArgumentParser()\n #args.add_argument('directory', help='Directory in which the files'\n #'are stored.')\n #args.add_argument('collection', help='The collection to use.')\n #parser = args.parse_args()\n collection = get_collection()\n #documents = glob.glob('*.asm')\n documents = collection.find()\n num_cores = multiprocessing.cpu_count()\n print('Running code on %d processors' % num_cores)\n Parallel(n_jobs=num_cores)(\\\n delayed(save_comments)(doc) for doc in documents)", "def iterateComments(db, post_id):\n c=db.cursor()\n c.execute(\"\"\"SELECT * FROM comments WHERE post_id=%d\"\"\" % post_id)\n for comment in c.fetchall():\n yield Comment(answer)\n c.close()", "def _get_comments(self, issue_id):\n data = self._get(\"/issues/{}/comments\".format(issue_id))\n comments = []\n for item in data:\n comments.append(\n Comment(item['user']['login'], item['body'])\n )\n return comments", "def calc_comments(self):\n for comment in self.pull_request.get_comments():\n self._users.add(comment.user.login)\n lowercase_body = comment.body.lower()\n if \"protm\" in lowercase_body:\n self.num_protm += 1\n self.num_comments += 1\n if comment.body is not None:\n self.len_comments += len(comment.body)\n for reaction in comment.get_reactions():\n self._users.add(reaction.user.login)\n self.comment_reactions += 1", "def test_print_comments():\n flat_comments, tree_comments = get_comments_from_submission_id('jrjn70')\n print(len(flat_comments))\n print(len(tree_comments))\n\n print('flat comments')\n for c in flat_comments[0:5]:\n comment_instance = REDDIT.comment(c)\n print(comment_instance.body)\n\n print()\n print('tree comments')\n for c in tree_comments[0:5]:\n comment_instance = REDDIT.comment(c)\n print(comment_instance.body)", "def analyze_comments():\n\n scores = {} # {docket_id: [comment1_score, comment2_score, ...]}\n positive_counts = {} # {docket_id: num_positive_comments}\n neutral_counts = {} # {docket_id: num_neutral_comments}\n negative_counts = {} # {docket_id: num_negative_comments}\n\n comment_sentiments = {} # {comment_id: sentiment} to write to database\n comment_complexity = {} # {comment_id: complexity} to write to database\n\n for comment in lib.mongo.retrieve_comments(1000):\n docket_id = comment['docketId']\n comment_id = comment['documentId']\n text = comment.get('commentText', '').strip()\n\n # Fill in the 'sentiment' field of this comment.\n if 'sentiment' in comment:\n score = comment['sentiment']\n else:\n score = lib.analyze_text.getSentiment(text)\n comment_sentiments[comment_id] = score\n\n logging.info('docket %s, comment %s: sentiment %s (%r)' %\n (docket_id, comment_id, score, text[:20]))\n\n # Fill in the 'complexity' field of this comment.\n if 'complexity' not in comment:\n comment_complexity[comment_id] = lib.analyze_text.get_complexity(text)\n\n # Aggregate the sentiment scores for each docket.\n scores.setdefault(docket_id, []).append(score)\n counts = positive_counts if score > 0 else (\n negative_counts if score < 0 else neutral_counts)\n counts[docket_id] = counts.get(docket_id, 0) + 1\n\n if len(comment_sentiments) >= 10:\n logging.info('updating %d comments sentiment...' % len(comment_sentiments))\n lib.mongo.update_comments('sentiment', comment_sentiments)\n comment_sentiments = {}\n\n if len(comment_complexity) >= 10:\n logging.info('updating %d comments complexity...' % len(comment_complexity))\n lib.mongo.update_comments('complexity', comment_complexity)\n comment_complexity = {}\n\n logging.info('updating %d comments...' 
% len(comment_sentiments))\n lib.mongo.update_comments('sentiment', comment_sentiments)\n lib.mongo.update_comments('complexity', comment_complexity)\n logging.info('done!')\n\n docket_sentiments = {} # {docket_id: sentiment} to write to database\n\n for docket in lib.mongo.dockets.find():\n docket_id = docket.get('docketId', '')\n positive_count = positive_counts.get(docket_id, 0)\n neutral_count = neutral_counts.get(docket_id, 0)\n negative_count = negative_counts.get(docket_id, 0)\n rating = compute_rating(positive_count, neutral_count, negative_count)\n logging.info('docket %s: %d positive, %d neutral, %d negative - %s' %\n (docket_id, positive_count, neutral_count, negative_count,\n rating))\n\n docket_sentiments[docket_id] = {\n 'positive': positive_count,\n 'neutral': neutral_count,\n 'negative': negative_count,\n 'rating': rating\n }\n\n logging.info('updating %d dockets...' % len(docket_sentiments))\n lib.mongo.update_dockets('sentiment', docket_sentiments)\n logging.info('done!')", "def _crawl(self, data: list) -> list:\n \n\n self.smph = th.Semaphore(self.MAX_THREADS)\n self.miss_lock = th.Lock()\n self.data_lock = th.Lock()\n\n total = len(data)\n\n with Progress(total) as self.prog:\n for url, *args in data:\n self.smph.acquire()\n new_thread = th.Thread(target=self._crawl_this, args=(url, *args))\n new_thread.start()\n self.prog.wait()\n\n return self._miss", "def test_get_comments_from_submission():\n # gets a test submission\n threads = list(get_submissions(TEST_SUBREDDIT, TEST_START_DATE, TEST_END_DATE, TEST_MAX))\n submission_id = threads[0].d_['id']\n\n # prints link to thread\n thread_full_link = threads[0].d_['full_link']\n print(thread_full_link)\n\n # prints submission title\n thread_title = threads[0].d_['title']\n print(thread_title)\n\n submission = get_comments_from_submission(submission_id)\n for top_level_comment in submission.comments:\n print(top_level_comment.body)", "async def crawl(self):\n fetch_urls = [self.start_url]\n results = []\n while len(fetch_urls):\n \"\"\"\n slicing array urls with max_async_call arg and then run extract_data_urls\n extract_data_urls return a object that contains url, data, found_urls, and all_urls\n url is a url that we crawled\n data is Html content of the url\n found_urls are new urls that we have to crawl that\n all_urls are all links in the html page\n \"\"\"\n urls = await self.extract_data_urls(fetch_urls[0:self.max_async_call])\n del fetch_urls[0:self.max_async_call]\n for url, data, found_urls, all_urls in urls:\n fetch_urls.extend(found_urls)\n result = self.parse_html_content(data)\n result['urls'] = all_urls\n results.append((url, result))\n return results", "def _get_comments(self):\n if not hasattr(self, 'id'):\n raise BadReference('No matching issue on disk')\n return filter(lambda x: len(x) == 40, os.listdir(self.paths['comments']))", "def all_comments_by_docket_id(docket_id,\n sort_by='postedDate', sort_order='ASC'):\n # Determine total number of public submissions in docket.\n params = {'docket_id': docket_id, 'document_type': 'PS'}\n total_records = RegulationDocumentSearch.number_of_records(**params)\n\n # Use the maximum page size to download all public submissions.\n documents = []\n for page in range(total_records // 1000 + 1):\n parameters = {\n 'docket_id': docket_id,\n 'document_type': 'PS',\n 'results_per_page': 1000,\n 'offset': page * 1000,\n 'sort_by': sort_by,\n 'sort_order': sort_order\n }\n response = RegulationDocumentSearch.by_docket_id(**parameters)\n 
documents.extend(response['documents'])\n\n return documents", "def test_issue_get_comments(self):\n pass", "def watch2():\n\tcomments = r.get_comments('all', limit=None)\n\tfor comment in comments:\n\t\tif comment in visited:\n\t\t\tcontinue\n\t\telse:\n\t\t\tvisited[comment] = 1\n\t\t\tif \"LexiconBot define\" in comment.body:\n\t\t\t\tprint comment, \"from\", comment.permalink, \" / \", comment.submission\n\t\t\t\tmsg = define(comment.body.split()[2])\n\t\t\t\tcomment.reply(msg)\n\n\tprint \"Sleeping...\"\n\tsleep(1)", "def get_event_data(self, ):\n \n if os.path.exists(f\"{self.cdp_dump_path}/{CDPConfigValues.project_issue_list_file_name}\"):\n self.bug_data_frame = pd.read_csv(f\"{self.cdp_dump_path}/{CDPConfigValues.project_issue_list_file_name}\")\n else:\n self.bug_data_frame = self.get_bug_data()\n self.closed_bug_data_frame = self.bug_data_frame[self.bug_data_frame['STATE'] == 'closed']\n self.closed_bug_data_frame = self.closed_bug_data_frame.reset_index()\n\n self.event_data_frame = self.closed_bug_data_frame[[\"ISSUE_ID\", \"CREATED_TIMESTAMP\", \"UPDATED_TIMESTAMP\"]]\n\n \"\"\"Fetch the Bug Id's from the data frame\"\"\"\n list_of_issues = self.closed_bug_data_frame['ISSUE_ID'].tolist()\n\n \"\"\"using the Bugs Id list create event url list\"\"\"\n url_list = Utilities.format_url(self.event_url, list_of_issues)\n start_time = time.time()\n\n results = self.web_connection.get_async_data_using_asyncio(url_list, self.web_constants,\n batch_size=CDPConfigValues.git_api_batch_size)\n\n list_of_buggy_commits = results[0]\n failed_urls = results[1]\n loop_counter = 1\n\n while len(failed_urls) > 0:\n time.sleep(60 * loop_counter)\n print(f\"Total Failed URL's re-trying {len(failed_urls)}\")\n results = self.web_connection.get_async_data_using_asyncio(failed_urls, self.web_constants,\n batch_size=CDPConfigValues.git_api_batch_size // 2)\n failed_urls = results[1]\n list_of_buggy_commits = list_of_buggy_commits + results[0]\n end_time = time.time()\n print(\"Parallel time taken to get all event data using (asyncio) =\", end_time - start_time)\n\n list_of_buggy_commits = pd.DataFrame(list_of_buggy_commits, columns=[\"ISSUE_ID\", \"JSON_RESPONSE\"])\n list_of_buggy_commits['ISSUE_ID'] = list_of_buggy_commits['ISSUE_ID'].astype(str)\n self.event_data_frame['ISSUE_ID'] = self.event_data_frame['ISSUE_ID'].astype(str)\n self.event_data_frame = pd.merge(self.event_data_frame, list_of_buggy_commits, how=\"left\",\n left_on=[\"ISSUE_ID\"],\n right_on=[\"ISSUE_ID\"])\n\n self.event_data_frame.to_csv(f\"{self.cdp_dump_path}/github_events_cdp_dump.csv\", encoding='utf-8-sig',\n index=False)\n event_parser = EventsJsonParser()\n event_parser.find_buggy_commits_based_on_repository_fixes(self.web_constants, self.event_data_frame,\n f\"{self.cdp_dump_path}/\"\n f\"{CDPConfigValues.closed_events_list_file_name}\")", "def comments(self, q=None, sort=None):\n params = {}\n if sort is not None:\n params[\"sort\"] = sort\n if q is not None:\n params[\"q\"] = q\n for comment in self._get_paged(\"comments\", params=params):\n yield Comment(comment, **self._new_session_args)", "def dfs(comment, fun):\n # comment has no replies\n if not comment.replies:\n return\n else:\n for r in comment.replies:\n # do something with a comment here\n fun(r)\n # recurr\n Comment.dfs(r, fun)" ]
[ "0.6642331", "0.6419399", "0.6326556", "0.590753", "0.58366686", "0.5725965", "0.56005704", "0.55982554", "0.55817443", "0.5574108", "0.5573544", "0.55464286", "0.5534647", "0.55332947", "0.55286044", "0.5525885", "0.55153495", "0.54998296", "0.549831", "0.5463732", "0.5442687", "0.5419565", "0.5389291", "0.5376693", "0.5376668", "0.53130734", "0.53128296", "0.5299989", "0.5294363", "0.5293513" ]
0.64263386
1
download sequencing file from SRA archive requires local install of SRA tools in path requires verification of filenames and paths
def download_SRA(SRA): print("Downloading SRA archive") output = subprocess.run(['prefetch', '-f', 'yes', SRA], stderr=subprocess.STDOUT) print("Extracting FASTQ data") output = subprocess.run(['fastq-dump', '--gzip', NCBI_DIR+SRA+'.sra'], stderr=subprocess.STDOUT)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def download_sra_files(remote_location, local_location = '', max_recursion = 3, verbose = False):\n\n downloaded_files = list();\n\n def printv(*args):\n if(verbose):\n print(*args);\n sys.stdout.flush();\n\n printv(\"Reading folder: \", remote_location);\n\n req = urllib2.Request(remote_location);\n\n response = urllib2.urlopen(req);\n\n the_page = response.read();\n\n entries = the_page.split('\\r\\n');\n\n #Identify sub folders\n folders = list();\n for entry in entries:\n if(len(entry) == 0):\n continue;\n\n spl_entry = entry.split();\n if(spl_entry[0][0] == 'd'): #if directory flag\n folders.append(spl_entry[-1]);\n\n\n for folder in folders:\n dl_files = download_sra_files(remote_location + '/' + folder, local_location, max_recursion - 1, verbose);\n downloaded_files.extend(dl_files);\n\n #Identify SRA files\n files = list();\n for entry in entries:\n if(len(entry) == 0):\n continue;\n\n spl_entry = entry.split();\n if(spl_entry[0][0] == '-' and #Not a directory\n spl_entry[-1].lower().endswith('.sra')): #Has extension '.sra'\n\n files.append(spl_entry[-1]);\n\n if(len(files) > 0):\n printv(\"Identified sra files: \");\n for file_name in files:\n printv(\" \", file_name);\n\n abs_local_location = os.path.abspath(local_location);\n\n if(not os.path.isdir(abs_local_location)):\n os.makedirs(abs_local_location);\n\n for file_name in files:\n\n printv(\"Downloading \", file_name);\n\n file_str = remote_location + '/' + file_name;\n\n req = urllib2.Request(file_str);\n response = urllib2.urlopen(req);\n\n dest_file_name = abs_local_location + os.sep + file_name;\n dest_file = open(dest_file_name, 'wb');\n shutil.copyfileobj(response, dest_file)\n dest_file.close();\n downloaded_files.append(dest_file_name);\n\n return downloaded_files;", "def download(self):\r\n \r\n # RAR Files names\r\n if self.debug==0:\r\n rar_files_name = [\"K001.rar\",\"K002.rar\",\"K003.rar\",\"K004.rar\",\"K005.rar\",\"K006.rar\",\r\n \"KA01.rar\", \"KA03.rar\", \"KA04.rar\", \"KA05.rar\", \"KA06.rar\", \"KA07.rar\", \r\n \"KA08.rar\", \"KA09.rar\", \"KA15.rar\", \"KA16.rar\", \"KA22.rar\", \"KA30.rar\", \r\n \"KB23.rar\", \"KB24.rar\", \"KB27.rar\", \r\n \"KI01.rar\", \"KI03.rar\", \"KI04.rar\", \"KI05.rar\", \"KI07.rar\", \"KI08.rar\", \r\n \"KI14.rar\", \"KI16.rar\", \"KI17.rar\", \"KI18.rar\", \"KI21.rar\"]\r\n else:\r\n rar_files_name = [\"K002.rar\", \"KA01.rar\", \"KI01.rar\"]\r\n\r\n url = self.url\r\n \r\n dirname = self.rawfilesdir\r\n dir_rar = \"rar_files\"\r\n if not os.path.isdir(dirname):\r\n os.mkdir(dirname)\r\n if not os.path.isdir(os.path.join(dirname, dir_rar)):\r\n os.mkdir(os.path.join(dirname, dir_rar))\r\n \r\n\r\n print(\"Downloading RAR files:\")\r\n for i in rar_files_name:\r\n file_name = i\r\n if not os.path.exists(os.path.join(dirname, dir_rar, file_name)):\r\n urllib.request.urlretrieve(url+file_name, os.path.join(dirname, dir_rar, file_name))\r\n print(file_name)\r\n \r\n print(\"Extracting files:\")\r\n for i in rar_files_name:\r\n if not os.path.exists(os.path.join(dirname, i[:4])):\r\n file_name = os.path.join(dirname, dir_rar, i)\r\n Archive(file_name).extractall(dirname) \r\n print(i)\r\n\r\n if self.debug==0:\r\n files_path = self.files\r\n else:\r\n files_path = files_debug(self.rawfilesdir)\r\n\r\n print(files_path)\r\n self.files = files_path", "def _download(self):\n self._system.download(\"http://geant4.web.cern.ch/geant4/support/source/\" + self._tar_name)", "def download_rna_seq(rna_seq_uuid_list, dirpath):\n data_dict = {}\n data_dict[\"ids\"] = 
rna_seq_uuid_list\n\n headers = {'Content-Type': 'application/json'}\n data = json.dumps(data_dict)\n\n try:\n response = requests.post('https://api.gdc.cancer.gov/data', headers=headers, data=data)\n filename = os.path.join(dirpath,response.headers[\"Content-Disposition\"].split(\"filename=\")[1])\n\n with open(filename, \"wb\") as file:\n file.write(response.content)\n file.close()\n return filename\n except:\n return None", "def dascasi_download():\n p = argparse.ArgumentParser(description=\"download DASC all-sky camera data\")\n p.add_argument(\"site\", choices=[\"EAA\", \"FYU\", \"KAK\", \"PKR\", \"TOO\", \"VEE\"])\n p.add_argument(\n \"startend\", help=\"start/end times UTC e.g. 2012-11-03T06:23 2012-11-03T07\", nargs=2\n )\n p.add_argument(\"odir\", help=\"directory to write downloaded FITS to\")\n p.add_argument(\"-w\", \"--wavelen\", help=\"request specific wavelength(s)\", nargs=\"+\")\n p.add_argument(\"-host\", default=\"ftp://optics.gi.alaska.edu\")\n p = p.parse_args()\n\n # host = \"ftp://mirrors.arsc.edu/AMISR/PKR/DASC/RAW/\"\n download(p.startend, p.site, p.odir, p.host, p.wavelen)", "def _download_scn_asf(params):\n pid = params[0]\n product_file_id = params[1]\n remote_url = params[2]\n db_info_obj = params[3]\n scn_lcl_dwnld_path = params[4]\n asf_user = params[5]\n asf_pass = params[6]\n success = False\n\n eodd_wget_downloader = eodatadown.eodatadownutils.EODDWGetDownload()\n start_date = datetime.datetime.now()\n try:\n success = eodd_wget_downloader.downloadFile(remote_url, scn_lcl_dwnld_path, username=asf_user,\n password=asf_pass, try_number=\"10\", time_out=\"60\")\n except Exception as e:\n logger.error(\"An error has occurred while downloading from ASF: '{}'\".format(e))\n end_date = datetime.datetime.now()\n\n if success and os.path.exists(scn_lcl_dwnld_path):\n logger.debug(\"Set up database connection and update record.\")\n db_engine = sqlalchemy.create_engine(db_info_obj.dbConn)\n session_sqlalc = sqlalchemy.orm.sessionmaker(bind=db_engine)\n ses = session_sqlalc()\n query_result = ses.query(EDDSentinel1ASF).filter(EDDSentinel1ASF.PID == pid).one_or_none()\n if query_result is None:\n logger.error(\"Could not find the scene within local database: \" + product_file_id)\n else:\n query_result.Downloaded = True\n query_result.Download_Start_Date = start_date\n query_result.Download_End_Date = end_date\n query_result.Download_Path = scn_lcl_dwnld_path\n ses.commit()\n ses.close()\n logger.info(\"Finished download and updated database: {}\".format(scn_lcl_dwnld_path))\n else:\n logger.error(\"Download did not complete, re-run and it should try again: {}\".format(scn_lcl_dwnld_path))", "def _download_metafile(dataset, path=None):\n if not path:\n path = sunpy.config.get('downloads', 'sample_dir')\n base_url = 'https://spdf.gsfc.nasa.gov/pub/software/cdawlib/0MASTERS/'\n fname = dataset.lower() + '_00000000_v01.cdf'\n url = base_url + fname\n try:\n downloaded_file = pooch.retrieve(url=url, known_hash=None, fname=fname, path=path, progressbar=True)\n except ModuleNotFoundError:\n downloaded_file = pooch.retrieve(url=url, known_hash=None, fname=fname, path=path, progressbar=False)\n return downloaded_file", "def elar_download(bundle_id, phpsessid, extension):\n\n # check for validity of ID\n try:\n soasID = bundle_id.split(\"oai:soas.ac.uk:\")[1]\n except IndexError: # bundle_id does not start with oai:soas.ac.uk:, so we are not interested\n print(\"not a SOAS file\", soasID)\n return\n # prepare request\n url = \"https://elar.soas.ac.uk/Record/%s\" % soasID\n 
cookies = {\"PHPSESSID\": phpsessid}\n print(\"checking\", url)\n # retrieve catalog page\n with requests.Session() as s:\n r = s.post(url, cookies=cookies)\n html = r.text\n # extract links to ELAN files\n try:\n links = fromstring(html).findall(\".//tbody/tr/td/a\")\n locations = {\n a.attrib[\"href\"] for a in links if a.attrib[\"href\"].endswith(extension)\n }\n except AttributeError: # not an ELAN file\n print(\"files are not accessible\")\n return\n # dowload identified files\n if locations == []:\n print(\"files are not accessible\")\n return\n for location in locations:\n download_url = location\n bs = location.split(\"/\")[-1].split('-b-')\n if len(bs) == 1:\n collectionname = 'no_collection'\n basename = bs[0]\n else:\n collectionname = bs[0]\n basename = '-b-'.join(bs[1:])\n filepath = os.path.join('elar', collectionname, basename)\n if len(filepath) > 150:\n filepath = os.path.join('elar', collectionname, \"%s.%s\" % (hash(basename[:-4]),extension))\n print(\" downloading %s as %s:\" % (location, filepath))\n os.makedirs(os.path.dirname(filepath), exist_ok=True)\n save_file(s, filepath, download_url, cookies)", "def download_from_archive(filename, sub_path='raw_files', env_var='DRAGONS_TEST'):\n # Find cache path and make sure it exists\n root_cache_path = os.getenv(env_var)\n\n if root_cache_path is None:\n raise ValueError(f'Environment variable not set: {env_var}')\n\n root_cache_path = os.path.expanduser(root_cache_path)\n\n if sub_path is not None:\n cache_path = os.path.join(root_cache_path, sub_path)\n\n if not os.path.exists(cache_path):\n os.makedirs(cache_path)\n\n # Now check if the local file exists and download if not\n local_path = os.path.join(cache_path, filename)\n if not os.path.exists(local_path):\n tmp_path = download_file(URL + filename, cache=False)\n shutil.move(tmp_path, local_path)\n\n # `download_file` ignores Access Control List - fixing it\n os.chmod(local_path, 0o664)\n\n return local_path", "def cli(date, path, mission):\n download.main(path, mission, date)", "def download():\n \"\"\"\n \"The book p.79 have error.\n \"https://github.com/login/oauth/authorize?client_id=7e0a3cd836d3e544dbd9&redirect_uri=https%3A%2F%2Fgist.github.com%2Fauth%2Fgithub%2Fcallback%3Freturn_to%3Dhttps%253A%252F%252Fgist.github.com%252Fyoungsoul%252Ffc69665c5d08e189c57c0db0e93017a6&response_type=code&state=9b385430ee7cd1a75ca91c1d1cb6c565111f6b81e54a71f42ae9b22035241b9b\n \"\"\"\n subprocess.call([\n 'wget',\n 'https://github.com/amplab/datascience-sp14/raw/master/lab7/mldata/mnist-original.mat', \n '-P',\n 'origin_data/'\n ])\n logger.info('Download success!')", "def _download_karakas():\n #url = 'http://zenodo.org/record/12800/files/dartmouth.h5'\n url = 'http://cdsarc.u-strasbg.fr/viz-bin/nph-Cat/tar.gz?J%2FMNRAS%2F403%2F1413'\n import urllib\n print('Downloading Karakas 2010 yield tables from Vizier (should happen only at the first time)...')\n if os.path.exists(MASTERFILE):\n os.remove(MASTERFILE)\n urllib.urlretrieve(url,MASTERFILE)\n\n import tarfile\n tar = tarfile.open(MASTERFILE)\n tar.extractall(path=DATADIR)\n tar.close()", "def _download_karakas():\n #url = 'http://zenodo.org/record/12800/files/dartmouth.h5'\n url = 'http://cdsarc.u-strasbg.fr/viz-bin/nph-Cat/tar.gz?J%2FMNRAS%2F403%2F1413'\n import urllib\n print('Downloading Karakas 2010 yield tables from Vizier (should happen only at the first time)...')\n if os.path.exists(MASTERFILE):\n os.remove(MASTERFILE)\n urllib.urlretrieve(url,MASTERFILE)\n\n import tarfile\n tar = tarfile.open(MASTERFILE)\n 
tar.extractall(path=DATADIR)\n tar.close()", "def download():\n try:\n cli.run(\n [URL, '--output', TEMP_DIR],\n )\n except SystemExit:\n return None", "def seq_download(name, organism=\"Homo sapiens\", gaba=False):\n\n subunits = {\n \"Alpha-1\": \"Gabra1\",\n \"Alpha-2\": \"Gabra2\",\n \"Alpha-3\": \"Gabra3\",\n \"Alpha-4\": \"Gabra4\",\n \"Alpha-5\": \"Gabra5\",\n \"Alpha-6\": \"Gabra6\",\n \"Beta-1\": \"Gabrb1\",\n \"Beta-2\": \"Gabrb2\",\n \"Beta-3\": \"Gabrb3\",\n \"Gamma-1\": \"Gabrg1\",\n \"Gamma-2\": \"Gabrg2\",\n \"Gamma-3\": \"Gabrg3\",\n \"Delta\": \"Gabrd\",\n \"Pi\": \"Gabrp\",\n \"Rho-1\": \"Gabrr1\",\n \"Rho-2\": \"Gabrr2\",\n \"Rho-3\": \"Gabrr3\",\n \"Epsilon\": \"Gabre\",\n \"Theta\": \"Gabrq\"\n }\n if gaba:\n results = search(subunits[name])\n else:\n results = search(name)\n results = results[results[\"Organism\"].str.contains(organism, na=False)]\n if len(results):\n if gaba:\n target = results[results[\"Gene names\"].str.contains(subunits[name].upper())][\"Entry\"].max()\n else:\n target = results[results[\"Gene names\"].str.contains(name)][\"Entry\"].max()\n response = urlopen(f\"https://www.uniprot.org/uniprot/{target}.fasta\").read().decode(\"utf-8\")\n with open(\"Temp_seq.fasta\", \"w\") as file:\n file.write(response)\n seq = SeqIO.read(\"Temp_seq.fasta\", \"fasta\")\n os.remove(\"Temp_seq.fasta\")\n\n return seq\n\n else:\n return -1", "def download(self):\n\n with open(self.dataset_path) as dataset_file:\n dataset = json.load(dataset_file)\n\n path = \"\".join([POST_HIT_PATH, dataset[\"dataset\"][\"data_path\"]])\n if not os.path.exists(path):\n os.makedirs(path)\n\n protocole = dataset[\"dataset\"][\"protocole\"]\n\n download_links = []\n\n for resource in dataset[\"dataset\"][\"resources\"]:\n file_path = \"\".join([path, resource[\"filename\"]])\n\n #Check if the the download link has not been used before (One download link for all)\n if resource[\"download_link\"] not in download_links:\n \n print(\"DOWNLOADING : {}\".format(resource[\"filename\"]))\n f = urllib.request.urlopen(resource[\"download_link\"])\n data = f.read()\n with open(file_path, \"wb\") as donwload_file:\n donwload_file.write(data)\n\n download_links.append(resource[\"download_link\"])\n\n \n #Extract all files from the tar archives if necessary\n if tarfile.is_tarfile(file_path):\n tf = tarfile.open(file_path)\n tf.exractall()", "def _download(self, url, rel_path):\n \n tmp_dir = \"TMP_DIR=`mktemp -d`;\"\n wget_cmd = [ tmp_dir, \"wget\", \"-nv\", \"-O\", \"$TMP_DIR/archive.tgz\", url, \";\" ]\n wget_cmd = ' '.join(wget_cmd)\n \n mkdir_cmd = \"mkdir -p %s ;\" % (\"./remote_resources/\" + rel_path)\n \n cleandir_cmd = \"rm -Rf %s/* ;\" % (\"./remote_resources/\" + rel_path)\n \n untar_cmd = [ \"tar\", \"xf\", \"$TMP_DIR/archive.tgz\", \"-C\", \"./remote_resources/%s\" % rel_path, \";\" ]\n untar_cmd = ' '.join(untar_cmd)\n \n remove_cmd = \"rm -Rf $TMP_DIR;\"\n \n return self._ssh(' '.join([ wget_cmd, mkdir_cmd, cleandir_cmd, untar_cmd, remove_cmd ]))", "def download_latex(self):\n try:\n # $ Set the Arxiv Object to ensure Proper extraction\n identity,paper = self.extract_meta_from_remote(self.paper_id)\n self.identity = identity\n\n if not dir_exists(self.paper_root_path):\n os.makedirs(self.paper_root_path)\n # $ Download the paper. 
\n downloaded_data = arxiv.download(paper,dirpath=self.paper_root_path,slugify=lambda paper: paper.get('id').split('/')[-1],prefer_source_tarfile=True)\n return downloaded_data\n except Exception as e:\n raise ArxivAPIException(self.paper_id,str(e))", "def download_scn(self, unq_id):\n if not os.path.exists(self.baseDownloadPath):\n raise EODataDownException(\"The download path does not exist, please create and run again.\")\n\n logger.debug(\"Creating Database Engine and Session.\")\n db_engine = sqlalchemy.create_engine(self.db_info_obj.dbConn)\n session_sqlalc = sqlalchemy.orm.sessionmaker(bind=db_engine)\n ses = session_sqlalc()\n\n logger.debug(\"Perform query to find scenes which need downloading.\")\n query_result = ses.query(EDDSentinel1ASF).filter(EDDSentinel1ASF.PID == unq_id,\n EDDSentinel1ASF.Downloaded == False).filter(\n EDDSentinel1ASF.Remote_URL is not None).all()\n ses.close()\n success = False\n if query_result is not None:\n if len(query_result) == 1:\n record = query_result[0]\n logger.debug(\"Building download info for '\" + record.Remote_URL + \"'\")\n scn_lcl_dwnld_path = os.path.join(self.baseDownloadPath,\n \"{}_{}\".format(record.Product_File_ID, record.PID))\n if not os.path.exists(scn_lcl_dwnld_path):\n os.mkdir(scn_lcl_dwnld_path)\n out_filename = record.Remote_FileName\n _download_scn_asf([record.PID, record.Product_File_ID, record.Remote_URL, self.db_info_obj,\n os.path.join(scn_lcl_dwnld_path, out_filename), self.asfUser, self.asfPass])\n success = True\n elif len(query_result) == 0:\n logger.info(\"PID {0} is either not available or already been downloaded.\".format(unq_id))\n else:\n logger.error(\"PID {0} has returned more than 1 scene - must be unique something really wrong.\".\n format(unq_id))\n raise EODataDownException(\"There was more than 1 scene which has been found - \"\n \"something has gone really wrong!\")\n else:\n logger.error(\"PID {0} has not returned a scene - check inputs.\".format(unq_id))\n raise EODataDownException(\"PID {0} has not returned a scene - check inputs.\".format(unq_id))\n return success", "def _download(self):\n self._system.download_file(\"http://curl.haxx.se/download/\" + self._tar_name)", "def download(self, download) -> None:\n path_cifarh = path.join(self.root, self.filename_cifarh)\n path_cifar = path.join(self.root, self.filename_cifar)\n is_there = path.isfile(path_cifarh) and path.isfile(path_cifar)\n if is_there:\n print(\"Files already exist.\")\n if download == \"force\" or not is_there:\n download_and_extract_archive(\n self.url_cifar, self.root, filename=self.filename_cifar\n )\n download_and_extract_archive(\n self.url_cifarh, self.root, filename=self.filename_cifarh\n )", "def main() -> None:\n run_time = datetime.datetime.now()\n datetime_string = run_time.strftime(\"%Y%m%d_%H%M%S\")\n\n parser = argparse.ArgumentParser(\n description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter\n )\n parser.add_argument(\n \"-l\",\n \"--logfolder\",\n type=str,\n default=\"ia_downloader_logs\",\n help=(\n \"Folder to write logs to (if not specified, folder 'ia_downloader_logs' will be used in\"\n \" same directory as this script)\"\n ),\n )\n\n subparsers = parser.add_subparsers(\n help=(\n \"Either 'download' files associated with an Internet Archive identifier, or 'verify' a\"\n \" previously-completed download was successful and files match expected MD5 hash values\"\n ),\n dest=\"command\",\n required=True,\n )\n\n download_parser = subparsers.add_parser(\"download\")\n 
download_parser.add_argument(\n \"identifiers\",\n type=str,\n nargs=\"+\",\n help=(\n \"One or more (space separated) Archive.org identifiers (e.g.\"\n \" 'gov.archives.arc.1155023'). If specifying a collection (and you wish to download all\"\n \" items within the collection), use the prefix 'collection:' (e.g. 'collection:nasa')\"\n ),\n )\n download_parser.add_argument(\"output_folder\", type=str, help=\"Folder to download files to\")\n download_parser.add_argument(\n \"-t\",\n \"--threads\",\n type=check_argument_int_greater_than_one,\n default=5,\n help=(\n \"Number of download threads (i.e. how many downloads to perform simultaneously)\"\n \" (default is 5)\"\n ),\n )\n download_parser.add_argument(\n \"-v\",\n \"--verify\",\n default=False,\n action=\"store_true\",\n help=\"Perform an MD5 hash check on each file as downloads complete\",\n )\n download_parser.add_argument(\n \"-r\",\n \"--resume\",\n default=False,\n action=\"store_true\",\n help=(\n \"Attempt to resume downloads using already-downloaded data if a connection error occurs\"\n ),\n )\n download_parser.add_argument(\n \"-s\",\n \"--split\",\n type=check_argument_int_greater_than_one,\n default=1,\n help=(\n \"To increase per-file download speeds, split files above 10MB into provided number of\"\n \" chunks, and reconstruct on completion\"\n ),\n )\n download_parser.add_argument(\n \"-f\",\n \"--filefilters\",\n type=str,\n nargs=\"+\",\n help=(\n \"One or more (space separated) file name filters; only files that contain any of the\"\n \" provided filter strings (case insensitive) will be downloaded. If multiple filters\"\n \" are provided, the search will be an 'OR' (i.e. only one of the provided strings needs\"\n \" to hit)\"\n ),\n )\n download_parser.add_argument(\n \"-c\",\n \"--credentials\",\n type=str,\n nargs=2,\n help=(\n \"Email address and password (as separate strings) for Internet Archive account\"\n \" (required for download of some Internet Archive items)\"\n ),\n )\n download_parser.add_argument(\n \"--hashfile\",\n type=str,\n help=(\n \"Output path to write file containing hash metadata to (if not specified, file will\"\n \" be created in the output folder)\"\n ),\n )\n download_parser.add_argument(\n \"--cacherefresh\",\n default=False,\n action=\"store_true\",\n help=\"Flag to update any cached Internet Archive metadata from previous script executions\",\n )\n\n verify_parser = subparsers.add_parser(\"verify\")\n verify_parser.add_argument(\n \"data_folders\",\n type=str,\n nargs=\"+\",\n help=\"Path to folder containing previously downloaded data\",\n )\n verify_parser.add_argument(\n \"-i\",\n \"--identifiers\",\n type=str,\n nargs=\"+\",\n help=(\n \"One or more (space separated) Archive.org identifiers (e.g.\"\n \" 'gov.archives.arc.1155023') - to be used if only certain item(s) in the target\"\n \" folder(s) are to be verified\"\n ),\n )\n verify_parser.add_argument(\n \"--hashfile\",\n type=str,\n help=(\n \"Path to file containing hash metadata from previous download using this script (if not\"\n \" specified, cached data from previous script execution will be used)\"\n ),\n )\n verify_parser.add_argument(\n \"-f\",\n \"--filefilters\",\n type=str,\n nargs=\"+\",\n help=(\n \"One or more (space separated) file name filters; only files that contain any of the\"\n \" provided filter strings (case insensitive) will be verified. If multiple filters\"\n \" are provided, the search will be an 'OR' (i.e. 
only one of the provided strings needs\"\n \" to hit)\"\n ),\n )\n verify_parser.add_argument(\n \"--nopaths\",\n default=False,\n action=\"store_true\",\n help=(\n \"If files are no longer in the same relative paths, perform lookup based only on\"\n \" whether MD5 hashes are present in the data set (rather than also checking where those\"\n \" files are stored)\"\n ),\n )\n\n args = parser.parse_args()\n\n # Set up logging\n log_subfolders = [\"logs\", \"cache\"]\n for log_subfolder in log_subfolders:\n pathlib.Path(os.path.join(args.logfolder, log_subfolder)).mkdir(parents=True, exist_ok=True)\n log, counter_handler = prepare_logging(\n datetime_string,\n os.path.join(args.logfolder, log_subfolders[0]),\n \"ia_downloader\",\n dict(vars(args)),\n )\n log.info(\n \"Internet Archive is a non-profit organisation that is experiencing unprecedented service\"\n \" demand. Please consider making a donation: https://archive.org/donate\"\n )\n log.info(\"Logs will be stored in folder '{}'\".format(args.logfolder))\n\n try:\n if args.command == \"download\":\n if args.credentials is not None:\n try:\n internetarchive.configure(args.credentials[0], args.credentials[1])\n except internetarchive.exceptions.AuthenticationError:\n log.error(\n \"Authentication error raised for supplied email address and password -\"\n \" check these were entered correctly (if the password has spaces, it must\"\n \" be wrapped in quotation marks)\"\n )\n return\n if args.hashfile is not None:\n log.info(\n \"Internet Archive metadata will be written to hash file at '{}'\".format(\n args.hashfile\n )\n )\n if args.threads > 5 or args.split > 5:\n log.info(\n \"Reducing download threads to 5, to optimise script performance and reduce\"\n \" Internet Archive server load\"\n )\n args.threads = min(args.threads, 5)\n args.split = min(args.split, 5)\n if args.split > 1:\n if args.threads > 1:\n log.info(\n \"While using file splitting, only one file will be downloaded at a time so\"\n \" as to not overwhelm Internet Archive servers\"\n )\n args.threads = 1\n hashfile_file_handler = None\n if args.hashfile:\n hashfile_file_handler = open(args.hashfile, \"w\")\n\n for identifier in args.identifiers:\n download(\n identifier=identifier,\n output_folder=args.output_folder,\n hash_file=hashfile_file_handler,\n thread_count=args.threads,\n resume_flag=args.resume,\n verify_flag=args.verify,\n split_count=args.split,\n file_filters=args.filefilters,\n cache_parent_folder=os.path.join(args.logfolder, log_subfolders[1]),\n cache_refresh=args.cacherefresh,\n )\n\n if args.hashfile:\n hashfile_file_handler.close()\n\n elif args.command == \"verify\":\n verify(\n hash_file=args.hashfile,\n data_folders=args.data_folders,\n no_paths_flag=args.nopaths,\n hash_flag=True,\n cache_parent_folder=os.path.join(args.logfolder, log_subfolders[1]),\n identifiers=args.identifiers,\n file_filters=args.filefilters,\n )\n\n if counter_handler.count[\"WARNING\"] > 0 or counter_handler.count[\"ERROR\"] > 0:\n log.warning(\n \"Script complete; {} warnings/errors occurred requiring review (see log entries\"\n \" above, replicated in folder '{}')\".format(\n counter_handler.count[\"WARNING\"] + counter_handler.count[\"ERROR\"],\n args.logfolder,\n )\n )\n else:\n log.info(\"Script complete; no errors reported\")\n\n except KeyboardInterrupt:\n log.warning(\n \"KeyboardInterrupt received, quitting immediately (any in-progress downloads or\"\n \" verifications have been terminated)\"\n )\n except Exception:\n log.exception(\"Exception occurred:\")", "def 
download(self, download_path):\n return", "def download(path):\n\treturn send_from_directory(\"results\", path, as_attachment=True)", "def download(self):\n if not self.url:\n raise RuntimeError(self.tips)\n\n download_file_name = os.path.join(\n self.raw_path, os.path.splitext(os.path.basename(self.url))[0]\n )\n file_format = self.url.split(\".\")[-1]\n if \"amazon\" in self.url:\n raw_file_path = os.path.join(\n self.raw_path, f\"{self.dataset_name}.json.{file_format}\"\n )\n else:\n raw_file_path = os.path.join(\n self.raw_path, f\"{self.dataset_name}.{file_format}\"\n )\n if \"1drv.ms\" in self.url:\n file_format = \"zip\"\n raw_file_path = os.path.join(\n self.raw_path, f\"{self.dataset_name}.{file_format}\"\n )\n if not os.path.exists(raw_file_path):\n print(f\"download_file: url: {self.url}, raw_file_path: {raw_file_path}\")\n download_file(self.url, raw_file_path)\n if \"amazon\" in raw_file_path:\n # amazon dataset do not unzip\n print(\"amazon dataset do not decompress\")\n return\n elif file_format == \"gz\":\n file_name = raw_file_path.replace(\".gz\", \"\")\n with gzip.open(raw_file_path, \"rb\") as fin:\n with open(file_name, \"wb\") as fout:\n shutil.copyfileobj(fin, fout)\n else:\n shutil.unpack_archive(\n raw_file_path, self.raw_path, format=get_format(file_format)\n )\n\n if not os.path.exists(download_file_name):\n return\n elif os.path.isdir(download_file_name):\n os.rename(\n download_file_name, os.path.join(self.raw_path, self.dataset_name)\n )\n else:\n os.rename(\n download_file_name,\n os.path.join(\n self.raw_path,\n f'{self.dataset_name}.{download_file_name.split(\".\")[-1]}',\n ),\n )", "def x_download():\n\t#_loadconfig()\n\tconf = _get_config()\n\t#print conf['xplane']\n\tdownload_url = conf['xplane']['download']\n\tlocal(\"wget -P %s %s\" % (navimport.conf.work_dir(\"/xplane_zips\"), download_url))", "def download_source():\n \n #if os.path.exists(UNFCC_FILE): \n # os.rename(UNFCC_FILE,'old_'+UNFCC_FILE)\n #if os.path.exists(EBAL_FILE):\n # os.rename(EBAL_FILE,'old_'+EBAL_FILE)\n\n try:\n unsd = sdmx.Request('UNSD')\n sdmx.logger.setLevel(logging.INFO)\n \n logger.info('Loading UNFCC Data')\n resp_unfcc = unsd.data('DF_UNData_UNFCC')\n\n logger.info('Loading UN Energy Balance Data')\n resp_ebal = unsd.data('DF_UNData_EnergyBalance')\n except Exception as e:\n logger.error('Error!! Please look at SDMX logs to troubleshoot' + str(e))\n traceback.print_exc(file = sys.stdout)\n\n try:\n df_ebal = resp_ebal.to_pandas()\n df_unfcc = resp_unfcc.to_pandas()\n\n df_unfcc.reset_index().to_csv(UNFCC_FILE,index=False)\n logger.info('UNFCC Greenhouse Data stored as {}'.format(UNFCC_FILE))\n\n df_ebal.reset_index().to_csv(EBAL_FILE,index=False)\n logger.info('UN Energy Balance Data stored as {}'.format(EBAL_FILE))\n except Exception as e:\n logger.error('Error!! 
While saving data from SDMX to CSV ' + str(e))\n traceback.print_exc(file = sys.stdout)", "def _download(self, revision: str, path: str) -> scm.DownloadResult:\n command = f'{self.command} {revision}'\n if path:\n command += f' {path}'\n content = internals.run(command)\n return scm.DownloadResult(revision, path, content)", "def download_fastq():\n\n mkdir(FASTQ_DIR)\n\n template = \"\"\"fastq-dump --split-files --gzip {}\"\"\"\n\n printp(\"\"\"\\n#\\n# download all the fastq files\\n#\"\"\")\n printp(\"\"\"\\n# drmr:label fastq-download\"\"\")\n printp(\"\"\"\\n# drmr:job time_limit=2h working_directory={}\"\"\".format(FASTQ_DIR))\n\n for library, info in DATA.items():\n printp(template.format(get_srr(library)))\n printp(template.format(get_input_control_srr(library)))\n\n printp(\"\"\"\\n# drmr:wait\"\"\")", "def download() -> Path:\n rts_downloader.download()\n rts_gmlc_dir = Path(rts_downloader.rts_download_path) / \"RTS-GMLC\"\n return rts_gmlc_dir" ]
[ "0.6527539", "0.63202286", "0.6183208", "0.61473656", "0.6141469", "0.5976712", "0.5897661", "0.58915883", "0.58283", "0.5815497", "0.5812849", "0.58102864", "0.58102864", "0.5790022", "0.57642645", "0.5758532", "0.57328504", "0.57257515", "0.57255936", "0.5705985", "0.5704997", "0.5703746", "0.570062", "0.5697409", "0.5690241", "0.56886864", "0.56876963", "0.568679", "0.56622994", "0.5649596" ]
0.77889556
0
maps reads (bowtie against rRNA for legacy?) to extract ambiguous and uniquely mapped reads
def map_reads(SRA): #1. bowtie to rRNA print("Bowtie alignement on contaminant RNA...") cmd_bowtie = 'bowtie'+ ' ' + '-a' + ' ' + '-p6' + ' ' + '-S' + ' ' + '--un' + ' ' + TMP_DIR+SRA+'_rrnaUnmapped.fastq' + ' ' + BOWTIE_DIR+'/rRNA' + ' ' + TMP_DIR+SRA+'_trimmed.fastq' + ' ' + '|' + ' ' + 'samtools view -@ 6 -bS' + ' ' + '>' + TMP_DIR+SRA+'_trimmed_rrna.bam' output = subprocess.run(cmd_bowtie, shell=True) # 2. STAR to ref genome print("STAR alignement to yeast genome...") cmd_STAR = 'STAR --outSAMtype BAM Unsorted --runThreadN 6 --winAnchorMultimapNmax 200 --seedSearchStartLmax 15 --genomeDir' + ' ' + STAR_DIR + ' ' + '--readFilesIn' + ' ' + TMP_DIR+SRA+'_rrnaUnmapped.fastq' + ' ' + '--outFileNamePrefix' + ' ' + TMP_DIR+SRA+'_STAR_' output = subprocess.run(cmd_STAR, shell=True) # 3. Samtools keep uniquely mapped reads and sort print("Samtools to keep uniquely mapped reads and sort...") cmd_samtools1 = 'samtools view -@ 6 -b -q 255 -o' + ' ' + TMP_DIR+SRA+'_yeast_uniqueMapped_reads.bam' + ' ' + TMP_DIR+SRA+'_STAR_Aligned.out.bam' output = subprocess.run(cmd_samtools1, shell=True) cmd_samtools2 = 'samtools sort -@ 6 -o' + ' ' + TMP_DIR+SRA+'_yeast_uniqueMapped_reads_sorted.bam' + ' ' + TMP_DIR+SRA+'_yeast_uniqueMapped_reads.bam' output = subprocess.run(cmd_samtools2, shell=True) cmd_samtools3 = 'samtools index' + ' ' + TMP_DIR+SRA+'_yeast_uniqueMapped_reads_sorted.bam' output = subprocess.run(cmd_samtools3, shell=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_read_candidates(self, read):\n self.read_allele_dictionary = {}\n ref_alignment_start = read.reference_start\n ref_alignment_stop = self.get_read_stop_position(read)\n # if the region has reached a very high coverage, we are not going to parse through all the reads\n if self.coverage[ref_alignment_start] > 300:\n return False\n cigar_tuples = read.cigartuples\n read_sequence = read.query_sequence\n read_id = read.query_name\n read_quality = read.query_qualities\n ref_sequence = self.fasta_handler.get_sequence(chromosome_name=self.chromosome_name,\n start=ref_alignment_start,\n stop=ref_alignment_stop+10)\n\n self.read_info[read_id] = (ref_alignment_start, ref_alignment_stop, read.mapping_quality, read.is_reverse)\n for pos in range(ref_alignment_start, ref_alignment_stop):\n self.read_id_by_position[pos].append((read_id, ref_alignment_start, ref_alignment_stop))\n for i, ref_base in enumerate(ref_sequence):\n self.reference_dictionary[ref_alignment_start + i] = ref_base\n\n # read_index: index of read sequence\n # ref_index: index of reference sequence\n read_index = 0\n ref_index = 0\n found_valid_cigar = False\n for cigar in cigar_tuples:\n cigar_code = cigar[0]\n length = cigar[1]\n # get the sequence segments that are effected by this operation\n ref_sequence_segment = ref_sequence[ref_index:ref_index+length]\n read_quality_segment = read_quality[read_index:read_index+length]\n read_sequence_segment = read_sequence[read_index:read_index+length]\n\n if cigar_code != 0 and found_valid_cigar is False:\n read_index += length\n continue\n found_valid_cigar = True\n\n # send the cigar tuple to get attributes we got by this operation\n ref_index_increment, read_index_increment = \\\n self.parse_cigar_tuple(cigar_code=cigar_code,\n length=length,\n alignment_position=ref_alignment_start+ref_index,\n ref_sequence=ref_sequence_segment,\n read_sequence=read_sequence_segment,\n read_id=read_id,\n quality=read_quality_segment)\n\n # increase the read index iterator\n read_index += read_index_increment\n ref_index += ref_index_increment\n\n # after collecting all alleles from reads, update the global dictionary\n for position in self.read_allele_dictionary.keys():\n if position < self.region_start_position or position > self.region_end_position:\n continue\n self.rms_mq[position] += read.mapping_quality * read.mapping_quality\n for record in self.read_allele_dictionary[position]:\n # there can be only one record per position in a read\n allele, allele_type = record\n\n if allele_type == MATCH_ALLELE or allele_type == MISMATCH_ALLELE:\n # If next allele is indel then group it with the current one, don't make a separate one\n if position + 1 <= ref_alignment_stop and position + 1 in self.read_allele_dictionary.keys():\n next_allele, next_allele_type = list(self.read_allele_dictionary[position + 1].keys())[0]\n if next_allele_type == INSERT_ALLELE or next_allele_type == DELETE_ALLELE:\n continue\n self.positional_read_info[position].append(\n (read_id, ref_alignment_start, ref_alignment_stop, read.mapping_quality))\n self._update_positional_allele_dictionary(read_id, position, allele, allele_type,\n read.mapping_quality)\n else:\n # it's an insert or delete, so, add to the previous position\n self.positional_read_info[position-1].append(\n (read_id, ref_alignment_start, ref_alignment_stop, read.mapping_quality))\n self._update_positional_allele_dictionary(read_id, position-1, allele, allele_type,\n read.mapping_quality)\n return True", "def map_reads_2genes(self, reads_file):\n start1 = 
time()\n read_starts = self.__get_reads_pos(reads_file)\n start2 = time()\n times = 0\n for ref_gene in self.ref_genes:\n times += 1\n if times % 500 == 0:\n print 'calculated %d genes read count ...' % times\n if len(read_starts[ref_gene.chrom]) == 0:\n continue\n starts = read_starts[ref_gene.chrom]\n for es, ed in zip(ref_gene.exon_starts, ref_gene.exon_ends):\n # rd = starts[(starts > es) & (starts < ed)].size\n rd = cal_read_count(es, ed, starts)\n ref_gene.read_count += rd\n\n print 'start calculate rpkm ...'\n mapped_read_count = self.mapped_read_count\n for ref_gene in self.ref_genes:\n # calculate RPKM\n ref_gene.read_density = \\\n ref_gene.read_count * 1000 * 1000 * 1000. / (ref_gene.mRNA_length * mapped_read_count)\n print 'got reads time: %f' % (time() - start1)\n print 'map reads time: %f' % (time() - start2)", "def determine_crossmapped_reads(self, read_alignment_path):\n references_by_species = self._get_references_by_species()\n crossmapped_reads = set()\n done_replicon_comparison = []\n with pysam.AlignmentFile(read_alignment_path) as bam:\n for org, replicon_ids in references_by_species.items():\n for replicon_id in replicon_ids:\n self._read_ids = set()\n # First, collect the ids of the aligned reads of\n # this replicon\n for alignment in bam.fetch(reference=replicon_id):\n self._read_ids.add(alignment.qname)\n # Then compare them to the alignments of each\n # replicon of the other organism(s)\n for (\n comp_org,\n comp_replicon_ids,\n ) in references_by_species.items():\n # Only compare replicons of different species\n if org == comp_org:\n continue\n for comp_replicon_id in comp_replicon_ids:\n comparison = sorted([replicon_id, comp_replicon_id])\n # Check if comparison of the two replicons\n # has been done already\n if comparison in done_replicon_comparison:\n continue\n done_replicon_comparison.append(comparison)\n # Compare all read ids of the comparison\n # replicon to the query replicon read ids\n for alignment in bam.fetch(\n reference=comp_replicon_id\n ):\n if alignment.qname in self._read_ids:\n crossmapped_reads.add(alignment.qname)\n no_of_crossmapped_reads = len(crossmapped_reads)\n return crossmapped_reads", "def get_read_alignments(sam_f):\n sparser = samparser.SamParser(sam_f=sam_f, aligned_only=True, mapq=20, mismatches=1)\n \n # parse all the hits into this to make sure multi mapping hits map to the same contig\n hit_dict = {}\n ambig_reads = 0\n processed_reads = 0\n for hit in sparser.parse_sam_file():\n processed_reads += 1\n if hit_dict.get(hit['qname'], 0):\n if hit_dict[hit['qname']] != hit['rname']:\n print(\"Warning read: {} aligns to two different contigs\".format(hit['qname']), file=sys.stderr)\n ambig_reads += 1\n else:\n continue\n else:\n hit_dict[hit['qname']] = hit['rname']\n\n print(\"{} of {} processed reads were ambiguous.\".format(ambig_reads, processed_reads))\n\n # condense the hit dict into a contig dict\n contig_dict = {}\n for read, contig in hit_dict.items():\n if contig_dict.get(contig, 0):\n contig_dict[contig].append(read)\n else:\n contig_dict[contig] = [read]\n\n return contig_dict", "def _read_miraligner(fn):\n reads = defaultdict(realign)\n with open(fn) as in_handle:\n in_handle.readline()\n for line in in_handle:\n cols = line.strip().split(\"\\t\")\n iso = isomir()\n query_name, seq = cols[1], cols[0]\n chrom, reference_start = cols[-2], cols[3]\n iso.mirna = cols[3]\n subs, add, iso.t5, iso.t3 = cols[6:10]\n if query_name not in reads:\n reads[query_name].sequence = seq\n iso.align = line\n iso.start = 
reference_start\n iso.subs, iso.add = _parse_mut(subs), add\n logger.debug(\"%s %s %s %s %s\" % (query_name, reference_start, chrom, iso.subs, iso.add))\n reads[query_name].set_precursor(chrom, iso)\n return reads", "def mapping(reads_list, k, h, index, genome):\n snps_dict = {}\n # Map the read on the genome and store the snps found\n for read in reads_list:\n reversed_read = reverse_read(read)\n reverse = False\n list_mapping = seed_and_extend(read, k, h, index, genome)\n if list_mapping[0] < len(genome):\n reverse = False\n if VERBOSE:\n print(\"Read number : \", reads_list.index(read) + 1, \\\n \"\\n Mapping at position :\", list_mapping[0], \\\n \" on straight strand. \\n With \", list_mapping[1], \\\n \"substitutions at positions :\", list_mapping[2])\n else:\n list_mapping = seed_and_extend(reversed_read, k, h, index, genome)\n if list_mapping[0] < len(genome):\n reverse = True\n if VERBOSE:\n print(\"Read number : \", reads_list.index(read) + 1, \\\n \"\\n Mapping at position :\", list_mapping[0], \\\n \" on reverse strand. \\n With \", list_mapping[1], \\\n \"substitutions at positions :\", list_mapping[2])\n else:\n reverse = False\n if VERBOSE:\n print(\"No mapping found for read number :\", reads_list.index(read) + 1)\n if list_mapping[0] < len(genome):\n for mismatch in list_mapping[2]:\n if reverse == False:\n if mismatch in snps_dict.keys():\n snps_dict[mismatch].append(read[mismatch - list_mapping[0]])\n else:\n snps_dict[mismatch] = [read[mismatch - list_mapping[0]]]\n else:\n if mismatch in snps_dict.keys():\n snps_dict[mismatch].append(reversed_read[mismatch - list_mapping[0]])\n else:\n snps_dict[mismatch] = [reversed_read[mismatch - list_mapping[0]]]\n\n return snps_dict", "def map_to_mirbase(fastqs, bam_file, sample_id):\n read_groups = ['@RG\\\\tID:{rgid}\\\\tSM:{lb}\\\\tLB:{lb}'\\\n\t\t\t.format(rgid=sample_id+\"_\"+lane_id, lb=sample_id) for lane_id in ['L001', 'L002', 'L003', 'L004']]\n map_reads(fastqs, mirbase_reference, bam_file, read_groups, mapper='bowtie')", "def parse_reads_and_select_candidates(self, reads):\n st_time = time.time()\n # read_id_list = []\n total_reads = 0\n read_unique_id = 0\n for read in reads:\n # check if the read is usable\n if read.mapping_quality >= DEFAULT_MIN_MAP_QUALITY and read.is_secondary is False \\\n and read.is_supplementary is False and read.is_unmapped is False and read.is_qcfail is False:\n\n read.query_name = read.query_name + '_' + str(read_unique_id)\n if self.find_read_candidates(read=read):\n # read_id_list.append(read.query_name)\n total_reads += 1\n read_unique_id += 1\n\n if total_reads == 0:\n return []\n\n selected_allele_list = []\n postprocess_read_id_list = set()\n for pos in self.positional_allele_dictionary:\n if pos < self.region_start_position or pos > self.region_end_position:\n continue\n ref = self.reference_dictionary[pos]\n\n all_allele_dictionary = self.positional_allele_dictionary[pos]\n all_mismatch_count = 0\n for allele in all_allele_dictionary:\n all_mismatch_count += all_allele_dictionary[allele]\n\n # pick the top 2 most frequent allele\n allele_frequency_list = list(sorted(all_allele_dictionary.items(), key=operator.itemgetter(1, 0),\n reverse=True))[:PLOIDY]\n allele_list = self._filter_alleles(pos, allele_frequency_list)\n alt1 = allele_list[0] if len(allele_list) >= 1 else None\n alt2 = allele_list[1] if len(allele_list) >= 2 else '.'\n if alt1 is None:\n continue\n mq_rms = round(math.sqrt(self.rms_mq[pos]/self.coverage[pos]), 3) if self.coverage[pos] > 0 else 0\n dp = 
self.coverage[pos]\n ref_count = self.coverage[pos] - all_mismatch_count\n candidate_record = [self.chromosome_name] + self._get_record(pos, alt1, alt2, ref, ref_count) + [mq_rms] + [dp]\n postprocess_read_id_list.update(self.read_id_by_position[pos])\n selected_allele_list.append(candidate_record)\n\n postprocess_read_id_list = list(postprocess_read_id_list)\n if len(selected_allele_list) > 0:\n self.postprocess_reference()\n self.postprocess_reads(postprocess_read_id_list)\n\n return selected_allele_list", "def test_extract_read_to_sample_mapping(self):\r\n\r\n labels = [\r\n 'S160_1 E86FECS01DW5V4 orig_bc=CAGTACGATCTT new_bc=CAGTACGATCTT bc_diffs=0',\r\n 'S160_2 E86FECS01DW5V5 orig_bc=CAGTACGATCTT new_bc=CAGTACGATCTT bc_diffs=0']\r\n\r\n expected = {'E86FECS01DW5V4': 'S160_1',\r\n 'E86FECS01DW5V5': 'S160_2'}\r\n\r\n self.assertEqual(extract_read_to_sample_mapping(labels),\r\n expected)", "def load_data_reps(fasta, bams, regions, features, strains, strains_unique, maxReads=10000, strands=\"+-\", nn=1):\n # get storage\n k = 2*nn+1\n fi = 0\n strain2idx = {s: idx for idx, s in enumerate(strains_unique)}\n region2data = {}\n for ri, (ref, pos, strand) in enumerate(regions, 1):\n if type(strand)==float: strand=\"+\" # sometimes strand is missing, assume +\n start, end = pos-1, pos\n sys.stderr.write(\" %s / %s %s:%s-%s \\r\"%(ri, len(regions), ref, start, end))\n # extend start/end by nn and end by dt_shift\n ##this is for RNA, for DNA start start needs to be -dt_shift\n parsers = [bam2data(bam, ref, start-nn if start>=nn else 0, end+2*nn, True, \n nn, features, maxReads) for bam in bams]\n refparser = fasta2bases(fasta, ref, start, end, strands)\n for ((pos, _, _strand, refbase, mer), *calls) in zip(refparser, *parsers):\n if _strand==strand:\n sdata = [[], []] #np.hstack(c) for c in calls]\n for c, s in zip(calls, strains): sdata[strain2idx[s]].append(np.hstack(c))\n # merge replicates\n region2data[(ref, pos, strand)] = (mer, [np.vstack(sd) for sd in sdata])\n return region2data", "def test_combine_mappings(self):\r\n\r\n self.tmp_dir = mkdtemp(dir=\"./\", suffix=\"/\")\r\n\r\n combine_mappings(\r\n fasta,\r\n denoiser_mapping,\r\n denoised_seqs,\r\n otu_picker_map,\r\n self.tmp_dir)\r\n\r\n observed_otu_map = \"\".join(\r\n list(open(self.tmp_dir + \"/denoised_otu_map.txt\")))\r\n\r\n expected_otu_map = \"\"\"1:\\tS1_1\\tS1_2\\tS2_4\\tS2_5\r\n2:\\tS2_3\\tS1_6\r\n\"\"\"\r\n self.assertEqual(observed_otu_map, expected_otu_map)\r\n\r\n observed_fasta = \"\".join(\r\n list(open(self.tmp_dir + \"/denoised_all.fasta\")))\r\n expected_fasta = \"\"\">S1_1 Read1\r\nAAA\r\n>S1_2 Read2\r\nTTT\r\n>S2_3 Read3\r\nGGG\r\n\"\"\"\r\n self.assertEqual(observed_fasta, expected_fasta)", "def look_for_read_in_sim(read, sim_info):\n\t\n\tsim_ints = {}\n\t\n\n\t# look through rows of sim info for matches\n\tfor sim_row in sim_info:\n\t\t\n\t\t# look in chimeric\n\t\tif read['merged']:\n\t\t\t\n\t\t\t# if read was merged, we just want to look for either read 1 or 2 annotated as chimeric\n\t\t\tfor annotated_read in sim_row['left_chimeric'].split(\";\"):\n\t\t\t\tif re.match(f\"{read['qname']}/\", annotated_read):\n\t\t\t\t\tsim_ints[f\"{sim_row['id']}_left_chimeric\"] = sim_row\n\t\t\t\t\t\n\t\t\tfor annotated_read in sim_row['right_chimeric'].split(\";\"):\n\t\t\t\tif re.match(f\"{read['qname']}/\", annotated_read):\n\t\t\t\t\tsim_ints[f\"{sim_row['id']}_right_chimeric\"] = sim_row\n\t\t\t\t\n\t\telse:\n\t\t\t# if read wasn't merged, check for this specific read number\n\t\t\tif 
f\"{read['qname']}/{read['num']}\" in sim_row['left_chimeric'].split(\";\"):\n\t\t\t\tsim_ints[f\"{sim_row['id']}_left_chimeric\"] = sim_row\n\t\t\n\t\t\tif f\"{read['qname']}/{read['num']}\" in sim_row['right_chimeric'].split(\";\"):\n\t\t\t\tsim_ints[f\"{sim_row['id']}_right_chimeric\"] = sim_row\n\t\t\t\n\t\t# look in discordant\n\t\tif read['qname'] in sim_row['left_discord'].split(\";\"):\n\t\t\tsim_ints[f\"{sim_row['id']}_left_discord\"] = sim_row\n\t\t\t\n\t\tif read['qname'] in sim_row['right_discord'].split(\";\"):\n\t\t\tsim_ints[f\"{sim_row['id']}_right_discord\"] = sim_row\n\t\t\t\n\treturn sim_ints", "def parse_match(self, read_id, alignment_position, length, read_sequence, ref_sequence, qualities):\n start = alignment_position\n stop = start + length\n for i in range(start, stop):\n\n self.coverage[i] += 1\n allele = read_sequence[i-alignment_position]\n ref = ref_sequence[i-alignment_position]\n self.base_dictionary[read_id][i] = (allele, qualities[i-alignment_position])\n # self._update_base_dictionary(read_id, i, allele, qualities[i-alignment_position])\n if allele != ref:\n self.mismatch_count[i] += 1\n self._update_read_allele_dictionary(read_id, i, allele, MISMATCH_ALLELE, qualities[i-alignment_position])\n else:\n self.match_count[i] += 1\n # this slows things down a lot. Don't add reference allele to the dictionary if we don't use them\n # self._update_read_allele_dictionary(i, allele, MATCH_ALLELE)", "def parse_sam(rows):\n row1, row2 = rows\n mseqs = {}\n failed_list = []\n insert_list = []\n rname = row1['rname']\n qname = row1['qname']\n cigar1 = row1['cigar']\n cigar2 = row2['cigar']\n\n # filtering criteria\n reason = None\n if cigar1 == '*':\n reason = 'R1 unmapped'\n if int(row1['mapq']) < read_mapping_cutoff:\n reason = 'R1 low mapq'\n\n if cigar2 == '*':\n reason = 'R2 unmapped'\n if int(row2['mapq']) < read_mapping_cutoff:\n reason = 'R2 low mapq'\n\n genotype1, genotype2 = None, None\n try:\n genotype1 = row1['rname'].split('-')[1][0]\n genotype2 = row2['rname'].split('-')[1][0]\n except:\n reason = 'discordant map'\n pass\n\n if genotype1 != genotype2:\n reason = 'map conflict'\n\n if reason:\n failed_list.append({'qname': qname,\n 'rname1': row1['rname'],\n 'rname2': row2['rname'],\n 'reason': reason})\n else:\n pos1 = int(row1['pos'])-1 # convert 1-index to 0-index\n _, seq1, qual1, inserts = apply_cigar(cigar1, row1['seq'], row1['qual'])\n \n # report insertions relative to sample consensus\n for left, (iseq, iqual) in inserts.iteritems():\n insert_list.append({'qname': qname,\n 'fwd_rev': 'F' if is_first_read(row1['flag']) else 'R',\n 'refname': rname,\n 'pos': pos1+left,\n 'insert': iseq,\n 'qual': iqual})\n \n seq1 = '-'*pos1 + seq1 # pad sequence on left\n qual1 = '!'*pos1 + qual1 # assign lowest quality to gap prefix so it does not override mate\n \n \n # now process the mate\n pos2 = int(row2['pos'])-1 # convert 1-index to 0-index\n _, seq2, qual2, inserts = apply_cigar(cigar2, row2['seq'], row2['qual'])\n for left, (iseq, iqual) in inserts.iteritems():\n insert_list.append({'qname': qname,\n 'fwd_rev': 'F' if is_first_read(row2['flag']) else 'R',\n 'refname': rname,\n 'pos': pos2+left,\n 'insert': iseq,\n 'qual': iqual})\n seq2 = '-'*pos2 + seq2\n qual2 = '!'*pos2 + qual2\n \n # merge reads\n for qcut in sam2aln_q_cutoffs:\n mseq = merge_pairs(seq1, seq2, qual1, qual2, qcut)\n prop_N = mseq.count('N') / float(len(mseq.strip('-')))\n if prop_N > max_prop_N:\n # fail read pair\n failed_list.append({'qname': qname,\n 'reason': 'merge failure'})\n 
continue\n mseqs[qcut] = mseq\n\n return rname, mseqs, insert_list, failed_list", "def map_reads(self, qc_dic):\n if self.aligner == \"hisat2\":\n build([hisat2.HisatMapW(fastq_dic=qc_dic, num_cpus=self.num_cpus,\n indexfile=self.hisat_index, workdir=self.workdir,\n kingdom=self.kingdom)],\n local_scheduler=self.local_scheduler)\n elif self.aligner in [\"STAR\", \"star\"]:\n build([star.map_starW(fastq_dic=qc_dic, num_cpus=self.num_cpus,\n stardb_dir=self.stardb_dir, workdir=self.workdir)],\n local_scheduler=self.local_scheduler)", "def _read_pyMatch(fn, precursors):\n with open(fn) as handle:\n reads = defaultdict(realign)\n for line in handle:\n query_name, seq, chrom, reference_start, end, mism, add = line.split()\n reference_start = int(reference_start)\n # chrom = handle.getrname(cols[1])\n # print(\"%s %s %s %s\" % (line.query_name, line.reference_start, line.query_sequence, chrom))\n if query_name not in reads:\n reads[query_name].sequence = seq\n iso = isomir()\n iso.align = line\n iso.start = reference_start\n iso.subs, iso.add = _realign(reads[query_name].sequence, precursors[chrom], reference_start)\n logger.debug(\"%s %s %s %s %s\" % (query_name, reference_start, chrom, iso.subs, iso.add))\n if len(iso.subs) > 1:\n continue\n reads[query_name].set_precursor(chrom, iso)\n\n reads = _clean_hits(reads)\n return reads", "def _read_bam(bam_fn, precursors):\n mode = \"r\" if bam_fn.endswith(\"sam\") else \"rb\"\n handle = pysam.Samfile(bam_fn, mode)\n reads = defaultdict(realign)\n for line in handle:\n chrom = handle.getrname(line.reference_id)\n # print(\"%s %s %s %s\" % (line.query_name, line.reference_start, line.query_sequence, chrom))\n query_name = line.query_name\n if query_name not in reads:\n reads[query_name].sequence = line.query_sequence\n iso = isomir()\n iso.align = line\n iso.start = line.reference_start\n iso.subs, iso.add = _realign(reads[query_name].sequence, precursors[chrom], line.reference_start)\n reads[query_name].set_precursor(chrom, iso)\n\n reads = _clean_hits(reads)\n return reads", "def combine_mappings(fasta_fh, mapping_fh, denoised_seqs_fh,\r\n otu_picker_otu_map_fh, out_dir):\r\n\r\n # read in mapping from split_library file\r\n labels = imap(lambda a_b: a_b[0], parse_fasta(fasta_fh))\r\n # mapping from seq_id to sample_id\r\n sample_id_mapping = extract_read_to_sample_mapping(labels)\r\n\r\n denoiser_mapping = read_denoiser_mapping(mapping_fh)\r\n # read in cd_hit otu map\r\n # and write out combined otu_picker+denoiser map\r\n otu_fh = open(out_dir + \"/denoised_otu_map.txt\", \"w\")\r\n for otu_line in otu_picker_otu_map_fh:\r\n otu_split = otu_line.split()\r\n\r\n otu = otu_split[0]\r\n ids = otu_split[1:]\r\n\r\n get_sample_id = sample_id_mapping.get\r\n # concat lists\r\n # make sure the biggest one is first for pick_repr\r\n all_ids = sort_ids(ids, denoiser_mapping)\r\n all_ids.extend(sum([denoiser_mapping[id] for id in ids], []))\r\n try:\r\n otu_fh.write(\"%s\\t\" % otu +\r\n \"\\t\".join(map(get_sample_id, all_ids)) + \"\\n\")\r\n except TypeError:\r\n # get returns Null if denoiser_mapping id not present in\r\n # sample_id_mapping\r\n print \"Found id in denoiser output, which was not found in split_libraries \" +\\\r\n \"output FASTA file. 
Wrong file?\"\r\n exit()\r\n\r\n fasta_out_fh = open(out_dir + \"/denoised_all.fasta\", \"w\")\r\n for label, seq in parse_fasta(denoised_seqs_fh):\r\n id = label.split()[0]\r\n newlabel = \"%s %s\" % (sample_id_mapping[id], id)\r\n fasta_out_fh.write(BiologicalSequence(seq, id=newlabel).to_fasta())", "def extract_read_to_sample_mapping(labels):\r\n sample_id_mapping = {}\r\n\r\n re = compile(r'(\\S+) (\\S+)')\r\n for label in labels:\r\n tmatch = search(re, label)\r\n sample_id = tmatch.group(1)\r\n flowgram_id = tmatch.group(2)\r\n sample_id_mapping[flowgram_id] = sample_id\r\n\r\n return sample_id_mapping", "def caricaReadsEsIn(fileInput):\n\n\tidx_gene \t= 4 \n\tidx_chrom \t= 0\n\tidx_start\t= 1\n\tidx_end\t\t= 2\n\tidx_reads\t= 6\n\n\tdictReadsEsIn = {}\n\n\tlines = [x.strip('\\n').split('\\t') for x in open(fileInput)]\n\t\n\tfor riga in lines:\n\t\tgeneName \t= riga[idx_gene]\n\t\tchrom\t\t= riga[idx_chrom]\n\t\tstart\t\t= riga[idx_start]\n\t\tend\t\t\t= riga[idx_end]\n\t\treads\t\t= riga[idx_reads]\n\n\t\tif not geneName in dictReadsEsIn:\n\t\t\tdictReadsEsIn[geneName] = {}\n\t\t\tdictReadsEsIn[geneName][chrom] = [False, [start], [end], [reads]]\t# Il primo campo indica se il cromosoma ha almeno..\n\t\t \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# ..una regione con reads\n\t\telif chrom not in dictReadsEsIn[geneName]:\n\t\t\tdictReadsEsIn[geneName][chrom] = [False, [start], [end], [reads]]\n\t\telse:\n\t\t\tdictReadsEsIn[geneName][chrom][idx_start].append(start)\n\t\t\tdictReadsEsIn[geneName][chrom][idx_end].append(end)\n\t\t\tdictReadsEsIn[geneName][chrom][3].append(reads)\n\n\t\ti = len(dictReadsEsIn[geneName][chrom][3])\n\t\tif int(dictReadsEsIn[geneName][chrom][3][i-1]) != 0:\n\t\t\tdictReadsEsIn[geneName][chrom][0] = True\t\t\t\t\t\t\t# Indica se c'e' almeno una regione esonica/intronica\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# che mappa delle reads\n\n\t# Si eliminano i cromosomi che non hanno mappato reads ne' su introni\n\t# ne' su esoni (primo value del dizionario = FALSE)\n\t#\n\tgeneKeys = dictReadsEsIn.keys()\n\tfor geneName in geneKeys:\n\t\tchromKeys = dictReadsEsIn[geneName].keys()\n\t\tfor chrom in chromKeys:\n\t\t\tif not dictReadsEsIn[geneName][chrom][0]:\n\t\t\t\tdel dictReadsEsIn[geneName][chrom]\n\t\t\t\t# Si eliminano i geni che non hanno piu' cromosomi\n\t\t\t\t#\n\t\t\t\tif not dictReadsEsIn[geneName]:\n\t\t\t\t\tdel dictReadsEsIn[geneName]\n\t\t\t\t\tprint 'Il gene %s non presenta cromosomi con reads mappanti.\\n' % geneName,\n\n\treturn dictReadsEsIn", "def align(aligner, reads):\n counter = 0\n for read in SeqIO.parse(reads, \"fasta\"): \n try:\n alignInfo = next(aligner.map(str(read.seq)))\n print(alignInfo) \n except StopIteration:\n print(read.format(\"fasta\"), end='')", "def test_fastq_map():\n cluster = clust.Clustering.from_fastq(TMP + 'map.fastq', 4, 'ACGT',\n threshold=2, prefix=1)\n uid1_expect = 'AAAACCCC'\n uid2_expect = 'CCCCAAAA'\n seq1_expect = 'ACCTCTCCCTGTGGGTCATGTGACT'\n seq2_expect = 'TTGTTTGAAAAACCTCGAAAGTAAC'\n\n assert uid1_expect in cluster, \"%r not in %r\" % (uid1_expect, list(cluster.keys()))\n assert uid2_expect in cluster, \"%r not in %r\" % (uid2_expect, list(cluster.keys()))\n assert cluster[uid1_expect].sequence.sequence == seq1_expect, \\\n \"%r != %r\" % (cluster[uid1_expect].sequence.sequence, seq1_expect)\n assert cluster[uid2_expect].sequence.sequence == seq2_expect, \\\n \"%r != %r\" % (cluster[uid2_expect].sequence.sequence, seq2_expect)\n assert cluster[uid1_expect].size == 5, \"%r != %r\" % (cluster[uid1_expect].size, 
5)\n assert cluster[uid2_expect].size == 5, \"%r != %r\" % (cluster[uid2_expect].size, 5)", "def read_bowtie_output(self, filename):\n\t\tself.filenames.append(filename)\n\t\tmatches, read_count, phreds = {}, {}, {}\n\t\tf = open(filename)\n\t\tfor line in f:\n\t\t\traw = line.strip().split()\n\t\t\tif len(raw) == 5:\n\t\t\t\tid, strand, ref_seq_id, offset, seq = raw\n\t\t\t\tqual = [BOWTIE_PHRED_OFFSET] * len(seq) # pretend perfect quality\n\t\t\telse:\n\t\t\t\tid, strand, ref_seq_id, offset, seq, qual = raw[:6]\n\t\t\t\tqual = [ord(x) - BOWTIE_PHRED_OFFSET for x in qual]\n\t\t\tif seq in read_count:\n\t\t\t\tread_count[seq] += 1\n\t\t\t\tphreds[seq] += qual\n\t\t\telse:\n\t\t\t\tread_count[seq] = 1\n\t\t\t\tphreds[seq] = np.array(qual)\n\t\t\t\tmatches[id] = BowTieMatch(id, strand, ref_seq_id, int(offset), Seq(seq), None, None)\n\t\tprint >> sys.stderr, \"removing low quality reads with score < {0}\".format(MIN_PHRED_SCORE)\n\t\tremove_low_quality_for_matched(matches, read_count, phreds, MIN_PHRED_SCORE, None)\n\t\tfor id, m in matches.iteritems():\n\t\t\tgapped_pos = self.refmap.ungapped_to_gapped(m.ref_seq_id, m.offset)\n\t\t\tif gapped_pos not in self.M:\n\t\t\t\tself.M[gapped_pos] = []\n\t\t\tread = Read(id, seq=m.read.tostring(), ref_seq_id=m.ref_seq_id, offset=m.offset, \\\n\t\t\t\t\tcopy=read_count[m.read.tostring()])\n\t\t\tself.M[gapped_pos].append(read)", "def test_read_mapping_file_multiple(reference_multi):\n content, reference = reference_multi\n from_names = list(reference.keys())\n to_names = []\n block_names = []\n\n for k in reference:\n to_names.extend(reference[k].keys())\n for to in reference[k]:\n block_names.extend(reference[k][to].keys())\n force_fields = case_to_dummy_ffs(from_names + to_names, block_names,\n {(0, 'X1'): [(0, 'A')], (0, 'X2'): [(0, 'B')], (0, 'X3'): [(0, 'D')]},\n {(0, 'A'): {(0, 'X1'): 1.0}, (0, 'B'): {(0, 'X2'): 1.0}, (0, 'C'): {(0, 'X2'): 1.0}, (0, 'D'): {(0, 'X3'): 1.0}},\n [])\n mappings = vermouth.map_input.read_backmapping_file(content, force_fields)\n compare_old_new_mappings(mappings, reference)", "def remove_cds_and_remap_reads(self, cds_aln):\n super(GreedySolver, self).remove_cds_and_remap_reads(cds_aln)\n # Dictionary where key is read_id and value is cds alignment to which it maps.\n # If it does not map to any cds alignment then value is None.\n new_read_mappings = {}\n\n for aln_reg in cds_aln.aligned_regions.values():\n if aln_reg.active:\n # Find alternative cds alignment with highest coverage\n best_alt_cds_aln = None\n for alt_cds_aln in self._cds_aln_container.read2cds[aln_reg.read_id]:\n if best_alt_cds_aln == None or self._get_coverage(alt_cds_aln) > self._get_coverage(best_alt_cds_aln): \n best_alt_cds_aln = alt_cds_aln\n # Activate it in best alternative cds alignment (if there is one)\n if (best_alt_cds_aln != None):\n best_alt_cds_aln.aligned_regions[aln_reg.read_id].active = True\n # Add mapping to output dictionary\n new_read_mappings[aln_reg.read_id] = best_alt_cds_aln\n\n # Delete original cds alignment\n del self._cds_aln_container.cds_repository[cds_aln.cds]\n # Remove original cds alignment from read2cds\n for cds_alns in self._cds_aln_container.read2cds.values():\n if cds_aln in cds_alns: cds_alns.remove(cds_aln)\n\n # Force recalculation of coverage for updated cds alignments by forgeting coverage\n for updated_cds_aln in set(filter(lambda x: x != None, new_read_mappings.values())):\n del self._coverages[updated_cds_aln]\n\n return new_read_mappings", "def corrected_records(handle):\n\n seen = 
coll.defaultdict(set)\n for record in SeqIO.parse(handle, \"fasta\"):\n\n if not str(record.seq):\n continue\n\n # These are probably protein, so skip them\n if record.id.startswith(\"XM_\") or record.id.startswith(\"NM_\"):\n continue\n\n # Change given ids into a probably unique id\n given = record.id.replace(\",\", \"\")\n match = re.search(r\"gene RGD:(\\d+),\", record.description)\n if not match:\n raise ValueError(\"RGD fasta must state gene id: %s\", record.description)\n gene = match.group(1)\n\n match = re.search(\"locus: (.+)$\", record.description)\n if not match:\n raise ValueError(\"RGD fasta must have a locus\")\n location = match.group(1)\n\n record.id = \"{given}-{gene}-{location}\".format(\n given=given,\n gene=gene,\n location=location,\n )\n\n # Prevent writing duplicate entries\n if str(record.seq) in seen[record.id]:\n continue\n\n seen[record.id].add(str(record.seq))\n yield record", "def map_RE(self, index):\n if index is None:\n self.logger.error(\"The bowtie genome index must be specified to \"\n \"map restriction enzyme sites\")\n return None\n self.logger.info(\"Mapping restriction enyzme recognition sites\")\n # Start bowtie as a subprocess\n mapping = subprocess.Popen(\n self.arguments + [index, '-'], stdin=subprocess.PIPE,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n # Send the raw sequence of the DpnII recognition site\n mapping.stdin.write(b'GATC')\n mapping.stdin.close()\n bed = {}\n total = 0\n # Retrieve the alignments from bowtie\n with mapping.stdout as f:\n for line in f:\n line = line.decode('UTF-8').split('\\t')\n chrom, start = line[2], int(line[3])\n stop = start + 4\n if chrom not in bed:\n bed[chrom] = []\n bed[chrom].append((start, stop))\n total += 1\n # Log mapping results\n with mapping.stderr as f:\n for line in f:\n if line[0] == '#':\n continue\n self.logger.debug(line.decode('UTF-8').rstrip('\\n'))\n # Sort chromosome list by name/number\n chroms = numpy.array(list(bed))\n chrints = []\n for i in range(chroms.shape[0]):\n try:\n chrints.append((\n str(int(chroms[i].lstrip('chr'))).rjust(2, '0'),\n chroms[i]))\n except ValueError:\n chrints.append((chroms[i], chroms[i]))\n chrints.sort()\n chroms = []\n for i in range(len(chrints)):\n chroms.append(chrints[i][1])\n self.chroms = numpy.array(chroms)\n self.chr_indices = numpy.zeros(self.chroms.shape[0] + 1,\n dtype=numpy.int32)\n if self.focus is None:\n self.logger.info(\"Defaulting to a fragment-focused analysis\")\n self.focus = 'fragments'\n if self.focus == 'fragments':\n N = total - self.chroms.shape[0]\n else:\n N = total\n # Arrange data into single array with indexed chromosomes\n self.data = numpy.zeros(N, dtype=numpy.dtype([\n ('chr', numpy.int32), ('coords', numpy.int32, (2,)),\n ('treatment', numpy.int32), ('control', numpy.int32),\n ('score', numpy.float64), ('alignable', numpy.bool)]))\n self.data['alignable'].fill(True)\n for i in range(self.chroms.shape[0]):\n chrom = self.chroms[i]\n bed[chrom] = numpy.array(bed[chrom])\n bed[chrom] = bed[chrom][numpy.argsort(bed[chrom][:, 0]), :]\n start = self.chr_indices[i]\n if self.focus == 'fragments':\n self.chr_indices[i + 1] = start + bed[chrom].shape[0] - 1\n stop = self.chr_indices[i + 1]\n self.data['coords'][start:stop, 0] = bed[chrom][:-1, 1]\n self.data['coords'][start:stop, 1] = bed[chrom][1:, 0]\n else:\n self.chr_indices[i + 1] = start + bed[chrom].shape[0]\n stop = self.chr_indices[i + 1]\n self.data['coords'][start:stop, :] = bed[chrom]\n self.data['chr'][start:stop] = i", "def map_STAR(args):\n for type in 
['joined', 'merged']:\n for strand in ['watson', 'crick']:\n if strand == 'watson':\n n = 1\n else:\n n = 3\n STAR_index_dir = os.path.join(args.output_dir,'STAR_%s_%s'%(type, strand))\n cmd = \"STAR --runThreadN %s --genomeDir %s\"%(args.threads, STAR_index_dir)\n\n if type == 'merged':\n cmd += \" --readFilesIn %s\" % vars(args)['%s_%s' % (strand, type)]\n else:\n #TODO: define custom parameters for PE reads\n cmd += \" --readFilesIn %s \" % vars(args)['%s_%s_r1' % (strand, type)]\n cmd += \" %s\" % vars(args)['%s_%s_r2' % (strand, type)]\n\n cmd += \" --outSAMattributes NM MD AS --outSAMtype SAM\"\n cmd += \" --outFileNamePrefix %s\" % (os.path.join(args.output_dir,'%s_%s'%(strand, type)))\n cmd += \" --outReadsUnmapped Fastx\" #output of unmapped reads for inspection\n cmd += \" --scoreGapATAC -2 --scoreGapNoncan -2\"\n #outFilterScoreMinOverLread : float: sam as outFilterMatchNmin, but normalized to the read length (sum of mates’ lengths for paired-end reads)\n #outFilterMatchNminOverLread: float: same as outFilterScoreMin, but normalized to read length (sum of mates’ lengths for paired-end reads)\n\n # –outFilterMultimapNmax 1 int: maximum number of loci the read is allowed to map to. Alignments (all of\n # them) will be output only if the read maps to no more loci than this value.\n cmd += \" --outFilterMismatchNoverLmax 0.95\"\n # TODO: implement --alignEndsType endtoend mapping after joined reads are merged\n cmd += \"--outFilterMatchNminOverLread 0.9 --scoreGap -4 \" \\\n \" --alignEndsType EndToEnd\" \\\n \" --alignSoftClipAtReferenceEnds No\" \\\n \" --outSAMorder PairedKeepInputOrder\" \\\n \" --outFilterMultimapNmax 1\" \\\n \" --scoreInsOpen -1\" \\\n #make sure we have a bam file sorted by name\n if args.extraflags:\n cmd += ' %s' % args.extraflags\n log = \"run STAR for % strand on %s reads\"%(strand, type)\n run_subprocess([cmd],args, log)\n log = \"write final log of STAR to normal log\"\n cmd = \"cat %s \" % os.path.join(args.output_dir, '%s_%s' % (strand, type) + 'Log.final.out')\n run_subprocess([cmd], args, log)\n return args", "def read_in_file():\n\t# Declare variables\n\treads = []\n\n\t# Get command line arguments\n\targuments = sys.argv\n\targuments_length = len(arguments)\n\n\t# Read file is the first argument\n\tread_file_name = arguments[1]\n\n\t# Process read file \n\tread_file = open(read_file_name, 'r')\n\tfor line in read_file:\n\t\tread_info = line.split()\n\t\tread_string = read_info[2].replace('\\'', '')\n\t\tnew_read = GenerativeRead(read_string, [], read_info[5], read_info[3], None, [], read_info[0], read_info[1], read_info[4]) \n\t\treads.append(new_read)\n\tread_file.close()\n\n\t# Repeat regions file in the second argument\n\trepeat_file_name = arguments[2]\n\n\t# Process repeat file\n\trepeat_file = open(repeat_file_name, 'r')\n\talignments = [[]]\n\talignment_index = -1\n\tprevious_line = ''\n\n\n\tfor line in repeat_file:\n\t\talignment_info = line.split()\n\n\t\t# This consists of a tuple of alignment string, alignment start position and alignment chromosome\n\t\t#new_align = alignment_info[2], alignment_info[4], alignment_info[3]\n\n\t\tnew_align = Alignment(alignment_info[2], None, alignment_info[4], alignment_info[3])\n\n\t\tif previous_line != alignment_info[0]:\n\t\t\t# It is not a repeat\n\t\t\talignment_index = alignment_index + 1\n\t\t\talignments.append([])\n\t\t\tprevious_line = alignment_info[0]\n\n\t\talignments[alignment_index].append(new_align)\n\n\trepeat_file.close()\n\n\t# Associate each read with the other alignments\n\tfor 
read in reads:\n\t\t# Find the other alignments\n\t\tpos = read.get_position()\n\t\tfound = False\n\t\tfound_index = -1\n\n\t\tfor a_index, alignment_lists in enumerate(alignments):\n\t\t\t# find matching alignments\n\t\t\t# TODO: Don't add alignment already have\n\t\t\t# TODO: Make functional with filter\n\t\t\tfor align in alignment_lists:\n\t\t\t\tif align.get_position() == pos:\n\t\t\t\t\tfound = True\n\t\t\t\t\tfound_index = a_index\n\t\t\t\t\tbreak\n\n\t\t\tif found is True:\n\t\t\t\tbreak\n\n\t\tif found is True:\n\t\t\tfor new_align in alignments[found_index]:\n\t\t\t\tread.add_alignment(new_align)\n\t\t\t\n\n\n\t# SNP files are the remaining ones\n\tsnp_file_names = [arguments[file_id] for file_id in range(3, arguments_length) ]\n\n\t# Process SNP files\n\tfor file_name in snp_file_names:\n\t\tsnp_file = open(file_name, 'r')\n\n\t\tfor line in snp_file:\n\t\t\tsnp_info = line.split()\n\t\t\tsnps = snp_info[3].split('/')\n\t\t\tsnp_pos = int(float(snp_info[2]))\n\n\t\t\t# Ignore alleles that are longer than one base\n\n\t\t\t\n\t\t\tif all(len(x) < 2 for x in snps):\n\n\t\t\t\t# Iterate through reads and determine whether or not it contains this SNP\n\t\t\t\tpos_low = snp_pos - 49\n\t\t\t\n\n\t\t\t\tfor read in reads:\n\t\t\t\t\tpositions = read.get_alignment_positions()\n\n\t\t\t\t\tfor p_index, p in enumerate(positions):\n\t\t\t\t\t\tp = int(float(p))\n\t\t\t\t\t\tif p >= pos_low and p <= snp_pos:\n\t\t\t\t\t\t\t# Get index of snp\n\t\t\t\t\t\t\toffset = snp_pos - p\n\t\t\t\t\t\t\tcalls = [0, 0, 0, 0]\n\t\t\t\t\t\t\tfor snp in snps:\n\t\t\t\t\t\t\t\tcall_index = get_base_num(snp)\n\t\t\t\t\t\t\t\tcalls[call_index] = 1\n\n\t\t\t\t\t\t\t# Add the SNP to the read\n\t\t\t\t\t\t\tread.add_snp(p_index, offset, calls)\n\t\t\t\t\t\t\t\n\t\tsnp_file.close()\n\treturn reads", "def run_multimapping(SRA):\n\n if not os.path.exists(\"TMP/ambiguous_reads/\"):\n os.mkdir(\"TMP/ambiguous_reads/\")\n\n cmd_STAR = 'STAR --outSAMtype BAM SortedByCoordinate --runThreadN 8 --winAnchorMultimapNmax 200 --seedSearchStartLmax 15 --genomeDir' + ' ' + STAR_TRANSCRIPTOME_DIR + ' ' + '--readFilesIn' + ' ' + TMP_DIR+SRA+'_rrnaUnmapped.fastq' + ' ' + '--outFileNamePrefix' + ' ' + TMP_DIR+'ambiguous_reads/'+SRA+'_STAR_transcriptome_'\n output = subprocess.run(cmd_STAR, shell=True)\n\n \n # Keep only multi-mapping reads:\n cmd_filter = 'python code/sam_STAR_mapq_filtering.py' + ' ' + TMP_DIR+'ambiguous_reads/'+SRA+'_STAR_transcriptome_Aligned.sortedByCoord.out.bam' + ' ' + TMP_DIR+'ambiguous_reads/'+SRA+'_STAR_transcriptome_multi_mapped_sorted.bam' + ' ' + 'all'\n output = subprocess.run(cmd_filter, shell=True)\n\n cmd_samtools2 = 'samtools index' + ' ' + TMP_DIR+'ambiguous_reads/'+SRA+'_STAR_transcriptome_multi_mapped_sorted.bam'\n output = subprocess.run(cmd_samtools2, shell=True)" ]
[ "0.6826506", "0.66215014", "0.6525083", "0.6418822", "0.6416063", "0.631023", "0.6223504", "0.6182403", "0.60843307", "0.6026228", "0.6004919", "0.5979466", "0.5960708", "0.59569067", "0.59227425", "0.59207463", "0.5912195", "0.58296555", "0.5819795", "0.5811446", "0.5797608", "0.5784261", "0.5753471", "0.5741667", "0.5721999", "0.5694153", "0.56585646", "0.5657997", "0.5639698", "0.5630461" ]
0.6700885
1
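A minimal sketch of the STAR command-assembly pattern used in the mapping snippets above; the directory layout, sample names, thread count, and the direct subprocess call are illustrative assumptions, not the pipeline's own run_subprocess helper.

import os
import subprocess

DRY_RUN = True  # flip to False to actually invoke STAR

def build_star_cmd(output_dir, threads, strand, read_type, read_files):
    # Assemble a STAR alignment command for one strand/read-type combination,
    # mirroring the string-concatenation style of the snippets above.
    index_dir = os.path.join(output_dir, "STAR_%s_%s" % (read_type, strand))
    prefix = os.path.join(output_dir, "%s_%s" % (strand, read_type))
    cmd = "STAR --runThreadN %s --genomeDir %s" % (threads, index_dir)
    cmd += " --readFilesIn %s" % " ".join(read_files)  # one file for merged reads, two for paired-end
    cmd += " --outSAMattributes NM MD AS --outSAMtype SAM"
    cmd += " --outFileNamePrefix %s" % prefix
    cmd += " --outReadsUnmapped Fastx"  # keep unmapped reads for inspection
    return cmd

cmd = build_star_cmd("output", 4, "watson", "merged", ["watson_merged.fq.gz"])
print(cmd)  # inspect the assembled command before running it
if not DRY_RUN:
    subprocess.run(cmd, shell=True, check=True)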
Wrapper to run scikit-ribo from the same pipeline. Requires a local install of the modified scikit-ribo toolbox and of all scikit-ribo environment dependencies (see the conda environment file).
def run_scikit_ribo(SRA, genome_fasta, genome_gtf): # 3. Scikit-ribo index print("Building scikit-ribo index") if not os.path.exists(SCIKIT_DIR): os.mkdir(SCIKIT_DIR) cmd_scikit = 'python' + ' ' + SCIKIT_PATH + 'scikit-ribo-build.py' + ' ' + '-g' + ' ' + genome_gtf + ' ' + '-f' + ' ' + genome_fasta + ' ' + '-p' + ' ' + SRA + ' ' + '-o' + SCIKIT_DIR output = subprocess.run(cmd_scikit, shell=True) print("scikit-ribo-run.py...") cmd_scikit = 'python' + ' ' + SCIKIT_PATH + 'scikit-ribo-run.py' + ' ' + '-i' + ' ' + TMP_DIR+SRA+'_yeast_uniqueMapped_reads_sorted.bam' + ' ' + '-f' + ' ' + SCIKIT_DIR + ' ' + '-p' + ' ' + SRA + ' ' + '-o' + ' ' + 'TMP/scikit_'+SRA output = subprocess.run(cmd_scikit, shell=True) print("plot_ribo_density_dict.py...") cmd_scikit = 'python' + ' ' + SCIKIT_PATH + 'plot_ribo_density_dict_noCDT.py' + ' ' + '-i' + ' ' + TMP_DIR+'scikit_'+SRA+'/riboseq_input.txt' + ' ' + '-g' + ' ' + 'all' + ' ' + '-o' + ' ' + TMP_DIR+'scikit_'+SRA #+'_profiles' output = subprocess.run(cmd_scikit, shell=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def transformation_catalog():\n tc = TransformationCatalog()\n\n # Add docker container\n #crisis_container = Container(\n # 'crisis_container',\n # Container.DOCKER,\n # image = \"docker://slnagark/crisis_wf:latest\",\n # arguments=\"--runtime=nvidia --shm-size=1gb\"\n # ).add_env(TORCH_HOME=\"/tmp\")\n \n crisis_container = Container(\n 'galaxy_container',\n Container.SINGULARITY,\n image = str(Path(\".\").parent.resolve() / \"containers/crisis-computing_latest.sif\"),\n image_site = \"local\",\n mounts=[\"${DONUT_USER_HOME}:${DONUT_USER_HOME}\"]\n ).add_env(TORCH_HOME=\"/tmp\")\n\n\n # preprocessing scripts\n preprocess_images = Transformation(\n \"preprocess_images\",\n site = \"local\",\n pfn = os.path.join(os.getcwd(), \"bin/preprocess_images.py\"), \n is_stageable = True,\n container=crisis_container\n )\n\n preprocess_tweets = Transformation(\n \"preprocess_tweets\",\n site = 'local',\n pfn = os.path.join(os.getcwd(), \"bin/preprocess_tweets.py\"), \n is_stageable = True,\n container=crisis_container\n )\n\n \n # HPO, training and inference scripts for ResNet-50\n hpo_train_resnet = Transformation(\n \"hpo_train_resnet\",\n site = 'local',\n pfn = os.path.join(os.getcwd(), \"bin/hpo_train_resnet.py\"),\n is_stageable = True,\n container=crisis_container\n )\\\n .add_pegasus_profile(cores=16, gpus=1, runtime=14400, grid_start_arguments=\"-G -m 10\")\\\n .add_env(key=\"KICKSTART_MON_GRAPHICS_PCIE\", value=\"TRUE\")\n#\\\n# .add_env(key=\"KICKSTART_MON_GRAPHICS_UTIL\", value=\"TRUE\")\n\n train_resnet = Transformation(\n \"train_resnet\",\n site = 'local',\n pfn = os.path.join(os.getcwd(), \"bin/train_resnet.py\"),\n is_stageable = True,\n container=crisis_container\n )\\\n .add_pegasus_profile(cores=16, gpus=1, runtime=14400, grid_start_arguments=\"-G -m 10\")\\\n .add_env(key=\"KICKSTART_MON_GRAPHICS_PCIE\", value=\"TRUE\")\n#\\\n# .add_env(key=\"KICKSTART_MON_GRAPHICS_UTIL\", value=\"TRUE\")\n\n resnet_inference = Transformation(\n \"resnet_inference\",\n site = 'local',\n pfn = os.path.join(os.getcwd(), \"bin/resnet_inference.py\"),\n is_stageable = True,\n container=crisis_container\n )\\\n .add_pegasus_profile(cores=16, gpus=1, runtime=14400, grid_start_arguments=\"-G -m 10\")\\\n .add_env(key=\"KICKSTART_MON_GRAPHICS_PCIE\", value=\"TRUE\")\n#\\\n# .add_env(key=\"KICKSTART_MON_GRAPHICS_UTIL\", value=\"TRUE\")\n\n # HPO, training and inference scripts for Bi-LSTM\n\n hpo_train_bilstm = Transformation(\n \"hpo_train_bilstm\",\n site = 'local',\n pfn = os.path.join(os.getcwd(), \"bin/hpo_train_bilstm.py\"),\n is_stageable = True,\n container=crisis_container\n )\\\n .add_pegasus_profile(cores=16, gpus=1, runtime=14400, grid_start_arguments=\"-G -m 10\")\\\n .add_env(key=\"KICKSTART_MON_GRAPHICS_PCIE\", value=\"TRUE\")\n#\\\n #.add_env(key=\"KICKSTART_MON_GRAPHICS_UTIL\", value=\"TRUE\")\n\n train_bilstm = Transformation(\n \"train_bilstm\",\n site = 'local',\n pfn = os.path.join(os.getcwd(), \"bin/train_bilstm.py\"),\n is_stageable = True,\n container=crisis_container\n )\\\n .add_pegasus_profile(cores=16, gpus=1, runtime=14400, grid_start_arguments=\"-G -m 10\")\\\n .add_env(key=\"KICKSTART_MON_GRAPHICS_PCIE\", value=\"TRUE\")\n#\\\n# .add_env(key=\"KICKSTART_MON_GRAPHICS_UTIL\", value=\"TRUE\")\n \n bilstm_inference = Transformation(\n \"bilstm_inference\",\n site = 'local',\n pfn = os.path.join(os.getcwd(), \"bin/bilstm_inference.py\"),\n is_stageable = True,\n container=crisis_container\n )\\\n .add_pegasus_profile(cores=16, gpus=1, runtime=14400, grid_start_arguments=\"-G 
-m 10\")\\\n .add_env(key=\"KICKSTART_MON_GRAPHICS_PCIE\", value=\"TRUE\")\n#\\\n# .add_env(key=\"KICKSTART_MON_GRAPHICS_UTIL\", value=\"TRUE\")\n\n # late fusion script\n late_fusion = Transformation(\n \"late_fusion\",\n site = 'local',\n pfn = os.path.join(os.getcwd(), \"bin/late_fusion.py\"),\n is_stageable = True,\n container=crisis_container\n )\\\n .add_pegasus_profile(cores=16, gpus=1, runtime=14400, grid_start_arguments=\"-G -m 10\")\\\n .add_env(key=\"KICKSTART_MON_GRAPHICS_PCIE\", value=\"TRUE\")\n#\\\n# .add_env(key=\"KICKSTART_MON_GRAPHICS_UTIL\", value=\"TRUE\")\n\n\n tc.add_containers(crisis_container)\n tc.add_transformations(preprocess_tweets, preprocess_images, train_resnet, hpo_train_resnet, train_bilstm, hpo_train_bilstm, resnet_inference, bilstm_inference, late_fusion)\n tc.write()\n\n return preprocess_tweets, preprocess_images, train_resnet, hpo_train_resnet, train_bilstm, hpo_train_bilstm, resnet_inference, bilstm_inference, late_fusion", "def bootstrap():\n sub_install_packages()\n sub_install_virtualenv()\n sub_create_virtualenv()\n sub_install_python_requirements()", "def ci(session):\n session.install('-rrequirements-dev.txt')\n session.install('-e', '.')\n run_sphinx(session)\n run_yapf(session, True)\n run_all_linters(session)\n run_pytest_units(session)\n run_pytest_integrations(session)", "def install_sm_local_dependencies(framework, job_type, image, ec2_conn, ec2_instance_ami):\n python_invoker = get_python_invoker(ec2_instance_ami)\n # Install custom packages which need to be latest version\"\n # using virtualenv to avoid package conflicts with the current packages\n ec2_conn.run(f\"sudo apt-get install virtualenv -y \")\n ec2_conn.run(f\"virtualenv env --python {python_invoker}\")\n ec2_conn.run(f\"source ./env/bin/activate\")\n if framework == \"pytorch\":\n # The following distutils package conflict with test dependencies\n ec2_conn.run(\"sudo apt-get remove python3-scipy python3-yaml -y\")\n ec2_conn.run(f\"sudo {python_invoker} -m pip install -r requirements.txt \", warn=True)", "def bootstrap():\n local('virtualenv fabric_factory/ve')", "def bootstrap_aws():\n sub_install_packages()\n sub_install_virtualenv()\n sub_create_virtualenv()\n sub_install_python_requirements_aws()", "def main():\n\n # Force scripts to not use graphical output\n env = dict()\n env.update(os.environ)\n\n if \"DISPLAY\" not in os.environ:\n # No DISPLAY, set suitable default matplotlib backend as pyplot is used\n env[\"MPLBACKEND\"] = \"Agg\"\n\n if \"ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS\" not in os.environ:\n env[\"ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS\"] = str(multiprocessing.cpu_count())\n\n # Prevent user site packages from interfering with SCT dependencies (See issue #3067)\n env[\"PYTHONNOUSERSITE\"] = \"True\"\n\n command = os.path.basename(sys.argv[0])\n pkg_dir = os.path.dirname(sct.__file__)\n\n script = os.path.join(pkg_dir, \"scripts\", \"{}.py\".format(command))\n assert os.path.exists(script)\n\n cmd = [sys.executable, script] + sys.argv[1:]\n\n mpi_flags = os.environ.get(\"SCT_MPI_MODE\", None)\n if mpi_flags is not None:\n if mpi_flags == \"yes\": # compat\n mpi_flags = \"-n 1\"\n cmd = [\"mpiexec\"] + mpi_flags.split() + cmd\n\n os.execvpe(cmd[0], cmd[0:], env)", "def superconductor(local_dir, cpus, gpus, num_parallel, num_samples, oracle):\n\n # Final Version\n\n from design_baselines.autofocused_cbas import autofocused_cbas\n ray.init(num_cpus=cpus,\n num_gpus=gpus,\n include_dashboard=False,\n _temp_dir=os.path.expanduser('~/tmp'))\n 
tune.run(autofocused_cbas, config={\n \"logging_dir\": \"data\",\n \"normalize_ys\": True,\n \"normalize_xs\": True,\n \"task\": f\"Superconductor-{oracle}-v0\",\n \"task_kwargs\": {\"relabel\": False},\n \"bootstraps\": 5,\n \"val_size\": 200,\n \"ensemble_batch_size\": 100,\n \"vae_batch_size\": 100,\n \"embedding_size\": 256,\n \"hidden_size\": 256,\n \"num_layers\": 1,\n \"initial_max_std\": 0.2,\n \"initial_min_std\": 0.1,\n \"ensemble_lr\": 0.0003,\n \"ensemble_epochs\": 100,\n \"latent_size\": 32,\n \"vae_lr\": 0.0003,\n \"vae_beta\": 1.0,\n \"offline_epochs\": 200,\n \"online_batches\": 10,\n \"online_epochs\": 10,\n \"autofocus_epochs\": 10,\n \"iterations\": 20,\n \"percentile\": 80.0,\n \"solver_samples\": 128, \"do_evaluation\": True},\n num_samples=num_samples,\n local_dir=local_dir,\n resources_per_trial={'cpu': cpus // num_parallel,\n 'gpu': gpus / num_parallel - 0.01})", "def main():\n get_obofoundry(force_download=True)", "def slurm(ctx, alloc, nodes, memory, walltime, feature, conda_env, module,\n stdout_path, verbose):\n\n name = ctx.obj['NAME']\n tech = ctx.obj['TECH']\n points = ctx.obj['POINTS']\n sam_files = ctx.obj['SAM_FILES']\n res_file = ctx.obj['RES_FILE']\n sites_per_worker = ctx.obj['SITES_PER_WORKER']\n dirout, fout = os.path.split(ctx.obj['OUT_FPATH'])\n logdir = ctx.obj['LOGDIR']\n output_request = ctx.obj['OUTPUT_REQUEST']\n site_data = ctx.obj['SITE_DATA']\n max_workers = ctx.obj['MAX_WORKERS']\n mem_util_lim = ctx.obj['MEM_UTIL_LIM']\n timeout = ctx.obj['TIMEOUT']\n curtailment = ctx.obj['CURTAILMENT']\n gid_map = ctx.obj['GID_MAP']\n verbose = any([verbose, ctx.obj['VERBOSE']])\n\n slurm_manager = ctx.obj.get('SLURM_MANAGER', None)\n if slurm_manager is None:\n slurm_manager = SLURM()\n ctx.obj['SLURM_MANAGER'] = slurm_manager\n\n pc = get_node_pc(points, sam_files, tech, res_file, nodes)\n\n for i, split in enumerate(pc):\n node_name, fout_node = get_node_name_fout(name, fout, i, pc,\n hpc='slurm')\n\n node_fpath = os.path.join(dirout, fout_node)\n cmd = get_node_cmd(node_name, tech, sam_files, res_file, node_fpath,\n points=points,\n points_range=split.split_range,\n sites_per_worker=sites_per_worker,\n max_workers=max_workers,\n logdir=logdir,\n output_request=output_request,\n site_data=site_data,\n mem_util_lim=mem_util_lim,\n timeout=timeout,\n curtailment=curtailment,\n gid_map=gid_map,\n verbose=verbose)\n\n status = Status.retrieve_job_status(dirout, 'generation', node_name,\n hardware='eagle',\n subprocess_manager=slurm_manager)\n\n if status == 'successful':\n msg = ('Job \"{}\" is successful in status json found in \"{}\", '\n 'not re-running.'\n .format(node_name, dirout))\n elif 'fail' not in str(status).lower() and status is not None:\n msg = ('Job \"{}\" was found with status \"{}\", not resubmitting'\n .format(node_name, status))\n else:\n logger.info('Running reV generation on SLURM with node name \"{}\" '\n 'for {} (points range: {}).'\n .format(node_name, pc, split.split_range))\n # create and submit the SLURM job\n out = slurm_manager.sbatch(cmd,\n alloc=alloc,\n memory=memory,\n walltime=walltime,\n feature=feature,\n name=node_name,\n stdout_path=stdout_path,\n conda_env=conda_env,\n module=module)[0]\n if out:\n msg = ('Kicked off reV generation job \"{}\" (SLURM jobid #{}).'\n .format(node_name, out))\n # add job to reV status file.\n Status.add_job(\n dirout, 'generation', node_name, replace=True,\n job_attrs={'job_id': out, 'hardware': 'eagle',\n 'fout': fout_node, 'dirout': dirout})\n\n click.echo(msg)\n logger.info(msg)", 
"def cli():\n # Configuration\n AppConfig()\n\n # Parse the cli arguments\n parser = argparse.ArgumentParser()\n parser.add_argument('standard_data_path', help='path to the standard data directory')\n parser.add_argument('queue', help='job queue')\n parser.add_argument('--app-name', help='spark application name which must contain the application prd',\n default='gmt00-diaman-ai')\n parser.add_argument('--driver-mem', help='amount of memory to use for the driver process',\n default='4g')\n parser.add_argument('--driver-cores', help='number of cores to use for the driver process',\n default=1)\n parser.add_argument('--executor-mem', help='amount of memory to use per executor process',\n default='8g')\n parser.add_argument('--executor-cores', help='number of cores to use on each executor',\n default=4)\n parser.add_argument('--min-executors', help='minimum number of executors to run if dynamic allocation is enabled',\n default=4)\n parser.add_argument('--max-executors', help='maximum number of executors to run if dynamic allocation is enabled',\n default=12)\n parser.add_argument('--ini-executors', help='initial number of executors to run if dynamic allocation is enabled',\n default=4)\n args = parser.parse_args()\n\n # Instantiate spark\n _, spark_session = spark_config.get_spark(app_name=args.app_name,\n queue=args.queue,\n driver_mem=args.driver_mem,\n driver_cores=args.driver_cores,\n executor_mem=args.executor_mem,\n executor_cores=args.executor_cores,\n min_executors=args.min_executors,\n max_executors=args.max_executors,\n ini_executors=args.ini_executors)\n\n # Run the train pipeline\n train_pipeline.run(spark_session, args.standard_data_path)", "def setup(app):\n wheel = ensure_wheel()\n subprocess.check_call([\n \"jupyter\", \"lite\", \"build\", f\"--LiteBuildConfig.federated_extensions={wheel}\",\n ], cwd=DEMO)", "def bootstrap(environment: Environment):\n pass", "def main(argv):\n parser = argparse.ArgumentParser(description=\"\"\"Bootstrap CI Scripts\"\"\")\n parser.add_argument(\"-d\", \"--directory\",\n type=str,\n required=True,\n help=(\"\"\"Directory to store language runtimes, \"\"\"\n \"\"\"scripts and other script details in\"\"\"))\n parser.add_argument(\"-s\", \"--script\",\n type=str,\n help=\"\"\"Script to pass control to\"\"\")\n parser.add_argument(\"-e\", \"--eval-output\",\n type=str,\n choices=[\n \"bash\",\n \"powershell\"\n ],\n help=\"\"\"Evaluate output in shell\"\"\")\n parser.add_argument(\"-p\", \"--print-to\",\n type=str,\n help=\"\"\"Where to print output script to\"\"\")\n parser.add_argument(\"-r\", \"--scripts-directory\",\n type=str,\n help=(\"\"\"Directory where scripts are already \"\"\"\n \"\"\"stored in\"\"\"))\n parser.add_argument(\"--keep-scripts\",\n action=\"store_true\",\n help=\"\"\"Don't remove stale scripts.\"\"\")\n args, remainder = parser.parse_known_args(argv)\n\n print_script_to, print_messages_to = _determine_outputs(args.print_to)\n\n with closing(print_script_to):\n parent_shell = construct_parent_shell(args.eval_output,\n print_script_to)\n container = ContainerDir(parent_shell,\n stale_check=_stale_check_url(args),\n **(vars(args)))\n util = container.fetch_and_import(\"util.py\")\n # suppress(unused-attribute)\n util.PRINT_MESSAGES_TO = print_messages_to\n bootstrap_script = container.script_path(\"bootstrap.py\").fs_path\n bootstrap_script_components = bootstrap_script.split(os.path.sep)\n scripts_path = os.path.sep.join(bootstrap_script_components[:-2])\n\n # Overwrite CONTAINER_DIR in the output script, but not\n # for our 
own invocation, we'll need the parent instance\n # if we're actually in a test\n parent_shell.overwrite_environment_variable(\"CONTAINER_DIR\",\n container.path())\n _set_ci_environment_variables(parent_shell)\n\n _define_script_command(\"polysquare_run\",\n parent_shell,\n bootstrap_script,\n container.path(),\n scripts_path,\n None)\n _define_script_command(\"polysquare_cleanup\",\n parent_shell,\n bootstrap_script,\n container.path(),\n scripts_path,\n \"clean.py\")\n\n # Done, pass control to the script we're to run\n container.fetch_and_import(args.script).run(container,\n util,\n parent_shell,\n argv=remainder)\n\n # Print a final new line so that active messages don't get\n # truncated.\n util.print_message(\"\\n\")\n\n if container.return_code() != 0:\n parent_shell.exit(container.return_code())\n\n return container.return_code()", "def main():\n op = help()\n for t in [\"bowtie2\", \"samtools\", \"bamToBed\"]:\n if not isTool(t):\n logger.error(\"%s not exits! Please install through conda.\" % t)\n return\n if not os.path.exists(op.fqd):\n logger.error(\"Input %s not exists! Return.\" % op.fqd)\n return\n if len(glob(op.ref + \"*.bt2\")) == 0:\n logger.error(\"Bowtie2 reference not exists for prefix of %s! Return.\" %\n op.ref)\n return\n if not os.path.exists(op.output):\n os.makedirs(op.output, exist_ok=True)\n else:\n fs = glob(os.path.join(op.output, \"*\"))\n if len(fs) > 0:\n logger.info(\n \"Target output directory %s is not empty, may over-write some files.\"\n % op.output)\n\n #mapping\n data = preFqs(op.fqd)\n if len(data) == 0:\n logger.error(\n \"No matched _R1.fastq.gz and _R2.fastq.gz in %s. Return.\" %\n (op.fqd))\n return\n ref = op.ref\n sams = Parallel(n_jobs=op.number,backend=\"multiprocessing\")(\n delayed(tracMapping)(sample, fqs, ref, op.output, cpus=op.cpu)\n for sample, fqs in data.items())\n sams = [sam for sam in sams if sam is not None]\n\n #sam to bam and bedpe\n cpus = op.number * op.cpu\n ncpus = int(min(len(sams), cpus / 2))\n bedpes = Parallel(n_jobs=ncpus,backend=\"multiprocessing\")(delayed(sam2bamBedpe)(sam) for sam in sams)\n\n #cLoops2 qc\n cmd = \"cLoops2 qc -f %s -o bedpeQc -p %s\" % (\",\".join(bedpes),\n min(len(bedpes), cpus))\n callSys([cmd], logger)\n\n #combine report\n mata = parseBowtielog()\n matb = pd.read_csv(\"bedpeQc_bedpeQc.txt\", index_col=0, sep=\"\\t\")\n matb.index = [i.split(\"_all\")[0] for i in matb.index]\n for c in matb.columns:\n mata[c] = matb[c]\n mata.to_csv(\"tracPre_summary.txt\", sep=\"\\t\")\n cmd = \"rm bedpeQc_bedpeQc.txt\"\n os.system(cmd)", "def sktime_custom_env(tmp_path):\n conda_env = tmp_path.joinpath(\"conda_env.yml\")\n _mlflow_conda_env(conda_env, additional_pip_deps=[\"sktime\"])\n return conda_env", "def bootstrap():\n\n require('environment', provided_by=env.environments)\n sudo('mkdir -p %(root)s' % env, user=env.deploy_user)\n clone_repo()\n setup_dirs()\n link_config_files()\n update_services()\n create_virtualenv()\n update_requirements()\n create_local_settings()", "def installRequiredPackages(self, force=False):\n # Need to install if forced or any packages cannot be imported\n needToInstall = force\n if not needToInstall:\n try:\n import jupyter\n import jupyterlab\n import ipywidgets\n import pandas\n import ipyevents\n import ipycanvas\n except:\n needToInstall = True\n\n if needToInstall:\n # Install required packages\n import os\n if os.name != 'nt':\n # PIL may be corrupted on linux, reinstall from pillow\n slicer.util.pip_install('--upgrade pillow --force-reinstall')\n\n 
slicer.util.pip_install(\"jupyter jupyterlab ipywidgets pandas ipyevents ipycanvas --no-warn-script-location\")\n\n # Install Slicer Jupyter kernel\n # Create Slicer kernel\n slicer.modules.jupyterkernel.updateKernelSpec()\n # Install Slicer kernel\n import jupyter_client\n jupyter_client.kernelspec.KernelSpecManager().install_kernel_spec(slicer.modules.jupyterkernel.kernelSpecPath(), user=True, replace=True)", "def superconductor(local_dir, cpus, gpus, num_parallel, num_samples, oracle):\n\n # Final Version\n\n from design_baselines.mins import mins\n ray.init(num_cpus=cpus,\n num_gpus=gpus,\n include_dashboard=False,\n _temp_dir=os.path.expanduser('~/tmp'))\n tune.run(mins, config={\n \"logging_dir\": \"data\",\n \"task\": f\"Superconductor-{oracle}-v0\",\n \"task_kwargs\": {\"relabel\": False},\n \"val_size\": 200,\n \"offline\": True,\n \"normalize_ys\": True,\n \"normalize_xs\": True,\n \"base_temp\": 0.1,\n \"noise_std\": 0.0,\n \"method\": \"wasserstein\",\n \"use_conv\": False,\n \"gan_batch_size\": 128,\n \"hidden_size\": 1024,\n \"num_layers\": 1,\n \"bootstraps\": 1,\n \"initial_max_std\": 0.2,\n \"initial_min_std\": 0.1,\n \"oracle_lr\": 0.001,\n \"oracle_batch_size\": 128,\n \"oracle_epochs\": 100,\n \"latent_size\": 32,\n \"critic_frequency\": 10,\n \"flip_frac\": 0,\n \"fake_pair_frac\": 0.,\n \"penalty_weight\": 10.,\n \"generator_lr\": 2e-4,\n \"generator_beta_1\": 0.0,\n \"generator_beta_2\": 0.9,\n \"discriminator_lr\": 2e-4,\n \"discriminator_beta_1\": 0.0,\n \"discriminator_beta_2\": 0.9,\n \"initial_epochs\": 200,\n \"epochs_per_iteration\": 0,\n \"iterations\": 0,\n \"exploration_samples\": 0,\n \"exploration_rate\": 0.,\n \"thompson_samples\": 0,\n \"solver_samples\": 128, \"do_evaluation\": True},\n num_samples=num_samples,\n local_dir=local_dir,\n resources_per_trial={'cpu': cpus // num_parallel,\n 'gpu': gpus / num_parallel - 0.01})", "def prepare():\n sh('pip install pylint pyflakes behave nose clonedigger pep8 sphinx')\n sh('pip install watchdog coverage ipython sphinx_rtd_theme')\n develop()", "def _run_env(self):\n raise NotImplementedError()", "def install_requirements():\n local('. fabric_factory/ve/bin/activate; easy_install pip')\n local('. 
fabric_factory/ve/bin/activate; pip install -r requirements.txt')", "def test_srnaseq_bowtie(install_test_files, data_dir):\n with make_workdir() as workdir:\n cl = [\"bcbio_nextgen.py\",\n get_post_process_yaml(data_dir, workdir),\n os.path.join(data_dir, os.pardir, \"test_srnaseq\"),\n os.path.join(data_dir, \"run_info-srnaseq_bowtie.yaml\")]\n subprocess.check_call(cl)", "def setup(ctx):\r\n ctx.run('pip3 install -r requirements.txt')", "def setup():\n\n with cd(env.homedir):\n\n # clone repository from github\n sudo('git clone https://github.com/collective/demo.plone.de.git', user=env.deploy_user) # noqa: E501\n\n with cd(env.directory):\n\n # requirements\n # sudo('python python-dev build-essential zlib1g-dev libssl-dev libxml2-dev libxslt1-dev wv poppler-utils libtiff5-dev libjpeg62-dev zlib1g-dev libfreetype6-dev liblcms1-dev libwebp-dev') # noqa: E501\n\n # prepare buildout\n if env.latest:\n if env.python3:\n sudo('ln -s local_demo_nightly_py3.cfg local.cfg', user=env.deploy_user) # noqa: E501\n else:\n sudo('ln -s local_demo_nightly_py2.cfg local.cfg', user=env.deploy_user) # noqa: E501\n else:\n sudo('ln -s local_production.cfg local.cfg', user=env.deploy_user)\n sudo('echo -e \"[buildout]\\nlogin = admin\\npassword = admin\" > secret.cfg', user=env.deploy_user) # noqa: E501\n\n # bootstrap and run bildout once\n if env.latest:\n sudo('./bin/pip install --no-cache-dir -r https://raw.githubusercontent.com/plone/buildout.coredev/5.2/requirements.txt', user=env.deploy_user) # noqa: E501\n else:\n sudo('./bin/pip install --no-cache-dir -r https://raw.githubusercontent.com/starzel/buildout/5.2/requirements.txt', user=env.deploy_user) # noqa: E501\n sudo('./bin/buildout', user=env.deploy_user)\n\n # start supervisor which starts plone instance also\n sudo('./bin/supervisord', user=env.deploy_user)", "def get_estimator(arguments):\n \n numerical_indices = [1, 2, 4, 5,6,7,8,9,10,11,12,13,14]\n categorical_indices = [0]\n original_indices = list(set(range(59))-set(numerical_indices)-set(categorical_indices))\n \n p1 = make_pipeline(my_module.PositionalSelector(categorical_indices),OneHotEncoder())\n p2 = make_pipeline(my_module.PositionalSelector(numerical_indices),StandardScaler())\n p3 = make_pipeline(my_module.PositionalSelector(original_indices))\n \n feats = FeatureUnion([('categoricals', p1),\n ('numericals', p2),\n ('originals', p3),])\n \n # tolerance and C are expected to be passed as\n # command line argument to task.py\n pipeline = Pipeline([('pre', feats),\n ('estimator', linear_model.LogisticRegression(penalty=\"l2\",\n tol=arguments.tol,\n C = arguments.C,\n solver='lbfgs',\n max_iter=10000))])\n \n # tolerance and C are expected to be passed as\n # command line argument to task.py\n #classifier = linear_model.LogisticRegression(\n # penalty=\"l2\",\n # tol=arguments.tol,\n # C = arguments.C,\n # solver='lbfgs',\n # max_iter=1000\n #)\n \n return pipeline", "def workflow(base_dir, # base tool path\n use_cache=1, # whether to skip already executed runs (in cache) or not (1/0)\n ignore_git=0): # whether to ignore git version or not (1/0)\n\n # get some needed variables from config file\n runs = int(config['general']['runs'])\n workers = int(config['general']['workers'])\n\n batch_size = int(config['mtje']['batch_size'])\n epochs = int(config['mtje']['epochs'])\n use_malicious_labels = int(config['mtje']['use_malicious_labels'])\n use_count_labels = int(config['mtje']['use_count_labels'])\n gen_type = config['mtje']['gen_type']\n similarity_measure = 
config['mtje']['similarity_measure'].lower()\n net_type = 'mtje'\n\n training_n_samples = int(config['sorel20mDataset']['training_n_samples'])\n validation_n_samples = int(config['sorel20mDataset']['validation_n_samples'])\n test_n_samples = int(config['sorel20mDataset']['test_n_samples'])\n\n min_n_anchor_samples = int(config['freshDataset']['min_n_anchor_samples'])\n max_n_anchor_samples = int(config['freshDataset']['max_n_anchor_samples'])\n fresh_n_queries = int(config['freshDataset']['n_queries'])\n n_evaluations = int(config['freshDataset']['n_evaluations'])\n\n f_c_epochs = int(config['familyClassifier']['epochs'])\n f_c_train_split_proportion = int(config['familyClassifier']['train_split_proportion'])\n f_c_valid_split_proportion = int(config['familyClassifier']['valid_split_proportion'])\n f_c_test_split_proportion = int(config['familyClassifier']['test_split_proportion'])\n f_c_batch_size = int(config['familyClassifier']['batch_size'])\n\n c_l_epochs = int(config['contrastiveLearning']['epochs'])\n c_l_train_split_proportion = int(config['contrastiveLearning']['train_split_proportion'])\n c_l_valid_split_proportion = int(config['contrastiveLearning']['valid_split_proportion'])\n c_l_test_split_proportion = int(config['contrastiveLearning']['test_split_proportion'])\n c_l_batch_size = int(config['contrastiveLearning']['batch_size'])\n c_l_rank_size = int(config['contrastiveLearning']['rank_size'])\n c_l_knn_k_min = int(config['contrastiveLearning']['knn_k_min'])\n c_l_knn_k_max = int(config['contrastiveLearning']['knn_k_max'])\n\n # initialize Hash object\n ch = Hash()\n\n # update hash with the content of the config file (for the current net type)\n ch.update(json.dumps(dict(config.items('sorel20mDataset'))))\n # get config file sha256 digest\n dataset_config_sha = ch.get_b64()\n\n # update hash with the content of the config file (for the current net type)\n ch.update(json.dumps(dict(config.items(net_type))))\n # get config file sha256 digest\n config_sha = ch.get_b64()\n\n # update hash with the content of the config file (for the freshDataset)\n ch.update(json.dumps(dict(config.items('freshDataset'))))\n # get config file sha256 digest\n fresh_dataset_config_sha = ch.get_b64()\n\n # create copy of the current config hash digest\n ch_copy = ch.copy()\n\n # update hash with the content of the config file (for the freshDataset)\n ch.update(json.dumps(dict(config.items('familyClassifier'))))\n # get config file sha256 digest\n family_class_config_sha = ch.get_b64()\n\n # update hash with the content of the config file (for the freshDataset)\n ch_copy.update(json.dumps(dict(config.items('contrastiveLearning'))))\n # get config file sha256 digest\n contr_learn_config_sha = ch_copy.get_b64()\n\n # instantiate key-n_samples dict\n n_samples_dict = {'train': training_n_samples,\n 'validation': validation_n_samples,\n 'test': test_n_samples}\n\n # Note: The entrypoint names are defined in MLproject. 
The artifact directories\n # are documented by each step's .py file.\n\n # start mlflow run\n with mlflow.start_run() as active_run:\n # get code git commit version\n git_commit = active_run.data.tags.get(mlflow_tags.MLFLOW_GIT_COMMIT)\n\n # log config file\n mlflow.log_text(json.dumps({s: dict(config.items(s)) for s in config.sections()}), 'config.txt')\n\n # set dataset destination dir\n dataset_dir = os.path.join(base_dir, 'dataset')\n # set dataset base path (directory containing 'meta.db')\n dataset_base_path = os.path.join(dataset_dir, '09-DEC-2020', 'processed-data')\n # set pre-processed dataset base path (directory containing .dat files)\n pre_processed_dataset_dir = os.path.join(dataset_dir, '09-DEC-2020', 'pre-processed_dataset')\n # set fresh dataset base path (directory containing .dat files)\n fresh_dataset_dir = os.path.join(dataset_dir, 'fresh_dataset')\n\n # if pre-processed dataset files for this run parameters are not present, generate them\n if not preproc_check_files(destination_dir=pre_processed_dataset_dir,\n n_samples_dict=n_samples_dict):\n logger.info(\"Pre-processed dataset not found.\")\n\n # if the original Sorel20M dataset is not present, download it\n if not download_check_files(dataset_dir):\n logger.info(\"Dataset not found.\")\n\n # run dataset downloader\n download_dataset_run = run(\"download_dataset\", {\n 'destination_dir': dataset_dir\n }, config_sha=dataset_config_sha)\n\n # pre-process dataset\n preprocess_dataset_run = run(\"preprocess_dataset\", {\n 'ds_path': dataset_base_path,\n 'destination_dir': pre_processed_dataset_dir,\n 'training_n_samples': training_n_samples,\n 'validation_n_samples': validation_n_samples,\n 'test_n_samples': test_n_samples,\n 'batch_size': batch_size,\n 'remove_missing_features': str(os.path.join(dataset_base_path, \"shas_missing_ember_features.json\"))\n }, config_sha=dataset_config_sha)\n\n # if the fresh dataset is not present, generate it\n if not fresh_check_files(fresh_dataset_dir):\n logger.info(\"Fresh dataset not found.\")\n\n # generate fresh dataset\n build_fresh_dataset_run = run(\"build_fresh_dataset\", {\n 'dataset_dest_dir': fresh_dataset_dir\n }, config_sha=fresh_dataset_config_sha)\n\n # initialize results files dicts\n results_files = {}\n c_l_results_files = {}\n\n # instantiate common (between consecutive training runs) training parameters\n common_training_params = {\n 'ds_path': pre_processed_dataset_dir,\n 'net_type': net_type if similarity_measure == 'dot' else net_type + '_{}'.format(similarity_measure),\n 'gen_type': gen_type,\n 'batch_size': batch_size,\n 'epochs': epochs,\n 'training_n_samples': training_n_samples,\n 'validation_n_samples': validation_n_samples,\n 'use_malicious_labels': use_malicious_labels,\n 'use_count_labels': use_count_labels,\n 'workers': workers\n }\n\n # instantiate common (between consecutive training runs) evaluation parameters\n common_evaluation_params = {\n 'ds_path': pre_processed_dataset_dir,\n 'net_type': net_type if similarity_measure == 'dot' else net_type + '_{}'.format(similarity_measure),\n 'gen_type': gen_type,\n 'batch_size': batch_size,\n 'test_n_samples': test_n_samples,\n 'evaluate_malware': use_malicious_labels,\n 'evaluate_count': use_count_labels\n }\n\n # for each training run\n for training_run_id in range(runs):\n logger.info(\"initiating training run n. 
{}\".format(str(training_run_id)))\n\n # -- Model Training and Evaluation Steps -------------------------------------------------------------------\n # set training parameters\n training_params = common_training_params\n training_params.update({'training_run': training_run_id})\n\n # train network (get or run) on Sorel20M dataset\n training_run = get_or_run(\"train_network\",\n training_params,\n git_commit,\n ignore_git=bool(ignore_git),\n use_cache=bool(use_cache),\n resume=True,\n config_sha=config_sha)\n\n # get model checkpoints path\n checkpoint_path = parse.unquote(parse.urlparse(os.path.join(training_run.info.artifact_uri,\n \"model_checkpoints\")).path)\n\n # set model checkpoint filename\n checkpoint_file = os.path.join(checkpoint_path, \"epoch_{}.pt\".format(epochs))\n\n # set evaluation parameters\n evaluation_params = common_evaluation_params\n evaluation_params.update({'checkpoint_file': checkpoint_file})\n\n # evaluate model against Sorel20M dataset\n evaluation_run = get_or_run(\"evaluate_network\",\n evaluation_params,\n git_commit,\n ignore_git=bool(ignore_git),\n use_cache=bool(use_cache),\n config_sha=config_sha)\n\n # get model evaluation results path\n results_path = parse.unquote(parse.urlparse(os.path.join(evaluation_run.info.artifact_uri,\n \"model_results\")).path)\n\n # set model evaluation results filename\n results_file = os.path.join(results_path, \"results.csv\")\n\n # add file path to results_files dictionary (used for plotting mean results)\n results_files[\"run_id_\" + str(training_run_id)] = results_file\n\n # compute (and plot) all tagging results\n all_tagging_results_run = get_or_run(\"compute_all_run_results\", {\n 'results_file': results_file,\n 'use_malicious_labels': use_malicious_labels,\n 'use_tag_labels': 1\n }, git_commit, ignore_git=bool(ignore_git), use_cache=bool(use_cache), config_sha=config_sha)\n # ----------------------------------------------------------------------------------------------------------\n\n # -- Model Evaluation using Fresh Dataset Steps ------------------------------------------------------------\n # evaluate model against fresh dataset\n fresh_evaluation_run = get_or_run(\"evaluate_fresh\", {\n 'fresh_ds_path': fresh_dataset_dir,\n 'checkpoint_path': checkpoint_file,\n 'net_type': net_type if similarity_measure == 'dot' else net_type + '_{}'.format(similarity_measure),\n 'min_n_anchor_samples': min_n_anchor_samples,\n 'max_n_anchor_samples': max_n_anchor_samples,\n 'n_query_samples': fresh_n_queries,\n 'n_evaluations': n_evaluations\n }, git_commit, ignore_git=bool(ignore_git), use_cache=bool(use_cache), config_sha=fresh_dataset_config_sha)\n\n # get model evaluation results path\n fresh_results_path = parse.unquote(parse.urlparse(os.path.join(fresh_evaluation_run.info.artifact_uri,\n \"fresh_prediction_results\")).path)\n\n # set model evaluation results filename\n fresh_results_file = os.path.join(fresh_results_path, \"fresh_prediction_results.json\")\n\n # compute (and plot) all family prediction results (on fresh dataset)\n all_tagging_results_run = get_or_run(\"compute_all_run_fresh_results\", {\n 'results_file': fresh_results_file\n }, git_commit, ignore_git=bool(ignore_git), use_cache=bool(use_cache), config_sha=fresh_dataset_config_sha)\n # ----------------------------------------------------------------------------------------------------------\n\n # -- Family Classifier Steps -------------------------------------------------------------------------------\n # create family classifier from previously trained 
network and train it on fresh dataset\n f_c_train_run = get_or_run(\"train_family_classifier\", {\n 'fresh_ds_path': fresh_dataset_dir,\n 'checkpoint_path': checkpoint_file,\n 'epochs': f_c_epochs,\n 'training_run': training_run_id,\n 'train_split_proportion': f_c_train_split_proportion,\n 'valid_split_proportion': f_c_valid_split_proportion,\n 'test_split_proportion': f_c_test_split_proportion,\n 'batch_size': f_c_batch_size\n }, git_commit, ignore_git=bool(ignore_git), use_cache=bool(use_cache), config_sha=family_class_config_sha)\n\n # get model checkpoints path\n f_c_checkpoint_path = parse.unquote(parse.urlparse(os.path.join(f_c_train_run.info.artifact_uri,\n \"model_checkpoints\")).path)\n\n # set model checkpoint filename\n f_c_checkpoint_file = os.path.join(f_c_checkpoint_path, \"epoch_{}.pt\".format(f_c_epochs))\n\n # evaluate model against fresh dataset\n f_c_eval_run = get_or_run(\"evaluate_family_classifier\", {\n 'fresh_ds_path': fresh_dataset_dir,\n 'checkpoint_path': f_c_checkpoint_file,\n 'training_run': training_run_id,\n 'train_split_proportion': f_c_train_split_proportion,\n 'valid_split_proportion': f_c_valid_split_proportion,\n 'test_split_proportion': f_c_test_split_proportion,\n 'batch_size': f_c_batch_size\n }, git_commit, ignore_git=bool(ignore_git), use_cache=bool(use_cache), config_sha=family_class_config_sha)\n\n # get model evaluation results path\n f_c_results_path = parse.unquote(parse.urlparse(os.path.join(f_c_eval_run.info.artifact_uri,\n \"family_class_results\")).path)\n\n # set model evaluation results filename\n f_c_results_file = os.path.join(f_c_results_path, \"results.csv\")\n\n # compute (and plot) all tagging results\n f_c_compute_results_run = get_or_run(\"compute_all_family_class_results\", {\n 'results_file': f_c_results_file,\n 'fresh_ds_path': fresh_dataset_dir\n }, git_commit, ignore_git=bool(ignore_git), use_cache=bool(use_cache), config_sha=family_class_config_sha)\n # ----------------------------------------------------------------------------------------------------------\n\n # -- Contrastive Learning Steps ----------------------------------------------------------------------------\n # create family classifier from previously trained network and train it on fresh dataset\n c_l_train_run = get_or_run(\"train_contrastive_model\", {\n 'fresh_ds_path': fresh_dataset_dir,\n 'checkpoint_path': checkpoint_file,\n 'epochs': c_l_epochs,\n 'training_run': training_run_id,\n 'train_split_proportion': c_l_train_split_proportion,\n 'valid_split_proportion': c_l_valid_split_proportion,\n 'test_split_proportion': c_l_test_split_proportion,\n 'batch_size': c_l_batch_size\n }, git_commit, ignore_git=bool(ignore_git), use_cache=bool(use_cache), config_sha=contr_learn_config_sha)\n\n # get model checkpoints path\n c_l_checkpoint_path = parse.unquote(parse.urlparse(os.path.join(c_l_train_run.info.artifact_uri,\n \"model_checkpoints\")).path)\n\n # set model checkpoint filename\n c_l_checkpoint_file = os.path.join(c_l_checkpoint_path, \"epoch_{}.pt\".format(c_l_epochs))\n\n # evaluate model against fresh dataset\n c_l_eval_run = get_or_run(\"evaluate_contrastive_model\", {\n 'fresh_ds_path': fresh_dataset_dir,\n 'checkpoint_path': c_l_checkpoint_file,\n 'training_run': training_run_id,\n 'train_split_proportion': c_l_train_split_proportion,\n 'valid_split_proportion': c_l_valid_split_proportion,\n 'test_split_proportion': c_l_test_split_proportion,\n 'batch_size': c_l_batch_size,\n 'rank_size': c_l_rank_size,\n 'knn_k_min': c_l_knn_k_min,\n 'knn_k_max': 
c_l_knn_k_max\n }, git_commit, ignore_git=bool(ignore_git), use_cache=bool(use_cache), config_sha=contr_learn_config_sha)\n\n # get model evaluation results path\n c_l_results_path = parse.unquote(parse.urlparse(os.path.join(c_l_eval_run.info.artifact_uri,\n \"contrastive_learning_results\")).path)\n\n # set model evaluation results filename\n c_l_results_file = os.path.join(c_l_results_path, \"results.csv\")\n\n # compute (and plot) all tagging results\n c_l_compute_results_run = get_or_run(\"compute_contrastive_learning_results\", {\n 'results_file': c_l_results_file,\n 'fresh_ds_path': fresh_dataset_dir,\n 'knn_k_min': c_l_knn_k_min,\n 'knn_k_max': c_l_knn_k_max\n }, git_commit, ignore_git=bool(ignore_git), use_cache=bool(use_cache), config_sha=contr_learn_config_sha)\n\n # get model evaluation results path\n c_l_scores_dir_path = parse.unquote(parse.urlparse(os.path.join(c_l_compute_results_run.info.artifact_uri,\n \"contrastive_learning_scores\")).path)\n\n # add dir path to c_l_results_files dictionary (used for plotting mean score trends)\n c_l_results_files[\"run_id_\" + str(training_run_id)] = c_l_scores_dir_path\n # ----------------------------------------------------------------------------------------------------------\n\n # create temp dir name using the value from config_sha (sha of some parts of the config file).\n # -> This is done in order to have a different (but predictable) run_to_filename at each set of runs with\n # different parameters. This allows mlflow to know when it is needed to run 'per_tag_plot_runs'. If, on the\n # other hand a simple tempfile.TemporaryDirectory() was used then mlflow would run 'per_tag_plot_runs' every\n # time, even if a precedent run was available (because the parameter 'run_to_filename_json' would be different)\n tempdir = os.path.join(base_dir, 'tmp_{}'.format(config_sha))\n # create temp dir\n os.makedirs(tempdir, exist_ok=True)\n\n # create contrastive learning temp dir name using the value from config_sha (sha of some parts of the config\n # file). -> This is done in order to have a different (but predictable) run_to_filename at each set of runs with\n # different parameters. This allows mlflow to know when it is needed to run 'per_tag_plot_runs'. 
If, on the\n # other hand a simple tempfile.TemporaryDirectory() was used then mlflow would run 'per_tag_plot_runs' every\n # time, even if a precedent run was available (because the parameter 'run_to_filename_json' would be different)\n c_l_tempdir = os.path.join(base_dir, 'tmp_{}'.format(contr_learn_config_sha))\n # create temp dir\n os.makedirs(c_l_tempdir, exist_ok=True)\n\n # set run-to-filename file path\n run_to_filename = os.path.join(tempdir, \"results.json\")\n\n # create and open the results.json file in write mode\n with open(run_to_filename, \"w\") as output_file:\n # save results_files dictionary as a json file\n json.dump(results_files, output_file)\n\n mlflow.log_artifact(run_to_filename, \"run_to_filename\")\n\n # set run-to-filename file path\n c_l_run_to_filename = os.path.join(c_l_tempdir, \"c_l_results.json\")\n\n # create and open the c_l_results.json file in write mode\n with open(c_l_run_to_filename, \"w\") as output_file:\n # save c_l_results_files dictionary as a json file\n json.dump(c_l_results_files, output_file)\n\n mlflow.log_artifact(c_l_run_to_filename, \"run_to_filename\")\n\n # if there is more than 1 run, compute also per-tag mean results\n if runs > 1:\n # plot all roc distributions\n per_tag_plot_runs = get_or_run(\"plot_all_roc_distributions\", {\n 'run_to_filename_json': run_to_filename,\n 'use_malicious_labels': use_malicious_labels,\n 'use_tag_labels': 1\n }, git_commit, ignore_git=bool(ignore_git), use_cache=bool(use_cache), config_sha=config_sha)\n\n # plot all model mean scores trends\n plot_all_scores_trends = get_or_run(\"plot_all_contrastive_scores_trends\", {\n 'run_to_filename_json': c_l_run_to_filename,\n 'knn_k_min': c_l_knn_k_min,\n 'knn_k_max': c_l_knn_k_max\n }, git_commit, ignore_git=bool(ignore_git), use_cache=bool(use_cache), config_sha=contr_learn_config_sha)\n\n # remove temp files and temporary directory\n os.remove(run_to_filename)\n # os.remove(fresh_run_to_filename)\n os.rmdir(tempdir)\n\n # remove contrastive learning temp files and temporary directory\n os.remove(c_l_run_to_filename)\n # os.remove(fresh_run_to_filename)\n os.rmdir(c_l_tempdir)", "def main(Args):\n norm = [1.9844158727667542, 413.83759806375525,\n 51.2789974336363, 1038.4760551905683]\n input_pull = False\n input_model_mapping = False\n max_number = 2\n count = 40000\n catalog_name = os.path.join(DATA_PATH, 'OneDegSq.fits')\n # Define parameters for mrcnn model with btk here\n resid_model = btk_utils.Resid_btk_model(\n Args.model_name, Args.model_path, MODEL_DIR, training=True,\n images_per_gpu=4, validation_for_training=True)\n # Load parameters for dataset and load model\n resid_model.config.WEIGHT_DECAY = 0.001\n resid_model.config.STEPS_PER_EPOCH = 1000\n resid_model.config.VALIDATION_STEPS = 20\n sampling_function = None\n layers = 'all'\n if Args.model_name == 'model1':\n resid_model.config.BACKBONE = 'resnet41'\n elif Args.model_name == 'model2':\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n elif Args.model_name == 'model3':\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n elif Args.model_name == 'model4':\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n 
resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n elif Args.model_name == 'model5':\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet35'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n elif Args.model_name == 'model4_large':\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = '4+' # '3+'\n elif Args.model_name == 'model6':\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0, 1, 51.2789974336363, 1038.4760551905683]\n input_pull = True\n elif Args.model_name == 'model7':\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model8': # stretch = 0.1, Q = 3\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model9': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model10': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1., 0, 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model10_again': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0, 1.] 
# [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model10_again2': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0, 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model10_again3': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0, 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model10_2': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0., 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model11': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1., 0., 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model11_2': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1., 0., 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model12': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0, 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n max_number = 6\n elif Args.model_name == 'model12_again': # stretch = 2000, Q = 0.5 # larger learning rate\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0, 1.] 
# [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n max_number = 10 # changed from 6 to 10 for run 4\n elif Args.model_name == 'model12_again2': # stretch = 2000, Q = 0.5 # larger learning rate val set reduced to 10\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0, 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n max_number = 6\n resid_model.config.VALIDATION_STEPS = 10\n else:\n raise AttributeError(\"model not found\", Args.model_name)\n print(\"Train in model:\", Args.model_name)\n resid_model.config.display()\n resid_model.make_resid_model(catalog_name, count=count,\n max_number=max_number, augmentation=True,\n norm_val=norm, input_pull=input_pull,\n sampling_function=sampling_function,\n input_model_mapping=input_model_mapping)\n learning_rate = resid_model.config.LEARNING_RATE/10.\n np.random.seed(Args.epochs)\n history = resid_model.model.train(resid_model.dataset,\n resid_model.dataset_val,\n learning_rate=learning_rate,\n epochs=Args.epochs,\n layers=layers)\n name = Args.model_name + '_run2'\n with open(name + \".dill\", 'wb') as handle:\n dill.dump(history.history, handle)\n learning_rate = resid_model.config.LEARNING_RATE/10.\n np.random.seed(Args.epochs + 10)\n history = resid_model.model.train(resid_model.dataset,\n resid_model.dataset_val,\n learning_rate=learning_rate,\n epochs=Args.epochs+10,\n layers=layers)\n name = Args.model_name + '_run3'\n with open(name + \".dill\", 'wb') as handle:\n dill.dump(history.history, handle)", "def setup():\n\n with cd(env.homedir):\n\n # clone repository from github\n sudo('git clone https://github.com/starzel/demo.starzel.de.git', user=env.deploy_user) # noqa: E501\n\n with cd(env.directory):\n\n # requirements\n # sudo('python python-dev build-essential zlib1g-dev libssl-dev libxml2-dev libxslt1-dev wv poppler-utils libtiff5-dev libjpeg62-dev zlib1g-dev libfreetype6-dev liblcms1-dev libwebp-dev') # noqa: E501\n\n # prepare buildout\n sudo('ln -s local_production.cfg local.cfg', user=env.deploy_user)\n sudo('echo -e \"[buildout]\\nlogin = admin\\npassword = admin\" > secret.cfg', user=env.deploy_user) # noqa: E501\n\n # bootstrap and run bildout once\n sudo('./bin/pip install -r requirements.txt', user=env.deploy_user)\n sudo('./bin/buildout', user=env.deploy_user)\n\n # start supervisor which starts plone instance also\n sudo('./bin/supervisord', user=env.deploy_user)", "def run_workflow(EMBEDDING_BASE_PATH):\n train_tweets_path, val_tweets_path, test_tweets_path, image_dataset = run_pre_workflow()\n\n input_images, train_tweets, val_tweets, test_tweets, glove_embeddings = replica_catalog(train_tweets_path, val_tweets_path, test_tweets_path, image_dataset, EMBEDDING_BASE_PATH)\n\n preprocess_tweets, preprocess_images, train_resnet, hpo_train_resnet, train_bilstm, hpo_train_bilstm, resnet_inference, bilstm_inference, late_fusion = transformation_catalog()\n \n sites_catalog()\n\n pegasus_properties()\n \n wf = Workflow('Crisis_Computing_Workflow')\n\n # --------------------------------------------------- TEXT PIPELINE ------------------------------------------------------ \n\n # Job 1: Preprocess tweets\n preprocessed_train_tweets = File('preprocessed_train_tweets.csv')\n preprocessed_val_tweets = 
File('preprocessed_val_tweets.csv')\n preprocessed_test_tweets = File('preprocessed_test_tweets.csv')\n \n job_preprocess_tweets = [Job(preprocess_tweets) for i in range(3)]\n job_preprocess_tweets[0].add_inputs(train_tweets)\n job_preprocess_tweets[0].add_outputs(preprocessed_train_tweets)\n job_preprocess_tweets[0].add_args('--filename', 'train_tweets.csv')\n \n job_preprocess_tweets[1].add_inputs(val_tweets)\n job_preprocess_tweets[1].add_outputs(preprocessed_val_tweets)\n job_preprocess_tweets[1].add_args('--filename', 'val_tweets.csv')\n \n job_preprocess_tweets[2].add_inputs(test_tweets)\n job_preprocess_tweets[2].add_outputs(preprocessed_test_tweets)\n job_preprocess_tweets[2].add_args('--filename', 'test_tweets.csv')\n\n\n # Job 2: HPO Bi-LSTM\n bilstm_best_params = File('best_bilstm_hpo_params.txt')\n\n job_hpo_train_bilstm = Job(hpo_train_bilstm)\\\n .add_inputs(glove_embeddings, preprocessed_train_tweets, preprocessed_val_tweets, preprocessed_test_tweets)\\\n .add_outputs(bilstm_best_params)\\\n .add_args('--trials', BILSTM_NUM_TRIALS)\n\n\n # Job 3: Train Bi-LSTM using best parameters from HPO study and output loss and accuracy curves\n trained_bilstm_model = File('bilstm_final_model.h5') \n bilstm_loss_curve = File('Loss_curve_bilstm.png')\n bilstm_accuracy_curve = File('Accuracy_curve_bilstm.png')\n\n\n job_train_bilstm = Job(train_bilstm)\\\n .add_inputs(glove_embeddings, preprocessed_train_tweets, preprocessed_val_tweets, preprocessed_test_tweets, bilstm_best_params)\\\n .add_outputs(bilstm_loss_curve, bilstm_accuracy_curve, trained_bilstm_model)\\\n\n\n # Job 4: Run inference on best Bi-LSTM model to produce output on test dataset along with confusion matrix\n bilstm_train_output_prob = File('bilstm_train_output.csv')\n bilstm_test_output_prob = File('bilstm_test_output.csv')\n bilstm_confusion_matrix = File('bilstm_confusion_matrix.png')\n\n job_bilstm_inference = Job(bilstm_inference)\\\n .add_inputs(preprocessed_train_tweets, preprocessed_val_tweets, preprocessed_test_tweets, trained_bilstm_model)\\\n .add_outputs(bilstm_train_output_prob, bilstm_test_output_prob, bilstm_confusion_matrix)\n\n\n # --------------------------------------------------- IMAGE PIPELINE ------------------------------------------------------ \n\n \n # Job 1: Preprocess images\n prefix = \"resized_\"\n job_preprocess_images = [Job(preprocess_images) for i in range(NUM_WORKERS)]\n resized_images = split_preprocess_jobs(job_preprocess_images, input_images, prefix)\n\n # Job 2: HPO ResNet-50\n resnet_best_params = File('best_resnet_hpo_params.txt')\n\n job_hpo_train_resnet = Job(hpo_train_resnet)\\\n .add_inputs(*resized_images)\\\n .add_args('--trials', RESNET_NUM_TRIALS)\\\n .add_outputs(resnet_best_params)\\\n .add_profiles(Namespace.PEGASUS, key=\"maxwalltime\", value=MAXTIMEWALL)\n\n\n # Job 3: Train ResNet-50 using best parameters from HPO study and output loss and accuracy curves\n trained_resnet_model = File('resnet_final_model.pth')\n resnet_loss_curve = File('Loss_curve_resnet.png')\n resnet_accuracy_curve = File('Accuracy_curve_resnet.png')\n\n job_train_resnet = Job(train_resnet)\\\n .add_inputs(*resized_images, resnet_best_params)\\\n .add_outputs(resnet_loss_curve, resnet_accuracy_curve, trained_resnet_model)\\\n .add_profiles(Namespace.PEGASUS, key=\"maxwalltime\", value=MAXTIMEWALL)\n\n\n # Job 4: Run inference on best ResNet-50 model to produce output on test dataset along with confusion matrix\n resnet_train_output_prob = File('resnet_train_output.csv')\n resnet_confusion_matrix = 
File('resnet_confusion_matrix.png')\n resnet_test_output_prob = File('resnet_test_output.csv') \n\n job_resnet_inference = Job(resnet_inference)\\\n .add_inputs(*resized_images, trained_resnet_model)\\\n .add_outputs(resnet_train_output_prob, resnet_test_output_prob, resnet_confusion_matrix)\n\n \n \n # --------------------------------------------------- LATE FUSION ------------------------------------------------------ \n\n # Job 1: Late Fusion\n confusion_matrix_MPC = File('late_fusion_MPC.png')\n confusion_matrix_LR = File('late_fusion_LR.png')\n confusion_matrix_MLP = File('late_fusion_MLP.png')\n report_MLP = File('late_fusion_MLP.csv')\n report_MPC = File('late_fusion_MPC.csv')\n report_LR = File('late_fusion_LR.csv')\n\n job_late_fusion = Job(late_fusion)\\\n .add_inputs(resnet_train_output_prob, resnet_test_output_prob, bilstm_train_output_prob, bilstm_test_output_prob)\\\n .add_outputs(confusion_matrix_MPC, confusion_matrix_LR, confusion_matrix_MLP, report_MLP, report_MPC, report_LR)\n\n wf.add_jobs(*job_preprocess_tweets, *job_preprocess_images, job_bilstm_inference, job_hpo_train_bilstm, job_train_bilstm, job_hpo_train_resnet, job_train_resnet, job_resnet_inference, job_late_fusion)\n\n try:\n wf.plan(submit=False, sites=[\"donut\"], output_sites=[\"donut\"], dir=\"submit\")\n #wf.wait()\n #wf.statistics()\n except PegasusClientError as e:\n print(e.output)\n \n #plot_workflow_graph(wf)\n \n return" ]
[ "0.57629657", "0.56087464", "0.54360694", "0.54355013", "0.5393979", "0.53826815", "0.5367081", "0.5317132", "0.52905655", "0.5287661", "0.52496874", "0.5199404", "0.51819116", "0.51798064", "0.51774645", "0.5171329", "0.50645137", "0.50610745", "0.50353134", "0.5028036", "0.50058246", "0.49911824", "0.4987422", "0.49488166", "0.49283755", "0.49149662", "0.49086893", "0.48963335", "0.48933706", "0.48926333" ]
0.6405816
0
Returns dictionary with strand orientation as values and geneIDs as keys.
def gather_strand_by_geneID_dict(genome_gtf): strand_by_geneID_dict = {} with open(genome_gtf) as f: for line in f: current_line = line.split('\t') if current_line[2] == "CDS": current_orf = current_line[8].split(';')[2].split()[1].strip('\"') current_strand = current_line[6] strand_by_geneID_dict[current_orf] = current_strand return strand_by_geneID_dict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_gene_map(self) -> OrderedDict:\n if \"gene\" not in self.data:\n return OrderedDict()\n\n genes: OrderedDict = OrderedDict()\n for idx, genestr in self.data[\"gene\"].items():\n if pd.isnull(genestr):\n continue\n for gene in genestr.split(\",\"):\n if gene not in genes:\n genes[gene] = []\n genes[gene].append(idx)\n return genes", "def organize_by_chromosome(genes, transcripts):\n gene_dict = {}\n transcript_dict = {}\n\n for ID in genes:\n gene = genes[ID]\n chromosome = gene.chromosome\n if chromosome not in gene_dict:\n chrom_genes = {}\n chrom_genes[ID] = gene\n gene_dict[chromosome] = chrom_genes\n gene_dict[chromosome][ID] = gene\n\n for ID in transcripts:\n transcript = transcripts[ID]\n chromosome = transcript.chromosome\n if chromosome not in transcript_dict:\n chrom_transcripts = {}\n chrom_transcripts[ID] = transcript\n transcript_dict[chromosome] = chrom_transcripts\n transcript_dict[chromosome][ID] = transcript\n transcript_dict[chromosome][ID] = transcript\n\n return gene_dict, transcript_dict", "def _load_orgs_and_genes(self):\n organisms = {}\n genes = {}\n for gene in self.gene_ids:\n org_file_path = self._get_organisms_file_path(gene[self.GENE_NAME_IDX], gene[self.GENE_ID_IDX])\n with open(org_file_path, \"r\") as orgs:\n org = orgs.read().splitlines()\n genes[gene[self.GENE_NAME_IDX]] = {}\n # we only care about unique organisms\n for o in org:\n if not o.startswith(\">\"):\n continue\n clean_o = o.replace(\">\", \"\", 1).replace(\"_\", \" \").title()\n # I hate to do this but there's a special case for Canis Familiaris\n # EBI does not recognize it but it does recognize Canis Lupus (Canis Lupus Familiaris)\n if \"Canis Familiaris\" in clean_o:\n clean_o = \"Canis lupus\"\n if not organisms.get(clean_o):\n organisms[clean_o] = {self.FREQ_KEY: 1, self.GENE_IDS_KEY: [gene]}\n else:\n organisms[clean_o][self.FREQ_KEY] = organisms[clean_o][self.FREQ_KEY] + 1\n organisms[clean_o][self.GENE_IDS_KEY].append(gene)\n genes[gene[self.GENE_NAME_IDX]][clean_o] = 1\n return organisms, genes", "def build_gene_indexes(df):\n\tgeneDict = OrderedDict()\n\n\tgeneCount = 0\n\tpreviousGeneIndex = 0\n\n\tcurrent_id=\"\"\n\tcurrent_gene=\"\"\n\n\tfor i in range(len(df)):\n\n\t\tif df.loc[i,'feature'] == 'gene':\n\t\t\ttrdict = parse_entry(df.loc[i,'transcript_id'])\n\n\t\t\tcurGeneID = trdict['gene_id'][0]\n\t\t\n\t\t\tif geneCount != 0:\n\t\t\t\tnewGeneIndex = i\n\t\t\t\tgeneDict[current_id] = [previousGeneIndex,newGeneIndex]\n\t\t\t\tpreviousGeneIndex = i\n\t\t\t\tcurrent_id = trdict['gene_id'][0]\n\t\t\t\tgeneCount += 1\n\n\t\t\telse:\n\t\t\t\tnewgeneIndex = 0\n\t\t\t\tgeneCount +=1\n\t\t\t\tcurrent_id = trdict['gene_id'][0]\n\t\tif i == (len(df)-1):\n\t\t\tnewGeneIndex = i+1\n\t\t\tcurrent_id = trdict['gene_id'][0]\n\t\t\tgeneDict[current_id] = [previousGeneIndex,newGeneIndex]\n\treturn geneDict", "def createIndivitual(self) -> Dict[str, Any]:\n ind = {\n \"genome\": {\n key: numpy.random.randint(0, len(value), size=self.ref_count[key]) for (\n key, value) in self.grammar.items()\n },\n \"fitness\": None,\n \"fenotype\": None,\n }\n return ind", "def init_gene():\n gene_details=dict(\n id = '', \n anno_id = [],\n confgenes_id = [],\n name = '',\n source = '',\n gene_info = {},\n alias = '',\n name2 = [],\n strand = '',\n chr = '',\n chr_num = [],\n paralogs = [],\n start = '',\n stop = '',\n transcripts = [],\n transcript_info = [],\n transcript_status = [],\n transcript_valid = [],\n exons = [],\n exons_confirmed = [],\n cds_exons = [],\n utr5_exons = [],\n utr3_exons = 
[],\n tis = [],\n tis_conf = [],\n tis_info = [],\n cdsStop = [],\n cdsStop_conf = [],\n cdsStop_info = [],\n tss = [],\n tss_info = [],\n tss_conf = [],\n cleave = [],\n cleave_info = [],\n cleave_conf = [],\n polya = [],\n polya_info = [],\n polya_conf = [],\n is_alt = [],\n is_alt_spliced = 0,\n is_valid = [],\n transcript_complete = [],\n is_complete = [],\n is_correctly_gff3_referenced = '',\n splicegraph = []\n )\n return gene_details", "def genomic_tx_data():\n return dict(\n gene=\"BRAF\",\n strand=\"-\",\n tx_pos_range=(2053, 2188),\n alt_pos_range=(140439611, 140439746),\n alt_aln_method=\"splign\",\n tx_exon_id=780496,\n alt_exon_id=1927265,\n pos_change=(92, 43),\n alt_pos_change_range=(140439703, 140439703),\n tx_ac=\"NM_004333.4\",\n alt_ac=\"NC_000007.13\"\n )", "def create_species_encode():\n\tdata = pd.read_csv(\"../train.csv\")\n\tspecies = sorted(data.species.unique())\n\tspecies_dict = {species: index for index, species in enumerate(species)}\n\treturn species_dict", "def inizializzazione(fileInput, geneNames):\n\t\n\tdictTranscript \t= {}\n\tdictGenes \t\t= {}\n\tdictEsoni \t\t= {}\n\tdictIntroni \t= {}\n\tdictGeneChr \t= {}\n\n\t# - Filtraggio file di annotazione in input per 'exon' e per nome gene\n\t# - Calcolo delle coordinate dei geni nei cromosomi\n\t#\n\tlines, dictGeneChr = filtraFileDiAnn(fileInput, geneNames)\n\t\n\t\n\t# Indici all'interno del dizionario degli esoni\n\t#\n\tidx_starts \t= 0\n\tidx_ends \t= 1\n\tidx_strand \t= 2\n\t\n\t# Indici all'interno del dizionario dei Geni\n\t#\n\tidx_transcripts = 2\n\n\n\t# Creazione dei dizionari utili alla risoluzione del problema B\n\t#\n\tfor riga in lines:\n\t\tcromosoma \t\t= riga[0]\n\t\tstart_esone \t= riga[3]\n\t\tend_esone \t\t= riga[4]\n\t\tstrand \t\t\t= riga[6]\n\t\tgeneName \t\t= riga[11]\n\t\ttranscriptName \t= riga[12]\n\t\t\n\t\tTranscriptID \t= riga[9]\n\t\tGeneID \t\t\t= riga[8]\n\t\n\t\t# Creazione del dizionario dei transcritti\n\t\t#\n\t\tdictTranscript[TranscriptID] = [transcriptName, GeneID]\n\t\t\n\t\t# Creazione del dizionario dei geni\n\t\t#\n\t\tif not dictGenes.has_key(GeneID):\t\t\t\t\t\t\t\t\t\t# Se il GeneID non e' presente..\n\t\t\tdictGenes[GeneID] = [geneName, cromosoma, [TranscriptID]]\t\t\t# ..nel dizionario (come key)\n\t\telif TranscriptID not in dictGenes[GeneID][idx_transcripts]:\t\t\t# Se il GeneID e' presente ma non lo e'..\n\t\t\tdictGenes[GeneID][idx_transcripts].append(TranscriptID)\t\t\t\t# ..il TranscriptID questo si aggiunge alla lista\n\t\t\n\t\t# Creazione del dizionario degli esoni\n\t\t#\n\t\tif not dictEsoni.has_key(TranscriptID):\t\t\t\t\t\t \t# Se il TranscriptID non e' presente.. 
\n\t\t\tdictEsoni[TranscriptID] = [[start_esone],[end_esone],strand] \t# ..nel dizionario (come key)\n\t\telse:\n\t\t\tdictEsoni[TranscriptID][idx_starts].append(start_esone)\t\t\t \t# Il TranscriptID e' gia' presente quindi..\n\t\t\tdictEsoni[TranscriptID][idx_ends].append(end_esone)\t\t\t \t# ..si aggiunge l'esone alla lista degli esoni\n\t\t\t\n\t\t\t\n\t# Creazione del dizionario degli introni\n\t#\n\tfor TranscriptID in dictEsoni:\n\t\tesoniPerTranscript = len(dictEsoni[TranscriptID][idx_starts])\t \t# Si valuta il nr di esoni per TranscriptID corrente\n\t\t\n\t\tif int(esoniPerTranscript) > 1:\n\t\t\tstart_introni \t= []\t\t\t\t\t\t\t\t\t\t\t # Si preparano le variabili necessarie\n\t\t\tend_introni \t= []\n\t\t\t\n\t\t\tstart_esoni \t= []\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\tend_esoni \t\t= []\n\t\t\t\n\t\t\t# Si considera lo strand relativo al TranscriptID\n\t\t\t#\n\t\t\tif dictEsoni[TranscriptID][idx_strand] == '+':\t\t\t\t\t \t# Strand positivo -> esoni scritti in ordine crescente\n\t\t\t\tstrand = True\n\t\t\t\tstart_esoni = dictEsoni[TranscriptID][idx_starts]\n\t\t\t\tend_esoni \t= dictEsoni[TranscriptID][idx_ends]\n\t\t\t\t\n\t\t\telse:\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \t# Strand negativo -> esoni scritti in ordine inverso..\n\t\t\t\tstrand = False\t\t\t\t\t\t\t\t\t\t\t\t \t# ..e per comodita' sono invertiti in ordine crescente\n\t\t\t\tstart_esoni = dictEsoni[TranscriptID][idx_starts][::-1] \t \n\t\t\t\tend_esoni \t= dictEsoni[TranscriptID][idx_ends][::-1]\n\n\t\t\t# Calcolo delle regioni introniche\n\t\t\t#\n\t\t\ti = 0\n\t\t\twhile i < int(esoniPerTranscript) - 1:\t\t\t\t\t\t\t \t# Per ogni coppia di esoni\n\t\t\t\tif (int(start_esoni[i+1]) - int(end_esoni[i])) > 2:\t\t\t \t# Se la regione tra due esoni consecutivi e' > 2..\n\t\t\t\t\tstart_introni.append(int(end_esoni[i]) + 1)\t\t\t \t# ..(considerando che gli estremi dell'introne sono..\n\t\t\t\t\tend_introni.append(int(start_esoni[i+1]) - 1)\t\t \t \t#..interni a quelli dei due esoni consecutivi correnti)\n\t\t\t\ti += 1\n\t\t\t\n\t\t\tif not strand:\t\t\t\t\t\t\t\t\t\t\t\t \t# Si mantiene traccia del fatto che derivano da un..\n\t\t\t\tstart_introni.reverse()\t\t\t\t\t\t\t\t\t \t# ..TranscriptID con strand negativo..\n\t\t\t\tend_introni.reverse()\t\t\t\t\t\t\t\t\t\t\t# ..(si inverte l'ordine degli introni)\n\t\t\n\t\t\tdictIntroni[TranscriptID] = [start_introni, end_introni]\n\n\n\t# Si eliminano i geni che non presentano regioni introniche:\n\t# \t- dalla lista di tutti i geni si rimuovono quelli che hanno introni;\n\t#\t- dal dizionario si rimuovono quelli rimasti nella lista.\n\t#\n\ttuttiIGeni = geneNames.keys()\n\tfor TranscriptID in dictIntroni:\n\t\tgeneID = dictTranscript[TranscriptID][1]\n\t\tnomeGene = dictGenes[geneID][0]\n\t\t\n\t\tif nomeGene in tuttiIGeni:\n\t\t\ttuttiIGeni.remove(nomeGene)\n\n\n\tfor nomeGene in tuttiIGeni:\n\t\tdel geneNames[nomeGene]\n\t\tprint 'Il gene %s non presenta regioni introniche.' 
% nomeGene\n\n\n\treturn [dictTranscript, dictGenes, dictEsoni, dictIntroni, dictGeneChr]", "def get_gene_transcript_map(db_path, table=Annotation.__tablename__, index_col='TranscriptId'):\n df = read_attrs(db_path, table, index_col).reset_index()\n r = {}\n for gene_id, s in df.groupby('GeneId'):\n r[gene_id] = s.TranscriptId.tolist()\n return r", "def load_gene_dict(reference_genbank_name=\"data/covid-19-genbank.gb\"):\n recs = [rec for rec in SeqIO.parse(reference_genbank_name, \"genbank\")]\n gene_dict = {}\n for rec in recs:\n feats = [feat for feat in rec.features if feat.type == \"CDS\"]\n for feat in feats:\n content = '{}: {}'.format(feat.qualifiers['protein_id'][0], feat.qualifiers['product'][0])\n if feat.qualifiers['product'][0] == 'ORF1a polyprotein':\n continue\n if feat.location_operator == 'join':\n for item in feat.location.parts:\n key = (item.start.position, item.end.position)\n if 'translation' in feat.qualifiers:\n seq = feat.qualifiers['translation']\n if len(seq) == 1:\n amino_acid_seq = seq[0]\n gene_dict[key] = (content, amino_acid_seq)\n else:\n key = (feat.location.start.position, feat.location.end.position)\n if 'translation' in feat.qualifiers:\n seq = feat.qualifiers['translation']\n if len(seq) == 1:\n amino_acid_seq = seq[0]\n gene_dict[key] = (content, amino_acid_seq)\n return gene_dict", "def get_transcript_gene_map(db_path, table=Annotation.__tablename__, index_col='TranscriptId'):\n df = read_attrs(db_path, table, index_col)\n return dict(list(zip(df.index, df.GeneId)))", "def gene_ID_wrangler(inpAcc):\n \n print(\"processing gene symbols\")\n \n resD = {}\n \n for convI in inpAcc:\n keyI = convI[\"InputValue\"]\n valueI = convI[\"Gene ID\"]\n resD[keyI] = valueI\n\n return resD", "def gencode_dic(gencode_file,gene_type_dic):\n gen_dic = {}\n for i in range(1,len(gencode_file)):\n words_gen = gencode_file[i].strip().split('\\t')\n chr_no = words_gen[2]\n trans_id = words_gen[1]\n cds_info = words_gen[13]\n cde_info = words_gen[14]\n gene_type = gene_type_dic[trans_id]\n gene_name = words_gen[12]\n TSS_start = int(words_gen[4])\n TSS_end = int(words_gen[5])\n CDS_start = int(words_gen[6])\n CDS_end = int(words_gen[7])\n strand = words_gen[3]\n start_list = [int(x) for x in words_gen[9].split(',')[:-1]]\n end_list = [int(x) for x in words_gen[10].split(',')[:-1]]\n exon_no = int(words_gen[8])\n# if (chr_no,trans_id) in gen_dic: #Some trans_id are not unique, especially transcripts in chrX and chrY\n# print trans_id\n interval_list = [P.closedopen(start_list[x],end_list[x]) for x in range(0,exon_no)]\n interval_merge = P.empty()\n for i in range(0,len(interval_list)):\n interval_merge = interval_merge | interval_list[i]\n if gene_type == 'protein_coding':\n if (cds_info == 'cmpl') and (cde_info == 'cmpl'):\n # print (interval_merge)\n gen_dic.setdefault((chr_no,strand),[]).append([TSS_start,TSS_end,CDS_start,CDS_end,\\\n gene_name,gene_type,interval_merge])\n else:\n gen_dic.setdefault((chr_no,strand),[]).append([TSS_start,TSS_end,CDS_start,CDS_end,\\\n gene_name,gene_type,interval_merge])\n return gen_dic", "def get_text_mining_mir_dictionary():\n if logger.getEffectiveLevel() == logging.DEBUG or not os.path.exists(OUT_MIR_ALIAS_FILE):\n __create_mir_alias_dictionary__()\n\n mir_alias_to_identifier = {}\n with gzip.open(OUT_MIR_ALIAS_FILE, 'rb') as mir_alias_file:\n for line in mir_alias_file:\n tax_id, mir_id, mir_alias = line.rstrip('\\r\\n').split('\\t')\n mir_alias_to_identifier[(tax_id, mir_alias)] = mir_id\n return mir_alias_to_identifier", "def 
genorates_to_dict(store: GenoRates) -> GenoDistribSerialisable:\n to_return = dict()\n num_alleles = store.shape[0]\n\n for gene in range(num_alleles):\n for allele in range(3):\n to_return[(geno_to_str(gene, allele))] = store[gene][allele]\n\n return to_return", "def get_gene_id_dict(list_of_results):\n dict1 = {}\n for i, dict2 in enumerate(list_of_results):\n key = dict2[\"GeneID\"]\n if key in dict1.keys():\n # list1 = dict1[key]\n # list1.append(list_of_results[i])\n # dict1[key] = list1\n # list1.append(list_of_results[i])\n dict1[key].append(list_of_results[i])\n else:\n dict1[key] = [list_of_results[i]]\n return dict1", "def get_genes_organisms(self):\n path = os.path.join(self.parent_path, \"genes_organisms.txt\")\n with open(path, \"w\") as f:\n f.write(\"Gene,Organisms\\n\")\n for gene in self.genes.keys():\n f.write(\"{},{}\".format(gene, \"/\".join(self.genes.get(gene).keys()) + \"\\n\"))", "def create_gene_dict(self, variants):\n \n # organise the variants into entries for each gene\n genes = {}\n for var in variants:\n # variants (particularly CNVs) can span multiple genes, so we need\n # to check each gene separately, and then collapse duplicates later\n for gene_list in var.get_genes():\n for gene in gene_list:\n if gene not in genes:\n genes[gene] = []\n # add the variant to the gene entry\n genes[gene].append(var)\n \n return genes", "def produce_geneName_dict(inPath, spList, outPath):\n with open(spList, 'r') as f:\n swissProtIDs = set(f.read().split())\n with open(inPath, 'r') as fIn:\n idMap = {}\n for line in fIn:\n uniprotID, otherIDtype, otherID = line.strip().split('\\t')\n if otherIDtype == 'Gene_Name':\n if uniprotID in swissProtIDs:\n idMap[uniprotID] = otherID.upper()\n with open(outPath, 'wb') as fOut:\n pickle.dump(idMap, fOut)", "def get_snps(self):\n d = {}\n with open(self.snp_file, 'r') as infile:\n for row in infile:\n if row:\n row_split = row.strip().split('\\t')\n chrom = row_split[0]\n pos = row_split[1]\n name = row_split[3].split('|')\n snp_id = name[0]\n gene = name[1]\n ref_allele = name[2]\n alt_alleles = name[3]\n freq = name[4]\n genome = name[5]\n d[snp_id] = {\n 'chrom': chrom,\n 'pos': pos,\n 'ref': ref_allele,\n 'alt': alt_alleles,\n 'gene': gene,\n 'maf': freq,\n 'genome_build': genome\n }\n return d", "def update_strandinfo(self):\n params = ['x','y','rho','theta','spiral','inward','outward']\n infos = {'min':np.min,\n 'max':np.max,\n 'count':lambda x:len(set(x))}\n\n self.strands = {}\n\n for f in ['pwm','channel']:\n self.strands[f] = [ s[f] for s in self.strands_config]\n\n for f in params:\n if f in self.strands_config[0]:\n self.strands[f] = np.array([ s[f] for s in self.strands_config],dtype=np.int16)\n\n for f in ['intensity','last_intensity']:\n self.strands[f] = np.zeros_like(self.strands['x'],dtype=np.int16)\n\n self.strandinfo = { param: { info : None for info in infos} for param in params }\n for p in params:\n for ik,iv in infos.items():\n self.strandinfo[p][ik] = iv(self.strands[p])\n\n print('self.strands:', self.strands)\n print('strandinfo:',self.strandinfo)", "def group_data_by_gs(data_table):\n gene_data = collections.defaultdict(lambda: collections.defaultdict(list))\n for _idx, row in data_table.iterrows():\n samp = row['sample']\n gene = row['gene']\n gene_data[gene][samp].append({\n 'muttype': row['type'].strip(),\n 'normalized': row['Normalized'], # NMAF in the manuscript\n 'consequence': row['MissenseConsequence'].strip(),\n })\n return gene_data", "def get_gene_biotype_map(db_path, 
table=Annotation.__tablename__, index_col='TranscriptId'):\n df = read_attrs(db_path, table, index_col)\n return dict(list(zip(df.GeneId, df.GeneBiotype)))", "def get_keys():\n SCALE_DICT = {\n 'major': [2,2,1,2,2,2,1],\n 'minor':[2,1,2,2,1,2,2],\n 'chrom':[1,1,1,1,1,1,1,1,1,1,1,1],\n 'ionanian':[2,2,1,2,2,2,1],\n 'dorian':[2,1,2,2,2,1,2],\n 'phrygian':[1,2,2,2,1,2,2],\n 'lydian':[2,2,2,1,2,2,1],\n 'mixolydian':[2,2,1,2,2,1,2],\n 'aeolian':[2,1,2,2,1,2,2],\n 'locrian':[1,2,2,1,2,2,2],\n 'minor_pent':[3,2,2,3,2],\n 'major_pent':[2,2,3,2,3],\n 'pent_6':[2,2,3,1,3],\n 'pent_2':[1,3,3,2,3],\n 'pent_3':[2,1,4,2,3],\n 'pent_5':[2,2,2,3,3],\n 'mixo_pent':[2,2,3,3,2],\n 'phryg_pent':[1,2,3,1,3],\n 'dim_pent':[2,1,3,1,3],\n 'blues':[3,2,1,1,3,2],\n 'harmonic_minor':[2,1,2,2,1,3,2],\n 'melodic_mimnor':[2,1,2,2,1,3,2],\n 'whole_tone':[2,2,2,2,2,2],\n 'whole_half':[2,1,2,1,2,1,2,1],\n 'half_whole':[1,2,1,2,1,2,1,2],\n 'lydian_flat7':[2,2,2,1,2,1,2]\n }\n\n return SCALE_DICT", "def GetGeneName(arg):\n\n genbank = ChromUnzip(arg)\n \n p1=re.compile(r'(?:ACCESSION\\s+)(\\w+\\d+)')\n p6=re.compile(r'(?:/gene=\")(.+?)(?:\"\\s+)')\n\n gene_name_dict={}\n \n for entry in genbank:\n gene_list=[] \n gene_it_6=p6.finditer(entry)\n gene_it_1=p1.finditer(entry) \n for hit in gene_it_6:\n gene_list.append(hit.group(1))\n for item in gene_it_1:\n gene_name_dict[item.group(1)]=gene_list[0]\n \n return gene_name_dict", "def genome_index_to_dict(self, index):\n chrom_pos = self.chrom_and_pos(index)\n return {'Chromosome': chrom_pos[0], 'Position': chrom_pos[1]}", "def read_cDNA_file_to_dict(filename):\n \n #initialize dictionary\n cDNA_dictionary = {}\n\n #open file\n with open(cDNA_file) as f:\n \n #loop through file line by line\n for line in f:\n\n #remove newline\n line = line.rstrip()\n \n #get gene name\n if line.startswith(\">\"):#If the line starts with the character \">\" then,\n gene_name = line.split(\"|\")[1]#I separate the line by the character \"|\" and assign index 1 to gene_name\n \n #read in sequence in uppercase\n if not line.startswith(\">\"):#If the line does not start with the character \">\" then,\n line = line.upper()#I make all of the characters within the line uppercase\n\n #put name and sequence in dictionary\n cDNA_dictionary[gene_name] = line#I assign the gene_name as the key and the line (sequence) as the value\n\n #return dictionary \n return cDNA_dictionary", "def produce_isoform_geneName_dict(geneMapFile, isoformFile, outPath):\n isoformData = pd.read_table(isoformFile, sep=\"\\t\")\n with open(geneMapFile, 'rb') as f:\n geneMap = pickle.load(f)\n isoformGeneMap = {}\n isoformData[\"refID\"] = isoformData[\"Isoform\"].apply(lambda x: x if x.find('-') == -1 else x[:x.find('-')])\n for _, row in isoformData.iterrows():\n if row.refID in geneMap:\n isoformGeneMap[row.Isoform] = geneMap[row.refID]\n with open(outPath, 'wb') as fOut:\n pickle.dump(isoformGeneMap, fOut)", "def get_indices_convert_dict(fn):\n pdb_inp = pdb.input(file_name=fn)\n pdb_hierarchy = pdb_inp.construct_hierarchy()\n \n newids = OrderedDict((atom.id_str(), idx) for (idx, atom) in enumerate(pdb_hierarchy.atoms()))\n oldids= OrderedDict((atom.id_str(), idx) for (idx, atom) in enumerate(pdb_inp.atoms()))\n \n return {'p2a': np.array([newids[atom.id_str()] for atom in pdb_inp.atoms()]),\n 'a2p': np.array([oldids[atom.id_str()] for atom in pdb_hierarchy.atoms()])}" ]
[ "0.6775984", "0.63313013", "0.6185268", "0.6166058", "0.61002773", "0.5993038", "0.5952857", "0.5945249", "0.5915649", "0.59024686", "0.5877856", "0.5875492", "0.5870065", "0.5817717", "0.57851946", "0.57501155", "0.57454574", "0.5728968", "0.57258415", "0.56919414", "0.5636224", "0.5629619", "0.5620638", "0.5593645", "0.5579922", "0.5567293", "0.5565742", "0.5550464", "0.5533519", "0.55269367" ]
0.6580928
1
Determine relevant entries in crkeng.xml and build a smaller xml file for testing.
def build_test_xml(): crkeng_file_path = find_latest_xml_file(shared_res_dir / "dictionaries") print(f"Building test dictionary files using {crkeng_file_path.name}") crkeng_root = ET.parse(str(crkeng_file_path)).getroot() # relevant entries in crkeng.xml file we want to determine relevant_xml_ls: Set[str] = set() xml_ls: Set[str] = set() crkeng_entries = crkeng_root.findall(".//e") for element in crkeng_entries: xml_l = extract_l_str(element) xml_ls.add(xml_l) test_words = get_test_words() print(f"Analyzing xml l elements and test words") word_to_analyses = morphodict.analysis.relaxed_analyzer().bulk_lookup( xml_ls | test_words ) print("Analysis done") test_word_lemmas: Set[str] = set() for test_word in test_words: for analysis in word_to_analyses[test_word]: lemma = fst_analysis_parser.extract_lemma(analysis) if lemma is None: logger.warn( "Skipping test word: %s. " "Could not extract lemma from its analysis: %s", test_word, analysis, ) continue test_word_lemmas.add(lemma) for xml_l in tqdm(xml_ls, desc="screening relevant entries in crkeng.xml"): if xml_l in test_words: relevant_xml_ls.add(xml_l) continue for xml_l_analysis in word_to_analyses[xml_l]: xml_lemma = partition_analysis(xml_l_analysis)[1] for test_word_lemma in test_word_lemmas: if test_word_lemma == xml_lemma: relevant_xml_ls.add(xml_l) break relevant_crkeng_entries = [] for element in crkeng_entries: xml_l = extract_l_str(element) if xml_l in relevant_xml_ls: relevant_crkeng_entries.append(element) crkeng_xml_utils.write_xml_from_elements( list(crkeng_root.findall(".//source")) + relevant_crkeng_entries, shared_res_dir / "test_dictionaries" / "crkeng.xml", )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def XML_EC_PL(Name, InputsFile, OutputFile, emin,emax):\n\n\t#On commence par afficher ce qu'on fait\r\n\tprint \" Build xml file \"\r\n\r\tprint InputsFile\n\t#ouverture du fichier dans lequel on place le source model\n\ttry:\n\t\tfresult = open(OutputFile, 'w')\n\texcept:\n\t\tprint \"Coucou\"\r\n \t#ecriture des premieres lignes invariantes\n\tfresult.write('<?xml version=\"1.0\" ?>')\r\n\tfresult.write(\"<source_library title=\\\"source library\\\">\\n\")\n\r\n \t#ouverture du fichier avec les entrees\r\n\tf = open(InputsFile,\"r\")\r\n\tlines = f.readlines()\r\n\t\r\n \t#Ajout des sources detectees dans le catalogue\n\t#Pour chaque ligne du fichier d'entree\r\n\tfor line in range(len(lines)):\n\t\t#Lire les donnees de la ligne\t\t\r\n\t\tdata = lines[line].split()\r\n\t\tname = data[0]\n\n\t\t#Verification : est on en train de traiter la source que l'on veut etudier ou une autre ?\r\n\t\tif str(name) == Name :\r\n\t\t\tmysource = 1\r\n\t\telse:\r\n\t\t\tmysource = 0\n\n\t\t#recuperation des donnees\r\n\t\tRA = data[1]\r\n\t\tDEC = data[2]\r\n\t\tIntegral = float(data[3])*float(Frac)\r\n\t\tGamma= data[4]\n\n\t\t\r\n\t\ttry:\n\t\t\t#essai de definition des donnees pour un PL avec ExpCut\n\t\t\tPrefactor = float(data[5])*float(Frac)\r\n\t\t\tEnergy = float(data[6])\r\n\t#\t\tPrefactor = Prefactor/pow(Energy/100., float(Gamma)) #Densite de flux calculee a Epivot\r\n\t#\t\tPrefactor = Prefactor*pow(1000./100., float(Gamma)) #We do the calculation with (E/1000.)^Gamma\n\t\t\tvariabilite=float(data[8])\n\n#\t\t\tprint variabilite\n\n\n\n\r\n\t\t\tcut = float(data[7]) # Cut est la variable qui nous permettra de savoir si il faut utiliser un cut off (1) ou une loi de puissance normale (2)\r\n\t\texcept:\r\n\t\t\ttry:\r\n\t\t\t\tcut = float(data[5])\r\n\t\t\texcept:\r\n\t\t\t\tprint \" Wrong size of list \"\r\n\t\t\t\tsys.exit()\r\n \t#Si on considere un ccut off exponentiel pour la source :\r\n\t\tif cut == 1:\n\t\t\t#ecriture du nom de la source consideree\r\n\t\t\tresult_line=\" <source \"\r\n\t\t\tresult_line += \"name=\\\"\"+name+\"\\\"\"\r\n\t\t\tresult_line += \" type=\\\"PointSource\\\">\\n\"\r\n\t\t\tspectrum_type = \"PLSuperExpCutoff\"\n\t\t\t#Utilisation de la modelisation PLSuperExpCutoff car plus simple et plus intuitive pour nous et pour la modelisation des pulsars si il faut en modeliser\n\r\n\t\t\t#definition des parametres spectraux a prendre en comtpe et de la chaine de caractere a integrer\r\n\n\n\n\t\t\tif variabilite==0.0 or variabilite==2.0:\n\t\t\t\tspectrum_lines = \" <parameter free=\\\"0\\\" max=\\\"10000000.0\\\" min=\\\"0.0000001\\\"\"\n\n\t\t\t\t#d'ou vient ce 1e-12\r\n\t\t\t\tIntegral = float(Prefactor)*1.0e10\r\n\t\t\t\tscale = 1.0e-10\n\r\n\t\t\t\tspectrum_lines += \" name=\\\"Prefactor\\\" scale=\\\"\"+str(scale)+\"\\\" value=\\\"\"\r\n\t\t\t\tspectrum_lines += str(Integral)+\"\\\" />\\n\"\r\n \r\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"1\\\" max=\\\"5.0\\\" min=\\\"0.\\\"\"\r\n\t\t\t\tspectrum_lines += \" name=\\\"Index1\\\" scale=\\\"-1.0\\\" value=\\\"\"\r\n\t\t\t\tspectrum_lines += str(Gamma)+\"\\\"/>\\n\"\r\n \r\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"20000.0\\\" min=\\\"1.0\\\"\"\r\n\t\t\t\tspectrum_lines += \" name=\\\"Scale\\\" scale=\\\"1.0\\\" value=\\\"\"+str(Energy)+\"\\\"/>\\n\"\r\n \r\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"1\\\" max=\\\"100.0\\\" min=\\\"0.001\\\"\"\n\t\t\t\tspectrum_lines += \" name=\\\"Cutoff\\\" scale=\\\"1000.0\\\" value=\\\"30.0\\\"/>\\n\"\n\r\n\t\t\t\tspectrum_lines += \" 
<parameter free=\\\"0\\\" max=\\\"5.0\\\" min=\\\"0.0\\\"\"\r\n\t\t\t\tspectrum_lines += \" name=\\\"Index2\\\" scale=\\\"1.0\\\" value=\\\"1.0\\\"/>\\n\"\n\t\t\telif variabilite==1.0 :\n\t\t\t\tspectrum_lines = \" <parameter free=\\\"1\\\" max=\\\"10000000.0\\\" min=\\\"0.0\\\"\"\n\n\t\t\t\t#d'ou vient ce 1e-12\r\n\t\t\t\tIntegral = float(Prefactor)*1.0e10\r\n\t\t\t\tscale = 1.0e-10\n\n\t\t\t\tspectrum_lines += \" name=\\\"Prefactor\\\" scale=\\\"\"+str(scale)+\"\\\" value=\\\"\"\r\n\t\t\t\tspectrum_lines += str(Integral)+\"\\\" />\\n\"\r\n \r\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"1\\\" max=\\\"5.0\\\" min=\\\"0.\\\"\"\r\n\t\t\t\tspectrum_lines += \" name=\\\"Index1\\\" scale=\\\"-1.0\\\" value=\\\"\"\r\n\t\t\t\tspectrum_lines += str(Gamma)+\"\\\"/>\\n\"\r\n \r\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"20000.0\\\" min=\\\"1.0\\\"\"\r\n\t\t\t\tspectrum_lines += \" name=\\\"Scale\\\" scale=\\\"1.0\\\" value=\\\"\"+str(Energy)+\"\\\"/>\\n\"\r\n \r\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"1\\\" max=\\\"100.0\\\" min=\\\"0.0001\\\"\"\r\t\t\t\tspectrum_lines += \" name=\\\"Cutoff\\\" scale=\\\"1000.0\\\" value=\\\"30.0\\\"/>\\n\"\r\n \r\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"5.0\\\" min=\\\"0.0\\\"\"\r\n\t\t\t\tspectrum_lines += \" name=\\\"Index2\\\" scale=\\\"1.0\\\" value=\\\"1.0\\\"/>\\n\"\n\n\r\n \r\n\n# <spectrum type=\"PLSuperExpCutoff\">\n# <parameter free=\"1\" max=\"100000\" min=\"0\" name=\"Prefactor\" scale=\"1e-10\" value=\"Prefactor*1e-10\"/>\n# <parameter free=\"1\" max=\"0\" min=\"5\" name=\"Index1\" scale=\"-1\" value=\"valeur du catalogue\"/>\n# <parameter free=\"0\" max=\"20000\" min=\"1.0\" name=\"Scale\" scale=\"1\" value=\"Epivot\"/>\n# <parameter free=\"1\" max=\"300000\" min=\"100\" name=\"Cutoff\" scale=\"1\" value=\"3000\"/>\n# <parameter free=\"0\" max=\"5\" min=\"0\" name=\"Index2\" scale=\"1\" value=\"1.5\"/>\n# </spectrum>\n\n\r\n\t\telse:\n\t\t#Sinon (si on considere une loi de puissance simple)\n\t\t#definition de la chaine de caractere comportant le nom de la source\r\n\t\t\tresult_line=\" <source \"\r\n\t\t\tresult_line += \"name=\\\"\"+name+\"\\\"\"\n\t\t\tif mysource == 0:\r\t\t\t\tresult_line += \" type=\\\"PointSource\\\">\\n\"\n\t\t\telse:\n\t\t\t\tresult_line += \" type=\\\"PointSource\\\">\\n\"\t\t\t\t\n\n\t\t\t#definition de la chaine de caractere correspondant a la forme de fit que l'on souhaite utiliser (Loi de puissance)\r\n\t\t\tspectrum_type = \"PowerLaw2\"\r\n\r\n\t\t\tif mysource == 0 and variabilite!=1.0:\n\t\t\t#si ce n'est pas la source que l'on etudie on fige le parametre Integrale\n\t\t\t\tspectrum_lines = \" <parameter free=\\\"0\\\" max=\\\"1000000.0\\\" min=\\\"0.0\\\"\"\r\n\t\t\telse:\n\t\t\t#sinon on le libere\r\n\t\t\t\tspectrum_lines = \" <parameter free=\\\"1\\\" max=\\\"1000000.0\\\" min=\\\"0.0\\\"\"\n\n\n\n\n\n\t\t\t#Toujours ce facteur....\r\n\t\t\tIntegral = float(Integral)*1e10\r\n\t\t\tscale = 1e-10\n\n\n\t\n\r\n\t\t\tspectrum_lines += \" name=\\\"Integral\\\" scale=\\\"\"+str(scale)+\"\\\" value=\\\"\"\r\n\t\t\tspectrum_lines += str(Integral)+\"\\\" />\\n\"\n\r\n\t\t\tif mysource == 0 and variabilite!=1.0:\n\t\t\t\t#si ce n'est pas la source que l'on etudie on fige le parametre gamma\r\n\t\t \t\tspectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"5.0\\\" min=\\\"0.\\\"\"\r\n\t\t\telse:\n\t\t\t\t#si c'est pas la source que l'on etudie on le laisse libre\r\n\t\t \t\tspectrum_lines += \" <parameter free=\\\"1\\\" max=\\\"5.0\\\" min=\\\"0.\\\"\"\n\n\t\t\t#fin 
de la chaine de parametres sur le modele spectral\r\n\t\t\tspectrum_lines += \" name=\\\"Index\\\" scale=\\\"-1.0\\\" value=\\\"\"\r\n\t\t\tspectrum_lines += str(Gamma)+\"\\\"/>\\n\"\r\n \r\n\t\t\tif mysource == 0 and variabilite!=1.0:\n\t \n\t\t\t spectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"200000.0\\\" min=\\\"20.0\\\"\"\r\n\t\t\t spectrum_lines += \" name=\\\"LowerLimit\\\" scale=\\\"1.0\\\" value=\\\"1000.0\\\"/>\\n\"\r\n \r\n\t\t\t spectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"1000000.0\\\" min=\\\"20.0\\\"\"\r\n\t\t\t spectrum_lines += \" name=\\\"UpperLimit\\\" scale=\\\"1.0\\\" value=\\\"100000.0\\\"/>\\n\"\n\t\t\telse:\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"200000.0\\\" min=\\\"20.0\\\"\"\n\t\t\t\tspectrum_lines += \" name=\\\"LowerLimit\\\" scale=\\\"1.0\\\" value=\\\"100\\\"/>\\n\"\n\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"100000.0\\\" Min =\\\"20.0\\\"\"\n\t\t\t\tspectrum_lines += \" name=\\\"UpperLimit\\\" scale=\\\"1.0\\\" value=\\\"100000.0\\\"/>\\n\"\n\n \t\t#ajout du modele spectral a la liste de parametres \r\n\t\tresult_line += \" <spectrum type=\\\"\"+spectrum_type+\"\\\">\\n\"\r\t\tresult_line += spectrum_lines\r\n\t\tresult_line += \" </spectrum>\\n\"\n\n\t\t\n\n\t\tif mysource==0 and variabilite!=1.0:\n \t\t\t#ajout du modele spatial a la liste de parametres \r\n\t\t\tresult_line += \" <spatialModel type=\\\"SkyDirFunction\\\">\\n\"\r\n\t\t\tresult_line += \" <parameter free=\\\"0\\\" max=\\\"360\\\" min=\\\"-360\\\"\"\r\n\t\t\tresult_line += \" name=\\\"RA\\\" scale=\\\"1\\\" value=\\\"\"+RA+\"\\\"/>\\n\"\r\n\t\t\tresult_line += \" <parameter free=\\\"0\\\" max=\\\"90\\\" min=\\\"-90\\\"\"\r\n\t\t\tresult_line += \" name=\\\"DEC\\\" scale=\\\"1\\\" value=\\\"\"+DEC+\"\\\"/>\\n\"\r\n\t\t\tresult_line += \" </spatialModel>\\n\"\n\t\telif mysource==0 and variabilite==1.0:\n \t\t\t#ajout du modele spatial a la liste de parametres \r\n\t\t\tresult_line += \" <spatialModel type=\\\"SkyDirFunction\\\">\\n\"\r\n\t\t\tresult_line += \" <parameter free=\\\"1\\\" max=\\\"360\\\" min=\\\"-360\\\"\"\r\n\t\t\tresult_line += \" name=\\\"RA\\\" scale=\\\"1\\\" value=\\\"\"+RA+\"\\\"/>\\n\"\r\n\t\t\tresult_line += \" <parameter free=\\\"1\\\" max=\\\"90\\\" min=\\\"-90\\\"\"\r\n\t\t\tresult_line += \" name=\\\"DEC\\\" scale=\\\"1\\\" value=\\\"\"+DEC+\"\\\"/>\\n\"\r\n\t\t\tresult_line += \" </spatialModel>\\n\"\n\t\telse:\n #ajout du modele spatial a la liste de parametres \n\t\t\tresult_line += \" <spatialModel type=\\\"SkyDirFunction\\\">\\n\"\n\t\t\tresult_line += \" <parameter free=\\\"1\\\" max=\\\"360\\\" min=\\\"-360\\\"\"\n\t\t\tresult_line += \" name=\\\"RA\\\" scale=\\\"1\\\" value=\\\"\"+RA+\"\\\"/>\\n\"\n\t\t\tresult_line += \" <parameter free=\\\"1\\\" max=\\\"90\\\" min=\\\"-90\\\"\"\n\t\t\tresult_line += \" name=\\\"DEC\\\" scale=\\\"1\\\" value=\\\"\"+DEC+\"\\\"/>\\n\"\n\t\t\tresult_line += \" </spatialModel>\\n\"\n\t\t\t\n\t\tresult_line += \" </source>\\n\"\r\n\t\tfresult.write(result_line+\"\\n\")\r\n #Ajout du fond diffus galactique\n\tresult_line=\" <source \"\r\n\tresult_line += \"name=\\\"gal_v02\\\"\"\r\n\tresult_line += \" type=\\\"DiffuseSource\\\">\\n\"\r\n\tspectrum_type = \"ConstantValue\"\r\n\r\n\tspectrum_lines = \" <parameter free=\\\"1\\\" max=\\\"10.0\\\" min=\\\"0\\\"\"\r\n\tspectrum_lines += \" name=\\\"Value\\\" scale=\\\"1.0\\\" value=\\\"\"+str(Frac)+\"\\\" />\\n\"\r\n\r\n\tresult_line += \" <spectrum type=\\\"\"+spectrum_type+\"\\\">\\n\"\r\n\tresult_line += 
spectrum_lines\r\n\tresult_line += \" </spectrum>\\n\"\r\n\r\n\tresult_line += \" <spatialModel file=\\\"/nfs/farm/g/glast/u31/marianne/VelaX/July09_Pointed/gll_iem_v02.fit\\\" type=\\\"MapCubeFunction\\\">\\n\"\r\n\tresult_line += \" <parameter free=\\\"0\\\" max=\\\"1000.0\\\" min=\\\"0.0\\\"\"\r\n\tresult_line += \" name=\\\"Normalization\\\" scale=\\\"1\\\" value=\\\"1.0\\\"/>\\n\"\r\n\tresult_line += \" </spatialModel>\\n\"\r\n\tresult_line += \" </source>\\n\"\r\n\tfresult.write(result_line+\"\\n\")\r\n\r\n \t#Ajout du fond diffus extragalactique\r\n\tresult_line=\" <source \"\r\n\tresult_line += \"name=\\\"eg_v02\\\"\"\r\n\tresult_line += \" type=\\\"DiffuseSource\\\">\\n\"\r\n\tspectrum_type = \"FileFunction\"\r\n\r\tspectrum_lines = \" <parameter free=\\\"1\\\" max=\\\"10.0\\\" min=\\\"0\\\"\"\r\n\tspectrum_lines += \" name=\\\"Normalization\\\" scale=\\\"1.0\\\" value=\\\"\"+str(Frac)+\"\\\" />\\n\"\r\n\r\n\tresult_line += \" <spectrum file=\\\"/nfs/farm/g/glast/u31/marianne/VelaX/July09_Pointed/isotropic_iem_v02.txt\\\" type=\\\"\"+spectrum_type+\"\\\">\\n\"\r\n\tresult_line += spectrum_lines\r\n\tresult_line += \" </spectrum>\\n\"\r\n \r\n\tresult_line += \" <spatialModel type=\\\"ConstantValue\\\">\\n\"\r\n\tresult_line += \" <parameter free=\\\"0\\\" max=\\\"100.0\\\" min=\\\"0.0\\\"\"\r\n\tresult_line += \" name=\\\"Value\\\" scale=\\\"1\\\" value=\\\"1.0\\\"/>\\n\"\r\n\tresult_line += \" </spatialModel>\\n\"\r\n\tresult_line += \" </source>\\n\"\r\n\tfresult.write(result_line+\"\\n\")\r\n\n \t#Fermeture des fichiers \r\n\tf.close() \r\n\tfresult.write(\"\\n</source_library>\\n\")\r\n\tfresult.close()\r\n\treturn", "def create_gen_xml(self, out_file):\n\n param_list = []\n msg = []\n msg_type = []\n dep_node = []\n for line in self.full_ed_lines:\n param_list.append(line.text())\n dep_pkg = param_list[6].split(', ')\n if dep_pkg[len(dep_pkg) - 1] == '':\n dep_pkg.pop()\n for dep in self.manager.wid.sub_list:\n dep_node.append(dep['msg_type'])\n for dep in self.manager.wid.pub_list:\n dep_node.append(dep['msg_type'])\n for dep in dep_node:\n a, b = dep.split('/')\n msg.append(a)\n msg_type.append(b)\n f = open('../genkernel/templates/package_rosgen.xml')\n o = open(out_file, 'a')\n flag = 0\n while 1:\n line = f.readline()\n if not line: break\n for i in range(6):\n line = line.replace('[{0}]'.format(i), param_list[i])\n line = line.replace('[7]', param_list[7])\n if line.find('[6]') != -1:\n for dep in dep_pkg:\n line_dep = '\\t<depend>{0}</depend>\\n'.format(dep)\n o.write(line_dep)\n flag = 1\n elif line.find('[8]') != -1:\n for dep, tp in zip(msg, msg_type):\n line_dep = '\\t\\t<depend type=\"{1}\">{0}</depend>\\n'.format(dep, tp)\n o.write(line_dep)\n flag = 1\n elif line.find('<subscribers>') != -1:\n o.write('\\t\\t<subscribers>\\n')\n for sub in self.manager.wid.sub_list:\n o.write('\\t\\t\\t<sub>\\n')\n o.write('\\t\\t\\t\\t<name>{0}</name>\\n'.format(sub['name']))\n o.write('\\t\\t\\t\\t<msg_type>{0}</msg_type>\\n'.format(sub['msg_type']))\n o.write('\\t\\t\\t\\t<topic_name>{0}</topic_name>\\n'.format(sub['topic_name']))\n o.write('\\t\\t\\t\\t<queue_size>{0}</queue_size>\\n'.format(sub['queue_size']))\n o.write('\\t\\t\\t</sub>\\n')\n o.write('\\t\\t</subscribers>\\n')\n flag = 1\n elif line.find('<publishers>') != -1:\n o.write('\\t\\t<publishers>\\n')\n for pub in self.manager.wid.pub_list:\n o.write('\\t\\t\\t<pub>\\n')\n o.write('\\t\\t\\t\\t<name>{0}</name>\\n'.format(pub['name']))\n o.write('\\t\\t\\t\\t<msg_type>{0}</msg_type>\\n'.format(pub['msg_type']))\n 
o.write('\\t\\t\\t\\t<topic_name>{0}</topic_name>\\n'.format(pub['topic_name']))\n o.write('\\t\\t\\t\\t<queue_size>{0}</queue_size>\\n'.format(pub['queue_size']))\n o.write('\\t\\t\\t</pub>\\n')\n o.write('\\t\\t</publishers>\\n')\n flag = 1\n if flag == 0:\n o.write(line)\n else:\n flag = 0\n o.close()\n f.close()\n self.changed = False", "def buildxml(self):\n # assume self._objslock is already held here\n logger.info(\"Emane.buildxml()\")\n self.buildplatformxml()\n self.buildnemxml()\n self.buildtransportxml()\n self.buildeventservicexml()", "def makexmlfunc(healpix,ra,dec,week1,week2,distance):\n\t\n\tif week1!=week2:\n\t\tidentity=\"%06d_%d_%d_w%03d_w%03d\" %(healpix,ra,dec,week1,week2)\n\t\tltcube=\"%s/lat_ltcube_weekly_w%03d_w%03d_p203_v001.fits\" %(cfg.home,week1,week2)\n\t\tspacecraft=\"%s/w%03d_w%03d_newspacecraft.fits\" %(cfg.ispace,week1,week2)\n\telse:\n\t\tidentity=\"%06d_%d_%d_w%03d\" %(healpix,ra,dec,week1)\n\t\tltcube=\"%s/lat_spacecraft_weekly_w%03d_p203_v001_ltcube.fits\" %(cfg.home,week1)\n\t\tspacecraft=\"%s/lat_spacecraft_weekly_w%03d_p202_v001.fits \" %(cfg.ispace,week1)\n\n\tregion_filtered=\"%s_region_filtered_gti.fits\" %(identity)\n\tfermisources=\"%s_fermisources_model.xml\" %(identity)\n\tinputmodel=\"%s_input_model.xml\" %(identity)\n\tfermis=\"%s_fermis.xml\" %identity\n\tresponse=\"P7REP_SOURCE_V15\"\n\tmakexmllog=\"%s_output_makexml.log\" %identity\n\tglobal extendedsource\n\tglobal numberofextendedsources\n\textendedlog=\"%s_number_of_extendedsources.log\" %identity\n\tExtendedList=\"ExtendedList.txt\"\n\tOthersList=\"OthersList.txt\"\n\n\t\n\twith open (makexmllog,'r') as outputFile: #opens the makexmllog file from makesyfunc. This document contains info about the extended sources.\n\t\t\n\t\tfor line in outputFile:\n\t\t\t\n\t\t\twith open (makexmllog,'r') as File:\n\t\t\t\tif line.startswith('Added')==True:\n\t\t\t\t\ta,b=line.split('and ')\t\n\t\t\t\t\tb1,b2,b3=b.split(' ')\n\t\t\t\t\n\t\t\t\t\tnumberofextendedsources=int(b1) #b1 is the number of extended sources\n\toutputFile.close()\n\toutputFile=open(inputmodel, 'w')\n\tprint numberofextendedsources\n\n\tif numberofextendedsources==1: #if there is an extended source\n\t\twith open (makexmllog,'r') as outputFile:\n\t\t\n\t\t\tfor line in outputFile:\n\t\t\t\n\t\t\t\twith open (makexmllog,'r') as File:\n\t\t\t\t\tif line.startswith('Extended')==True:\n\t\t\t\t\t\tprint line\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\tc,d=line.split(' in')\n\t\t\t\t\t\n\t\t\t\t\t\tc1,c2,c3,c4=c.split(' ')\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t\textendedsource=str(c3) #extracts the name of the extended source from makexmllog\n\t\n\n\t\t\n\n\n\t\toutputFile.close()\t\n\n\n\t\n\n\t\twith open(\"%s\" %fermisources) as thefile: #opens the xml file that was created from makesyfunc\n\t\t\tfor line in thefile:\n\t\t\t\tif line.startswith('\t<spatialModel file=\"%s.fits\"' %(extendedsource))==True:\n\n\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\tspecial=str.replace(line,'%s.fits'%extendedsource,'%s/%s.fits' %(cfg.homesy,extendedsource)) \n\t\t\t\t\tprint special #replace with the correct path to the extendedsource(Templates folder)\n\t\t\t\n\t\t\t\t\tspecial1=str.replace(special,'type=\"SpatialMap\"','type=\"SpatialMap\" map_based_integral=\"true\"')\n\t\t\t\t\tprint special1 #instruction from fermi tutorial, you must add map_based...\n\t\t\t\t\toutputFile=open(fermis, 'w') #write to fermis, the original xml with the right path to the extended source\n\t\t\t\t\twith open(\"%s\" %fermisources,'r') as infile:\n\t\t\t\t\t\tfor line in 
infile:\n\t\t\t\t\t\t\tif line.startswith('\t<spatialModel file=\"%s.fits\"' %(extendedsource))==False:\n\t\t\t\t\t\t\t\toutputFile.write(line)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\toutputFile.write(special1)\n\t\t\t\t\toutputFile.close()\n\t\t\t\t\t\t\t\t\t\n\n\n\t\t\t\n\t\toutputFile=open(inputmodel, 'w') #final xml file. contains the right path and the source info of \"your\" source.\n\t\twith open(fermis,'r') as infile:\n\t\t\tfor line in infile:\n\t\t\t\tif line.startswith('</source_library>')==False:\n\t\t\t\t\toutputFile.write(line)\n\t\t\t\t\t\t\t\n\t\toutputFile.write('\\n\\\n\t\t\t<!-- My sources -->\\n\\\n\t\t\t<source name=\"%f_%f\" type=\"PointSource\">\\n\\\n\t\t\t<spectrum type=\"PowerLaw\">\\n\\\n\t\t\t<parameter free=\"1\" max=\"1000.0\" min=\"0.001\" name=\"Prefactor\" scale=\"1e-09\" value=\"10\"/>\\n\\\n\t\t\t<parameter free=\"1\" max=\"-1.0\" min=\"-5.0\" name=\"Index\" scale=\"1.0\" value=\"-2.1\"/>\\n\\\n\t\t\t<parameter free=\"0\" max=\"2000.0\" min=\"30.0\" name=\"Scale\" scale=\"1.0\" value=\"100.0\"/>\\n\\\n\t\t\t</spectrum>\\n\\\n\t\t\t<spatialModel type=\"SkyDirFunction\">\\n\\\n\t\t\t<parameter free=\"0\" max=\"360\" min=\"-360\" name=\"RA\" scale=\"1.0\" value=\"%f\"/>\\n\\\n\t\t\t<parameter free=\"0\" max=\"90\" min=\"-90\" name=\"DEC\" scale=\"1.0\" value=\"%f\"/>\\n\\\n\t\t\t</spatialModel>\\n\\\n\t\t\t</source>\\n\\\n\t\t\t</source_library>\\n' % (ra,dec,ra,dec))\n\n\t\t\t\t\n\n\t\toutputFile.close()\n\t\n\t\twith open(\"%s_diffrsp.log\" % (identity), 'w') as outsyputFile: #run diffrsp if you have an extended source.\n\t\t\tsubprocess.call(['%s' %(cfg.pythoncommand),'gtdiffrsp.py', '%s' %(region_filtered),'%s' %(spacecraft), '%s' %inputmodel, '%s' %(response),'%s' %identity ],stdout=outsyputFile)\n\t\t\t\n\t\twith open(ExtendedList,\"a+\") as outsyFile:\n\t\t\toutsyFile.write(\"%d %f %f %d %d %f\\n\" %(healpix,ra,dec,week1,week2,distance))\n\t\t\t\t\t\n\tif numberofextendedsources==0: #if there is no extended source\n\t\toutputFile=open('%s' %(inputmodel), 'w') #write to inputmodel, \"your\" source\n\t\twith open('%s' %(fermisources),'r') as infile:\n\t\t\tfor line in infile:\n\t\t\t\tif line.startswith('</source_library>')==False:\n\t\t\t\t\toutputFile.write(line)\n\t\t\t\t\t\n\t\t\t\n\n\t\toutputFile.write('\\n\\\n\t\t\t<!-- My sources -->\\n\\\n\t\t\t<source name=\"%f_%f\" type=\"PointSource\">\\n\\\n\t\t\t<spectrum type=\"PowerLaw\">\\n\\\n\t\t\t<parameter free=\"1\" max=\"1000.0\" min=\"0.001\" name=\"Prefactor\" scale=\"1e-09\" value=\"10\"/>\\n\\\n\t\t\t<parameter free=\"1\" max=\"-1.0\" min=\"-5.0\" name=\"Index\" scale=\"1.0\" value=\"-2.1\"/>\\n\\\n\t\t\t<parameter free=\"0\" max=\"2000.0\" min=\"30.0\" name=\"Scale\" scale=\"1.0\" value=\"100.0\"/>\\n\\\n\t\t\t</spectrum>\\n\\\n\t\t\t<spatialModel type=\"SkyDirFunction\">\\n\\\n\t\t\t<parameter free=\"0\" max=\"360\" min=\"-360\" name=\"RA\" scale=\"1.0\" value=\"%f\"/>\\n\\\n\t\t\t<parameter free=\"0\" max=\"90\" min=\"-90\" name=\"DEC\" scale=\"1.0\" value=\"%f\"/>\\n\\\n\t\t\t</spatialModel>\\n\\\n\t\t\t</source>\\n\\\n\t\t\t</source_library>\\n' % (ra,dec,ra,dec))\n\n\t\toutputFile.close()\n\tif numberofextendedsources>1:\n\t\twith open(OthersList,\"a+\") as outsyFile:\n\t\t\toutsyFile.write(\"%d %f %f %d %d %f\\n\" %(healpix,ra,dec,week1,week2,distance))\n\t\n\tif numberofextendedsources==1:\n\t\toutsyputFile=open(extendedlog,'w') #write the number of extended sources and name in a file\n\t\toutsyputFile.write(\"%s\\n\\\n 
\t%s\"%(numberofextendedsources,extendedsource))\n\t\toutsyputFile.close()\n\n\tif numberofextendedsources !=1:\n\t\toutsyputFile=open(extendedlog,'w') #write the number of extended sources and name in a file\n\t\toutsyputFile.write(\"%s\" %(numberofextendedsources))\n\t\toutsyputFile.close()", "def test_01_FindXml(self):\n self.assertEqual(self.m_xml.root.tag, TESTING_PYHOUSE)\n # sprint(PrettyFormatAny.form(self.m_root_xml, 'A3-01-A - Entire Xml'))\n self.assertEqual(self.m_xml.controller_sect.tag, 'ControllerSection', 'XML - No Controllers section')\n # print(PrettyFormatAny.form(self.m_xml.controller_sect, 'A3-01-B - All Controllers Xml'))\n self.assertEqual(self.m_xml.controller.tag, 'Controller', 'XML - No Controller section')\n # print(PrettyFormatAny.form(self.m_xml.controller, 'A3-01-C - First Controller Xml'))", "def generate_xml(self, locations):\n\n ET.SubElement(self.root, 'generator').text = __revision__\n ET.SubElement(self.root, 'generated_at').text = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n xmlroot = self.root\n kernel = Kerneladapter()\n\n for locname in locations:\n xml_location = ET.SubElement(xmlroot, 'location')\n location = kernel.location_info(locname)\n ET.SubElement(xml_location, \"location\").text = unicode(locname)\n ET.SubElement(xml_location, \"height\").text = unicode(location['height'])\n ET.SubElement(xml_location, \"attributes\").text = unicode(location['attributes'])\n ET.SubElement(xml_location, \"floorlevel\").text = unicode(location['floorlevel'])\n ET.SubElement(xml_location, \"preference\").text = unicode(location['preference'])\n ET.SubElement(xml_location, \"info\").text = unicode(location['info'])\n ET.SubElement(xml_location, \"reserved_for\").text = unicode(location['reserved_for'])\n\n for mui in location['allocated_by']:\n unit = kernel.unit_info(mui)\n xml_unit = ET.SubElement(xml_location, \"unit\")\n ET.SubElement(xml_unit, \"mui\").text = unicode(unit['mui'])\n ET.SubElement(xml_unit, \"quantity\").text = unicode(unit['quantity'])\n ET.SubElement(xml_unit, \"artnr\").text = unicode(unit['product'])\n ET.SubElement(xml_unit, \"height\").text = unicode(unit['height'])\n ET.SubElement(xml_unit, \"pick_quantity\").text = unicode(unit['pick_quantity'])\n ET.SubElement(xml_unit, 'created_at').text = unit['created_at'].strftime('%Y-%m-%d %H:%M:%S')\n ET.SubElement(xml_unit, \"movements\").text = unicode(unit['movements'])\n ET.SubElement(xml_unit, \"picks\").text = unicode(unit['picks'])\n ET.SubElement(xml_unit, \"attributes\").text = unicode(unit['attributes'])\n try:\n product = produktpass.models.Product.objects.get(artnr=unit['product'])\n ET.SubElement(xml_unit, \"product_name\").text = unicode(product.name)\n except produktpass.models.Product.DoesNotExist:\n ET.SubElement(xml_unit, \"product_name\").text = '???'\n\n return xmlroot", "def createXML(config, ccdpars, userpars):\n\n # identify the template\n appLab = ccdpars.appLab.value()\n if config.debug:\n print('DEBUG: createXML: application = ' + appLab)\n print('DEBUG: createXML: application vals = ' + str(config.templates[appLab]))\n\n if config.template_from_server:\n # get template from server\n url = config.http_camera_server + config.http_path_get + '?' 
+ \\\n config.http_search_attr_name + '=' + config.templates[appLab]\n if config.debug:\n print ('DEBUG: url = ' + url)\n sxml = urllib2.urlopen(url).read()\n txml = ET.fromstring(sxml)\n else:\n # get template from local file\n if config.debug:\n print ('DEBUG: directory = ' + config.template_directory)\n lfile = os.path.join(config.template_directory, config.templates[appLab]['app'])\n if config.debug:\n print ('DEBUG: local file = ' + lfile)\n tree = ET.parse(lfile)\n txml = tree.getroot()\n\n # Find all CCD parameters\n cconfig = txml.find('configure_camera')\n pdict = {}\n for param in cconfig.findall('set_parameter'):\n pdict[param.attrib['ref']] = param.attrib\n\n # Set them. This is designed so that missing \n # parameters will cause exceptions to be raised.\n\n # X-binning factor\n pdict['X_BIN']['value'] = ccdpars.xbin.get()\n\n # Y-binning factor\n pdict['X_BIN']['value'] = ccdpars.ybin.get()\n\n # Number of exposures\n pdict['NUM_EXPS']['value'] = '-1' if ccdpars.number.value() == 0 else ccdpars.number.get()\n\n # LED level\n pdict['LED_FLSH']['value'] = ccdpars.led.get()\n\n # Avalanche or normal\n pdict['OUTPUT']['value'] = str(ccdpars.avalanche())\n\n # Avalanche gain\n pdict['HV_GAIN']['value'] = ccdpars.avgain.get()\n\n # Clear or not\n pdict['EN_CLR']['value'] = str(ccdpars.clear())\n\n # Dwell\n pdict['DWELL']['value'] = ccdpars.expose.get()\n\n # Readout speed\n pdict['SPEED']['value'] = '0' if ccdpars.readout == 'Slow' else '1' \\\n if ccdpars.readout == 'Medium' else '2'\n\n # Number of windows -- needed to set output parameters correctly\n nwin = ccdpars.nwin.value()\n\n # Load up enabled windows, null disabled windows\n for nw, win in ccdpars.wframe.wins:\n if nw < nwin:\n pdict['X' + str(nw+1) + '_START']['value'] = win.xstart.get()\n pdict['Y' + str(nw+1) + '_START']['value'] = win.ystart.get()\n pdict['X' + str(nw+1) + '_SIZE']['value'] = win.nx.get()\n pdict['Y' + str(nw+1) + '_SIZE']['value'] = win.ny.get()\n else:\n pdict['X' + str(nw+1) + '_START']['value'] = '1'\n pdict['Y' + str(nw+1) + '_START']['value'] = '1'\n pdict['X' + str(nw+1) + '_SIZE']['value'] = '0'\n pdict['Y' + str(nw+1) + '_SIZE']['value'] = '0'\n\n # Load the user parameters\n uconfig = txml.find('user')\n uconfig.set('target', userpars.target.get())\n uconfig.set('comment', userpars.comment.get())\n uconfig.set('ID', userpars.progid.get())\n uconfig.set('PI', userpars.pi.get())\n uconfig.set('Observers', userpars.observers.get())\n \n return txml", "def _populate_from_xml_file(self, xml):\n '''\n example from API: http://www.ga.gov.au/www/argus.argus_api.survey?pSurveyNo=921\n\n <?xml version=\"1.0\" ?>\n <ROWSET>\n <ROW>\n <SURVEYID>921</SURVEYID>\n <SURVEYNAME>Goomalling, WA, 1996</SURVEYNAME>\n <STATE>WA</STATE>\n <OPERATOR>Stockdale Prospecting Ltd.</OPERATOR>\n <CONTRACTOR>Kevron Geophysics Pty Ltd</CONTRACTOR>\n <PROCESSOR>Kevron Geophysics Pty Ltd</PROCESSOR>\n <SURVEY_TYPE>Detailed</SURVEY_TYPE>\n <DATATYPES>MAG,RAL,ELE</DATATYPES>\n <VESSEL>Aero Commander</VESSEL>\n <VESSEL_TYPE>Plane</VESSEL_TYPE>\n <RELEASEDATE/>\n <ONSHORE_OFFSHORE>Onshore</ONSHORE_OFFSHORE>\n <STARTDATE>05-DEC-96</STARTDATE>\n <ENDDATE>22-DEC-96</ENDDATE>\n <WLONG>116.366662</WLONG>\n <ELONG>117.749996</ELONG>\n <SLAT>-31.483336</SLAT>\n <NLAT>-30.566668</NLAT>\n <LINE_KM>35665</LINE_KM>\n <TOTAL_KM/>\n <LINE_SPACING>250</LINE_SPACING>\n <LINE_DIRECTION>180</LINE_DIRECTION>\n <TIE_SPACING/>\n <SQUARE_KM/>\n <CRYSTAL_VOLUME>33.6</CRYSTAL_VOLUME>\n <UP_CRYSTAL_VOLUME>4.2</UP_CRYSTAL_VOLUME>\n 
<DIGITAL_DATA>MAG,RAL,ELE</DIGITAL_DATA>\n <GEODETIC_DATUM>WGS84</GEODETIC_DATUM>\n <ASL/>\n <AGL>60</AGL>\n <MAG_INSTRUMENT>Scintrex CS2</MAG_INSTRUMENT>\n <RAD_INSTRUMENT>Exploranium GR820</RAD_INSTRUMENT>\n </ROW>\n </ROWSET>\n '''\n # turn the XML doc into a Python object\n root = objectify.fromstring(xml)\n\n if hasattr(root.ROW, 'SURVEYNAME'):\n self.survey_name = root.ROW.SURVEYNAME\n if hasattr(root.ROW, 'STATE'):\n self.state = root.ROW.STATE\n if hasattr(root.ROW, 'OPERATOR'):\n self.operator = root.ROW.OPERATOR\n if hasattr(root.ROW, 'CONTRACTOR'):\n self.contractor = root.ROW.CONTRACTOR\n if hasattr(root.ROW, 'PROCESSOR'):\n self.processor = root.ROW.PROCESSOR\n if hasattr(root.ROW, 'SURVEY_TYPE'):\n self.survey_type = root.ROW.SURVEY_TYPE\n if hasattr(root.ROW, 'DATATYPES'):\n self.data_types = root.ROW.DATATYPES\n if hasattr(root.ROW, 'VESSEL'):\n self.vessel = root.ROW.VESSEL\n if hasattr(root.ROW, 'VESSEL_TYPE'):\n self.vessel_type = root.ROW.VESSEL_TYPE\n if hasattr(root.ROW, 'RELEASEDATE'):\n self.release_date = datetime.strptime(root.ROW.RELEASEDATE.text, \"%Y-%m-%dT%H:%M:%S\") if root.ROW.RELEASEDATE.text is not None else None\n if hasattr(root.ROW, 'ONSHORE_OFFSHORE'):\n self.onshore_offshore = root.ROW.ONSHORE_OFFSHORE\n if hasattr(root.ROW, 'STARTDATE'):\n self.start_date = datetime.strptime(root.ROW.STARTDATE.text, \"%Y-%m-%dT%H:%M:%S\") if root.ROW.STARTDATE.text is not None else None\n if hasattr(root.ROW, 'ENDDATE'):\n self.end_date = datetime.strptime(root.ROW.ENDDATE.text, \"%Y-%m-%dT%H:%M:%S\") if root.ROW.ENDDATE.text is not None else None\n if hasattr(root.ROW, 'WLONG'):\n self.w_long = root.ROW.WLONG\n if hasattr(root.ROW, 'ELONG'):\n self.e_long = root.ROW.ELONG\n if hasattr(root.ROW, 'SLAT'):\n self.s_lat = root.ROW.SLAT\n if hasattr(root.ROW, 'NLAT'):\n self.n_lat = root.ROW.NLAT\n if hasattr(root.ROW, 'LINE_KM'):\n self.line_km = root.ROW.LINE_KM\n if hasattr(root.ROW, 'TOTAL_KM'):\n self.total_km = root.ROW.TOTAL_KM\n if hasattr(root.ROW, 'LINE_SPACING'):\n self.line_spacing = root.ROW.LINE_SPACING\n if hasattr(root.ROW, 'LINE_DIRECTION'):\n self.line_direction = root.ROW.LINE_DIRECTION\n if hasattr(root.ROW, 'TIE_SPACING'):\n self.tie_spacing = root.ROW.TIE_SPACING\n if hasattr(root.ROW, 'SQUARE_KM'):\n self.square_km = root.ROW.SQUARE_KM\n if hasattr(root.ROW, 'CRYSTAL_VOLUME'):\n self.crystal_volume = root.ROW.CRYSTAL_VOLUME\n if hasattr(root.ROW, 'UP_CRYSTAL_VOLUME'):\n self.up_crystal_volume = root.ROW.UP_CRYSTAL_VOLUME\n if hasattr(root.ROW, 'DIGITAL_DATA'):\n self.digital_data = root.ROW.DIGITAL_DATA\n if hasattr(root.ROW, 'GEODETIC_DATUM'):\n self.geodetic_datum = root.ROW.GEODETIC_DATUM\n if hasattr(root.ROW, 'ASL'):\n self.asl = root.ROW.ASL\n if hasattr(root.ROW, 'AGL'):\n self.agl = root.ROW.AGL\n if hasattr(root.ROW, 'MAG_INSTRUMENT'):\n self.mag_instrument = root.ROW.MAG_INSTRUMENT\n if hasattr(root.ROW, 'RAD_INSTRUMENT'):\n self.rad_instrument = root.ROW.RAD_INSTRUMENT", "def test_pep8_conformance_pygccxml(self):\n\n print(\"\\r\\n\")\n\n # Get the path to current directory\n path = os.path.dirname(os.path.realpath(__file__))\n path += \"/../pygccxml/\"\n\n self.run_check(path)", "def wrez2xml(self,newdoc,newroot):\n\t\twrez = newdoc.createElement('wrez')\n\t\twrez.setAttribute('hasChanged', str(self.hasChanged))\n\t\tnewroot.appendChild(wrez)\n\n\t\tpath = newdoc.createElement('path')\n\t\tpath.setAttribute('value', self.path)\n\t\twrez.appendChild(path)\n\t\n\t\tpath = newdoc.createElement('init_str')\n\t\tpath.setAttribute('value', 
self.init_str)\n\t\twrez.appendChild(path)\n\n\t\tpath = newdoc.createElement('hash_sha512')\n\t\tpath.setAttribute('value', self.hash_sha512)\n\t\twrez.appendChild(path)\n\t\n\t\tpath = newdoc.createElement('src_rip')\n\t\tpath.setAttribute('value', self.src_rip)\n\t\twrez.appendChild(path)\n\n\t\tpath = newdoc.createElement('quality')\n\t\tpath.setAttribute('value', self.quality)\n\t\twrez.appendChild(path)\n\n\t\tpath = newdoc.createElement('codec')\n\t\tpath.setAttribute('value', self.codec)\n\t\twrez.appendChild(path)\n\t\n\t\tpath = newdoc.createElement('language')\n\t\tpath.setAttribute('value', self.language)\n\t\twrez.appendChild(path)\n\t\n\t\tpath = newdoc.createElement('audio')\n\t\tpath.setAttribute('value', self.audio)\n\t\twrez.appendChild(path)\n\n\t\tpath = newdoc.createElement('encoder')\n\t\tpath.setAttribute('value', self.encoder)\n\t\twrez.appendChild(path)\n\n\t\tpath = newdoc.createElement('version')\n\t\tpath.setAttribute('value', self.version)\n\t\twrez.appendChild(path)\n\t\n\t\tpath = newdoc.createElement('extension')\n\t\tpath.setAttribute('value', self.extension)\n\t\twrez.appendChild(path)\n\n\t\tpath = newdoc.createElement('release_year')\n\t\tpath.setAttribute('value', self.release_year)\n\t\twrez.appendChild(path)\n\t\n\t\tpath = newdoc.createElement('title')\n\t\tpath.setAttribute('value', self.title)\n\t\twrez.appendChild(path)\n\n\t\tpath = newdoc.createElement('size')\n\t\tpath.setAttribute('value', str(self.size))\n\t\twrez.appendChild(path)\n\t\treturn wrez", "def test_01_FindXml(self):\n self.assertEqual(self.m_xml.root.tag, TESTING_PYHOUSE)\n self.assertEqual(self.m_xml.controller_sect.tag, 'ControllerSection', 'XML - No Controllers section')\n self.assertEqual(self.m_xml.controller.tag, 'Controller', 'XML - No Controller section')", "def evaluate(self, xml_gold_path, xml_output_path):\n\n # Go through all files in xml_gold_path directory\n for file in os.listdir(xml_gold_path):\n\n # Set path to file\n file = xml_gold_path+file\n\n # Open files only, ignore subdirectories\n if os.path.isfile(file) and file.lower().endswith('.xml'):\n\n # Open xml files\n chapter_input_gold = open(file, 'r', encoding='utf8')\n chapter_input_test = open(xml_output_path+os.path.split(file)[-1], 'r', encoding='utf8')\n\n # Check if filenams are the same\n chapter_input_gold_name = os.path.split(chapter_input_gold.name)[-1]\n chapter_input_test_name = os.path.split(chapter_input_test.name)[-1]\n\n if chapter_input_gold_name == chapter_input_test_name:\n\n # Console log\n chapter_input_gold_name = chapter_input_gold.name\n chapter_input_test_name = chapter_input_test.name\n #print('Calculating score for: ' + chapter_input_gold_name + ' and: ' + chapter_input_test_name)\n\n # Process xml input file with BeautifulSoup\n chapter_input_gold = BeautifulSoup(chapter_input_gold, 'xml')\n chapter_input_test = BeautifulSoup(chapter_input_test, 'xml')\n\n # Empty variables for collecting Target scores\n target_precision_scores = 0\n target_recall_scores = 0\n target_f1_scores = 0\n target_jaccard_scores = 0\n\n # Empty variables for collecting Focus scores\n focus_precision_scores = 0\n focus_recall_scores = 0\n focus_f1_scores = 0\n focus_jaccard_scores = 0\n\n # Empty variables for collecting Negated scores\n negated_precision_scores = 0\n negated_recall_scores = 0\n negated_f1_scores = 0\n negated_jaccard_scores = 0\n\n # Empty variables for collecting Scope scores\n scope_precision_scores = 0\n scope_recall_scores = 0\n scope_f1_scores = 0\n scope_jaccard_scores = 0\n\n # 
Count sentences and frames\n sentence_count = 0\n gold_frames_count = 0\n test_frames_count = 0\n\n scope_gold_frames_count = 0\n #scope_test_frames_count = 0\n\n # Find all Gold and Test Sentences\n sentences_gold = chapter_input_gold.find_all('s')\n sentences_test = chapter_input_test.find_all('s')\n\n #targets_gold = chapter_input_gold.find_all('target')\n #targets_test = chapter_input_test.find_all('target')\n\n scope_gold_frames = chapter_input_gold.find_all('fe', {'name' : SCOPE_TAG_NAME})\n scope_gold_frames_count = len(scope_gold_frames)\n\n scope_test_frames = chapter_input_test.find_all('fe', {'name' : SCOPE_TAG_NAME})\n scope_test_frames_count = len(scope_test_frames)\n\n # Exit if number of sentences != between Gold and Test files\n if len(sentences_gold) != len(sentences_test):\n raise SystemExit(print('Number of sentences between Gold and Test files does not match.\\nGold:',\n len(sentences_gold), 'Test:', len(sentences_test)))\n\n # Zip Gold and Test Sentences\n for s_gold, s_test in zip(sentences_gold, sentences_test):\n\n sentence_count = sentence_count + 1\n\n gold_frames = s_gold.find_all('frame', {'name' : NEGATION_FRAME_NAME})\n test_frames = s_test.find_all('frame', {'name' : NEGATION_FRAME_NAME})\n\n gold_frames_count = gold_frames_count + len(gold_frames)\n test_frames_count = test_frames_count + len(test_frames)\n\n for item in zip(gold_frames, test_frames):\n\n #print('\\n=========')\n #print('\\nFrame:', item[0].get('id'))\n\n target_gold_list = []\n target_test_list = []\n\n focus_gold_list = []\n focus_test_list = []\n\n negated_gold_list = []\n negated_test_list = []\n\n scope_gold_list = []\n scope_test_list = []\n\n # Flatten a nested list of fenodes\n def flatten(nested_list):\n \"\"\" Flatten a nested list of fenodes \"\"\"\n t_l = []\n for i in nested_list:\n if not isinstance(i, list):\n t_l.append(i)\n else:\n t_l.extend(flatten(i))\n return t_l\n\n # Target\n if item[0].find('target'):\n target_gold = item[0].find('target')\n target_gold_fenode_id = target_gold.find('fenode').get('idref')\n target_gold_word = s_gold.find(id=target_gold_fenode_id).get('word').lower()\n\n try:\n target_test = item[1].find('target')\n target_test_fenode__id = target_test.find('fenode').get('idref')\n target_test_word = s_test.find(id=target_test_fenode__id).get('word').lower()\n except:\n target_test_word = ''\n\n elif item[1].find('target'):\n target_test = item[1].find('target')\n target_test_fenode__id = target_test.find('fenode').get('idref')\n target_test_word = s_test.find(id=target_test_fenode__id).get('word').lower()\n\n try:\n target_gold = item[0].find('target')\n target_gold_fenode_id = target_gold.find('fenode').get('idref')\n target_gold_word = s_gold.find(id=target_gold_fenode_id).get('word').lower()\n except:\n target_gold_word = ''\n\n target_gold_list.append(target_gold_word)\n target_test_list.append(target_test_word)\n\n # Sort lists\n sorted_target_gold_list = sorted(flatten(target_gold_list))\n sorted_target_test_list = sorted(flatten(target_test_list))\n\n #print('\\nTarget [Gold]:', sorted_target_gold_list)\n #print('Target [Test]:', sorted_target_test_list)\n\n\n # Focus\n if item[0].find('fe', {'name' : FOCUS_TAG_NAME}):\n focus_gold = item[0].find('fe', {'name' : FOCUS_TAG_NAME})\n try:\n focus_gold_fenode_id = focus_gold.find('fenode').get('idref')\n focus_gold_word = s_gold.find(id=focus_gold_fenode_id).get('word').lower()\n except:\n focus_gold_word = ''\n if item[1].find('fe', {'name' : FOCUS_TAG_NAME}):\n focus_test = item[1].find('fe', 
{'name' : FOCUS_TAG_NAME})\n try:\n focus_test_fenode_id = focus_test.find('fenode').get('idref')\n focus_test_word = s_test.find(id=focus_test_fenode_id).get('word').lower()\n except:\n focus_test_word = ''\n else:\n focus_test_word = ''\n\n elif item[1].find('fe', {'name' : FOCUS_TAG_NAME}):\n focus_test = item[1].find('fe', {'name' : FOCUS_TAG_NAME})\n try:\n focus_test_fenode_id = focus_test.find('fenode').get('idref')\n focus_test_word = s_test.find(id=focus_test_fenode_id).get('word').lower()\n except:\n focus_test_word = ''\n if item[0].find('fe', {'name' : FOCUS_TAG_NAME}):\n focus_gold = item[0].find('fe', {'name' : FOCUS_TAG_NAME})\n focus_gold_fenode_id = focus_gold.find('fenode').get('idref')\n try:\n focus_gold_word = s_gold.find(id=focus_gold_fenode_id).get('word').lower()\n except AttributeError:\n focus_gold_word = ''\n else:\n focus_gold_word = ''\n\n focus_gold_list.append(focus_gold_word)\n focus_test_list.append(focus_test_word)\n\n # Sort lists\n sorted_focus_gold_list = sorted(flatten(focus_gold_list))\n sorted_focus_test_list = sorted(flatten(focus_test_list))\n\n #print('\\nFocus [Gold]:', sorted_focus_gold_list)\n #print('Focus [Test]:', sorted_focus_test_list)\n\n\n # Negated\n if item[0].find('fe', {'name' : NEGATED_TAG_NAME}):\n negated_gold = item[0].find('fe', {'name' : NEGATED_TAG_NAME})\n negated_gold_fenode_id = negated_gold.find('fenode').get('idref')\n try:\n negated_gold_word = s_gold.find(id=negated_gold_fenode_id).get('word').lower()\n except AttributeError:\n negated_gold_word = ''\n if item[1].find('fe', {'name' : NEGATED_TAG_NAME}):\n negated_test = item[1].find('fe', {'name' : NEGATED_TAG_NAME})\n try:\n negated_test_fenode_id = negated_test.find('fenode').get('idref')\n negated_test_word = s_test.find(id=negated_test_fenode_id).get('word').lower()\n except:\n negated_test_word = ''\n else:\n negated_test_word = ''\n\n elif item[1].find('fe', {'name' : NEGATED_TAG_NAME}):\n negated_test = item[1].find('fe', {'name' : NEGATED_TAG_NAME})\n try:\n negated_test_fenode_id = negated_test.find('fenode').get('idref')\n negated_test_word = s_test.find(id=negated_test_fenode_id).get('word').lower()\n except:\n negated_test_word = ''\n if item[0].find('fe', {'name' : NEGATED_TAG_NAME}):\n negated_gold = item[0].find('fe', {'name' : NEGATED_TAG_NAME})\n negated_gold_fenode_id = negated_gold.find('fenode').get('idref')\n try:\n negated_gold_word = s_gold.find(id=negated_gold_fenode_id).get('word').lower()\n except AttributeError:\n negated_gold_word = ''\n else:\n negated_gold_word = ''\n else:\n negated_test_word = ''\n negated_gold_word = ''\n\n negated_gold_list.append(negated_gold_word)\n negated_test_list.append(negated_test_word)\n\n # Sort lists\n sorted_negated_gold_list = sorted(flatten(negated_gold_list))\n sorted_negated_test_list = sorted(flatten(negated_test_list))\n\n #print('\\nNegated [Gold]:', sorted_negated_gold_list)\n #print('Negated [Test]:', sorted_negated_test_list)\n\n\n # Resolve Terminals if Scope on a complex graph\n def resolve_non_terminals(idref):\n \"\"\" This function resolves a complex gold graph to\n a simple flat list of tokens.\n \"\"\"\n nonterminal = s_gold.find(id=idref)\n edges = nonterminal.find_all('edge')\n edge_words = []\n for edge in edges:\n e_id = edge.get('idref')\n if s_gold.find(id=e_id).get('word') is not None:\n try:\n edge_word = s_gold.find(id=e_id).get('word').lower()\n edge_words.append(edge_word)\n except:\n pass\n if s_gold.find(id=e_id).get('word') is None:\n 
edge_words.append(resolve_non_terminals(e_id))\n\n return edge_words\n\n def resolve_non_terminals_test(idref):\n \"\"\" This function resolves a complex test graph to\n a simple flat list of tokens.\n \"\"\"\n nonterminal = s_test.find(id=idref)\n edges = nonterminal.find_all('edge')\n edge_words = []\n for edge in edges:\n e_id = edge.get('idref')\n if s_test.find(id=e_id).get('word') is not None:\n try:\n edge_word = s_test.find(id=e_id).get('word').lower()\n edge_words.append(edge_word)\n except:\n pass\n if s_test.find(id=e_id).get('word') is None:\n edge_words.append(resolve_non_terminals(e_id))\n\n return edge_words\n\n # Scope\n if item[0].find('fe', {'name' : SCOPE_TAG_NAME}):\n scope_gold = item[0].find('fe', {'name' : SCOPE_TAG_NAME})\n scope_gold_fenodes = scope_gold.find_all('fenode')\n for s_g in scope_gold_fenodes:\n s_id = s_g.get('idref')\n if s_gold.find(id=s_id).get('word') is not None:\n try:\n scope_word = s_gold.find(id=s_id).get('word').lower()\n scope_gold_list.append(scope_word)\n except:\n pass\n if s_gold.find(id=s_id).get('word') is None:\n scope_gold_list.append(resolve_non_terminals(s_id))\n else:\n pass\n\n if item[1].find('fe', {'name' : SCOPE_TAG_NAME}):\n scope_test = item[1].find('fe', {'name' : SCOPE_TAG_NAME})\n scope_test_fenodes = scope_test.find_all('fenode')\n for s_t in scope_test_fenodes:\n s_id = s_t.get('idref')\n if s_test.find(id=s_id).get('word') is not None:\n try:\n scope_word = s_test.find(id=s_id).get('word').lower()\n scope_test_list.append(scope_word)\n except:\n pass\n elif s_test.find(id=s_id).get('word') is None:\n scope_test_list.append(resolve_non_terminals_test(s_id))\n else:\n scope_test_list.append('')\n\n elif item[1].find('fe', {'name' : SCOPE_TAG_NAME}):\n scope_test = item[1].find('fe', {'name' : SCOPE_TAG_NAME})\n scope_test_fenodes = scope_test.find_all('fenode')\n for s_t in scope_test_fenodes:\n s_id = s_t.get('idref')\n if s_test.find(id=s_id).get('word') is not None:\n try:\n scope_word = s_test.find(id=s_id).get('word').lower()\n scope_test_list.append(scope_word)\n except:\n pass\n if s_test.find(id=s_id).get('word') is None:\n scope_test_list.append(resolve_non_terminals_test(s_id))\n else:\n pass\n\n if item[0].find('fe', {'name' : SCOPE_TAG_NAME}):\n scope_gold = item[1].find('fe', {'name' : SCOPE_TAG_NAME})\n scope_gold_fenodes = scope_gold.find_all('fenode')\n for s_g in scope_gold_fenodes:\n s_id = s_g.get('idref')\n if s_gold.find(id=s_id).get('word') is not None:\n try:\n scope_word = s_gold.find(id=s_id).get('word').lower()\n scope_gold_list.append(scope_word)\n except:\n pass\n if s_gold.find(id=s_id).get('word') is None:\n scope_gold_list.append(resolve_non_terminals(s_id))\n else:\n pass\n else:\n scope_gold_list.append('')\n\n # Sort lists\n sorted_scope_gold_list = sorted(flatten(scope_gold_list))\n sorted_scope_test_list = sorted(flatten(scope_test_list))\n\n #print('\\nScope [Gold]:', sorted_scope_gold_list)\n #print('Scope [Test]:', sorted_scope_test_list)\n\n # If lists are same length, check if items are same\n if len(sorted_scope_gold_list) == len(sorted_scope_test_list):\n sorted_scope_test_list_intersection = set(sorted_scope_gold_list).intersection(sorted_scope_test_list)\n sorted_scope_test_list_intersection = list(sorted_scope_test_list_intersection)\n if len(sorted_scope_test_list_intersection) < len(sorted_scope_test_list):\n difference = len(sorted_scope_test_list) - len(sorted_scope_test_list_intersection)\n empty_element = 0\n\n while empty_element < difference:\n 
sorted_scope_test_list_intersection.append('')\n empty_element = empty_element + 1\n \n sorted_scope_test_list = sorted_scope_test_list_intersection\n\n # If lists are different lengths, add empty elements\n elif len(sorted_scope_gold_list) > len(sorted_scope_test_list):\n difference = len(sorted_scope_gold_list) - len(sorted_scope_test_list)\n empty_element = 0\n\n while empty_element < difference:\n sorted_scope_test_list.append('')\n empty_element = empty_element + 1\n\n elif len(sorted_scope_test_list) > len(sorted_scope_gold_list):\n difference = len(sorted_scope_test_list) - len(sorted_scope_gold_list)\n empty_element = 0\n\n while empty_element < difference:\n sorted_scope_gold_list.append('')\n empty_element = empty_element + 1\n\n\n # Align items in the lists for sklearn, set 1 for matched items, else set 0\n sorted_target_gold_list_normalized = [1 if element in sorted_target_gold_list and not element == \"\" else 0 for element in sorted_target_gold_list]\n sorted_target_test_list_normalized = [1 if element in sorted_target_gold_list else 0 for element in sorted_target_test_list]\n\n sorted_focus_gold_list_normalized = [1 if element in sorted_focus_gold_list and not element == \"\" else 0 for element in sorted_focus_gold_list]\n sorted_focus_test_list_normalized = [1 if element in sorted_focus_gold_list else 0 for element in sorted_focus_test_list]\n\n sorted_negated_gold_list_normalized = [1 if element in sorted_negated_gold_list and not element == \"\" else 0 for element in sorted_negated_gold_list]\n sorted_negated_test_list_normalized = [1 if element in sorted_negated_gold_list else 0 for element in sorted_negated_test_list]\n\n sorted_scope_gold_list_normalized = [1 if element in sorted_scope_gold_list and not element == \"\" else 0 for element in sorted_scope_gold_list]\n sorted_scope_test_list_normalized = [1 if element in sorted_scope_gold_list else 1 if not element == \"\" else 0 for element in sorted_scope_test_list]\n\n #print(sorted_scope_gold_list_normalized)\n #print(sorted_scope_test_list_normalized)\n\n\n # Sklearn calculations\n #target_precision_scores = target_precision_scores + precision_score(sorted_target_gold_list_normalized, sorted_target_test_list_normalized, average='weighted')\n #target_recall_scores = target_recall_scores + recall_score(sorted_target_gold_list_normalized, sorted_target_test_list_normalized, average='weighted')\n target_f1_scores = target_f1_scores + f1_score(sorted_target_gold_list_normalized, sorted_target_test_list_normalized, average='weighted')\n #target_jaccard_scores = target_jaccard_scores + jaccard_similarity_score(sorted_target_gold_list, sorted_target_test_list)\n\n #focus_precision_scores = focus_precision_scores + precision_score(sorted_focus_gold_list_normalized, sorted_focus_test_list_normalized, average='weighted')\n #focus_recall_scores = focus_recall_scores + recall_score(sorted_focus_gold_list_normalized, sorted_focus_test_list_normalized, average='weighted')\n focus_f1_scores = focus_f1_scores + f1_score(sorted_focus_gold_list_normalized, sorted_focus_test_list_normalized, average='weighted')\n #focus_jaccard_scores = focus_jaccard_scores + jaccard_similarity_score(sorted_focus_gold_list, sorted_focus_test_list)\n\n #negated_precision_scores = negated_precision_scores + precision_score(sorted_negated_gold_list_normalized, sorted_negated_test_list_normalized, average='weighted')\n #negated_recall_scores = negated_recall_scores + recall_score(sorted_negated_gold_list_normalized, sorted_negated_test_list_normalized, 
average='weighted')\n negated_f1_scores = negated_f1_scores + f1_score(sorted_negated_gold_list_normalized, sorted_negated_test_list_normalized, average='weighted')\n #negated_jaccard_scores = negated_jaccard_scores + jaccard_similarity_score(sorted_negated_gold_list, sorted_negated_test_list)\n\n scope_precision_scores = scope_precision_scores + precision_score(sorted_scope_gold_list_normalized, sorted_scope_test_list_normalized, average='weighted')\n scope_recall_scores = scope_recall_scores + recall_score(sorted_scope_gold_list_normalized, sorted_scope_test_list_normalized, average='weighted')\n scope_f1_scores = scope_f1_scores + f1_score(sorted_scope_gold_list_normalized, sorted_scope_test_list_normalized, average='weighted')\n scope_jaccard_scores = scope_jaccard_scores + jaccard_similarity_score(sorted_scope_gold_list, sorted_scope_test_list)\n\n\n print('\\n=============================')\n print('====== EVALUATION for:', chapter_input_test_name, '======')\n print('Total Sentences:', sentence_count,\n '\\nNegation Gold frames:', gold_frames_count,\n '\\nNegation Test frames:', test_frames_count, '\\n')\n\n print('----- CUEWORDS -----')\n #print('Precision:\\t', target_precision_scores / gold_frames_count)\n #print('Recall:\\t', target_recall_scores / gold_frames_count)\n print('F1 score:\\t', target_f1_scores / gold_frames_count)\n #print('Jaccard similarity:\\t', target_jaccard_scores / gold_frames_count)\n\n print('\\n----- FOCUS -----')\n #print('Precision:\\t', focus_precision_scores / gold_frames_count)\n #print('Recall:\\t', focus_recall_scores / gold_frames_count)\n print('F1 score:\\t', focus_f1_scores / gold_frames_count)\n #print('Jaccard similarity:\\t', focus_jaccard_scores / gold_frames_count)\n\n print('\\n----- NEGATED -----')\n #print('Precision:\\t', negated_precision_scores / gold_frames_count)\n #print('Recall:\\t', negated_recall_scores / gold_frames_count)\n print('F1 score:\\t', negated_f1_scores / gold_frames_count)\n #print('Jaccard similarity:\\t', negated_jaccard_scores / gold_frames_count)\n\n print('\\n----- SCOPE -----\\nScope Gold frames:', scope_gold_frames_count, '\\nScope Test frames:', scope_test_frames_count, '\\n')\n print('Precision:\\t', scope_precision_scores / scope_test_frames_count)\n print('Recall:\\t', scope_recall_scores / scope_test_frames_count)\n print('F1 score:\\t', scope_f1_scores / scope_test_frames_count)\n print('Jaccard similarity:\\t', scope_jaccard_scores / scope_test_frames_count)\n\n print('Done!')", "def build(filename=\"JMdict_e.gz\", output_filename=DATABASE_FILENAME):\n # NOTE: The JMdict XML file contains XML entities, that are expanded when\n # parsed using Python's stdlib xml.etree.ElementTree like so:\n # ElementTree.parse(f). That is undesired behavior for our use-case. Oshi\n # needs to parse the short entity string, for example &adj-i; should be\n # \"adj-i\" instead of \"adjective (keiyoushi)\". 
That's why it uses an external\n # xml parser: lxml that allows you to specify whether to expand entites.\n extension = path.splitext(filename)[1].lower()\n parser = etree.XMLParser(resolve_entities=False)\n if extension == \".gz\":\n with gzip.open(filename) as f:\n tree = etree.parse(f, parser)\n elif extension == \".xml\":\n tree = etree.parse(filename, parser)\n else:\n raise ValueError(\"File extension not supported: \" + extension)\n\n entries = []\n # variables starting with x contain xml element(s)\n for xentry in tree.getroot():\n entry = {}\n entry[\"writings\"] = [x.find('keb').text for x in xentry.findall('k_ele')]\n entry[\"readings\"] = [x.find('reb').text for x in xentry.findall('r_ele')]\n xsenses = xentry.findall('sense')\n senses = []\n # last_tags will contain a reference to previously found tags (JMdict\n # specifies that when pos is empty, the previous one should be used)\n last_tags = []\n for xsense in xsenses:\n tags = []\n xtags = xsense.findall('pos') # + xsense.findall('misc')\n for xtag in xtags:\n match = re.search(r'&([\\w-]+?);', etree.tostring(xtag, encoding=\"utf-8\").decode('utf-8') or \"\")\n if match: tags.append(match.group(1))\n glosses = [x.text for x in xsense.findall('gloss')]\n senses.append({\"glosses\": glosses, \"tags\": tags or last_tags})\n last_tags = tags or last_tags\n entry[\"senses\"] = senses\n entries.append(entry)\n with open(output_filename, 'w', encoding='utf-8') as f:\n json.dump(entries, f, ensure_ascii=False)", "def creation_srcmdl(dir_cat,SourceRA,SourceDec,SourceROI,distmin,name,outputfile,emin,emax):\n\tf_liste_sour=\"a.txt\"\n\n\tlect_ca(dir_cat,SourceRA,SourceDec,SourceROI,distmin,name,f_liste_sour,name)\n\tXML_EC_PL(name, f_liste_sour, outputfile, emin,emax)\n\tos.system(\"rm -rf a.txt\")", "def test_write(self):\n cases = {\n self.test_eac + \"NE00401.xml\": True,\n self.test_eac + \"NE01501.xml\": False,\n self.test_eac + \"NE01302.xml\": True,\n }\n metadata_url = 'http://www.example.com/metadata.xml'\n presentation_url = 'http://www.example.com/presentation.html'\n for case in cases:\n doc = EacCpf.EacCpf(case, metadata_url, presentation_url)\n self.assertNotEqual(doc, None)\n path = doc.write(self.temp)\n self.assertEquals(os.path.exists(path), True)\n # read the file and try to extract the attributes\n try:\n tree = etree.parse(path)\n ns = {\n EacCpf.DOC_KEY: EacCpf.DOC_NS,\n EacCpf.ESRC_KEY: EacCpf.ESRC_NS,\n }\n # get the url to the metadata file\n metadata = tree.xpath(\"//doc:eac-cpf/@\" + EacCpf.ESRC_KEY + \":metadata\", namespaces=ns)\n self.assertNotEqual(metadata, None)\n self.assertEqual(metadata[0], metadata_url)\n # get the url to the presentation file\n presentation = tree.xpath(\"//doc:eac-cpf/@\" + EacCpf.ESRC_KEY + \":presentation\", namespaces=ns)\n self.assertNotEqual(presentation, None)\n self.assertEqual(presentation[0], presentation_url)\n # get the url to the source file\n source = tree.xpath(\"//doc:eac-cpf/@\" + EacCpf.ESRC_KEY + \":source\", namespaces=ns)\n self.assertNotEqual(source, None)\n self.assertEqual(source[0], case)\n except:\n msg = \"Failed to complete parsing of {0}\".format(case)\n self.log.error(msg, exc_info=True)\n self.fail(msg)", "def test_xml_from_file(self):\n j2k = Jp2k(self.j2kfile)\n\n self.jp2h.box = [self.ihdr, self.colr]\n\n xmlb = glymur.jp2box.XMLBox(filename=self.xmlfile)\n boxes = [self.jp2b, self.ftyp, self.jp2h, xmlb, self.jp2c]\n with tempfile.NamedTemporaryFile(suffix=\".jp2\") as tfile:\n j2k.wrap(tfile.name, boxes=boxes)\n jp2 = Jp2k(tfile.name)\n\n 
output_boxes = [box.box_id for box in jp2.box]\n self.assertEqual(output_boxes, ['jP ', 'ftyp', 'jp2h', 'xml ',\n 'jp2c'])\n\n elts = jp2.box[3].xml.findall('country')\n self.assertEqual(len(elts), 3)\n\n neighbor = elts[1].find('neighbor')\n self.assertEqual(neighbor.attrib['name'], 'Malaysia')\n self.assertEqual(neighbor.attrib['direction'], 'N')", "def test_non_regression(self):\n main(\"Source_mobile.xml\", [[\"engine\", \"A320.xml\", \"A320.csv\"]], \"Resultat.xml\", gui=False)\n compare_xml_results(\"Resultat.xml\", \"Reference.xml\", self)", "def configureGeoData(data,resultDir):\n xmlfiles = findfiles(['*.xml'],where=data[\"folder\"])\n xmlurls=[]\n fgdclist=[]\n #xmlselect=[]\n for xml in xmlfiles:\n shutil.copy(os.path.join(data['folder'],xml),resultDir)\n xmlurls.append(os.path.join(resulturl,resultDir.split('/')[-1],xml))\n #import xmltodict\n localfilename=os.path.join(data['folder'],xml)\n #xmlselect.append({\"file\":localfilename,\"url\":os.path.join(resulturl,resultDir.split('/')[-1],xml)})\n with open(os.path.join(data['folder'],xml)) as fd:\n stringxml = fd.read()\n #if 'FGDC' in stringxml.upper():\n fgdc={}\n fgdc['url']=os.path.join(resulturl,resultDir.split('/')[-1],xml)\n doc = xmltodict.parse(stringxml,cdata_key='text',attr_prefix='',dict_constructor=dict)\n fgdc['data']=doc\n fgdc['file']=localfilename\n fgdclist.append(fgdc)\n data['xmlurls']=xmlurls\n data['xml']={\"urls\":xmlurls,\"fgdc\":fgdclist,\"files\":xmlfiles}\n return data", "def test_01_Xml(self):\n l_xml = self.m_xml.light_sect[1]\n # print(PrettyFormatAny.form(l_xml, 'C4-01-A - XML'))\n self.assertEqual(l_xml.attrib['Name'], TESTING_LIGHT_NAME_1)\n self.assertEqual(l_xml.find('DeviceFamily').text, TESTING_DEVICE_FAMILY_UPB)", "def main(*args):\r\n print(START_MESSAGE)\r\n print(\"Script Location:\", location)\r\n print(\"Arguments Passed:\", args)\r\n\r\n root = ET.parse(xmlfile).getroot()\r\n keys = []\r\n out = \"\"\r\n\r\n for child in root[1]:\r\n out += child.attrib['Name'] + \";\" + child[0].text + \"\\n\"\r\n\r\n with open(outputfile, 'w') as f:\r\n f.write(out)", "def prepare_xml(original_xml, mangled_xml):\n in_handle = open(original_xml)\n footer = \" </BlastOutput_iterations>\\n</BlastOutput>\\n\"\n header = \"\"\n while True:\n line = in_handle.readline()\n if not line:\n #No hits?\n stop_err(\"Problem with XML file?\")\n if line.strip() == \"<Iteration>\":\n break\n header += line\n\n if \"<BlastOutput_program>blastx</BlastOutput_program>\" in header:\n print \"BLASTX output identified\"\n elif \"<BlastOutput_program>blastp</BlastOutput_program>\" in header:\n print \"BLASTP output identified\"\n else:\n in_handle.close()\n stop_err(\"Expect BLASTP or BLASTX output\")\n\n out_handle = open(mangled_xml, \"w\")\n out_handle.write(header)\n out_handle.write(line)\n count = 1\n while True:\n line = in_handle.readline()\n if not line:\n break\n elif line.strip() == \"<Iteration>\":\n #Insert footer/header\n out_handle.write(footer)\n out_handle.write(header)\n count += 1\n out_handle.write(line)\n\n out_handle.close()\n in_handle.close()\n print \"Input has %i queries\" % count", "def parse_CRAFT(kb_data):\n\n print(\"Parsing CRAFT corpus...\")\n corpus_dir = str()\n \n if kb_data.kb == \"chebi\":\n corpus_dir = \"./retrieved_data/corpora/CRAFT-4.0.1/concept-annotation/CHEBI/CHEBI/knowtator/\"\n \n elif kb_data.kb == \"go_bp\":\n corpus_dir = \"./retrieved_data/corpora/CRAFT-4.0.1/concept-annotation/GO_BP/GO_BP/knowtator/\"\n\n output_CRAFT = dict()\n \n for document in os.listdir(corpus_dir): 
\n root = ET.parse(corpus_dir + document)\n file_id = document.strip('.txt.knowtator.xml')\n annotations = dict()\n\n for annotation in root.iter(\"annotation\"):\n annotation_id = annotation.find('mention').attrib['id']\n annotation_text = annotation.find('spannedText').text\n start_pos, end_pos = annotation.find('span').attrib['start'], annotation.find('span').attrib['end']\n annotations[annotation_id] = [annotation_text, start_pos, end_pos] \n \n for classMention in root.iter(\"classMention\"):\n classMention_id = classMention.attrib['id']\n annotation_values = annotations[classMention_id]\n kb_id = classMention.find('mentionClass').attrib['id']\n \n if kb_id in kb_data.child_to_parent.keys(): # Consider only KB concepts with ONE direct ancestor\n direct_ancestor = kb_data.child_to_parent[kb_id]\n annotation = (annotation_values[0], annotation_values[1], \n annotation_values[2], kb_id, direct_ancestor) \n output_CRAFT = add_annotation_to_output_dict(file_id, annotation, output_CRAFT)\n \n print(\"...Done!\")\n return output_CRAFT", "def build_corpus_questions(criteria_incl_question=True, criteria_incl_snip=False, criteria_incl_long=False, level=0):\r\n\r\n\tprint('\\nbuilding questions and answers')\r\n\r\n\tif load_corpus_questions():\r\n\t\treturn\r\n\r\n\timport xml.etree.ElementTree as ET\r\n\r\n\tquestion_count = 0\r\n\tno_abstract_tag = 0\r\n\tno_abstract_file = 0\r\n\tlong_count = 0\r\n\t\r\n\tglobal search_criteria_dict, solution_dict, linked_abstracts_dict\r\n\t\r\n\tsearch_criteria_dict = collections.defaultdict(list)\r\n\tsolution_dict = collections.defaultdict(list)\r\n\tlinked_abstracts_dict = collections.defaultdict(list)\r\n\tcommon_map_dict = collections.defaultdict(list)\r\n\t\r\n\ttree = ET.parse(paths.path_data_questions)\r\n\troot = tree.getroot()\r\n\tfor record in root.findall('record'):\r\n\t\trecord_id = record.get('id')\r\n\t\tquestion_text = preprocess_document(record.find('question').text,True)\r\n\r\n\t\tif level == 0:\r\n\t\t\tkey = record_id # key\r\n\t\t\r\n\t\tanswer = record.find('answer')\r\n\t\tif answer is not None:\r\n\t\t\tfor s in answer.findall('snip'):\r\n\t\t\t\tif s is not None:\r\n\t\t\t\t\tsnip_id = s.get('id')\r\n\t\t\t\t\tsnip_text = preprocess_document(s.find('sniptext').text,True)\r\n\t\t\t\t\t\r\n\t\t\t\t\tif level == 1:\r\n\t\t\t\t\t\tkey = record_id + '_' + snip_id # key\r\n\t\t\t\t\t\r\n\t\t\t\t\tfor i,l in enumerate(s.findall('long')):\r\n\t\t\t\t\t\tif l is not None:\r\n\t\t\t\t\t\t\tlong_id = l.get('id')\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\tif level == 2:\r\n\t\t\t\t\t\t\t\tkey = record_id + '_' + snip_id + '_' + long_id # key\r\n\t\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\tif criteria_incl_question:\r\n\t\t\t\t\t\t\t\tfor x in question_text:\r\n\t\t\t\t\t\t\t\t\tsearch_criteria_dict[key].append(x) # question\r\n\t\t\t\t\t\t\tif criteria_incl_snip:\r\n\t\t\t\t\t\t\t\tfor x in snip_text:\r\n\t\t\t\t\t\t\t\t\tsearch_criteria_dict[key].append(x) # snip\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\tlong_text = l.find('longtext')\r\n\t\t\t\t\t\t\tif long_text is not None:\r\n\t\t\t\t\t\t\t\tlong_text = preprocess_document(long_text.text,True)\r\n\t\t\t\t\t\t\t\tfor x in long_text:\r\n\t\t\t\t\t\t\t\t\tsolution_dict[key].append(x) # long - answer\r\n\t\t\t\t\t\t\t\tif criteria_incl_long:\r\n\t\t\t\t\t\t\t\t\tfor x in long_text:\r\n\t\t\t\t\t\t\t\t\t\tsearch_criteria_dict[key].append(x) # long - search\r\n\r\n\t\t\t\t\t\t\tif key not in search_criteria_dict.keys():\r\n\t\t\t\t\t\t\t\tsearch_criteria_dict[key].append('')\r\n\r\n\t\t\t\t\t\t\tlong_refs = 
l.findall('ref')\r\n\t\t\t\t\t\t\tfor long_ref in long_refs:\r\n\t\t\t\t\t\t\t\tabstract = long_ref.get('abstract')[10:]\r\n\t\t\t\t\t\t\t\tabstract_path = paths.path_data_abstracts + '/' + abstract\r\n\t\t\t\t\t\t\t\tabstract_sentences = abstracts_dict[abstract]\r\n\t\t\t\t\t\t\t\tlinked_abstracts_dict[key].append(abstract) # linked abstracts\r\n\t\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\t\tlong_count += 1\r\n\t\t\t\t\t\t\t\t\r\n\t\tquestion_count += 1\r\n\t\t# print(str(question_count) + ' : ' + str(question_text) + ' : ' + str(no_abstract_file) + ' : ' + str(no_abstract_tag) + ' : ' + str(long_count))\r\n\r\n\tpickle.dump(search_criteria_dict,open(paths.path_data_questions_pickle,\"wb\"))\r\n\tpickle.dump(solution_dict,open(paths.path_data_answers_pickle,\"wb\"))\r\n\tpickle.dump(linked_abstracts_dict,open(paths.path_data_linkedabstracts_pickle,\"wb\"))\r\n\t\r\n\tprint(len(search_criteria_dict))\r\n\tprint(len(solution_dict))\r\n\tprint(len(linked_abstracts_dict))\r\n\t\r\n\tprint('\\ncorpus build complete')", "def test_02_Xml1(self):\n l_xml = self.m_xml.light_sect[1]\n print(PrettyFormatAny.form(l_xml, 'C1-02-A - XML'))\n self.assertEqual(l_xml.attrib['Name'], TESTING_LIGHT_NAME_1)\n self.assertEqual(l_xml.find('DeviceFamily').text, TESTING_DEVICE_FAMILY_UPB)", "def test_load_quakeML():\n # Check one cmt file\n with tempfile.TemporaryDirectory() as tmp_dir:\n\n # Cmtfile path\n cmtfile = os.path.join(DATA_DIR, \"testCMT\")\n\n # create new directory\n new_xml_path = os.path.join(tmp_dir, \"tests.xml\")\n xml = read_events(cmtfile)\n xml.write(new_xml_path, format=\"QUAKEML\")\n\n assert(os.path.exists(new_xml_path)\n and os.path.isfile(new_xml_path))\n\n print(\"QuakeML\\n\", CMTSource.from_quakeml_file(new_xml_path))\n print(\"CMT\\n\", CMTSource.from_CMTSOLUTION_file(cmtfile))\n assertDictAlmostEqual(CMTSource.from_quakeml_file(new_xml_path),\n CMTSource.from_CMTSOLUTION_file(cmtfile))", "def GenerateXML(dictionary, fileName=\"labelling.xml\") : \n root = gfg.Element(\"annotation\") \n #the big section is called Annotation\n for key in dictionary:\n #for every polygon list in inside object witho subelement name and attributes and the type \"polygon\"\n objectElement = gfg.Element(\"object\") \n root.append(objectElement) \n subElement1 = gfg.SubElement(objectElement, \"name:\".strip(\":\"))\n subElement1.text = str(dictionary[key][\"name\"])\n subElement2 = gfg.SubElement(objectElement, \"attributes\".strip(\":\"))\n subElement2.text = str(dictionary[key][\"attributes\"])\n subElement3 = gfg.SubElement(objectElement, \"polygon\")\n \n for i in range(0, len(dictionary[key])-2):\n #for every vertex of the polygon list it's rounded x, y on xml\n SubInsidePolygon = gfg.SubElement(subElement3, \"pt\")\n sub_x = gfg.SubElement(SubInsidePolygon, \"x\")\n sub_y = gfg.SubElement(SubInsidePolygon, \"y\")\n sub_x.text = str(int(round(dictionary[key][\"x_y_\" + str(i)][0])))\n sub_y.text = str(int(round(dictionary[key][\"x_y_\" + str(i)][1])))\n tree = gfg.ElementTree(root) \n #create the xml tree\n with open (fileName, \"wb\") as files : \n tree.write(files) \n #if xml does not exist create one otherwise rewrite to it", "def __createXMLFileForClear():\r\n #description\r\n #Root\r\n clear_root = Element('clear-users-request', {'xmlns':SYMPLECTIC_XMLNS_URI,} )\r\n #Feed\r\n SubElement(clear_root, 'feed-id').text = IMPORT_USERS_FEED_ID\r\n #Convert to ElementTree and write xml version to file\r\n xml_filename = SYMPLECTIC_LOCAL_XML_FOLDER + SYMPLECTIC_LOCAL_USER_FOLDER + 
SYMPLECTIC_LOCAL_USER_CLEARFILE\r\n ElementTree(clear_root).write(xml_filename)\r\n #Return xml filename\r\n return xml_filename", "def setUpClass(cls):\n import os\n for root in cls.prod_s2_ssc:\n os.makedirs(root)\n metadata = root.split(\".\")[0] + \".HDR\"\n TestFunctions.touch(metadata)\n for root in cls.prod_s2_mus:\n os.makedirs(root)\n metadata = os.path.join(root, root + \"_MTD_ALL.xml\")\n TestFunctions.touch(metadata)\n for root in cls.prod_s2_nat:\n os.makedirs(root)\n metadata = os.path.join(root, \"MTD_MSIL1C.xml\")\n TestFunctions.touch(metadata)", "def xml_parser_dielectrics(request, tmpdir_factory):\n testdir = os.path.dirname(__file__)\n xmlfile = testdir + \"/dielectrics.xml\"\n tmpfile = str(tmpdir_factory.mktemp('data').join('basic_trunc.xml'))\n xml_truncate(request.param, xmlfile, tmpfile)\n xml = vasprun.Xml(tmpfile, event = False)\n \n return xml", "def getXML(self):\n nodes = list(self.nodes(data=True))\n nodes.sort()\n node_string = ''\n for n in nodes:\n attribute_string = ''\n keys = list(n[1].keys())\n keys.sort()\n for k in keys:\n attribute_string += \"\"\"<{0}> {1} </{2}>\\n\"\"\".format(k, n[1][k], k)\n modification_string = ''\n modified_by = self.predecessors(n[0])\n if modified_by:\n for mod in modified_by:\n modification_string += \"\"\"<modified_by>\\n\"\"\"\n modification_string += \\\n \"\"\"<modifyingNode> %s </modifyingNode>\\n\"\"\"%mod.getTagID()\n modification_string += \\\n \"\"\"<modifyingCategory> %s </modifyingCategory>\\n\"\"\"%mod.getCategory()\n modification_string += \"\"\"</modified_by>\\n\"\"\"\n modifies = self.successors(n[0])\n if modifies:\n for modified in modifies:\n modification_string += \"\"\"<modifies>\\n\"\"\"\n modification_string += \\\n \"\"\"<modifiedNode> {0} </modifiedNode>\\n\"\"\".format(modified.getTagID())\n modification_string += \\\n \"\"\"</modifies>\\n\"\"\"\n node_string += \\\n NODE_XML_SKEL.format(attribute_string+\"{0}\".format(n[0].getXML()) +\\\n modification_string)\n edges = list(self.edges(data=True))\n edges.sort()\n edge_string = ''\n for edge in edges:\n keys = list(edge[2].keys())\n keys.sort()\n attribute_string = ''\n for key in keys:\n attribute_string += \"\"\"<{0}> {1} </{2}>\\n\"\"\".format(key, edge[2][key], key)\n edge_string += \"{0}\".format(EDGE_XML_SKEL.format(edge[0].getTagID(),\n edge[1].getTagID(),\n attribute_string))\n\n return CONTEXT_MARKUP_XML_SKEL.format(xmlScrub(self.getRawText()),\n xmlScrub(self.getText()),\n node_string,\n edge_string)" ]
[ "0.61072654", "0.5717132", "0.5615994", "0.5543528", "0.55028045", "0.53621995", "0.5347263", "0.5297309", "0.5269815", "0.5254691", "0.52032775", "0.5177183", "0.5130147", "0.5118681", "0.51131713", "0.51111734", "0.5094071", "0.50852966", "0.5085046", "0.50843346", "0.50589895", "0.5041263", "0.50376314", "0.50374556", "0.5014333", "0.49962717", "0.49937144", "0.49916235", "0.49897355", "0.49828026" ]
0.72865844
0
Update the config file
def update(self): self.save_config_file()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def conf_update(self):\n pass", "def updateConfig(self):\n # Make sure to keep the default values in place.\n if self.newConfig['sensor'] == 0:\n self.newConfig['sensor'] = self.config['sensor']\n if self.newConfig['camera'] == 0:\n self.newConfig['camera'] = self.config['camera']\n if not self.newConfig['auto']['times']:\n self.newConfig['auto']['times'] = self.config['auto']['times']\n if not self.newConfig['auto']['days']:\n self.newConfig['auto']['days'] = self.config['auto']['days']\n\n # Show the changes.\n if self.verbosity >= 1:\n print('%s: Updating configuration file...' % self.feederName)\n try:\n for key in self.config.keys():\n if type(self.config[key]) is dict:\n for subkey in self.config[key].keys():\n if self.config[key][subkey] != self.newConfig[key][subkey]:\n print('%s: Updating %s from %s to %s.' % (self.feederName, subkey, self.config[key][subkey], self.newConfig[key][subkey]))\n elif self.config[key] != self.newConfig[key]:\n print('%s: Updating %s from %s to %s.' % (self.feederName, key, self.config[key], self.newConfig[key]))\n except ValueError:\n if self.verbosity >= 1:\n print('%s: Configuration file does not contain a valid JSON object.' % self.feederName)\n if self.verbosity == 2:\n print('%s: Overwriting configuration file to: %s.' % (self.feederName, self.config))\n\n # Change the configuration file.\n self.config = self.newConfig\n self.writeConfig()", "def updateconfig(self):\n\n # Initialize the yaml data\n ydata = {\"metadata\": self._metadata, \"nodes\": self._nodes}\n\n # Write the system config file\n filename = self._rootdir + self._metadata[\"system_config_file\"]\n with open(filename, \"w\") as yamlfile:\n yaml.dump(ydata, yamlfile)", "def __update(self):\n if self.__file:\n target_file = open(self.__file)\n for attr in dir(self):\n if not attr.startswith(\"_\") and \\\n (self.__overwrite or (attr not in self.__exclude)) \\\n and not self.__is_attr_callable(attr):\n try:\n delattr(self, attr)\n except AttributeError:\n pass\n pool = yaml.load(target_file)\n target_file.close()\n if pool: # could be None\n for key, val in pool.iteritems():\n if not key.startswith(\"_\") and \\\n (self.__overwrite or (key not in self.__exclude)) \\\n and not self.__is_attr_callable(key):\n setattr(self, key, val)\n if hasattr(self, 'log_config_file_changes')\\\n and self.log_config_file_changes:\n logging.getLogger(__name__).info(\"Config file has updated.\")", "def update_config(self, data):\n self.config.data = dict_merge(self.config.data, data)\n self.config.save()", "def config_update(self, update: io.BytesIO) -> None:\n self.__logger.debug('Eva.config_update called')\n return self.__http_client.config_update(update)", "def update_config(self, config):\n return self._update_config(\"config\", config)", "def refresh_config(self):\n with open(config_name, 'rb') as f:\n self.CONFIG = simplejson.load(f)\n\n return self", "def _auto_update_configuration(self) -> None:\n self.config = rasa.utils.train_utils.update_confidence_type(self.config)\n rasa.utils.train_utils.validate_configuration_settings(self.config)\n self.config = rasa.utils.train_utils.update_similarity_type(self.config)\n self.config = rasa.utils.train_utils.update_evaluation_parameters(self.config)", "def update_conf_file():\n filepath = remote_dir + \"/apache2/conf/httpd.conf\"\n fabric.contrib.files.sed(filepath, 'myproject', project_name)", "def reload_config(self):\n pass", "def update_config(self, kv: dict):\n self._configs.update(kv)\n self._save()", "def _refreshconfig(self):\n self.config = 
ConfigGenerator(os.path.join(self.rundir, const.CONFIG_FILE))", "def refresh_configuration(self):\n pass", "def setup_configuration_file(self):\n\n with open(self.config_path, \"w+\") as f_config:\n\n f_config.write(get_configuration_file_form())", "def update_config_file(**kwargs):\n config_file = try_read_file()\n config_file.update(kwargs)\n config_file = {key: value for key, value in config_file.items() if value is not None}\n logging.info('open config file %s', config_file_path)\n with open(config_file_path, 'w') as f:\n logging.info('begin io %s', config_file_path)\n json.dump(config_file, f, indent=4)\n logging.info('end io %s', config_file_path)", "def use_config_file(self):\n self.config_file = self.find_config_file()\n if self.config_file:\n self.apply_config_file(self.config_file)", "def reload(self):\n self.read(self._cfg_path)", "def testUpdateConfigFile(self):\n # Test update project field.\n gcp_setup_runner.UpdateConfigFile(self.cfg_path, \"project\",\n \"test_project\")\n cfg = config.AcloudConfigManager.LoadConfigFromProtocolBuffer(\n open(self.cfg_path, \"r\"), user_config_pb2.UserConfig)\n self.assertEqual(cfg.project, \"test_project\")\n self.assertEqual(cfg.ssh_private_key_path, \"\")\n # Test add ssh key path in config.\n gcp_setup_runner.UpdateConfigFile(self.cfg_path,\n \"ssh_private_key_path\", \"test_path\")\n cfg = config.AcloudConfigManager.LoadConfigFromProtocolBuffer(\n open(self.cfg_path, \"r\"), user_config_pb2.UserConfig)\n self.assertEqual(cfg.project, \"test_project\")\n self.assertEqual(cfg.ssh_private_key_path, \"test_path\")\n # Test config is not a file\n with mock.patch(\"os.path.isfile\") as chkfile:\n chkfile.return_value = False\n gcp_setup_runner.UpdateConfigFile(self.cfg_path, \"project\",\n \"test_project\")\n cfg = config.AcloudConfigManager.LoadConfigFromProtocolBuffer(\n open(self.cfg_path, \"r\"), user_config_pb2.UserConfig)\n self.assertEqual(cfg.project, \"test_project\")", "def update(self, config_dict):\r\n self._update(config_dict, allow_new_keys=True)", "def update_config(config_file, config_base=None):\n if config_base is None:\n config_base = def_config_file\n assert(os.path.isfile(config_base))\n if not os.path.isfile(config_file):\n shutil.copy(config_base, config_file)\n cp = CisConfigParser()\n cp.read(config_file)\n miss = []\n if platform._is_win: # pragma: windows\n miss += update_config_windows(cp)\n with open(config_file, 'w') as fd:\n cp.write(fd)\n for sect, opt, desc in miss: # pragma: windows\n warnings.warn((\"Could not locate option %s in section %s.\"\n + \"Please set this in %s to: %s\")\n % (opt, sect, config_file, desc))", "def update_packages(self, config_file):\n entries = yacman.load_yaml(config_file)\n self.update(entries)\n return True", "def update_config(self, update_dict):\n self.config = recursive_merge_dicts(self.config, update_dict)", "def config_edits(configfile):\n try:\n\n # Read in the file\n filedata = None\n with open(configfile, 'r') as file:\n filedata = file.read()\n\n # Replace the target string\n filedata = filedata.replace(\n '/home/scratch01/sradanov/A2C2/NCEP/', '').replace('/home/estimr2/sradanov/Operational/', '')\n\n # Write the file out again\n with open(configfile, 'w') as file:\n file.write(filedata)\n\n LOGGER.info('configfile modified')\n except Exception:\n LOGGER.exeption('Failed to modify configfile:')\n\n return configfile", "def _do_update(self, meta, k, v):\n self.runtime.logger.info('{}: [{}] -> {}'.format(meta.in_group_config_path, k, v))\n meta.config[k] = v\n 
meta.save()", "def update_from_file(self):\n config_path = os.environ.get('MINDINSIGHT_CONFIG', '')\n if not config_path:\n return\n\n config_module = None\n\n # python:full.path.for.config.module\n if config_path.startswith('python:'):\n config_module = import_module(config_path[len('python:'):])\n\n # file:full/path/for/config.py\n elif config_path.startswith('file:'):\n config_path = config_path[len('file:'):]\n module_name = '__mindinsightconfig__'\n config_module = types.ModuleType(module_name)\n machinery = import_module('importlib.machinery')\n loader = machinery.SourceFileLoader(module_name, config_path)\n loader.exec_module(config_module)\n\n if config_module is None:\n return\n\n for setting in dir(config_module):\n if setting.isupper() and setting in self._default_settings:\n setting_value = getattr(config_module, setting)\n setattr(self, setting, setting_value)\n self._explicit_settings.add(setting)", "def save_config(self, new_config, filename=None):\n self.cfg.update(new_config)\n if filename is None:\n self.cfg.filename = self.cfg_filename\n else:\n self.cfg.filename = filename\n self.cfg.write()\n logger.info(\"Config file %s written out\" % self.cfg.filename)", "def write_config(self, filename):\n self.config.filename = filename\n self.config.write()", "def update_shed_config(self, shed_conf):\n for index, my_shed_tool_conf in enumerate(self._dynamic_tool_confs):\n if shed_conf['config_filename'] == my_shed_tool_conf['config_filename']:\n self._dynamic_tool_confs[index] = shed_conf\n self._save_integrated_tool_panel()", "def config_updated(self):\n if callable(self.on_config_updated):\n self.on_config_updated(self.config())" ]
[ "0.8264877", "0.7606795", "0.74441475", "0.726482", "0.7236753", "0.72217864", "0.7091417", "0.7060331", "0.70171785", "0.70084494", "0.6937647", "0.69036394", "0.6887495", "0.68652135", "0.6852088", "0.6795112", "0.679408", "0.67330885", "0.6727831", "0.6705917", "0.66979545", "0.6675949", "0.66608924", "0.66542983", "0.6618892", "0.65922624", "0.6573851", "0.6571257", "0.6559911", "0.65597725" ]
0.88768095
0
FILL COLUMN2 WITH MOST LIKELY VALUES BASED ON COLUMN1
def fillgaps(column1,column2,train,test): ddict={} d1=test[[column1,column2]].dropna().values d2=train[[column1,column2]].dropna().values c1=np.array(d1[:,0].tolist()+d2[:,0].tolist()) c2=np.array(d1[:,1].tolist()+d2[:,1].tolist()) for ic1 in np.unique(c1): ddict[ic1]=(c2[c1==ic1].mean(),c2[c1==ic1].std()) full_data = [train, test] for dataset in full_data: for missing in np.where(np.isnan(dataset[column2]))[0]: m,s=ddict[dataset[column1][missing]] if s<=0: dataset[column2][missing]=m else: dataset[column2][missing]=np.random.normal(loc=m,scale=s,size=1) return (train,test)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _fill_col1_val_where_col2_notna(col1, col2, val):\n fill_ser = col1.copy()\n fill_ser[col2.notna()] = val\n return col1.fillna(fill_ser)", "def fill_col(col, x):\n col.append(x)\n return col", "def merge(line):\n #Step1. Putting 0 to the end of the list.\n result = []\n for cell in line:\n if cell != 0:\n result.append(cell)\n for cell in range(line.count(0)):\n result.append(0)\n #Step2. Replaced with a tile of twice the value and a zero tile\n for cell in range(len(result)-1):\n if result[cell] == result[cell+1] and len(result) != 1:\n result[cell] += result[cell]\n result[cell+1] = 0\n #Step3. Repeat step1\n final_result = []\n for cell in result:\n if cell != 0:\n final_result.append(cell)\n for cell in range(result.count(0)):\n final_result.append(0)\n return final_result", "def fill_cells_to_the_right(nonogram, row, col):\n sth_changed = False\n\n # leeway stores a number of fillable cells to the left\n # -1 at the end returns length of line, when there is no true empty cell\n left_cells = nonogram.data.get_row(row)[:col]\n leeway = (left_cells[::-1]+[-1]).index(-1)\n\n block_length = find_min_block_length(nonogram, row, col)\n\n # filling cells enforced by minimal block length\n for position in range(col + 1, col + block_length - leeway):\n nonogram.fill_cell(row, position, 1)\n sth_changed = True\n\n return sth_changed", "def modify_d2(d1, d2):\n val_list = [i for i in d2.keys()]\n \n for key in val_list:\n for i in range(len(d2[key])):\n try:\n val = d1[d2[key][i][2]]\n d2[key][i][2] = val\n if None in d2[key][i]:\n d2[key][i].remove(None)\n except:\n pass\n return d2", "def fill_cells_to_the_left(nonogram, row, col):\n sth_changed = False\n\n # leeway stores a number of fillable cells to the right\n # -1 at the end returns length of line, when there is no true empty cell\n right_cells = nonogram.data.get_row(row)[col+1:]\n leeway = (right_cells + [-1]).index(-1)\n\n block_length = find_min_block_length(nonogram, row, col)\n\n # filling cells enforced by minimal block length\n for position in range(col + leeway + 1 - block_length, col):\n nonogram.fill_cell(row, position, 1)\n sth_changed = True\n\n return sth_changed", "def normalize(column):\n value_set = set(column)\n unique_count = len(value_set)\n if unique_count == 1:\n # skip everything in this column. 
\n return []\n elif unique_count == 2:\n zero = list(value_set)[0]\n one = list(value_set)[1]\n normalized_column = []\n for value in column:\n normalized_column.append(1 if value == one else 0)\n return [normalized_column]\n else: \n all_values = list(value_set)\n normalized_column = []\n\n # expand into multiple columns \n for index in range(len(all_values)):\n normalized_column.append([])\n\n for value in column:\n for index in range(len(all_values)):\n normalized_column[index].append(1 if value == all_values[index] else 0)\n \n return normalized_column", "def merge(line):\r\n # Create a copy of the input list line\r\n list_copy=[]\r\n #adding the none zero elements of line to list_copy\r\n for dummy_i in range(len(line)):\r\n if line[dummy_i] != 0:\r\n list_copy.append(line[dummy_i])\r\n # adding the appropriate number of zeros to match the length of list_copy and line\r\n for dummy_j in range(len(list_copy),len(line)):\r\n list_copy.append(0)\r\n \r\n # merging the tiles that have the same value\r\n for dummy_k in range(len(list_copy)-1):\r\n # checking for equal values of the adjacent tiles \r\n if list_copy[dummy_k]!=0 and list_copy[dummy_k]==list_copy[dummy_k+1]:\r\n # if equal double the value of the first tile and assign zero to second tile\r\n list_copy[dummy_k]=2*list_copy[dummy_k]\r\n list_copy[dummy_k+1]=0\r\n \r\n #shifting the rest of the values ot the tiles by one place\r\n for dummy_p in range(dummy_k+1,len(list_copy)-1):\r\n list_copy[dummy_p]=list_copy[dummy_p+1]\r\n if (len(line)>3):\r\n list_copy[-2]=list_copy[-1]\r\n list_copy[-1]=0\r\n # returning list_copy which is the answer\r\n return list_copy", "def fill_hom(patient, gene):\n\n first = 'HR_' + patient + '_First_' + gene + '_Split'\n second = 'HR_' + patient + '_Second_' + gene + '_Split'\n\n for column in data.columns:\n f = re.match(second, column)\n if f:\n data[second] = data[second].fillna(data[first])\n else:\n pass", "def fill_data(column, data):\n data[column].fillna(data[column].value_counts().index[0], inplace=True)", "def backfill(arr, arr1):\n \n arr = np.where(arr < 0.01, np.NaN, arr)\n # FIXME:\n # RuntimeWarning: invalid value encountered in less\n # arr = np.where(arr < 0.01, np.NaN, arr)\n\n x = np.isnan(arr1)\n arr1[x] = arr[x]\n return arr1", "def _update_context_no_unique_values(metadata, column, unique_values):\r\n\r\n return None", "def merge(line):\n line2=[]\n line3=[]\n line4=[]\n pair=0\n shift=0\n line1=[0]*len(line)\n if(len(line)==1):\n for iota in line:\n line1[0]=iota\n return line1\n \n for iota in xrange(len(line)):\n line4.append(line[iota])\n \n for iota in xrange(len(line)):\n line3.append(line[iota])\n \n \n \n for xinn in xrange(len(line3)):\n for iota in xrange(len(line3)-1):\n if(line3[iota]==0):\n if((line3[iota+1])>0):\n temp=line3[iota];\n line3[iota]=line3[iota+1];\n line3[iota+1]=temp\n shift=1\n xinn=xinn+1\n \n \n if(shift==1):\n for iota in xrange(len(line3)):\n line2.append(line3[iota])\n else:\n for iota in xrange(len(line4)):\n line2.append(line4[iota])\n \n \n \n \n \n \n \n for olay in range(len(line2)-1):\n \n \n if(line2[olay]==line2[olay+1]):\n line1[olay]=2*line2[olay];\n line2[olay+1]=0\n line1[olay+1]=line2[olay+1]\n pair=1;\n olay=olay+2\n else:\n line1[olay]=line2[olay]\n line1[olay+1]=line2[olay+1]\n \n \n \n \n \n \n \n \n \n \n if(pair==0):\n for lonn in xrange(len(line3)):\n line1[lonn]=line3[lonn]\n return line1\n \n \n \n for xinn in xrange(len(line1)):\n for iota in xrange(len(line1)-1):\n if(line1[iota]==0):\n if((line1[iota+1])>0):\n 
temp=line1[iota];\n line1[iota]=line1[iota+1];\n line1[iota+1]=temp\n \n xinn=xinn+1\n \n return line1", "def switchColumn(data_file, column1, column2):\n\tdata = []\n\tfor dataLine in readData(data_file):\n\t\ttmp = dataLine[column1-1]\n\t\tdataLine[column1-1] = dataLine[column2-1]\n\t\tdataLine[column2-1] = tmp\n\t\tdata.append(dataLine)\n\twriteData(data_file, data)", "def fillna(df, col: str, forward: bool):\n na_prev = len(df)\n report = f'fillna(\"{col}\") ' + ('forward' if forward else 'backward') + ' NA count:'\n while True:\n na = df[col].isna().sum()\n report += f' {na}'\n if na == na_prev or na == 0: break\n na_prev = na\n # df must to be sorted by (ABI, YEAR)\n df.loc[df[col].isna(), col] = df.groupby('ABI')[col].shift(1 if forward else -1)", "def fillna_negtive1(df, target=None):\n if not target:\n target = ['price', 'image_top_1']\n for col in target:\n df[col] = df[col].fillna(-1)\n return None", "def _exch(self, ix_1, ix_2):\n tmp = self._vals[ix_1]\n self._vals[ix_1] = self._vals[ix_2]\n self._vals[ix_2] = tmp", "def addColumnValues(self, column):\n nr1 = self.data.shape[1]\n nr = len(column)\n if nr1 == 0:\n # case 1: empty table\n if nr == 0:\n # case 1a: we're just adding a name\n self.data = numpy.reshape(self.data, (1, 0))\n pass\n else:\n # case 1b: we're adding a column of values\n self.data = numpy.reshape(numpy.array(column), (1, nr))\n pass\n pass\n else:\n # case 2: non-empty table\n if nr1 > 0 and nr != nr1:\n raise Exception(\"New column must have the same length as existing ones %s %s\"%(nr1,nr))\n new_column = numpy.reshape(numpy.array(column), (1, nr))\n self.data = numpy.concatenate((self.data, new_column))\n pass\n return", "def _merge_row(self, row1, row2):\n\n duprow = list(row1)\n duprow.extend(list(row2))\n row1.clear()\n overlap_map = {}\n\n for body, overlap in duprow:\n if body not in overlap_map:\n overlap_map[body] = 0\n overlap_map[body] += overlap\n\n for body, overlap in overlap_map.items():\n row1.add((body, overlap))", "def fill_forward(df):\n df = df.fillna(method='ffill')\n df = df.fillna(method='bfill').fillna(0)\n return df", "def assemble_col(c1, c2):\n c1.extend(c2)\n return c1", "def fill_row(row, x):\n row.append(x)\n return row", "def merge(line):\r\n setup_line = line[:]\r\n new_line = []\r\n for num in setup_line:\r\n if num == 0:\r\n setup_line.remove(0)\r\n setup_line.append(0)\r\n \r\n setup_line.append(0)\r\n setup_line.append(0)\r\n setup_line.append(0)\r\n setup_line.append(0)\r\n \r\n for itr in range(len(line)):\r\n if setup_line[0] == setup_line[1] and setup_line[0] != 0:\r\n new_line.append(setup_line[0] * 2)\r\n setup_line.remove(setup_line[0])\r\n setup_line.remove(setup_line[0])\r\n else:\r\n new_line.append(setup_line[0])\r\n setup_line.remove(setup_line[0])\r\n new_line.append(0)\r\n \r\n for itr in range(len(new_line)):\r\n for num in new_line[len(line):]:\r\n if num == 0:\r\n new_line.remove(0)\r\n \r\n return new_line", "def merge(line):\n lst = [0] * len(line) # we start with a 0-filled list.\n pos = 0 # index position in the new list\n pvl = 0 # we keep the previous value\n for val in line:\n if val: # we only care about the non zero values.\n if not pvl: # this tile is empty\n lst[pos] = val # let's fill with val\n pvl = val\n elif pvl - val: # different non zero values?\n pos += 1\n lst[pos] = val # tiles don't merge\n pvl = val\n else: # same values!\n lst[pos] <<= 1 # it merges!\n pos += 1\n pvl = 0 # next value is 0\n return lst", "def merge(line):\n # replace with your code\n result = []\n for index in 
range(len(line)):\n result.append(0)\n result = shift_down(line, result)\n for index in range(len(result) - 1):\n if result[index] == result[index + 1]:\n result[index] *= 2\n result[index + 1] = 0\n result = shift_down(result, result)\n return result", "def merge_right(row):\r\n row1 = reverse(row)\r\n row2 = add_tiles(row1)\r\n row3 = reverse(row2)\r\n row = row3\r\n\r\n return row", "def merge(line):\r\n origin_len = len(line)\r\n new_line = list(line)\r\n empty_space = 0\r\n # remove zero\r\n while empty_space in new_line:\r\n new_line.remove(0)\r\n # merge\r\n tile_cursor = 0\r\n for dummy_count in range(len(new_line) - 1):\r\n if tile_cursor >= (len(new_line) - 1):\r\n break\r\n elif new_line[tile_cursor] == new_line[tile_cursor + 1]:\r\n new_line[tile_cursor] = 2 * new_line[tile_cursor]\r\n new_line[tile_cursor + 1] = 0\r\n tile_cursor = tile_cursor + 2\r\n else:\r\n tile_cursor += 1\r\n \r\n #remove zero\r\n while empty_space in new_line:\r\n new_line.remove(0)\r\n list_zero = [0] * (origin_len - len(new_line))\r\n new_line.extend(list_zero)\r\n \r\n return new_line", "def fill_league_currency(self, df, latest_currency_list):\n league_currency_list = [currency[0] for currency in df.columns]\n for lastest_currency in latest_currency_list:\n if lastest_currency not in league_currency_list:\n df[lastest_currency, df.columns[0][1]] = np.nan\n df = df.sort_index(axis=1)\n return df", "def merge(line):\n # create a list of non zero values from input\n input_size = len(line)\n line = [dummy_value for dummy_value in line if dummy_value > 0]\n \n # create an output list of same length as input with zero values\n line2 = [0] * input_size\n \n #update the output list with the non zero input list based on certain conditions\n line2[0:len(line)] = line\n \n pos = [dummy_no for dummy_no in range(0, len(line2))]\n \n for jos in pos[0:input_size -1]:\n if line2[jos] == line2[pos[jos+1]]:\n line2[jos] = line2[jos] + line2[pos[jos+1]]\n line2[jos+1] = 0\n \n # repeat last two steps above\n # create an output list of same length as input with zero values\n line2 = [dummy_val for dummy_val in line2 if dummy_val > 0]\n \n # create an output list of same length as input with zero values\n line3 = [0] * input_size\n \n #update the output list with the non zero input list \n line3[0:len(line2)] = line2\n \n return line3", "def fill_missing_admission_type(df):\n for admit_type in df[\"admission_type\"].unique():\n type_facilities = df[df[\"admission_type\"] == admit_type][\"facility\"].unique()\n\n df[\"admission_type\"] = np.where(\n (df[\"admission_type\"].isnull() & df[\"facility\"].isin(type_facilities)),\n admit_type,\n df[\"admission_type\"],\n )\n\n return df" ]
[ "0.6066896", "0.5588243", "0.5520393", "0.5153865", "0.5142474", "0.5100762", "0.50284475", "0.50032073", "0.4990765", "0.4952544", "0.49398243", "0.49170038", "0.4903504", "0.4887256", "0.48762384", "0.48469424", "0.48462567", "0.48410118", "0.47721502", "0.47698507", "0.47568175", "0.4755987", "0.47121626", "0.46946415", "0.46859962", "0.46799126", "0.4679654", "0.46685728", "0.46435165", "0.4633871" ]
0.57042193
1
Returns true if player has 3 of spades in their hand.
def has_3_spades(self): if Card('3', 'spades') in self.hand: return True return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def has_three_of_a_kind(self):\n self.suit_hist()\n for val in self.ranks.values():\n if val >= 3:\n self.rank_per_hand['2'] = \"three of a kind\"\n return True\n return False", "def is_three_of_a_kind(hand):\n count = {c:0 for c in cards.keys()}\n for card in hand:\n count[card[0]] += 1\n for c in count:\n if count[c] == 3:\n return (True, cards[c])\n return None", "def is_three_of_a_kind(hand):\n\tis_a_three_of_a_kind = False\n\ti = 0\n\twhile i < 13:\n\t\tif hand[i] == 3:\n\t\t\tis_a_three_of_a_kind = True\n\t\ti += 1 \n\t\t\n\thigh_card = 0\n\tj = 0\n\twhile j < 13 and is_a_three_of_a_kind == True:\n\t\tif hand[j] == 3 and j >= high_card:\n\t\t\thigh_card = j\n\t\tj += 1\n\tif is_a_three_of_a_kind:\n\t\treturn True, high_card\n\telse:\n\t\treturn False", "def has_pair(self):\n self.suit_hist()\n for val in self.ranks.values():\n if val == 2:\n self.rank_per_hand['0'] = \"pair\"\n return True\n return False", "def is_3flush(holecards, flop, required_holecards=2):\n assert 0 <= required_holecards <= 2\n suit1, suit2 = [card.suit for card in holecards]\n hand = tuple(chain(holecards, flop))\n suit_counts = Counter([card.suit for card in hand])\n\n for suit in suit_counts:\n if suit_counts[suit] == 3:\n if required_holecards == 2 and (suit1 == suit2 == suit):\n return True\n elif required_holecards == 1:\n if (suit1 == suit or suit2 == suit):\n return True\n elif required_holecards == 0:\n return True\n return False", "def still_in_hand(self):\n return len(self.hand.cards)!=0", "def is_full_house(hand):\n count = {c:0 for c in cards.keys()}\n for card in hand:\n count[card[0]] += 1\n for c in count:\n if count[c] != 0 and count[c] != 2 and count[c] != 3:\n return None\n triple = 0\n for k in count:\n if count[k] == 3:\n triple = cards[k]\n return (True, triple)", "def has_four_of_a_kind(self):\n self.suit_hist()\n for val in self.ranks.values():\n if val >= 4:\n self.rank_per_hand['6'] = \"four of a kind\"\n return True\n return False", "def has_twopair(self):\n count = 0\n self.suit_hist()\n for val in self.ranks.values():\n if val == 2:\n count += 1\n if count >= 2:\n self.rank_per_hand['1'] = \"two pair\"\n return True\n return False", "def can_play(self) -> bool:\n purple_card = self.game.board.purple\n return (\n self.game.current_player != self\n and purple_card is not None\n and purple_card.space > len(self.game.board.yellow[self])\n )", "def has_cards(self):\n return self.hand.len() > 0", "def is_3straight(holecards, flop, required_holecards=2):\n assert 0 <= required_holecards <= 2\n rank1, rank2 = sorted_numerical_ranks(holecards)\n hand = tuple(chain(holecards, flop))\n\n for subseq in rank_subsequences(hand):\n x, y, z = subseq\n if x == y-1 == z-2:\n if x == 1:\n # Special case for Ace playing low, to allow\n # for the `rank in subseq` check to work\n subseq.append(14)\n if required_holecards == 2:\n if rank1 in subseq and rank2 in subseq:\n return True\n elif required_holecards == 1:\n if rank1 in subseq or rank2 in subseq:\n return True\n elif required_holecards == 0:\n return True\n return False", "def is_soft_hand(self):\n is_soft = False\n for i in self.cards:\n if i.value == 'ACE':\n is_soft = True\n\n return is_soft", "def is_three_channeled(value):\n return len(value) == 3", "def has_won(self):\n coders_card = self.get_coders().get_amount()\n if coders_card > 3:\n return True\n else:\n return False", "def is_four_of_a_kind(hand):\n count = {c:0 for c in cards.keys()}\n for card in hand:\n count[card[0]] += 1\n for c in count:\n if count[c] == 4:\n return (True, 
cards[c])\n return None", "def flush(hand):\n return len(set([suit for value, suit in hand])) == 1", "def hasBlackjack(self):\n return len(self.cards) == 2 and self.getPoints() == 21", "def does_player_have_card(self, player, card):\n return card in self.hands[player]", "def is_four_of_a_kind(hand):\n\tis_a_four_of_a_kind = False\n\ti = 0\n\twhile i < 13:\n\t\tif hand[i] == 4:\n\t\t\tis_a_four_of_a_kind = True\n\t\ti += 1 \n\t\t\n\thigh_card = 0\n\tj = 0\n\twhile j < 13 and is_a_four_of_a_kind == True:\n\t\tif hand[j] == 4 and j >= high_card:\n\t\t\thigh_card = j\n\t\tj += 1\n\tif is_a_four_of_a_kind:\n\t\treturn True, high_card\n\telse:\n\t\treturn False", "def is_blackjack(self):\n if self.hand == 21 and len(list(self)) ==2:\n print '%s = Blackjack'%self\n return True", "def is_full_house(hand):\n\tis_a_full_house = False\n\tnum_three_kind = 0\n\tnum_pair = 0\n\ti = 0\n\twhile i < 13:\n\t\tif hand[i] == 3:\n\t\t\tnum_three_kind += 1\n\t\telif hand[i] == 2:\n\t\t\tnum_pair += 1\n\t\ti += 1\n\tif num_three_kind ==1 and num_pair == 1:\n\t\tis_a_full_house = True\n\t\n\thigh_card = 0\n\tj = 0\n\twhile j < 13 and is_a_full_house == True:\n\t\tif (hand[j] == 2 or hand[j] == 3) and j >= high_card:\n\t\t\thigh_card = j\n\t\tj += 1\n\tif is_a_full_house:\n\t\treturn True, high_card\n\telse:\n\t\treturn False", "def is_game_win(self):\n return not self.deck and not self.hand", "def is_card_playable(self, card):\n color_index = COLOR.index(card[0])\n return len(self.firework[color_index]) == int(card[1]) - 1", "def is_straight(hand):\n # same suite\n suite = hand[0][1]\n vals = []\n for c in hand:\n vals.append(cards[c[0]])\n # check if vals are consecutive or not\n if is_contiguous(vals):\n return True\n else:\n return False", "def is_pair(hand):\n\tis_a_pair = False\n\ti = 0\n\twhile i < 13:\n\t\tif hand[i] == 2:\n\t\t\tis_a_pair = True\n\t\ti += 1 \n\thigh_card = 0\n\tj = 0\n\twhile j < 13 and is_a_pair == True:\n\t\tif hand[j] == 2 and j >= high_card:\n\t\t\thigh_card = j\n\t\tj += 1\n\tif is_a_pair:\n\t\treturn True, high_card\n\telse:\n\t\treturn False", "def make_card_wish(self, symbol, player):\n if player == self.current_player:\n if symbol in \"s c h d\":\n self.wait_for_card_wish = False\n self.card_wished = symbol\n self.choose_next_player()\n return True\n return False", "def is_royal_flush(hand):\n\n # same suit\n suite = hand[0][1]\n count = {c:0 for c in cards.keys()}\n for c in hand:\n if suite != c[1]:\n return False\n count[c[0]] += 1\n # all in same suit\n for c in 'T J Q K A'.split():\n if count[c] != 1:\n return False\n return True", "def is_straight(hand):\n\ti = 0\n\twhile i < 8:\n\t\tif hand[i] == 1 and hand[i+1] == 1 and hand[i+2] == 1 and hand[i+3] == 1 and hand[i+4] == 1:\n\t\t\treturn True, i + 4\n\t\ti += 1\n\treturn False", "def has_fullhouse(self):\n if self.has_pair() & self.has_three_of_a_kind():\n self.rank_per_hand['5'] = \"full house\"\n return True\n return False" ]
[ "0.72666436", "0.7080395", "0.70760804", "0.6595992", "0.65198547", "0.6504807", "0.6470795", "0.6453197", "0.63943964", "0.6391937", "0.6380108", "0.63546914", "0.63495284", "0.63185066", "0.62931806", "0.6260325", "0.61777973", "0.61756945", "0.61107355", "0.6093892", "0.60865194", "0.6032814", "0.60217035", "0.60090405", "0.5986928", "0.5974971", "0.59615004", "0.5937205", "0.5901767", "0.5898353" ]
0.89362204
0
Return all components that match the given type and filter
def queryComponent(type=None, filter=None, all=0):
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_components(self, filter_type=None):\n\n if filter_type is None:\n out = self.components\n elif isinstance(filter_type, str):\n out = {}\n cls = co.str_to_comp(filter_type)\n for comp in self.get_components():\n if isinstance(self.components[comp], cls):\n out[comp] = self.components[comp]\n else:\n out = {}\n for comp in self.get_components():\n if isinstance(self.components[comp], filter_type):\n out[comp] = self.components[comp]\n\n return out", "def type_filter(self, items, types=None):", "def _filter(self, location, component=\"Hosting\", compute_type=None):\n filters = [\n [\"TERM_MATCH\", \"location\", location],\n [\"TERM_MATCH\", \"productFamily\", \"ML Instance\"],\n [\"TERM_MATCH\", \"currentGeneration\", \"Yes\"],\n [\"TERM_MATCH\", \"component\", component]\n ]\n if compute_type:\n filters.append([\"TERM_MATCH\", \"computeType\", compute_type])\n return [{\n 'Type': x[0],\n 'Field': x[1],\n 'Value': x[2]\n } for x in filters]", "def search_items(self, filter_text, type_filter=None):\n output = []\n\n for item in self._all_items:\n\n if type_filter:\n if item.match(filter_text) and item.resource_type == type_filter:\n output.append(item)\n else:\n if item.match(filter_text):\n output.append(item)\n\n return output", "def get_components_by_type(\n self, component_type: Union[type, Tuple[type, ...]]\n ) -> List[Any]:\n return [c for c in self._components if isinstance(c, component_type)]", "def filter(self, filters):", "def get_components(self,filt):\n comps = [self.components[i] for i in xrange(len(self.header)) if filt == self.header[i]]\n return comps", "def get_components_by_type(\n self, component_type: Union[type, Tuple[type, ...]]\n ) -> List[Any]:\n return self._manager.get_components_by_type(component_type)", "def getFilter(self, type: int) -> int:\n ...", "def by_type(cls, typ='creditcard'):\n return Filter('type', values=(typ,), operator=Filter.OPERATOR['EQUAL'])", "def search_filter(query_params, query):\n if query_params.get('type') is not None:\n query = query.filter(search.c.kind == query_params.get('type'))\n return query", "def type_filter(self, items, types=None):\n if not types:\n return items\n allowed_items = []\n for item in items:\n if item.portal_type not in types:\n continue\n allowed_items.append(item)\n return allowed_items", "def filter_evaluations_by_type(self, type_):\n from .evaluation import Evaluation\n from .code_component import CodeComponent\n\n joined_eval = join(\n Evaluation.t, CodeComponent.t,\n ((Evaluation.m.trial_id == CodeComponent.m.trial_id) &\n (Evaluation.m.code_component_id == CodeComponent.m.id))\n )\n joined = join(\n Activation.t, joined_eval,\n ((Evaluation.m.trial_id == Activation.m.trial_id) &\n (Evaluation.m.activation_id == Activation.m.id))\n )\n query = (\n select([CodeComponent.m.name, Evaluation.m.repr])\n .select_from(joined)\n .where((Activation.m.trial_id == self.trial_id) &\n (Activation.m.id == self.id) &\n (CodeComponent.m.type == type_))\n )\n for result in relational.session.execute(query):\n yield result", "def all(self, type_filter=None):\n res = []\n if type_filter is None or isinstance(self, type_filter):\n res.append(self)\n for v in self._all_subnodes():\n if isinstance(v, IDLNode):\n res.extend(v.all(type_filter))\n elif isinstance(v, list):\n for item in v:\n if isinstance(item, IDLNode):\n res.extend(item.all(type_filter))\n return res", "def filter_queries_by_nlp_component(\n query_list: ProcessedQueryList, component_type: str, component_name: str\n ):\n\n filtered_queries = []\n 
filtered_queries_indices = []\n for index, query in enumerate(query_list.processed_queries()):\n if getattr(query, component_type) == component_name:\n filtered_queries_indices.append(index)\n filtered_queries.append(query)\n return filtered_queries_indices, filtered_queries", "def search(self, filtro):\n return [nota for nota in self.notas if nota.match(filtro)]", "def filterPick(list, filter, classification):\n y = []\n for job in list:\n x = [(job, classification) for l in job for m in (filter(l),) if m]\n y.append(x)\n return y", "def get_filters(self):", "def _getRecords(self, record_type, filters):\n if not filters:\n # Always return a copy for consistency\n return list(self._dump_data[record_type])\n response = self._dump_data[record_type]\n for f in filters:\n response = [r for r in response if f(r)]\n return response", "def filter_geom(geom, _type):\n return list(filter(lambda x: isinstance(x, _type), geom))", "def get_objects(filter_rule=\"**\", obj_type=\"*\"):\n objects = ix.api.OfObjectVector()\n project_root = ix.application.get_factory().get_project()\n ix.application.get_matching_objects(objects, filter_rule, project_root,\n obj_type)\n return objects", "def filter(*args, name: Union[AnyStr, bool]=\"\", type: Union[AnyStr, bool]=\"\", q=True,\n query=True, e=True, edit=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def get_type_filters(self, list_type: ListType) -> List[TypeFilter]:\n if hasattr(self, \"json\") and isinstance(self.json, dict):\n type_filters_raw = self.json.get(\"filter\", None)\n\n if type_filters_raw is not None:\n if isinstance(type_filters_raw, str):\n type_filters_raw = loads(type_filters_raw)\n\n if not isinstance(type_filters_raw, list):\n type_filters_raw = [type_filters_raw]\n\n try:\n type_filters: List[TypeFilter] = AvailableTypeFilters.from_string_list(type_filters_raw)\n return type_filters\n except UnknownTypeFilter as e:\n # Import logger here to prevent circular dependency on module import\n message = \"Received unknown type filter: '{0}'\".format(e.unknown_type_filter)\n logger.error(self.request_id, message, exc_info=e)\n raise InvalidUsage(message)\n\n return list_type.to_type_filters()", "def get_filter_types(verbose=False):\n if verbose:\n pprint(filter_types)\n return filter_types", "def test_api_type_filtering(api_client, by_type, by_state):\n response = api_client.get(path='/breweries', params={'by_type': by_type, 'by_state': by_state})\n assert response.json() != []\n assert response.ok", "def get_events(self, type_filter=None):\n\n if type_filter:\n filtered_events = self.__events.get(type_filter, [])\n else:\n filtered_events = [ev for ev_type_list in self.__events.values() for ev in ev_type_list]\n\n return filtered_events", "def fetch(self, compute_type=None):\n has_next_page = True\n next_token = None\n results = []\n while has_next_page:\n params = {\n \"ServiceCode\": self.SERVICE_CODE,\n \"Filters\": self._filter(self.location, compute_type=compute_type)\n }\n if next_token:\n params[\"NextToken\"] = next_token\n response = self.pricing.get_products(**params)\n results += self._format(response)\n next_token = response.get(\"NextToken\")\n has_next_page = next_token is not None\n results = self.filters.apply(results)\n return results", "def extract_filter_list(self, filter_type, elements):\n titleLabel = QLabel(filter_type)\n titleLabel.setStyleSheet('font: 20pt \"Imprint MT Shadow\"; color: #ffffff;')\n grid = QGridLayout()\n self.filterVbox.addWidget(titleLabel, alignment=Qt.AlignCenter)\n 
self.filterVbox.addLayout(grid)\n\n counter = 0\n for element in elements:\n nextLabel = QLabel(element)\n nextLabel.setStyleSheet('font: 12pt \"Times New Roman\"; color: rgb(188, 189, 177);')\n grid.addWidget(nextLabel, math.floor(counter/3), counter % 3, alignment=Qt.AlignCenter)\n counter += 1", "def search(self, filter: str = None) -> dict:\n r = requests.get(self.url, headers=self.headers)\n\n if filter:\n data = r.json()\n return filter_list(data=data, filter_by=filter)\n\n return r.json()", "def filter(self, *args, **kwargs):" ]
[ "0.7252907", "0.6754801", "0.6730381", "0.66894734", "0.6564939", "0.6288751", "0.6273336", "0.62642753", "0.6115402", "0.5971226", "0.59191287", "0.5848749", "0.58480895", "0.5832433", "0.5759755", "0.5755212", "0.56894547", "0.56826574", "0.5639281", "0.56076664", "0.5592761", "0.55915564", "0.5583345", "0.54409814", "0.5439224", "0.5420898", "0.5412727", "0.5404295", "0.53887784", "0.53765273" ]
0.74276376
0
checkKey is used to check for authentication
def checkKey(self): # TO DO for checking API authentication if self.apikey is None: return False else: return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_auth_publickey(self, username, key):\n return AUTH_FAILED", "def _check_key(self, key):\n raise NotImplementedError", "def api_key_check():\n req_path = request.path\n method_type = request.method\n app.logger.info(\">>> path = {}, method = {}\".format(req_path, method_type))\n\n if not app_props.api_key_check:\n app.logger.debug('>>> api key check closed')\n return None\n\n if req_path in app_props.api_key_white_list:\n app.logger.info('>>> {} in white list, pass'.format(req_path))\n return None\n headers = request.headers\n api_key_from_req = headers.get('x-api-key')\n if not api_key_from_req:\n app.logger.debug('>>> enter api-key error')\n return resp_json(BaseResp.err('no x-api-key header'))\n\n key_obj = Key.query.filter_by(api_key=api_key_from_req).first()\n if key_obj:\n app.logger.debug('>>> consumer_id = {}, secret_key = {}'.format(key_obj.consumer_id, key_obj.secret_key))\n g.consumer_id = key_obj.consumer_id\n g.secret_key = key_obj.secret_key\n return None\n else:\n return resp_json(BaseResp.err('Err api key'))", "def isValidKey(key):\n return True", "def check_auth():", "def check_ssh_key(self):\n return True", "def check_keys(self):", "def check_key(request):\n try:\n access_key = request.session.get('access_key_tw', None)\n if not access_key:\n return False\n except KeyError:\n return False\n return True\n\n\t# User info", "def check_key(cb):\n\n def funcn(*args, **kwargs):\n if 'key' not in kwargs:\n fail(REASON_NO_PASSKEY)\n key = kwargs['key']\n del kwargs['key']\n kwargs['user'] = STORAGE.lookup_user(key)\n if kwargs['user'].is_anonymous:\n fail(REASON_BAD_PASSKEY)\n return cb(*args, **kwargs)\n\n return funcn", "def get_key(self, user, api_key):\n return True", "def remote_verifyKey(self, key, protocol):\r\n if self._authenticated.called:\r\n return Failure(InvalidKey('Only one guess is possible.'))\r\n\r\n if isinstance(protocol, Failure):\r\n self._authenticated.errback(protocol)\r\n else:\r\n if self._key != key:\r\n e = Failure(InvalidKey('Wrong key supplied.'))\r\n self._authenticated.errback(e)\r\n return e\r\n\r\n self._authenticated.callback(protocol)", "def check_key(request):\n\ttry:\n\t\taccess_key = request.session.get('access_key_tw', None)\n\t\tif not access_key:\n\t\t\treturn False\n\texcept KeyError:\n\t\treturn False\n\treturn True", "def get_key(self, user, api_key):\r\n from delicious_cake.models import ApiKey\r\n\r\n try:\r\n ApiKey.objects.get(user=user, key=api_key)\r\n except ApiKey.DoesNotExist:\r\n return self._unauthorized()\r\n\r\n return True", "def test_validate_api_key(app, seed_data, key, result):\n user_id, api_key = seed_data\n if key == 'use-valid-key':\n key = api_key\n with app.app_context():\n assert auth.validate_api_key(user_id, key) == result", "def verify_key(self, providerkey = None):\n h = Https(API_DOMAIN)\n\n data = {'apikey' : self.apikey}\n\n if providerkey is not None:\n data['providerkey'] = providerkey\n\n h.request( \"GET\",\n \"/publicapi/verify\"+ urlencode(data),\n headers=self.headers)\n\n request_status = h.getresponse().status\n\n if request_status != 200:\n raise Exception(\"Invalid API Key %s\" % self.apikey)", "def check_api_key(x_api_key: str = Security(api_key_header_auth)):\n\n if x_api_key != API_KEY:\n raise HTTPException(\n status_code=status.HTTP_401_UNAUTHORIZED,\n detail=\"Invalid API Key\",\n )", "def verify_auth_key(cls, auth_key):\n key = ObjectId(auth_key)\n db = cls.mongo_cli.get_database(collection=\"users\")\n if db.count({\"_id\": key}) > 0:\n return True\n return False", "def 
check_empty_key(self, key):\n if key is None or key == \"\" or key == self.empty_api_key:\n print(\"ERROR, A KEY IS EMPTY - CHECK YOUR FILE\")\n return False\n return True", "def check_api(submitted_key, users_key):\r\n if users_key != submitted_key:\r\n return False\r\n else:\r\n return True", "def keyIsValid(key):\n\n isValid = 1\n \n try:\n temp = getParam(key)\n\n except ValueError:\n isValid = 0\n warning(\" WARNING: %s not set\" % (key))\n\n return isValid", "def checkKeys( ):\n\n if (HMACKey is None) or (AESKey is None):\n loadKeys()\n\n if (int(time.time()) - creationTime) > const.KEY_ROTATION_TIME:\n rotateKeys()", "def auth_isok(self):\n # pylint: disable=W0603\n global KEY\n return_value = False\n if KEY is None:\n return_value = True\n elif self.headers.get('Authorization') == 'Basic ' + KEY:\n return_value = True\n return return_value", "def test_api_key (key):\n\tdb = getattr(g,'db', None)\n\n\tif isinstance(key, unicode):\n\t\tkey = key.encode('utf-8')\n\n\tqry = \"SELECT apikey FROM api_keys WHERE apikey=%s;\"\n\twith db as cur:\n\t\treturn 0 < cur.execute(qry, (key,))", "def valid_key(self): \n self.so.ValidKey.restype = c_bool\n result = self.so.ValidKey()\n return result", "def test_validate_yubikey(self):\n from_key = self.yk_rnd.from_key(self.yk_public_id, self.yk_key)\n self.assertTrue(pyhsm.yubikey.validate_yubikey_with_aead( \\\n self.hsm, from_key, self.aead.data, self.kh_validate))", "def _check_auth(self, group_id):\n return", "def execute_request(self, request: Request) -> bool:\r\n print(\"Handler is validating key\")\r\n if request.key is not None:\r\n if not self.next_handler:\r\n return True\r\n return self.next_handler.execute_request(request)\r\n else:\r\n print(\"Key is not valid\")\r\n return False", "def check_key(key):\n # Get config\n line = getfromconfig()\n\t\n # Open a new connection\n conn = my.MySQLConnection(line[0], line[1], line[2], line[3])\n set_of_tables = ['A']\n\n # Go through each table\n for table in set_of_tables:\n c = my.radiogaga_db_get(conn, table, {'ukey': key})\n if len(c) > 0:\n return(1)\n\n # End connection and return answer\n conn.end_connection()\n return(0)", "def _key_to_key_verify(self):\n params = {\n 'host': self._redis_host,\n 'port': self._redis_port,\n 'db': self._from_db,\n 'password': self._password\n }\n client = RedisPool(**params)\n proxy_list = client.get_all()\n if proxy_list:\n self._loop.run_until_complete(self.verify(proxy_list))\n self._update(client)", "def verify_request_session_key(self, key, uuid):\n return self.compute_request_session_key(uuid) == key" ]
[ "0.7483722", "0.7159838", "0.7102654", "0.70746124", "0.70519286", "0.69825", "0.69745266", "0.6936464", "0.6857674", "0.6834089", "0.67978585", "0.6719186", "0.661024", "0.65871054", "0.64848256", "0.6452751", "0.6445985", "0.644107", "0.6375559", "0.6362564", "0.6354385", "0.6344735", "0.62626284", "0.6254767", "0.6209742", "0.6201196", "0.6198189", "0.61824095", "0.61750257", "0.6174351" ]
0.76604617
0
make the cosmos and DES meds files
def make_all_cosmos_des(run, cosmos_config, des_config, catfile, tileid): flist = files.get_cosmos_flist(tileid) cosmos_meds = files.get_meds_file(run, tileid, 'cosmos','i') print('making cosmos MEDS:',cosmos_meds) maker = CosmosMEDSMaker( config_path=cosmos_config, catname=catfile, flistname=flist, ) maker.write(cosmos_meds) for band in ['u','g','r','i','z']: band_flist = files.get_des_flist(band) band_meds = files.get_meds_file(run, tileid, 'des',band) print('making DES MEDS:',band_meds) maker = CosmosMEDSMaker( config_path=des_config, catname=cosmos_meds, flistname=band_flist, ) maker.write(band_meds)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_data_raw (mdp,do_makedata,filename):\n #\n fin = open(filename,'r')\n for line in fin:\n lsp = line.split(' ')\n if len(lsp) > 1: # skip empty lines\n if lsp[0] == \"for\": # indicates when to get correlator\n lsp.pop(0)\n update_params(mdp,lsp)\n ## -- do_makedata tells it to go ahead with generating a new data output file\n ## -- otherwise, just saves parameters to metadata\n if do_makedata:\n try:\n # open correlator file\n mdp.corr_file = open(mdp.input_path + '/' + mdp.input_fname,'r')\n except IOError:\n print \"Could not open file \",mdp.input_fname\n continue\n print mdp.input_fname,',',mdp.tag\n if not mdp.flag_out_open:\n try:\n if mdp.flag_overwrite:\n ## -- open save file for read+write\n try:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.save_file.seek(0) # go to beginning\n mdp.save_file.truncate() # delete whatever was there before\n except IOError:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'w')\n mdp.save_file.close()\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.flag_out_open = True\n # write first header\n #corr_key=uf.get_str_key(mdp.corr_file,\"correlator_key\",\\\n # int(mdp.corr_num.strip(\"[]\").split(',')[0]))\n #uf.write_header(mdp.save_file,corr_key,mdp.corr_len)\n #uf.write_section(mdp.save_file,mdp.key)\n for num,key in zip(mdp.corr_num,mdp.key):\n corr_key=uf.get_str_key(mdp.corr_file,\"correlator_key\",num)\n uf.write_header(mdp.save_file,corr_key,mdp.corr_len)\n uf.write_section(mdp.save_file,key)\n mdp.flag_overwrite= False\n else:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.save_file.seek(0,2) # seek the end of file\n mdp.flag_out_open = True\n # write another header\n #corr_key=uf.get_str_key(mdp.corr_file,\"correlator_key\",\\\n # int(mdp.corr_num.strip(\"[]\").split(',')[0]))\n #uf.write_header(mdp.save_file,corr_key,mdp.corr_len)\n #uf.write_section(mdp.save_file,mdp.key)\n for num,key in zip(mdp.corr_num,mdp.key):\n corr_key=uf.get_str_key(mdp.corr_file,\"correlator_key\",num)\n uf.write_header(mdp.save_file,corr_key,mdp.corr_len)\n uf.write_section(mdp.save_file,key)\n #except (IOError):\n # pass\n except (AttributeError):\n print \"Attempted to open invalid output file\"\n ##endif ! 
flag_out_open\n save_data(mdp)\n mdp.corr_file.close()\n ##endif do_makedata\n ##else \"for\" not found in control file\n else:\n update_params(mdp,lsp)\n ##endif lsp[0]==for\n ##endif len(lsp) > 1\n try:\n mdp.save_file.close()\n mdp.flag_out_open = False\n except (IOError,AttributeError):\n pass\n fin.close()\n return", "def make_phys():\n for rn in dcm_dict.keys():\n # PPG\n if not dcm_dict[rn]['ppg_file'] == 'File missing':\n # Files\n ppg_tsv = os.path.join(out_dir,subject+'_'+dcm_dict[rn]['out_name']+'_physio-cardiac.tsv.gz')\n ppg_json = os.path.join(out_dir,subject+'_'+dcm_dict[rn]['out_name']+'_physio-cardiac.json')\n # TSV\n gzip_file(dcm_dict[rn]['ppg_file'],ppg_tsv)\n # JSON\n data = OrderedDict()\n data['SamplingFrequency'] = 100.0\n data['StartTime'] = -30.0\n data['Columns'] = 'cardiac'\n with open(ppg_json, 'w') as ff:\n json.dump(data, ff,sort_keys=False, indent=4)\n # Respiration\n if not dcm_dict[rn]['resp_file'] == 'File missing':\n # Files\n resp_tsv = os.path.join(out_dir,subject+'_'+dcm_dict[rn]['out_name']+'_physio-respiratory.tsv.gz')\n resp_json = os.path.join(out_dir,subject+'_'+dcm_dict[rn]['out_name']+'_physio-respiratory.json')\n # TSV\n gzip_file(dcm_dict[rn]['resp_file'],resp_tsv)\n # JSON\n data = OrderedDict()\n data['SamplingFrequency'] = 25.0\n data['StartTime'] = -30.0\n data['Columns'] = 'respiratory'\n with open(resp_json, 'w') as ff:\n json.dump(data, ff,sort_keys=False, indent=4)\n # ECG\n # What to do if they have PPG and ECG?\n if not dcm_dict[rn]['ecg_file'] == 'File missing':\n # Files\n ecg_tsv = os.path.join(out_dir,subject+'_'+dcm_dict[rn]['out_name']+'_physio-cardiac.tsv.gz')\n ecg_json = os.path.join(out_dir,subject+'_'+dcm_dict[rn]['out_name']+'_physio-cardiac.json')\n # TSV\n gzip_file(dcm_dict[rn]['resp_file'],resp_tsv)\n # JSON\n data = OrderedDict()\n data['SamplingFrequency'] = 1000.0\n data['StartTime'] = -30.0\n data['Columns'] = 'cardiac'\n with open(resp_json, 'w') as ff:\n json.dump(data, ff,sort_keys=False, indent=4)", "def create_demo_dcm_data(dcm_dir):\n pet_fname = os.path.join(os.path.dirname(__file__), 'data', 'brainweb_06_osem.nii')\n mr_fname = os.path.join(os.path.dirname(__file__), 'data', 'brainweb_06_t1.nii')\n \n pet, pet_affine = flip_ras_lps(*load_nii_in_ras(pet_fname))\n mr, mr_affine = flip_ras_lps(*load_nii_in_ras(mr_fname))\n\n os.mkdir(dcm_dir)\n write_3d_static_dicom(pet, os.path.join(dcm_dir, 'PT'), pet_affine, modality = 'PT')\n write_3d_static_dicom(mr, os.path.join(dcm_dir, 'MR'), mr_affine, modality = 'MR')", "def writeNMD(filename, modes, atoms, zeros=False):\n\n if not isinstance(modes, (NMA, ModeSet, Mode, Vector)):\n raise TypeError('modes must be NMA, ModeSet, Mode, or Vector, '\n 'not {0}'.format(type(modes)))\n if modes.numAtoms() != atoms.numAtoms():\n raise Exception('number of atoms do not match')\n out = openFile(addext(filename, '.nmd'), 'w')\n\n #out.write('#!{0} -e\\n'.format(VMDPATH))\n out.write('nmwiz_load {0}\\n'.format(abspath(filename)))\n name = modes.getTitle()\n name = name.replace(' ', '_').replace('.', '_')\n if not name.replace('_', '').isalnum() or len(name) > 30:\n name = str(atoms)\n name = name.replace(' ', '_').replace('.', '_')\n if not name.replace('_', '').isalnum() or len(name) > 30:\n name = splitext(split(filename)[1])[0]\n out.write('name {0}\\n'.format(name))\n try:\n coords = atoms.getCoords()\n except:\n raise ValueError('coordinates could not be retrieved from atoms')\n if coords is None:\n raise ValueError('atom coordinates are not set')\n\n try:\n data = 
atoms.getNames()\n if data is not None:\n out.write('atomnames {0}\\n'.format(' '.join(data)))\n except:\n pass\n try:\n data = atoms.getResnames()\n if data is not None:\n out.write('resnames {0}\\n'.format(' '.join(data)))\n except:\n pass\n try:\n data = atoms.getResnums()\n if data is not None:\n out.write('resids ')\n data.tofile(out, ' ')\n out.write('\\n')\n except:\n pass\n try:\n data = atoms.getChids()\n if data is not None:\n out.write('chainids {0}\\n'.format(' '.join(data)))\n except:\n pass\n try:\n data = atoms.getSegnames()\n if data is not None:\n out.write('segnames {0}\\n'.format(' '.join(data)))\n except:\n pass\n\n try:\n data = atoms.getBetas()\n if data is not None:\n out.write('bfactors ')\n data.tofile(out, ' ', '%.2f')\n out.write('\\n')\n except:\n pass\n\n format = '{0:.3f}'.format\n out.write('coordinates ')\n coords.tofile(out, ' ', '%.3f')\n out.write('\\n')\n count = 0\n if isinstance(modes, Vector):\n out.write('mode 1 {0:.2f} '.format(abs(modes)))\n modes.getNormed()._getArray().tofile(out, ' ', '%.3f')\n out.write('\\n')\n count += 1\n else:\n if isinstance(modes, Mode):\n modes = [modes]\n for mode in modes:\n if (mode.getEigval() < ZERO) and not zeros:\n continue\n elif (mode.getEigval() < ZERO) and zeros:\n out.write('mode {0} {1:.2f} '.format(\n mode.getIndex()+1, np.sqrt(1/(0.0001*(mode.getIndex()+1)))))\n else:\n out.write('mode {0} {1:.2f} '.format(\n mode.getIndex()+1, mode.getVariance()**0.5))\n arr = mode._getArray().tofile(out, ' ', '%.3f')\n out.write('\\n')\n count += 1\n if count == 0:\n LOGGER.warning('No normal mode data was written. '\n 'Given modes might have 0 eigenvalues.')\n out.close()\n return filename", "def make_data_raw_fast(mdp,do_makedata,filename):\n #\n fin = open(filename,'r')\n for line in fin:\n lsp = line.split(' ')\n if len(lsp) > 1: # skip empty lines\n if lsp[0] == \"for\": # indicates when to get correlator\n lsp.pop(0)\n update_params(mdp,lsp)\n ## -- do_makedata tells it to go ahead with generating a new data output file\n ## -- otherwise, just saves parameters to metadata\n if do_makedata:\n try:\n # open correlator file\n mdp.corr_file = open(mdp.input_path + '/' + mdp.input_fname,'r')\n except IOError:\n print \"Could not open file \",mdp.input_fname\n continue\n print mdp.input_fname\n if not mdp.flag_out_open:\n try:\n if mdp.flag_overwrite:\n ## -- open save file for read+write\n try:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.save_file.seek(0) # go to beginning\n mdp.save_file.truncate() # delete whatever was there before\n except IOError:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'w')\n mdp.save_file.close()\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.flag_out_open = True\n #for num,key in zip(mdp.corr_num,mdp.key):\n # corr_key=uf.get_str_key(mdp.corr_file,\"correlator_key\",num)\n mdp.flag_overwrite= False\n else:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.save_file.seek(0,2) # seek the end of file\n mdp.flag_out_open = True\n #for num,key in zip(mdp.corr_num,mdp.key):\n # corr_key=uf.get_str_key(mdp.corr_file,\"correlator_key\",num)\n #except (IOError):\n # pass\n except (AttributeError):\n print \"Attempted to open invalid output file\"\n ##endif ! 
flag_out_open\n save_data_fast(mdp)\n mdp.corr_file.close()\n ##endif do_makedata\n ##else \"for\" not found in control file\n else:\n update_params(mdp,lsp)\n ##endif lsp[0]==for\n ##endif len(lsp) > 1\n try:\n mdp.save_file.close()\n mdp.flag_out_open = False\n except (IOError,AttributeError):\n pass\n fin.close()\n return", "def writeDataCards(opt,sigExp,bkgExp,shapesURL):\n\n #create a card per category\n dcList=[]\n for icat in range(len(opt.categs)):\n cat='%s_a%d_%d'%(opt.chTag,opt.xangle,icat)\n dcTxt='%s/shapes-parametric.datacard_%s.dat'%(opt.output,cat)\n dcList.append(dcTxt)\n with open(dcTxt,'w') as dc:\n dc.write('#\\n')\n dc.write('# datacard was automatically generated with generateWorkspace.py\\n')\n dc.write('# the options passed are printed below\\n')\n dc.write('# %s\\n'%opt)\n dc.write('#\\n')\n dc.write('imax *\\n')\n dc.write('jmax *\\n')\n dc.write('kmax *\\n')\n dc.write('-'*50+'\\n')\n dc.write('shapes * * {0} $PROCESS_{1} $PROCESS_$SYSTEMATIC\\n'.format(shapesURL,cat))\n dc.write('shapes data_obs * {0} $PROCESS_{1}\\n'.format(shapesURL,cat))\n dc.write('-'*50+'\\n')\n dc.write('bin %s\\n'%cat)\n dc.write('observation -1\\n')\n dc.write('-'*50+'\\n')\n dc.write('%15s %15s %15s\\n'%('bin',cat,cat))\n dc.write('%15s %15s %15s\\n'%('process','sig','bkg'))\n dc.write('%15s %15s %15s\\n'%('process','0', '1'))\n dc.write('%15s %15s %15s\\n'%('rate','%3.2f'%sigExp[icat], '%3.2f'%bkgExp[icat]))\n dc.write('-'*50+'\\n')\n \n #float the background normalization as well as the signal\n dc.write('mu_bkg{0} rateParam {0} bkg 1\\n'.format(cat))\n\n #uncertainties\n dc.write('lumi %8s %15s %15s\\n'%('lnN','1.027','-'))\n dc.write('%s_sigShape %8s %15s %15s\\n'%(cat,'shape','1','-'))\n dc.write('%s_bkgShape %8s %15s %15s\\n'%(cat,'shape','-','1'))\n dc.write('{0} autoMCStats 0.0 1\\n'.format(cat))\n \n print '\\tshapes available @',shapesURL\n print '\\tgenerated the following datacards',dcList", "def write_conformers(self, filename): # ccids):\n cnt = 0\n for confId in range(self.nconf): #ccids:\n w = Chem.SDWriter('%s_c%03d.sdf'%(filename,cnt+1))\n w.write(self.mol, confId=confId)\n w.flush()\n w.close()\n cnt += 1", "def Construct3DMolToFile(fileName,writeFile):\r\n # Writing sets of molecules\r\n \r\n\r\n w = Chem.SDWriter(writeFile)\r\n suppl = Chem.SDMolSupplier(fileName)\r\n mols = [x for x in suppl]\r\n for mol in mols:\r\n \t# print(mol.GetProp(\"Solvent\"))\r\n \t# print(mol.GetPropNames)\r\n \tsignal.signal(signal.SIGALRM, handler)\r\n \tsignal.alarm(100)\r\n \ttry:\r\n \t\tmol3d = GetMolFromMol(mol,dimension=3)\r\n \t\tw.write(mol3d)\r\n \texcept Exception:\r\n \t\tmol3d = mol\r\n \t\tw.write(mol3d)\r\n \t\t# print(mol.GetPropsAsDict())\r\n\r\n\r\n w.close()", "def generate_epics_db(self):\n if (self.verbose):\n # Generate digital application related databases and configuration files\n print(\"==================================================\")\n print(\"== Generating EPICS DB and configuration files: ==\")\n print(\"==================================================\")\n \n print(\"----------------------------\")\n print(\"-- Digital applications --\")\n print(\"----------------------------\")\n for app in self.digital_apps:\n app_path = '{}app_db/{}/{:04}/{:02}/'.format(self.dest_path, app[\"cpu_name\"], app[\"crate_id\"], app[\"slot_number\"])\n app_prefix = 'MPLN:{}:{}:{}'.format(app[\"link_node_area\"].upper(), app[\"link_node_location\"].upper(), app[\"card_index\"])\n if (self.verbose):\n print(\"Application path : {}\".format(app_path))\n print(\"Application 
prefix : {}\".format(app_prefix))\n \n self.__write_dig_app_id_confg(path=app_path, macros={\"ID\":str(app[\"app_id\"])})\n\n # Add the IOC name environmental variable for the Link Nodes\n self.__write_header_env(path=app_path, macros={\"MPS_LINK_NODE\":app[\"link_node_name\"],\n \"MPS_DB_VERSION\":self.config_version,\n \"DATE\":datetime.datetime.now().strftime('%Y.%m.%d-%H:%M:%S')})\n self.__write_iocinfo_env(path=app_path, macros={\"AREA\":app[\"link_node_area\"].upper(),\n \"LOCATION\":app[\"link_node_location\"].upper(),\n \"LOC_IDX\":app['link_node_location'].upper().replace('MP', ''),\n \"C_IDX\":unicode(app['card_index'])})\n if self.link_nodes[app[\"link_node_name\"]]['type'] == 'Digital':\n self.__write_prefix_env(path=app_path, macros={\"P\":app_prefix})\n self.__write_mps_db(path=app_path, macros={\"P\":app_prefix, \"THR_LOADED\":\"1\"})\n self.__write_app_id_config(path=app_path, macros={\"ID\":\"0\"}) # If there are no analog cards, set ID to invalid\n\n has_virtual = False\n for device in app[\"devices\"]:\n device_prefix = \"{}:{}:{}\".format(device[\"type_name\"], device[\"area\"], device[\"position\"])\n\n if (self.verbose):\n print(\" Device prefix : {}\".format(device_prefix))\n\n for input in device[\"inputs\"]:\n\n if app[\"virtual\"]:\n has_virtual = True\n if (input[\"bit_position\"]>=32):\n scan = \".2 second\"\n if (input['name'] == 'WDOG'):\n if (\"MPSHEARTBEAT\" in input[\"input_pv\"]):\n scan = \".1 second\"\n channel = input[\"bit_position\"] - 32\n vmacros = { \"P\":input[\"input_pv\"]+'_THR',\n \"R\":input[\"name\"],\n \"N\":self.mps_name.getDeviceInputNameFromId(input[\"db_id\"]),\n \"INPV\":input[\"input_pv\"],\n \"ALSTATE\":str(input[\"alarm_state\"]),\n \"NALSTATE\":str(to_bool(not input[\"alarm_state\"])),\n \"ZSV\":input[\"zero_severity\"],\n \"OSV\":input[\"one_severity\"],\n \"BIT\":\"{:02d}\".format(channel).format,\n \"ZNAM\":input[\"zero_name\"],\n \"ONAM\":input[\"one_name\"], \n \"GID\":str(app[\"app_id\"]),\n \"SCAN\":scan}\n if (input['name'] == 'WDOG'):\n self.__write_virtual_wdog_db(path=app_path, macros=vmacros)\n else:\n self.__write_virtual_db(path=app_path, macros=vmacros)\n\n\n macros = { \"P\":device_prefix,\n \"R\":input[\"name\"],\n \"BIT\":input[\"bit_position\"],\n \"ZNAM\":input[\"zero_name\"],\n \"ONAM\":input[\"one_name\"] }\n\n if (self.verbose):\n print(\" Digital Input : {}\".format(input[\"name\"]))\n\n if (self.verbose):\n print(\"----------------------------\")\n\n print(\"==================================================\")\n print(\"\")\n\n # Generates analog application related databases and configuration files\n if (self.verbose):\n print(\"--------------------------\")\n print(\"-- Analog applications --\")\n print(\"--------------------------\")\n for app in self.analog_apps:\n app_path = '{}app_db/{}/{:04}/{:02}/'.format(self.dest_path, app[\"cpu_name\"], app[\"crate_id\"], app[\"slot_number\"])\n app_prefix = 'MPLN:{}:{}:{}'.format(app[\"link_node_area\"].upper(), app[\"link_node_location\"].upper(), app[\"card_index\"])\n if (self.verbose):\n print(\"Application path : {}\".format(app_path))\n print(\"Application prefix : {}\".format(app_prefix))\n\n self.__write_mps_db(path=app_path, macros={\"P\":app_prefix, \"THR_LOADED\":\"0\"})\n self.__write_app_id_config(path=app_path, macros={\"ID\":str(app[\"app_id\"])})\n self.__write_thresholds_off_config(path=app_path)\n\n # Add the IOC name environmental variable for the Link Nodes\n if app[\"analog_link_node\"]:\n self.__write_header_env(path=app_path, 
macros={\"MPS_LINK_NODE\":app[\"link_node_name\"],\n \"MPS_DB_VERSION\":self.config_version,\n \"DATE\":datetime.datetime.now().strftime('%Y.%m.%d-%H:%M:%S')})\n\n self.__write_iocinfo_env(path=app_path, macros={\"AREA\":app[\"link_node_area\"].upper(),\n \"LOCATION\":app[\"link_node_location\"].upper(),\n \"LOC_IDX\":app['link_node_location'].upper().replace('MP', ''),\n \"C_IDX\":unicode(app['card_index'])})\n self.__write_prefix_env(path=app_path, macros={\"P\":app_prefix})\n\n spare_channels = range(0,6)\n for device in app[\"devices\"]:\n device_prefix = \"{}:{}:{}\".format(device[\"type_name\"], device[\"area\"], device[\"position\"])\n\n if (self.verbose):\n print(\" Device prefix : {}\".format(device_prefix))\n\n if (device[\"type_name\"] not in self.non_link_node_types):\n macros = { \"P\": app_prefix,\n \"CH\":str(device[\"channel_index\"]),\n \"CH_NAME\":device[\"device_name\"],\n \"CH_PVNAME\":device_prefix,\n \"CH_SPARE\":\"0\"\n }\n self.__write_link_node_channel_info_db(path=app_path, macros=macros)\n processing = 0\n ch = device['channel_index']\n if (device[\"type_name\"] == \"CBLM\"):\n processing = 1\n if (device[\"type_name\"] == \"KICK\"):\n processing = 1\n int0 = device['channel_index']*4\n int1 = device['channel_index']*4 + 1\n macros = { \"CH\":format(device['channel_index']),\n \"PROC\":format(processing),\n \"INT0\":format(int0),\n \"INT1\":format(int1)\n }\n self.__write_ana_config(path=app_path, macros=macros)\n spare_channels[device[\"channel_index\"]] = -1\n for fault in device[\"faults\"].values():\n bsa_slot = fault['integrators'][0]*6 + device[\"channel_index\"]\n macros = { \"P\":app_prefix,\n \"R\":'ANA_BSA_DATA_{}'.format(bsa_slot),\n \"P_DEV\":device_prefix,\n \"R_DEV\":self.get_analog_type_name(device[\"type_name\"]),\n \"FAULT\":fault['name'],\n \"EGU\":self.get_app_units(device[\"type_name\"],fault[\"name\"])\n }\n self.__write_analog_db(path=app_path, macros=macros)\n macros = { \"P\":device_prefix,\n \"BAY\":format(device[\"bay_number\"]),\n \"APP\":self.get_app_type_name(device[\"type_name\"]),\n \"FAULT\":fault[\"name\"],\n \"FAULT_INDEX\":self.get_fault_index(device[\"type_name\"], fault[\"name\"], device[\"channel_number\"]),\n \"DESC\":fault[\"description\"],\n \"EGU\":self.get_app_units(device[\"type_name\"],fault[\"name\"]),\n \"SLOPE\":unicode(self.get_slope(device[\"type_name\"])),\n \"OFFSET\":unicode(self.get_offset(device[\"type_name\"]))}\n self.__write_thr_base_db(path=app_path, macros=macros)\n # Generate PV for all possible thresholds, even if not defined in database\n for bit in range(0,8):#fault[\"bit_positions\"]:\n fault_prefix = \"{}_T{}\".format(fault[\"name\"], bit)\n macros[\"BIT_POSITION\"] = str(bit)\n self.__write_thr_db(path=app_path, macros=macros)\n if (self.verbose):\n print(\" Fault prefix : {}\".format(fault_prefix))\n\n\n for ch in spare_channels:\n if ch > -1:\n macros = { \"P\": app_prefix,\n \"CH\":str(ch),\n \"CH_NAME\":\"Spare\",\n \"CH_PVNAME\":\"None\",\n \"CH_SPARE\":\"1\"\n }\n self.__write_link_node_channel_info_db(path=app_path, macros=macros)\n\n #\n # Write db information about slots of each link node\n #\n for app in self.analog_apps + self.digital_apps:\n app_path = '{}app_db/{}/{:04}/{:02}/'.format(self.dest_path, app[\"cpu_name\"], app[\"crate_id\"], app[\"slot_number\"])\n link_node_info=self.link_nodes[app[\"link_node_name\"]]\n #print link_node_info\n if not 'exported' in link_node_info:\n for slot in range(2,8):\n if slot in link_node_info['slots']:\n macros = { \"P\": app[\"app_prefix\"],\n 
\"SLOT\": str(slot),\n \"SLOT_NAME\": link_node_info['slots'][slot]['type'],\n \"SLOT_PVNAME\": link_node_info['slots'][slot]['pv_base'],\n \"SLOT_SPARE\": \"0\"}\n else:\n macros = { \"P\": app[\"app_prefix\"],\n \"SLOT\": str(slot),\n \"SLOT_NAME\": \"Spare\",\n \"SLOT_PVNAME\": \"Spare\",\n \"SLOT_SPARE\": \"1\"}\n\n self.__write_link_node_slot_info_db(path=app_path, macros=macros)\n\n # Add CH_* PVs for digital-only link nodes. These are added before \n # only if the LN is Mixed or Analog\n if link_node_info['type'] == 'Digital':\n for ch in spare_channels:\n macros = { \"P\": app[\"app_prefix\"],\n \"CH\":str(ch),\n \"CH_NAME\":\"Not Available\",\n \"CH_PVNAME\":\"None\",\n \"CH_SPARE\":\"1\"\n }\n self.__write_link_node_channel_info_db(path=app_path, macros=macros)\n\n link_node_info['exported']=True\n\n #\n # Add Link Node related information\n #\n #for ln_name,ln in self.link_nodes.items():\n # if \"lc1_node_id\" not in ln:\n # continue\n # if \"dig_app_id\" not in ln:\n # continue\n # print ln[\"lc1_node_id\"] + ' ' + ln[\"type\"] + ' ' + ln[\"dig_app_id\"]\n for ln_name,ln in self.link_nodes.items():\n self.__write_lc1_info_config(ln)\n self.__write_link_node_info_db(ln_name, ln)\n\n if (self.verbose):\n print(\"--------------------------\")", "def export_model_description(md: ModelDescription) -> bytes:\n\n # ---------------- write model description -------------------\n\n fmd = ET.Element(\"fmiModelDescription\")\n fmd.set(\"fmiVersion\", \"2.0\")\n fmd.set(\"modelName\", md.modelName)\n fmd.set(\"guid\", md.guid)\n fmd.set(\"author\", md.author)\n fmd.set(\"generationDateAndTime\", md.generationDateAndTime)\n fmd.set(\"variableNamingConvention\", md.variableNamingConvention)\n fmd.set(\"generationTool\", md.generationTool)\n fmd.set(\"description\", md.description)\n\n # CoSimulation\n cs = ET.SubElement(fmd, \"CoSimulation\")\n cs.set(\"modelIdentifier\", md.CoSimulation.modelIdentifier)\n cs.set(\n \"needsExecutionTool\", str(md.CoSimulation.needsExecutionTool).lower(),\n )\n cs.set(\n \"canHandleVariableCommunicationStepSize\",\n str(md.CoSimulation.canHandleVariableCommunicationStepSize).lower(),\n )\n cs.set(\n \"canInterpolateInputs\", str(md.CoSimulation.canInterpolateInputs).lower(),\n )\n\n cs.set(\n \"maxOutputDerivativeOrder\", str(md.CoSimulation.maxOutputDerivativeOrder),\n )\n cs.set(\n \"canRunAsynchronuously\", str(md.CoSimulation.canRunAsynchronuously).lower(),\n )\n cs.set(\n \"canBeInstantiatedOnlyOncePerProcess\",\n str(md.CoSimulation.canBeInstantiatedOnlyOncePerProcess).lower(),\n )\n cs.set(\n \"canNotUseMemoryManagementFunctions\",\n str(md.CoSimulation.canNotUseMemoryManagementFunctions).lower(),\n )\n cs.set(\n \"canGetAndSetFMUstate\", str(md.CoSimulation.canGetAndSetFMUstate).lower(),\n )\n cs.set(\n \"canSerializeFMUstate\", str(md.CoSimulation.canSerializeFMUstate).lower(),\n )\n cs.set(\n \"providesDirectionalDerivative\",\n str(md.CoSimulation.providesDirectionalDerivative).lower(),\n )\n\n # 2.2.4 p.42) Log categories:\n cs = ET.SubElement(fmd, \"LogCategories\")\n for ac in md.logCategories:\n c = ET.SubElement(cs, \"Category\")\n c.set(\"name\", ac)\n\n # 2.2.7 p.47) ModelVariables\n mvs = ET.SubElement(fmd, \"ModelVariables\")\n\n variable_index = 0\n\n for var in md.modelVariables:\n var.variability\n value_reference = str(var.value_reference)\n\n idx_comment = ET.Comment(f'Index of variable = \"{variable_index + 1}\"')\n mvs.append(idx_comment)\n sv = ET.SubElement(mvs, \"ScalarVariable\")\n sv.set(\"name\", var.name)\n 
sv.set(\"valueReference\", value_reference)\n sv.set(\"variability\", var.variability)\n sv.set(\"causality\", var.causality)\n\n if var.description:\n sv.set(\"description\", var.description)\n\n if var.initial:\n i = var.initial\n sv.set(\"initial\", i)\n\n val = ET.SubElement(sv, var.dataType)\n\n # 2.2.7. p.48) start values\n if var.initial in {\"exact\", \"approx\"} or var.causality == \"input\":\n assert (\n var.start != None\n ), \"a start value must be defined for intial ∈ {exact, approx}\"\n val.set(\"start\", var.start)\n\n variable_index += 1\n\n ms = ET.SubElement(fmd, \"ModelStructure\")\n\n # 2.2.8) For each output we must declare 'Outputs' and 'InitialUnknowns'\n outputs = [\n (idx + 1, o)\n for idx, o in enumerate(md.modelVariables)\n if o.causality == \"output\"\n ]\n\n if outputs:\n os = ET.SubElement(ms, \"Outputs\")\n for idx, o in outputs:\n ET.SubElement(os, \"Unknown\", {\"index\": str(idx), \"dependencies\": \"\"})\n\n os = ET.SubElement(ms, \"InitialUnknowns\")\n for idx, o in outputs:\n ET.SubElement(os, \"Unknown\", {\"index\": str(idx), \"dependencies\": \"\"})\n\n # FMI requires encoding to be encoded as UTF-8 and contain a header:\n #\n # See 2.2 p.28\n return ET.tostring(fmd, pretty_print=True, encoding=\"utf-8\", xml_declaration=True)", "def make_database(num_files=10):\n for i in range(num_files):\n print('\\n\\n\\nCreating set', str(i), '\\n\\n\\n')\n s_file = 'set' + str(i) + '.hdf5' \n play_dominoes(save_file=s_file)", "def write(self, filename):\n assert filename[-3:]=='.fz','name must end in .fz'\n\n files.makedir_fromfile(filename)\n\n ucfilename=filename[0:-3]\n bname = os.path.basename(ucfilename)\n\n tmp_path = os.path.join(\n files.get_temp_dir(),\n bname,\n )\n files.makedir_fromfile(tmp_path)\n\n with TempFile(tmp_path) as tfile:\n super(CosmosMEDSMaker,self).write(tfile.path)\n self._compress_meds_file(tfile.path, filename)", "def create_metadata(scene: \"Scenemaker\") -> None:\r\n create_datadir()\r\n\r\n with open(dirpath / cng.GENERATED_DATA_DIR / cng.METADATA_FILE, \"w+\") as f:\r\n f.write(str(scene.num2name))", "def generateDataset(self):\n if self.outdir[-1] != \"/\": \n self.outdir += \"/\"\n self.outdir += \"dataset_trackml\"\n i = 1\n while os.path.exists(self.outdir):\n self.outdir.replace(\"_\"+str(i-1), \"\")\n self.outdir += (\"_\"+str(i))\n i += 1\n cmd = \"mkdir -p \"+ self.outdir\n os.system(cmd)\n\n cont = pc.particleController()\n cont.generateEvents(self.numevents, self.hpe, self.detectors)\n\n self.generateHits(cont)\n self.generateTruths(cont)\n self.generateSolution(cont)", "def generate_file(material_id):\n apr=get_doc_from_MP(material_id)\n mat_list=generate_matrix(apr)\n formu=POSCAR_title(apr)\n cell_for=generate_cell_formula(apr)\n needed_dos=generate_dos_str(material_id)\n revise_dos=dos_into_string(needed_dos)\n ordered_list=generate_ordered_list(revise_dos)\n my_ordered_elements=generate_ordered_elements(revise_dos,ordered_list)\n my_ordered_numbers=generate_ordered_numbers(revise_dos,ordered_list,cell_for)\n generate_POSCAR(formu,mat_list,my_ordered_elements,my_ordered_numbers,revise_dos)", "def createDataDescriptionTxtFile(pMassFile=[], pMassDcmpFile=[], pMassDcmpSpkesFile=[], pMassBrianFile=[]):\n\n if isinstance(pMassFile,str):\n if os.path.isfile(pMassFile):\n # Load in the data to see what the hell it is\n inputDataFile = open(pMassFile, \"rb\")\n dataOut = pickle.load(inputDataFile)\n inputDataFile.close()\n # Now we need to write a description of what the data is! 
[dataOut, xmin, xmax, vmin, vmax, amin, amax, dt, tmax]\n numbOfTrials = len(dataOut[0]) # This is the number of individual trails stored\n xmin = dataOut[1]\n xmax = dataOut[2]\n vmin = dataOut[3]\n vmax = dataOut[4]\n amin = dataOut[5]\n amax = dataOut[6]\n dt = dataOut[7]\n tmax = dataOut[8]\n descpFName = pMassFile[:-4]+'DESC.txt'\n file = open(descpFName,'w')\n file.write(\"1-D Moving Mass Data Description\\n\")\n file.write(\"\\nData here is generated from the genAndSaveMoving1DMassData() method\\n\")\n file.write(\"\\nFileName... \"+pMassFile)\n file.write(\"\\niterations. \"+str(numbOfTrials))\n file.write(\"\\nxmin....... \"+str(xmin))\n file.write(\"\\nxmax....... \"+str(xmax))\n file.write(\"\\nvmin........\"+str(vmin))\n file.write(\"\\nvmax........\"+str(vmax))\n file.write(\"\\namin........\"+str(amin))\n file.write(\"\\namax........\"+str(amax))\n file.write(\"\\ndt..........\"+str(dt))\n file.write(\"\\ntmax........\"+str(tmax))\n file.close\n if isinstance(pMassDcmpFile, str):\n if os.path.isfile(pMassDcmpFile):\n # Load in teh data to see what it has it there\n inputDataFile = open(pMassDcmpFile, \"rb\")\n dataOut = pickle.load(inputDataFile)\n inputDataFile.close()\n # Now we need to write a description of what the data is! [segmentedTrials, gCenters, b, dataFile, xStart, xStop, nGaus, bInitial, dt]\n numbOfTrials = len(dataOut[0])\n originatingDataFile = dataOut[3]\n numbGaus = dataOut[6]\n xStart = dataOut[4]\n xStop = dataOut[5]\n GausVarb = dataOut[2]\n dt = dataOut[8]\n descpFName = pMassDcmpFile[:-4]+\"DESC.txt\"\n file = open(descpFName,'w')\n file.write(\"1-D Moving Mass Decomposed Data Description\\n\")\n file.write(\"\\nData here is generated from the decomposeMoving1DMassData() method\\n\")\n file.write(\"\\nFileName.......\"+pMassDcmpFile)\n file.write(\"\\nInputDataFile..\"+originatingDataFile)\n file.write(\"\\niterations.....\"+str(numbOfTrials))\n file.write(\"\\nNumbOfGauss....\"+str(numbGaus))\n file.write(\"\\nxStart.........\"+str(xStart))\n file.write(\"\\nxStop..........\"+str(xStop))\n file.write(\"\\nGausVarbl_b....\"+str(GausVarb))\n file.write(\"\\ndt.............\"+str(dt))\n file.close()\n if isinstance(pMassDcmpSpkesFile, str):\n if os.path.isfile(pMassDcmpSpkesFile):\n # Load in teh data to see what it has it there\n inputDataFile = open(pMassDcmpSpkesFile, \"rb\")\n dataOut = pickle.load(inputDataFile)\n inputDataFile.close()\n # Now we need to write a description of what the data is! [segmentedSpikesList, dataFile, spikeGenType, analgSingnalScaling]\n numbOfTrials = len(dataOut[0])\n originatingDataFile = dataOut[1]\n spikeGenType = dataOut[2]\n analogScl = dataOut[3]\n descpFName = pMassDcmpSpkesFile[:-4]+\"DESC.txt\"\n file = open(descpFName,'w')\n file.write(\"1-D Moving mass Decomposed and Turned to Spikes Data Description\\n\")\n file.write(\"\\nData here is generated from the decompedToSpikes1DMassData\\n\")\n file.write(\"\\nFileName.........\"+pMassDcmpSpkesFile)\n file.write(\"\\nInputDataFile....\"+originatingDataFile)\n file.write(\"\\nIterations.......\"+str(numbOfTrials))\n file.write(\"\\nSpike-Gen-Type...\"+spikeGenType)\n file.write(\"\\nSignal-Scaling...\"+str(analogScl))\n file.close()\n if isinstance(pMassBrianFile, str):\n if os.path.isfile(pMassBrianFile):\n # Load in the data to see what it has in there\n inputDataFile = open(pMassBrianFile, \"rb\")\n dataOut = pickle.load(inputDataFile)\n inputDataFile.close()\n # Now we need to write a description of what the data is! 
[spikesInBrianForm, dataFile, origDataFile]\n descpFName = pMassBrianFile[:-4]+\"DESC.txt\"\n file = open(descpFName, 'w')\n file.write(\"1-D Moving mass data decomposed, turned into spikes, and then turned into a brain sim. compatible format\\n\")\n file.write(\"\\n Data here is generated from the convertSpikeseToBrainForm()\\n\")\n file.write(\"\\nFilename.............................\"+pMassBrianFile)\n file.write(\"\\nOriginating Decompled Data File .... \"+dataOut[1])\n file.close()", "def handle_store(self, event):\n\n \n mode_prefixes = {'CT Image Storage' : 'CT',\n 'Enhanced CT Image Storage' : 'CTE',\n 'MR Image Storage' : 'MR',\n 'Enhanced MR Image Storage' : 'MRE',\n 'Positron Emission Tomography Image Storage' : 'PT',\n 'RT Plan Storage' : 'RP',\n 'RT Structure Set Storage' : 'RS',\n 'Computed Radiography Image Storage' : 'CR',\n 'Ultrasound Image Storage' : 'US',\n 'Enhanced Ultrasound Image Storage' : 'USE',\n 'X-Ray Angiographic Image Storage' : 'XA',\n 'Enhanced XA Image Storage' : 'XAE',\n 'Nuclear Medicine Image Storage' : 'NM',\n 'Secondary Capture Image Storage' : 'SC'\n }\n\n ds = event.dataset\n # Because pydicom uses deferred reads for its decoding, decoding errors\n # are hidden until encountered by accessing a faulty element\n try:\n sop_class = ds.SOPClassUID\n sop_instance = ds.SOPInstanceUID\n except Exception as exc:\n # Unable to decode dataset\n return 0xC210\n\n try:\n # Get the elements we need\n mode_prefix = mode_prefixes[sop_class.name]\n except KeyError:\n mode_prefix = 'UN'\n\n filename = os.path.join(self.config['output']['directory'],'tmp/{0!s}.dcm'.format(uuid.uuid4()))\n\n # Presentation context\n cx = event.context\n\n meta = Dataset()\n meta.MediaStorageSOPClassUID = sop_class\n meta.MediaStorageSOPInstanceUID = sop_instance\n \n meta.TransferSyntaxUID = cx.transfer_syntax\n \n\n ds.file_meta = meta\n ds.is_little_endian = cx.transfer_syntax.is_little_endian\n ds.is_implicit_VR = cx.transfer_syntax.is_implicit_VR\n\n status_ds = Dataset()\n \n try:\n ds.save_as(filename, write_like_original=False)\n self.file_count += 1\n self.writing_queue.put((filename, ds))\n status_ds.Status = 0x0000 # Success\n except IOError:\n # Failed - Out of Resources - IOError\n status_ds.Status = 0xA700\n except:\n # Failed - Out of Resources - Miscellaneous error\n status_ds.Status = 0xA701\n\n\n return status_ds", "def create_dnz_file(args):\n\n file = open(args.o, 'w')\n\n file.write(\"% ----DATA VARIABLES----\\n\\n\")\n file.write(\"t=\" + str(args.t) + \";\" + \"%number of attributes\\n\")\n file.write(\"k=\" + str(args.k) + \";\" + \"%max length of the support set\\n\")\n file.write(\"n=\" + str(args.n) + \";\" + \"%number of positive instances\\n\")\n file.write(\"m=\" + str(args.m) + \";\" + \"%number of negative instances\\n\")\n file.write(\"c=\" + str(args.c) + \";\" + \"%number of atMostOne Constraints\\n\\n\")\n\n file.write(\"% ----OMEGAS----\\n\\n\")\n\n omega_p = generate_omega_data(args.t, args.n, args.b)\n file.write(\"omegap= \" + omega_to_mz(omega_p) + \"\\n\\n\")\n\n omega_n = generate_disjoint_omega_data(omega_p, args.m, args.b)\n file.write(\"omegan= \" + omega_to_mz(omega_n) + \"\\n\\n\")\n\n file.write(\"% ----CONSTRAINS----\\n\\n\")\n at_most_one = generate_at_most_one(int(args.t/2), args.c, 1, args.t)\n file.write(\"atMostOne=\" + at_most_one_to_mz(at_most_one))", "def build(self):\n self.kwargs.pop('clobber', None)\n\n # Read in mock catalog with assigned photometric redshifts\n # and calculate the line-of-sight displacement between the \n 
# upweighted galaxy and the photometric redshift of the \n # collided galaxy \n photoz_cat_corr = {\n 'catalog': self.cat_corr['catalog'].copy(), \n 'correction': {'name': 'photoz'}\n }\n dataclass = Data('data', photoz_cat_corr) \n dataclass.read() \n\n cosmo = dataclass.cosmo()\n\n coll = np.where(dataclass.wfc == 0) \n \n dlos_actual = (cosmos.distance.comoving_distance(dataclass.z[coll], **cosmo) - \\\n cosmos.distance.comoving_distance(dataclass.zupw[coll], **cosmo)) * cosmo['h']\n dlos_photoz = (cosmos.distance.comoving_distance(dataclass.photoz[coll], **cosmo) - \\\n cosmos.distance.comoving_distance(dataclass.zupw[coll], **cosmo)) * cosmo['h']\n\n # each value of d_NN corresponds to a dLOS value \n # in dLOS file \n print self.file_name\n np.savetxt(self.file_name, \n np.c_[dlos_actual, dlos_photoz], \n fmt=['%10.5f', '%10.5f'],\n header='Columns : dLOS, dLOS_photoz'\n ) \n\n return None", "def write_scram_toolfiles(self):\n from string import Template\n\n mkdirp(join_path(self.spec.prefix.etc, 'scram.d'))\n\n values = {}\n values['VER'] = self.spec.version\n values['PFX'] = self.spec.prefix\n\n fname = 'uuid-cms.xml'\n template = Template(\"\"\"<tool name=\"uuid\" version=\"$VER\">\n <lib name=\"uuid\"/>\n <client>\n <environment name=\"LIBUUID_BASE\" default=\"$PFX\"/>\n <environment name=\"LIBDIR\" default=\"$$LIBUUID_BASE/lib\"/>\n <environment name=\"INCLUDE\" default=\"$$LIBUUID_BASE/include\"/>\n </client>\n <runtime name=\"ROOT_INCLUDE_PATH\" value=\"$$INCLUDE\" type=\"path\"/>\n <use name=\"root_cxxdefaults\"/>\n <use name=\"sockets\"/>\n</tool>\"\"\")\n\n contents = template.substitute(values)\n self.write_scram_toolfile(contents, fname)\n\n fname = 'libuuid.xml'\n template = Template(\"\"\"<tool name=\"libuuid\" version=\"$VER\">\n <lib name=\"uuid\"/>\n <client>\n <environment name=\"LIBUUID_BASE\" default=\"$PFX\"/>\n <environment name=\"LIBDIR\" default=\"$$LIBUUID_BASE/lib\"/>\n <environment name=\"INCLUDE\" default=\"$$LIBUUID_BASE/include\"/>\n </client>\n <runtime name=\"ROOT_INCLUDE_PATH\" value=\"$$INCLUDE\" type=\"path\"/>\n <use name=\"root_cxxdefaults\"/>\n <use name=\"sockets\"/>\n</tool>\"\"\")\n\n contents = template.substitute(values)\n self.write_scram_toolfile(contents, fname)", "def dict2file(dict, filename, foldername):\n if foldername:\n if not os.path.exists(\"../Created_QD/\" + foldername):\n os.makedirs(\"../Created_QD/\" + foldername)\n file = open(\"../Created_QD/\" + foldername + \"/\" + filename + \".xyz\", \"w\")\n else:\n file = open(\"../Created_QD/\" + filename + \".xyz\", \"w\")\n file.write(\" \\n\\n\")\n for atom, values in dict.items():\n file.write(values['element'] + \"\\t\" + str(values['coor'][0]) + \"\\t\\t\" +\n str(values['coor'][1]) + \"\\t\\t\" + str(values['coor'][2]) + \"\\n\")\n file.seek(0)\n file.write(str(len(dict)))\n file.close()\n print(\"\\nQuantum Dot created :)\")", "def createckfk(self, observer, dbname, t0, field1, nfields, mk): \n\n observerint=self.mpc2internal(observer)\n instrumentint=observerint*1000\n\n with open(\"cksetupfile\", \"w\") as f:\n f.write(\"KPL/IK \\nComments describing the keywords and values \\nto follow, as well as any other pertinent \\ninformation.\\n\\\\begindata\\n\")\n f.write(\"LSK_FILE_NAME = '%s'\\n\" %(mk))\n f.write(\"\\n\")\n f.write(\"INTERNAL_FILE_NAME = 'Survey Sim Camera Orientation'\\n\")\n f.write(\"\\n\")\n f.write(\"MAKE_FAKE_SCLK = 'tmpsclk'\\n\")\n f.write(\"CK_TYPE = 3\\n\")\n f.write(\"CK_SEGMENT_ID = 'Instrument Orientation'\\n\")\n f.write(\"INSTRUMENT_ID = %i \\n\" 
%(instrumentint))\n f.write(\"REFERENCE_FRAME_NAME = 'J2000'\\n\")\n f.write(\"ANGULAR_RATE_PRESENT = 'NO'\\n\")\n f.write(\"\\n\")\n f.write(\"INPUT_DATA_TYPE = 'SPICE QUATERNIONS'\\n\")\n f.write(\"INPUT_TIME_TYPE = 'UTC'\\n\")\n f.write(\"MAXIMUM_VALID_INTERVAL = 60\\n\") \n f.write(\"\\n\")\n f.write(\"PRODUCER_ID = 'Survey Sim, JPL'\\n\")\n f.write(\"\\\\begintext\")\n f.close()\n\n\n self.readfields(dbname,field1,nfields, t0)\n with open(\"ckip\",\"w\") as f:\n\n for i in range(len(self.fieldRA)):\n quat=self.computerotmat(self.fieldRA[i], self.fieldDec[i], self.rotSkyPos[i])\n\n #This helps with duplicate entries. For example enigma_1189 can have same fieldID's under different propID's\n #Issue warning for duplicate time. Have a verbose mode for displaying that (true as default)\n if (self.fieldMJD[i] !=self.fieldMJD[i-1]):\n JD=self.fieldMJD[i]+shared.mjd2jd\n timestring= 'JD'+repr(JD)\n f.write(\"%s %f %f %f %f\\n\" %(timestring,quat[0],quat[1],quat[2],quat[3]))\n f.close()\n try:\n os.system('rm tmp.ck tmpsclk test.ck fakesclk >/dev/null')\n except:\n pass\n os.system('msopck cksetupfile ckip tmp.ck > /dev/null')\n\n os.system('rsync tmpsclk fakesclk > /dev/null')\n os.system('rsync tmp.ck test.ck > /dev/null')\n\n with open(\"tmp.fk\",\"w\") as f:\n f.write(\"\\\\begindata\\n\\n\")\n f.write(\"FRAME_CAMERA_FRAME = %i\\n\" %(instrumentint))\n f.write(\"FRAME_%i_NAME = 'CAMERA_FRAME'\\n\" %(instrumentint))\n f.write(\"FRAME_%i_CLASS = 3\\n\" %(instrumentint))\n f.write(\"FRAME_%i_CLASS_ID = %i\\n\" %(instrumentint, instrumentint))\n f.write(\"FRAME_%i_CENTER = %i\\n\" %(instrumentint, observerint))\n f.write(\"CK_%i_SCLK = %i\\n\" %(instrumentint, observerint))\n f.write(\"CK_%i_SPK = %i\\n\\n\" %(instrumentint, observerint))\n f.write(\"\\\\begintext\\n\")\n f.close()\n \n os.system('rsync tmp.fk test.fk')", "def create_sdxmetadata(sdx_dir, output_dir):\n #define list to store SDX information\n instrument = []\n picks = [] \n phases = []\n \n #segment and store metadata \n #define SDX files to be read\n for root, dirs, files in os.walk(sdx_dir):\n for idx, file in enumerate(files):\n if file.endswith(\".sdx\"):\n \n print(\"Reading File: \" + file)\n \n #define list to store SDX information\n instrument = []\n picks = [] \n phases = []\n \n #scan for pick info\n with open(root + file,\"r\") as f:\n searchlines = f.readlines()\n for i, line in enumerate(searchlines):\n #strip whitespace/end-of-line characters for exact text matching\n line = line.rstrip()\n #find pick info\n if \"pick\" == line:\n for l in searchlines[i:i+16]: \n #print(l)\n #assign pick info/instrument info to variables and store\n instrument_info = searchlines[i+1]\n pick_info = searchlines[i+2]\n phase_info = searchlines[i+9:i+13]\n instrument.append(instrument_info)\n picks.append(pick_info)\n phases.append(phase_info)\n \n #create a .txt file for each seperate event to store pick info\n pathlib.Path(output_dir).mkdir(parents=True, exist_ok=True)\n\n f = open(output_dir + os.path.splitext(file)[0] + \".txt\",'w')\n #header information...\n f.write('Data read from correpsonding SDX file:' + '\\n')\n f.write(file + '\\n\\n')\n f.write('Instrument/component' + '\\t\\t\\t' + 'Pick information' '\\t\\t\\t' + 'Phase information\\n')\n \n # print both instrument and pick information to the \n # associated event file\n for item in zip(instrument, picks, phases):\n \n #remove preceding whitespace/formatting characters\n item0 = item[0].rstrip()\n item1 = item[1].rstrip()\n item2 = list(map(str.strip, item[2]))\n \n 
#remove associated list formatting\n item2 = (\", \".join( str(e) for e in item2))\n\n #print...\n #format | instrument info | pick info | phase info\n f.write(\"%s\\t\\t%s\\t\\t%s\\n\" % (item0,item1,item2))\n \n f.close()", "def dump_to_disk(self, prefix):\n\n f = open(prefix + rpki.sundial.now().isoformat() + \"Z.cms\", \"wb\")\n f.write(self.get_DER())\n f.close()", "def writeCADFile(self, filename):\n valid_filetypes = [\"brep\", \"bstl\", \"egads\", \"egg\", \"iges\", \"igs\", \"sens\", \"step\", \"stl\", \"stp\", \"tess\", \"grid\"]\n file_extension = filename.split(\".\")[-1]\n if file_extension.lower() not in valid_filetypes:\n raise OSError(\n \"CAD filename \"\n + filename\n + \" must have a valid exension. \"\n + \"Consult the EngineeringSketchPad docs for the DUMP function\"\n )\n if self.comm.rank == 0:\n modelCopy = self.espModel.Copy()\n n_branches, _, _ = modelCopy.Info()\n modelCopy.NewBrch(\n n_branches, modelCopy.GetCode(\"dump\"), \"<none>\", 0, filename, \"0\", \"0\", \"0\", \"\", \"\", \"\", \"\", \"\"\n )\n modelCopy.Build(0, 0)", "def _generate_metadata_kind(filename, items, affidavit=None):\n store = appstream.Store('lvfs')\n for item in items:\n\n # add each component\n for md in item.mds:\n component = appstream.Component()\n component.id = md.cid\n component.kind = 'firmware'\n component.name = md.name\n component.summary = md.summary\n component.description = md.description\n if md.url_homepage:\n component.urls['homepage'] = md.url_homepage\n component.metadata_license = md.metadata_license\n component.project_license = md.project_license\n component.developer_name = md.developer_name\n\n # add provide\n for guid in md.guids:\n prov = appstream.Provide()\n prov.kind = 'firmware-flashed'\n prov.value = guid\n component.add_provide(prov)\n\n # add release\n if md.version:\n rel = appstream.Release()\n rel.version = md.version\n rel.description = md.release_description\n if md.release_timestamp:\n rel.timestamp = md.release_timestamp\n rel.checksums = []\n rel.location = app.config['FIRMWARE_BASEURL'] + item.filename\n rel.size_installed = md.release_installed_size\n rel.size_download = md.release_download_size\n rel.urgency = md.release_urgency\n component.add_release(rel)\n\n # add container checksum\n if md.checksum_container:\n csum = appstream.Checksum()\n csum.target = 'container'\n csum.value = md.checksum_container\n csum.filename = item.filename\n rel.add_checksum(csum)\n\n # add content checksum\n if md.checksum_contents:\n csum = appstream.Checksum()\n csum.target = 'content'\n csum.value = md.checksum_contents\n csum.filename = md.filename_contents\n rel.add_checksum(csum)\n\n # add screenshot\n if md.screenshot_caption:\n ss = appstream.Screenshot()\n ss.caption = md.screenshot_caption\n if md.screenshot_url:\n im = appstream.Image()\n im.url = md.screenshot_url\n ss.add_image(im)\n component.add_screenshot(ss)\n\n # add requires for each allowed vendor_ids\n group = db.groups.get_item(item.group_id)\n if group.vendor_ids:\n req = appstream.Require()\n req.kind = 'firmware'\n req.value = 'vendor-id'\n if len(group.vendor_ids) == 1:\n req.compare = 'eq'\n else:\n req.compare = 'regex'\n req.version = '|'.join(group.vendor_ids)\n component.add_require(req)\n\n # add manual firmware or fwupd version requires\n for req_txt in md.requirements:\n split = req_txt.split('/', 4)\n req = appstream.Require()\n req.kind = split[0]\n req.value = split[1]\n req.compare = split[2]\n req.version = split[3]\n component.add_require(req)\n\n # add component\n 
store.add(component)\n\n # dump to file\n download_dir = app.config['DOWNLOAD_DIR']\n if not os.path.exists(download_dir):\n os.mkdir(download_dir)\n filename = os.path.join(download_dir, filename)\n store.to_file(filename)\n\n # upload to the CDN\n blob = open(filename, 'rb').read()\n _upload_to_cdn(filename, blob)\n\n # generate and upload the detached signature\n if affidavit:\n blob_asc = affidavit.create(blob)\n _upload_to_cdn(filename + '.asc', blob_asc)", "def prepare_dataset(sdffile, dest=None, overwrite=False):\n root, name = op.split(sdffile)\n name = op.splitext(name)[0]\n\n if not dest: dest = root\n\n dest_sdf = op.join(dest, name + '-prepared.sdf')\n master_table = op.join(dest, name + '-master.csv')\n sali_table = op.join(dest, name + '-saliviewer.csv')\n\n if op.exists(dest_sdf) and not overwrite:\n print '%s is already there and not overwriting requested' % dest_sdf\n else:\n print 'Reading %s' % sdffile\n mols = list(pybel.readfile('sdf', sdffile))\n\n print '\\tCreating dataset root: %s' % dest\n if not op.exists(dest):\n os.makedirs(dest)\n\n print '\\tRenaming the compounds to keep track of the provenance'\n rename_mols_by_index(mols, name + '-')\n\n print '\\tGenerating conformations'\n for mol in mols:\n if not any(name in mol.title for name in ('train-3988', 'train-4205')):\n try:\n print 'Conformation for %s' % mol.title\n mol.make3D()\n except Exception:\n print 'Error computing a 3D conformation for %s' % mol.title\n\n print '\\tSaving compounds'\n save_mols(mols, dest_sdf)\n\n print '\\tCreating \\\"master\\\" table: %s' % master_table\n create_master_table(dest_sdf, master_table, fields=['Activity'])\n\n print '\\tCreating \\\"saliviewer\\\" table: %s' % sali_table\n create_saliviewer_input(master_table, sali_table)\n\n return dest_sdf, master_table", "def _build_meds_layout(self):\n\n\n nim = self.image_info.size\n nobj = self.obj_data.size\n\n trim_to_coadd = self.get('trim_to_coadd',False)\n if trim_to_coadd:\n print(' trimming to coadd')\n coadd_wcs, coadd_pos, coadd_bnds, coadd_q = \\\n self._get_pos_and_bounds(self.obj_data, 0)\n in_bnds = coadd_bnds.contains_points(coadd_pos['zrow'], coadd_pos['zcol'])\n w_in_bnds, = np.where(in_bnds == True)\n assert w_in_bnds.size > 0,\"none found in coadd\"\n\n w_in_bnds = coadd_q[w_in_bnds]\n self.obj_data = self.obj_data[w_in_bnds]\n\n self._do_psf_setup()\n\n # box sizes are even\n half_box_size = self.obj_data['box_size']//2\n\n for file_id in range(nim):\n\n wcs, pos, bnds, q = self._get_pos_and_bounds(self.obj_data, file_id)\n\n # do the test\n in_bnds = bnds.contains_points(pos['zrow'], pos['zcol'])\n q_rc, = np.where(in_bnds == True)\n print(' second cut: %6d of %6d objects' % (len(q_rc),len(q)))\n\n # now make sure everything is there\n if self['check_in_first_image']:\n if file_id == 0 and len(self.obj_data['ra']) != len(q_rc):\n raise MEDSCreationError('Not all objects were found in first image for '\n 'MEDS making (which is the coadd/detection '\n 'image by convention).')\n # compose them\n q = q[q_rc]\n\n # fill in the object_data structure\n\n # note q_rc since pos was created using obj_data[q]\n qrow = pos['zrow'][q_rc]\n qcol = pos['zcol'][q_rc]\n\n icut = self.obj_data['ncutout'][q]\n self.obj_data['file_id'][q,icut] = file_id\n self.obj_data['orig_row'][q,icut] = qrow\n self.obj_data['orig_col'][q,icut] = qcol\n\n # this results in the object center being close to\n # the natural center (dim-1.)/2.\n ostart_row = qrow.astype('i4') - half_box_size[q] + 1\n ostart_col = qcol.astype('i4') - 
half_box_size[q] + 1\n crow = qrow - ostart_row\n ccol = qcol - ostart_col\n\n self.obj_data['orig_start_row'][q,icut] = ostart_row\n self.obj_data['orig_start_col'][q,icut] = ostart_col\n self.obj_data['cutout_row'][q,icut] = crow\n self.obj_data['cutout_col'][q,icut] = ccol\n\n # do jacobian, in original, not-offset coords\n # note q_rc since pos was created using self.obj_data[q]\n jacob = wcs.get_jacobian(\n x=pos['wcs_col'][q_rc],\n y=pos['wcs_row'][q_rc])\n\n # jacob is a tuple of arrays\n self.obj_data['dudcol'][q,icut] = jacob[0]\n self.obj_data['dudrow'][q,icut] = jacob[1]\n self.obj_data['dvdcol'][q,icut] = jacob[2]\n self.obj_data['dvdrow'][q,icut] = jacob[3]\n\n # increment\n self.obj_data['ncutout'][q] += 1\n\n w,=np.where(self.obj_data['ncutout'] > 0)\n print('%d/%d had ncut > 0' % (w.size, self.obj_data.size))\n #self.obj_data = self.obj_data[w]\n\n self.obj_data = self._make_resized_data(self.obj_data)\n print('setting number field as sequential')\n self.obj_data['number'] = 1+np.arange(self.obj_data.size)\n\n\n self._set_start_rows_and_pixel_count()\n\n if self['survey']=='cosmos':\n self._set_psf_layout_hst()\n else:\n self._set_psf_layout_psfex()", "def export_ctsdg(cfg):\n generator = Generator(\n image_in_channels=config.image_in_channels,\n edge_in_channels=config.edge_in_channels,\n out_channels=config.out_channels\n )\n generator.set_train(False)\n load_checkpoint(cfg.checkpoint_path, generator)\n\n ckpt_path = Path(cfg.checkpoint_path)\n output_file_name = (ckpt_path.parent / ckpt_path.stem).as_posix()\n file_format = config.file_format\n\n img_dummy = mnp.zeros([1, config.image_in_channels, *cfg.image_load_size],\n dtype=mstype.float32)\n edge_dummy = mnp.zeros([1, 2, *cfg.image_load_size], dtype=mstype.float32)\n mask_dummy = mnp.zeros([1, 1, *cfg.image_load_size], dtype=mstype.float32)\n\n export(generator, img_dummy, edge_dummy, mask_dummy,\n file_name=output_file_name, file_format=file_format)\n\n print(f'{output_file_name}.mindir exported successfully!', flush=True)", "def _make_files(self):\n if not self.path.is_dir():\n raise FileNotFoundError(f\"Path {self.path} does not exist.\")\n\n # Make the filepaths\n self.file_points = self.path / \"point.dat\"\n self.file_lines = self.path / \"line.dat\"\n self.file_cadastre = self.path / \"cadastre.dat\"\n self.file_portals = self.path / \"portals.dat\"\n\n with open(self.file_points, \"w\") as f:\n # 2 lines ignored\n header = datetime.datetime.now().strftime(\"Generated: %d/%m/%Y %H:%M\\n\")\n f.write(header)\n self.points_dfs = []\n with open(self.file_lines, \"w\") as f:\n # 5 lines ignored\n header = (\n datetime.datetime.now().strftime(\"Generated: %d/%m/%Y %H:%M\\n\")\n + 3 * \"Generated: \\n\"\n + \"Name,Section,source_group,x1,y1,z1,x2,y2,z2,width,vert. 
ext.,-,-,\"\n \"emission_rate[kg/h/km],-,-,-,-\\n\"\n )\n f.write(header)\n with open(self.file_cadastre, \"w\") as f:\n # 1 line ignored\n header = \"x,y,z,dx,dy,dz,emission_rate[kg/h],-,-,-,source_group\\n\"\n f.write(header)\n with open(self.file_portals, \"w\") as f:\n # 2 lines ignored\n header = (\n datetime.datetime.now().strftime(\"Generated: %d/%m/%Y %H:%M\\n\")\n + \"x1,y1,x2,y2,z0,z1,emission_rate[kg/h],-,-,-,source_group\\n\"\n )\n f.write(header)\n\n\n # File to save the source groups values\n self.file_source_groups = self.path / \"source_groups.json\"\n with open(self.file_source_groups, \"w\") as f:\n # reverse the dict (items become keys and vice versa)\n reversed_source_groups = {v: k for k, v in self.source_groups.items()}\n json.dump(reversed_source_groups, f, indent=2)" ]
[ "0.637841", "0.6184567", "0.6101035", "0.6063938", "0.59966964", "0.5898186", "0.5831032", "0.5792886", "0.56806254", "0.5673659", "0.5642974", "0.56186104", "0.5612226", "0.5589771", "0.55712795", "0.55688566", "0.55426204", "0.5509481", "0.5476108", "0.5453811", "0.5441293", "0.54330695", "0.5431424", "0.5414131", "0.5385509", "0.53829294", "0.5375447", "0.5328336", "0.53228354", "0.5321947" ]
0.7661663
0
write the cutouts for the specified type
def _write_psf_cutouts_hst(self):

        print('writing psf cutouts')

        obj_data=self.obj_data
        psf_data=self.psf_data

        nfile=self.image_info.size
        nobj=obj_data.size

        cutout_hdu = self.fits['psf']

        for iobj in range(nobj):
            if (iobj+1) % 100 == 0:
                print(' %d/%d' % (iobj+1,obj_data.size))

            # HST psf is same for every cutout, in fact ncut should always
            # be 1
            try:
                psf_im = self.psf_data.get_psf(iobj)
            except AttributeError:
                psf_im = None

            ncut=obj_data['ncutout'][iobj]

            for icut in range(ncut):

                if psf_im is None:
                    row = obj_data['orig_row'][iobj, icut]
                    col = obj_data['orig_col'][iobj, icut]
                    file_id = obj_data['file_id'][iobj,icut]

                    p = self.psf_data[file_id]
                    psf_im = p.get_rec(row,col)

                expected_psf_shape = (
                    obj_data['psf_row_size'][iobj,icut],
                    obj_data['psf_col_size'][iobj,icut],
                )

                file_id = obj_data['file_id'][iobj, icut]

                row = obj_data['orig_row'][iobj, icut]
                col = obj_data['orig_col'][iobj, icut]
                start_row = obj_data['psf_start_row'][iobj, icut]

                if psf_im.shape != expected_psf_shape:
                    raise ValueError("psf size mismatch, expected %s "
                                     "got %s" % (expected_psf_shape, psf_im.shape))

                cutout_hdu.write(psf_im, start=start_row)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _write_moleculetype(top_file: IO, mol_name: str, nrexcl: int = 3):\n top_file.write(\"[ moleculetype ]\\n\")\n top_file.write(\"; Name\\tnrexcl\\n\")\n top_file.write(f\"{mol_name}\\t{nrexcl}\\n\\n\")", "def write(self, out):", "def write_output(self):", "def write(self):", "def write(self):", "def writeOutput(self, output):", "def write_readouts(path, dataset_dict, image_list, datasettype, mask_part,\n do_wt1_signal, do_dach1_signal, do_stereology_pred, do_stereology_gt):\n\n titles = []\n for i in range(len(image_list)):\n image_name = os.path.split(image_list[i])[1]\n titles.append(image_name[:-4])\n\n # Segmentation of only 1 class was applied (e.g. glomerulus or podocytes)\n if len(mask_part) == 1:\n mask_el = mask_part.pop()\n\n if mask_el == \"glomerulus\":\n network_area = \"glomerulus_area\"\n # Add a column if GET_WT1_SIGNAL_FOR_GLOMERULUS = True\n if do_wt1_signal:\n df = pd.DataFrame(\n {'image_name': pd.Series(titles),\n network_area: pd.Series(dataset_dict['area_preds_%s' % mask_el]),\n 'mean_WT1_signal_in_glom': pd.Series(dataset_dict['mean_WT1_glom_preds']),\n 'var_WT1_signal_in_glom': pd.Series(dataset_dict['var_WT1_glom_preds']),\n 'median_WT1_signal_in_glom': pd.Series(dataset_dict['median_WT1_glom_preds']),\n 'min_WT1_signal_in_glom': pd.Series(dataset_dict['min_WT1_glom_preds']),\n 'max_WT1_signal_in_glom': pd.Series(dataset_dict['max_WT1_glom_preds']),\n 'perc25_WT1_signal_in_glom': pd.Series(dataset_dict['perc25_WT1_glom_preds']),\n 'perc75_WT1_signal_in_glom': pd.Series(dataset_dict['perc75_WT1_glom_preds'])})\n else:\n df = pd.DataFrame({'image_name': pd.Series(titles),\n network_area: pd.Series(dataset_dict['area_preds_%s' % mask_el])})\n\n elif mask_el == \"podocytes\":\n network_count = \"podocyte_count\"\n network_area = \"podocyte_nuclear_area\"\n # Add a column if GET_DACH1_SIGNAL_FOR_PODOCYTES = True\n if do_dach1_signal:\n df = pd.DataFrame({'image_name': pd.Series(titles),\n network_count: pd.Series(dataset_dict['count_preds_%s' % mask_el]),\n network_area: pd.Series(dataset_dict['area_preds_%s' % mask_el]),\n 'mean_DACH1_signal_in_podo': pd.Series(dataset_dict['mean_DACH1_podo_preds']),\n 'var_DACH1_signal_in_podo': pd.Series(dataset_dict['var_DACH1_podo_preds']),\n 'median_DACH1_signal_in_podo': pd.Series(dataset_dict['median_DACH1_podo_preds']),\n 'min_DACH1_signal_in_podo': pd.Series(dataset_dict['min_DACH1_podo_preds']),\n 'max_DACH1_signal_in_podo': pd.Series(dataset_dict['max_DACH1_podo_preds']),\n 'perc25_DACH1_signal_in_podo': pd.Series(dataset_dict['perc25_DACH1_podo_preds']),\n 'perc75_DACH1_signal_in_podo': pd.Series(dataset_dict['perc75_DACH1_podo_preds'])\n })\n else:\n df = pd.DataFrame({'image_name': pd.Series(titles),\n network_count: pd.Series(dataset_dict['count_preds_%s' % mask_el]),\n network_area: pd.Series(dataset_dict['area_preds_%s' % mask_el])})\n\n else:\n raise ValueError('The name of the segmentation is not known:', mask_el)\n\n savepath = str(os.path.join(path, datasettype + '_Dataframe_' + mask_el))\n df.to_csv(savepath + '.csv')\n df.to_excel(savepath + '.xlsx')\n\n # Segmentation of 2 classes were applied (e.g. 
glomerulus and podocytes)\n elif len(mask_part) == 2:\n df = pd.DataFrame(\n {'image_name': pd.Series(titles),\n \"glomerulus_area\": pd.Series(dataset_dict['area_preds_%s' % mask_part[0]]),\n \"podocyte_count\": pd.Series(dataset_dict['count_preds_%s' % mask_part[1]]),\n \"podocyte_nuclear_area\": pd.Series(dataset_dict['area_preds_%s' % mask_part[1]])})\n\n # Add a column if GET_WT1_SIGNAL_FOR_GLOMERULUS = True\n if do_wt1_signal:\n df['mean_WT1_signal_in_glom'] = dataset_dict['mean_WT1_glom_preds']\n df['var_WT1_signal_in_glom'] = dataset_dict['var_WT1_glom_preds']\n df['median_WT1_signal_in_glom'] = dataset_dict['median_WT1_glom_preds']\n df['min_WT1_signal_in_glom'] = dataset_dict['min_WT1_glom_preds']\n df['max_WT1_signal_in_glom'] = dataset_dict['max_WT1_glom_preds']\n df['perc25_WT1_signal_in_glom'] = dataset_dict['perc25_WT1_glom_preds']\n df['perc75_WT1_signal_in_glom'] = dataset_dict['perc75_WT1_glom_preds']\n\n # Add a column if GET_DACH1_SIGNAL_FOR_PODOCYTES = True\n if do_dach1_signal:\n df['mean_DACH1_signal_in_podo'] = dataset_dict['mean_DACH1_podo_preds']\n df['var_DACH1_signal_in_podo'] = dataset_dict['var_DACH1_podo_preds']\n df['median_DACH1_signal_in_podo'] = dataset_dict['median_DACH1_podo_preds']\n df['min_DACH1_signal_in_podo'] = dataset_dict['min_DACH1_podo_preds']\n df['max_DACH1_signal_in_podo'] = dataset_dict['max_DACH1_podo_preds']\n df['perc25_DACH1_signal_in_podo'] = dataset_dict['perc25_DACH1_podo_preds']\n df['perc75_DACH1_signal_in_podo'] = dataset_dict['perc75_DACH1_podo_preds']\n\n if do_stereology_pred:\n stereo_dict = get_stereology_readouts(dataset_dict, mask_part, titles, mode='pred')\n # Add it to df\n df['stereology_on_prediction-glomerular_volume_per_million'] = stereo_dict[\"glomerular_volume_per_million\"]\n df['stereology_on_prediction-podocyte_count'] = stereo_dict[\"podocyte_count\"]\n df['stereology_on_prediction-podocyte_density'] = stereo_dict[\"podocyte_density\"]\n\n if do_stereology_gt:\n stereo_dict = get_stereology_readouts(dataset_dict, mask_part, titles, mode='gt')\n # Add it to df\n df['stereology_on_groundtruth-glomerular_volume_per_million'] = stereo_dict[\"glomerular_volume_per_million\"]\n df['stereology_on_groundtruth-podocyte_count'] = stereo_dict[\"podocyte_count\"]\n df['stereology_on_groundtruth-podocyte_density'] = stereo_dict[\"podocyte_density\"]\n\n savepath = str(os.path.join(path, datasettype + '_Dataframe_' + mask_part[0] + mask_part[1]))\n df.to_csv(savepath + '.csv')\n df.to_excel(savepath + '.xlsx')\n return", "def writetif(self,outputname,):\n pass", "def write(self, out):\r\n out.write('# {0:<11} {1:<6} {2:<6} {3:<6} {4}\\n'\r\n .format('Time(s)', 'X(mm)', 'Y(mm)', 'Z(um)', 'Tile'))\r\n for i in self: out.write(self.format_pt(i))", "def write_report(report, ftype):\n if ftype == 'text':\n msg = '{} disks have been removed\\n'.format(len(report))\n msg += 'To replace them, run:\\n'\n for device, action_args in report.items():\n args = json.dumps(action_args, separators=(' ', '='))\n args = args.replace('{', '').replace('}', '').replace('\"', '')\n msg += 'juju run-action {} add-disk {} {}'.format(\n hookenv.local_unit(), 'osd-devices=' + device, args)\n else:\n msg = json.dumps(report)\n\n hookenv.action_set({'message': msg})", "def do_write(self, args):\n\t\tasplit = args.split(\" \")\n\t\tfname = asplit[0]\n\t\twhat = asplit[1]\n\n\t\tif what == \"summary\" or what == \"oldsummary\":\n\t\t\twith open(fname, 'w') as f:\n\t\t\t\tform = DresherInterface.summary_format if what == \"summary\" else 
DresherInterface.oldsummary_format\n\t\t\t\tfor i, x in enumerate(form):\n\t\t\t\t\tf.write(x)\n\t\t\t\t\tif i == len(form)-1:\n\t\t\t\t\t\tf.write(\"\\n\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t#for lang in sorted(self.languages, key = lambda l: len(l._phones.keys())):\n\t\t\t\t#\tdw.writerow(dict(zip(form, [self.get_language_info(lang, x) for x in form])))\n\t\t\t\tfor lang in sorted(self.languages, key = lambda l: len(l._phones.keys())):\n\t\t\t\t\tfor i, x in enumerate(form):\n\t\t\t\t\t\tf.write(str(self.get_language_info(lang, x)))\n\t\t\t\t\t\tif i == len(form)-1:\n\t\t\t\t\t\t\tf.write(\"\\n\")\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tf.write(\"\\t\")\n\t\tif what == \"hierarchies\":\n\t\t\t# format: #vowels, langname, hierarchy, len(hier), #of marks, lfeats, inv, freq, \n\t\t\t# how many times each feat marked, the actual marks, vowel:feature set, unused features\n\t\t\t# take fname to be name of directory to write outfiles to\n\t\t\tif not os.path.exists(fname):\n\t\t\t\tos.mkdir(fname)\n\t\t\tfor lang in self.languages:\n\t\t\t\tnum_vowels = self.get_language_info(lang, \"linv\")\n\t\t\t\tname = lang.name\n\t\t\t\tnum_feats = self.get_language_info(lang, \"lfeats\")\n\t\t\t\tinv = self.get_language_info(lang, \"inv\")\n\t\t\t\tfreq = self.get_language_info(lang, \"freq\")\n\t\t\t\tinv_feats = lang.phone_feat_dict\n\t\t\t\twith open(os.path.join(fname,name.replace(\" \",\"\")+\".txt\"), 'w') as f:\n\t\t\t\t\tf.write(\"num_vowels\\tname\\thierarchy\\tlen_hier\\tnum_marks\\tnumfeats\\tinv\\tfreq\\tfeat_marks\\tinv_marks\\tinv_feats\\tunused_feats\\n\")\n\t\t\t\t\tfor h in lang.hierarchies:\n\t\t\t\t\t\tf.write(str(num_vowels))\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tf.write(name)\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tf.write(str(h))\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tf.write(str(len(h)))\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tspec = SDA(lang._phones, lang._features, h)\n\t\t\t\t\t\tmarkedness = sum([x for phone in spec.keys() for x in spec[phone] if x == 1])\n\t\t\t\t\t\tf.write(str(markedness))\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tf.write(str(num_feats))\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tf.write(str(inv))\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tf.write(str(freq))\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tfeat_counts = {f:sum([spec[phone][i] for phone in spec.keys() if spec[phone][i] == 1]) for i, f in enumerate(h)}\n\t\t\t\t\t\tf.write(str(feat_counts))\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tf.write(str(spec))\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tf.write(str(inv_feats))\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tf.write(str(list(set(lang._features)-set(h))))\n\t\t\t\t\t\tf.write(\"\\n\")\n\t\t# make sure all the threads that need to be finished have finished\n\t\t# using .join() on the appropriate groups of threads", "def to_logchunk(self):\n\t\tdemo_name = os.path.splitext(self.demo_name)[0]\n\t\tto_write = [(\"Killstreak\", value, tick, date) for value, tick, date in self.killstreaks]\n\t\tto_write.extend((\"Bookmark\", value, tick, date) for value, tick, date in self.bookmarks)\n\n\t\tto_write.sort(key = lambda t: t[2])\n\n\t\treturn \"\\n\".join(\n\t\t\tf'[{date}] {type_} {value} (\"{demo_name}\" at {tick})'\n\t\t\tfor type_, value, tick, date in to_write\n\t\t)", "def write_output(self,content):\n text=\"\"\"# typ eta phi pt jmass ntrk btag had/em dummy dummy\\n\"\"\"\n self.output.writelines(text)\n text=\"0 \"+str(self.nb_data)+\" \"+str(len(content))+\"\\n\"\n self.output.writelines(text)\n\n i=1\n for particle in 
content:\n text=str(i)+' '+particle.lhco_line()+'\\n'\n self.output.writelines(text)\n i+=1", "def writeOut(self):\n # import time\n self.outHeader = self.srcHeader\n for line in self.outHeader:\n self.outFile.write(line + '\\n')\n # now = time.asctime(time.localtime(time.time()))\n # self.outFile.write('%% -- %s -- Written to new alog' % now)\n for time_s in sorted(self.outData):\n for sens in self.outData[time_s]:\n for meas in self.outData[time_s][sens]:\n valu = self.outData[time_s][sens][meas]\n msg_list = [str(time_s), meas, sens, str(valu)]\n line_string = reconstructLine(msg_list)\n self.outFile.write(line_string + '\\n')", "def dump_cuts_list(self, file_name):\n assert(file_name is not None)\n with open(file_name, 'w') as fd:\n uids = self._cuts.keys()\n uids.sort()\n for cut_uid in uids:\n cut = self._cuts[cut_uid]\n fd.write(cut.get_cost_var() + \" \" + str(cut.get_cost()) + \"\\n\")\n return", "def write_out(c2ptmk, ofn):\n print \"Writing out to [{}]\".format(ofn)\n with codecs.open(ofn, \"w\", \"utf8\") as ofd:\n for co, infos in sorted(c2ptmk.items()):\n ofd.write(u\"{}\\t{}\\t{}\\n\".format(\n co, infos[\"uri\"], \",\".join(\n [unicode(x) for x in infos[\"ptmks\"]])))", "def write_run(run):\n r=Run(run)\n r.write_all()", "def write_analysis(path, dataset_dict, datasettype, mask_part, start_time, supervised=True):\n for mask_el in mask_part:\n if mask_el == 'podocytes':\n filename = datasettype + '_podos.txt'\n filestr = 'podos images'\n elif mask_el == 'glomerulus':\n filename = datasettype + '_gloms.txt'\n filestr = 'gloms images'\n else:\n filename = datasettype + 'unknown.txt'\n filestr = 'unknown type'\n\n write_txt = open(str(os.path.join(path, filename)), \"w\")\n\n if supervised:\n dc_mean = np.sum(np.array(dataset_dict['dice_coeffs_%s' % mask_el])) / len(dataset_dict['dice_coeffs_%s'\n % mask_el])\n dc_min = np.min(np.array(dataset_dict['dice_coeffs_%s' % mask_el]))\n dc_max = np.max(np.array(dataset_dict['dice_coeffs_%s' % mask_el]))\n object_dc_mean = np.sum(np.array(dataset_dict['object_dc_%s' % mask_el])) / len(dataset_dict['object_dc_%s'\n % mask_el])\n object_dc_min = np.min(np.array(dataset_dict['object_dc_%s' % mask_el]))\n object_dc_max = np.max(np.array(dataset_dict['object_dc_%s' % mask_el]))\n pearson = calculate_pearson(dataset_dict['count_masks_%s' % mask_el], dataset_dict['count_preds_%s'\n % mask_el])\n\n write_txt.write(str(\"Mean dice coefficient on pixels of \" + filestr + \" compared to groundtruth: \") +\n str(dc_mean) + '\\n')\n write_txt.write(str(\"Min dice coefficient on pixels of \" + filestr + \" compared to groundtruth: \") +\n str(dc_min) + '\\n')\n write_txt.write(str(\"Max dice coefficient on pixels of \" + filestr + \" compared to groundtruth: \") +\n str(dc_max) + '\\n')\n write_txt.write(str(\"Pearson correlation coefficient on objects of \" + filestr +\n \" compared to groundtruth: \") + str(pearson) + '\\n')\n write_txt.write(str(\"Mean dice coeff on objects of \" + filestr + \" compared to groundtruth: \") +\n str(object_dc_mean) + '\\n')\n write_txt.write(str(\"Min dice coeff on objects of \" + filestr + \" compared to groundtruth: \") +\n str(object_dc_min) + '\\n')\n write_txt.write(str(\"Max dice coeff on objects of \" + filestr + \" compared to groundtruth: \") +\n str(object_dc_max) + '\\n')\n write_txt.write('\\n')\n\n duration = time.time() - start_time\n duration_std = int(duration / 3600)\n duration_min = int((duration % 3600) / 60)\n duration_sec = int(duration % 60)\n\n write_txt.write(str(\"Test time: \") + 
str(duration_std) + \"h \" + str(duration_min)\n + \"min \" + str(duration_sec) + 'sec \\n')\n write_txt.close()\n return", "def writeClumptoDump(self,ID):\n clumpxyz = self.clumpcat[ID][:3]\n r2 = (self.disc.xyzh[0]-clumpxyz[0])**2 + (self.disc.xyzh[1]-clumpxyz[1])**2\n members = np.sqrt(r2) < self.annulus #members are all particles within radial annulus\n\n gas = self.disc.itype == 1\n dust = self.disc.itype == 2\n\n dustfrac = self.disc.dustfrac*1e8 #as I originally set dust-to-gas=1e-10\n\n #Calculate temperatures from thermal energies\n k = 1.38064852e-16 #ergs\n mH = 1.6735575e-24 #grams\n gmw = 2.381 #mean mass taken from Phantom\n N = sum(gas*self.disc.massofgas)*self.umass/mH/gmw #number of atoms\n temp = 2.*self.disc.utherm*self.uenerg/3./k/N\n\n\t\tutime = self.utime/(60*60*24*365.25)\n\n\t\t#create arrays of particle masses\n\t\tmass = np.zeros(len(self.disc.xyzh[0,:]))\n\t\tmass[self.disc.itype == 1] = self.disc.massofgas\n\t\tmass[self.disc.itype == 2] = self.disc.massofdust\n\n\t\tclumpdata = zip(self.disc.xyzh[0,members], self.disc.xyzh[1,members], self.disc.xyzh[2,members], \n self.disc.xyzh[3,members], self.disc.density[members], mass[members], \n temp[members], dustfrac[members], self.disc.itype[members])\n\t\tclumpdata = np.asarray(clumpdata)\n\t\theader = (\"time: %s utime (yrs^-1): %s \\n x, y, z, h, density, mass, temp, \" %(str(self.disc.time), str(utime)) +\n\t\t\t \"dustfrac, itype \\n %s, %s, %s, %s, %s, %s, 0.0, 0.0, 0.0 \\n \\n\" %(str(self.udist), str(self.udist),\n\t\t\t \t\t\t\t\t\t\t\t str(self.udist), str(self.udist),\n\t\t\t\t\t\t\t\t\t\t\t str(self.udens), str(self.umass)))\n\t\tnp.savetxt('%s/clumpfiles/clumpdata_%.0f.txt' %(self.wd,self.disc.time), clumpdata, header=header)", "def write_pc_cards(bc_file, bc_class):\n bc_file.write('! 
Output Control\\n')\n oc = bc_class.output_control\n objects = list(oc.param.output_control_option.get_range())\n if oc.output_control_option == objects[0]:\n bc_file.write('OC {}\\n'.format(oc.oc_time_series_id))\n ofs = oc.output_flow_strings\n if not ofs.empty:\n bc_file.write(ofs.to_csv(sep=' ', na_rep='', index=False, header=False,).replace('\\r\\n', '\\n'))\n\n if oc.print_adaptive_mesh:\n bc_file.write('PC ADP\\n')\n if oc.print_numerical_fish_surrogate:\n bc_file.write('PC ELM\\n')\n if oc.screen_output_residual:\n bc_file.write('SOUT RESID\\n')\n if oc.screen_output_all:\n bc_file.write('SOUT ALL\\n')\n if oc.screen_output_mass_error:\n bc_file.write('SOUT MERROR\\n')\n if oc.screen_output_worst_nonlinear_node:\n bc_file.write('SOUT NLNODE\\n')\n if oc.screen_output_worst_linear_node:\n bc_file.write('SOUT LNODE\\n')\n if oc.file_output_wind:\n bc_file.write('FOUT WIND\\n')\n if oc.file_output_wave:\n bc_file.write('FOUT WAVE\\n')\n if oc.file_output_adapted_grid:\n bc_file.write('FOUT ADAPT GRID\\n')\n if oc.file_output_adapted_solution:\n bc_file.write('FOUT ADAPT SW\\n')\n if oc.file_output_adapted_transport:\n bc_file.write('FOUT ADAPT CON\\n')\n if oc.file_output_sediment:\n bc_file.write('FOUT SED\\n')\n\n bc_file.write('\\n') # blank line after Output Control", "def write_tcv(self):\n suffix = '_'+str(self.shot)+'_'+str(int(self.t*1e3))\n self.write_input(suffix=suffix)", "def write(self, file_name, *, file_type=None, precision=None, write_patch_results=False):\n self.logger.info('Writing KG correlations to %s',file_name)\n precision = self.config.get('precision', 4) if precision is None else precision\n name = 'main' if write_patch_results else None\n with make_writer(file_name, precision, file_type, self.logger) as writer:\n self._write(writer, name, write_patch_results)", "def sitofp(self, typ):", "def write_fits(self, name=None, output_path=None):\n pass", "def write_to(self, stream: StreamWrapper):\n stream.write_int(len(self.moves))\n for element in self.moves:\n element.write_to(stream)\n stream.write_int(len(self.buildings))\n for element in self.buildings:\n element.write_to(stream)\n if self.choose_specialty is None:\n stream.write_bool(False)\n else:\n stream.write_bool(True)\n stream.write_int(self.choose_specialty)", "def write(self, outfile):\n outfile.write(\n '\\t'.join(\n [\n str(i) for i in [\n self.chrom, self.start, self.end, self.name,\n self.count, self.fold_change, self.log10p\n ]\n ]\n )\n )\n outfile.write('\\n')", "def write_output_files(self, file_type, output, expected):\n actual_filename = self.output_filename(self.FILENAME_SUFFIX_ACTUAL + file_type)\n expected_filename = self.output_filename(self.FILENAME_SUFFIX_EXPECTED + file_type)\n\n self._write_file(actual_filename, output)\n self._write_file(expected_filename, expected)", "def export_summary(\n self,\n output_dir=None,\n solution_name=None,\n type=\"Object\",\n geometryType=\"Volume\",\n quantity=\"Temperature\",\n variation=\"\",\n variationlist=[],\n ):\n all_objs = list(self.modeler.oeditor.GetObjectsInGroup(\"Solids\"))\n all_objs_NonModeled = list(self.modeler.oeditor.GetObjectsInGroup(\"Non Model\"))\n all_objs_model = [item for item in all_objs if item not in all_objs_NonModeled]\n arg = []\n self.logger.glb.info(\"Objects lists \" + str(all_objs_model))\n for el in all_objs_model:\n try:\n self.osolution.EditFieldsSummarySetting(\n [\"Calculation:=\", [type, geometryType, el, quantity, \"\", \"Default\"]]\n )\n arg.append(\"Calculation:=\")\n arg.append([type, geometryType, el, 
quantity, \"\", \"Default\"])\n except Exception as e:\n self.logger.glb.error(\"Object \" + el + \" not added.\")\n self.logger.glb.error(str(e))\n if not output_dir:\n output_dir = self.project_path\n self.osolution.EditFieldsSummarySetting(arg)\n if not os.path.exists(output_dir):\n os.mkdir(output_dir)\n if not solution_name:\n solution_name = self.nominal_sweep\n if variation:\n for l in variationlist:\n self.osolution.ExportFieldsSummary(\n [\n \"SolutionName:=\",\n solution_name,\n \"DesignVariationKey:=\",\n variation + \"='\" + str(l) + \"'\",\n \"ExportFileName:=\",\n os.path.join(output_dir, \"IPKsummaryReport\" + quantity + \"_\" + str(l) + \".csv\"),\n ]\n )\n else:\n self.osolution.ExportFieldsSummary(\n [\n \"SolutionName:=\",\n solution_name,\n \"DesignVariationKey:=\",\n \"\",\n \"ExportFileName:=\",\n os.path.join(output_dir, \"IPKsummaryReport\" + quantity + \".csv\"),\n ]\n )\n return True", "def write(self):\n pass", "def write(self):\n pass" ]
[ "0.55175173", "0.5354752", "0.53262687", "0.52530247", "0.52530247", "0.5227527", "0.5203227", "0.5147186", "0.5113114", "0.5088805", "0.5083531", "0.5081513", "0.5079073", "0.5070704", "0.5031757", "0.50116146", "0.49944216", "0.4976218", "0.49679434", "0.4965163", "0.49638355", "0.49420896", "0.49406436", "0.49382824", "0.4930062", "0.49161944", "0.49101913", "0.49077344", "0.49075034", "0.49075034" ]
0.5732155
0
set the box sizes and start row for each psf image
def _set_psf_layout_hst(self):

        print('setting psf layout for HST')

        obj_data=self.obj_data

        total_psf_pixels = 0

        psf_start_row = 0
        for iobj in range(obj_data.size):
            if (iobj+1) % 100 == 0:
                print(' %d/%d' % (iobj+1,obj_data.size))

            # note assuming same psf for all "epochs"
            psf_im = self.psf_data.get_psf(iobj)

            psf_shape = psf_im.shape
            psf_npix = psf_im.size

            cen = (np.array(psf_shape)-1.0)/2.0

            # we will expand the psfs

            for icut in range(obj_data['ncutout'][iobj]):

                obj_data['psf_row_size'][iobj,icut] = psf_shape[0]
                obj_data['psf_col_size'][iobj,icut] = psf_shape[1]
                obj_data['psf_cutout_row'][iobj,icut] = cen[0]
                obj_data['psf_cutout_col'][iobj,icut] = cen[1]
                obj_data['psf_start_row'][iobj,icut] = psf_start_row

                psf_start_row += psf_npix
                total_psf_pixels += psf_npix

        self.total_psf_pixels = total_psf_pixels
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_psf_layout_psfex(self):\n\n print('setting psf layout for PSFEx')\n\n obj_data=self.obj_data\n psf_data=self.psf_data\n\n total_psf_pixels = 0\n\n #psf_npix = psf_size*psf_size\n\n psf_start_row = 0\n for iobj in range(obj_data.size):\n for icut in range(obj_data['ncutout'][iobj]):\n\n row = obj_data['orig_row'][iobj, icut]\n col = obj_data['orig_col'][iobj, icut]\n file_id = obj_data['file_id'][iobj,icut]\n\n p = psf_data[file_id]\n\n pim = p.get_rec(row,col)\n cen = p.get_center(row,col)\n\n psf_shape = pim.shape\n psf_npix = pim.size\n\n obj_data['psf_row_size'][iobj,icut] = psf_shape[0]\n obj_data['psf_col_size'][iobj,icut] = psf_shape[1]\n obj_data['psf_cutout_row'][iobj,icut] = cen[0]\n obj_data['psf_cutout_col'][iobj,icut] = cen[1]\n obj_data['psf_start_row'][iobj,icut] = psf_start_row\n\n psf_start_row += psf_npix\n total_psf_pixels += psf_npix\n\n\n self.total_psf_pixels = total_psf_pixels", "def generate_boxes(self, img):\r\n return [Box(left, top, img) for (left, top) in self.coords]", "def build_filler_images(self):", "def draw_boxes(self, im, boxes):\n for bbox in boxes:\n l = [int(x) for x in bbox[\"coords\"]]\n l = self.scalebox(l)\n icon = self.classes_to_icons[bbox[\"label\"]]\n overlay_im_to_background(im, icon, l[0], l[1] - icon.shape[0] - 5)\n cv2.rectangle(im,(l[0],l[1]),(l[2],l[3]),self.color,2)", "def makeImage(self):\n\n for row in range(self.height):\n self.makeRow(row)\n self.window.update() # display a row of pixels", "def _get_boxes(self, idx):\n # Load Image\n path = self.df.hsi_path.iloc[idx]\n im = self._load_im(path)\n\n # Crop out box\n r_box_im = self.df.width.iloc[idx] / im.shape[-1] # Ratio of RGB im box coords to load im width (e.g., r=10 for hsi)\n box = np.array([self.df.ymin.iloc[idx], self.df.ymax.iloc[idx], self.df.xmin.iloc[idx], self.df.xmax.iloc[idx]])\n box = np.around(box / r_box_im).astype(int)\n\n crop_im = im[:, box[0]:box[1]+1, box[2]:box[3]+1]\n if np.any(np.array(crop_im.shape) == 0):\n print('[WARNING] Loaded box has zero shape and is sketchily inflated. 
TODO: skip this box with ID', idx)\n if box[0] == self.df.width.iloc[idx]/r_box_im: box[0] -= 1\n if box[2] == self.df.height.iloc[idx]/r_box_im: box[2] -= 1\n crop_im = im[:, box[0]:box[1]+1, box[2]:box[3]+1]\n\n target = {}\n target[\"labels\"] = self.df.class_id.iloc[idx]\n target[\"uid\"] = self.df.uid.iloc[idx]\n \n\n return crop_im, target", "def _get_box_sizes(self, image_info, cat):\n\n\n file_id=0\n impath=image_info['image_path'][file_id].strip()\n ext=image_info['image_ext'][file_id]\n wcs_data = fitsio.read_header(impath, ext=ext)\n wcs = eu.wcsutil.WCS(wcs_data)\n\n\n jacob = wcs.get_jacobian(100,100)\n dudcol, dudrow, dvdcol, dvdrow = jacob\n\n det = dvdrow*dudcol - dvdcol*dudrow\n pixel_scale = np.sqrt(abs(det))\n print('found pixel scale:',pixel_scale)\n box_size = cat['box_size_arcsec']/pixel_scale\n\n # clip to range\n box_size.clip(\n min=self['min_box_size'],\n max=self['max_box_size'],\n out=box_size,\n )\n box_size = box_size.astype('i4')\n\n w,=np.where( ( (box_size % 2) != 0 ) )\n if w.size > 0:\n box_size[w] += 1\n\n return box_size", "def setBoxsize(length,width,height):\n return length,width,height", "def draw_boxs(img,boxs,width=3,color=(0,0,255)):\n box_img = copy.deepcopy(img)\n for i in range(boxs.shape[0]):\n # x1,y1,x2,y2=boxs[i]\n x1 = boxs[i][0]\n y1 = boxs[i][1]\n x2 = boxs[i][2]\n y2 = boxs[i][3]\n p1 = (int(round(x1)),int(round(y1)))\n p2 = (int(round(x2)),int(round(y2)))\n cv2.rectangle(box_img, p1, p2, color, width)\n\n return box_img", "def setUp(self):\r\n center = [1,1]\r\n size = 1\r\n self.imageinfo = ImageInfo(center, size)", "def setUp(self):\r\n center = [1,1]\r\n size = 1\r\n self.imageinfo = ImageInfo(center, size)", "def setUp(self):\r\n center = [1,1]\r\n size = 1\r\n self.imageinfo = ImageInfo(center, size)", "def setUp(self):\r\n center = [1,1]\r\n size = 1\r\n self.imageinfo = ImageInfo(center, size)", "def setUp(self):\r\n center = [1,1]\r\n size = 1\r\n self.imageinfo = ImageInfo(center, size)", "def __init__(self, nb_sub_images, window_size, recovery, image_horiz_size):\n self.nb_sub_images = nb_sub_images\n self.window_size = window_size\n self.recovery = recovery\n self.image_horiz_size = image_horiz_size", "def _resize_bboxes(self, results):\n img_shape = results['img_shape']\n for key in results.get('bbox_fields', []):\n bboxes = []\n for box in results[key]:\n tmp_box = np.array(box, dtype=np.float32)\n tmp_box[0::2] *= results['scale_factor'][0]\n tmp_box[1::2] *= results['scale_factor'][1]\n if self.bbox_clip_border:\n tmp_box[0::2] = np.clip(tmp_box[0::2], 0, img_shape[1])\n tmp_box[1::2] = np.clip(tmp_box[1::2], 0, img_shape[0])\n bboxes.append(tmp_box)\n if len(results[key]) > 0:\n results[key] = bboxes", "def _resize_bboxes(self, results):\n for key in results.get('bbox_fields', []):\n bboxes = results[key] * results['scale_factor']\n if self.bbox_clip_border:\n img_shape = results['img_shape']\n bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1])\n bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0])\n results[key] = bboxes", "def _resize_bboxes(self, results):\n for key in results.get('bbox_fields', []):\n bboxes = results[key] * results['scale_factor']\n if self.bbox_clip_border:\n img_shape = results['img_shape']\n bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1])\n bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0])\n results[key] = bboxes", "def set_box(self) -> None:\n from pymol import cmd\n\n # Delete Box object in PyMOL\n if \"box\" in cmd.get_names(\"selections\"):\n 
cmd.delete(\"box\")\n # Get dimensions of selected residues\n selection = \"sele\"\n if selection in cmd.get_names(\"selections\"):\n ([min_x, min_y, min_z], [max_x, max_y, max_z]) = cmd.get_extent(selection)\n else:\n ([min_x, min_y, min_z], [max_x, max_y, max_z]) = cmd.get_extent(\"\")\n \n # Get center of each dimension (x, y, z)\n self.x = (min_x + max_x) / 2\n self.y = (min_y + max_y) / 2\n self.z = (min_z + max_z) / 2\n\n # Set Box variables in interface\n self.min_x.setValue(round(self.x - (min_x - self.padding.value()), 1))\n self.max_x.setValue(round((max_x + self.padding.value()) - self.x, 1))\n self.min_y.setValue(round(self.y - (min_y - self.padding.value()), 1))\n self.max_y.setValue(round((max_y + self.padding.value()) - self.y, 1))\n self.min_z.setValue(round(self.z - (min_z - self.padding.value()), 1))\n self.max_z.setValue(round((max_z + self.padding.value()) - self.z, 1))\n self.angle1.setValue(0)\n self.angle2.setValue(0)\n\n # Setting background box values\n self.min_x_set = self.min_x.value()\n self.max_x_set = self.max_x.value()\n self.min_y_set = self.min_y.value()\n self.max_y_set = self.max_y.value()\n self.min_z_set = self.min_z.value()\n self.max_z_set = self.max_z.value()\n self.angle1_set = self.angle1.value()\n self.angle2_set = self.angle2.value()\n self.padding_set = self.padding.value()\n\n # Draw box\n self.draw_box()\n\n # Enable/Disable buttons\n self.button_draw_box.setEnabled(False)\n self.button_redraw_box.setEnabled(True)\n self.min_x.setEnabled(True)\n self.min_y.setEnabled(True)\n self.min_z.setEnabled(True)\n self.max_x.setEnabled(True)\n self.max_y.setEnabled(True)\n self.max_z.setEnabled(True)\n self.angle1.setEnabled(True)\n self.angle2.setEnabled(True)", "def analyzeImages(path, name_type, box1_size, box2_size, box3_size, box4_size, box5_size):\n \n folders = [f for f in sorted(glob.glob(path + \"/**\"))]\n \n for folder in folders: \n \n # to save this data frame in a csv file\n \n files = [f for f in sorted(glob.glob(folder + \"/**\" + \".jpg\"))]\n \n centroidsDf = pd.DataFrame(np.zeros([len(files), 11]),columns=[\"frame\", \"fly1_x\", \"fly1_y\", \"fly2_x\", \"fly2_y\", \"fly3_x\", \"fly3_y\", \"fly4_x\", \"fly4_y\", \"fly5_x\", \"fly5_y\"]);\n headsDf = pd.DataFrame(np.zeros([len(files), 11]),columns=[\"frame\", \"fly1_x\", \"fly1_y\", \"fly2_x\", \"fly2_y\", \"fly3_x\", \"fly3_y\", \"fly4_x\", \"fly4_y\", \"fly5_x\", \"fly5_y\"]);\n \n img_array1 = []\n img_array2 = []\n img_array3 = []\n img_array4 = []\n img_array5 = []\n\n for file in files:\n \n print(file)\n \n centroidsDf[\"frame\"][files.index(file)] = files.index(file)+1\n headsDf[\"frame\"][files.index(file)] = files.index(file)+1\n \n img = cv2.imread(file)\n \n ## FLY 1 ##\n\n box1 = img[box1_size[0]:box1_size[1], box1_size[2]:box1_size[3]]\n \n # image processing to get the centroid and head positions\n x_centroid, y_centroid, x_head, y_head = findCentroid(box1, file) \n \n # add the centroid and head locations on the image \n box1 = cv2.circle(box1, (x_centroid, y_centroid), 7, (255, 0, 0), -1)\n box1 = cv2.circle(box1, (x_head, y_head), 7, (0, 0, 255), -1)\n img_array1.append(box1)\n \n # add the positions in the final data frame\n centroidsDf[\"fly1_x\"][files.index(file)] = x_centroid\n centroidsDf[\"fly1_y\"][files.index(file)] = y_centroid\n \n headsDf[\"fly1_x\"][files.index(file)] = x_head\n headsDf[\"fly1_y\"][files.index(file)] = y_head\n \n ## FLY 2 ##\n \n box2 = img[box2_size[0]:box2_size[1], box2_size[2]:box2_size[3]]\n \n # image processing to get the centroid 
and head positions\n x_centroid, y_centroid, x_head, y_head = findCentroid(box2, file)\n \n # add the centroid and head locations on the image \n box2 = cv2.circle(box2, (x_centroid, y_centroid), 7, (255, 0, 0), -1)\n box2 = cv2.circle(box2, (x_head, y_head), 7, (0, 0, 255), -1)\n img_array2.append(box2)\n \n # add the positions in the final data frame \n centroidsDf[\"fly2_x\"][files.index(file)] = x_centroid\n centroidsDf[\"fly2_y\"][files.index(file)] = y_centroid\n \n headsDf[\"fly2_x\"][files.index(file)] = x_head\n headsDf[\"fly2_y\"][files.index(file)] = y_head\n \n ## FLY 3 ##\n\n box3 = img[box3_size[0]:box3_size[1], box3_size[2]:box3_size[3]]\n \n # image processing to get the centroid and head positions\n x_centroid, y_centroid, x_head, y_head = findCentroid(box3, file)\n \n # add the centroid and head locations on the image \n box3 = cv2.circle(box3, (x_centroid, y_centroid), 7, (255, 0, 0), -1)\n box3 = cv2.circle(box3, (x_head, y_head), 7, (0, 0, 255), -1)\n img_array3.append(box3)\n\n # add the positions in the final data frame\n centroidsDf[\"fly3_x\"][files.index(file)] = x_centroid\n centroidsDf[\"fly3_y\"][files.index(file)] = y_centroid\n \n headsDf[\"fly3_x\"][files.index(file)] = x_head\n headsDf[\"fly3_y\"][files.index(file)] = y_head\n \n ## FLY 4 ##\n \n box4 = img[box4_size[0]:box4_size[1], box4_size[2]:box4_size[3]]\n \n # image processing to get the centroid and head positions\n x_centroid, y_centroid, x_head, y_head = findCentroid(box4, file)\n \n # add the centroid and head locations on the image \n box4 = cv2.circle(box4, (x_centroid, y_centroid), 7, (255, 0, 0), -1)\n box4 = cv2.circle(box4, (x_head, y_head), 7, (0, 0, 255), -1)\n img_array4.append(box4)\n \n # add the positions in the final data frame\n centroidsDf[\"fly4_x\"][files.index(file)] = x_centroid\n centroidsDf[\"fly4_y\"][files.index(file)] = y_centroid\n \n headsDf[\"fly4_x\"][files.index(file)] = x_head\n headsDf[\"fly4_y\"][files.index(file)] = y_head\n \n ## FLY 5 ##\n \n # the fifth fly is not present in all the genetic strains \n if (box5_size != []):\n box5 = img[box5_size[0]:box5_size[1], box5_size[2]:box5_size[3]]\n \n # image processing to get the centroid and head positions\n x_centroid, y_centroid, x_head, y_head = findCentroid(box5, file)\n \n # add the centroid and head locations on the image \n box5 = cv2.circle(box5, (x_centroid, y_centroid), 7, (255, 0, 0), -1)\n box5 = cv2.circle(box5, (x_head, y_head), 7, (0, 0, 255), -1)\n img_array5.append(box5)\n \n # add the positions in the final data frame\n centroidsDf[\"fly5_x\"][files.index(file)] = x_centroid\n centroidsDf[\"fly5_y\"][files.index(file)] = y_centroid\n \n headsDf[\"fly5_x\"][files.index(file)] = x_head\n headsDf[\"fly5_y\"][files.index(file)] = y_head\n \n # save the data frame in a .csv file, \n # one for the centroids and one for the heads\n #centroidsDf.to_csv(folder+\"/centroids.csv\", index = None, header=True)\n #headsDf.to_csv(folder+\"/heads.csv\", index = None, header=True)\n \n \n ## CREATE THE VIDEOS ##\n \n height, width, _ = box1.shape\n size = (width,height)\n out = cv2.VideoWriter(folder+name_type+'_1_' + str(folders.index(folder)+1)+ '.mp4',cv2.VideoWriter_fourcc(*'DIVX'), 1.5, size)\n for i in range(len(img_array1)):\n out.write(img_array1[i])\n out.release()\n \n height, width, _ = box2.shape\n size = (width,height)\n out = cv2.VideoWriter(folder+name_type+'_2_' + str(folders.index(folder)+1)+ '.mp4' ,cv2.VideoWriter_fourcc(*'DIVX'), 1.5, size)\n for i in range(len(img_array2)):\n 
out.write(img_array2[i])\n out.release()\n \n height, width, _ = box3.shape\n size = (width,height)\n out = cv2.VideoWriter(folder+name_type+'_3_' + str(folders.index(folder)+1)+ '.mp4' ,cv2.VideoWriter_fourcc(*'DIVX'), 1.5, size)\n for i in range(len(img_array3)):\n out.write(img_array3[i])\n out.release()\n \n height, width, _ = box4.shape\n size = (width,height)\n out = cv2.VideoWriter(folder+name_type+'_4_' + str(folders.index(folder)+1)+ '.mp4' ,cv2.VideoWriter_fourcc(*'DIVX'), 1.5, size)\n for i in range(len(img_array4)):\n out.write(img_array4[i])\n out.release()\n \n if (box5_size != []):\n height, width, _ = box5.shape\n size = (width,height)\n out = cv2.VideoWriter(folder+name_type+'_5_' + str(folders.index(folder)+1)+ '.mp4' ,cv2.VideoWriter_fourcc(*'DIVX'), 1.5, size)\n for i in range(len(img_array5)):\n out.write(img_array5[i])\n out.release()", "def _iter_images_rects(self):\n image_x = self._margin\n image_y = self._margin\n total_width = self.width - 2 * self._margin\n total_height = self.height - self._texts_height - 2 * self._margin\n\n if len(self._images) == 1:\n image_width = total_width\n image_height = total_height\n elif 2 <= len(self._images) < 4:\n if self.is_portrait:\n image_width = total_width\n image_height = (total_height - (len(self._images) - 1) * self._margin) // len(self._images)\n else:\n image_width = (total_width - (len(self._images) - 1) * self._margin) // len(self._images)\n image_height = total_height\n else:\n image_width = (total_width - self._margin) // 2\n image_height = (total_height - self._margin) // 2\n\n yield image_x, image_y, image_width, image_height\n\n if 2 <= len(self._images) < 4:\n if self.is_portrait:\n image_y += image_height + self._margin\n else:\n image_x += image_width + self._margin\n yield image_x, image_y, image_width, image_height\n\n if 3 <= len(self._images) < 4:\n if self.is_portrait:\n image_y += image_height + self._margin\n else:\n image_x += image_width + self._margin\n yield image_x, image_y, image_width, image_height\n\n if len(self._images) == 4:\n image_x += image_width + self._margin\n yield image_x, image_y, image_width, image_height\n image_y += image_height + self._margin\n image_x = self._margin\n yield image_x, image_y, image_width, image_height\n image_x += image_width + self._margin\n yield image_x, image_y, image_width, image_height", "def _assign_sizes(self):", "def create_human_box(self, i):\n self.box = self.detections[0, 0, i, 3:7] * np.array([self.w, self.h, self.w, self.h])\n (self.startX, self.startY, self.endX, self.endY) = self.box.astype(\"int\")", "def __init__(self, path_image, path_imagefile, path_bndboxfile, transform):\r\n # -------------------- DATA ARGUMENT\r\n self.shape = 446\r\n self.hue = 0.1\r\n self.saturation = 1.5\r\n self.exposure = 1.5\r\n self.imagelist = []\r\n self.labellist = []\r\n self.transform = transform\r\n label_dir = os.listdir(path_bndboxfile)\r\n image_dir = os.listdir(path_imagefile)\r\n\r\n # read imagepath\r\n for file in image_dir:\r\n file_name = os.path.join(path_imagefile, file)\r\n with open(file_name) as f:\r\n lines = f.readlines()\r\n for line in lines:\r\n image_name = line.split()[0] + '.JPEG'\r\n image = os.path.join(path_image, image_name)\r\n self.imagelist.append(image)\r\n\r\n # read imagelabel, i.e, (name, xmin, xmax, ymin, ymax)\r\n for file in label_dir:\r\n if file.split('.')[1] == 'xml':\r\n file_name = os.path.join(path_bndboxfile, file)\r\n with open(file_name) as f:\r\n xml_tree = parse(f).documentElement\r\n objects = 
xml_tree.getElementsByTagName('object')\r\n for object in objects:\r\n label = []\r\n name = object.getElementsByTagName('name')[0]\r\n label.append(name.childNodes[0].data)\r\n bndbox = object.getElementsByTagName('bndbox')[0]\r\n for node in bndbox.childNodes:\r\n if node.nodeType == node.ELEMENT_NODE:\r\n label.append(node.childNodes[0].data)\r\n self.labellist.append(label)\r\n else:\r\n print('Expect files in xml format. but get {}'.format(file.split('.')[1]))", "def _images_and_boxes_preprocessing(self, imgs, boxes):\r\n # Image [0, 255] -> [0, 1].\r\n imgs = imgs.float()\r\n imgs = imgs / 255.0\r\n\r\n height, width = imgs.shape[2], imgs.shape[3]\r\n # The format of boxes is [x1, y1, x2, y2]. The input boxes are in the\r\n # range of [0, 1].\r\n boxes[:, [0, 2]] *= width\r\n boxes[:, [1, 3]] *= height\r\n boxes = transform.clip_boxes_to_image(boxes, height, width)\r\n\r\n if self._split == \"train\":\r\n # Train split\r\n imgs, boxes = transform.random_short_side_scale_jitter(\r\n imgs,\r\n min_size=self._jitter_min_scale,\r\n max_size=self._jitter_max_scale,\r\n boxes=boxes,\r\n )\r\n imgs, boxes = transform.random_crop(imgs, self._crop_size, boxes=boxes)\r\n\r\n # Random flip.\r\n imgs, boxes = transform.horizontal_flip(0.5, imgs, boxes=boxes)\r\n elif self._split == \"val\":\r\n # Val split\r\n # Resize short side to crop_size. Non-local and STRG uses 256.\r\n imgs, boxes = transform.random_short_side_scale_jitter(\r\n imgs, min_size=self._crop_size, max_size=self._crop_size, boxes=boxes\r\n )\r\n\r\n # Apply center crop for val split\r\n imgs, boxes = transform.uniform_crop(\r\n imgs, size=self._crop_size, spatial_idx=1, boxes=boxes\r\n )\r\n\r\n if self._test_force_flip:\r\n imgs, boxes = transform.horizontal_flip(1, imgs, boxes=boxes)\r\n elif self._split == \"test\":\r\n # Test split\r\n # Resize short side to crop_size. 
Non-local and STRG uses 256.\r\n imgs, boxes = transform.random_short_side_scale_jitter(\r\n imgs, min_size=self._crop_size, max_size=self._crop_size, boxes=boxes\r\n )\r\n\r\n if self._test_force_flip:\r\n imgs, boxes = transform.horizontal_flip(1, imgs, boxes=boxes)\r\n else:\r\n raise NotImplementedError(\"{} split not supported yet!\".format(self._split))\r\n\r\n # Do color augmentation (after divided by 255.0).\r\n if self._split == \"train\" and self._use_color_augmentation:\r\n if not self._pca_jitter_only:\r\n imgs = transform.color_jitter(\r\n imgs, img_brightness=0.4, img_contrast=0.4, img_saturation=0.4\r\n )\r\n\r\n imgs = transform.lighting_jitter(\r\n imgs,\r\n alphastd=0.1,\r\n eigval=np.array(self._pca_eigval).astype(np.float32),\r\n eigvec=np.array(self._pca_eigvec).astype(np.float32),\r\n )\r\n\r\n # Normalize images by mean and std.\r\n imgs = transform.color_normalization(\r\n imgs,\r\n np.array(self._data_mean, dtype=np.float32),\r\n np.array(self._data_std, dtype=np.float32),\r\n )\r\n\r\n if self._use_bgr:\r\n # Convert image format from RGB to BGR.\r\n # Note that Kinetics pre-training uses RGB!\r\n imgs = imgs[:, [2, 1, 0], ...]\r\n\r\n boxes = transform.clip_boxes_to_image(boxes, self._crop_size, self._crop_size)\r\n\r\n return imgs, boxes", "def _resize_cbboxes(self, results):\n img_shape = results['img_shape']\n for key in results.get('cbbox_fields', []):\n cbboxes = []\n for cbox in results[key]:\n tmp_cbox = np.array(cbox, dtype=np.float32)\n new_tmp_cbox = []\n for ccbox in tmp_cbox:\n ccbox = np.array(ccbox, dtype=np.float32)\n ccbox[0::2] *= results['scale_factor'][0]\n ccbox[1::2] *= results['scale_factor'][1]\n new_tmp_cbox.append(ccbox)\n tmp_cbox = np.array(new_tmp_cbox, dtype=np.float32)\n if self.bbox_clip_border:\n tmp_cbox[:, 0::2] = np.clip(tmp_cbox[:, 0::2], 0, img_shape[1])\n tmp_cbox[:, 1::2] = np.clip(tmp_cbox[:, 1::2], 0, img_shape[0])\n cbboxes.append(tmp_cbox)\n results[key] = cbboxes", "def drawBox (self, left, top, width, height, colour):\r\n w = self.bih_vals [bih_Width]\r\n h = self.bih_vals [bih_Height]\r\n\r\n cols = [left, left + width - 1]\r\n rows = [top, top + height - 1]\r\n \r\n x0 = max ((0,left))\r\n x1 = min ((cols[1]+1, w))\r\n y0 = max ((0,top))\r\n y1 = min ((rows [1]+1, h))\r\n\r\n # rows\r\n\r\n for r in rows:\r\n if r >= 0 and r < h:\r\n row = self.image [r]\r\n for x in range (x0, x1):\r\n row [x] = colour\r\n\r\n # columns\r\n \r\n for y in range (y0, y1):\r\n row = self.image [y]\r\n for c in cols:\r\n if c >= 0 and c < w :\r\n row [c] = colour", "def OnSize(self, event):\r\n\r\n for pos, item in self._items.items():\r\n widget, horizontalalignment, verticalalignment = item.widget, item.horizontalalignment, item.verticalalignment\r\n\r\n rect = self.GetFieldRect(pos)\r\n widgetpos = widget.GetPosition()\r\n widgetsize = widget.GetSize()\r\n\r\n rect = self.GetFieldRect(pos)\r\n\r\n if horizontalalignment == ESB_EXACT_FIT:\r\n if verticalalignment == ESB_EXACT_FIT:\r\n widget.SetSize((rect.width-2, rect.height-2))\r\n widget.SetPosition((rect.x-1, rect.y-1))\r\n elif verticalalignment == ESB_ALIGN_CENTER_VERTICAL:\r\n if widgetsize[1] < rect.width - 1:\r\n diffs = (rect.height - widgetsize[1])/2\r\n widget.SetSize((rect.width-2, widgetsize[1]))\r\n widget.SetPosition((rect.x-1, rect.y+diffs))\r\n else:\r\n widget.SetSize((rect.width-2, widgetsize[1]))\r\n widget.SetPosition((rect.x-1, rect.y-1))\r\n elif verticalalignment == ESB_ALIGN_TOP:\r\n widget.SetSize((rect.width-2, widgetsize[1]))\r\n 
widget.SetPosition((rect.x-1, rect.y))\r\n elif verticalalignment == ESB_ALIGN_BOTTOM:\r\n widget.SetSize((rect.width-2, widgetsize[1]))\r\n widget.SetPosition((rect.x-1, rect.height-widgetsize[1]))\r\n\r\n elif horizontalalignment == ESB_ALIGN_LEFT:\r\n\r\n xpos = rect.x - 1\r\n if verticalalignment == ESB_EXACT_FIT:\r\n widget.SetSize((widgetsize[0], rect.height-2))\r\n widget.SetPosition((xpos, rect.y-1))\r\n elif verticalalignment == ESB_ALIGN_CENTER_VERTICAL:\r\n if widgetsize[1] < rect.height - 1:\r\n diffs = (rect.height - widgetsize[1])/2\r\n widget.SetPosition((xpos, rect.y+diffs))\r\n else:\r\n widget.SetSize((widgetsize[0], rect.height-2))\r\n widget.SetPosition((xpos, rect.y-1))\r\n elif verticalalignment == ESB_ALIGN_TOP:\r\n widget.SetPosition((xpos, rect.y))\r\n elif verticalalignment == ESB_ALIGN_BOTTOM:\r\n widget.SetPosition((xpos, rect.height-widgetsize[1]))\r\n\r\n elif horizontalalignment == ESB_ALIGN_RIGHT:\r\n\r\n xpos = rect.x + rect.width - widgetsize[0] - 1\r\n if verticalalignment == ESB_EXACT_FIT:\r\n widget.SetSize((widgetsize[0], rect.height-2))\r\n widget.SetPosition((xpos, rect.y-1))\r\n elif verticalalignment == ESB_ALIGN_CENTER_VERTICAL:\r\n if widgetsize[1] < rect.height - 1:\r\n diffs = (rect.height - widgetsize[1])/2\r\n widget.SetPosition((xpos, rect.y+diffs))\r\n else:\r\n widget.SetSize((widgetsize[0], rect.height-2))\r\n widget.SetPosition((xpos, rect.y-1))\r\n elif verticalalignment == ESB_ALIGN_TOP:\r\n widget.SetPosition((xpos, rect.y))\r\n elif verticalalignment == ESB_ALIGN_BOTTOM:\r\n widget.SetPosition((xpos, rect.height-widgetsize[1]))\r\n\r\n elif horizontalalignment == ESB_ALIGN_CENTER_HORIZONTAL:\r\n\r\n xpos = rect.x + (rect.width - widgetsize[0])/2 - 1\r\n if verticalalignment == ESB_EXACT_FIT:\r\n widget.SetSize((widgetsize[0], rect.height))\r\n widget.SetPosition((xpos, rect.y))\r\n elif verticalalignment == ESB_ALIGN_CENTER_VERTICAL:\r\n if widgetsize[1] < rect.height - 1:\r\n diffs = (rect.height - widgetsize[1])/2\r\n widget.SetPosition((xpos, rect.y+diffs))\r\n else:\r\n widget.SetSize((widgetsize[0], rect.height-1))\r\n widget.SetPosition((xpos, rect.y+1))\r\n elif verticalalignment == ESB_ALIGN_TOP:\r\n widget.SetPosition((xpos, rect.y))\r\n elif verticalalignment == ESB_ALIGN_BOTTOM:\r\n widget.SetPosition((xpos, rect.height-widgetsize[1]))\r\n\r\n if event is not None:\r\n event.Skip()", "def recreate_grid(self):\n\n self.print_numlist = arcade.SpriteList()\n for row in range(ROW_COUNT):\n for column in range(COLUMN_COUNT):\n sprite = arcade.Sprite(\n f\"Numbers/{self.grid[row][column]}.png\", scale=0.2\n )\n x = (MARGIN + WIDTH) * column + MARGIN + WIDTH // 2\n y = (MARGIN + HEIGHT) * row + MARGIN + HEIGHT // 2\n sprite.center_x = x\n sprite.center_y = y\n self.print_numlist.append(sprite)\n # Check to see if all squares have been filled in\n if 0 not in self.grid:\n # if Cameron.Check_for_Completion(self.grid) == True:\n self.done = True", "def _draw_boxes(self, image, boxes, classes, thickness=4):\n for i in range(len(boxes)):\n bot, left, top, right = boxes[i, ...]\n class_id = int(classes[i]) - 1\n color = self.COLOR_LIST[class_id]\n cv2.rectangle(image, (left, top), (right, bot), color=color, thickness=thickness)" ]
[ "0.6761654", "0.6085298", "0.6009164", "0.58949316", "0.58541226", "0.58446145", "0.5767578", "0.5712032", "0.5706543", "0.5674811", "0.5674811", "0.5674811", "0.5674811", "0.5674811", "0.5668548", "0.5650026", "0.5615289", "0.5615289", "0.5593645", "0.55755764", "0.55713177", "0.55709535", "0.5566756", "0.5561757", "0.55550766", "0.55439216", "0.55324256", "0.5499715", "0.5493037", "0.5491024" ]
0.62900573
1
set the box sizes and start row for each psf image
def _set_psf_layout_psfex(self):
    print('setting psf layout for PSFEx')

    obj_data=self.obj_data
    psf_data=self.psf_data

    total_psf_pixels = 0

    #psf_npix = psf_size*psf_size

    psf_start_row = 0
    for iobj in range(obj_data.size):
        for icut in range(obj_data['ncutout'][iobj]):

            row = obj_data['orig_row'][iobj, icut]
            col = obj_data['orig_col'][iobj, icut]
            file_id = obj_data['file_id'][iobj,icut]

            p = psf_data[file_id]

            pim = p.get_rec(row,col)
            cen = p.get_center(row,col)

            psf_shape = pim.shape
            psf_npix = pim.size

            obj_data['psf_row_size'][iobj,icut] = psf_shape[0]
            obj_data['psf_col_size'][iobj,icut] = psf_shape[1]
            obj_data['psf_cutout_row'][iobj,icut] = cen[0]
            obj_data['psf_cutout_col'][iobj,icut] = cen[1]
            obj_data['psf_start_row'][iobj,icut] = psf_start_row

            psf_start_row += psf_npix
            total_psf_pixels += psf_npix

    self.total_psf_pixels = total_psf_pixels
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_psf_layout_hst(self):\n\n print('setting psf layout for HST')\n obj_data=self.obj_data\n\n total_psf_pixels = 0\n psf_start_row = 0\n\n for iobj in range(obj_data.size):\n if (iobj+1) % 100 == 0:\n print(' %d/%d' % (iobj+1,obj_data.size))\n\n # note assuming same psf for all \"epochs\"\n psf_im = self.psf_data.get_psf(iobj)\n\n psf_shape = psf_im.shape\n psf_npix = psf_im.size\n\n cen = (np.array(psf_shape)-1.0)/2.0\n\n # we will expand the psfs\n\n for icut in range(obj_data['ncutout'][iobj]):\n\n obj_data['psf_row_size'][iobj,icut] = psf_shape[0]\n obj_data['psf_col_size'][iobj,icut] = psf_shape[1]\n obj_data['psf_cutout_row'][iobj,icut] = cen[0]\n obj_data['psf_cutout_col'][iobj,icut] = cen[1]\n obj_data['psf_start_row'][iobj,icut] = psf_start_row\n\n psf_start_row += psf_npix\n total_psf_pixels += psf_npix\n\n self.total_psf_pixels = total_psf_pixels", "def generate_boxes(self, img):\r\n return [Box(left, top, img) for (left, top) in self.coords]", "def build_filler_images(self):", "def draw_boxes(self, im, boxes):\n for bbox in boxes:\n l = [int(x) for x in bbox[\"coords\"]]\n l = self.scalebox(l)\n icon = self.classes_to_icons[bbox[\"label\"]]\n overlay_im_to_background(im, icon, l[0], l[1] - icon.shape[0] - 5)\n cv2.rectangle(im,(l[0],l[1]),(l[2],l[3]),self.color,2)", "def makeImage(self):\n\n for row in range(self.height):\n self.makeRow(row)\n self.window.update() # display a row of pixels", "def _get_boxes(self, idx):\n # Load Image\n path = self.df.hsi_path.iloc[idx]\n im = self._load_im(path)\n\n # Crop out box\n r_box_im = self.df.width.iloc[idx] / im.shape[-1] # Ratio of RGB im box coords to load im width (e.g., r=10 for hsi)\n box = np.array([self.df.ymin.iloc[idx], self.df.ymax.iloc[idx], self.df.xmin.iloc[idx], self.df.xmax.iloc[idx]])\n box = np.around(box / r_box_im).astype(int)\n\n crop_im = im[:, box[0]:box[1]+1, box[2]:box[3]+1]\n if np.any(np.array(crop_im.shape) == 0):\n print('[WARNING] Loaded box has zero shape and is sketchily inflated. 
TODO: skip this box with ID', idx)\n if box[0] == self.df.width.iloc[idx]/r_box_im: box[0] -= 1\n if box[2] == self.df.height.iloc[idx]/r_box_im: box[2] -= 1\n crop_im = im[:, box[0]:box[1]+1, box[2]:box[3]+1]\n\n target = {}\n target[\"labels\"] = self.df.class_id.iloc[idx]\n target[\"uid\"] = self.df.uid.iloc[idx]\n \n\n return crop_im, target", "def _get_box_sizes(self, image_info, cat):\n\n\n file_id=0\n impath=image_info['image_path'][file_id].strip()\n ext=image_info['image_ext'][file_id]\n wcs_data = fitsio.read_header(impath, ext=ext)\n wcs = eu.wcsutil.WCS(wcs_data)\n\n\n jacob = wcs.get_jacobian(100,100)\n dudcol, dudrow, dvdcol, dvdrow = jacob\n\n det = dvdrow*dudcol - dvdcol*dudrow\n pixel_scale = np.sqrt(abs(det))\n print('found pixel scale:',pixel_scale)\n box_size = cat['box_size_arcsec']/pixel_scale\n\n # clip to range\n box_size.clip(\n min=self['min_box_size'],\n max=self['max_box_size'],\n out=box_size,\n )\n box_size = box_size.astype('i4')\n\n w,=np.where( ( (box_size % 2) != 0 ) )\n if w.size > 0:\n box_size[w] += 1\n\n return box_size", "def setBoxsize(length,width,height):\n return length,width,height", "def draw_boxs(img,boxs,width=3,color=(0,0,255)):\n box_img = copy.deepcopy(img)\n for i in range(boxs.shape[0]):\n # x1,y1,x2,y2=boxs[i]\n x1 = boxs[i][0]\n y1 = boxs[i][1]\n x2 = boxs[i][2]\n y2 = boxs[i][3]\n p1 = (int(round(x1)),int(round(y1)))\n p2 = (int(round(x2)),int(round(y2)))\n cv2.rectangle(box_img, p1, p2, color, width)\n\n return box_img", "def setUp(self):\r\n center = [1,1]\r\n size = 1\r\n self.imageinfo = ImageInfo(center, size)", "def setUp(self):\r\n center = [1,1]\r\n size = 1\r\n self.imageinfo = ImageInfo(center, size)", "def setUp(self):\r\n center = [1,1]\r\n size = 1\r\n self.imageinfo = ImageInfo(center, size)", "def setUp(self):\r\n center = [1,1]\r\n size = 1\r\n self.imageinfo = ImageInfo(center, size)", "def setUp(self):\r\n center = [1,1]\r\n size = 1\r\n self.imageinfo = ImageInfo(center, size)", "def __init__(self, nb_sub_images, window_size, recovery, image_horiz_size):\n self.nb_sub_images = nb_sub_images\n self.window_size = window_size\n self.recovery = recovery\n self.image_horiz_size = image_horiz_size", "def _resize_bboxes(self, results):\n img_shape = results['img_shape']\n for key in results.get('bbox_fields', []):\n bboxes = []\n for box in results[key]:\n tmp_box = np.array(box, dtype=np.float32)\n tmp_box[0::2] *= results['scale_factor'][0]\n tmp_box[1::2] *= results['scale_factor'][1]\n if self.bbox_clip_border:\n tmp_box[0::2] = np.clip(tmp_box[0::2], 0, img_shape[1])\n tmp_box[1::2] = np.clip(tmp_box[1::2], 0, img_shape[0])\n bboxes.append(tmp_box)\n if len(results[key]) > 0:\n results[key] = bboxes", "def _resize_bboxes(self, results):\n for key in results.get('bbox_fields', []):\n bboxes = results[key] * results['scale_factor']\n if self.bbox_clip_border:\n img_shape = results['img_shape']\n bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1])\n bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0])\n results[key] = bboxes", "def _resize_bboxes(self, results):\n for key in results.get('bbox_fields', []):\n bboxes = results[key] * results['scale_factor']\n if self.bbox_clip_border:\n img_shape = results['img_shape']\n bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1])\n bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0])\n results[key] = bboxes", "def set_box(self) -> None:\n from pymol import cmd\n\n # Delete Box object in PyMOL\n if \"box\" in cmd.get_names(\"selections\"):\n 
cmd.delete(\"box\")\n # Get dimensions of selected residues\n selection = \"sele\"\n if selection in cmd.get_names(\"selections\"):\n ([min_x, min_y, min_z], [max_x, max_y, max_z]) = cmd.get_extent(selection)\n else:\n ([min_x, min_y, min_z], [max_x, max_y, max_z]) = cmd.get_extent(\"\")\n \n # Get center of each dimension (x, y, z)\n self.x = (min_x + max_x) / 2\n self.y = (min_y + max_y) / 2\n self.z = (min_z + max_z) / 2\n\n # Set Box variables in interface\n self.min_x.setValue(round(self.x - (min_x - self.padding.value()), 1))\n self.max_x.setValue(round((max_x + self.padding.value()) - self.x, 1))\n self.min_y.setValue(round(self.y - (min_y - self.padding.value()), 1))\n self.max_y.setValue(round((max_y + self.padding.value()) - self.y, 1))\n self.min_z.setValue(round(self.z - (min_z - self.padding.value()), 1))\n self.max_z.setValue(round((max_z + self.padding.value()) - self.z, 1))\n self.angle1.setValue(0)\n self.angle2.setValue(0)\n\n # Setting background box values\n self.min_x_set = self.min_x.value()\n self.max_x_set = self.max_x.value()\n self.min_y_set = self.min_y.value()\n self.max_y_set = self.max_y.value()\n self.min_z_set = self.min_z.value()\n self.max_z_set = self.max_z.value()\n self.angle1_set = self.angle1.value()\n self.angle2_set = self.angle2.value()\n self.padding_set = self.padding.value()\n\n # Draw box\n self.draw_box()\n\n # Enable/Disable buttons\n self.button_draw_box.setEnabled(False)\n self.button_redraw_box.setEnabled(True)\n self.min_x.setEnabled(True)\n self.min_y.setEnabled(True)\n self.min_z.setEnabled(True)\n self.max_x.setEnabled(True)\n self.max_y.setEnabled(True)\n self.max_z.setEnabled(True)\n self.angle1.setEnabled(True)\n self.angle2.setEnabled(True)", "def analyzeImages(path, name_type, box1_size, box2_size, box3_size, box4_size, box5_size):\n \n folders = [f for f in sorted(glob.glob(path + \"/**\"))]\n \n for folder in folders: \n \n # to save this data frame in a csv file\n \n files = [f for f in sorted(glob.glob(folder + \"/**\" + \".jpg\"))]\n \n centroidsDf = pd.DataFrame(np.zeros([len(files), 11]),columns=[\"frame\", \"fly1_x\", \"fly1_y\", \"fly2_x\", \"fly2_y\", \"fly3_x\", \"fly3_y\", \"fly4_x\", \"fly4_y\", \"fly5_x\", \"fly5_y\"]);\n headsDf = pd.DataFrame(np.zeros([len(files), 11]),columns=[\"frame\", \"fly1_x\", \"fly1_y\", \"fly2_x\", \"fly2_y\", \"fly3_x\", \"fly3_y\", \"fly4_x\", \"fly4_y\", \"fly5_x\", \"fly5_y\"]);\n \n img_array1 = []\n img_array2 = []\n img_array3 = []\n img_array4 = []\n img_array5 = []\n\n for file in files:\n \n print(file)\n \n centroidsDf[\"frame\"][files.index(file)] = files.index(file)+1\n headsDf[\"frame\"][files.index(file)] = files.index(file)+1\n \n img = cv2.imread(file)\n \n ## FLY 1 ##\n\n box1 = img[box1_size[0]:box1_size[1], box1_size[2]:box1_size[3]]\n \n # image processing to get the centroid and head positions\n x_centroid, y_centroid, x_head, y_head = findCentroid(box1, file) \n \n # add the centroid and head locations on the image \n box1 = cv2.circle(box1, (x_centroid, y_centroid), 7, (255, 0, 0), -1)\n box1 = cv2.circle(box1, (x_head, y_head), 7, (0, 0, 255), -1)\n img_array1.append(box1)\n \n # add the positions in the final data frame\n centroidsDf[\"fly1_x\"][files.index(file)] = x_centroid\n centroidsDf[\"fly1_y\"][files.index(file)] = y_centroid\n \n headsDf[\"fly1_x\"][files.index(file)] = x_head\n headsDf[\"fly1_y\"][files.index(file)] = y_head\n \n ## FLY 2 ##\n \n box2 = img[box2_size[0]:box2_size[1], box2_size[2]:box2_size[3]]\n \n # image processing to get the centroid 
and head positions\n x_centroid, y_centroid, x_head, y_head = findCentroid(box2, file)\n \n # add the centroid and head locations on the image \n box2 = cv2.circle(box2, (x_centroid, y_centroid), 7, (255, 0, 0), -1)\n box2 = cv2.circle(box2, (x_head, y_head), 7, (0, 0, 255), -1)\n img_array2.append(box2)\n \n # add the positions in the final data frame \n centroidsDf[\"fly2_x\"][files.index(file)] = x_centroid\n centroidsDf[\"fly2_y\"][files.index(file)] = y_centroid\n \n headsDf[\"fly2_x\"][files.index(file)] = x_head\n headsDf[\"fly2_y\"][files.index(file)] = y_head\n \n ## FLY 3 ##\n\n box3 = img[box3_size[0]:box3_size[1], box3_size[2]:box3_size[3]]\n \n # image processing to get the centroid and head positions\n x_centroid, y_centroid, x_head, y_head = findCentroid(box3, file)\n \n # add the centroid and head locations on the image \n box3 = cv2.circle(box3, (x_centroid, y_centroid), 7, (255, 0, 0), -1)\n box3 = cv2.circle(box3, (x_head, y_head), 7, (0, 0, 255), -1)\n img_array3.append(box3)\n\n # add the positions in the final data frame\n centroidsDf[\"fly3_x\"][files.index(file)] = x_centroid\n centroidsDf[\"fly3_y\"][files.index(file)] = y_centroid\n \n headsDf[\"fly3_x\"][files.index(file)] = x_head\n headsDf[\"fly3_y\"][files.index(file)] = y_head\n \n ## FLY 4 ##\n \n box4 = img[box4_size[0]:box4_size[1], box4_size[2]:box4_size[3]]\n \n # image processing to get the centroid and head positions\n x_centroid, y_centroid, x_head, y_head = findCentroid(box4, file)\n \n # add the centroid and head locations on the image \n box4 = cv2.circle(box4, (x_centroid, y_centroid), 7, (255, 0, 0), -1)\n box4 = cv2.circle(box4, (x_head, y_head), 7, (0, 0, 255), -1)\n img_array4.append(box4)\n \n # add the positions in the final data frame\n centroidsDf[\"fly4_x\"][files.index(file)] = x_centroid\n centroidsDf[\"fly4_y\"][files.index(file)] = y_centroid\n \n headsDf[\"fly4_x\"][files.index(file)] = x_head\n headsDf[\"fly4_y\"][files.index(file)] = y_head\n \n ## FLY 5 ##\n \n # the fifth fly is not present in all the genetic strains \n if (box5_size != []):\n box5 = img[box5_size[0]:box5_size[1], box5_size[2]:box5_size[3]]\n \n # image processing to get the centroid and head positions\n x_centroid, y_centroid, x_head, y_head = findCentroid(box5, file)\n \n # add the centroid and head locations on the image \n box5 = cv2.circle(box5, (x_centroid, y_centroid), 7, (255, 0, 0), -1)\n box5 = cv2.circle(box5, (x_head, y_head), 7, (0, 0, 255), -1)\n img_array5.append(box5)\n \n # add the positions in the final data frame\n centroidsDf[\"fly5_x\"][files.index(file)] = x_centroid\n centroidsDf[\"fly5_y\"][files.index(file)] = y_centroid\n \n headsDf[\"fly5_x\"][files.index(file)] = x_head\n headsDf[\"fly5_y\"][files.index(file)] = y_head\n \n # save the data frame in a .csv file, \n # one for the centroids and one for the heads\n #centroidsDf.to_csv(folder+\"/centroids.csv\", index = None, header=True)\n #headsDf.to_csv(folder+\"/heads.csv\", index = None, header=True)\n \n \n ## CREATE THE VIDEOS ##\n \n height, width, _ = box1.shape\n size = (width,height)\n out = cv2.VideoWriter(folder+name_type+'_1_' + str(folders.index(folder)+1)+ '.mp4',cv2.VideoWriter_fourcc(*'DIVX'), 1.5, size)\n for i in range(len(img_array1)):\n out.write(img_array1[i])\n out.release()\n \n height, width, _ = box2.shape\n size = (width,height)\n out = cv2.VideoWriter(folder+name_type+'_2_' + str(folders.index(folder)+1)+ '.mp4' ,cv2.VideoWriter_fourcc(*'DIVX'), 1.5, size)\n for i in range(len(img_array2)):\n 
out.write(img_array2[i])\n out.release()\n \n height, width, _ = box3.shape\n size = (width,height)\n out = cv2.VideoWriter(folder+name_type+'_3_' + str(folders.index(folder)+1)+ '.mp4' ,cv2.VideoWriter_fourcc(*'DIVX'), 1.5, size)\n for i in range(len(img_array3)):\n out.write(img_array3[i])\n out.release()\n \n height, width, _ = box4.shape\n size = (width,height)\n out = cv2.VideoWriter(folder+name_type+'_4_' + str(folders.index(folder)+1)+ '.mp4' ,cv2.VideoWriter_fourcc(*'DIVX'), 1.5, size)\n for i in range(len(img_array4)):\n out.write(img_array4[i])\n out.release()\n \n if (box5_size != []):\n height, width, _ = box5.shape\n size = (width,height)\n out = cv2.VideoWriter(folder+name_type+'_5_' + str(folders.index(folder)+1)+ '.mp4' ,cv2.VideoWriter_fourcc(*'DIVX'), 1.5, size)\n for i in range(len(img_array5)):\n out.write(img_array5[i])\n out.release()", "def _iter_images_rects(self):\n image_x = self._margin\n image_y = self._margin\n total_width = self.width - 2 * self._margin\n total_height = self.height - self._texts_height - 2 * self._margin\n\n if len(self._images) == 1:\n image_width = total_width\n image_height = total_height\n elif 2 <= len(self._images) < 4:\n if self.is_portrait:\n image_width = total_width\n image_height = (total_height - (len(self._images) - 1) * self._margin) // len(self._images)\n else:\n image_width = (total_width - (len(self._images) - 1) * self._margin) // len(self._images)\n image_height = total_height\n else:\n image_width = (total_width - self._margin) // 2\n image_height = (total_height - self._margin) // 2\n\n yield image_x, image_y, image_width, image_height\n\n if 2 <= len(self._images) < 4:\n if self.is_portrait:\n image_y += image_height + self._margin\n else:\n image_x += image_width + self._margin\n yield image_x, image_y, image_width, image_height\n\n if 3 <= len(self._images) < 4:\n if self.is_portrait:\n image_y += image_height + self._margin\n else:\n image_x += image_width + self._margin\n yield image_x, image_y, image_width, image_height\n\n if len(self._images) == 4:\n image_x += image_width + self._margin\n yield image_x, image_y, image_width, image_height\n image_y += image_height + self._margin\n image_x = self._margin\n yield image_x, image_y, image_width, image_height\n image_x += image_width + self._margin\n yield image_x, image_y, image_width, image_height", "def _assign_sizes(self):", "def create_human_box(self, i):\n self.box = self.detections[0, 0, i, 3:7] * np.array([self.w, self.h, self.w, self.h])\n (self.startX, self.startY, self.endX, self.endY) = self.box.astype(\"int\")", "def __init__(self, path_image, path_imagefile, path_bndboxfile, transform):\r\n # -------------------- DATA ARGUMENT\r\n self.shape = 446\r\n self.hue = 0.1\r\n self.saturation = 1.5\r\n self.exposure = 1.5\r\n self.imagelist = []\r\n self.labellist = []\r\n self.transform = transform\r\n label_dir = os.listdir(path_bndboxfile)\r\n image_dir = os.listdir(path_imagefile)\r\n\r\n # read imagepath\r\n for file in image_dir:\r\n file_name = os.path.join(path_imagefile, file)\r\n with open(file_name) as f:\r\n lines = f.readlines()\r\n for line in lines:\r\n image_name = line.split()[0] + '.JPEG'\r\n image = os.path.join(path_image, image_name)\r\n self.imagelist.append(image)\r\n\r\n # read imagelabel, i.e, (name, xmin, xmax, ymin, ymax)\r\n for file in label_dir:\r\n if file.split('.')[1] == 'xml':\r\n file_name = os.path.join(path_bndboxfile, file)\r\n with open(file_name) as f:\r\n xml_tree = parse(f).documentElement\r\n objects = 
xml_tree.getElementsByTagName('object')\r\n for object in objects:\r\n label = []\r\n name = object.getElementsByTagName('name')[0]\r\n label.append(name.childNodes[0].data)\r\n bndbox = object.getElementsByTagName('bndbox')[0]\r\n for node in bndbox.childNodes:\r\n if node.nodeType == node.ELEMENT_NODE:\r\n label.append(node.childNodes[0].data)\r\n self.labellist.append(label)\r\n else:\r\n print('Expect files in xml format. but get {}'.format(file.split('.')[1]))", "def _images_and_boxes_preprocessing(self, imgs, boxes):\r\n # Image [0, 255] -> [0, 1].\r\n imgs = imgs.float()\r\n imgs = imgs / 255.0\r\n\r\n height, width = imgs.shape[2], imgs.shape[3]\r\n # The format of boxes is [x1, y1, x2, y2]. The input boxes are in the\r\n # range of [0, 1].\r\n boxes[:, [0, 2]] *= width\r\n boxes[:, [1, 3]] *= height\r\n boxes = transform.clip_boxes_to_image(boxes, height, width)\r\n\r\n if self._split == \"train\":\r\n # Train split\r\n imgs, boxes = transform.random_short_side_scale_jitter(\r\n imgs,\r\n min_size=self._jitter_min_scale,\r\n max_size=self._jitter_max_scale,\r\n boxes=boxes,\r\n )\r\n imgs, boxes = transform.random_crop(imgs, self._crop_size, boxes=boxes)\r\n\r\n # Random flip.\r\n imgs, boxes = transform.horizontal_flip(0.5, imgs, boxes=boxes)\r\n elif self._split == \"val\":\r\n # Val split\r\n # Resize short side to crop_size. Non-local and STRG uses 256.\r\n imgs, boxes = transform.random_short_side_scale_jitter(\r\n imgs, min_size=self._crop_size, max_size=self._crop_size, boxes=boxes\r\n )\r\n\r\n # Apply center crop for val split\r\n imgs, boxes = transform.uniform_crop(\r\n imgs, size=self._crop_size, spatial_idx=1, boxes=boxes\r\n )\r\n\r\n if self._test_force_flip:\r\n imgs, boxes = transform.horizontal_flip(1, imgs, boxes=boxes)\r\n elif self._split == \"test\":\r\n # Test split\r\n # Resize short side to crop_size. 
Non-local and STRG uses 256.\r\n imgs, boxes = transform.random_short_side_scale_jitter(\r\n imgs, min_size=self._crop_size, max_size=self._crop_size, boxes=boxes\r\n )\r\n\r\n if self._test_force_flip:\r\n imgs, boxes = transform.horizontal_flip(1, imgs, boxes=boxes)\r\n else:\r\n raise NotImplementedError(\"{} split not supported yet!\".format(self._split))\r\n\r\n # Do color augmentation (after divided by 255.0).\r\n if self._split == \"train\" and self._use_color_augmentation:\r\n if not self._pca_jitter_only:\r\n imgs = transform.color_jitter(\r\n imgs, img_brightness=0.4, img_contrast=0.4, img_saturation=0.4\r\n )\r\n\r\n imgs = transform.lighting_jitter(\r\n imgs,\r\n alphastd=0.1,\r\n eigval=np.array(self._pca_eigval).astype(np.float32),\r\n eigvec=np.array(self._pca_eigvec).astype(np.float32),\r\n )\r\n\r\n # Normalize images by mean and std.\r\n imgs = transform.color_normalization(\r\n imgs,\r\n np.array(self._data_mean, dtype=np.float32),\r\n np.array(self._data_std, dtype=np.float32),\r\n )\r\n\r\n if self._use_bgr:\r\n # Convert image format from RGB to BGR.\r\n # Note that Kinetics pre-training uses RGB!\r\n imgs = imgs[:, [2, 1, 0], ...]\r\n\r\n boxes = transform.clip_boxes_to_image(boxes, self._crop_size, self._crop_size)\r\n\r\n return imgs, boxes", "def _resize_cbboxes(self, results):\n img_shape = results['img_shape']\n for key in results.get('cbbox_fields', []):\n cbboxes = []\n for cbox in results[key]:\n tmp_cbox = np.array(cbox, dtype=np.float32)\n new_tmp_cbox = []\n for ccbox in tmp_cbox:\n ccbox = np.array(ccbox, dtype=np.float32)\n ccbox[0::2] *= results['scale_factor'][0]\n ccbox[1::2] *= results['scale_factor'][1]\n new_tmp_cbox.append(ccbox)\n tmp_cbox = np.array(new_tmp_cbox, dtype=np.float32)\n if self.bbox_clip_border:\n tmp_cbox[:, 0::2] = np.clip(tmp_cbox[:, 0::2], 0, img_shape[1])\n tmp_cbox[:, 1::2] = np.clip(tmp_cbox[:, 1::2], 0, img_shape[0])\n cbboxes.append(tmp_cbox)\n results[key] = cbboxes", "def drawBox (self, left, top, width, height, colour):\r\n w = self.bih_vals [bih_Width]\r\n h = self.bih_vals [bih_Height]\r\n\r\n cols = [left, left + width - 1]\r\n rows = [top, top + height - 1]\r\n \r\n x0 = max ((0,left))\r\n x1 = min ((cols[1]+1, w))\r\n y0 = max ((0,top))\r\n y1 = min ((rows [1]+1, h))\r\n\r\n # rows\r\n\r\n for r in rows:\r\n if r >= 0 and r < h:\r\n row = self.image [r]\r\n for x in range (x0, x1):\r\n row [x] = colour\r\n\r\n # columns\r\n \r\n for y in range (y0, y1):\r\n row = self.image [y]\r\n for c in cols:\r\n if c >= 0 and c < w :\r\n row [c] = colour", "def OnSize(self, event):\r\n\r\n for pos, item in self._items.items():\r\n widget, horizontalalignment, verticalalignment = item.widget, item.horizontalalignment, item.verticalalignment\r\n\r\n rect = self.GetFieldRect(pos)\r\n widgetpos = widget.GetPosition()\r\n widgetsize = widget.GetSize()\r\n\r\n rect = self.GetFieldRect(pos)\r\n\r\n if horizontalalignment == ESB_EXACT_FIT:\r\n if verticalalignment == ESB_EXACT_FIT:\r\n widget.SetSize((rect.width-2, rect.height-2))\r\n widget.SetPosition((rect.x-1, rect.y-1))\r\n elif verticalalignment == ESB_ALIGN_CENTER_VERTICAL:\r\n if widgetsize[1] < rect.width - 1:\r\n diffs = (rect.height - widgetsize[1])/2\r\n widget.SetSize((rect.width-2, widgetsize[1]))\r\n widget.SetPosition((rect.x-1, rect.y+diffs))\r\n else:\r\n widget.SetSize((rect.width-2, widgetsize[1]))\r\n widget.SetPosition((rect.x-1, rect.y-1))\r\n elif verticalalignment == ESB_ALIGN_TOP:\r\n widget.SetSize((rect.width-2, widgetsize[1]))\r\n 
widget.SetPosition((rect.x-1, rect.y))\r\n elif verticalalignment == ESB_ALIGN_BOTTOM:\r\n widget.SetSize((rect.width-2, widgetsize[1]))\r\n widget.SetPosition((rect.x-1, rect.height-widgetsize[1]))\r\n\r\n elif horizontalalignment == ESB_ALIGN_LEFT:\r\n\r\n xpos = rect.x - 1\r\n if verticalalignment == ESB_EXACT_FIT:\r\n widget.SetSize((widgetsize[0], rect.height-2))\r\n widget.SetPosition((xpos, rect.y-1))\r\n elif verticalalignment == ESB_ALIGN_CENTER_VERTICAL:\r\n if widgetsize[1] < rect.height - 1:\r\n diffs = (rect.height - widgetsize[1])/2\r\n widget.SetPosition((xpos, rect.y+diffs))\r\n else:\r\n widget.SetSize((widgetsize[0], rect.height-2))\r\n widget.SetPosition((xpos, rect.y-1))\r\n elif verticalalignment == ESB_ALIGN_TOP:\r\n widget.SetPosition((xpos, rect.y))\r\n elif verticalalignment == ESB_ALIGN_BOTTOM:\r\n widget.SetPosition((xpos, rect.height-widgetsize[1]))\r\n\r\n elif horizontalalignment == ESB_ALIGN_RIGHT:\r\n\r\n xpos = rect.x + rect.width - widgetsize[0] - 1\r\n if verticalalignment == ESB_EXACT_FIT:\r\n widget.SetSize((widgetsize[0], rect.height-2))\r\n widget.SetPosition((xpos, rect.y-1))\r\n elif verticalalignment == ESB_ALIGN_CENTER_VERTICAL:\r\n if widgetsize[1] < rect.height - 1:\r\n diffs = (rect.height - widgetsize[1])/2\r\n widget.SetPosition((xpos, rect.y+diffs))\r\n else:\r\n widget.SetSize((widgetsize[0], rect.height-2))\r\n widget.SetPosition((xpos, rect.y-1))\r\n elif verticalalignment == ESB_ALIGN_TOP:\r\n widget.SetPosition((xpos, rect.y))\r\n elif verticalalignment == ESB_ALIGN_BOTTOM:\r\n widget.SetPosition((xpos, rect.height-widgetsize[1]))\r\n\r\n elif horizontalalignment == ESB_ALIGN_CENTER_HORIZONTAL:\r\n\r\n xpos = rect.x + (rect.width - widgetsize[0])/2 - 1\r\n if verticalalignment == ESB_EXACT_FIT:\r\n widget.SetSize((widgetsize[0], rect.height))\r\n widget.SetPosition((xpos, rect.y))\r\n elif verticalalignment == ESB_ALIGN_CENTER_VERTICAL:\r\n if widgetsize[1] < rect.height - 1:\r\n diffs = (rect.height - widgetsize[1])/2\r\n widget.SetPosition((xpos, rect.y+diffs))\r\n else:\r\n widget.SetSize((widgetsize[0], rect.height-1))\r\n widget.SetPosition((xpos, rect.y+1))\r\n elif verticalalignment == ESB_ALIGN_TOP:\r\n widget.SetPosition((xpos, rect.y))\r\n elif verticalalignment == ESB_ALIGN_BOTTOM:\r\n widget.SetPosition((xpos, rect.height-widgetsize[1]))\r\n\r\n if event is not None:\r\n event.Skip()", "def recreate_grid(self):\n\n self.print_numlist = arcade.SpriteList()\n for row in range(ROW_COUNT):\n for column in range(COLUMN_COUNT):\n sprite = arcade.Sprite(\n f\"Numbers/{self.grid[row][column]}.png\", scale=0.2\n )\n x = (MARGIN + WIDTH) * column + MARGIN + WIDTH // 2\n y = (MARGIN + HEIGHT) * row + MARGIN + HEIGHT // 2\n sprite.center_x = x\n sprite.center_y = y\n self.print_numlist.append(sprite)\n # Check to see if all squares have been filled in\n if 0 not in self.grid:\n # if Cameron.Check_for_Completion(self.grid) == True:\n self.done = True", "def _draw_boxes(self, image, boxes, classes, thickness=4):\n for i in range(len(boxes)):\n bot, left, top, right = boxes[i, ...]\n class_id = int(classes[i]) - 1\n color = self.COLOR_LIST[class_id]\n cv2.rectangle(image, (left, top), (right, bot), color=color, thickness=thickness)" ]
[ "0.62900573", "0.6085298", "0.6009164", "0.58949316", "0.58541226", "0.58446145", "0.5767578", "0.5712032", "0.5706543", "0.5674811", "0.5674811", "0.5674811", "0.5674811", "0.5674811", "0.5668548", "0.5650026", "0.5615289", "0.5615289", "0.5593645", "0.55755764", "0.55713177", "0.55709535", "0.5566756", "0.5561757", "0.55550766", "0.55439216", "0.55324256", "0.5499715", "0.5493037", "0.5491024" ]
0.6761654
0
read the cosmos catalog
def _read_catalog(self, catname):
    print('loading catalog:',catname)
    with fitsio.FITS(catname,lower=True) as fits:
        #cat = fits[1][100000:110000]
        if 'object_data' in fits:
            print('reading from MEDS object data')
            ext='object_data'
        else:
            ext=1

        cat = fits[ext][:]

    # one cut here based on if we matched to the galsim cat
    w, = np.where(
        #(cat['mu_class'] < 3)
        #&
        #(cat['mask']==0)
        #&
        (cat['gscosmos_index'] >= 0)
    )

    print('initial cuts %d/%d %g%%' % (w.size,cat.size,w.size/cat.size*100))
    cat = cat[w]

    return cat
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_catalog(catalog):\n with open(catalog, \"r\") as f:\n header = f.readline()\n if header.startswith('#EventID | Time | Latitude | Longitude | Depth/km'):\n catalog = _read_iris(f)\n elif header.startswith('time, latitude, longitude, depth, depthUnits, magnitude'):\n catalog = _read_sod(f)\n else:\n sys.exit(\"Unknown catalog format\")\n return catalog", "def getCatalogs():", "def getCatalog(unique_name):", "def read_catalog():\n categories = session.query(Category).all()\n items = session.query(CatalogItem).order_by(CatalogItem.id.desc())\n quantity = items.count()\n return categories, items, quantity", "def catalog(self) -> str:\n return pulumi.get(self, \"catalog\")", "def get_catalog():\n return jsonify(getCatalog())", "def sample():\n write_drive_catalog_file(\"j:\\\\\", \"SANSA2_1G\", r\"c:\\SANSA2_1G.txt\")\n write_drive_catalog_file(\"k:\\\\\", \"8GB\", r\"c:\\8GB.txt\")\n write_master_catalog_file([r\"c:\\SANSA2_1G.txt\", r\"c:\\8GB.txt\"], r\"c:\\Master_Catalog.txt\")\n entries = read_catalog_file_entries(r\"c:\\Master_Catalog.txt\")\n for entry in entries:\n println(entry_to_line(entry))", "def testCosmologyCatalog(self):\n dbObj = myTestGals(database=self.dbName)\n cat = cosmologicalGalaxyCatalog(dbObj)\n cat.write_catalog(self.catName)", "def _get_catalog_object(self):\n return self.cluster.catalogd.service.read_debug_webpage(\n \"catalog_object?object_type=TABLE&object_name=functional.alltypes\")", "def get_catalog(self) -> Dict[str, str]:\n return self.catalog", "def load_catalog(self):\n self.catalog = pd.read_csv(self.catalog_path, \n index_col=0, parse_dates=True)\n self.unique_years = self.catalog.index.year.unique()\n return", "def loadData(catalog):\n return controller.loadData(catalog)", "def loadData(catalog):\n return controller.loadData(catalog)", "def test_api_ucs_get_catalog(self):\n api_data = request(\"get\", \"/sys\")\n self.assertEqual(api_data['status'], 200,\n 'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))\n total_elements = 0\n for elementTypes in api_data[\"json\"]:\n for element in api_data[\"json\"][str(elementTypes)]:\n api_data_c = request(\"get\", \"/catalog\",\n query={\"identifier\": element[\"relative_path\"].strip(\"/\")})\n self.assertEqual(api_data_c['status'], 200,\n 'Incorrect HTTP return code, expected 200, got:' +\n str(api_data_c['status']))\n total_elements += 1\n self.assertGreater(total_elements, 0, \"Zero catalog elements found\")\n # TO DO: deeper check on the catalog data", "def read_catalogs():\n if not StateHolder.config_parsed:\n config = YamlUtils.read(file=StateHolder.catalog_config_file, doc=Doc.CATALOGS_CONFIG)\n\n if not type(config) is dict:\n config['default'] = {}\n StateHolder.config = dict(config)\n StateHolder.config_parsed = True", "def loadData(catalog):\r\n controller.loadData(catalog)", "def readDataFromCosmosDB(self):\n self.cosmosdb.updateCollectionThroughput(\n self.config.get_database_name(), self.config.get_hash_table(), self.config.get_scaleup_cosmos(),\n self.config.get_key(),\n self.config.get_cosmos_account())\n\n # read all the data from cosmos DB with encrypted fields and store in a data frame\n df = spark.read.format(\"com.microsoft.azure.cosmosdb.spark\").options(\n **self.config.get_hash_readconfig()).load()\n\n # iterate over the dataframe and decrypt and replace all fields except the cosmos db system fields strating\n # with \"_\" and the key --> id field since its hashed not encrypted and also not the partition field\n df = df.repartition(160).cache()\n dec_udf 
= udf(decrypt)\n\n for columns in df.columns:\n if columns.startswith('_') or columns.startswith('id') or columns.startswith('partition'):\n print('not to be encrypted field: ' + columns)\n else:\n print('to be encrypted field: ' + columns)\n df = df.withColumn(columns, dec_udf(df[columns]))\n print(\"succesfully decrypted the fields in spark df data frame\")\n\n # Register the DataFrame as a SQL temporary view\n df = df.repartition(1).cache()\n # df.persist(StorageLevel.DISK_ONLY_2)\n df.createOrReplaceTempView(\"customer\")\n spark.sql(\"CACHE TABLE customer\").collect()\n\n print(\"succesfully read \" + str(df.count()) +\n \" records from CosmosDB and saved in spark df data frame\")\n self.cosmosdb.updateCollectionThroughput(\n self.config.get_database_name(), self.config.get_hash_table(), self.config.get_scaledown_cosmos(),\n self.config.get_key(),\n self.config.get_cosmos_account())\n\n return df", "def loadData(catalog):\n controller.loadData(catalog)", "def loadData(catalog):\n controller.loadData(catalog)", "def loadData(catalog):\n controller.loadData(catalog)", "def loadData(catalog):\n controller.loadData(catalog)", "def loadData(catalog):\n controller.loadData(catalog)", "def get_catalog(self) -> Catalog:\n params: Dict[str, Any] = self._status.get_status_info()\n\n response = self._client.open_api_do(\n \"GET\", \"labels/catalogs\", self.dataset_id, params=params\n ).json()\n return Catalog.loads(response[\"catalog\"])", "def print_catalog(self):\n # first download the json for the catalog\n self.download_json()\n\n # open the saved json file and load the json\n with self.file.open(\"r\") as catalog_file:\n pages = json.load(catalog_file)\n\n # the catalog json is just a list of pages\n # so we begin by iterating through the pages\n for page_num in range(len(pages)):\n # get each page\n page = pages[page_num]\n\n # get the threads on each page\n threads = page[\"threads\"]\n\n # print the page heading\n print(\"*** PAGE \", page_num + 1, \"***\")\n\n # iterate through the threads on each page\n for thread_num in range(len(threads)):\n # get each thread\n thread = threads[thread_num]\n\n # print the thread number\n num = thread[\"no\"]\n print(\"---\", \"Thread:\", num, \"---\")\n\n # not all threads have a subject or comment\n try:\n subject = thread[\"sub\"]\n comment = thread[\"com\"]\n\n print(\"Sub:\", subject)\n print(\"Comment:\", comment)\n except KeyError:\n print(\"N/A\")", "def read_combined_star_catalog(params,log):\n\n if path.isfile(params['catalog_file']) == False:\n\n return np.zeros(1)\n\n hdulist = fits.open(params['catalog_file'])\n\n data = hdulist[1].data\n\n header = hdulist[0].header\n\n star_catalog = Table(data)\n\n data = hdulist[2].data\n\n image_trios = Table(data)\n\n log.info('Read data from combined colour star catalog')\n\n return star_catalog, image_trios, header", "def list_detail_catalog(self, catalog_name):\n # list catalog\n self._list_catalog(catalog_name)\n # detail catalog\n self._details_catalog(catalog_name)", "def load_data(catalog):\n controller.load_data(catalog)", "def initCatalog():\n return controller.initCatalog()", "def initCatalog():\n return controller.initCatalog()", "def initCatalog():\n return controller.initCatalog()" ]
[ "0.69564354", "0.63602346", "0.6289939", "0.6274737", "0.6120884", "0.60243875", "0.5962401", "0.59330785", "0.5929398", "0.59037805", "0.5888229", "0.58801526", "0.58801526", "0.587579", "0.586561", "0.582441", "0.582154", "0.57935977", "0.57935977", "0.57935977", "0.57935977", "0.57935977", "0.5749884", "0.5731499", "0.5704847", "0.5690862", "0.5621293", "0.5611027", "0.5611027", "0.5611027" ]
0.67307436
1
add fields from the cat; some will not be in the odata but some will. When copy is True, we will copy over the ones that are in both, in some cases
def _add_cat_fields(self, odata, copy=True):
    # these are required fileds from get_meds_output_dtype
    # that we have put into the input catalog
    always_copy=[
        'id',
        'ra',
        'dec',
    ]

    cat = self.cat_orig

    add_dt = []
    for d in cat.dtype.descr:
        n = d[0]
        if n not in odata.dtype.names:
            add_dt.append(d)

    obj_data = eu.numpy_util.add_fields(
        odata,
        add_dt,
    )

    if copy:
        for n in always_copy:
            obj_data[n] = cat[n]

        for d in add_dt:
            n = d[0]
            if n in always_copy:
                continue

            # don't clobber things that should be left at
            # their default values
            if n not in odata.dtype.names:
                obj_data[n] = cat[n]

    return obj_data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def copyAttributes(self, other, add_nxpars=False):\n import copy\n \n self.setTitle(other.getTitle())\n self.setDataSetType(other.getDataSetType())\n self.setAllAxisLabels(other.getAllAxisLabels())\n self.setAllAxisUnits(other.getAllAxisUnits())\n self.setYLabel(other.getYLabel())\n self.setYUnits(other.getYUnits())\n if len(self.attr_list.keys()) == 0:\n self.attr_list = copy.copy(other.attr_list)\n else:\n self.attr_list.instrument = copy.copy(other.attr_list.instrument)\n self.attr_list.sample = copy.copy(other.attr_list.sample)\n\n if add_nxpars:\n nxpar_keys = [item[0] for item in self.attr_list.iteritems() \\\n if isinstance(item[1], NxParameter)]\n\n for nxpar_key in nxpar_keys:\n self.attr_list[nxpar_key] += other.attr_list[nxpar_key]\n else:\n # Do nothing\n pass\n \n keys_to_get = [other_key for other_key in other.attr_list \\\n if other_key not in self.attr_list]\n \n for key_to_get in keys_to_get:\n self.attr_list[key_to_get] = \\\n copy.copy(other.attr_list[key_to_get])", "def new_data(first: dict, second: dict, changeables: tuple):\n for name, field in first.items():\n if name not in changeables:\n second[name] = field", "def union(self, other: Catalog) -> Catalog:\n cat = self.copy()\n oth_cp = other.copy()\n\n for k in oth_cp.keys():\n for ver_id, version in oth_cp[k].versions.items():\n cat[k][ver_id] = version\n return cat", "def __add__(self, other):\n return self.__class__(\n {\n name:\n self.__getattribute__(name) + other.__getattribute__(name)\n for name in self._fields\n }\n )", "def merge(self, new_store):\n if new_store.name and len(new_store.name) > 0:\n self.name = new_store.name\n if new_store.address and len(new_store.address) > 0:\n self.address = new_store.address\n if new_store.city and len(new_store.city) > 0:\n self.city = new_store.city\n if new_store.state and len(new_store.state) > 0:\n self.state = new_store.state\n if new_store.zip and new_store.zip > 0:\n self.zipcode = new_store.zip\n if new_store.phone and new_store.phone > 0:\n self.phone = new_store.phone", "def _copy(self, *, new_instance:bool=False, new_alias:bool=False,\n _context:dict=None, _deep_copy:bool=True, **kwargs):\n self_id = id(self)\n if _context is None:\n _context = {}\n elif self_id in _context:\n return _context[self_id] # I've already been copied\n\n\n existing_items = {k:getattr(self, k) for k in self._nb_attrs}\n #It's a copy so shouldn't have the same uuid\n existing_items.pop('uuid', None)\n existing_items.update(kwargs)\n\n if new_instance:\n existing_items['_ref'] = self\n elif not ('_ref' in kwargs and kwargs['_ref']) and self._ref:\n existing_items['_ref'] = self._ref._copy(_context=_context)\n\n if new_alias:\n existing_items['_alias'] = True\n\n new_obj = type(self)(**existing_items)\n\n _context[self_id] = new_obj\n\n if _deep_copy:\n for obj in self:\n new_obj._add(obj._copy(new_alias=new_alias, _context=_context))\n\n return new_obj", "def test_copy_2(dset_full):\n dset_new = copy.deepcopy(dset_full)\n\n # Test internal references in the dataset\n assert id(dset_new.site_pos.other) == id(dset_new.sat_pos)\n assert id(dset_new.site_delta.ref_pos) == id(dset_new.site_pos)\n assert id(dset_new.site_posvel.other) == id(dset_new.sat_posvel)\n assert id(dset_new.site_posvel_delta.ref_pos) == id(dset_new.site_posvel)\n\n assert id(dset_new.group.site_pos.other) == id(dset_new.group.sat_pos)\n assert id(dset_new.group.site_delta.ref_pos) == id(dset_new.group.site_pos)\n assert id(dset_new.group.site_posvel.other) == id(dset_new.group.sat_posvel)\n assert 
id(dset_new.group.site_posvel_delta.ref_pos) == id(dset_new.group.site_posvel)\n\n # Verify that new dataset have different references than original object\n for field_name, field in dset_full._fields.items():\n assert id(field.data) != id(dset_new._fields[field_name].data)\n try:\n for group_field_name, group_field in field.data._fields.items():\n assert id(group_field.data) != id(dset_new._fields[field_name].data._fields[group_field_name].data)\n except AttributeError:\n # Field is not a group\n pass", "def copy_attrs(data_orig, data_new):\n\n if isinstance(data_orig, Dataset):\n\n # Variables\n for v in data_orig.data_vars:\n field = data_orig[v]\n for attr, val in field.attrs.items():\n data_new[v].attrs[attr] = val\n\n # Coordinates\n for c in data_orig.coords:\n coord = data_orig.coords[c]\n for attr, val in coord.attrs.items():\n if c in data_new.coords:\n data_new.coords[c].attrs[attr] = val\n\n # Metadata\n for attr, val in data_orig.attrs.items():\n data_new.attrs[attr] = val\n\n elif isinstance(data_orig, DataArray):\n\n # Variable Metadata\n for att, val in data_orig.attrs.items():\n data_new.attrs[att] = val\n\n # Coordinates\n for c in data_orig.coords:\n coord = data_orig.coords[c]\n for attr, val in coord.attrs.items():\n if c in data_new.coords:\n data_new.coords[c].attrs[attr] = val\n\n else:\n raise ValueError(\"Couldn't handle type %r\" % type(data_orig))\n\n return data_new", "def test_copy_features(self):\n fc = self.read_feature()\n other = FeatureCollection(features=fc.features,\n otherProperties=fc.otherProperties)\n assert len(other.features) == 1\n feature = other.features[0]\n\n self.check_feature(feature)", "def _merge_attributes(self, workout):\n keys = self.__table__.columns.keys()\n for key in keys:\n if key in [\"id\",\n \"external_id\",\n \"is_duplicate_with\",\n \"manual_check_required_with\",\n ]:\n continue\n elif getattr(self, key) == None:\n # copy attribute if empty; else keep existing \n setattr(self, key, getattr(workout, key))", "def _merge_two(self, obj1, obj2):\r\n for uniq_ident in obj2.keys():\r\n if (uniq_ident not in obj1) \\\r\n or (obj1[uniq_ident]['modified'] \\\r\n < obj2[uniq_ident]['modified']):\r\n obj1[uniq_ident] = obj2[uniq_ident]\r\n\r\n return obj1 # self._dict_to_list(obj1)\r", "def copyBooks(self):\n skipMods = set(('Morrowind.esm',self.fileInfo.name))\n for id,(record,modName) in (self.srcBooks.items() + self.altBooks.items()):\n if modName not in skipMods:\n self.setRecord(copy.copy(record))", "def append_ipma_metadata(orig: dict, dest: dict):\n for key in [key for key in orig.keys() if key != 'data']:\n dest[key] = orig[key]", "def copy_fields(self, model):\n fields = super(HistoricalRecords, self).copy_fields(model)\n for name, field in self.additional_fields.items():\n assert name not in fields\n assert hasattr(self, 'get_%s_value' % name)\n fields[name] = field\n return fields", "def combine_data(self, object, additional_data):\n for k, v in additional_data.items():\n if isinstance(v, list):\n object[k] = object.get(k, []) + v\n else:\n object[k] = v\n for instance in object.get(\"instances\", []):\n if instance.get(\"sub_container\", {}).get(\"top_container\", {}).get(\"_resolved\"):\n del instance[\"sub_container\"][\"top_container\"][\"_resolved\"]\n object = super(ArchivalObjectMerger, self).combine_data(object, additional_data)\n return combine_references(object)", "def _copy_metadata_deep(value, old_value):\n if value is None or old_value is None or value is old_value: return\n\n if isinstance(value, dict):\n for k, 
v in value.iteritems():\n _copy_metadata_deep(v, old_value[k])\n elif isinstance(value, list):\n for v, old_v in zip(value, old_value):\n _copy_metadata_deep(v, old_v)\n else:\n try:\n value.__dict__.update(old_value.__dict__)\n except AttributeError:\n pass", "def __add__(self, other):\n\n if isinstance(other, type(self)):\n # always create new fields, since otherwise c = a - b changes a as well!\n p = fields(self)\n p.elec[:] = self.elec + other.elec\n p.magn[:] = self.magn + other.magn\n return p\n else:\n raise DataError(\"Type error: cannot add %s to %s\" % (type(other), type(self)))", "def copy_attributes(var1, var2):\n for each in var1.ncattrs():\n if each != \"_FillValue\":\n setattr(var2, each, getattr(var1, each))", "def _copy_from_doc(doc):\n d = {\"has_props\": [], \"origins\": []}\n # Complex function to grab the keys and put them in the root doc\n # if the item is a list, it makes one doc per item with those corresponding keys\n for doc_key in summary_fields:\n sub_doc = doc.get(doc_key, None)\n if isinstance(sub_doc, list) and len(sub_doc) > 0:\n d[\"has_props\"].append(doc_key)\n d[doc_key] = []\n for sub_item in sub_doc:\n temp_doc = {\n copy_key: sub_item[copy_key]\n for copy_key in summary_fields[doc_key]\n if copy_key in sub_item\n }\n d[doc_key].append(temp_doc)\n elif isinstance(sub_doc, dict):\n d[\"has_props\"].append(doc_key)\n if sub_doc.get(\"origins\", None):\n d[\"origins\"].extend(sub_doc[\"origins\"])\n d.update(\n {\n copy_key: sub_doc[copy_key]\n for copy_key in summary_fields[doc_key]\n if copy_key in sub_doc\n }\n )\n return d", "def merge_contextual(self, other):\n # TODO: This is currently dependent on our data model? Make more robust to schema changes\n # Currently we assume all lists at Compound level, with 1 further potential nested level of lists\n for k in self.keys():\n # print('key: %s' % k)\n for item in self[k]:\n # print('item: %s' % item)\n for other_item in other.get(k, []):\n # Skip text properties (don't merge names, labels, roles)\n if isinstance(other_item, six.text_type):\n continue\n for otherk in other_item.keys():\n if isinstance(other_item[otherk], list):\n if len(other_item[otherk]) > 0 and len(item[otherk]) > 0:\n other_nested_item = other_item[otherk][0]\n for othernestedk in other_nested_item.keys():\n for nested_item in item[otherk]:\n if not nested_item[othernestedk]:\n nested_item[othernestedk] = other_nested_item[othernestedk]\n elif not item[otherk]:\n item[otherk] = other_item[otherk]\n log.debug('Result: %s' % self.serialize())\n return self", "def mergeWith(self, others):", "def combine_data(self, object, additional_data):\n object[\"ancestors\"] = additional_data[\"ancestors\"] if self.cartographer_client else []\n object[\"position\"] = additional_data.get(\"order\", 0) if additional_data else 0\n object = super(ResourceMerger, self).combine_data(object, additional_data)\n return combine_references(object)", "def append(dest, field, value):\n if isinstance(dest[field], list):\n dest[field].append(value)\n else:\n dest[field] = [dest[field], value]", "def merge(self, obj):\n pass", "def test__ActivityParty__copy_with__1():\n old_party_id = 'plain'\n old_size = 6\n old_max = 12\n new_party_id = 'asia'\n new_size = 1\n new_max = 8\n \n field = ActivityParty(\n party_id = old_party_id,\n size = old_size,\n max_ = old_max,\n )\n copy = field.copy_with(\n party_id = new_party_id,\n size = new_size,\n max_ = new_max,\n )\n _assert_fields_set(copy)\n vampytest.assert_is_not(field, copy)\n \n vampytest.assert_eq(copy.id, 
new_party_id)\n vampytest.assert_eq(copy.size, new_size)\n vampytest.assert_eq(copy.max, new_max)", "def copy(self):", "def _copy_from_doc(doc):\n\n d = {\"has_props\": []}\n\n # Function to grab the keys and put them in the root doc\n for doc_key in summary_fields:\n sub_doc = doc.get(doc_key, None)\n if isinstance(sub_doc, list) and len(sub_doc) > 0:\n d[\"has_props\"].append(doc_key)\n for copy_key in summary_fields[doc_key]:\n d[copy_key] = dict()\n for sub_item in sub_doc:\n # In cases where multiple docs have the same properties,\n # they must differ by method\n if copy_key in sub_item and \"method\" in sub_item:\n d[copy_key][sub_item[\"method\"]] = sub_item[copy_key]\n\n elif isinstance(sub_doc, dict):\n d[\"has_props\"].append(doc_key)\n d.update(\n {\n copy_key: sub_doc[copy_key]\n for copy_key in summary_fields[doc_key]\n if copy_key in sub_doc\n }\n )\n\n return d", "def get_added_dicts(a, b):\n tmp = copy.deepcopy(a)\n for key, val in b.iteritems():\n if key not in tmp:\n tmp[key] = val\n return tmp", "def copy_attributes(ncin, ncout,exclude=None, include=None):\n att_dict = odict()\n for attribute_name in ncin.ncattrs():\n if include is not None and attribute_name not in include:\n continue #if include is defined, and this attribute is not there\n if exclude is not None and attribute_name in exclude:\n continue #if exclude is defined, and this attribute is there\n att_dict[attribute_name] = ncin.getncattr(attribute_name)\n ncout.setncatts(att_dict)", "def fusionne(self, new):\n if new == self:\n raise ValueError(\"une catégorie ne peut être fusionnée avec elle même\")\n self.alters_data = True\n if not isinstance(new, type(self)):\n raise TypeError(\"pas la même classe d'objet\")\n if self.type != new.type:\n raise TypeError(\"pas le même type de catégorie, %s est %s alors que %s est %s\" % (\n self.nom, self.type, new.nom, new.type))\n nb_change = Echeance.objects.filter(cat=self).update(cat=new)\n nb_change += Ope.objects.filter(cat=self).update(cat=new)\n self.delete()\n return nb_change" ]
[ "0.60299045", "0.5626615", "0.55989486", "0.55208635", "0.54832995", "0.54745245", "0.5468035", "0.54594445", "0.5416989", "0.54133993", "0.53944564", "0.5360663", "0.5277778", "0.5271018", "0.52541333", "0.5244835", "0.5185914", "0.518226", "0.5180149", "0.51750094", "0.5171218", "0.5156771", "0.5155819", "0.51528907", "0.5145961", "0.5141367", "0.51112574", "0.51023513", "0.50964636", "0.5067921" ]
0.7400884
0
make a new struct with ncutout-sized arrays based on the actual maximum ncutout
def _make_resized_data(self, odata):
    nmax = odata['file_id'].shape[1]
    new_nmax = odata['ncutout'].max()
    if new_nmax < 2:
        new_nmax = 2

    temp_obj_data = odata

    nobj = temp_obj_data.size

    new_data = meds.util.get_meds_output_struct(
        nobj,
        new_nmax,
        extra_fields=self._get_fields(new_nmax),
    )
    new_data = self._add_cat_fields(new_data, copy=False)

    for name in new_data.dtype.names:
        if name in temp_obj_data.dtype.names:
            shape = new_data[name].shape
            lshape = len(shape)
            if lshape > 1 and shape[1] == new_nmax:
                new_data[name][:,:] = temp_obj_data[name][:,0:new_nmax]
            else:
                new_data[name][:] = temp_obj_data[name][:]

    del temp_obj_data

    return new_data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_maxcut_data_model():\n n = 5\n V = np.arange(0, n, 1)\n E = [(0, 1, 3.0), (1, 2, 2.0), (2, 3, 2.0), (3, 4, 3.0), (4, 0, 1.0), (0, 3, 3.0)]\n\n G = nx.Graph()\n G.add_nodes_from(V)\n G.add_weighted_edges_from(E)\n return G", "def expanding_max_nb(a, minp=1):\n out = np.empty_like(a, dtype=np.float_)\n for col in range(a.shape[1]):\n out[:, col] = expanding_max_1d_nb(a[:, col], minp=minp)\n return out", "def max_noutput_items(self) -> \"int\":\n return _beamforming_swig.phasedarray_sptr_max_noutput_items(self)", "def get_bins(size, n, max_value):\n bin_lims = get_bin_lims(n, max_value)\n return sort_by_rows(np.array(list(itertools.product(bin_lims, repeat=size))))", "def arrayManipulation_shortpeak(n, queries):\n a_s = []\n b_s = []\n k_s = []\n\n for i, row in enumerate(queries):\n a_s.append(row[0])\n b_s.append(row[1])\n k_s.append(row[2])\n\n # breakpoint()\n x = a_s + b_s\n all_indices = list(set(x))\n all_indices.sort()\n short_arr = [0] * len(all_indices)\n\n # mapping index of n-long array to index of shorter array\n index_lookup = {}\n for j, el in enumerate(all_indices):\n index_lookup[el] = j\n\n # breakpoint()\n for m in range(len(a_s)):\n short_arr[index_lookup[a_s[m]]] += k_s[m]\n short_arr[index_lookup[b_s[m]]] -= k_s[m]\n\n maxval = 0\n cumsum = 0\n for i, el in enumerate(short_arr):\n cumsum += el\n maxval = max(maxval, cumsum)\n\n print(f'{maxval: <15,d}: Max value')\n arr_size = short_arr.__sizeof__() / 1000000\n total = ((a_s.__sizeof__() / 1000000)\n + b_s.__sizeof__() / 1000000\n + k_s.__sizeof__() / 1000000\n + queries.__sizeof__() / 1000000\n + index_lookup.__sizeof__() / 1000000\n + short_arr.__sizeof__() / 1000000)\n print(f'{total: <15.2f}: All objects size(MB)')\n print(f'{arr_size: <15.2f}: Array size(MB)')\n return maxval, arr_size", "def process_lim(pool_lim, area):\n\n pool_nolim = [] # No limitation\n pool_lim_n = [] # N limitation\n pool_lim_p = [] # P limitation\n # Colimitation driven by N (When the realized NPP allocation is smaller\n # thant the potential due to N but the other element is also limitant)\n pool_colim_n = []\n # Colimitation driven by P (When the realized NPP allocation is smaller\n # than the potential due to P but the other element is also limitant\n pool_colim_p = []\n # Real Colimitation = K <= 1D-6 (K is difference between P and N realized NPP allocation)\n pool_colim_np = []\n\n ndays = pool_lim.shape[1]\n npls = pool_lim.shape[0]\n\n for pls in range(npls):\n if area[pls]:\n no_lim = (pool_lim[pls, :] == 0).sum() / ndays * area[pls]\n lim_n = (np.count_nonzero(\n pool_lim[pls, :] == 1) / ndays) * area[pls]\n lim_p = (np.count_nonzero(\n pool_lim[pls, :] == 2) / ndays) * area[pls]\n colim_n = (np.count_nonzero(\n pool_lim[pls, :] == 4) / ndays) * area[pls]\n colim_p = (np.count_nonzero(\n pool_lim[pls, :] == 5) / ndays) * area[pls]\n colim_np = (np.count_nonzero(\n pool_lim[pls, :] == 6) / ndays) * area[pls]\n\n pool_nolim.append(no_lim)\n pool_lim_n.append(lim_n)\n pool_lim_p.append(lim_p)\n pool_colim_n.append(colim_n)\n pool_colim_p.append(colim_p)\n pool_colim_np.append(colim_np)\n\n return (np.sum(pool_nolim),\n np.sum(pool_lim_n),\n np.sum(pool_lim_p),\n np.sum(pool_colim_n),\n np.sum(pool_colim_p),\n np.sum(pool_colim_np))", "def max_pooling(img):\n result_img = img.copy()\n heignt, width, _ = result_img.shape\n for h in range(0, heignt, 8):\n for w in range(0, width, 8):\n result_img[h:h+8, w:w+8, 0] = np.max(result_img[h:h+8, w:w+8, 0])\n result_img[h:h+8, w:w+8, 1] = np.max(result_img[h:h+8, w:w+8, 1])\n 
result_img[h:h+8, w:w+8, 2] = np.max(result_img[h:h+8, w:w+8, 2])\n result_img[(heignt//8)*8:heignt, :, :] = 0\n result_img[:, (width//8)*8:width, :] = 0\n return result_img", "def create_array( n ):", "def max_noutput_items(self):\n return _spacegrant_swig.invert_bit_sptr_max_noutput_items(self)", "def expanding_max_1d_nb(a, minp=1):\n out = np.empty_like(a, dtype=np.float_)\n maxv = a[0]\n cnt = 0\n for i in range(a.shape[0]):\n if np.isnan(maxv) or a[i] > maxv:\n maxv = a[i]\n if ~np.isnan(a[i]):\n cnt += 1\n if cnt < minp:\n out[i] = np.nan\n else:\n out[i] = maxv\n return out", "def max_noutput_items(self) -> \"int\":\n return _beamforming_swig.randomsampler_sptr_max_noutput_items(self)", "def cut(self, max_lenght):\n self.V_estimates = self.V_estimates[:max_lenght]\n super().cut(max_lenght)", "def get_max_combination(total_cuts):\n max_pieces = 0\n for i in range(total_cuts):\n result = i * (total_cuts - i)\n if result > max_pieces:\n max_pieces = result\n print(max_pieces)", "def _get_max_sampled_bandit(self)->Bandit:\n estimates = []\n for bandit in self.bandits:\n Qth = np.random.normal(loc =self.mu[bandit.id], scale = self.var[bandit.id])\n f_hat = self.mu[bandit.id]#computing moving_average here \n estimates.append(max(Qth, f_hat))\n return self.bandits[np.argmax(estimates)]", "def set_max_noutput_items(self, m: \"int\") -> \"void\":\n return _beamforming_swig.phasedarray_sptr_set_max_noutput_items(self, m)", "def __init__(self, maxlen, dtype):\n self._start_index = np.int64(0)\n self._len = np.int64(0)\n self._maxlen = np.array(maxlen)\n initial_len = 10 if np.isinf(self._maxlen) else self._maxlen\n self._buffer = np.zeros(shape=(initial_len,), dtype=dtype)", "def _get_optimal_threshold(arr, num_bins=1001, num_quantized_bins=255):\n if not isinstance(arr, np.ndarray):\n raise TypeError('get_optimal_threshold only supports input type of np.ndarray,'\n ' while received type=%s' % (str(type(arr))))\n min_val = np.min(arr)\n max_val = np.max(arr)\n th = max(abs(min_val), abs(max_val))\n\n hist, hist_edges = np.histogram(arr, bins=num_bins, range=(-th, th))\n zero_bin_idx = num_bins // 2\n num_half_quantized_bins = num_quantized_bins // 2\n assert np.allclose(hist_edges[zero_bin_idx] + hist_edges[zero_bin_idx + 1],\n 0, rtol=1e-5, atol=1e-7)\n\n thresholds = np.zeros(num_bins // 2 + 1 - num_quantized_bins // 2)\n divergence = np.zeros_like(thresholds)\n quantized_bins = np.zeros(num_quantized_bins, dtype=np.int32)\n # i means the number of bins on half axis excluding the zero bin.\n for i in range(num_quantized_bins // 2,\n num_bins // 2 + 1):\n p_bin_idx_start = zero_bin_idx - i\n p_bin_idx_stop = zero_bin_idx + i + 1\n thresholds[i - num_half_quantized_bins] = hist_edges[p_bin_idx_stop]\n sliced_nd_hist = hist[p_bin_idx_start:p_bin_idx_stop]\n\n # generate reference distribution p\n p = sliced_nd_hist.copy()\n assert p.size % 2 == 1\n assert p.size >= num_quantized_bins\n # put left outlier count in p[0]\n left_outlier_count = np.sum(hist[0:p_bin_idx_start])\n p[0] += left_outlier_count\n # put right outlier count in p[-1]\n right_outlier_count = np.sum(hist[p_bin_idx_stop:])\n p[-1] += right_outlier_count\n # is_nonzeros[k] indicates whether hist[k] is nonzero\n is_nonzeros = (sliced_nd_hist != 0).astype(np.int32)\n\n # calculate how many bins should be merged to generate quantized distribution q\n num_merged_bins = p.size // num_quantized_bins\n # merge hist into num_quantized_bins bins\n for j in range(num_quantized_bins):\n start = j * num_merged_bins\n stop = start + 
num_merged_bins\n quantized_bins[j] = sliced_nd_hist[start:stop].sum()\n quantized_bins[-1] += sliced_nd_hist[num_quantized_bins * num_merged_bins:].sum()\n # expand quantized_bins into p.size bins\n q = np.zeros(p.size, dtype=np.float32)\n for j in range(num_quantized_bins):\n start = j * num_merged_bins\n if j == num_quantized_bins - 1:\n stop = -1\n else:\n stop = start + num_merged_bins\n norm = is_nonzeros[start:stop].sum()\n if norm != 0:\n q[start:stop] = float(quantized_bins[j]) / float(norm)\n q[sliced_nd_hist == 0] = 0\n p = _smooth_distribution(p)\n # There is a chance that q is an invalid probability distribution.\n try:\n q = _smooth_distribution(q)\n except ValueError:\n divergence[i - num_half_quantized_bins] = float(\"inf\")\n else:\n divergence[i - num_half_quantized_bins] = stats.entropy(p, q)\n quantized_bins[:] = 0\n\n min_divergence_idx = np.argmin(divergence)\n min_divergence = divergence[min_divergence_idx]\n opt_th = thresholds[min_divergence_idx]\n return min_val, max_val, min_divergence, opt_th", "def max_noutput_items(self):\n return _spacegrant_swig.general_burster_2_sptr_max_noutput_items(self)", "def test_compute_c_max_output():\n # build\n T = np.array([600, 500])\n E_ion = np.array([20, 10])\n E_atom = np.array([30, 40])\n angles_ion = np.array([60, 60])\n angles_atom = np.array([60, 60])\n ion_flux = np.array([1e21, 1e20])\n atom_flux = np.array([2e21, 2e20])\n\n # run\n output = divHretention.compute_c_max(\n T, E_ion, E_atom, angles_ion, angles_atom,\n ion_flux, atom_flux, full_export=True)\n\n # test\n assert len(output) == 3\n\n # run\n output = divHretention.compute_c_max(\n T, E_ion, E_atom, angles_ion, angles_atom,\n ion_flux, atom_flux, full_export=False)\n\n # test\n assert len(output) == 2", "def max_cut(g):\n # Write your code here.\n return []", "def max_pool2d(input, kernel_size, stride=1, padding=0, ceil_mode=False):\n return _pool('MAX', utils._pair, **locals())", "def max_pool3d(input, kernel_size, stride=1, padding=0, ceil_mode=False):\n return _pool('MAX', utils._triple, **locals())", "def create_new_targets(window, data):\n new_data = np.apply_along_axis(lambda x: np.bincount(x).argmax(), axis=1, arr=(rolling_window(data.astype(int), 0, window)))\n\n return new_data", "def maxpool(input, filter_h, filter_w, stride_h, stride_w, padding, name):\n with tf.name_scope(name):\n mp = tf.nn.max_pool(input, ksize=[1, filter_h, filter_w, 1], strides=[1, stride_h, stride_w, 1],\n padding=padding)\n # print(name + \" : \", str(mp.shape))\n return mp", "def _get_max_estimated_bandit(self)->Bandit:\n # print(\"mus - \", self.mu)\n # print(\"actions - \", np.argmax(self.mu))\n unique, counts = np.unique(self.mu, return_counts=True)\n lens = counts[np.argmax(unique)] \n if lens>1: # if two actions have same argmax\n # then return arbitrarily from those max ones\n maxs = list(np.array(self.bandits)[self.mu==unique[np.argmax(unique)]])\n return np.random.choice(maxs)\n # otherwise return the max one\n return self.bandits[np.argmax(self.mu)]", "def max_noutput_items(self):\n return _add_vector_swig.add_vector_2_cpp_sptr_max_noutput_items(self)", "def max_pool1d(input, kernel_size, stride=1, padding=0, ceil_mode=False):\n return _pool('MAX', utils._single, **locals())", "def _resize(self, maxval):\n assert maxval >= self._N\n temp = [None for i in range(maxval)] # (Item[]) new [maxval] # Item[]\n q_len = len(self._q)\n for i in range(self._N):\n temp[i] = self._q[(self._first + i) % q_len]\n self._q = temp\n self._first = 0\n self._last = self._N", "def 
create_split_bounds(N, train_pct):\n train_len = int(round(train_pct * N))\n if ((N - train_len) % 2) != 0:\n train_len += 1\n\n # NOTE: We're assume the dev and test set are equal in length.\n test_len = dev_len = int((N - train_len) / 2)\n\n assert \"Not all data points are being used. Check create_split_bounds()\", \\\n (train_len + test_len + dev_len) == N\n\n return train_len, dev_len, test_len", "def non_max_suppression(inputs, n_classes, max_output_size, iou_threshold, confidence_threshold):\n batch = tf.unstack(inputs)\n boxes_dicts = []\n\n for boxes in batch:\n boxes = tf.boolean_mask(boxes, boxes[:, 4] > confidence_threshold)\n classes = tf.argmax(boxes[:, 5:], axis=-1)\n classes = tf.expand_dims(tf.cast(classes, tf.float32), axis=-1)\n boxes = tf.concat([boxes[:, :5], classes], axis=-1)\n\n boxes_dict = dict()\n for cls in range(n_classes):\n mask = tf.equal(boxes[:, 5], cls)\n mask_shape = mask.get_shape()\n if mask_shape.ndims != 0:\n class_boxes = tf.boolean_mask(boxes, mask)\n boxes_coords, boxes_conf_scores, _ = tf.split(class_boxes, [4, 1, -1], axis=-1)\n boxes_conf_scores = tf.reshape(boxes_conf_scores, [-1])\n indices = tf.image.non_max_suppression(boxes_coords,\n boxes_conf_scores,\n max_output_size,\n iou_threshold)\n class_boxes = tf.gather(class_boxes, indices)\n boxes_dict[cls] = class_boxes[:, :5]\n\n boxes_dicts.append(boxes_dict)\n return boxes_dicts" ]
[ "0.5717705", "0.5661527", "0.5603735", "0.5556727", "0.5533146", "0.54946077", "0.54890066", "0.5484546", "0.54764545", "0.5465916", "0.54387826", "0.54167676", "0.5408915", "0.5392852", "0.5391776", "0.53902745", "0.5386068", "0.53750616", "0.5368087", "0.5362609", "0.5357239", "0.53557456", "0.53500104", "0.5347422", "0.5346802", "0.53459895", "0.5344874", "0.53320915", "0.53192997", "0.531634" ]
0.58375233
0
get box sizes that are either 2N or 32N, within the limits set by the user
def _get_box_sizes(self, image_info, cat): file_id=0 impath=image_info['image_path'][file_id].strip() ext=image_info['image_ext'][file_id] wcs_data = fitsio.read_header(impath, ext=ext) wcs = eu.wcsutil.WCS(wcs_data) jacob = wcs.get_jacobian(100,100) dudcol, dudrow, dvdcol, dvdrow = jacob det = dvdrow*dudcol - dvdcol*dudrow pixel_scale = np.sqrt(abs(det)) print('found pixel scale:',pixel_scale) box_size = cat['box_size_arcsec']/pixel_scale # clip to range box_size.clip( min=self['min_box_size'], max=self['max_box_size'], out=box_size, ) box_size = box_size.astype('i4') w,=np.where( ( (box_size % 2) != 0 ) ) if w.size > 0: box_size[w] += 1 return box_size
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_compute_box_size(self):\n def compute_best_size_for(dim):\n size = ((self.element_space[dim]-1)//self.box_space[dim]) + 1\n size += 2 * self.ghost_space[dim]\n while size % Level.BOX_ALIGNMENTS[dim]:\n size += 1\n return size\n\n return Space([compute_best_size_for(dim) for dim in range(self.dimensions)])", "def _get_block_sizes(resnet_size):\n choices = {\n 18: [2, 2, 2, 2],\n 34: [3, 4, 6, 3],\n 50: [3, 4, 6, 3],\n 101: [3, 4, 23, 3],\n 152: [3, 8, 36, 3],\n 200: [3, 24, 36, 3]\n }\n\n try:\n return choices[resnet_size]\n except KeyError:\n err = ('Could not find layers for selected Resnet size.\\n'\n 'Size received: {}; sizes allowed: {}.'.format(\n resnet_size, list(choices.keys())))\n raise ValueError(err)", "def _get_block_sizes(resnet_size):\n choices = {\n 18: [2, 2, 2, 2],\n 34: [3, 4, 6, 3],\n 50: [3, 4, 6, 3],\n 101: [3, 4, 23, 3],\n 152: [3, 8, 36, 3],\n 200: [3, 24, 36, 3]\n }\n\n try:\n return choices[resnet_size]\n except KeyError:\n err = (\n 'Could not find layers for selected Resnet size.\\n'\n 'Size received: {}; sizes allowed: {}.'.format(\n resnet_size, choices.keys()))\n raise ValueError(err)", "def guess_box_size(xyz):\n return np.round(np.max(xyz[:, 1] - np.min(xyz[:, 1]), 0))", "def _acceptable_dimensions(self, box):\n return self._min_width < box.x1-box.x0 < self._max_width and\\\n self._min_height < box.y1-box.y0 < self._max_height", "def box_size(self) -> np.ndarray:\n return self.upper - self.lower + 1", "def box_size(self) -> np.ndarray:\n return self.upper - self.lower + 1", "def _get_block_sizes(self, resnet_size):\n choices = {\n 18: [2, 2, 2, 2],\n 34: [3, 4, 6, 3],\n 50: [3, 4, 6, 3],\n 101: [3, 4, 23, 3],\n 152: [3, 8, 36, 3],\n 200: [3, 24, 36, 3]\n }\n try:\n return choices[resnet_size]\n except KeyError:\n err = ('Could not find layers for selected Resnet size.\\n'\n 'Size received: {}; sizes allowed: {}.'.format(\n resnet_size, choices.keys()))\n raise ValueError(err)", "def get_boxes():\n boxes = []\n\n box_sizes = [256]\n left_x_cords = [x for x in range(0,1280,12)]\n top_y_cords = [y for y in range(360,720,12)]\n\n for box_size in box_sizes:\n for x_cord in left_x_cords:\n for y_cord in top_y_cords:\n if box_size+x_cord < 1280 and box_size+y_cord < 720:\n boxes.append([x_cord, y_cord, x_cord+box_size, y_cord+box_size])\n\n return boxes", "def GetNiceExtentsBySpacing(minval,maxval,spacing,tolerance):\n pass", "def setBoxsize(length,width,height):\n return length,width,height", "def getLayoutDimensions(n, pref=\"height\"):\n nopt = np.sqrt(n)\n inoptw = int(nopt)\n inopth = int(nopt)\n while inoptw * inopth < n:\n if pref == \"width\":\n inoptw += 1\n if inoptw * inopth > (n - inopth):\n inoptw -= 1\n inopth += 1\n else:\n inopth += 1\n if inoptw * inopth > (n - inoptw):\n inopth -= 1\n inoptw += 1\n\n return (inopth, inoptw)", "def getBoxsize(stepCount,stepHeight,stepWidth,platformWidth,stairsLength,distance):\n #///重新给box的三个属性赋值\n box_width = (stepCount-1)*stepWidth + platformWidth\n box_length = (stairsLength*2+distance) \n #distance = box_length-stairsLength*2\n box_height = stepCount*2*stepHeight\n #print (\"box_length:%s,box_width:%s,box_height:%s\"%(box_length,box_width,box_height))\n return box_length,box_width,box_height", "def block_sizes(max_size):\n if max_size > 8:\n raise ValueError(\"Invalid max_size value specified!\")\n else:\n return [f\"{2**x}x{2**y}\" for x in range(2, max_size) for y in range(2, max_size) if x != 2 or y != 2]", "def box_sz(b):\n #taken from fastai\n return ((b[:, 2]-b[:, 0]) * (b[:, 3]-b[:, 1]))", 
"def get_bounding_box_size(images):\n height = max(image.shape[0] for image in images)\n width = max(image.shape[1] for image in images)\n return height, width", "def get_bounding_box_size(images):\n height = max(image.shape[0] for image in images)\n width = max(image.shape[1] for image in images)\n return height, width", "def dimensions():", "def get_term_dimensions():\n height, width = subprocess.check_output(SIZE).split()\n return int(width), int(height)", "def maxSize():\n rect = pf.app.desktop().availableGeometry()\n maxh,maxw = rect.width(),rect.height()\n return maxh,maxw", "def getDimensions():", "def _get_size_var(self):\n size_var = []\n for index in range(self._nr_args):\n restriction = self._domain_restricion[index]\n size_var.append(utils.get_nr_bits(restriction, self._precision))\n return size_var", "def getScaledDimensions(size, max_size, returnFactor=False):\n\n width, height = size\n max_width, max_height = max_size\n if (max_width, max_height) == (0, 0) or (width, height) == (0, 0): return (0, 0)\n wfactor, hfactor = 1.0, 1.0\n\n if width > max_width: wfactor = float(max_width) / width\n if height > max_height: hfactor = float(max_height) / height\n\n factor = min(wfactor, hfactor)\n\n size = (width * factor, height * factor)\n\n if not returnFactor:\n return size\n else:\n return size, factor", "def boxToExtent(box):\n b = normalizeBox(box)\n return (b[0], b[1], b[0]+b[2]-1, b[1]+b[3]-1)", "def get_size_of_grid(self):\n row = 0\n column = 0\n if int(self.var1.get()) == 1:\n row, column = 6, 6\n\n if int(self.var2.get()) == 1:\n row, column = 7, 6\n\n if int(self.var3.get()) == 1:\n row, column = 7, 7\n\n if int(self.var4.get()) == 1:\n row, column = 8, 8\n\n return row, column", "def scale_box(box, img_size):\n xscale = img_size[0] / FLAGS.size\n yscale = img_size[1] / FLAGS.size\n x0, y0, x1, y1 = box\n return [\n float(x0) * xscale,\n float(y0) * yscale,\n float(x1) * xscale,\n float(y1) * yscale,\n ]", "def dimensions_of_box(box: ndarray) -> Tuple[float, float]:\n\n (top_left, _, bottom_right, _) = box\n\n (x1, y1) = top_left\n (x2, y2) = bottom_right\n\n return (x2 - x1, y2 - y1)", "def _filter_boxes2(boxes, max_size, min_size):\n ws = boxes[:, 2] - boxes[:, 0] + 1\n hs = boxes[:, 3] - boxes[:, 1] + 1\n if max_size > 0:\n keep = np.where(np.minimum(ws, hs) < max_size)[0]\n elif min_size > 0:\n keep = np.where(np.maximum(ws, hs) > min_size)[0]\n return keep", "def bbox_size(label_sitk):\n\n # Setting Bounding Box\n F_statistics = sitk.LabelShapeStatisticsImageFilter()\n\n F_statistics.Execute(label_sitk)\n bbox_dims = F_statistics.GetBoundingBox(1)\n return list(bbox_dims[3:6])", "def get_grid_dimensions(current_problem_size, params, grid_div, block_size_names):\n def get_dimension_divisor(divisor_list, default, params):\n if divisor_list is None:\n if default in params:\n divisor_list = [default]\n else:\n return 1\n return numpy.prod([int(eval(replace_param_occurrences(s, params))) for s in divisor_list])\n divisors = [get_dimension_divisor(d, block_size_names[i], params) for i, d in enumerate(grid_div)]\n return tuple(int(numpy.ceil(float(current_problem_size[i]) / float(d))) for i, d in enumerate(divisors))" ]
[ "0.6898253", "0.67774564", "0.6747777", "0.6713784", "0.6704732", "0.66019756", "0.66019756", "0.65982324", "0.6596363", "0.6499011", "0.64778125", "0.6461232", "0.643119", "0.6366755", "0.6308479", "0.6291537", "0.6291537", "0.6284666", "0.6251369", "0.6221639", "0.6197825", "0.6192271", "0.61398184", "0.6121089", "0.6085505", "0.6049268", "0.6043943", "0.6039048", "0.60380536", "0.6026183" ]
0.7289415
0
get the image info structure. Set default scale to 1.0. The other fields are 0 for numbers, or blank for strings
def get_image_info_struct(nimage, path_len, image_id_len=None, wcs_len=None, ext_len=None, extra_dtype=None): dt = get_image_info_dtype( path_len, image_id_len=image_id_len, wcs_len=wcs_len, ext_len=ext_len, extra_dtype=extra_dtype, ) data = np.zeros(nimage, dtype=dt) data['scale'] = 1.0 return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def image_info(img):\n\tprint(img.format)\n\tprint(img.size)\n\tprint(img.mode)", "def image_data_info(page):\n xObject = page['/Resources']['/XObject'].getObject()\n\n for obj_key in xObject:\n obj = xObject[obj_key]\n if obj['/Subtype'] == '/Image':\n width, height = (obj['/Width'], obj['/Height'])\n num_bytes = len(obj._data)\n density = num_bytes * 1.0 / (width * height)\n return {'width': width, 'height': height, 'size': num_bytes, 'density': density}\n\n return None", "def image_info(self):\n\n if not self._image_info:\n path_image_info = os.path.join(\n self._path, f\"ImageSet_{self._image['ImageSetID']}.ImageInfo\"\n )\n\n # Make sure the ImageInfo file really exists\n if not os.path.exists(path_image_info):\n self.logger.warning(\"ImageInfo path doesn't exist: %s\", path_image_info)\n return None\n\n self.logger.debug(\"Reading image data from: %s\", path_image_info)\n self._image_info = pinn_to_dict(path_image_info)\n\n return self._image_info", "def get_img_info(self, idx):\n\n image = self.get_img(idx)\n img_height = image.size[0]\n img_width = image.size[1]\n\n return {\"height\": img_height, \"width\": img_width}", "def get_image_format_for_scale(scale=1.0):\n img = read_single(1, -70, 0, scale)\n return img.shape", "def scalarInfo(img, cnt):\n\tm = cntInfo(img, cnt)\n\td = {\"perimeter\":m[\"perimeter\"], \"oreientation\":m[\"orientation\"], \"solidity\":m[\"solidity\"],\"height\":m[\"height\"], \"extent\":m[\"extent\"], \"aspect ratio\":m[\"aspect ratio\"], \"area\":m[\"area\"], \"sum intensity\":m[\"sum intensity\"], \"width\":m[\"width\"], \"equivalent diameter\": m[\"equivalent diameter\"], \"mean intensity\": m[\"mean intensity\"]}\n\treturn d", "def get_image_size(self):", "def setUp(self):\r\n center = [1,1]\r\n size = 1\r\n self.imageinfo = ImageInfo(center, size)", "def setUp(self):\r\n center = [1,1]\r\n size = 1\r\n self.imageinfo = ImageInfo(center, size)", "def setUp(self):\r\n center = [1,1]\r\n size = 1\r\n self.imageinfo = ImageInfo(center, size)", "def setUp(self):\r\n center = [1,1]\r\n size = 1\r\n self.imageinfo = ImageInfo(center, size)", "def setUp(self):\r\n center = [1,1]\r\n size = 1\r\n self.imageinfo = ImageInfo(center, size)", "def __init__(self, data, pixscale = 7.77/43):\n self.data = data\n self.pixscale = pixscale", "def getimage(self):", "def _get_image_info(\n image_id: int,\n width: int,\n height: int,\n file_name: str,\n license_id=1,\n flickr_url=\"\",\n coco_url=\"\",\n date_captured=datetime.datetime.utcnow().isoformat(' ')):\n image_info = {\n \"id\": image_id,\n \"width\": width,\n \"height\": height,\n \"file_name\": file_name,\n \"license\": license_id,\n \"flickr_url\": flickr_url,\n \"coco_url\": coco_url,\n \"date_captured\": date_captured,\n }\n\n return image_info", "def get_image(self):\n if self._image is None:\n image_data = np.load(self.image_file)\n if not isinstance(image_data, np.ndarray):\n image_data = image_data['arr_0']\n self.meta_data = ImageWrapper.load_metadata(self.image_file+\".meta\")\n exposure_time = self.meta_data['exposure_time_us'] * 1e-6\n dark_level = float(self.meta_data['black_level'])\n # saturation_mask = image_data.max(axis=2) >= 4094\n image_data = np.clip((image_data.astype(np.float32) - dark_level),\n a_min=0.0, a_max=None) / exposure_time\n if self.original_vignetting is not None:\n image_data = image_data / self.original_vignetting\n if self.crop is not None:\n image_data = image_data[\n self.crop[1,0]:self.crop[1,1],\n self.crop[0,0]:self.crop[0,1]\n ]\n # saturation_mask = 
saturation_mask[\n # self.crop[1,0]:self.crop[1,1],\n # self.crop[0,0]:self.crop[0,1]\n # ]\n if self.down_sample is not None:\n image_data = cv2.resize(\n image_data,\n dsize=None,\n fx=1./self.down_sample,\n fy=1./self.down_sample,\n interpolation=cv2.INTER_AREA\n )\n # saturation_mask = cv2.resize(\n # saturation_mask,\n # dsize=None,\n # fx=1./self.down_sample,\n # fy=1./self.down_sample,\n # interpolation=cv2.INTER_AREA\n # )\n if self.reup_sample is not None:\n image_data = cv2.resize(\n image_data,\n dsize=None,\n fx=self.reup_sample,\n fy=self.reup_sample,\n interpolation=cv2.INTER_CUBIC\n )\n # saturation_mask = cv2.resize(\n # saturation_mask,\n # dsize=None,\n # fx=self.reup_sample,\n # fy=self.reup_sample,\n # interpolation=cv2.INTER_CUBIC\n # )\n image = torch.tensor(np.transpose(image_data, (2,0,1)), dtype=torch.float32, device=self.device)\n # saturation_mask = torch.tensor(saturation_mask, dtype=torch.float32, device=self.device)\n if not self.lazy:\n self._image = image\n # self._saturation_mask = saturation_mask\n else:\n image = self._image\n # saturation_mask = self._saturation_mask\n\n return image#, saturation_mask", "def GetScale(self):\n ...", "def format_img(self, img, C):\n img, ratio = self.format_img_size(img, C)\n img = self.format_img_channels(img, C)\n return img, ratio", "def __attrs_post_init__(self):\n self.key = uuid.uuid4().hex\n if self.properties is None:\n self.properties = {}\n if self.is_image:\n try:\n img_size = Image.open(self.open()).size\n self.properties.update(width=img_size[0], height=img_size[1])\n except IOError:\n self.content_type = 'application/octet-stream'", "def detail(self):\n return self.uniform(\"detail\",\n self.img_scale * .05,\n self.img_scale * .2)", "def get_image_characteristics(self):\r\n self.image_height, self.image_width, self.image_channels = self.image.shape\r\n\r\n # Estimate the cell size to be around a ninth of the width of the screenshot area\r\n self.cell_size = int(self.image_width / 9) | 1\r\n\r\n # Cell size should be at most a ninth of the width and at least a twentieth of the width of the screenshot\r\n # Since a typical grid is 9x9, so it should be at most a ninth of the image width, and it shouldn't be too small\r\n self.min_cell_size = int(self.image_width / 20 * self.image_width / 20)\r\n self.max_cell_size = int(self.image_width / 9 * self.image_width / 9)", "def small_image(self):\n pass", "def __build__(self,data_index=0):\n \n super(Image,self).__build__()\n # -- How to read the image\n self._build_properties = dict(\n data_index = data_index,\n header_exptime = \"EXPTIME\",\n dataslice0=\"undefined\",\n dataslice1=\"undefined\",\n bkgdbox={\"bh\":100,\"bw\":100,\"fh\":3,\"fw\":3},\n )", "def _createImageInfo(self, \n height,\n width, \n img_id=None, \n license=0, \n flickr_url='',\n coco_url='', \n date_captured=None,\n other=None,\n ):\n\n if date_captured is None:\n date_captured = datetime.datetime.utcnow().isoformat(' ')\n\n if img_id is not None:\n filename = self.imId2name(img_id)\n else:\n filename = None\n\n img_info={\"id\" : img_id,\n \"width\" : width,\n \"height\" : height,\n \"file_name\" : filename,\n \"license\" : license,\n \"flickr_url\" : flickr_url,\n \"coco_url\" : coco_url,\n \"date_captured\" : date_captured,\n \"other\": other\n }\n\n return img_info", "def getImg(img_name):\n img = Image.objects.raw({\"_id\": img_name}).first()\n in_dict = {}\n in_dict[\"name\"] = img.name\n in_dict[\"b64str\"] = img.b64str\n in_dict[\"imgsize\"] = img.imgsize\n in_dict[\"processed\"] = 
img.processed\n in_dict[\"timestamp\"] = img.timestamp\n return in_dict", "def _GetImageInfo(self,path):\n hd = Header(path, scan=True)\n hdr = hd.hdr\n self.hdr = hdr\n if hdr is None:\n# Either a ref.dat file or it isn't an imaging file.\n if 'ref' in path and 'dat' in path:\n self.refdats[os.path.realpath(path)] = True\n info = {'type':'refdat'}\n return info\n else:\n return None\n elif hdr['filetype'] == 'dicom' and not path.endswith('.yaml'):\n# Write a yaml file to the raw data directory if possible.\n dirname, outfile = self._yaml_filename(path)\n yaml_name = '%s/%s' % (dirname, outfile)\n if not os.path.exists(yaml_name):\n# Create yaml file using dirname,\n# e.g., ../anatomicals/S2_EFGRE3D/s2_efgre3d.yaml\n try:\n hd.write_hdr_to_yaml('%s/%s' % (dirname,outfile))\n except IOError:\n# This is a nonessential function, so ignore exceptions\n# such as access violations.\n pass\n elif hdr['filetype'] == 'dicom' or hdr['filetype'] == 'ge_ifile':\n if not os.path.isdir(path):\n path = os.path.dirname(path)\n shdr = hdr['subhdr']\n nhdr = hdr['native_header']\n self.shdr = shdr\n if 'dti' in shdr.get('PulseSequenceName','').lower() \\\n or 'dti' in nhdr.get('PulseSequenceFile',''):\n psdname = 'dti'\n else:\n psdname = os.path.basename((shdr.get('PulseSequenceName','').strip()).lower())\n info = {'psdname':psdname, \\\n 'acqtime':shdr['AcqTime'], \\\n 'series':int(shdr['SeriesNumber']), \\\n 'plane':hdr['plane'].strip(), \\\n 'type':self.imgtype.get(psdname,None), \\\n 'plane':hdr['plane'], \\\n 'acqtime':shdr['SeriesTime'], \\\n# 'fmapdir':None, \\\n 'refdat':None, \\\n 'imgfile':None, \\\n 'base':None, \\\n 'tdim':int(hdr['tdim']), \\\n 'echo_spacing':None, \\\n 'filetype':'brik', \\\n 'suffix':self.suffix.get(hdr['filetype'], 'brik'), \\\n 'data_filetype':hdr['filetype']}\n if info['type'] == 'localizer':\n# Don't process the localizer.\n return info\n if isinstance(info['acqtime'], int):\n info['acquisition_time'] = time.ctime(info['acqtime'])\n if nhdr.get('ImageFormat',('unknown'))[0] == 'DERIVED' and info['type'] == 'epi':\n# Sometimes screenshots are defined as epis.\n info['type'] = None\n\n# Call the method appropriate to the type of scan in this series.\n stat = apply( self.GetInfoMethods.get(info['type'], self._NoInfo), \\\n [info, path])\n if stat:\n info = {'type':'break'}\n return info\n info['suffix'] = self.suffix.get(info['filetype'], 'brik')\n return info", "def set_attributes(self):\n\n pil_image = PILImage.open(self.path)\n\n # Get the exif data\n # Thanks https://gist.github.com/erans/983821\n exif_data = {}\n info = pil_image._getexif()\n if info:\n for tag, value in info.items():\n decoded = PILExifTags.TAGS.get(tag, tag)\n if decoded == \"GPSInfo\":\n gps_data = {}\n for t in value:\n sub_decoded = PILExifTags.GPSTAGS.get(t, t)\n gps_data[sub_decoded] = value[t]\n\n exif_data[decoded] = gps_data\n else:\n exif_data[decoded] = value\n\n gps_latitude = exif_data.get(\"GPSInfo\",{}).get(\"GPSLatitude\")\n gps_latitude_ref = exif_data.get(\"GPSInfo\",{}).get('GPSLatitudeRef')\n gps_longitude = exif_data.get(\"GPSInfo\",{}).get('GPSLongitude')\n gps_longitude_ref = exif_data.get(\"GPSInfo\",{}).get('GPSLongitudeRef')\n gps_altitude = exif_data.get(\"GPSInfo\",{}).get('GPSAltitude')\n gps_altitude_ref = exif_data.get(\"GPSInfo\",{}).get('GPSAltitudeRef')\n gps_direction = exif_data.get(\"GPSInfo\",{}).get('GPSImgDirection')\n gps_direction_ref = exif_data.get(\"GPSInfo\",{}).get('GPSImgDirectionRef')\n\n if gps_latitude and gps_latitude_ref and gps_longitude and 
gps_longitude_ref:\n lat = gps_tag_to_decimal_degress(gps_latitude)\n if gps_latitude_ref != \"N\": \n lat = 0 - lat\n\n lon = gps_tag_to_decimal_degress(gps_longitude)\n if gps_longitude_ref != \"E\":\n lon = 0 - lon\n\n # image attributes\n self.width, self.height = pil_image.size\n # exif attributes\n self.lat, self.lon = lat, lon\n self.focal = float(exif_data[\"FocalLengthIn35mmFilm\"])\n self.timestamp = datetime.datetime.strptime(exif_data[\"DateTimeOriginal\"], \"%Y:%m:%d %H:%M:%S\").timestamp()\n self.altitude = gps_altitude[0] / gps_altitude[1]\n self.direction = float(gps_direction) if gps_direction is not None else None\n self.pixel_size = (self.altitude * 35.0 / self.focal) / float(self.width)\n # transform attributes\n self.point = self.drone_map.reproject(lon,lat)\n self.angle = float(gps_direction) if gps_direction is not None else 0\n self.scale = 1.0", "def get_picture_info(instance, preset_name):\n # Bail out if the picture does not have an image as that's the object we use to get\n # all the information we need to return any picture info.\n if not instance.picture:\n return None\n\n thumbnailer = instance.picture.easy_thumbnails_thumbnailer\n\n # Look for the preset in settings and fallback to \"default\"\n preset = SIMPLEPICTURE_PRESETS.get(preset_name, SIMPLEPICTURE_PRESETS[\"default\"])\n\n # Complete picture information with thumbnails url calculated according to what is\n # defined in the preset\n picture_info = {}\n location_dict = {\"subject_location\": instance.picture.subject_location}\n\n # - src\n options = preset[\"src\"].copy()\n options.update(location_dict)\n picture_info[\"src\"] = thumbnailer.get_thumbnail(options).url\n\n # - srcset\n srcset = []\n for info in preset.get(\"srcset\", []):\n options = info[\"options\"].copy()\n options.update(location_dict)\n url = thumbnailer.get_thumbnail(options).url\n srcset.append(f\"{url:s} {info['descriptor']:s}\")\n picture_info[\"srcset\"] = \", \".join(srcset) if srcset else None\n\n # - sizes\n picture_info[\"sizes\"] = preset.get(\"sizes\")\n\n return picture_info", "def _getAttributes(self):\n self._params = {}\n if self.interp is not None:\n # Initialize interpolation function :\n self['x'] = np.arange(0, self.pixels, 1)\n self['y'] = np.arange(0, self.pixels, 1)\n # Define newaxis :\n self['xnew'] = np.arange(0, self.pixels, self.interp)\n self['ynew'] = np.arange(0, self.pixels, self.interp)\n self['csize'] = len(self['xnew'])\n else:\n self['csize'] = self.pixels\n # Variables :\n l = int(self['csize'] / 2)\n self['l'] = l\n y, x = np.ogrid[-l:l, -l:l]\n disc = x**2 + y**2\n self['mask'] = disc < l**2\n self['nmask'] = np.invert(self['mask'])\n # self['image'] = np.tile(self.bgcolor[np.newaxis, ...], (2*l, 2*l, 1))", "def __bobo_traverse__(self, REQUEST, name):\n if name.startswith('image'):\n field = self.getField('image')\n image = None\n if name == 'image':\n image = field.getScale(self)\n else:\n scalename = name[len('image_'):]\n if scalename in field.getAvailableSizes(self):\n image = field.getScale(self, scale=scalename)\n if image is not None and not isinstance(image, basestring):\n # image might be None or '' for empty images\n return image\n\n return base.ATCTContent.__bobo_traverse__(self, REQUEST, name)" ]
[ "0.647343", "0.6325045", "0.6177507", "0.61103743", "0.61006016", "0.60990447", "0.60865074", "0.6057824", "0.6057824", "0.6057824", "0.6057824", "0.6057824", "0.59974873", "0.5942097", "0.5923346", "0.58519304", "0.5841813", "0.5808897", "0.57983494", "0.5795056", "0.57798666", "0.57730556", "0.57640034", "0.5718838", "0.57116294", "0.5709371", "0.5697625", "0.5695958", "0.5695206", "0.5693608" ]
0.65033776
0
get the image_info dtype for the specified path string length and wcs string length
def get_image_info_dtype(path_len, image_id_len=None, wcs_len=None, ext_len=None, extra_dtype=None): path_fmt = 'U%d' % path_len if image_id_len is None: image_id_descr = 'i8' else: image_id_descr = 'U%d' % image_id_len if ext_len is not None: ext_descr = 'U%d' % ext_len else: ext_descr = 'i2' dt=[] for ctype in IMAGE_INFO_TYPES: path_name = '%s_path' % ctype ext_name = '%s_ext' % ctype dt += [ (path_name, path_fmt), (ext_name,ext_descr), ] dt += [ ('image_id', image_id_descr), ('image_flags', 'i8'), ('magzp', 'f4'), ('scale', 'f4'), ('position_offset','f8'), ] if wcs_len is not None: wcs_fmt = 'U%d' % wcs_len dt += [ ('wcs',wcs_fmt), ] if extra_dtype is not None: dt += extra_dtype return dt
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_image_info_struct(nimage, path_len,\n image_id_len=None,\n wcs_len=None,\n ext_len=None,\n extra_dtype=None):\n dt = get_image_info_dtype(\n path_len,\n image_id_len=image_id_len,\n wcs_len=wcs_len,\n ext_len=ext_len,\n extra_dtype=extra_dtype,\n )\n\n data = np.zeros(nimage, dtype=dt)\n\n data['scale'] = 1.0\n\n return data", "def get_dimensions(image, classname):\n start, ext = os.path.splitext(image)\n if ext == '.yuv':\n bitdepth = \"8\"\n res_split = start.split('x')\n width_split = res_split[0].split('_')\n width = width_split[-1]\n height_split = res_split[-1].split('_')\n m = res_split[-1].find(\"bit\")\n if res_split[-1][m - 2] == \"_\":\n depth = res_split[-1][m - 1]\n else:\n depth = res_split[-1][m - 2:m]\n height = height_split[0]\n elif classname == \"classE_exr\":\n size = os.path.basename(image).split('_')[2]\n try:\n dimension_cmd = [\"identify\", '-size', size, '-format', '%w,%h,%z', image]\n width, height, depth = subprocess.check_output(dimension_cmd).split(\",\")\n except subprocess.CalledProcessError as e:\n print dimension_cmd, e.output\n else:\n try:\n dimension_cmd = [\"identify\", '-format', '%w,%h,%z', image]\n width, height, depth = subprocess.check_output(dimension_cmd).split(\",\")\n except subprocess.CalledProcessError as e:\n print dimension_cmd, e.output\n return width, height, depth", "def getImageWidthHeight(path2img):\n from struct import unpack\n with open(path2img, 'rb') as f:\n metadata = f.read(25)\n return unpack('>LL', metadata[16:24])[::-1]", "def fast_get_image_size(raw_data):\n size = len(raw_data)\n data = raw_data[:25]\n input_io = io.BytesIO(data)\n if (size >= 10) and data[:6] in ('GIF87a', 'GIF89a'):\n # GIFs\n w, h = struct.unpack(\"<HH\", data[6:10])\n width = int(w)\n height = int(h)\n elif ((size >= 24) and data.startswith('\\211PNG\\r\\n\\032\\n')\n and (data[12:16] == 'IHDR')):\n # PNGs\n w, h = struct.unpack(\">LL\", data[16:24])\n width = int(w)\n height = int(h)\n elif (size >= 16) and data.startswith('\\211PNG\\r\\n\\032\\n'):\n # older PNGs?\n w, h = struct.unpack(\">LL\", data[8:16])\n width = int(w)\n height = int(h)\n elif (size >= 2) and data.startswith('\\377\\330'):\n # JPEG\n input_io.seek(0)\n input_io.read(2)\n b = input_io.read(1)\n try:\n w = ''\n h = ''\n while (b and ord(b) != 0xDA):\n while (ord(b) != 0xFF): b = input_io.read(1)\n while (ord(b) == 0xFF): b = input_io.read(1)\n if (ord(b) >= 0xC0 and ord(b) <= 0xC3):\n input_io.read(3)\n h, w = struct.unpack(\">HH\", input_io.read(4))\n break\n else:\n input_io.read(int(struct.unpack(\">H\", input_io.read(2))[0])-2)\n b = input_io.read(1)\n width = int(w)\n height = int(h)\n except Exception as e:\n #print 'get size error'\n return 0, 0\n else:\n # print \"Sorry, don't know how to get information from this file %s\" % file_path\n return 0, 0\n if width < 0 or height<0:\n return 0, 0\n else:\n return width, height", "def formatLookup(format_str):\n pat = '(\\d+)([A-Z])'\n match = re.search(pat, format_str)\n #print match.group()\n \n data_len = int(match.group(1))\n data_fmt = str(match.group(2))\n np_fmt = fitsFormatLookup(data_fmt)\n np_dtype = '%i%s'%(data_len, np_fmt)\n \n return np_dtype, data_len, np_fmt", "def get_image_size(path, width, type_name):\n fc = _os.path.getsize(path) / type_mapping[type_name].itemsize\n shape = [width, int(fc / width)]\n computed_size = shape[0] * shape[1] * type_mapping[type_name].itemsize\n measured_size = _os.path.getsize(path)\n return shape", "def get_metadata(filename):\n import ome_files\n \n reader = 
ome_files.OMETIFFReader()\n reader.set_id(filename)\n H, W, Z, T, C = reader.get_size_y(), reader.get_size_x(), reader.get_size_z(), reader.get_size_t(), reader.get_size_c()\n reader.close()\n return H, W, Z, T, C", "def fl_get_string_dimension(style, size, txtstr, strlng):\n _fl_get_string_dimension = library.cfuncproto(\n library.load_so_libforms(), \"fl_get_string_dimension\",\\\n None, [cty.c_int, cty.c_int, xfdata.STRING, cty.c_int,\n cty.POINTER(cty.c_int), cty.POINTER(cty.c_int)],\\\n \"\"\"void fl_get_string_dimension(int fntstyle, int fntsize,\n const char * s, int len, int * width, int * height)\"\"\")\n library.check_if_flinitialized()\n library.checkfatal_allowed_value_in_list(style, xfdata.TEXTSTYLE_list)\n i_style = library.convert_to_intc(style)\n i_size = library.convert_to_intc(size)\n s_txtstr = library.convert_to_bytestrc(txtstr)\n i_strlng = library.convert_to_intc(strlng)\n i_width, ptr_width = library.make_intc_and_pointer()\n i_height, ptr_height = library.make_intc_and_pointer()\n library.keep_elem_refs(style, i_style, size, i_size, txtstr, s_txtstr, \\\n strlng, i_strlng, i_width, i_height, ptr_width, ptr_height)\n _fl_get_string_dimension(i_style, i_size, s_txtstr, i_strlng, \\\n ptr_width, ptr_height)\n return i_width.value, i_height.value", "def image_info(img):\n\tprint(img.format)\n\tprint(img.size)\n\tprint(img.mode)", "def get_data_info(data_path):\n if data_path.endswith('.npz'):\n data = np.load(data_path)\n labels = data['labels'][...]\n example_img = data['images'][:data['ids'][1]]\n out = {'size': len(labels),\n 'num_classes': len(np.unique(labels)),\n 'is_segment': False\n }\n elif data_path.endswith('.csv') or data_path.endswith('.txt'): # list of datasets\n lst = pd.read_csv(data_path)\n base_dir = os.path.dirname(data_path)\n segment0 = os.path.join(base_dir, lst['path'].tolist()[0])\n data = np.load(segment0)\n example_img = data['images'][:data['ids'][1]]\n out = {'size': sum(lst['N'].tolist()),\n 'num_classes': int(lst['num_classes'][0]),\n 'is_segment': True\n }\n else:\n raise TypeError(\"Error! 
dataset not supported.\")\n vocab_size = max(example_img) + 1\n out['vocab_size'] = vocab_size\n out['SEP'] = vocab_size - 3\n out['SOS'] = vocab_size - 2\n out['EOS'] = vocab_size - 1\n return out", "def fetchInfo(self, path):\n\n\n img = self.getImageObject(path)\n\n if isinstance(img, ImageFile):\n return img.size\n else:\n return [img.width, img.height]", "def get_dtype_info(dtype: type[np.dtype]) -> np.iinfo | np.finfo:\n try:\n dtype_info = np.iinfo(dtype)\n except ValueError:\n dtype_info = np.finfo(dtype)\n return dtype_info", "def dims(filespec, verbose=False):\n with open(filespec, \"rb\") as f:\n if f.read(4) == b\"\\x76\\x2f\\x31\\x01\": # EXR magic number\n version = np.frombuffer(f.read(4), dtype=\"<u4\")[0]\n max_strlen = 256 if (version & 0x400) else 32\n got_channels = False\n got_dims = False\n while not (got_channels and got_dims):\n attr_name = _read_string_nul(f, max_strlen)\n _ = _read_string_nul(f, max_strlen) # attr_type\n attr_size = np.frombuffer(f.read(4), dtype=\"<u4\")[0]\n if attr_name == \"channels\":\n nchan = 0\n isfloat = False\n bitdepth = 16\n while not got_channels:\n name = _read_string_nul(f, max_strlen)\n if len(name) >= 1:\n dtype = np.frombuffer(f.read(16), dtype=\"<u4\")[0]\n isfloat = isfloat or (dtype > 0)\n bitdepth = max(bitdepth, 16 if dtype == 1 else 32)\n nchan += 1\n else:\n got_channels = True\n elif attr_name == \"dataWindow\":\n box = np.frombuffer(f.read(16), dtype=\"<i4\")\n xmin, ymin, xmax, ymax = box\n width = xmax - xmin + 1\n height = ymax - ymin + 1\n got_dims = True\n else:\n _ = f.seek(attr_size, 1)\n if verbose:\n print(f\"Reading file {filespec} \", end='')\n print(f\"(w={width}, h={height}, c={nchan}, bitdepth={bitdepth})\")\n return width, height, nchan, isfloat, bitdepth\n raise RuntimeError(f\"File {filespec} is not a valid EXR file.\")", "def _GetImageInfo(self,path):\n hd = Header(path, scan=True)\n hdr = hd.hdr\n self.hdr = hdr\n if hdr is None:\n# Either a ref.dat file or it isn't an imaging file.\n if 'ref' in path and 'dat' in path:\n self.refdats[os.path.realpath(path)] = True\n info = {'type':'refdat'}\n return info\n else:\n return None\n elif hdr['filetype'] == 'dicom' and not path.endswith('.yaml'):\n# Write a yaml file to the raw data directory if possible.\n dirname, outfile = self._yaml_filename(path)\n yaml_name = '%s/%s' % (dirname, outfile)\n if not os.path.exists(yaml_name):\n# Create yaml file using dirname,\n# e.g., ../anatomicals/S2_EFGRE3D/s2_efgre3d.yaml\n try:\n hd.write_hdr_to_yaml('%s/%s' % (dirname,outfile))\n except IOError:\n# This is a nonessential function, so ignore exceptions\n# such as access violations.\n pass\n elif hdr['filetype'] == 'dicom' or hdr['filetype'] == 'ge_ifile':\n if not os.path.isdir(path):\n path = os.path.dirname(path)\n shdr = hdr['subhdr']\n nhdr = hdr['native_header']\n self.shdr = shdr\n if 'dti' in shdr.get('PulseSequenceName','').lower() \\\n or 'dti' in nhdr.get('PulseSequenceFile',''):\n psdname = 'dti'\n else:\n psdname = os.path.basename((shdr.get('PulseSequenceName','').strip()).lower())\n info = {'psdname':psdname, \\\n 'acqtime':shdr['AcqTime'], \\\n 'series':int(shdr['SeriesNumber']), \\\n 'plane':hdr['plane'].strip(), \\\n 'type':self.imgtype.get(psdname,None), \\\n 'plane':hdr['plane'], \\\n 'acqtime':shdr['SeriesTime'], \\\n# 'fmapdir':None, \\\n 'refdat':None, \\\n 'imgfile':None, \\\n 'base':None, \\\n 'tdim':int(hdr['tdim']), \\\n 'echo_spacing':None, \\\n 'filetype':'brik', \\\n 'suffix':self.suffix.get(hdr['filetype'], 'brik'), \\\n 
'data_filetype':hdr['filetype']}\n if info['type'] == 'localizer':\n# Don't process the localizer.\n return info\n if isinstance(info['acqtime'], int):\n info['acquisition_time'] = time.ctime(info['acqtime'])\n if nhdr.get('ImageFormat',('unknown'))[0] == 'DERIVED' and info['type'] == 'epi':\n# Sometimes screenshots are defined as epis.\n info['type'] = None\n\n# Call the method appropriate to the type of scan in this series.\n stat = apply( self.GetInfoMethods.get(info['type'], self._NoInfo), \\\n [info, path])\n if stat:\n info = {'type':'break'}\n return info\n info['suffix'] = self.suffix.get(info['filetype'], 'brik')\n return info", "def get_image_format_from_datatext(self, datatext):\n image_format = \"VIRT\"\n temp = re.search('VX_DF_IMAGE_(.+?)\\]', datatext) #Obs. Needed to ecape the [ ]'s\n if temp:\n image_format = temp.group(1)\n return image_format", "def getImageInformation(file_path):\n if os.path.isdir(file_path) == False:\n file_dir = os.path.basename(file_path)\n file_name = os.path.splitext(file_dir)[0]\n file_format = os.path.splitext(file_path)[1]\n return file_name, file_format", "def getheader(filename):\n # read header and convert to string\n h = np.fromfile(filename, dtype='uint8', count=512)\n header = ''\n for s in h[h > 0]:\n header += chr(s)\n # start reading at 'datatype'\n hd = header[header.lower().find('datatype'):]\n hd = hd.split(':')[0].replace(',', ' ').split()\n # Types: uint8 int16 int32 float32\n typelist = ['u1', 'i2', 'i4', 'f4']\n # extract datatype\n try:\n dtype = typelist[int(hd[0].split('=')[1]) - 1]\n except:\n print(header)\n raise IOError('getheader: datatype invalid or missing')\n # extract endianness\n try:\n if hd[-1].split('=')[0].lower() != 'endian':\n raise IndexError()\n endian = hd[-1].split('=')[1]\n except IndexError:\n print(header)\n raise IOError('getheader: endianess missing.')\n if endian.lower() == 'l':\n dtype = '<' + dtype\n else:\n dtype = '>' + dtype\n # extract dims\n try:\n if hd[2].split('=')[0].lower() != 'dims':\n raise IndexError()\n dims = int(hd[2].split('=')[1])\n if dims not in [2, 3]:\n raise ValueError('Invalid dims=%i (must be 2 or 3)' % dims)\n except IndexError:\n print(header)\n raise IOError('getheader: dims invalid or missing.')\n try:\n if hd[3].split('=')[0].lower() != 'nx':\n raise IndexError()\n nx = int(hd[3].split('=')[1])\n except:\n print(header)\n raise IOError('getheader: nx invalid or missing.')\n try:\n if hd[4].split('=')[0].lower() != 'ny':\n raise IndexError()\n ny = int(hd[4].split('=')[1])\n except:\n print(header)\n raise IOError('getheader: ny invalid or missing.')\n if dims == 3:\n try:\n if hd[5].split('=')[0].lower() != 'nt':\n raise IndexError()\n nt = int(hd[5].split('=')[1])\n except:\n print(header)\n raise IOError('getheader: nt invalid or missing.')\n shape = (nx, ny, nt)\n else:\n shape = (nx, ny)\n return [shape, dtype, header]", "def from_path(fname):\n def parse_header(lines):\n metadata = {}\n for ln in lines:\n if ln.startswith('#') or len(ln) < 2:\n continue\n match = re.match('(\\w+)\\s+([\\w\\s\\.]+)', ln)\n if not match:\n warnings.warn(\"warning: can't understand line: %s\" % ln)\n continue\n key, value = match.group(1).lower(), match.group(2)\n if key == 'version':\n metadata[key] = value\n elif key in ('fields', 'type'):\n metadata[key] = value.split()\n elif key in ('size', 'count'):\n metadata[key] = map(int, value.split())\n elif key in ('width', 'height', 'points'):\n metadata[key] = int(value)\n elif key == 'viewpoint':\n metadata[key] = map(float, 
value.split())\n elif key == 'data':\n metadata[key] = value.strip().lower()\n # TO-DO apparently count is not required?\n # add some reasonable defaults\n if 'count' not in metadata:\n metadata['count'] = [1] * len(metadata['fields'])\n if 'viewpoint' not in metadata:\n metadata['viewpoint'] = [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]\n if 'version' not in metadata:\n metadata['version'] = '.7'\n return metadata\n\n def _build_dtype(metadata_):\n \"\"\" build numpy structured array dtype from pcl metadata.\n note that fields with count > 1 are 'flattened' by creating multiple\n single-count fields.\n TO-DO: allow 'proper' multi-count fields.\n \"\"\"\n fieldnames = []\n typenames = []\n numpy_pcd_type_mappings = [(np.dtype('float32'), ('F', 4)),\n (np.dtype('float64'), ('F', 8)),\n (np.dtype('uint8'), ('U', 1)),\n (np.dtype('uint16'), ('U', 2)),\n (np.dtype('uint32'), ('U', 4)),\n (np.dtype('uint64'), ('U', 8)),\n (np.dtype('int16'), ('I', 2)),\n (np.dtype('int32'), ('I', 4)),\n (np.dtype('int64'), ('I', 8))]\n pcd_type_to_numpy_type = dict((q, p) for (p, q) in numpy_pcd_type_mappings)\n\n for f, c, t, s in zip(metadata_['fields'],\n metadata_['count'],\n metadata_['type'],\n metadata_['size']):\n np_type = pcd_type_to_numpy_type[(t, s)]\n if c == 1:\n fieldnames.append(f)\n typenames.append(np_type)\n else:\n fieldnames.extend(['%s_%04d' % (f, i) for i in xrange(c)])\n typenames.extend([np_type] * c)\n dtype = np.dtype(zip(fieldnames, typenames))\n return dtype\n\n def parse_binary_pc_data(f, dtype, metadata):\n rowstep = metadata['points'] * dtype.itemsize\n # for some reason pcl adds empty space at the end of files\n buf = f.read(rowstep)\n return np.fromstring(buf, dtype=dtype)\n\n def parse_binary_compressed_pc_data(f, dtype, metadata):\n # compressed size of data (uint32)\n # uncompressed size of data (uint32)\n # compressed data\n # junk\n fmt = 'II'\n compressed_size, uncompressed_size = struct.unpack(fmt, f.read(struct.calcsize(fmt)))\n compressed_data = f.read(compressed_size)\n # (compressed > uncompressed)\n # should we read buf as raw binary?\n buf = lzf.decompress(compressed_data, uncompressed_size)\n if len(buf) != uncompressed_size:\n raise Exception('Error decompressing data')\n # the data is stored field-by-field\n pcs_data = np.zeros(metadata['width'], dtype=dtype)\n ix = 0\n for dti in range(len(dtype)):\n dt = dtype[dti]\n bytess = dt.itemsize * metadata['width']\n column = np.fromstring(buf[ix:(ix + bytess)], dt)\n pcs_data[dtype.names[dti]] = column\n ix += bytess\n return pcs_data\n\n with open(fname, 'rb') as f:\n header = []\n while True:\n ln = f.readline().strip()\n header.append(ln)\n if ln.startswith('DATA'):\n metadata = parse_header(header)\n dtype = _build_dtype(metadata)\n break\n if metadata['data'] == 'ascii':\n pc_data = np.loadtxt(f, dtype=dtype, delimiter=' ')\n pc_data.dtype = np.float32\n pc_data = pc_data.reshape(-1, 4)\n elif metadata['data'] == 'binary':\n pc_data = parse_binary_pc_data(f, dtype, metadata)\n elif metadata['data'] == 'binary_compressed':\n pc_data = parse_binary_compressed_pc_data(f, dtype, metadata)\n else:\n print('File->py_pcd.py: DATA field is not \"ascii\",maybe \"binary\" or \"binary_compressed\", try to add method for both')\n return 'CODE: 0x123'\n pc = point_cloud(metadata, pc_data)\n return pc", "def getImageSize(language=None):", "def _determine_dtype(fields):\n # Check whether the required fields are there\n for field in _NRRD_REQUIRED_FIELDS:\n if field not in fields:\n raise NrrdError('Nrrd header misses required field: 
\"%s\".' % (field))\n\n # Process the data type\n np_typestring = _TYPEMAP_NRRD2NUMPY[fields['type']]\n # Endianness is not necessary for ASCII encoding type\n if np.dtype(np_typestring).itemsize > 1 and fields['encoding'] not in ['ascii', 'text', 'txt']:\n if 'endian' not in fields:\n raise NrrdError('Nrrd header misses required field: \"endian\".')\n if fields['endian'] == 'big':\n np_typestring = '>' + np_typestring\n elif fields['endian'] == 'little':\n np_typestring = '<' + np_typestring\n\n return np.dtype(np_typestring)", "def getimagesize(filename):\n img = Image.open(filename)\n (w,h) = img.size\n t = \"IMAGETYPE_%S\" % img.format\n a = \"width=\\\"%d\\\" height=\\\"%d\\\"\" % img.size\n return (w,h,t,a)", "def read_med_image(file_path, dtype):\n img_stk = sitk.ReadImage(file_path)\n img_np = sitk.GetArrayFromImage(img_stk)\n img_np = img_np.astype(dtype)\n return img_np, img_stk", "def getImageDescriptor(im):\n bb = '\\x2C' # Image separator,\n bb += intToBin( 0 ) # Left position\n bb += intToBin( 0 ) # Top position\n bb += intToBin( im.size[0] ) # image width\n bb += intToBin( im.size[1] ) # image height\n bb += '\\x87' # packed field : local color table flag1, interlace0, sorted table0, reserved00, lct size111=7=2^(7+1)=256.\n # LZW minimum size code now comes later, begining of [image data] blocks\n return bb", "def filename_type(filename):\n import re\n\n nii_re = re.compile(\".+(nii.gz)$|.+(nii)$\")\n npy_re = re.compile(\".+(npy)$|.+(npz)$\")\n\n\n if len(nii_re.findall(filename)):\n return 'nii'\n elif len(npy_re.findall(filename)):\n return 'npy'\n return None", "def get_image_size(self):", "def _build_dtype(metadata_):\n fieldnames = []\n typenames = []\n numpy_pcd_type_mappings = [(np.dtype('float32'), ('F', 4)),\n (np.dtype('float64'), ('F', 8)),\n (np.dtype('uint8'), ('U', 1)),\n (np.dtype('uint16'), ('U', 2)),\n (np.dtype('uint32'), ('U', 4)),\n (np.dtype('uint64'), ('U', 8)),\n (np.dtype('int16'), ('I', 2)),\n (np.dtype('int32'), ('I', 4)),\n (np.dtype('int64'), ('I', 8))]\n pcd_type_to_numpy_type = dict((q, p) for (p, q) in numpy_pcd_type_mappings)\n\n for f, c, t, s in zip(metadata_['fields'],\n metadata_['count'],\n metadata_['type'],\n metadata_['size']):\n np_type = pcd_type_to_numpy_type[(t, s)]\n if c == 1:\n fieldnames.append(f)\n typenames.append(np_type)\n else:\n fieldnames.extend(['%s_%04d' % (f, i) for i in xrange(c)])\n typenames.extend([np_type] * c)\n dtype = np.dtype(zip(fieldnames, typenames))\n return dtype", "def _image_data(buff):\n code = buff.getvalue()\n m = _size(code)\n if m:\n size = int(m.group(1))\n else:\n raise Exception('Internal error: PPM header not found')\n return code[m.end():], size", "def get_vtk_image_attrib(image):\n data = vtk_image_to_numpy(image)\n return (data.shape, data.dtype)", "def getTiffInfo(path):\n # py 2/3 comp\n first_file = glob.glob(os.path.join(path, '*.tif'))[0]\n if ScanImageTiffReader is not None and ScanImageTiffReader(first_file).metadata() != '':\n string = ScanImageTiffReader(first_file).metadata()\n else:\n tfh = tifffile.TiffFile(first_file)\n # If software key is in dict tags --> SI2016\n if 'software' in tfh.pages[0].tags:\n string = tfh.pages[0].tags['software'].value.decode('utf-8')\n else:\n string = tfh.pages[0].tags['image_description'].value.decode('utf-8')\n string = \" \".join(string.split()).replace('\\\\', ' ')\n string = string.replace(')', '')\n string = string.replace('(', '')\n return string", "def type_info(np_type):\n dt = np.dtype(np_type)\n np_type = dt.type\n width = 
dt.itemsize\n try: # integer type\n info = np.iinfo(dt)\n except ValueError:\n pass\n else:\n return dict(min=np_type(info.min), max=np_type(info.max), minexp=None,\n maxexp=None, nmant=None, nexp=None, width=width)\n info = np.finfo(dt)\n # Trust the standard IEEE types\n nmant, nexp = info.nmant, info.nexp\n ret = dict(min=np_type(info.min),\n max=np_type(info.max),\n nmant=nmant,\n nexp=nexp,\n minexp=info.minexp,\n maxexp=info.maxexp,\n width=width)\n if np_type in (_float16, np.float32, np.float64,\n np.complex64, np.complex128):\n return ret\n info_64 = np.finfo(np.float64)\n if dt.kind == 'c':\n assert np_type is np.longcomplex\n vals = (nmant, nexp, width / 2)\n else:\n assert np_type is np.longdouble\n vals = (nmant, nexp, width)\n if vals in ((112, 15, 16), # binary128\n (info_64.nmant, info_64.nexp, 8), # float64\n (63, 15, 12), (63, 15, 16)): # Intel extended 80\n return ret # these are OK without modification\n # The remaining types are longdoubles with bad finfo values. Some we\n # correct, others we wait to hear of errors.\n # We start with float64 as basis\n ret = type_info(np.float64)\n if vals in ((52, 15, 12), # windows float96\n (52, 15, 16)): # windows float128?\n # On windows 32 bit at least, float96 is Intel 80 storage but operating\n # at float64 precision. The finfo values give nexp == 15 (as for intel\n # 80) but in calculations nexp in fact appears to be 11 as for float64\n ret.update(dict(width=width))\n return ret\n if vals == (105, 11, 16): # correctly detected double double\n ret.update(dict(nmant=nmant, nexp=nexp, width=width))\n return ret\n # Oh dear, we don't recognize the type information. Try some known types\n # and then give up. At this stage we're expecting exotic longdouble or\n # their complex equivalent.\n if np_type not in (np.longdouble, np.longcomplex) or width not in (16, 32):\n raise FloatingError('We had not expected type %s' % np_type)\n if (vals == (1, 1, 16) and on_powerpc() and\n _check_maxexp(np.longdouble, 1024)):\n # double pair on PPC. The _check_nmant routine does not work for this\n # type, hence the powerpc platform check instead\n ret.update(dict(nmant=106, width=width))\n elif (_check_nmant(np.longdouble, 52) and\n _check_maxexp(np.longdouble, 11)):\n # Got float64 despite everything\n pass\n elif (_check_nmant(np.longdouble, 112) and\n _check_maxexp(np.longdouble, 16384)):\n # binary 128, but with some busted type information. np.longcomplex\n # seems to break here too, so we need to use np.longdouble and\n # complexify\n two = np.longdouble(2)\n # See: https://matthew-brett.github.io/pydagogue/floating_point.html\n max_val = (two ** 113 - 1) / (two ** 112) * two ** 16383\n if np_type is np.longcomplex:\n max_val += 0j\n ret = dict(min=-max_val,\n max=max_val,\n nmant=112,\n nexp=15,\n minexp=-16382,\n maxexp=16384,\n width=width)\n else: # don't recognize the type\n raise FloatingError('We had not expected long double type %s '\n 'with info %s' % (np_type, info))\n return ret" ]
[ "0.6820558", "0.5704018", "0.5546598", "0.5516806", "0.5513057", "0.54892457", "0.53587013", "0.5346659", "0.5331653", "0.529922", "0.5295019", "0.52838844", "0.5277544", "0.5265453", "0.5226433", "0.5198636", "0.51947224", "0.5194617", "0.5169112", "0.51395136", "0.50898606", "0.5039762", "0.50133353", "0.50117266", "0.50100327", "0.49992338", "0.49855244", "0.49773875", "0.4973709", "0.49664664" ]
0.8044237
0
Move files out of subdirectories in the current working directory.
def move_file(): # print("\n".join(os.listdir(filepath))) # folders = [os.path.join(filepath, fld) for fld in os.listdir(filepath)] # print(filepath + ":\n " + "\n ".join(folders)) folders = filter(os.path.isdir, os.listdir(u".")) # print("Sub-folders: ", u"\n".join(folders)) for folder in folders: files = [os.path.join(folder, fn) for fn in os.listdir(folder)] files = filter(os.path.isfile, files) for fn in files: _, filename = os.path.split(fn) shutil.move(fn, filename) assert 0 == len(os.listdir(folder))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def moveFiles(outputDir, files):\n\tfor fn in files:\n\t\tshutil.move(fn, join(outputDir, getFilenameWithoutPath(fn)))", "def move_recursively(src, dst, overwrite=False, changed_only=True):\n if os.path.isdir(src):\n movetree(src, dst, overwrite, changed_only)\n else:\n movefile(src, dst, overwrite, changed_only)", "def walk():\n os.chdir('Lyrics')\n for directory_name, subdirectories, filenames in os.walk('.'):\n print(\"Directory:\", directory_name)\n print(\"\\tcontains subdirectories:\", subdirectories)\n print(\"\\tand files:\", filenames)\n print(\"(Current working directory is: {})\".format(os.getcwd()))\n for filename in filenames:\n shutil.move(os.path.join(directory_name, filename),\n os.path.join(directory_name) + '/' + get_fixed_filename(filename))", "def move_files(fname_fout, root_dir, dest_dir):\n fname, f_ext = os.path.splitext(fname_fout)\n # Find files which filename of fname_fout\n matches = []\n pattern = fname + '*'\n root_fnames = os.listdir(root_dir)\n for filename in fnmatch.filter(root_fnames, pattern):\n matches.append([filename, os.path.join(root_dir, filename)])\n # Extract new folder name based on fname_fout\n new_folder_name = reshape_fname(fname_fout, ['nairfoil', 'nsetup'])\n dest_dir = os.path.join(dest_dir, new_folder_name)\n # Move files\n for cur_file in matches:\n os.renames(cur_file[1], os.path.join(dest_dir, cur_file[0]))", "def organizeDir(self):\n # Classify every file in dir\n for file in os.listdir(self.path):\n curPath = self.path + file\n self.moveFile(curPath)", "def _recursive_put_files(self, is_subdirectory=False, sub_directory_name=None):\n current_path = os.path.basename(os.getcwd())\n LOG.info(f\"Copying files from the directory '{current_path}'\")\n for path_ in os.listdir():\n # Skip dotfiles and __pycache__\n if path_.startswith('.') or path_.startswith('__'):\n continue\n if os.path.isdir(path_):\n if sub_directory_name is not None:\n dir_name = os.path.join(sub_directory_name, path_)\n else:\n dir_name = path_\n try:\n self._file_explorer.md(dir_name)\n except Exception as e:\n print(e)\n os.chdir(dir_name.split(os.path.sep)[-1])\n self._recursive_put_files(\n is_subdirectory=True,\n sub_directory_name=dir_name,\n )\n else:\n try:\n if sub_directory_name is not None:\n self._file_explorer.put(path_, os.path.join(sub_directory_name, path_))\n else:\n self._file_explorer.put(path_)\n except RemoteIOError as e:\n print(path_, e)\n if is_subdirectory:\n os.chdir(UP_ONE_DIRECTORY)", "def main():\n os.chdir(\"FilesToSort\")\n files = os.listdir('.')\n for file in files:\n extension_directory = file[file.find('.') + 1:]\n try:\n os.mkdir(extension_directory)\n except FileExistsError:\n pass\n shutil.move(file, extension_directory)", "def move_to_folder(folder = \"output\"):\n for files in os.listdir(os.getcwd()):\n if files.endswith(\".tcl\") or files.endswith(\".pdb\") or files.endswith(\".fasta\") or files.endswith(\".tpl\"):\n new_file = folder + \"/\" + files\n os.rename(files, new_file)", "def main():\n os.chdir('FilesToSort')\n extension_to_category = {}\n for filename in os.listdir('.'):\n if os.path.isdir(filename):\n continue\n extension = filename.split('.')[-1]\n make_subdirectories(extension, extension_to_category)\n shutil.move(filename, extension_to_category[extension])", "def move_to_folder(folder = \"output\"):\n\n for files in os.listdir(os.getcwd()):\n if files.endswith(\".tcl\") or files.endswith(\".pdb\") or files.endswith(\".fasta\"):\n new_file = folder + \"/\" + files\n os.rename(files, new_file)", "def 
_move_files(topdatadir, startdate, model_forcing):\n\n curdate = startdate\n subdir = f\"{topdatadir}/cf_{model_forcing}\"\n subdir += f\"_{curdate.year:04d}{curdate.month:02d}\"\n files = glob.glob(f\"{subdir}/*.NC\")\n for filename in files:\n shutil.move(filename, os.path.join(topdatadir, os.path.basename(filename)))\n shutil.rmtree(subdir)", "def moveDirectoryContents(self, source, target, force=False):\n if source.endswith('/') or source.endswith('\\\\'):\n source += '*'\n else:\n source += os.path.sep + '*'\n if force:\n command = 'mv -f %s %s'\n else:\n command = 'mv %s %s'\n self.communicate(command % (source, target))", "def cleaning_this_directory():\n import os, shutil\n files = os.listdir(\".\")\n for f in files:\n if os.path.isfile(f):\n extension = f.split(\".\")[-1]\n if extension == 'jpg':\n #move the file\n os.rename(f, \"images/\"+f)\n elif extension == 'JPG':\n #move to xml file\n os.rename(f, 'xml/'+f)\n else:\n pass", "def moveFiles(inputDir, inputFiles):\n\tfor file in inputFiles:\n\t\tlogger.debug('moveFiles: {0}'.format(file))\n\t\tshutil.move(join(inputDir, file), join(inputDir, 'processed', file))\n\n\treturn 0", "def clean(self):\n clean_list = [\n position\n for position in os.listdir()\n if os.path.isfile(position) and not position.startswith(\".\")\n ]\n self.move_files(clean_list)", "def _sync_directories(from_directory, to_directory):\n if not os.path.exists(to_directory):\n os.mkdir(to_directory)\n for root, dirs, files in os.walk(from_directory):\n to_root = root.replace(from_directory, to_directory)\n for directory in dirs:\n to_child_dir = os.path.join(to_root, directory)\n if not os.path.exists(to_child_dir):\n os.mkdir(to_child_dir)\n for fname in files:\n from_file = os.path.join(root, fname)\n to_file = os.path.join(to_root, fname)\n with open(from_file, 'rb') as a, open(to_file, 'wb') as b:\n b.write(a.read())", "def move_files(self, download_path):\n if self.file_list is None:\n self._set_file_list()\n\n for individual_file in self.file_list:\n source_path = os.path.join(self.base_dir, individual_file)\n dest_path = os.path.join(download_path, individual_file)\n # We don't move files that don't exist\n if not os.path.exists(source_path):\n continue\n\n # Make sure the destination directory exists\n if not os.path.exists(os.path.dirname(dest_path)):\n os.makedirs(os.path.dirname(dest_path))\n if self.to_copy:\n shutil.copy(source_path, dest_path)\n else:\n os.rename(source_path, dest_path)\n return", "def move_files(from_dir, to_dir, keyword):\n \n if not os.path.exists(to_dir):\n os.mkdir(to_dir)\n \n if keyword == None:\n # If keyword is left empty, from_dir is considered a list of files.\n to_move = from_dir\n else:\n to_move = glob.glob(os.path.join(from_dir, '*' + keyword + '*'))\n \n n_moved = 0 \n for f in to_move:\n if os.path.isfile(f):\n shutil.move(f, to_dir)\n n_moved += 1\n \n print \"Moved %i files to %s.\" % (n_moved, to_dir)", "def move_files(self, files: List[str], directory=\"\"):\n result = []\n for file in files:\n if directory == \"\":\n temp_file = File(file)\n new_directory = self._create_or_define(temp_file)\n origin_folder = \"\"\n else:\n new_directory = directory\n origin_folder = os.path.basename(os.path.dirname(file))\n temp_file = File(os.path.basename(file))\n\n if not file.startswith(new_directory):\n if temp_file.get_extension():\n temp_extension = \".\" + temp_file.get_extension()\n else:\n temp_extension = \"\"\n\n ordinal_number = self.check_same_objects(new_directory, temp_file)\n target_name = 
temp_file.get_just_name() + temp_extension\n if ordinal_number:\n formatted_ordinal_number = f\" ({ordinal_number - 1})\"\n target_name = (\n temp_file.get_just_name()\n + formatted_ordinal_number\n + temp_extension\n )\n\n if self.underscore_flag:\n target_name = target_name.replace(\" \", \"_\")\n\n new_position = os.path.join(self.directory, new_directory, target_name)\n\n file_position = os.path.join(\n self.directory, origin_folder, str(temp_file)\n )\n if file_position != os.path.join(\n self.directory,\n new_directory,\n temp_file.get_just_name() + temp_extension,\n ):\n result.append(os.path.join(origin_folder, str(temp_file)))\n self.possibilities[new_directory].files.append(temp_file)\n if not self.dry_run:\n os.rename(file_position, new_position)\n else:\n print(f\"{file_position} would be moved to {new_position}\")\n elif self.dry_run:\n print(\n f\"{file_position} won't be move since the location is the same\"\n )\n\n self.log_result(result, directory)", "def move_files(src, dst, filenames):\n for filename in filenames:\n os.rename(os.path.join(src, filename), os.path.join(dst, filename))", "def mirror_directory_tree_with_files_loop(self, in_dirpath, out_dirpath, only_include_filetypes, include_file_suffix, avoid_files_with):\n for i in os.listdir(in_dirpath):\n if i[0] == '.' or i[:6] == 'README':\n continue\n elif os.path.isdir(in_dirpath + i):\n if not os.path.exists(out_dirpath + i):\n os.makedirs(out_dirpath + i)\n self.mirror_directory_tree_with_files_loop(self, in_dirpath + i + '/', out_dirpath + i + '/', only_include_filetypes, include_file_suffix, avoid_files_with)\n elif os.path.isfile(in_dirpath + i):\n if avoid_files_with:\n if avoid_files_with in '.'.join(i.split('.')[:-1]):\n continue\n if only_include_filetypes:\n suffix = i.split('.')[-1]\n if suffix in only_include_filetypes:\n if include_file_suffix:\n filename = i\n else:\n filename = '.'.join(i.split('.')[:-1])\n self.files_containing_filetype.update([in_dirpath + i])\n self.mirrored_filepaths.update([out_dirpath + filename])\n self.mirrored_directory_leaves.update([out_dirpath])\n else:\n if include_file_suffix or not '.' in i:\n filename = i\n else:\n filename = '.'.join(i.split('.')[:-1])\n self.files_containing_filetype.update([in_dirpath + i])\n self.mirrored_filepaths.update([out_dirpath + filename])\n self.mirrored_directory_leaves.update([out_dirpath])\n else:\n print dirpath + i, 'does not exist'\n return", "def simple_move_files(selected_image_list, out_dir='/command/results/top_images_test_set/'):\n for file_no in range(len(selected_image_list)):\n shutil.move(selected_image_list[file_no], out_dir + selected_image_list[file_no].split('/')[-1])\n return", "def move_dirs(args):\n src = args[0]\n dst = args[1]\n print(\"Moving from: {}\".format(src))\n print(\" to: {}\".format(dst))\n shutil.move(src, dst)\n return", "def rename_files_dirs(options):\n # create dirs first\n call_command('''find . -type d | while read f; do mkdir -p \"$(echo $f | sed 's/%(patrn)s/%(repl)s/g')\"; done''', options)\n # than move files\n call_command('''find . 
-type f | while read f; do mv \"$f\" \"$(echo $f | sed 's/%(patrn)s/%(repl)s/g')\"; done''', options)\n # delete empty dirs\n call_command('''find -depth -type d -empty -exec rmdir {} \\;''', [(1,1)])", "def organize_by_order(current_path):\n\tfor file in sorted(os.listdir(current_path)):\n\t\tif file != 'file_organizer.py':\n\t\t\ttry:\n\t\t\t\tos.makedirs(file[0])\n\t\t\t\tclick.echo(\"Creating a Folder\",file[0])\n\t\t\texcept:\n\t\t\t\tNone\n\t\t\tshutil.move(file,file[0])\n\t\t\tclick.secho(('Finished moving : {} to {} folder'.format(file,file[0])),fg='green')", "def moveFiles(rootDir):\n\n homedir = os.environ['HOME']\n albumDirec = 'AlbumCoverImages'\n #Check if a directory exists\n if not os.path.isdir(os.path.join(homedir, 'Pictures', albumDirec)):\n print('AlbumCoverImages not found, trying to make...')\n os.makedirs(os.path.join(homedir, 'Pictures', albumDirec))\n \n for root, dirs, files in os.walk(rootDir, topdown=False):\n #print('testtest')\n for name in files:\n \n\n #Find image files, and move them to albumCoverImages\n #For some bullshit reason or statments won't work here, have to\n # parse this out to elif statements, ughhhh...\n \n if '.jpg' in name:\n os.rename(os.path.join(root, name), os.path.join(homedir, 'Pictures', albumDirec, name))\n print('{0} moved to {1}!'.format(name, os.path.join(homedir, 'Pictures', albumDirec)))\n \n elif '.png' in name:\n os.rename(os.path.join(root, name), os.path.join(homedir, 'Pictures', albumDirec, name))\n print('{0} moved to {1}!'.format(name, os.path.join(homedir, 'Pictures', albumDirec, name)))\n \n elif '.gif' in name:\n os.rename(os.path.join(root, name), os.path.join(homedir, 'Pictures', albumDirec, name))\n print('{0} moved to {1}!'.format(name, os.path.join(homedir, 'Pictures', albumDirec, name)))\n \n elif '.pdf' in name:\n os.rename(os.path.join(root, name), os.path.join(homedir, 'Pictures', albumDirec, name))\n print('{0} moved to {1}!'.format(name, os.path.join(homedir, 'Pictures', albumDirec, name)))\n\n else:\n try:\n #Use tinytag to get file metadata\n tag = TinyTag.get(os.path.join(root, name))\n artistName = tag.artist\n albumName = tag.album\n \n #TODO: Need to add more conditions\n if isinstance(artistName, str):\n artistName = artistName.replace('/', '_')\n\n elif isinstance(albumName, str):\n albumName.replace('/', '_')\n \n\n #Check if the artists directory exists, if not make it\n try:\n if not os.path.isdir(os.path.join(rootDir, artistName)):\n os.makedirs(os.path.join(rootDir, artistName))\n print('{0} directory made!'.format(artistName))\n \n except ValueError:\n print('ValueError with {0}'.format(root+'/'+name))\n continue\n\n except TypeError:\n print('TypeError with {0}'.format(root+'/'+name))\n continue\n\n #Check if the songs album exists, if not make it\n try:\n if not os.path.isdir(os.path.join(rootDir, artistName, albumName)):\n os.makedirs(os.path.join(rootDir, artistName, albumName))\n print('{0} directory made!'.format(albumName))\n \n except TypeError:\n print('TypeError with {0}! Look at album directory making.'.format(root+'/'+name))\n continue\n\n #TODO: Check if album is in artist direc, if not, move it\n\n #Check if song is in album, if not move it \n try:\n if os.path.isfile(os.path.join(rootDir, artistName, albumName, name)) == False:\n os.rename(os.path.join(root, name), os.path.join(rootDir, artistName, albumName, name))\n print('{0} moved to {1}!'.format(name, albumName))\n \n except TypeError:\n print('TypeError with file {0}! 
Look at line song moving'.format(root+'/'+name))\n continue\n \n #TODO: Check if this part works\n except LookupError:\n if (\".jpg\") or (\".png\") or (\".7z\") or (\"README\") or (\".zip\") in name:\n continue\n \n else:\n print('No reader support for {0}'.format(name))\n continue", "def move_files(src_dir, dst_dir):\n for f in os.listdir(src_dir):\n try:\n name, season, episode = FILENAME_PATTERN.search(f).groups()\n except AttributeError:\n try:\n name, season, episode = FILENAME_PATTERN2.search(f).groups()\n except AttributeError:\n print \"Cannot parse\", f\n pass\n\n name = name.replace('.', ' ').replace('_', ' ').strip().title()\n\n dir_path = os.path.join(dst_dir, name, 'Season %02d' % int(season))\n full_path = os.path.join(dir_path, f)\n source_path = os.path.join(src_dir, f)\n\n if not os.path.exists(dir_path):\n os.makedirs(dir_path, 0777)\n\n if not os.path.exists(full_path):\n shutil.move(source_path, full_path)\n os.symlink(full_path, source_path)", "def sort_folder():\n for file in downloads_path.iterdir():\n if file.is_file():\n extension = file.suffix\n file = str(file)\n if extension in program_types:\n move_file(file, programs_path)\n elif extension in compressed_types:\n move_file(file, compressed_path)\n elif extension in doc_types:\n move_file(file, documents_path)\n elif extension in music_types:\n move_file(file, music_path)\n elif extension in video_types:\n move_file(file, video_path)\n elif extension in picture_types:\n move_file(file, pictures_path)\n else:\n move_file(file, other_path)", "def wrap_move_file_folder(src, dst):\r\n try:\r\n if os.path.exists(dst):\r\n if os.path.isdir(dst):\r\n shutil.rmtree(dst)\r\n else:\r\n os.remove(dst)\r\n except Exception:\r\n pass\r\n for i in range(5):\r\n try:\r\n shutil.move(src, dst)\r\n break\r\n except Exception:\r\n time.sleep(10)", "def wrap_move_file_folder(src, dst):\r\n try:\r\n if os.path.exists(dst):\r\n if os.path.isdir(dst):\r\n shutil.rmtree(dst)\r\n else:\r\n os.remove(dst)\r\n except Exception:\r\n pass\r\n for i in range(5):\r\n try:\r\n shutil.move(src, dst)\r\n break\r\n except Exception:\r\n time.sleep(10)" ]
[ "0.68817514", "0.6756969", "0.66850746", "0.6610166", "0.6560671", "0.65322196", "0.65059394", "0.643787", "0.6378325", "0.63559556", "0.6313153", "0.62905586", "0.62528837", "0.6242785", "0.62256724", "0.6196293", "0.61216223", "0.611683", "0.61057925", "0.61052036", "0.6077716", "0.6065733", "0.6044018", "0.6006781", "0.60042477", "0.5984417", "0.5982206", "0.5965177", "0.5953763", "0.5953763" ]
0.7259415
0
Find duplications in submitted homework.
def find_duplication(homework): re_id = re.compile(r'(?P<stuid>[0-9]{10,11})') dup_check = dict() with open(homework, 'r') as data: lines = data.readlines() for ln in lines: dt = ln.split() csum, right = dt[0], dt[1] if csum not in dup_check: dup_check[csum] = list() m = re_id.search(right) if m is not None: stu_id = m.group('stuid') dup_check[csum].append(stu_id) dup_check = filter(lambda k, v: len(v) > 1, dup_check.items()) dup_check = [(key, sorted(val)) for key, val in dup_check] return dup_check
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _remove_dupes(recs, input, bad_movies, hist_list=[], feedback_list=[]):\n all_rated = input + bad_movies + hist_list + feedback_list\n nonlocal dupes\n dupes = [x for x in recs if x[0] in input]\n return [x for x in recs if x[0] not in all_rated]", "def find_duplicates():\n return AppServer.service.find_duplicated_files()", "def handle_duplicates(self, database):\n number_of_duplicates = 0\n number_of_merged = 0\n if not database.session:\n logger.error(\"no database session\")\n return (number_of_duplicates, number_of_merged)\n\n # return if this workout already has been checked\n if self.is_duplicate_with or self.manual_check_required_with:\n logger.debug(\"dup check - no check, since this workout is marked: {}\".format(self))\n return (number_of_duplicates, number_of_merged)\n\n # return if this workout does not have start_time set, since the following checks are based on it\n if not self.start_time or not self.duration_sec:\n return (number_of_duplicates, number_of_merged)\n\n # potential duplicate if time is overlapping\n # this workout |-----------------|\n # 1st potential duplicate in db |-----------------|\n # 2nd potential duplicate in db |------------------------|\n # 3rd potential duplicate in db |----------------|\n # 4th potential duplicate in db |---------|\n # (Remark to line 2 of 1st filter: needed to use database functions, \n # because modifiers like timedelta do not work with sqlalchemy sql attributes)\n # TODO handle timezones (needed for sqlite strftime)\n duplicates = database.session.query(Workout)\\\n .filter(or_(and_(Workout.start_time < self.start_time,\n func.strftime('%s', Workout.start_time, 'utc') + Workout.duration_sec >= self.start_time.timestamp()),\n and_(Workout.start_time >= self.start_time,\n Workout.start_time < (self.start_time + datetime.timedelta(seconds=int(self.duration_sec))))))\\\n .filter(Workout.is_duplicate_with == None)\\\n .filter(Workout.manual_check_required_with == None)\\\n .all()\n\n if len(duplicates) <= 1: \n return (number_of_duplicates, number_of_merged)\n\n # find overlapping workouts of different sports -> set manual_check_required_with\n for duplicate in duplicates:\n if duplicate.sport_id != self.sport_id:\n self.manual_check_required_with = duplicate.id\n logger.debug(\"dup check - workout marked to be checked: {}\".format(duplicate))\n duplicates.remove(duplicate)\n if len(duplicates) <= 1: \n return (number_of_duplicates, number_of_merged)\n\n # find overlapping workouts of same sports (they are duplicate workouts) -> now find the leading workout\n leading_workout = None\n # Step 1: if one of the duplicates is a previously merged one, use it as the leading workout\n for duplicate in duplicates:\n if duplicate.source and duplicate.source == \"MERGED WORKOUT\":\n leading_workout = duplicate\n logger.debug(\"Found leading workout in step 1: {}\".format(leading_workout))\n break\n # Step 2: else if one of the duplicates is from Zwift, prefer it as the leading workout\n if not leading_workout:\n for duplicate in duplicates:\n if duplicate.name and \"Zwift\" in duplicate.name:\n leading_workout = duplicate\n logger.debug(\"Found leading workout in step 2: {}\".format(leading_workout))\n break\n # Step 3: else if one of the duplicates is a Garmin import, prefer it as the leading workout\n if not leading_workout:\n for duplicate in duplicates:\n if duplicate.source and \"Garmin\" in duplicate.source:\n leading_workout = duplicate\n logger.debug(\"Found leading workout in step 3: {}\".format(leading_workout))\n break\n # Step 
4: else use this workout as the leading workout\n if not leading_workout:\n leading_workout = self\n logger.debug(\"Found leading workout in step 4: {}\".format(leading_workout))\n\n # create a new workout that will be treated as the leading one. Mark the duplicates \n if leading_workout.source == \"MERGED WORKOUT\":\n merged_workout = leading_workout\n else:\n merged_workout = Workout(source=\"MERGED WORKOUT\", external_id=datetime.datetime.now().timestamp())\n number_of_merged += 1\n merged_workout._merge_attributes(leading_workout)\n logger.debug(\"dup check - merged workout with leading: {}\".format(merged_workout))\n merged_workout.add(database)\n leading_workout.is_duplicate_with = merged_workout.id\n number_of_duplicates += 1\n\n for duplicate in duplicates:\n if duplicate is leading_workout:\n # already merged above\n continue\n if duplicate.is_duplicate_with == merged_workout.id:\n # already merged\n continue\n merged_workout._merge_attributes(duplicate)\n logger.debug(\"dup check - merged workout duplicate: {}\".format(merged_workout))\n duplicate.is_duplicate_with = merged_workout.id\n number_of_duplicates += 1\n logger.debug(\"dup check - duplicate workout marked: {}\".format(duplicate))\n\n return (number_of_duplicates, number_of_merged)", "def filter_dups(saved_home, dups_info_home):\n orig_context_file = open(os.path.join(saved_home, 'data_for_corenlp', 'kp20k_training_context_for_corenlp.txt'),\n encoding='utf-8')\n context_lines = orig_context_file.readlines()\n orig_allkeys_file = open(os.path.join(saved_home, 'data_for_corenlp', 'kp20k_training_keyword_for_corenlp.txt'),\n encoding='utf-8')\n allkeys_lines = orig_allkeys_file.readlines()\n assert len(context_lines) == len(allkeys_lines)\n\n # filter out the duplicates in the validation and the testing datasets and the kp20k training dataset itself\n dups_info_datasets = ['kp20k_training', 'kp20k_validation', 'kp20k_testing',\n 'inspec_testing', 'krapivin_testing',\n 'nus_testing', 'semeval_testing']\n total_filtered_idx_set = set()\n for dataset in dups_info_datasets:\n filtered_idx_set = set()\n dups_info_file = open(\n os.path.join(dups_info_home, '{}_context_nstpws_dups_w_kp20k_training.txt'.format(dataset)), encoding='utf-8')\n for line in dups_info_file:\n line = line.strip()\n # inspec_testing_48 kp20k_training_433051 jc_sc:0.7368; affine invariants of convex polygons | affine invariants of convex polygons\n dups, titles = line.split(';')\n src_dup, filtered_dup, _ = dups.split()\n src_idx = int(src_dup.strip().split('_')[-1])\n filtered_idx = int(filtered_dup.strip().split('_')[-1])\n if dataset != 'kp20k_training':\n filtered_idx_set.add(filtered_idx)\n else:\n if src_idx not in filtered_idx_set:\n filtered_idx_set.add(filtered_idx)\n total_filtered_idx_set = total_filtered_idx_set.union(filtered_idx_set)\n print('Num of filtered kp20k training data: {}'.format(len(total_filtered_idx_set)))\n\n # also filter out the invalid data samples\n print('Finding the invalid data samples in the original kp20k training ...')\n for corpus_idx in tqdm(range(len(context_lines))):\n if context_lines[corpus_idx].strip().split() == [''] or allkeys_lines[corpus_idx].strip().split(' ; ') == ['']:\n total_filtered_idx_set.add(corpus_idx)\n print('Num of filtered kp20k training data: {}'.format(len(total_filtered_idx_set)))\n\n total_filtered_idxes = sorted(list(total_filtered_idx_set))\n for filter_idx in total_filtered_idxes:\n context_lines[filter_idx] = '\\n'\n allkeys_lines[filter_idx] = '\\n'\n\n filtered_context_file = 
open(os.path.join(saved_home, 'data_for_corenlp',\n 'kp20k_training_context_for_corenlp_filtered.txt'),\n 'w', encoding='utf-8')\n filtered_context_file.writelines(context_lines)\n\n filtered_allkeys_file = open(os.path.join(saved_home, 'data_for_corenlp',\n 'kp20k_training_keyword_for_corenlp_filtered.txt'),\n 'w', encoding='utf-8')\n filtered_allkeys_file.writelines(allkeys_lines)\n\n orig_context_file = open(os.path.join(saved_home, 'data_for_corenlp',\n 'kp20k_training_filtered_for_corenlp_idxes.txt'),\n 'w', encoding='utf-8')\n orig_context_file.write(' '.join([str(idx) for idx in total_filtered_idxes]) + '\\n')\n orig_context_file.write(str(len(total_filtered_idxes)) + '\\n')", "def find_duplicate(student_list):\r\n place_holder = student_info('null', 'null', '0', '0')\r\n current = place_holder\r\n dupe = []\r\n final = []\r\n for student in student_list:\r\n previous = current\r\n current = student\r\n if current.first == previous.first:\r\n if previous in final:\r\n dupe.append(final.pop())\r\n dupe.append(student)\r\n elif current.first != previous.first:\r\n if len(dupe) > 1:\r\n dupe.sort(key=lambda x: x[1])\r\n for student_dupe in dupe:\r\n final.append(student_dupe)\r\n final.append(student)\r\n dupe = []\r\n else:\r\n final.append(student)\r\n if len(dupe) > 1:\r\n dupe.sort(key=lambda x: x[1])\r\n for student_dupe in dupe:\r\n final.append(student_dupe)\r\n for student_final in final:\r\n print(student_format(student_final))", "def find_duplicates(lst):\n \"*** YOUR CODE HERE ***\"\n return len( set(lst) ) != len(lst)", "def list_dups(exproot, **kwargs):\n seen_args = []\n seen_names = []\n for jobname, args, results in load_all(exproot):\n if args in seen_args:\n print jobname, 'is dup of', seen_names[seen_args.index(args)]\n elif args != None:\n seen_args.append(args)\n seen_names.append(jobname)", "def test_identify_duplicates_6(self):\n\n ticket1 = ticket.ImportTicket()\n ticket1.id = 1\n ticket1.type = \"replace\"\n ticket1.phage_id = \"Trixie\"\n\n ticket2 = ticket.ImportTicket()\n ticket2.id = 1\n ticket2.type = \"replace\"\n ticket2.phage_id = \"Trixie\"\n\n null_set = set([\"none\"])\n list_of_tickets = [ticket1, ticket2]\n id_dupes, phage_id_dupes = \\\n tickets.identify_duplicates(list_of_tickets, null_set=null_set)\n with self.subTest():\n self.assertEqual(len(id_dupes), 1)\n with self.subTest():\n self.assertEqual(len(phage_id_dupes), 1)", "def test_identify_duplicates_1(self):\n\n ticket1 = ticket.ImportTicket()\n ticket1.id = 1\n ticket1.type = \"replace\"\n ticket1.phage_id = \"Trixie\"\n\n ticket2 = ticket.ImportTicket()\n ticket2.id = 2\n ticket2.type = \"replace\"\n ticket2.phage_id = \"L5\"\n\n null_set = set([\"none\"])\n list_of_tickets = [ticket1, ticket2]\n id_dupes, phage_id_dupes = \\\n tickets.identify_duplicates(list_of_tickets, null_set=null_set)\n\n with self.subTest():\n self.assertEqual(len(id_dupes), 0)\n with self.subTest():\n self.assertEqual(len(phage_id_dupes), 0)", "def duplicates_marked_reciprocally():\n ids = FRAMEWORKS_DF['CURATED-COFs ID'].str\n messages = []\n\n for _index, row in FRAMEWORKS_DF.iterrows():\n if row['Duplicate found'] != 'none':\n original_id = row['CURATED-COFs ID']\n duplicate_id = row['Duplicate found']\n duplicate_row = FRAMEWORKS_DF.loc[FRAMEWORKS_DF['CURATED-COFs ID'] == duplicate_id ]\n if not len(duplicate_row) == 1:\n messages.append(f'Found row without reciprocal duplicate mark:\\n{row}')\n\n duplicate_row_original_id = duplicate_row['Duplicate found'].values[0]\n if not duplicate_row['Duplicate 
found'].values[0] == original_id:\n messages.append(f'Duplicate row lists ID {duplicate_row_original_id}, expected {original_id}')\n\n if messages:\n print('\\n'.join(messages))\n sys.exit(1)\n\n print('Rows marked as duplicates go both ways.')", "def test_identify_duplicates_3(self):\n\n ticket1 = ticket.ImportTicket()\n ticket1.id = 1\n ticket1.type = \"replace\"\n ticket1.phage_id = \"L5\"\n\n ticket2 = ticket.ImportTicket()\n ticket2.id = 1\n ticket2.type = \"replace\"\n ticket2.phage_id = \"Trixie\"\n\n null_set = set([\"none\"])\n list_of_tickets = [ticket1, ticket2]\n id_dupes, phage_id_dupes = \\\n tickets.identify_duplicates(list_of_tickets, null_set=null_set)\n with self.subTest():\n self.assertEqual(len(id_dupes), 1)\n with self.subTest():\n self.assertEqual(len(phage_id_dupes), 0)", "def test_duplicated_gaitid(self):\n idaa_index = 6\n\n upload_program = program.ProgramUpload(idaa_program=self.idaa_json['value'][idaa_index]['fields'],\n msr_country_codes_list=msr_country_codes_list, msr_gaitid_list=msr_gaitid_list, duplicated_gaitids=self.duplicated_gaitids\n )\n\n self.assertFalse(upload_program.is_valid())\n self.assertTrue(upload_program.has_discrepancy('duplicate_gaitid'))", "def test_identify_duplicates_2(self):\n\n ticket1 = ticket.ImportTicket()\n ticket1.id = \"none\"\n ticket1.type = \"replace\"\n ticket1.phage_id = \"none\"\n\n ticket2 = ticket.ImportTicket()\n ticket2.id = \"none\"\n ticket2.type = \"replace\"\n ticket2.phage_id = \"none\"\n\n null_set = set([\"none\"])\n list_of_tickets = [ticket1, ticket2]\n id_dupes, phage_id_dupes = \\\n tickets.identify_duplicates(list_of_tickets, null_set=null_set)\n with self.subTest():\n self.assertEqual(len(id_dupes), 0)\n with self.subTest():\n self.assertEqual(len(phage_id_dupes), 0)", "def test_identify_duplicates_4(self):\n\n ticket1 = ticket.ImportTicket()\n ticket1.id = 1\n ticket1.type = \"replace\"\n ticket1.phage_id = \"Trixie\"\n\n ticket2 = ticket.ImportTicket()\n ticket2.id = 2\n ticket2.type = \"replace\"\n ticket2.phage_id = \"Trixie\"\n\n null_set = set([\"none\"])\n list_of_tickets = [ticket1, ticket2]\n id_dupes, phage_id_dupes = \\\n tickets.identify_duplicates(list_of_tickets, null_set=null_set)\n with self.subTest():\n self.assertEqual(len(id_dupes), 0)\n with self.subTest():\n self.assertEqual(len(phage_id_dupes), 1)", "def duplicates(deleteFlag=False,cnst='1'):\n output = db.query(['fwid','strjob'],cnst) # list of (fwid,strjob) pairs\n rptDict={} # dictionary for repeat values (key = first fwid, value = list of duplicates)\n for fwid,strjob in output:\n for f,s in output: # double-FOR loop\n if f is None: print 'NONE FWID??? ',f,s # hopefully this isn't flagged\n if strjob == s and f!=fwid: # condition for duplicate job\n if fwid not in list(itertools.chain.from_iterable(rptDict.values())): \n if fwid in rptDict.keys(): rptDict[fwid].append(f) # add to the list\n else: rptDict[fwid] = [f] # create a key,value pair\n print 'FWIDs with equal strjob entries: \\n',abbreviateDict(rptDict) # summarize results\n if deleteFlag:\n delfws = list(itertools.chain.from_iterable(rptDict.values()))\n if ask('Are you sure you want to delete %d duplicates?'%len(delfws)):\n for f in delfws: delete('fwid = %d'%f,False)", "def __delete_duplicates(self):\n log = logging.getLogger()\n log.debug(\"\\n---> Duplicate check <---\")\n\n chromosomes = list(set(self.chromosomes))\n diff = self.size - len(chromosomes)\n\n if diff > 0:\n log.debug(\"---> Duplicate(s) found! 
<---\")\n for i in range(diff):\n chromosomes.append(\n Chromosome(self.__generate_random_gene_sequence(),\n self.environment))\n else:\n log.debug(\"---> No duplicates found! <---\")\n\n self.chromosomes = chromosomes", "def check(self):\n if not self.session:\n print(\"no database\")\n\n number_of_checked_workouts = 0\n number_of_merged_workouts = 0\n number_of_duplicate_workouts = 0\n workouts = self.session.query(Workout).all()\n for workout in workouts:\n number_of_checked_workouts += 1\n if workout.is_duplicate_with:\n number_of_duplicate_workouts += 1\n else:\n (a, b) = workout.handle_duplicates(self)\n number_of_duplicate_workouts += a\n number_of_merged_workouts += b\n logger.info('{} workouts checked, {} of them were duplicate, created {} merged workouts'\\\n .format(number_of_checked_workouts,\n number_of_duplicate_workouts,\n number_of_merged_workouts,))", "def testDuplicate(self,permutations=True):\n # This algorithm is faster than encode,\n # but for nplex=2 enmagic2 would probably still be faster.\n if permutations:\n C = self.copy()\n C.sort(axis=1)\n else:\n C = self\n ind = sortByColumns(C)\n C = C.take(ind,axis=0)\n ok = (C != roll(C,1,axis=0)).any(axis=1)\n if not ok[0]: # all duplicates -> should result in one unique element\n ok[0] = True\n return ind,ok", "def listDuplicate(self,permutations=True):\n ind,ok = self.testDuplicate(permutations)\n return ind[~ok]", "def find_duplicates(path, extension):\n files = list_all_files_with_extension(path, extension)\n result = set()\n duplicates = set()\n for file in files:\n if file in result:\n print(\"duplicate\")\n print(file)\n duplicates.add(file)\n else:\n result.add(file)\n return duplicates", "def dupable_matches_required(self):\n return 2", "def isduplicate(a, b):\n db = bibtexparser.loads(a+'\\n'+b)\n e1, e2 = db.entries\n refs = Biblio()\n return refs.eq(e1, e2)", "def isduplicate(self, a, b):\n db = bibtexparser.loads(a+'\\n'+b)\n e1, e2 = db.entries\n refs = Biblio(similarity=self.similarity)\n return refs.eq(e1, e2)", "def findDuplicates(self, nums):\n nums = sorted(nums)\n ans = []\n i = 0\n while i < len(nums) - 1:\n if nums[i] == nums[i + 1]:\n ans.append(nums[i])\n i += 2\n else:\n i += 1\n\n return ans", "def for_duplicates(self):\n print('++++++++++++ Duplicates Check Start+++++++++++++')\n print('Report for:', self.name)\n if not self.df.empty:\n for column in self.df.columns:\n if self.df.duplicated(column).sum() > 0:\n print('Duplicates found in: ', column)\n else:\n print('No duplicates found in: ', column)\n else:\n print('Empty data set')\n print('++++++++++++ Duplicates Check End+++++++++++++')", "def find_duplicate_game_docs(self):\n gids = self._db.Games.aggregate([{'$group':\n {'_id' : '$gid',\n 'count' : {'$sum' : 1}}},\n {'$match':\n {'count' : {'$gt' : 1}}}])\n return [x['_id'] for x in gids]", "def remove_duplicates_badSolution( li ):\n newli=[]\n seen = set()\n for item in li:\n if item not in seen:\n seen.add( item )\n newli.append(item)\n\n return newli", "def find_the_duplicate(nums):\n # frequency = {}\n\n # for num in nums:\n # frequency[num] = frequency.get(num, 0) + 1\n\n # for num in frequency:\n # if frequency[num] == 2:\n # return num\n\n ##########\n\n # nums_dict = list(enumerate(sorted(nums)))\n\n # for i, num in nums_dict:\n # if num == nums_dict[i + 1]:\n # return num\n\n ##################\n\n seen = set()\n\n for num in nums:\n if num in seen:\n return num\n seen.add(num)", "def check_no_duplicates(examples):\n return len(examples) == len(set(examples))", "def 
subsetsWithDup(self, nums: List[int]) -> List[List[int]]:\n nums.sort()\n n = len(nums)\n ans, res = [], []\n\n for i in range(2**n, 2**(n+1)):\n # generate bitmask, from 0..00 to 1..11\n bitmask = bin(i)[3:]\n res = [nums[j] for j in range(n) if bitmask[j] == '1']\n if res not in ans:\n ans.append(res)\n\n return ans\n # print(ans)" ]
[ "0.6404877", "0.6166872", "0.58831835", "0.5859948", "0.58292764", "0.58159447", "0.57876647", "0.5782457", "0.5769974", "0.5762715", "0.5746975", "0.5736089", "0.5733144", "0.57238513", "0.5710152", "0.5654848", "0.553705", "0.5500801", "0.54478735", "0.5438873", "0.54368997", "0.54306716", "0.54144025", "0.54073423", "0.53861696", "0.53712606", "0.5346199", "0.53129274", "0.5275559", "0.5272303" ]
0.753223
0
Display the duplication check results.
def display_dup(dup_result): lines = [k + ": " + ", ".join(v) for k, v in dup_result] return lines
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def for_duplicates(self):\n print('++++++++++++ Duplicates Check Start+++++++++++++')\n print('Report for:', self.name)\n if not self.df.empty:\n for column in self.df.columns:\n if self.df.duplicated(column).sum() > 0:\n print('Duplicates found in: ', column)\n else:\n print('No duplicates found in: ', column)\n else:\n print('Empty data set')\n print('++++++++++++ Duplicates Check End+++++++++++++')", "def _display_sims(self, sims):\n nb_lignes_dupliquees = 0\n for num, couples in sims:\n print()\n print(num, \"similar lines in\", len(couples), \"files\")\n couples = sorted(couples)\n lineset = idx = None\n for lineset, idx in couples:\n print(\"==%s:%s\" % (lineset.name, idx))\n if lineset:\n for line in lineset._real_lines[idx : idx + num]:\n print(\" \", line.rstrip())\n nb_lignes_dupliquees += num * (len(couples) - 1)\n nb_total_lignes = sum([len(lineset) for lineset in self.linesets])\n print(\n \"TOTAL lines=%s duplicates=%s percent=%.2f\"\n % (\n nb_total_lignes,\n nb_lignes_dupliquees,\n nb_lignes_dupliquees * 100.0 / nb_total_lignes,\n )\n )", "def test_display_name(self):\r\n def verify_name(source_usage_key, parent_usage_key, expected_name, display_name=None):\r\n usage_key = self._duplicate_item(parent_usage_key, source_usage_key, display_name)\r\n duplicated_item = self.get_item_from_modulestore(usage_key, draft=True)\r\n self.assertEqual(duplicated_item.display_name, expected_name)\r\n return usage_key\r\n\r\n # Display name comes from template.\r\n dupe_usage_key = verify_name(self.problem_usage_key, self.seq_usage_key, \"Duplicate of 'Multiple Choice'\")\r\n # Test dupe of dupe.\r\n verify_name(dupe_usage_key, self.seq_usage_key, \"Duplicate of 'Duplicate of 'Multiple Choice''\")\r\n\r\n # Uses default display_name of 'Text' from HTML component.\r\n verify_name(self.html_usage_key, self.seq_usage_key, \"Duplicate of 'Text'\")\r\n\r\n # The sequence does not have a display_name set, so category is shown.\r\n verify_name(self.seq_usage_key, self.chapter_usage_key, \"Duplicate of sequential\")\r\n\r\n # Now send a custom display name for the duplicate.\r\n verify_name(self.seq_usage_key, self.chapter_usage_key, \"customized name\", display_name=\"customized name\")", "def _render_results_scan_summary(self):\n\n core.add_text(\n 'Scan Summary',\n color=self._control_text_color,\n parent=self._window_name)\n\n core.add_text(\n 'Number of images scanned: ',\n parent=self._window_name)\n\n core.add_same_line(parent=self._window_name)\n\n core.add_text(\n name='number_of_scanned_images_text',\n source=NUMBER_OF_SCANNED_IMAGES,\n parent=self._window_name)\n\n core.add_text(\n 'Number duplicate image sets: ',\n parent=self._window_name)\n\n core.add_same_line(parent=self._window_name)\n\n core.add_text(\n str(len(self._duplicates_list)),\n parent=self._window_name)\n\n core.add_text('', parent=self._window_name)", "def print_duplicates(md):\n for digest,paths in md.iteritems():\n for p in paths:\n print digest, p\n # print blank line between groups\n print \"\"", "def print_results(self):\n pass", "def display_results():\n pass", "def showClusters(self,clusterOfFiles,batchSize=3):\n #groupCounter keeps track of how many clusters of duplicate files has been printed\n clusterCounter=0\n for acluster in clusterOfFiles:\n #print a cluster/group of duplicate files\n print(\"Duplicate group {0}\".format(clusterCounter+1))\n print (\"All of these files have the same content:\")\n for afile in acluster:\n print(afile)\n \n #increase the groupCounter by 1 as one group has been printed\n 
clusterCounter+=1\n if clusterCounter%batchSize==0:\n raw_input(\"Press any key for more duplicates\")", "def check(self):\n if not self.session:\n print(\"no database\")\n\n number_of_checked_workouts = 0\n number_of_merged_workouts = 0\n number_of_duplicate_workouts = 0\n workouts = self.session.query(Workout).all()\n for workout in workouts:\n number_of_checked_workouts += 1\n if workout.is_duplicate_with:\n number_of_duplicate_workouts += 1\n else:\n (a, b) = workout.handle_duplicates(self)\n number_of_duplicate_workouts += a\n number_of_merged_workouts += b\n logger.info('{} workouts checked, {} of them were duplicate, created {} merged workouts'\\\n .format(number_of_checked_workouts,\n number_of_duplicate_workouts,\n number_of_merged_workouts,))", "def _render_torrents(ctx, torrent_iterator, format):\n\n show_duplicates = ctx.params.get('show_duplicates', False)\n result_count = ctx.params.get('results', 25)\n\n (seen, count,) = (set(), 0,)\n while count < result_count:\n try:\n torrent = next(torrent_iterator)\n if torrent['hash'] not in seen:\n rendered = format.format(**COLORED, **torrent)\n if not show_duplicates:\n seen.add(torrent['hash'])\n\n yield (torrent, rendered,)\n count += 1\n except StopIteration:\n break\n\n if count <= 0:\n print((\n '{style.BOLD}{fore.WHITE}sorry, no results{style.RESET}'\n ).format(**COLORED))\n return", "def print_not_uniq(list, mesg=\"{multiplicity} times: {item}\"):\n\n not_uniq = search_not_uniq(list)\n for item, multiplicity in not_uniq.items():\n print(mesg.format(**locals()))\n return len(not_uniq)", "def test__get_duplicates(self):\n\n result = list_of_duplicates\n expected = [\n 'Fred',\n 'Sarah',\n 'Matthew',\n 'Joanna',\n 'Sam',\n ]\n\n self.assertListEqual(sorted(result), sorted(expected))", "def report(self, results):\n self.notice(\"Test Report\\n\")\n\n for count, group in enumerate(results, 1):\n results = (self._format_test(test, res) for test, res in group)\n results = (', ').join(results)\n self.notice(\"Test group %s:\\t%s\" % (count, results))\n\n self.divider()", "def show_results(self):\r\n\r\n if self.player_cards > self.computer_cards: # player wins\r\n print('\\nCongratulations!!')\r\n print('You WIN by {0} / {1}'.format(self.player_cards, self.computer_cards))\r\n elif self.player_cards < self.computer_cards: # computer wins\r\n print('\\nToo bad!!')\r\n print('You LOST by {0} / {1}'.format(self.player_cards, self.computer_cards))\r\n else: # tied\r\n print('You TIED by {0} / {1}'.format(self.player_cards, self.computer_cards))", "def _show_results(results):\n if len(results) == 0:\n click.echo(\"Could not find any command with these parameters.\")\n else:\n for kw, result in results.items():\n click.echo(kw.upper())\n for dic in result:\n if dic[\"explanation\"] != \"\":\n click.echo(\"\\t#%i\\t%s \\n\\t%s\" %(dic[\"id\"], dic[\"command\"], dic[\"explanation\"]))\n else:\n click.echo(\"\\t#%i\\t%s\" % (dic[\"id\"], dic[\"command\"]))", "def duplicates_marked_reciprocally():\n ids = FRAMEWORKS_DF['CURATED-COFs ID'].str\n messages = []\n\n for _index, row in FRAMEWORKS_DF.iterrows():\n if row['Duplicate found'] != 'none':\n original_id = row['CURATED-COFs ID']\n duplicate_id = row['Duplicate found']\n duplicate_row = FRAMEWORKS_DF.loc[FRAMEWORKS_DF['CURATED-COFs ID'] == duplicate_id ]\n if not len(duplicate_row) == 1:\n messages.append(f'Found row without reciprocal duplicate mark:\\n{row}')\n\n duplicate_row_original_id = duplicate_row['Duplicate found'].values[0]\n if not duplicate_row['Duplicate found'].values[0] == 
original_id:\n messages.append(f'Duplicate row lists ID {duplicate_row_original_id}, expected {original_id}')\n\n if messages:\n print('\\n'.join(messages))\n sys.exit(1)\n\n print('Rows marked as duplicates go both ways.')", "def test_duplicate_entries(self):", "def duplicate_record_check(cur):\n # get all created tables from db\n cur.execute(\"SELECT * FROM information_schema.tables WHERE table_schema='public'\")\n result = cur.fetchall()\n\n # create list of tables\n table_list = [table[2] for table in result]\n\n print('Checking tables for duplicate records...')\n\n # check each table for duplicates\n for table_name in table_list:\n cur.execute(f\"SELECT COUNT(*) FROM {table_name}\")\n row_count = cur.fetchall()\n cur.execute(f\"SELECT DISTINCT COUNT(*) FROM {table_name}\")\n distinct_count = cur.fetchall()\n if row_count[0][0] == distinct_count[0][0]:\n print(f\"GREAT, no duplicate records found in {table_name}!\")\n elif distinct_count[0][0] < row_count[0][0]:\n print(f\"WARNING, duplicate records found! {distinct_count[0][0]}\"\n f\"distinct record count is less than total record count of {row_count[0][0]}\")", "def _display_results(self):\n self._display_summary()\n self._display_domain_record()\n self._display_ip_record()\n self._display_cert_details()\n self._display_ti_data()\n self._display_screenshot()\n self._display_related_alerts()\n self._display_bookmarks()\n self._display_dns_results()\n self._display_hosts()\n self._display_flows()", "def _show_summary(self):\n print 'Summary:'\n print ' Reports downloaded successfully: %d' % self.counts\n print ' Reports not downloaded: %d\\n' % self.failed", "def print_results(self):\n for test_cases in self._tests:\n for test_case in test_cases:\n print('{} ...ok'.format(test_case.get_name()))\n return 0", "def display_results_for_errors(result):\n i = 0\n for r in result:\n print('\\t'+str(result[i][0])+' ---> '+str(result[i][1])+' %\\n')\n i = i + 1", "def display_results(results):\n winners = results[0]\n losers = results[1]\n pushers = results[2]\n blackjack_winners = results[3]\n print(generate_results_string(winners, \" wins\", \" win\"))\n print(generate_results_string(losers, \" loses\", \" lose\"))\n print(generate_results_string(pushers, \" pushes\", \" push\"))\n print(generate_results_string(blackjack_winners, \" wins with blackjack\", \" win with blackjack\"))", "def search_results(self, results):\n for index, item in enumerate(results):\n print '[%s] %s (%s) {%s}' % (\n index, \n self._color(item.title), \n self._color(item.year, 'RED'), \n self._color(item.imdbid, 'GREEN'))", "def show_result(dict_result):\r\n\r\n\tcorrects = dict_result[\"Corrects\"]\r\n\twrongs = dict_result[\"Wrongs\"]\r\n\tn_questions = dict_result[\"n_questions\"]\r\n\r\n\tprint(\"\\n\\n\",\"-\"*10,\"Final Result\", \"-\"*10)\r\n\r\n\tfinal_note = (len(corrects)*100)/n_questions\r\n\tprint(\"\\nResult: \", final_note*10)\r\n\r\n\tif final_note*10 > 600:\r\n\t\tprint(\"\\nYOU PASS!\")\r\n\telse:\r\n\t\tprint(\"\\nI'm sorry, you don't pass, but please try again!\")\r\n\r\n\tif len(wrongs) > 0:\r\n\t\tprint(\"\\nSome questions for review:\", end=\" \")\r\n\t\tfor i in wrongs:\r\n\t\t\tif i == wrongs[-1]:\r\n\t\t\t\tprint(i)\r\n\t\t\telse:\r\n\t\t\t\tprint(i, end=\", \")", "def _display_dns_results(self):\n if self.check_valid_result_data(\"dns_results\", silent=True):\n nb_markdown(f\"DNS events related to {self.url}\", \"bold\")\n display(self._last_result.dns_results)\n else:\n nb_markdown(f\"No DNS resolutions found for {self.url}\")", "def 
test_check_bc_duplicates_default_dups(self):\r\n\r\n header =\\\r\n ['SampleID', 'BarcodeSequence', 'LinkerPrimerSequence', 'run_prefix',\r\n 'Description']\r\n mapping_data = [['s-1', 'ACGT', 'AAAA', '1', 's1&data'],\r\n ['s2', 'ACGT', 'AAAA', '2', 's2_data']]\r\n errors = []\r\n\r\n errors = check_bc_duplicates(header,\r\n mapping_data,\r\n errors,\r\n has_barcodes=True,\r\n variable_len_barcodes=False,\r\n added_demultiplex_field=None)\r\n\r\n expected_errors = ['Duplicate barcode ACGT found.\\t1,1',\r\n 'Duplicate barcode ACGT found.\\t2,1']\r\n\r\n self.assertEqual(errors, expected_errors)", "def print_tests_results(self):\n\n for test in self.test_report:\n for detail in test:\n print detail + ': ', test[detail]", "def mark_duplicates():\n\n mkdir(MD_DIR)\n\n printp(\"\"\"# drmr:label mark-duplicates\\n\"\"\")\n printp(\"\"\"# drmr:job nodes=1 processors=1 memory=12g working_directory={} time_limit=8h\"\"\".format(MD_DIR))\n\n for sample, info in DATA.items():\n for x in ['treatment', 'control']:\n srr = get_srr(sample) if x == 'treatment' else get_input_control_srr(sample)\n input_bam = get_bwa_bam(sample, control=False) if x == 'treatment' else get_bwa_bam(sample, control=True)\n output_bam = get_md_bam(sample, control=False) if x == 'treatment' else get_md_bam(sample, control=True)\n printp(\"\"\"picard -m 8g MarkDuplicates I={input_bam} O={output_bam} ASSUME_SORTED=true METRICS_FILE={srr}.markdup.metrics VALIDATION_STRINGENCY=LENIENT TMP_DIR=.; samtools index {output_bam}\"\"\".format(**locals()), timed=True)\n\n printp(\"\"\"\\n# drmr:wait\"\"\")", "def expect_duplicate(self):\n # Reset everything for this record\n self._expect_duplicate = False\n self.__dupcntr = 0\n self.__maxdup = 0\n # Get the probability to generate duplicate for next record\n if self.fake.random.random() < self.duplicate_cfg[\"Prob_duplicate\"]:\n self._expect_duplicate = True\n self.__maxdup = self.random_select_ndups()\n else:\n self._expect_duplicate = False\n self.__maxdup = 0\n\n self.__logger.debug(\"expect_duplicate ndups: %d\", self.__maxdup)" ]
[ "0.6287719", "0.5866031", "0.58241785", "0.57221144", "0.56531405", "0.56339365", "0.56136125", "0.5593977", "0.55925065", "0.55140984", "0.5512862", "0.5487937", "0.5483856", "0.5471412", "0.5454186", "0.53893715", "0.5378345", "0.53769517", "0.5371023", "0.5360018", "0.5356362", "0.5355224", "0.5293162", "0.52846473", "0.5282217", "0.5282178", "0.524647", "0.52459943", "0.52421564", "0.5233009" ]
0.67122084
0
Create a response model to pass to the presenter
def _create_response_model(self, data): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_response_model_ctor(self):\n return self._response_model_ctor", "def create_json_from_model(self):\n json = {\n \"enableAutoReply\": self.enable_auto_reply,\n \"responseSubject\": self.response_subject,\n \"responseBodyPlainText\": self.response_body_plain_text,\n \"responseBodyHtml\": self.response_body_html,\n \"restrictToContacts\": self.restrict_to_contacts,\n \"restrictToDomain\": self.restrict_to_domain,\n \"startTime\": self.start_time,\n \"endTime\": self.end_time\n }\n return json", "def handle_create_response(self, response):\n\n if not self.model._meta['update_from_write'] or not response.content:\n return\n\n try:\n obj = self.obj_from_response(response)\n except ValueError:\n obj = None\n\n self.handle_response(response)\n\n return obj", "def __call__(self, rv):\n if isinstance(rv, ResponseBase):\n return rv\n data, status, headers = unpack(rv)\n resp = flask.make_response(self._encoder(data, **self.json_settings),\n status, {'Content-Type': self.content_type})\n resp.headers.extend(headers)\n return resp", "def create_response(self, request, data, response_class=HttpResponse, **response_kwargs):\n desired_format = self.determine_format(request)\n serialized = self.serialize(request, data, desired_format)\n return response_class(content=serialized, content_type=build_content_type(desired_format), **response_kwargs)", "def obj_from_response(self, response):\n\n obj = self.model()\n serializer = self.get_serializer()\n field_data = serializer.deserialize(to_unicode(response.content))\n obj.update_fields(field_data)\n obj._full_url = response.url\n\n return obj", "def get_response(self, request):\n view = self.get_view()\n # Call its view with the request and this model.\n return view(request, flexible_page=self)", "def create_response(result):\n return ControllerResponse(\n response=result,\n status=200,\n mime='application/json',\n jsonize=True,\n )", "def create_response_element(self, **kwargs):\r\n return None", "def json_response(self, request, *args, **kwargs):\n\n return HttpResponse(self.construct_json(),\n content_type='application/json',\n mimetype='application/json', status=self.status)", "def create_response_object(self, url_data, service_id, service_version):\n request_dict = {k: v[0] for k, v in url_data}\n\n create_response_object = {\n \"status\": request_dict[\"status\"],\n \"response\": request_dict[\"response\"],\n \"cache_condition\": request_dict.get(\"cache_condition\", \"\"),\n \"request_condition\": request_dict.get(\"request_condition\", \"\"),\n \"name\": request_dict[\"name\"],\n \"version\": service_version,\n \"content\": request_dict[\"content\"],\n \"content_type\": \"text/plain\",\n \"service_id\": service_id\n }\n\n if 'response_object_list' not in self.fastly_cache[service_id]:\n self.fastly_cache[service_id]['response_object_list'] = []\n\n self.fastly_cache[service_id][\n 'response_object_list'].append(create_response_object)\n return create_response_object", "def __init__(self, response):\n self.response = response\n self.object = response['object']\n self.event_id = response['event_id']\n self.created_at = response['created_at']\n self.data = response['data']\n self.request = response['request']\n self.event_type = response['type']\n self.livemode = response['livemode']", "def process_response(self, response: response_domain_model):\n ...", "def adapt_response(self, response):\n return response", "def adapt_response(self, response):\n return response", "def to_response(self):\n raise NotImplementedError(\"Must define to_response on 
`%s`\" % self.__class__.__name__)", "def CreateModel(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def __init__(self, res):\n self.fromResponseObj(res)", "def __init__(self, res):\n self.fromResponseObj(res)", "def to_response(self, data):\n return self.from_dict(data).to_dict()", "def get_initial_response():\n # Message to the user\n message = {\n 'apiVersion': 'v1.0',\n 'status': '200',\n 'message': 'Flask API - Doubtnut - OPENCV'\n }\n # Making the message looks good\n resp = jsonify(message)\n # Returning the object\n return resp", "def CreateModel(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")", "def CreateModel(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def _bld_resp(self, status=200, entry_or_list=None):\n resp = pvm_adp.Response('meth', 'path', status, 'reason', {})\n resp.entry = None\n resp.feed = None\n if entry_or_list is None:\n resp.feed = pvm_ent.Feed({}, [])\n else:\n if isinstance(entry_or_list, list):\n resp.feed = pvm_ent.Feed({}, entry_or_list)\n else:\n resp.entry = entry_or_list\n return resp", "def make_response(self):\n params = {\n 'tweet.fields': 'created_at,public_metrics,entities',\n 'expansions': 'author_id',\n 'user.fields': 'description'\n }\n return self.response_limit(params)", "def make_response(self, rv):\n status_or_headers = headers = None\n if isinstance(rv, tuple):\n rv, status_or_headers, headers = rv + (None,) * (3 - len(rv))\n\n if rv is None:\n raise ValueError('View function did not return a response')\n\n if isinstance(status_or_headers, (dict, list)):\n headers, status_or_headers = status_or_headers, None\n\n if not isinstance(rv, self.response_class):\n if isinstance(rv, six.text_type):\n rv = self.response_class(rv, status=status_or_headers)\n else:\n raise ValueError('Content must be a string')\n\n if status_or_headers is not None:\n if isinstance(status_or_headers, six.text_type):\n # FIXME: I'm pretty sure Django's reason_phrase is *just* the\n # 'OK' in '200 OK', whereas Flask allows passing '200 OK'\n rv.reason_phrase = status_or_headers\n else:\n rv.status = status_or_headers\n\n if headers:\n # HttpResponse doesn't take a headers kwarg, so we must set each\n # header manually with rv[header] = value\n if isinstance(headers, dict):\n headers_iter = six.iteritems(headers)\n elif isinstance(headers, list):\n headers_iter = headers\n else:\n raise ValueError('headers must be dict, list, or None')\n\n for header, value in headers_iter:\n rv[header] = value\n\n return rv", "def get_response_data(self):\r\n raise NotImplementedError", "def main_response(self, data):", "def main_response(self, data):", "def __init__(self, **kwargs):\n\n ## Error message\n self.error = ''\n ## Error code\n self.result = 0\n ## Apply passed keyword arguments to the Request object.\n super(ObjectDetectionLoadModels.Response, self).__init__(**kwargs)" ]
[ "0.70443594", "0.63880855", "0.61683625", "0.61580884", "0.6102591", "0.61025757", "0.60942143", "0.60272694", "0.59198946", "0.5912622", "0.5899994", "0.5891911", "0.58650625", "0.58380055", "0.58380055", "0.58321315", "0.5831333", "0.582527", "0.582527", "0.58120483", "0.57982874", "0.5794504", "0.57437545", "0.5729958", "0.5720223", "0.5707388", "0.5695699", "0.5690905", "0.5690905", "0.5686904" ]
0.8459162
0
takes in a string of columns and places alternating checkers in those columns, starting with 'X' For example, call b.setBoard('012345') to see 'X's and 'O's alternate on the bottom row, or b.setBoard('000000') to see them alternate in the left column. moveString must be a string of integers
def setBoard( self, moveString ): nextCh = 'X' # start by playing 'X' for colString in moveString: col = int(colString) if 0 <= col <= self.__width: self.addMove(col, nextCh) if nextCh == 'X': nextCh = 'O' else: nextCh = 'X'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setBoard( self, moveString ):\n nextCh = 'X' # start by playing 'X'\n for colString in moveString:\n col = int(colString)\n if 0 <= col <= self.width:\n self.addMove(col, nextCh)\n if nextCh == 'X': nextCh = 'O'\n else: nextCh = 'X'", "def set_board(self, move_string):\n next_side = \"X\"\n for col_string in move_string:\n col = int(col_string)\n if col >= 0 and col <= self.width:\n self.add_move(col, next_side)\n if next_side == \"X\":\n next_side = \"O\"\n else:\n next_side = \"X\"", "def setBoard(self, moveString):\n nextCh = 'X' # start by playing 'X'\n for colString in moveString:\n col = int(colString)\n if 0 <= col <= self.width:\n self.addMove(col, nextCh)\n if nextCh == 'X':\n nextCh = 'O'\n else:\n nextCh = 'X'", "def setBoard( self, moveString ):\n nextCh = 'X' # start by playing 'X'\n for colString in moveString:\n col = int(colString)\n if 0 <= col <= self.__width:\n self.addMove(col, nextCh)\n if nextCh == 'X': \n nextCh = 'O'\n else: nextCh = 'X'", "def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction\n for row in self._grid:\n print row\n print", "def update_puzzle(self, move_string):\r\n zero_row, zero_col = self.current_position(0, 0)\r\n for direction in move_string:\r\n if direction == \"l\":\r\n assert zero_col > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\r\n self._grid[zero_row][zero_col - 1] = 0\r\n zero_col -= 1\r\n elif direction == \"r\":\r\n assert zero_col < self._width - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\r\n self._grid[zero_row][zero_col + 1] = 0\r\n zero_col += 1\r\n elif direction == \"u\":\r\n assert zero_row > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\r\n self._grid[zero_row - 1][zero_col] = 0\r\n zero_row -= 1\r\n elif direction == \"d\":\r\n assert zero_row < self._height - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\r\n self._grid[zero_row + 1][zero_col] = 0\r\n zero_row += 1\r\n else:\r\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\r\n zero_row, zero_col = self.current_position(0, 0)\r\n for direction in move_string:\r\n if direction == \"l\":\r\n assert zero_col > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\r\n self._grid[zero_row][zero_col - 1] = 0\r\n zero_col -= 1\r\n elif direction == \"r\":\r\n assert 
zero_col < self._width - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\r\n self._grid[zero_row][zero_col + 1] = 0\r\n zero_col += 1\r\n elif direction == \"u\":\r\n assert zero_row > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\r\n self._grid[zero_row - 1][zero_col] = 0\r\n zero_row -= 1\r\n elif direction == \"d\":\r\n assert zero_row < self._height - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\r\n self._grid[zero_row + 1][zero_col] = 0\r\n zero_row += 1\r\n else:\r\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\r\n zero_row, zero_col = self.current_position(0, 0)\r\n for direction in move_string:\r\n if direction == \"l\":\r\n assert zero_col > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\r\n self._grid[zero_row][zero_col - 1] = 0\r\n zero_col -= 1\r\n elif direction == \"r\":\r\n assert zero_col < self._width - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\r\n self._grid[zero_row][zero_col + 1] = 0\r\n zero_col += 1\r\n elif direction == \"u\":\r\n assert zero_row > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\r\n self._grid[zero_row - 1][zero_col] = 0\r\n zero_row -= 1\r\n elif direction == \"d\":\r\n assert zero_row < self._height - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\r\n self._grid[zero_row + 1][zero_col] = 0\r\n zero_row += 1\r\n else:\r\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 
1][zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction", "def handle_move(self, move_string):\n def map_move(move):\n col = int(ascii_lowercase.find(move[0])) + 1 # dummy col\n row = int(move[1:])\n # if not 0 < col <= game[\"board_width\"]:\n # raise ValueError('bad coord; invalid col in ' + coord)\n # if not 0 < row <= game[\"board_height\"]:\n # raise ValueError('bad coord; invalid row in ' + coord)\n return row*(self.rules[\"row_len\"]) + col\n move = list(map(map_move,move_string.split(' ')))\n self.turn[\"board\"][move[0]].make_move(*move[1:])\n self.turn[\"half_move_clock\"] += 1\n if self.turn[\"active_player\"] == 1:\n self.turn[\"full_move_clock\"] += 1\n self.turn[\"active_player\"] = (self.turn[\"active_player\"] + 1) % 2\n # self.turn[\"board\"][move_start].make_move(move_end)", "def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col\n - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col\n + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][\n zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][\n zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction", "def make_board(board_string):\n\n letters = board_string.split()\n\n board = [\n letters[0:5],\n letters[5:10],\n letters[10:15],\n letters[15:20],\n letters[20:25],\n ]\n\n return board", "def apply_move(b,player,move):\n move = move.strip().lower()\n if len(move)!=2:\n raise Exception(\"Valid move is two characters (e.g. 
A2 or B3)\")\n if move[0] not in COLS:\n move = move[::-1]\n if move[0] not in COLS:\n raise Exception(\"No column spec found\")\n j = COLS.index(move[0])\n i = int(move[1])-1\n if b[i][j] != \" \":\n raise Exception(\"Another move already filled that position\")\n b[i][j] = player", "def make_move(board, player_num, row, col):\n board[row][col] = 'X' if player_num == 1 else 'O'", "def make_move(self, board):\n user_input = self.get_user_input(\n 'coordinates of next move (x,y): '\n )\n move = self.transform_user_input(user_input)\n\n valid = board.move_is_valid(move)\n while not valid:\n user_input = self.get_user_input(\n 'Invalid move, coordinate of next move: '\n )\n move = self.transform_user_input(user_input)\n valid = board.move_is_valid(move)\n board.set_piece(move, color=self.color)", "def move(state_int, col, player):\n assert isinstance(state_int, int)\n assert 0 <= col < GAME_COLS\n assert player == PLAYER_BLACK or player == PLAYER_WHITE\n field = decode_binary(state_int)\n assert len(field[col]) < GAME_ROWS\n field[col].append(player)\n # check for victory: the simplest vertical case\n suff = field[col][-COUNT_TO_WIN:]\n won = suff == [player] * COUNT_TO_WIN\n if not won:\n won = _check_won(field, col, 0) or _check_won(field, col, 1) or _check_won(field, col, -1)\n state_new = encode_lists(field)\n return state_new, won", "def test_board_coordinates_toXY():\r\n m = Move()\r\n for col_num, col_name in enumerate(['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']):\r\n for row in range(1, 9):\r\n assert m.translate_to_xy(col_name + str(row)) == (Board.SIZE - row, col_num)", "def find_best_move(board):\n new_board = board.get_board()\n\n # X | X | X <-- Check for win on this row\n # ---------\n # 3 | 4 | 5\n # ---------\n # 6 | 7 | 9\n if new_board[0] == new_board[1] and new_board[2] == \"2\":\n return 2\n elif new_board[0] == new_board[2] and new_board[1] == \"1\":\n return 1\n elif new_board[1] == new_board[2] and new_board[0] == \"0\":\n return 0\n\n # 0 | 1 | 2\n # ---------\n # X | X | X <-- Check for win on this row\n # ---------\n # 6 | 7 | 9\n elif new_board[3] == new_board[4] and new_board[5] == \"5\":\n return 5\n elif new_board[3] == new_board[5] and new_board[4] == \"4\":\n return 4\n elif new_board[4] == new_board[5] and new_board[3] == \"3\":\n return 3\n\n # 0 | 1 | 2\n # ---------\n # 3 | 4 | 5\n # ---------\n # X | X | X <-- Check for win on this row\n elif new_board[6] == new_board[7] and new_board[8] == \"8\":\n return 8\n elif new_board[6] == new_board[8] and new_board[7] == \"7\":\n return 7\n elif new_board[7] == new_board[8] and new_board[6] == \"6\":\n return 6\n\n # X | 1 | 2 Check for win on column one\n # ---------\n # X | 4 | 5\n # ---------\n # X | 7 | 9\n elif new_board[0] == new_board[3] and new_board[6] == \"6\":\n return 6\n elif new_board[0] == new_board[6] and new_board[3] == \"3\":\n return 3\n elif new_board[6] == new_board[3] and new_board[0] == \"0\":\n return 0\n\n # 0 | X | 2 Checks for win on column two\n # ---------\n # 3 | X | 5\n # ---------\n # 6 | X | 9\n elif new_board[1] == new_board[4] and new_board[7] == \"7\":\n return 7\n elif new_board[1] == new_board[7] and new_board[4] == \"4\":\n return 4\n elif new_board[7] == new_board[4] and new_board[0] == \"0\":\n return 0\n\n # 0 | 1 | X\n # ---------\n # 3 | 4 | X\n # ---------\n # 6 | 7 | X\n elif new_board[2] == new_board[5] and new_board[8] == \"8\":\n return 8\n elif new_board[2] == new_board[8] and new_board[5] == \"5\":\n return 5\n elif new_board[8] == new_board[5] and new_board[2] == 
\"2\":\n return 2\n\n # X | 1 | 2\n # ---------\n # 3 | X | 5\n # ---------\n # 6 | 7 | X\n elif new_board[0] == new_board[4] and new_board[8] == \"8\":\n return 8\n elif new_board[0] == new_board[8] and new_board[4] == \"4\":\n return 4\n elif new_board[8] == new_board[4] and new_board[0] == \"0\":\n return 0\n\n # 0 | 1 | X\n # ---------\n # 3 | X | 5\n # ---------\n # X | 7 | 9\n elif new_board[2] == new_board[4] and new_board[6] == \"6\":\n return 6\n elif new_board[2] == new_board[6] and new_board[4] == \"4\":\n return 4\n elif new_board[6] == new_board[4] and new_board[2] == \"2\":\n return 2\n\n # If corners are empty, play there\n elif new_board[0] == \"0\" or new_board[2] == \"2\" or new_board[6] == \"6\" or new_board[8] == \"8\":\n try_spot = 0\n while True:\n if new_board[try_spot] != \"X\" and new_board[try_spot] != \"O\":\n return try_spot\n else:\n try_spot = try_spot + 2\n\n # If middle is empty, play there\n elif new_board[4] == \"4\":\n return 4\n\n # Finally if edges are empty try there\n elif new_board[1] == \"1\" or new_board[3] == \"3\" or new_board[5] == \"5\" or new_board[7] == \"7\":\n try_spot = 1\n while True:\n if new_board[try_spot] != \"X\" and new_board[try_spot] != \"O\":\n return try_spot\n else:\n try_spot = try_spot + 2", "def try_moves(self, moves_set):\n for choice in tuple(moves_set):\n self.game.move(choice)\n self.game.board.create_layout()", "def make_move(self, move):\n if int(move) < 0 or int(move) > 48 or self.board[int(move) // 7][int(move) % 7] != \"\" or int(move) % 2 == 0:\n raise ValueError(\"{} is not a valid move for {}\".format(move, self.board))\n DotsAndBoxesState.score1 += self.check_score(move)\n self.board[int(move) // 7][int(move) % 7] = colors[self.turn] + self.turn + \"\\u001b[0m\"\n self.turn = get_opponent(self.turn) #change into another player's trun", "def make_move(board: Connect4Board) -> \"(row, col)\":\r\n\r\n while True:\r\n\r\n try:\r\n\r\n print('\\nPlease Specify your move. Enter the number column of a cell on the board.')\r\n print('-'*85)\r\n \r\n col = Connect4GameUI.move_col(board)\r\n row = Connect4GameUI._get_valid_row(board, col)\r\n print(row,col)\r\n return row, col\r\n\r\n break\r\n\r\n except:\r\n print('\\nInvalid move!!!')\r\n print('Please try it again.')", "def compile_board(self, moves=None) -> List[List[str]]:\n if not moves:\n moves = self.moves\n board = []\n current_line = []\n for itr in range(1, 10):\n current_line.append(\n self.tokens[moves.get(itr, ' ')]\n )\n if itr % 3 == 0 and current_line:\n board.append(current_line)\n current_line = []\n board.append(current_line)\n return board", "def next_move(self, board):\n\n while True:\n\n i = int(input('Enter a column: ' ))\n\n if board.can_add_to(i) == True:\n break\n\n print('Try again!')\n\n self.num_moves += 1\n\n return i", "def addMove(self, col, ox):\n if self.allowsMove(col) == True:\n R = -1\n for row in self.board:\n if row[col] == '':\n R += 1\n else:\n break\n self.board[R][col] = ox", "def makeMove(self, moveStr):\r\n\t\tmoveStr = str(moveStr)\r\n\r\n\t\tmoveUci = self._userParseSanToUci(moveStr)\r\n\t\t# print(moveUci)\r\n\r\n\t\tif moveUci is None:\r\n\t\t\treturn\r\n\r\n\t\tresponse = requests.post(f'https://lichess.org/api/board/game/{self.gameId}/move/{moveUci}', headers=self.authHeader)\r\n\r\n\t\tif response.status_code == 200:\r\n\t\t\tlog.debug('Move Successfully Sent')\r\n\r\n\t\telse:\r\n\t\t\tlog.warning(f'Move Unsuccessfully Sent. 
Status Code: {response.status_code}')", "def askMove(self,posibleMoves):\n print(\"Where will you move?\")\n while True:\n pos = raw_input(\"Type Colum and Row 'CR' Ex:a1 for first column/row: \")\n if len(pos) == 2:\n c = ord(pos[0])-97\n r = int(pos[1])-1\n move = c+r*8\n if move in posibleMoves:\n return move\n print(\"Invalid move, try again\")\n return", "def move(self, board):\n\n if board.get_number_of_moves() == 0:\n random_row = randint(0, 2)\n random_column = randint(0, 2)\n\n if random_row == 1 or random_column == 1:\n random_row = 1\n random_column = 1\n elif random_row == 2:\n random_row = board.get_dimension()-1\n\n if random_column == 2:\n random_column = board.get_dimension()-1\n\n move = (random_row, random_column)\n elif board.get_number_of_moves() == 1 or board.get_number_of_moves() == 2:\n if board.get_piece(1,1) == ' ':\n move = (1, 1)\n else:\n board_dimension = board.get_dimension()-1\n corners = [(0, 0), (0, board_dimension), (board_dimension, 0), (board_dimension, board_dimension)]\n corners = self.remove_filled_positions(corners, board)\n\n move = corners[randint(0, len(corners)-1)]\n else:\n move = self.check_for_winner(board)\n\n if move == (-1, -1):\n board_dimension = board.get_dimension()-1\n corner1_moves = self.remove_filled_positions([(0, 0), (2, 2)], board)\n corner2_moves = self.remove_filled_positions([(0, 2), (2, 0)], board)\n\n non_corner_moves = self.remove_filled_positions([(1, 0), (2, 1), (1, 2), (0, 1)], board)\n\n center_piece = board.get_piece(1, 1)\n corner_pieces = [board.get_piece(0, 0), board.get_piece(board_dimension, 0), board.get_piece(0, board_dimension), board.get_piece(board_dimension, board_dimension)]\n\n if corner_pieces[0] != self._piece and corner_pieces[0] != ' ' and corner_pieces[0] == corner_pieces[3]:\n move = non_corner_moves[randint(0, 3)]\n elif corner_pieces[1] != self._piece and corner_pieces[1] != ' ' and corner_pieces[1] == corner_pieces[2]:\n move = non_corner_moves[randint(0, 3)]\n elif len(corner2_moves) > 0 and corner_pieces[0] != self._piece and corner_pieces[0] == center_piece and corner_pieces[3] == self._piece:\n move = corner2_moves[0]\n elif len(corner1_moves) > 0 and corner_pieces[1] != self._piece and corner_pieces[1] == center_piece and corner_pieces[2] == self._piece:\n move = corner1_moves[0]\n elif len(corner1_moves) > 0 and corner_pieces[2] != self._piece and corner_pieces[2] == center_piece and corner_pieces[1] == self._piece:\n move = corner1_moves[0]\n elif len(corner2_moves) > 0 and corner_pieces[3] != self._piece and corner_pieces[3] == center_piece and corner_pieces[0] == self._piece:\n move = corner2_moves[0]\n else:\n move = self.can_complete_two_in_row(board)\n\n if move == (-1, -1):\n move = (randint(0, board.get_dimension()-1), randint(0, board.get_dimension()-1))\n\n while not board.check_move(move[0], move[1]):\n move = (randint(0, board.get_dimension()-1), randint(0, board.get_dimension()-1))\n\n return move", "def test_valid_move():\n\n board = Board()\n\n # a col outside the width of the board should be false\n assert board.valid_move(board.get_grid_size()[1] + 1) is False\n\n # only positive cols should be considered for a move\n assert board.valid_move(-2) is False\n\n # since board is empty all cols should have moves\n for i in range(board.get_grid_size()[1]):\n assert board.valid_move(i) is True\n\n # if a col is full no move can be made\n for i in range(board.get_grid_size()[1]):\n if i % 2 == 0:\n board.move(board.P1, 0)\n else:\n board.move(board.P2, 0)\n\n \"\"\"\n board now 
looks like this...\n \n 0 1 2 3 4 5 6 \n +-+-+-+-+-+-+-+\n 0|O|-|-|-|-|-|-|0\n +-+-+-+-+-+-+-+\n 1|X|-|-|-|-|-|-|1\n +-+-+-+-+-+-+-+\n 2|O|-|-|-|-|-|-|2\n +-+-+-+-+-+-+-+\n 3|X|-|-|-|-|-|-|3\n +-+-+-+-+-+-+-+\n 4|O|-|-|-|-|-|-|4\n +-+-+-+-+-+-+-+\n 5|X|-|-|-|-|-|-|5\n +-+-+-+-+-+-+-+\n 0 1 2 3 4 5 6 \n\n \"\"\"\n assert board.valid_move(0) is False", "def update(self, move):\n\n if not 0 <= move < 7:\n raise InvalidMove\n\n placed = False\n x = None\n y = None\n\n for row in reversed(xrange(self._rows)):\n if not self._board[row][move]:\n self._board[row][move] = self.current_player\n placed = True\n x = move\n y = row\n break\n\n if not placed:\n raise InvalidMove\n\n return (x, y)" ]
[ "0.83378243", "0.8336306", "0.83127975", "0.82836413", "0.722551", "0.7191393", "0.7191393", "0.7191393", "0.7188351", "0.7188351", "0.71681666", "0.71523356", "0.61226887", "0.6058321", "0.582009", "0.5713693", "0.56568706", "0.5623423", "0.5570228", "0.556483", "0.5557871", "0.5519342", "0.5497061", "0.5493463", "0.54759836", "0.5464171", "0.54581976", "0.545712", "0.5456884", "0.5454254" ]
0.83451086
0
Checks if AutoML can be loaded from a folder
def _check_can_load(self): if self.results_path is not None: # Dir exists and can be loaded if os.path.exists(self.results_path) and os.path.exists( os.path.join(self.results_path, "params.json") ): self.load(self.results_path) self._results_path = self.results_path
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def can_load(cls, filename):\n return False", "def in_folder(self):\n return len(os.path.split(self.file_path)) > 1", "def is_valid_animation(path, verbose=True):\n try:\n if \"idle\" in os.listdir(path) or \"transition\" in os.listdir(path):\n return True\n else:\n if verbose:\n print(path, \"is not a valid animation folder! It needs an /idle or /transition folder!\")\n return False\n except:\n return False", "def _check_integrity(self):\n root = self.root\n for scene_name in self.scene_list:\n if not(os.path.isdir(os.path.join(root,scene_name)) and \n os.path.isdir(os.path.join(root,scene_name, images_dir)) and\n os.path.isfile(os.path.join(root,scene_name,annotation_filename))):\n return False\n return True", "def _check_integrity(self):\n root = self.root\n for scene_name in self.scene_list:\n if not(os.path.isdir(os.path.join(root,scene_name)) and \n os.path.isdir(os.path.join(root,scene_name, images_dir)) and\n os.path.isfile(os.path.join(root,scene_name,annotation_filename))):\n return False\n return True", "def autoload(self):\n\t\tpath = self.world.config[\"plugin\"][\"path\"]\n\t\tif not self.load_glob(path):\n\t\t\treturn False\n\t\tif not self.check_deps():\n\t\t\treturn False\n\t\treturn True", "def __isValidXMLResourcesFolder(self, folder):\n tablesInFolder = filter(lambda f: os.path.isdir(os.path.join(folder, f)),\n os.listdir(folder))\n containedInRequiredTables = map(lambda f: f in self.__requiredTables,tablesInFolder)\n return (True if len(containedInRequiredTables)>0 else False)", "def isMayaFile(potentialMayaFile):\n\n pass", "def check_if_exists(self): \r\n dir_name = os.path.dirname(os.path.abspath(__file__))\r\n fucntion_dir = os.path.join(dir_name, 'openfaas', self.name)\r\n if not os.path.isdir(fucntion_dir):\r\n raise ValueError(\r\n f\"Function name `{self.name}` provided does not exist.\")\r\n self.yaml_path = os.path.join(fucntion_dir, f\"{self.name}.yml\")\r\n return True", "def __is_file_eligible_to_scan(cls, path_to_test):\n return path_to_test.endswith(\".md\")", "def check_if_anim_exist(name, ext=vext, figpath=figpath):\n return not(os.path.isfile(format_filename(name, ext, figpath)))", "def __check_in_autonotes_dir():\n if not os.path.isfile('master.tex'):\n cli.log.error(f'I can\\'t find a {emph(\"master.tex\")} file, '\n 'are you inside an autonotes directory?')\n exit(3)", "def __contains__(self, name):\n return (self.model_dir / (str(name) + '.pkl')).exists()", "def check_loader(self, dt):\n if EVENTS['FILE_PATH'] and EVENTS['CAN_WRITE']:\n self.editor.load_file(EVENTS['FILE_PATH'])\n EVENTS['CAN_WRITE'] = False", "def isLoaded(self,fileName):\n return mwIniFile.isLoaded(fileName)", "def test1_loading(self):\n\t\tprint \"\\nTEST 1: Loading ontologies from %s folder.\\n=================\" % DATA_FOLDER\n\t\t\n\t\tfor f in os.listdir(DATA_FOLDER):\n\t\t\tif not f.startswith('.'):\n\t\t\t\tprint \"Loading... 
>\", f\t\t\n\t\t\t\t\n\t\t\t\to = ontospy.Ontology(DATA_FOLDER + f)\n\t\t\t\t\n\t\t\t\tself.assertEqual(type(o), ontospy.Ontology)\n\t\t\t\tprint \"Success.\"", "def is_file_exist(self):\n return os.path.isfile(os.path.join(self.output_path, 'amr_corpus_ext.pickle'))", "def has_file(self, doc):\n return len(doc.package.files) != 0", "def is_resource(self, path):\n # type: (Text) -> bool\n raise FileNotFoundError", "def is_app_dir(path):\n try:\n find_app_yamls(path)\n return True\n except ValueError:\n return False", "def load(self):\n return True", "def has_annotations(filepath):\n return filepath.endswith('.ll') and '[#uses=' in open(filepath).read()", "def isLoaded(self,modFile):\n return (modFile in self.loadFiles)", "def _is_azureml_available() -> bool:\n if importlib.util.find_spec(\"azureml\") is None:\n return False\n if importlib.util.find_spec(\"azureml.core\") is None:\n return False\n return importlib.util.find_spec(\"azureml.core.run\") is not None", "def __check_exists(self):\n\n return os.path.exists(os.path.join(self.__path, 'train_images_tensor.pt')) and \\\n os.path.exists(os.path.join(self.__path, 'train_labels_tensor.pt')) and \\\n os.path.exists(os.path.join(self.__path, 'test_images_tensor.pt')) and \\\n os.path.exists(os.path.join(self.__path, 'test_labels_tensor.pt'))", "def detect(self, path):\n valid = False\n path = pathlib.Path(path)\n # basic check for suffix\n try:\n if path.suffix == self.suffix:\n valid = True\n except ValueError:\n pass\n\n # advanced check with \"detect\"\n if valid and \"detect\" in self.recipe:\n fdetect = self.recipe[\"detect\"]\n valid = fdetect(path)\n\n return valid", "def test_load_file(self):\n self.assertTrue(os.path.exists(MEDIA_ROOT+\"/pl_test1_\"+self.loader.version))\n self.assertTrue(os.path.exists(MEDIA_ROOT+\"/pl_test2_\"+self.loader.version))", "def can_load(self, step: 'TensorflowV1ModelStep', context: 'ExecutionContext'):\n meta_exists = os.path.exists(os.path.join(context.get_path(), \"{0}.ckpt.meta\".format(step.get_name())))\n index_exists = os.path.exists(os.path.join(context.get_path(), \"{0}.ckpt.index\".format(step.get_name())))\n\n return meta_exists and index_exists", "def isVideoFolder():", "def test_script_exists(self):\n get_files=os.listdir(\"../../taxonomy/src_files\")\n self.assertIn(\"validate_match_batch.py\", get_files)" ]
[ "0.64446145", "0.61130095", "0.60769767", "0.6070284", "0.6070284", "0.5984742", "0.57872206", "0.5693269", "0.566746", "0.5642804", "0.56116366", "0.5606089", "0.56005824", "0.5568455", "0.5547063", "0.55245954", "0.5513016", "0.54848117", "0.5477921", "0.5474287", "0.54519325", "0.54477096", "0.5439994", "0.54385996", "0.543717", "0.54022706", "0.54004055", "0.5396557", "0.53954196", "0.53921455" ]
0.62103647
1
Append error message to errors.md file.
def _update_errors_report(self, model_name, error_msg): errors_filename = os.path.join(self._get_results_path(), "errors.md") with open(errors_filename, "a") as fout: self.verbose_print(f"There was an error during {model_name} training.") self.verbose_print(f"Please check {errors_filename} for details.") fout.write(f"## Error for {model_name}\n\n") fout.write(error_msg) link = "https://github.com/mljar/mljar-supervised/issues/new" fout.write( f"\n\nPlease set a GitHub issue with above error message at: {link}" ) fout.write("\n\n")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_error(self, msg):\n self._add_message(msg, self._errors)", "def add_error(self, reference_id, error):\n\n with open('runReport.txt', 'a') as report:\n try:\n report.write(\"\\nError: \" + self.domain + \" \" + reference_id + \": \" + error)\n except Exception:\n report.write(\"\\nError: \" + self.domain + \" \" + reference_id)", "def add_error(self, message):\n self.errors.append(message)", "def add_error(self, content):\n self._add_content(html_error(content))", "def append_error(self, msg):\n if msg.startswith(IGNORE_PREFIX):\n misc.cdblogv(misc.kLogErr, 0, \"bomcreator: error message cannot be ignored (%s)\" % msg)\n msg = msg[len(IGNORE_PREFIX):]\n self._messages.append((msg, 'alert-error'))\n misc.cdblogv(misc.kLogErr, 0, \"bomcreator error hint: \" + msg)\n self._hasError = True", "def add_error(\n self,\n message: str,\n position: Optional[Tuple[int, int]] = None,\n headline: Optional[Headline] = None,\n word: Optional[Word] = None,\n ) -> None:\n start: int = 0\n end: int = 0\n\n if position:\n start, end = position\n elif headline:\n start, end = self.report.get_headline_position(headline)\n elif word:\n start, end = self.report.get_word_postion(word)\n\n self.errors.append({\"message\": message, \"start\": start, \"end\": end})", "def add(self, message: str) -> None:\n self.errors.append(message.strip())", "def add_error(*msg):\n\n global errors\n errors.append(''.join(msg))", "async def add_error(self, ctx, error):\n embed: Embed = settings.get_ticket_error_embed()\n\n embed.set_footer(text=embed.footer.text,\n icon_url=self.bot.user.avatar_url)\n print(error)\n if isinstance(error, commands.MissingRequiredArgument):\n embed.description = f\"\\nUse **!add <user>**\"\n elif isinstance(error, commands.BadArgument):\n embed.description = f\"\\nUser not found.\"\n else:\n embed.description = f\"\\nYou don't have permissions for executing this command.\"\n\n await ctx.send(embed=embed)", "async def gen_error(error_id: str, ctx: commands.Context) -> Embed:\n errors = get_file(\"errors\")\n error = Embed(color=error_color)\n error.add_field(name=\"⚠️ \" + errors[error_id][\"title\"], value=errors[error_id]['txt'])\n error = set_footer(error, ctx)\n await ctx.send(embed=error)", "def add_error(self, err_msg):\n assert err_msg is not None, 'err_msg cannot be None'\n\n self.error_found = True\n self.error_message = err_msg.strip()", "def add_error(self, path, error):\n self.errors = merge_errors(self.errors, self._make_error(path, error))", "def add_error(self, error):\n self.errors.append(error)", "def _insertErrorMsg(self, ErrorMessage, outputFileObject):\n outputFileObject.write('<font color=\"' + AutoGrader.Const.ERROR_COLOR + '\">')\n outputFileObject.write (ErrorMessage)\n outputFileObject.write('</font>')", "def add_error(self, u_file: UserFile, code: Code, msg: str,\n severity: Severity = Severity.FATAL,\n is_persistant: bool = True) -> None:", "def add_errdir(self):\n os.rename(self.rundir[-1], self.rundir[-1] + \"_err.\" + str(len(self.errdir)))\n self.update_errdir()", "def add_error_entry(title, description):\n global data_output\n\n data_output.append({\n 'title': title,\n 'value': description,\n 'color': fg(\"grey_30\")\n })", "def initialize_error_summary() -> str:\n error_summary = '\\nSummary of <span class=\"tex-fatal\">Critical Errors:</span>\\n\\n<ul>\\n'\n return error_summary", "def error(self, msg):\n if self.current_line and self.current_file:\n msg = '{}\\nError in {} line {}'.format(\n msg, self.current_file, self.current_line)\n return 
self.DirectiveError(msg)", "def _reportErrorMsg(self, ErrorMessage, outputFile):\n f=self.openFile(outputFile, \"a\") #open otuputFile for appending\n self._insertErrorMsg(ErrorMessage, f)\n f.close()", "def write_error_summary(error):\n fullpath = request.environ.get('FULLPATH', request.path)\n uid = c.user._id if c.user_is_loggedin else '-'\n g.log.error(\"E: %s U: %s FP: %s\", error, uid, fullpath)", "def add_errors(self, errors):\n self.errors = merge_errors(self.errors, errors)", "def error(msg):\n sys.stdout.write('%s[ ERROR ]%s %s\\n' % (colors.RED, colors.RESET, msg))", "def AppendErrorMessage(self, error_message):\n self._test_results[self._output_volume_index] = False\n self._test_message.append(\n 'Under output volume %r' % self._output_volumes[\n self._output_volume_index])\n self._test_message.append(error_message)\n session.console.error(error_message)", "def error(self, message: str) -> None:\n lines = message.split('\\n')\n linum = 0\n formatted_message = ''\n for line in lines:\n if linum == 0:\n formatted_message = 'Error: ' + line\n else:\n formatted_message += '\\n ' + line\n linum += 1\n\n self.print_usage(sys.stderr)\n\n # Format errors with style_warning()\n formatted_message = ansi.style_warning(formatted_message)\n self.exit(2, '{}\\n\\n'.format(formatted_message))", "def errormessage(self, msg) :\n\t\tif msg != self.__olderror :\n\t\t\tself.__stderr.write(\"%s\\n\" % msg)\n\t\t\tself.htmlmessage(msg)\n\t\tself.__olderror = msg[:]\n\t\treturn -1", "def err(message: str) -> None:\n filename, line = filename_line()\n\n with State.lock:\n State.stderr.write(err_as_text(filename=filename, line=line, message=message))\n State.stderr.flush()", "def add_error(self, field, message):\n add_list_value(self.errors, field, message)", "def add_error(self, u_file: UserFile, code: Code, msg: str,\n severity: Severity = Severity.FATAL,\n is_persistant: bool = True) -> None:\n e = Error(severity=severity, path=u_file.path, code=code,\n message=msg, is_persistant=is_persistant)\n u_file.add_error(e)", "def set_error_message(msg):\n set_message(msg, TYPE_ERROR)" ]
[ "0.68803924", "0.68077487", "0.6791102", "0.6587171", "0.65130657", "0.6467027", "0.6418997", "0.6406481", "0.63759094", "0.6353462", "0.63198704", "0.6297876", "0.62030095", "0.6134235", "0.61272323", "0.60763824", "0.60648596", "0.605622", "0.6027484", "0.5985176", "0.598342", "0.5975521", "0.5962527", "0.5954178", "0.59461045", "0.59300107", "0.5927192", "0.58871007", "0.58852726", "0.5885242" ]
0.7012683
0
Gets the current model_time_limit
def _get_model_time_limit(self): self._validate_model_time_limit() return deepcopy(self.model_time_limit)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def timelimit(self):\n return self._timelimit", "def time_limit(self) -> float:\n return self._time_limit", "def _get_total_time_limit(self):\n self._validate_total_time_limit()\n if self._get_mode() == \"Optuna\":\n return None # there no training limit for model in the Optuna mode\n # just train and be happy with super models :)\n return deepcopy(self.total_time_limit)", "def time_limit(self):\n all_time_limit_updates = self.updates.exclude(\n time_limit_delta=timedelta())\n return self.time_limit_as_of_update(\n all_time_limit_updates.latest('id'))", "def timelimit_hard(self):\n return self._timelimit_hard", "def get_current_timeout(cls):\n return cls.current().get_timeout()", "def time_limit(self):\n return 2503", "def get_timeout(self):\n return self.timeout", "def get_limit(self):\n return self.limit", "def get_limit_per_second(self):\n pass", "def max_time(self):\n return self._max_time", "def get_view_rate_limit():\n return getattr(g, '_view_rate_limit', None)", "def get_limit(self):\n return self._limit", "def get_limit(self):\n return self._limit", "def _get_max_suppress_time(self):\n return self.__max_suppress_time", "def max_timeout(self):\n return self._max_timeout", "def max_time(self) -> str:\n return self._max_time", "def gettimeout(self):\r\n return self._timeout", "def get_timeout(self) -> int:", "def limit(self):\n return self._limit", "def limit(self):\n return self._limit", "def check_engine_limits(current_rqmt, task):\n current_rqmt['time'] = min(168, current_rqmt.get('time', 1))\n return current_rqmt", "def get_rate_limit(self):\n resp = self._session.get(self.API_ROOT + \"/rate_limit\")\n log.info(resp.text)", "def MaxWaitTime(self):\r\n\t\treturn self._get_attribute('maxWaitTime')", "def max_delay_time(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max_delay_time\")", "def max_delay_time(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max_delay_time\")", "def limit(self):\n return self._owner.plan", "def _get_time_interval_in_minutes(self):\n return self.visa.get_request_interval_in_minutes()", "def max_time(self):\n #{{{ function to return time of last sample\n\n if self.maxtime == -1:\n return stock.now()\n\n return self.maxtime", "def timeout(self):\n return self._timeout" ]
[ "0.808775", "0.7704996", "0.73726135", "0.7298717", "0.72923505", "0.6901317", "0.6841979", "0.66637945", "0.6617291", "0.6602201", "0.6576346", "0.6545491", "0.6522294", "0.6522294", "0.62877345", "0.6281024", "0.62770474", "0.6243926", "0.62277204", "0.62133306", "0.62133306", "0.61373436", "0.6106775", "0.61018616", "0.60987335", "0.60987335", "0.5951156", "0.5906317", "0.5894278", "0.5867684" ]
0.8786821
0
Gets the current algorithms. If "auto" it is determined
def _get_algorithms(self): self._validate_algorithms() if self.algorithms == "auto": if self._get_mode() == "Explain": return [ "Baseline", "Linear", "Decision Tree", "Random Forest", "Xgboost", "Neural Network", ] if self._get_mode() == "Perform": return [ "Linear", "Random Forest", "LightGBM", "Xgboost", "CatBoost", "Neural Network", ] if self._get_mode() == "Compete": return [ "Decision Tree", "Linear", "Random Forest", "Extra Trees", "LightGBM", "Xgboost", "CatBoost", "Neural Network", "Nearest Neighbors", ] if self._get_mode() == "Optuna": return [ "Random Forest", "Extra Trees", "LightGBM", "Xgboost", "CatBoost", "Neural Network", ] else: return deepcopy(self.algorithms)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def algorithms():\n algorith_paradigms = ['Divide-and-conquer', 'Backtrackig', 'Greedy-Algorithms', 'Dynamic-programming']\n return algorith_paradigms", "def get_algorithm(self):\n return self.alg", "def get_algorithm(self):\n pass", "def algorithms(self):\n if self._algorithms is None:\n uri = \"/loadbalancers/algorithms\"\n resp, body = self.method_get(uri)\n self._algorithms = [alg[\"name\"] for alg in body[\"algorithms\"]]\n return self._algorithms", "def get_alg(self):\r\n raise NotImplementedError", "def _get_algorithm(self, **options):\n\n raise CoreNotImplementedError()", "def algorithm(self) -> str:\n return pulumi.get(self, \"algorithm\")", "def algorithm(self) -> str:\n return pulumi.get(self, \"algorithm\")", "def algorithm(self):\n return self._algorithm", "def algorithms_factory():\n all_algorithms = []\n for algorithm_module in ALGORITHMS:\n module_name = \"{}.{}\".format(PREFIX, algorithm_module)\n module = importlib.import_module(module_name)\n for item in dir(module):\n item = getattr(module, item)\n try:\n if issubclass(item, base.Algorithm):\n item.is_implemented()\n else:\n continue\n except (exceptions.AlgorithmsNotImplemented, TypeError):\n continue\n\n all_algorithms.append(item)\n\n return all_algorithms", "def _available_algorithms(**_: str) -> Set[str]:\n avail = set()\n pass2 = set()\n for algo in hashlib.algorithms_available:\n lalgo = algo.lower()\n if \"with\" in lalgo:\n continue # skip apparently redundant ones\n if lalgo != algo:\n pass2.add(algo)\n else:\n avail.add(lalgo)\n for algo in pass2:\n if algo.lower() not in avail:\n avail.add(algo)\n return avail", "def get_algorithm(self):\n if self.ALGO_INFO not in self._data_dict:\n return None\n algo_group = dao.find_group(self._data_dict[self.ALGO_INFO]['module'], \n self._data_dict[self.ALGO_INFO]['class'],\n self._data_dict[self.ALGO_INFO]['init_param'])\n if algo_group:\n algorithm = dao.get_algorithm_by_group(algo_group.id, self._data_dict[self.ALGO_INFO]['identifier'])\n return algorithm\n return None", "def algorithm(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"algorithm\")", "def Algorithm(self):\n return self._get_attribute('algorithm')", "def algorithm(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"algorithm\")", "def algorithm(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"algorithm\")", "def __top_Algs_ ( self ) :\n _algs = self.TopAlg\n\n def _alg_name_ ( _n ):\n\n _p = _n.rfind('/')\n if 0 > _p : return _n\n return _n[_p:]\n\n def _pyAlg ( _n ) :\n for _a in self.pyalgorithms :\n if _n == _a.name() : return _a\n return None \n \n algs = [] \n for _a in _algs :\n # get the proper name \n _n = _alg_name_ ( _a )\n # check if it is pyalgorithm:\n _pa = _pyAlg ( _n )\n if _pa :\n algs += [ _pa ]\n else :\n _alg = self.algorithm ( _a , True )\n algs += [ _alg ]\n \n return algs", "def __top_Algs_ ( self ) :\n _algs = self.TopAlg\n\n def _alg_name_ ( _n ):\n\n _p = _n.rfind('/')\n if 0 > _p : return _n\n return _n[_p:]\n\n def _pyAlg ( _n ) :\n for _a in self.pyalgorithms :\n if _n == _a.name() : return _a\n return None \n \n algs = [] \n for _a in _algs :\n # get the proper name \n _n = _alg_name_ ( _a )\n # check if it is pyalgorithm:\n _pa = _pyAlg ( _n )\n if _pa :\n algs += [ _pa ]\n else :\n _alg = self.algorithm ( _a , True )\n algs += [ _alg ]\n \n return algs", "def __all_Algs_ ( self ) :\n _algs = self.algorithms()\n\n algs = []\n for _a in _algs :\n algs += [ self.algorithm ( _a ) ]\n return algs", "def __all_Algs_ ( self ) :\n _algs = 
self.algorithms()\n\n algs = []\n for _a in _algs :\n algs += [ self.algorithm ( _a ) ]\n return algs", "def _get_algorithm(self, **options):\n\n return 'TEST'", "def alg(self) -> t.Optional[str]:\n return self._alg", "def initialize_algorithm(algo_cls: Type):\n if algo_cls == algorithms.Alibi:\n return algo_cls(max_sequence_length=1)\n elif algo_cls == algorithms.StochasticDepth:\n return algo_cls(target_layer_name='ResNetBottleneck')\n elif algo_cls == algorithms.FusedLayerNorm or algorithms.LowPrecisionLayerNorm:\n pytest.importorskip('apex')\n return algo_cls()\n elif algo_cls == algorithms.GatedLinearUnits:\n pytest.importorskip('transformers')\n return algo_cls()\n elif algo_cls == algorithms.Factorize:\n return algo_cls(min_features=48, latent_features=24)\n elif algo_cls == algorithms.SqueezeExcite:\n return algo_cls(min_channels=32)\n else:\n return algo_cls()", "def get_session_algorithms(self): # real signature unknown; restored from __doc__\n return \"\"", "def get_common_algorithm(external, prefered=None):\n if prefered is not None:\n if prefered in external:\n return prefered\n for alg in ALGORITHMS:\n if alg in external:\n return alg\n raise ValueError(\"No common algorithm found\")", "def get_algo_info(self, algo=None, **kwargs):\n if algo:\n return self.mrr_obj.get('/info/algos' + '/' + algo, **kwargs)\n return self.mrr_obj.get('/info/algos')", "def _get_algorithm(name: str) -> Any:\n algo_cls = getattr(hashes, name.upper(), None) # hack: get class object by name\n if algo_cls is None:\n raise ValueError(f'Unsupported algorithm: hashes.{name}'.format(name=name.upper()))\n\n return algo_cls() # pylint: disable=not-callable", "def algorithm(self):\n\n if self.__algorithm:\n return self.__algorithm\n if isinstance(self, Map): return None\n return self.ancestor.algorithm", "def _get_strategies(self) -> Dict[str, str]:\n strategies = [method for method in dir(self) if STRATEGY_IDENTIFIER in method]\n\n if not strategies:\n logger.warning(\n \"There are no strategy provided. \"\n \"Make sure the implemented strategy methods \"\n \"start contain the '%s' term.\" % STRATEGY_IDENTIFIER\n )\n return {str(n_method): method for n_method, method in enumerate(strategies)}", "def get_tracker(algorithm):\n if algorithm == \"default\":\n algorithm = \"peaks\"\n\n if algorithm == \"peaks\":\n from fingertracker_peaks import FingerTrackerPeaks\n return FingerTrackerPeaks\n elif algorithm == \"skeleton\":\n from fingertracker_skeleton import FingerTrackerSkeleton\n return FingerTrackerSkeleton\n else:\n print \"Unknown algorithm: {}\".format(algorithm)" ]
[ "0.72617215", "0.69703907", "0.6834078", "0.649296", "0.64006877", "0.6347613", "0.62988967", "0.62988967", "0.62693024", "0.62255967", "0.6208306", "0.6183921", "0.6171044", "0.6160205", "0.6155423", "0.6155423", "0.60712767", "0.60712767", "0.6055243", "0.6055243", "0.60232526", "0.5974244", "0.5836437", "0.58189136", "0.58051974", "0.5659022", "0.5638491", "0.562957", "0.5592733", "0.55262816" ]
0.79699975
0
Gets the current train_ensemble
def _get_train_ensemble(self): self._validate_train_ensemble() return deepcopy(self.train_ensemble)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ensemble(self):\n return self._ensemble", "def getTrainSet(self):\r\n return self.fTrainData", "def training_set(self):\n return self._training_set", "def getTrainInstance(self): #NOTE: Probably faster way of doing this than additional 'if' statement every learning iteration\r\n return [self.currentTrainState, self.currentTrainPhenotype] #Return unadulterated training data\r", "def get_classifier(self):\n return self.model", "def ensemble_perts(self):\n #emean = self.ensemble_mean()\n return self - self.ensemble_mean()\n #return self.state.values", "def get_hr_ensemble(self, ensemble_id: str):\n return self.hr_ensemble.get(ensemble_id, None)", "def train_data(self):\n return self._train_data", "def ensemble_mean(self):\n self.cube = self.cube_ensemble_mean(self.cube)\n self.processes.append('ensemble_mean')\n return self.cube", "def get_train_examples(self):\n raise NotImplementedError()", "def get_estimator_state(self):\n return self.estimator.state_dict()", "def get_estimator_state(self):\n return self.estimator.state_dict()", "def get_ensemble_model():\n ss = StandardScaler()\n xgb_clf = xgb.XGBClassifier(objective=\"binary:logistic\", random_state=42)\n\n xgb_model = Pipeline(steps=(['scale', ss], ['clf', xgb_clf]))\n\n xgb_model_params = {\n \"clf__colsample_bytree\": uniform(0.5, 0.5), # default 1\n \"clf__gamma\": loguniform(1e-1, 1e3), # default 0\n \"clf__learning_rate\": uniform(0.03, 0.57), # default 0.3\n \"clf__max_depth\": randint(2, 5), # default 3\n \"clf__n_estimators\": randint(10, 50), # default 100\n \"clf__subsample\": uniform(0.5, 0.25), # default 1\n \"clf__min_child_weight\": randint(1, 8) # default 1\n }\n\n # model: classifier with randomised parameter search over nested 3-fold CV (more iters to account for large space)\n ensemble_model = RandomizedSearchCV(xgb_model, xgb_model_params, n_iter=250, cv=3)\n\n return clone(ensemble_model)", "def train(self):\n return", "def _get_classifier(self):\n return self.__classifier", "def _get_classifier(self):\n return self.__classifier", "def _get_classifier(self):\n return self.__classifier", "def _get_classifier(self):\n return self.__classifier", "def _get_classifier(self):\n return self.__classifier", "def _get_classifier(self):\n return self.__classifier", "def _get_classifier(self):\n return self.__classifier", "def _get_classifier(self):\n return self.__classifier", "def _get_classifier(self):\n return self.__classifier", "def train(self):\n return self.with_transforms(\"train\")", "def ensemble():\n return {\n \"type\": \"class\",\n \"base\": \"activity.activity\",\n \"is_abstract\": False,\n \"properties\": [\n (\n \"common_conformances\",\n \"linked_to(activity.conformance)\",\n \"0.N\",\n \"Conformance documents for requirements common across \"\n \"ensemble.\",\n ),\n (\n \"representative_performance\",\n \"linked_to(platform.performance)\",\n \"0.1\",\n \"Representative model performance across ensemble.\",\n ),\n (\n \"documentation\",\n \"shared.online_resource\",\n \"0.N\",\n \"Links to web-pages and other ensemble specific documentation \"\n \"(including workflow descriptions).\",\n ),\n (\n \"ensemble_axes\",\n \"linked_to(activity.ensemble_axis)\",\n \"0.N\",\n \"Set of axes for the ensemble.\",\n ),\n (\n \"uber_ensembles\",\n \"linked_to(activity.uber_ensemble)\",\n \"0.N\",\n \"Link to one or more over-arching ensembles that might \"\n \"includes this one.\",\n ),\n (\n \"experiments\",\n \"linked_to(designing.numerical_experiment)\",\n \"1.N\",\n \"Experiments with which the ensemble is 
associated (may \"\n \"differ from constituent simulations).\",\n ),\n (\n \"members\",\n \"linked_to(activity.simulation)\",\n \"0.N\",\n \"Simulations within ensemble (should only be zero while \"\n \"ensemble is being defined)\",\n ),\n ],\n \"constraints\": [\n (\"cardinality\", \"rationale\", \"0.0\"),\n (\"cardinality\", \"canonical_name\", \"0.0\"),\n (\"cardinality\", \"keywords\", \"0.0\"),\n (\"cardinality\", \"duration\", \"0.0\"),\n ],\n }", "def getTrainingData(self):\n raise NotImplementedError", "def train_op_a(self):\r\n return self._train_op_a", "def ensemble_mean(self):\n new_cubelist = []\n for cube in self.cubelist:\n new_cubelist.append(self.cube_ensemble_mean(cube))\n self.cubelist = iris.cube.CubeList(new_cubelist)\n self.processes.append('ensemble_mean')\n return self.cubelist", "def get_trainer(self):\n return AutoEncoderTrainer", "def uber_ensemble():\n return {\n \"type\": \"class\",\n \"base\": \"activity.ensemble\",\n \"is_abstract\": False,\n \"properties\": [\n (\n \"child_ensembles\",\n \"linked_to(activity.ensemble)\",\n \"1.N\",\n \"Ensemble which are aggregated into this one.\",\n )\n ],\n \"constraints\": [\n (\"cardinality\", \"ensemble_axes\", \"1.N\"),\n (\"cardinality\", \"common_conformances\", \"0.0\"),\n (\"cardinality\", \"members\", \"0.0\"),\n ],\n }" ]
[ "0.82772297", "0.6414114", "0.6375453", "0.63674873", "0.62640953", "0.6179646", "0.6133189", "0.6104231", "0.6051133", "0.59600097", "0.5959593", "0.5959593", "0.59380734", "0.59326303", "0.58842814", "0.58842814", "0.58842814", "0.58842814", "0.58842814", "0.58842814", "0.58842814", "0.58842814", "0.58842814", "0.57831234", "0.577973", "0.57277197", "0.57136345", "0.5684906", "0.5673595", "0.56684524" ]
0.84837633
0
Gets the current stack_models
def _get_stack_models(self): self._validate_stack_models() if self.stack_models == "auto": val = self._get_validation_strategy() if val.get("validation_type", "") == "custom": return False return True if self.mode in ["Compete", "Optuna"] else False else: return deepcopy(self.stack_models)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def models(self):\r\n return self.get_field('model')", "def models(self):\r\n return self.get_field('model')", "def get_models(self):\n self.load()\n return self._models", "def get_models(self):\n return self.P, self.Q", "def models(self):\n return self.config.models()", "def models(self):\n return self._base.classes", "def availablemodels(self):\n return self.__models.keys()", "def getModel(self):\n return _libsbml.SBase_getModel(self)", "def get_model(self):\n return self.chain.model", "def getModel(self):\n return self.model", "def get_stack(self):\n return self.__stack", "def model(self):\n return self.model_", "def _getModel(self):\r\n \r\n return self._model", "def model(self):\n return MODELS.get(self._model,self._model)", "def get_model(self):\n return self.model", "def get_model(self):\n return self.model", "def get_model(self):\n return self.model", "def get_model(self):\n return self.model", "def get_model(self):\n return self.model", "def get_model(self):\n return self.model", "def get_model(self):\n return self.model", "def get_model(self):\n return self.model", "def get_model(self):\n return self.model", "def get_model(self):\n return self.model", "def get():\n\n return {'model_ids': mgmt.get_model_ids()}", "def iter_models(self):\n return iter(self.model_list)", "def models(self) -> list[AbstractModel]:\n return self._models", "def model(self):\n return self.__model", "def get_object_models(self):\n parser = WorldParser(self.world_fpath)\n return parser.models", "def get_model(self):\n return self._model" ]
[ "0.6803391", "0.6803391", "0.64394", "0.6406779", "0.6275852", "0.6213419", "0.6203647", "0.6106908", "0.60893244", "0.6065379", "0.5988689", "0.5973042", "0.5936472", "0.5927681", "0.5875577", "0.5875577", "0.5875577", "0.5875577", "0.5875577", "0.5875577", "0.5875577", "0.5875577", "0.5875577", "0.5875577", "0.5820495", "0.5818902", "0.578552", "0.5754407", "0.57457685", "0.57137257" ]
0.72028995
0
Gets the current validation_strategy
def _get_validation_strategy(self): strat = {} self._validate_validation_strategy() if self.validation_strategy == "auto": if self._get_mode() == "Explain": strat = { "validation_type": "split", "train_ratio": 0.75, "shuffle": True, "stratify": True, } elif self._get_mode() == "Perform": strat = { "validation_type": "kfold", "k_folds": 5, "shuffle": True, "stratify": True, } elif self._get_mode() in ["Compete", "Optuna"]: strat = { "validation_type": "kfold", "k_folds": 10, "shuffle": True, "stratify": True, } if self._get_ml_task() == REGRESSION: if "stratify" in strat: # it's better to always check # before delete (trust me) del strat["stratify"] return strat else: strat = deepcopy(self.validation_strategy) if self._get_ml_task() == REGRESSION: if "stratify" in strat: del strat["stratify"] return strat
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_validate(self):\n return self.validate", "def validator(self):\n return self._validator", "def paramValidationPref(self):\n # If the level of the object is below the Preference level,\n # recursively call base (super) classes to get preference at specified level\n return self.get_pref_setting_for_level(PARAM_VALIDATION_PREF, self._param_validation_pref.level)[0]", "def get_validator(self):\n return self.get_validator_class()(**self.get_validator_kwargs())", "def get_validator_class(self):\n return self.validator_class", "def get_layout_validator(self):\n if self._layout_validator is None:\n self._compute_layout_validator()\n return self._layout_validator", "def get_validation_schema(self):\n return self.validation_schema", "def session_validator(self):\n return self.session.validator", "def _get_validator(self):\n validator_class = self.validator_class or AssertionValidator\n if validator_class:\n return validator_class()", "def validation_frame(self):\n return self._parms.get(\"validation_frame\")", "def validation_state(self) -> str:\n return pulumi.get(self, \"validation_state\")", "def validation_config(self) -> Optional[pulumi.Input['ValidationConfigArgs']]:\n return pulumi.get(self, \"validation_config\")", "def validation_path(self):\n return self._validation_path", "def validation_id(self):\n return self._validation_id", "def get_validator(cls):\n cls.validator.model = cls\n return cls.validator or SageValidator", "def get_validator_class(self):\n validator_class = self.oauth_validator_class\n if validator_class is not None:\n return validator_class\n return oauth_api_settings.DEFAULT_VALIDATOR_CLASS", "def validation_type(self) -> Optional[pulumi.Input[Union[str, 'ValidationType']]]:\n return pulumi.get(self, \"validation_type\")", "def validation_required(self):\n return self._validation_required", "def _get_validation_method(self):\n return CommentWfItem.process_comment", "def validation_type(self) -> Optional[str]:\n return pulumi.get(self, \"validation_type\")", "def strategy_config(self):\n return self._strategy_config", "def validator(self) -> Optional[Dict[str, Any]]:\n return self._validator", "def get_save_strategy(self):\r\n return self.save_strategy", "def validations(self):\n return self.container['validations']", "def validation_config(self) -> pulumi.Output['outputs.ValidationConfigResponse']:\n return pulumi.get(self, \"validation_config\")", "def strategy(self) -> Optional[pulumi.Input['UpgradeSettingsStrategy']]:\n return pulumi.get(self, \"strategy\")", "def get_rule(self):\n\n return self.__rule_", "def get_setting_validator(cls, key, **kwargs):\n setting = cls.get_setting_definition(key, **kwargs)\n\n return setting.get('validator', None)", "def validations(self) -> Optional[Sequence['outputs.ValidationPatch']]:\n return pulumi.get(self, \"validations\")", "def authentication_strategy(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"authentication_strategy\")" ]
[ "0.6838031", "0.6627097", "0.6515644", "0.6456136", "0.6417892", "0.63516897", "0.634212", "0.63212353", "0.6285395", "0.6255339", "0.6221338", "0.62149113", "0.6194205", "0.61286926", "0.60330224", "0.60326296", "0.60178643", "0.5966911", "0.5960197", "0.59587127", "0.58250636", "0.5818182", "0.58013624", "0.57982504", "0.5790065", "0.5705156", "0.567935", "0.56018883", "0.5594347", "0.55285174" ]
0.67801
1
Gets the current explain_level
def _get_explain_level(self): self._validate_explain_level() if self.explain_level == "auto": if self._get_mode() == "Explain": return 2 if self._get_mode() == "Perform": return 1 if self._get_mode() == "Compete": return 0 if self._get_mode() == "Optuna": return 0 else: return deepcopy(self.explain_level)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def help_explain(self):\n print(EXPLAIN)", "def level(self):\n return self.__level", "def level(self):\n return self.__level", "def getLevel(self):\n return _libsbml.ASTBasePlugin_getLevel(self)", "def get_level(self):\n return self.debug_level, self.verbosity", "def level(self) -> int:\n return self.__state.level()", "def level(self):\n return self._level", "def level(self):\n return self._level", "def level(self):\n return self._level", "def level(self):\n return self._level", "def level(self) -> int:\n return self._level", "def level(self) -> int:\n return self._level", "def get_level(self) -> int:\n return self.rstate.level()", "def level(self) -> int:\n return self.categorization.level(self)", "def getLevel(self):\n return _libsbml.SBasePlugin_getLevel(self)", "def getResultLevel(self):\n return _libsbml.DefaultTerm_getResultLevel(self)", "def explain(self):", "def explain(self, *, format=None, **options):\n return self.query.explain(using=self.db, format=format, **options)", "def getLevel(self):\n return self._level", "def Explain(self, request, global_params=None):\n config = self.GetMethodConfig('Explain')\n return self._RunMethod(\n config, request, global_params=global_params)", "def logging_verbosity(self):\n\n return self.get_raw(\"logging_verbosity\")", "def currentLevel( self ):\n assert isinstance( self._env, Env )\n assert isinstance( self._steps, list )\n\n return self._env.level( )", "def getLevel(self):\n return _libsbml.SBase_getLevel(self)", "def level(self):\n return self.init_v[2]", "def verbose( self ):\n return Verbose.__level", "def explain_query(self, query):\n return self.user_con.explain_query(query)", "def option(self):\r\n return conf.lib.clang_getDiagnosticOption(self, None)", "def verbosity(self):\n return self._verbosity", "def unittest_verbosity():\n frame = inspect.currentframe()\n while frame:\n self = frame.f_locals.get(\"self\")\n if isinstance(self, unittest.TestProgram):\n return self.verbosity\n frame = frame.f_back\n return 0", "def get_debug_level(self):\n return self.debug_level" ]
[ "0.62678665", "0.59786433", "0.59786433", "0.5973489", "0.5863395", "0.58591205", "0.58441484", "0.58441484", "0.58441484", "0.58441484", "0.5830532", "0.5830532", "0.57422584", "0.55639464", "0.55551803", "0.55497247", "0.5544945", "0.55366", "0.5531055", "0.55061364", "0.54631025", "0.5451836", "0.5439167", "0.5430806", "0.540714", "0.53675157", "0.5367409", "0.5332075", "0.531812", "0.53156674" ]
0.86530817
0