query (stringlengths 9-9.05k) | document (stringlengths 10-222k) | metadata (dict) | negatives (sequencelengths 30) | negative_scores (sequencelengths 30) | document_score (stringlengths 4-10) | document_rank (stringclasses 2 values) |
---|---|---|---|---|---|---|
Write the concordance entries to the output file (filename). See sample output files for format. | def write_concordance(self, filename):
    all_keys = self.concordance_table.get_all_keys()
    lines = []
    for key in all_keys:
        # Build one "word: position position ..." line per key
        line = key + ":"
        values = self.concordance_table.get_value(key)
        if values is not None:
            for value in values:
                line += " " + str(value)
        line += "\n"
        lines.append(line)
    with open(filename, "w") as out_file:
        for line in lines:
            out_file.write(line) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def write_concordance(self, filename):\n out = ''\n values = [x for x in self.concordance_table.hash_table if x is not None]\n values.sort(key=lambda x: x[0])\n for v in values:\n out += f'{v[0]}: {\" \".join(str(x) for x in sorted(set(v[1])))}\\n' \n with open(filename, 'w') as f:\n f.write(out.rstrip())",
"def write_cando_file(self, file_name):\n cando_writer = CandoWriter(self.dna_structure)\n cando_writer.write(file_name)",
"def _write_conductances(self, cond_file_name):\n cond_file_path = os.path.join(OM_STORAGE_DIR, cond_file_name)\n\n #TODO: Check that the file doesn't already exist.\n LOG.info(\"Writing head conductance file: %s\" % cond_file_path)\n file_handle = file(cond_file_path, \"a\")\n\n file_handle.write(\"# Properties Description 1.0 (Conductivities)\\n\\n\")\n file_handle.write(\"Air %4.2f\\n\" % self.conductances[\"air\"])\n file_handle.write(\"Scalp %4.2f\\n\" % self.conductances[\"skin\"])\n file_handle.write(\"Brain %4.2f\\n\" % self.conductances[\"brain\"])\n file_handle.write(\"Skull %4.2f\\n\" % self.conductances[\"skull\"])\n\n file_handle.close()\n LOG.info(\"%s written successfully.\" % cond_file_path)\n\n return cond_file_path",
"def write_output(self):\n with open(self.filename, 'a', newline='', encoding='utf-8') as \\\n csv_file:\n csv_writer = csv.writer(csv_file)\n if os.stat(self.filename).st_size == 0:\n # if the csv file needs a headers\n csv_writer.writerow(Configurations.header)\n for quote in self.quotes_objects:\n csv_writer.writerow(quote.info)",
"def write_conll(conll_file, sents):\n with codecs.open(conll_file, mode = 'w', errors = 'ignore', encoding = 'utf-8') as ofile:\n for sent in sents:\n if sent:\n for element in sent:\n word = element[0]\n tag = element[1]\n ofile.write(str(tag) + '\\t' + str(word) + '\\n')\n ofile.write('\\n')",
"def write_CA_atoms():\n \n import os\n choice = input('Enter the name of the file: ')\n filepath = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Data', choice)\n ca_list = []\n with open(filepath, 'r') as pdb:\n for line in pdb:\n if line[:4] == 'ATOM' and line[12:16] == \" CA \":\n line_split = line.split()[6:9]\n ca_list.append(line_split)\n choice1 = input('Enter name of the outfile: ')\n filepath1 = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Results', choice1)\n with open(filepath1, 'w') as outfile:\n for i in ca_list:\n outfile.writelines(i)\n print('Done!')\n print(i)",
"def write_output():\n f = open(OUTPUT_FILE, 'w')\n for case_index, words in get_output():\n f.write('Case #%d: %s\\n' % (case_index, ' '.join(words)))\n f.close()",
"def file_output(matches: list, output_file_name: str = 'matches.txt'):\n with open(\"test/Matches/\" + output_file_name, 'w') as f:\n for match in matches:\n for event in match.events:\n f.write(\"%s\\n\" % event.payload)\n f.write(\"\\n\")",
"def write_output_file(filename, actions, log):\n f = open(filename, 'w')\n\n for i in range(len(actions)):\n f.write(str(actions[i]))\n if i < len(actions) - 1:\n f.write(',')\n f.write('\\n')\n\n for k in log.keys():\n f.write(str(k) + ' = ' + str(log.get(k)))\n f.write('\\n')\n\n f.close()",
"def write_output_file(filename, actions):\n f = open(filename, 'w')\n for i in range(len(actions)):\n f.write(str(actions[i]))\n if i < len(actions) - 1:\n f.write(',')\n f.write('\\n')\n f.close()",
"def write_output_file(filename, actions):\n f = open(filename, 'w')\n for i in range(len(actions)):\n f.write(str(actions[i]))\n if i < len(actions) - 1:\n f.write(',')\n f.write('\\n')\n f.close()",
"def write_output_file(filename, actions):\n f = open(filename, 'w')\n for i in range(len(actions)):\n f.write(str(actions[i]))\n if i < len(actions) - 1:\n f.write(',')\n f.write('\\n')\n f.close()",
"def write_corpus_to_file(output_file, corpus): \n \n file = open(output_file, 'w')\n for line in corpus: \n file.write(line)\n print ('Corpus has been writted in file')\n file.close()",
"def write_file(self, filename):\n\n with open(filename, 'w', newline = '') as csvfile:\n langwriter = csv.writer(csvfile, delimiter=' ',\n quotechar='|', quoting=csv.QUOTE_MINIMAL)\n for key in self.features:\n value = self.features[key]\n l = []\n for val in value:\n l.append(str(val))\n langwriter.writerow([l])\n return",
"def write_cn_cards(bc_file, bc_class):\n cn = bc_class.constituent_properties\n bc_file.write('! Constituent Properties\\n')\n if not cn.general_constituents.empty:\n # bc_file.write(cn.general_constituents.to_csv(sep=' ', index=False, header=False).replace('\\r\\n', '\\n'))\n for index, row in bc_class.constituent_properties.general_constituents.iterrows():\n bc_file.write(\n 'CN CON {} {}\\n'.format(row['ID'].astype('int'), row['CONC']))\n if not cn.sand.empty:\n # bc_file.write(cn.sand.to_csv(sep=' ', index=False, header=False).replace('\\r\\n', '\\n'))\n for index, row in bc_class.constituent_properties.sand.iterrows():\n bc_file.write(\n 'CN SND {} {} {} {} {}\\n'.format(row['ID'].astype('int'), *row[['C_0', 'C_1', 'C_2', 'C_3']].values))\n if not cn.clay.empty:\n # bc_file.write(cn.clay.to_csv(sep=' ', index=False, header=False).replace('\\r\\n', '\\n'))\n for index, row in bc_class.constituent_properties.clay.iterrows():\n bc_file.write(\n 'CN CLA {} {} {} {} {}\\n'.format(row['ID'].astype('int'), *row[['C_0', 'C_1', 'C_2', 'C_3']].values))\n if cn.salinity:\n bc_file.write('CN SAL {} {}\\n'.format(cn.salinity_id, cn.reference_concentration))\n if cn.temperature:\n bc_file.write('CN TMP {} {}\\n'.format(cn.temperature_id, cn.reference_temperature))\n if cn.vorticity:\n bc_file.write('CN VOR {} {} {} {}\\n'.format(cn.vorticity_id, cn.vorticity_normalization,\n cn.vorticity_as_term, cn.vorticity_ds_term))\n\n bc_file.write('\\n') # blank line at the end of the Constituent Properties",
"def write_to_file_ann(self) -> None:\n with open(self.output_file_path, mode='w', newline='') as csv_file:\n tweet = ['id', 'created_time', 'text']\n writer = csv.DictWriter(csv_file, fieldnames=tweet)\n writer.writeheader()\n for tweet in self.unique_tweets:\n try:\n writer.writerow(tweet)\n except:\n pass\n print(\"Tweets written to a file\")",
"def write_conformers(self, filename): # ccids):\n cnt = 0\n for confId in range(self.nconf): #ccids:\n w = Chem.SDWriter('%s_c%03d.sdf'%(filename,cnt+1))\n w.write(self.mol, confId=confId)\n w.flush()\n w.close()\n cnt += 1",
"def result_file(accession_list):\n with open(\"../accessions_list.txt\", 'w') as file:\n file.write(accession_list)",
"def writeCC(self, fileName, allSCC):\n f = open(fileName,'w')\n\n for compNumber in range(0,len(allSCC)):\n f.write(\"Component number %s: \" % (compNumber))\n f.write(\"%s\\n\" % (str(allSCC[compNumber])))\n f.close()",
"def write_output(arr, filename):\n print('Started writing the output..')\n f = open(filename, 'w')\n for a in arr:\n f.write(str(a) + '\\n')\n f.close()\n print('Done!, Open the file to see the approved loans.')",
"def write_crf_input(out_file, sentences, poss, lemmas, concepts):\n\n print '\\n\\tWrite out data in crf compliant format'\n f = open(out_file, 'w+')\n for position_i in range(len(sentences)):\n for position_j in range(len(sentences[position_i])):\n f.write(\n sentences[ position_i ][ position_j ] + '\\t' +\n poss[ position_i ][ position_j ] + '\\t' +\n lemmas[ position_i ][ position_j ] + '\\t' +\n concepts[ position_i ][ position_j ]\n + '\\n'\n )\n f.write('\\n')\n f.close()\n print '\\t--done'",
"def write_output_file(ad_models):\n\n with open('output-data-utf8.csv', 'w', newline='', encoding='UTF-8') as output_file:\n csv_writer = csv.writer(output_file, delimiter=',')\n for ad in ad_models:\n csv_writer.writerow((ad.date.strftime('%Y/%m/%d'), ad.country_code, ad.impression, ad.clicks))",
"def writeChronListToFile(self):\n ## write header\n for header_line in self.outData['header']:\n self.outFile.write(header_line + '\\n')\n ##loop through each msg list\n for msg_list in self.outData_temp:\n ## create line\n msg_line = reconstructLine(msg_list)\n ## write to file\n self.outFile.write(msg_line + '\\n')",
"def write_dialogue_to_file(utterances, dialogue_index, filename):\n with open(filename, 'a') as file:\n for sentence_index in range(len(utterances[dialogue_index][0])):\n file.write('{0} {1}\\n'.format(utterances[dialogue_index][0][sentence_index],\n utterances[dialogue_index][1][sentence_index]))",
"def write(self, filename):\n pass",
"def write(self, filename):\n pass",
"def write_to_file(info, mode='w', file=\"output4.txt\"):\n with open(file, mode, encoding='utf-8') as f:\n for line in info:\n f.write(' '.join(map(str, line)) + '\\n')",
"def export(self, fname):\n f = open(fname, 'w')\n for ue in self.ue_list:\n line_components = list()\n line_components.append(ue.expression)\n line_components.append(ue.meaning)\n print >>f, '\\t'.join(line_components).encode('utf-8')",
"def write_conll(cls, filename, writer, document_id, sentences):\n with open(filename, 'w') as fd:\n writer.write(fd, document_id, sentences)",
"def write_output(basis, filename):\n\n logging.info('Writing output to {}'.format(filename))\n\n basis.to_csv(filename)"
] | [
"0.7794726",
"0.66742295",
"0.64932483",
"0.64526165",
"0.6379942",
"0.63655496",
"0.63634735",
"0.62910575",
"0.6240714",
"0.6233921",
"0.6233921",
"0.6233921",
"0.61785156",
"0.61412483",
"0.61257005",
"0.610843",
"0.6082861",
"0.60720426",
"0.6064205",
"0.60603034",
"0.59847915",
"0.5953382",
"0.5949586",
"0.59256744",
"0.59232116",
"0.59232116",
"0.5918855",
"0.5918259",
"0.591524",
"0.59104925"
] | 0.7876976 | 0 |
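
As a quick illustration of the output format this method produces, here is a minimal standalone sketch, assuming the concordance is just a plain dict mapping each word to its positions (the hash-table class above is part of the original exercise and is not reproduced here):

```python
def write_concordance_dict(concordance, filename):
    # One "word: pos pos ..." line per entry, e.g. "apple: 1 4 9"
    with open(filename, "w") as out_file:
        for word in sorted(concordance):
            positions = " ".join(str(p) for p in concordance[word])
            out_file.write(f"{word}: {positions}\n")

write_concordance_dict({"apple": [1, 4, 9], "pear": [2]}, "concordance.txt")
```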
Builds a k-factor circulant matrix (a matrix with the structure of a circulant matrix, but with the entries above the diagonal multiplied by the same factor). The matrix is stored in memory. | import numpy as np
from scipy.linalg import circulant

def factor_circulant_matrix(x, k):
    n = len(x)
    # Lower triangle (including the diagonal) keeps the circulant entries;
    # the strictly upper triangle is scaled by the factor k.
    return circulant(x) * (np.tri(n, n, 0) + k * np.transpose(np.tri(n, n, -1))) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_k_matrix(self):\r\n K = self.uv_vol + self.Epsilon * self.guv_vol + \\\r\n (self.Epsilon / self.Beta) * self.uv_bound\r\n return K",
"def _K(m):\n M = m*(m - 1)/2\n K = np.zeros((M, m**2), dtype=np.int64)\n row = 0\n for j in range(1, m):\n col = (j - 1)*m + j\n s = m - j\n K[row:(row+s), col:(col+s)] = np.eye(s)\n row += s\n return K",
"def K(self):\n\n # Calculate and return the stiffness matrix in global coordinates\n return matmul(matmul(inv(self.T()), self.k()), self.T())",
"def cofactorMatrix(self):\n returnvalue = Matrix()\n for i in range(self._height):\n newRow = list()\n for j in range(self._width):\n newRow.append(self.cofactor(i, j))\n returnvalue.addRow(*newRow)\n return returnvalue",
"def nCk(n, k):\n return factorial(n)//factorial(k)//factorial(n-k)",
"def calc_big_K(T, n_factors, tau, var_n, out=None):\n if out is None:\n K = np.zeros((T * n_factors, T * n_factors))\n else:\n K = out\n for delta_t in range(T):\n diag = calc_K(tau, delta_t, var_n)\n diag = np.tile(diag, T - delta_t)\n idxs_0 = np.arange(0, (T - delta_t) * n_factors)\n idxs_1 = np.arange(delta_t * n_factors, T * n_factors)\n K[idxs_0, idxs_1] = diag\n K[idxs_1, idxs_0] = diag\n return K",
"def nCr(n, k):\n if n < k:\n return 0\n f = math.factorial\n return f(n) / f(k) / f(n - k)",
"def jordan_wigner_ladder_sparse(n_qubits, tensor_factor, ladder_type):\n parities = tensor_factor * [pauli_z_csc]\n identities = [\n scipy.sparse.identity(2**(n_qubits - tensor_factor - 1),\n dtype=complex,\n format='csc')\n ]\n if ladder_type:\n operator = kronecker_operators(parities + [q_raise_csc] + identities)\n else:\n operator = kronecker_operators(parities + [q_lower_csc] + identities)\n return operator",
"def ckm(i,j):\n if i >= 1 and i <= 3 and j >= 1 and j <= 3:\n return _ckm_abs[i-1, j-1]\n else:\n raise(ValueError('Wrong generation index in CKM matrix: ({},{}).'.format(i,j)))",
"def power_matrix(A, k):\n nrow = np.shape(A)[0]\n A0 = np.identity(nrow) \n for k in range(q):\n A0 = np.dot(A0, A)\n \n return A0",
"def factor_circulant_multiplication(u, x, k=1):\n n = len(u) \n D_k = (k**(1/n))**np.arange(0,n)\n Lambda = fft(D_k*x)\n return (1/D_k)*real(ifft(Lambda*fft(D_k*u))) # y",
"def Cijkl(C):\n c = np.zeros(shape=(3, 3, 3, 3))\n CC = np.zeros(shape=(9, 9))\n CC[0:6, 0:6] = C[0:6, 0:6]\n CC[6:9, 6:9] = C[3:6, 3:6]\n CC[0:6, 6:9] = C[0:6, 3:6]\n CC[6:9, 0:6] = C[3:6, 0:6]\n\n c[0, 0, 0, 0] = CC[0, 0]\n c[0, 0, 1, 1] = CC[0, 1]\n c[0, 0, 2, 2] = CC[0, 2]\n c[0, 0, 1, 2] = CC[0, 3]\n c[0, 0, 2, 0] = CC[0, 4]\n c[0, 0, 0, 1] = CC[0, 5]\n c[0, 0, 2, 1] = CC[0, 6]\n c[0, 0, 0, 2] = CC[0, 7]\n c[0, 0, 1, 0] = CC[0, 8]\n\n c[1, 1, 0, 0] = CC[1, 0]\n c[1, 1, 1, 1] = CC[1, 1]\n c[1, 1, 2, 2] = CC[1, 2]\n c[1, 1, 1, 2] = CC[1, 3]\n c[1, 1, 2, 0] = CC[1, 4]\n c[1, 1, 0, 1] = CC[1, 5]\n c[1, 1, 2, 1] = CC[1, 6]\n c[1, 1, 0, 2] = CC[1, 7]\n c[1, 1, 1, 0] = CC[1, 8]\n\n c[2, 2, 0, 0] = CC[2, 0]\n c[2, 2, 1, 1] = CC[2, 1]\n c[2, 2, 2, 2] = CC[2, 2]\n c[2, 2, 1, 2] = CC[2, 3]\n c[2, 2, 2, 0] = CC[2, 4]\n c[2, 2, 0, 1] = CC[2, 5]\n c[2, 2, 2, 1] = CC[2, 6]\n c[2, 2, 0, 2] = CC[2, 7]\n c[2, 2, 1, 0] = CC[2, 8]\n\n c[1, 2, 0, 0] = CC[3, 0]\n c[1, 2, 1, 1] = CC[3, 1]\n c[1, 2, 2, 2] = CC[3, 2]\n c[1, 2, 1, 2] = CC[3, 3]\n c[1, 2, 2, 0] = CC[3, 4]\n c[1, 2, 0, 1] = CC[3, 5]\n c[1, 2, 2, 1] = CC[3, 6]\n c[1, 2, 0, 2] = CC[3, 7]\n c[1, 2, 1, 0] = CC[3, 8]\n\n c[2, 0, 0, 0] = CC[4, 0]\n c[2, 0, 1, 1] = CC[4, 1]\n c[2, 0, 2, 2] = CC[4, 2]\n c[2, 0, 1, 2] = CC[4, 3]\n c[2, 0, 2, 0] = CC[4, 4]\n c[2, 0, 0, 1] = CC[4, 5]\n c[2, 0, 2, 1] = CC[4, 6]\n c[2, 0, 0, 2] = CC[4, 7]\n c[2, 0, 1, 0] = CC[4, 8]\n\n c[0, 1, 0, 0] = CC[5, 0]\n c[0, 1, 1, 1] = CC[5, 1]\n c[0, 1, 2, 2] = CC[5, 2]\n c[0, 1, 1, 2] = CC[5, 3]\n c[0, 1, 2, 0] = CC[5, 4]\n c[0, 1, 0, 1] = CC[5, 5]\n c[0, 1, 2, 1] = CC[5, 6]\n c[0, 1, 0, 2] = CC[5, 7]\n c[0, 1, 1, 0] = CC[5, 8]\n\n c[2, 1, 0, 0] = CC[6, 0]\n c[2, 1, 1, 1] = CC[6, 1]\n c[2, 1, 2, 2] = CC[6, 2]\n c[2, 1, 1, 2] = CC[6, 3]\n c[2, 1, 2, 0] = CC[6, 4]\n c[2, 1, 0, 1] = CC[6, 5]\n c[2, 1, 2, 1] = CC[6, 6]\n c[2, 1, 0, 2] = CC[6, 7]\n c[2, 1, 1, 0] = CC[6, 8]\n\n c[0, 2, 0, 0] = CC[7, 0]\n c[0, 2, 1, 1] = CC[7, 1]\n c[0, 2, 2, 2] = CC[7, 2]\n c[0, 2, 1, 2] = CC[7, 3]\n c[0, 2, 2, 0] = CC[7, 4]\n c[0, 2, 0, 1] = CC[7, 5]\n c[0, 2, 2, 1] = CC[7, 6]\n c[0, 2, 0, 2] = CC[7, 7]\n c[0, 2, 1, 0] = CC[7, 8]\n\n c[1, 0, 0, 0] = CC[8, 0]\n c[1, 0, 1, 1] = CC[8, 1]\n c[1, 0, 2, 2] = CC[8, 2]\n c[1, 0, 1, 2] = CC[8, 3]\n c[1, 0, 2, 0] = CC[8, 4]\n c[1, 0, 0, 1] = CC[8, 5]\n c[1, 0, 2, 1] = CC[8, 6]\n c[1, 0, 0, 2] = CC[8, 7]\n c[1, 0, 1, 0] = CC[8, 8]\n return c",
"def expansion_matrix_c(self):\n row = np.zeros(0)\n nnz = 0\n col = np.arange(nnz, dtype=np.int)\n data = np.zeros(nnz)\n return csr_matrix((data, (row, col)), shape=(self.ng, nnz))",
"def _Kdiag(self, X):\r\n return self.mapping.f(X).flatten()**2",
"def matrix_K1(l, omega, S, cn, csn, rhos, rho):\n zt = omega * S / cn['t']\n xt = omega * S / csn['t']\n row1 = np.array((- d21(l, zt), d23(l, xt)))\n row2 = np.array((- d41(l, zt), d43(l, xt, zt, rhos, rho)))\n return np.array((row1, row2))",
"def k(self):\n return add(self.k_b(), self.k_m())",
"def _compute_kTable(self, expand=False, factor=False, simplify=False):\n if self._has(\"k\"):\n return\n if self._has(\"p\"):\n k = tuple(self._.p[0, i, i] for i in range(self._.d + 1))\n else:\n if not self._has(\"P\"):\n self.eigenmatrix(expand=expand, factor=factor,\n simplify=simplify)\n k = tuple(integralize(x) for x in self._.P[0])\n assert k[0] == 1, \\\n \"the valency of the first relation is not 1\"\n self._.k = k",
"def kronecker_graph(g, k, add_self_edges=True, strip_self_edges=True):\n\n adj = nx.adjacency_matrix(g).todense()\n if add_self_edges:\n for i in range(len(adj)):\n adj[i, i] = 1\n mat = adj\n for i in range(k - 1):\n mat = np.kron(mat, adj)\n if strip_self_edges:\n for i in range(len(mat)):\n mat[i, i] = 0\n name = \"kronecker(%s, %s, %s, %s)\" % (\n g.name if g.name else hash(g), k, add_self_edges, strip_self_edges)\n return nx.Graph(mat, name=name)",
"def nCkarray(*k_values):\n result = 1\n for i, j in enumerate((m for k in k_values for m in range(1, k+1)), 1):\n result = (result * i) // j\n return result",
"def calc_k(self):\n\t\n\tself.k = -np.array([self.sth*self.cphi, self.sth*self.sphi, self.cth])\n\n\treturn",
"def cdf(self, k):\n\n if k < 0 or k > self.n:\n return 0\n\n k = int(k)\n ans = 0\n for i in range(0, k + 1):\n ans += self.pmf(i)\n return ans",
"def _knn_matrix(x, k=16, self_loop=True):\n x = x.transpose(2, 1).squeeze(-1)\n batch_size, n_points, n_dims = x.shape\n if self_loop:\n _, nn_idx = torch.topk(-_pairwise_distance(x.detach()), k=k)\n else:\n _, nn_idx = torch.topk(-_pairwise_distance(x.detach()), k=k+1)\n nn_idx = nn_idx[:, :, 1:]\n center_idx = torch.arange(0, n_points).repeat(batch_size, k, 1).transpose(2, 1)\n center_idx = center_idx.to(x.device)\n return torch.stack((nn_idx, center_idx), dim=0)",
"def matrices(self):\n # Creating L\n L = scipy.sparse.diags((self.inv_dx2, -2*self.inv_dx2, self.inv_dx2, 1),\n (-(self.N+1), -self.N, -(self.N-1), self.N),\n shape=(2*self.N, 2*self.N), dtype=np.complex128)\n self.L = scipy.sparse.csr_matrix(L)\n self.L[-(self.N+1), 0], self.L[-1, -self.N] = 0, 0\n\n # Computing largest eigenvalue of L explicitely:\n self.mu_max = self.inv_dx*np.sqrt(2*(1 + np.cos(np.pi/(self.N+1))))\n\n # Creating K\n self.K = scipy.sparse.diags((-self.inv_dx2, 2*self.inv_dx2, -self.inv_dx2),\n (-1, 0, 1), # Diagonals\n shape=(self.N, self.N), # Size of matrix\n dtype=np.complex128)",
"def kronecker(self, value):\n if not (type(self) == type(value)):\n raise TypeError(\"Inappropriate argument type for kronecker product\")\n returnvalue = Matrix()\n for i in range(self._height):\n for j in range(value._height):\n newRow = list()\n for k in range(self._width):\n for l in range(value._width):\n newRow.append(self[i][k] * value[j][l])\n returnvalue.addRow(*newRow)\n return returnvalue",
"def __factor_matrix(self, R, K, alpha, steps, beta, error_limit):\n # Transform regular array to numpy array\n R = numpy.array(R)\n\n # Generate P - N x K\n # Use random values to start. Best performance\n N = len(R)\n M = len(R[0])\n P = numpy.random.rand(N, K)\n\n # Generate Q - M x K\n # Use random values to start. Best performance\n Q = numpy.random.rand(M, K)\n Q = Q.T\n\n error = 0\n\n # iterate through max # of steps\n for step in xrange(steps):\n\n # iterate each cell in r\n for i in xrange(len(R)):\n for j in xrange(len(R[i])):\n if R[i][j] > 0:\n\n # get the eij (error) side of the equation\n eij = R[i][j] - numpy.dot(P[i, :], Q[:, j])\n\n for k in xrange(K):\n # (*update_rule) update pik_hat\n P[i][k] = P[i][k] + alpha * (2 * eij * Q[k][j] - beta * P[i][k])\n\n # (*update_rule) update qkj_hat\n Q[k][j] = Q[k][j] + alpha * ( 2 * eij * P[i][k] - beta * Q[k][j] )\n\n # Measure error\n error = self.__error(R, P, Q, K, beta)\n\n # Terminate when we converge\n if error < error_limit:\n break\n\n # track Q, P (learned params)\n # Q = Products x feature strength\n # P = Users x feature strength\n self.Q = Q.T\n self.P = P\n\n self.__print_fit_stats(error, N, M)",
"def C(n,k):\n if 0 <= k <= n:\n ntok = 1\n ktok = 1\n for t in xrange(1, min(k, n - k) + 1):\n ntok *= n\n ktok *= t\n n -= 1\n return ntok // ktok\n else:\n return 0",
"def make_mat_cp_le(cons_pot_mesh, lin_geo_mesh):\n pot_faces = cons_pot_mesh.get_faces()\n assert pot_faces.shape[0] == lin_geo_mesh.get_faces().shape[0]\n num_faces = pot_faces.shape[0]\n K = np.zeros((3 * num_faces, 3 * num_faces))\n add_cp_le_DL_terms(K, cons_pot_mesh, lin_geo_mesh)\n add_cp_le_RBM_terms(K, cons_pot_mesh, lin_geo_mesh)\n return K",
"def fastdiag_solver(KM):\n dim = len(KM)\n n = tuple(K.shape[0] for (K,_) in KM)\n EV = [scipy.linalg.eigh(_asdense(K), _asdense(M)) for (K,M) in KM]\n\n diags = []\n for d in range(dim):\n D = [np.ones(n[j]) for j in range(dim)]\n D[d] = EV[d][0] # eigenvalues\n diags.append(reduce(np.kron, D))\n diag = sum(diags)\n\n l_op = KroneckerOperator(*tuple(U for (_,U) in EV))\n r_op = KroneckerOperator(*tuple(U.T for (_,U) in EV))\n\n return l_op * DiagonalOperator(1.0 / diag) * r_op",
"def make_mat_cp_qe(cons_pot_mesh, quad_geo_mesh):\n pot_faces = cons_pot_mesh.get_faces()\n assert pot_faces.shape[0] == quad_geo_mesh.get_faces().shape[0]\n num_faces = pot_faces.shape[0]\n K = np.zeros((3 * num_faces, 3 * num_faces))\n add_cp_qe_DL_terms(K, cons_pot_mesh, quad_geo_mesh)\n add_cp_qe_RBM_terms(K, cons_pot_mesh, quad_geo_mesh)\n return K",
"def bc_outgoing_mat(n, h, k):\n \n d = [1.0, 2.0j*k*h]\n i = [n-1, n-1]\n j = [n-2, n-1]\n return scipy.sparse.coo_matrix((d, (i, j)))"
] | [
"0.6495986",
"0.6089255",
"0.6045119",
"0.59890914",
"0.5949488",
"0.59035623",
"0.5859298",
"0.58462423",
"0.57634705",
"0.574443",
"0.5730508",
"0.5717386",
"0.56819576",
"0.566873",
"0.5568253",
"0.55545205",
"0.5523086",
"0.55172205",
"0.5492196",
"0.5491694",
"0.5478032",
"0.545727",
"0.54372895",
"0.5429208",
"0.54242074",
"0.54238397",
"0.5373548",
"0.5370893",
"0.5370422",
"0.5327783"
] | 0.78092545 | 0 |
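
For intuition, a small self-contained check of the structure (a sketch built from the definition above; requires NumPy and SciPy):

```python
import numpy as np
from scipy.linalg import circulant

def factor_circulant_matrix(x, k):
    n = len(x)
    return circulant(x) * (np.tri(n, n, 0) + k * np.transpose(np.tri(n, n, -1)))

C = factor_circulant_matrix(np.array([1.0, 2.0, 3.0]), 5.0)
print(C)
# [[ 1. 15. 10.]
#  [ 2.  1. 15.]
#  [ 3.  2.  1.]]
# Below the diagonal the circulant entries are unchanged;
# above the diagonal each entry is multiplied by k = 5.
```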
Compute the matrix-vector product y = Cu, where C is a k-factor circulant matrix. All matrices are real. | import numpy as np
from numpy import real
from numpy.fft import fft, ifft

def factor_circulant_multiplication(u, x, k=1):
    n = len(u)
    D_k = (k**(1/n))**np.arange(0, n)  # diagonal scaling to an ordinary circulant
    Lambda = fft(D_k*x)
    return (1/D_k)*real(ifft(Lambda*fft(D_k*u)))  # y
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def updateC(A, U, B):\n \n m_dim = A.shape[1] \n q_dim = B.shape[0]\n \n C_tensor = np.zeros((m_dim, m_dim, q_dim), dtype=np.complex)\n \n for k in range(q_dim):\n A_k = A[:, :, k]\n b_k = B[k]\n \n x_hat = U @ b_k\n y_hat = A_k.conj().T @ x_hat\n \n phase_y = np.exp(1j*np.angle(y_hat))\n #phase_y = np.sign(y_hat)\n C_k = np.diag(phase_y)\n C_tensor[:, :, k] = C_k\n \n \n return C_tensor",
"def matrix_vector_prod(m,u):\n each_product = []\n for v in m:\n each_product.append(dot_prod(v, u))\n return each_product",
"def factor_circulant_matrix(x, k):\n n=len(x)\n return circulant(x) * (tri(n,n, 0) + k*np.transpose(tri(n,n, -1)))",
"def covar(fx,cx):\n \n fx = np.array(fx)\n cx = np.array(cx)\n \n shape_fx = fx.shape\n shape_cx = cx.shape\n \n \n if shape_fx[1] != shape_cx[0]:\n print('-----------------------------------------')\n print(\"Shapes of fx and cx cannot be multiplied:\")\n print(shape_fx,\"x\",shape_cx)\n print('-----------------------------------------')\n raise ValueError('Input matrices are not compliant')\n \n cy = np.dot(np.dot(fx,cx),fx.T)\n \n print(\"Size of Cy matrix: \",np.shape(cy))\n \n return cy",
"def __matmul__(self, q: np.ndarray) -> np.ndarray:\n return self.product(q)",
"def cofactorMatrix(self):\n returnvalue = Matrix()\n for i in range(self._height):\n newRow = list()\n for j in range(self._width):\n newRow.append(self.cofactor(i, j))\n returnvalue.addRow(*newRow)\n return returnvalue",
"def c_matrix(x1,x2,x3):\n\tC = np.array([\t[\t2*(x2-x1), \t\t(x2-x1), \t\t\t0\t\t\t], \\\n\t\t\t\t\t[\t(x2-x1), \t\t2*(x3-x1), \t\t(x3-x2)\t\t], \\\n\t\t\t\t\t[\t0,\t\t\t\t(x3-x2),\t\t2*(x3-x2)\t] \t], \\\n\t\t\t\t\tfloat)\n\treturn(C)",
"def __matmul__(self, csys):\n self._transform(csys)\n return self",
"def method3(self):\n cres=0.\n Ux_aloc=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n Uy_aloc=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n for ix in range(self.kS.Nx+1):\n for iy in range(self.kS.Ny+1):\n mat1=self.ALDM[ix ,iy, : , : ]\n mat2=self.ALDM[(ix%self.kS.Nx)+1, iy, : , : ]\n mat3=self.ALDM[ix ,(iy%self.kS.Ny)+1, : , : ]\n \n Ux_aloc[ix,iy]=np.linalg.det(np.dot(np.conj(mat1.T),mat2)[self.NL-1:,self.NL-1:])\n Uy_aloc[ix,iy]=np.linalg.det(np.dot(np.conj(mat1.T),mat3)[self.NL-1:,self.NL-1:])\n\n for ix in range(self.kS.Nx):\n for iy in range(self.kS.Ny):\n ftemp=np.log(Ux_aloc[ix,iy]*Uy_aloc[ix+1,iy]/Ux_aloc[ix,iy+1]/Uy_aloc[ix,iy])\n cres+=ftemp/2./pi/1j\n \n return cres.real\n #End of method3",
"def circulant_multiplication(u, a):\n \n return real(ifft(fft(a)*fft(u)))",
"def toeplitz_multiplication(u, c, r=None):\n n = len(u)\n if r is None:\n r = c\n u1 = zeros((2*n))\n u1[0:n] = u\n \n c = np.concatenate((c, [0], r[-1:0:-1])) \n \n y1 = circulant_multiplication(u1, c)\n \n return y1[0:n]",
"def compute_factor(X, v, c1, c2):\n\n assert np.shape(v)[1] == 1,\"v is not a column vector\"\n\n v = normalize_l2(v)\n\n sz_u = np.shape(X)[0]\n sz_v = np.shape(X)[1]\n\n assert sz_v == np.size(v)\n\n u = update_with_delta(X @ v, c1)\n v = update_with_delta(X.T @ u, c2)\n\n delta_u = 1000\n delta_v = 1000\n\n while delta_u > 1e-5 or delta_v > 1e-5:\n oldU = u\n oldV = v\n\n u = update_with_delta(X @ v, c1)\n v = update_with_delta(X.T @ u, c2)\n\n delta_u = npla.norm(u - oldU) / sz_u\n delta_v = npla.norm(v - oldV) / sz_v\n\n d = u.T @ X @ v\n\n return (d,u,v)",
"def kronecker_prod(x, y):\n if len(list(x.size())) != 3 or len(list(y.size())) != 3:\n raise ValueError(\"An input is not of the right dimension.\")\n\n z = torch.zeros(\n 2,\n x.size()[1] * y.size()[1],\n x.size()[2] * y.size()[2],\n dtype=torch.double,\n device=x.device,\n )\n\n row_count = 0\n\n for i in range(x.size()[1]):\n for k in range(y.size()[1]):\n column_count = 0\n for j in range(x.size()[2]):\n for l in range(y.size()[2]):\n\n z[0][row_count][column_count] = (x[0][i][j] * y[0][k][l]) - (\n x[1][i][j] * y[1][k][l]\n )\n z[1][row_count][column_count] = (x[0][i][j] * y[1][k][l]) + (\n x[1][i][j] * y[0][k][l]\n )\n\n column_count += 1\n row_count += 1\n\n return z",
"def simple_doct_product(u, v):\n v = [i / (sum(v)) for i in v]\n\n return np.dot(u, v)",
"def cofactor_matrix(self):\n resp = []\n len_b = len(self.take_vec())\n for i in range(self.order):\n _matrix = aux.cofactor(self.take_matrix(),\n (i, self.order-1)\n )\n _resp = math.pow(-1, len_b-1)\n _resp = _resp * np.linalg.det(_matrix)\n _resp = _resp * math.pow(-1, i * (self.order-1))\n resp.append(int(round(_resp)))\n\n return resp",
"def _C(self):\n\n # Find the local x and y coordinates at each node\n xi = 0\n yi = 0\n xj = self.width()\n yj = 0\n xm = xj\n ym = self.height()\n xn = 0\n yn = ym\n\n # Calculate the [C] coefficient matrix\n C = array([[1, xi, yi, xi**2, xi*yi, yi**2, xi**3, xi**2*yi, xi*yi**2, yi**3, xi**3*yi, xi*yi**3],\n [0, 0, 1, 0, xi, 2*yi, 0, xi**2, 2*xi*yi, 3*yi**2, xi**3, 3*xi*yi**2],\n [0, -1, 0, -2*xi, -yi, 0, -3*xi**2, -2*xi*yi, -yi**2, 0, -3*xi**2*yi, -yi**3],\n \n [1, xj, yj, xj**2, xj*yj, yj**2, xj**3, xj**2*yj, xj*yj**2, yj**3, xj**3*yj, xj*yj**3],\n [0, 0, 1, 0, xj, 2*yj, 0, xj**2, 2*xj*yj, 3*yj**2, xj**3, 3*xj*yj**2],\n [0, -1, 0, -2*xj, -yj, 0, -3*xj**2, -2*xj*yj, -yj**2, 0, -3*xj**2*yj, -yj**3],\n\n [1, xm, ym, xm**2, xm*ym, ym**2, xm**3, xm**2*ym, xm*ym**2, ym**3, xm**3*ym, xm*ym**3],\n [0, 0, 1, 0, xm, 2*ym, 0, xm**2, 2*xm*ym, 3*ym**2, xm**3, 3*xm*ym**2],\n [0, -1, 0, -2*xm, -ym, 0, -3*xm**2, -2*xm*ym, -ym**2, 0, -3*xm**2*ym, -ym**3],\n\n [1, xn, yn, xn**2, xn*yn, yn**2, xn**3, xn**2*yn, xn*yn**2, yn**3, xn**3*yn, xn*yn**3],\n [0, 0, 1, 0, xn, 2*yn, 0, xn**2, 2*xn*yn, 3*yn**2, xn**3, 3*xn*yn**2],\n [0, -1, 0, -2*xn, -yn, 0, -3*xn**2, -2*xn*yn, -yn**2, 0, -3*xn**2*yn, -yn**3]])\n \n # Return the coefficient matrix\n return C",
"def m_c(self) -> np.ndarray:\n assert self._k is not None, \"camera must be calibrated\"\n return forge_projective_matrix(self._k)",
"def matmul(x, y):\n return np.matmul(x, y)",
"def test_two_qubit_weyl_decomposition_cnot(self):\n for k1l, k1r, k2l, k2r in K1K2S:\n k1 = np.kron(k1l.data, k1r.data)\n k2 = np.kron(k2l.data, k2r.data)\n a = Ud(np.pi / 4, 0, 0)\n self.check_two_qubit_weyl_decomposition(k1 @ a @ k2)",
"def cofiCostFunc(self,params, *args):\n\t\tY, R, num_users, num_products, num_features,l = args[0], args[1],args[2], args[3],args[4],args[5]\n\n\t\taux = params.reshape((num_products + num_users, num_features))\n\n\t\tX = aux[0:num_products , :]\n\n\t\tTheta = aux[num_products:, :] \n\n\t\ttest = np.dot(X,Theta.transpose())\n\t\ttest = test - Y\n\t\ttest = np.multiply(test , R)\n\t\ttest = np.power(test,2)\n\t\ttest = test.sum()\n\t\ttest = 0.5 * test\n\n\t\tJ = 0;\n\t\tregularization = (l * 0.5) * np.power(X,2).sum() + np.power(Theta,2).sum()\n\n\t\tJ = test# + regularization\n\n\t\treturn J",
"def zzX_mul_term(f, c, k):\n if poly_univariate_p(f):\n return zzx_mul_term(f, c, k)\n elif zzX_zero_p(f):\n return f\n elif zzX_zero_p(c):\n return zzX_zero_of(f)\n else:\n return [ zzX_mul(c, coeff) for coeff in f ] + zzX_zeros_of(f, k, 1)",
"def form_matrix_yt(w):\r\n M = np.zeros((len(w),len(w)))\r\n for i in range(len(w)):\r\n for j in range(len(w)):\r\n M[i,j] = YoungTableaux(w[i],w[j]).CMNR()\r\n return M",
"def method1(self):\n cres=0. # Variable for storing Chern number.\n # The U matrices from Fukui's method; storage...\n Ux=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n Uy=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n \n # ... and calculation of U matrices\n for ix in range(self.kS.Nx+1):\n for iy in range(self.kS.Ny+1):\n mat1=self.alleigvecs[:,:,ix ,iy ]\n if ix<self.kS.Nx:\n mat2=self.alleigvecs[:,:,ix+1,iy ]\n else:\n mat2=self.alleigvecs[:,:,1 ,iy ]\n if iy<self.kS.Ny:\n mat3=self.alleigvecs[:,:,ix ,iy+1]\n else:\n mat3=self.alleigvecs[:,:,ix ,1 ]\n Ux[ix,iy]=np.linalg.det(np.dot(np.conj(mat1.T),mat2)[:self.NL,:self.NL])\n Uy[ix,iy]=np.linalg.det(np.dot(np.conj(mat1.T),mat3)[:self.NL,:self.NL])\n \n # Local estimates of Berry curvature; storage ...\n ftempall=np.zeros((self.kS.Nx,self.kS.Ny),complex)\n # ... and calculation\n for ix in range(self.kS.Nx):\n for iy in range(self.kS.Ny):\n ftemp=np.log(Ux[ix,iy]*Uy[ix+1,iy]/Ux[ix,iy+1]/Uy[ix,iy])\n ftempall[ix,iy]=ftemp # ... of local Berry curvature ...\n cres+=ftemp/2./pi/1j # ... and of Berry phase (Chern number).\n\n return cres.real, ftempall",
"def dot_kf(u, v):\n # TODO: implement the kernel function\n\n counter = 0\n if len(u)==len(v):\n for i in range(len(u)):\n counter = counter + (u[i]*v[i])\n return counter",
"def _set_u_matirx(self):\n c_matrix = self.get_c_matrix()\n u_matrix, d_matrix, _ = np.linalg.svd(c_matrix)\n self.u_matrix = np.matrix(u_matrix)",
"def reduce_C(self, C_on_basis_vecs):\n self.C_reduced = np.mat(np.array(C_on_basis_vecs, ndmin=2))\n return self.C_reduced",
"def reduce_C(self, C_on_basis_vecs):\n self.C_reduced = np.mat(np.array(C_on_basis_vecs, ndmin=2).T)\n return self.C_reduced",
"def p_ym_c(pm,px,py,pyx_c,pmx_c):\n pym_c = np.zeros((py.size,pm.size))\n for yi in range(py.size):\n for mi in range(pm.size):\n for xi in range(px.size):\n pym_c[yi,mi] += (1./pm[mi])*pyx_c[yi,xi]*pmx_c[mi,xi]*px[xi]\n return pym_c",
"def zzx_mul_term(f, c, k):\n if not c or not f:\n return []\n else:\n return [ c * coeff for coeff in f ] + [INT_ZERO]*k",
"def _build_c_phi_matrices(self, t: tf.Tensor) -> tf.Tensor:\n c_phi_matrices = self.kernel.compute_c_phi(t, t)\\\n + tf.expand_dims(tf.eye(self.n_points_int, dtype=tf.float64), 0)\\\n * self.likelihood_variances\n return c_phi_matrices"
] | [
"0.6325033",
"0.6273725",
"0.6251581",
"0.62479377",
"0.6177961",
"0.6087597",
"0.6022537",
"0.60215706",
"0.6020421",
"0.60090333",
"0.6000697",
"0.5998053",
"0.59429264",
"0.59204763",
"0.58713275",
"0.5850264",
"0.5813686",
"0.57964927",
"0.57901424",
"0.57262236",
"0.57260317",
"0.5713855",
"0.571201",
"0.5704799",
"0.57028663",
"0.5689596",
"0.5675992",
"0.56757015",
"0.5666318",
"0.5655894"
] | 0.693636 | 0 |
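
The FFT trick works because D C D^{-1} is an ordinary circulant matrix for D = diag(k^(j/n)). A quick sanity check against the dense k-factor circulant matrix (a sketch; combines the two functions defined in the rows above):

```python
import numpy as np
from numpy import real
from numpy.fft import fft, ifft
from scipy.linalg import circulant

n, k = 8, 3.0
rng = np.random.default_rng(0)
x, u = rng.standard_normal(n), rng.standard_normal(n)

# Dense k-factor circulant matrix with first column x
C = circulant(x) * (np.tri(n, n, 0) + k * np.transpose(np.tri(n, n, -1)))

# FFT-based product
D_k = (k**(1/n))**np.arange(0, n)
y = (1/D_k) * real(ifft(fft(D_k*x) * fft(D_k*u)))

print(np.allclose(C @ u, y))  # True
```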
Solves Tx = b using the Levinson algorithm, where T is a positive-definite symmetric Toeplitz matrix and b is a real vector. | from numpy import zeros, dot

def levinson(r, b):
    n = len(b)
    y = zeros((n,))
    x = zeros((n,))
    # normalize the system so that the T matrix has a diagonal of ones
    r_0 = r/r[0]
    b_0 = b/r[0]
    if n == 1:
        return b_0
    y[0] = -r_0[1]
    x[0] = b_0[0]
    beta = 1
    alpha = -r_0[1]
    for k in range(0, n-1):
        beta = (1 - alpha*alpha)*beta
        mu = (b_0[k+1] - dot(r_0[1:k+2], x[k::-1]))/beta
        x[0:k+1] = x[0:k+1] + mu*y[k::-1]
        x[k+1] = mu
        if k < n-2:
            alpha = -(r_0[k+2] + dot(r_0[1:k+2], y[k::-1]))/beta
            y[0:k+1] = y[0:k+1] + alpha*y[k::-1]
            y[k+1] = alpha
    return x | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _tridisolve(d, e, b, overwrite_b=True):\n\t\tN = len(b)\n\t\t# work vectors\n\t\tdw = d.copy()\n\t\tew = e.copy()\n\t\tif overwrite_b:\n\t\t\tx = b\n\t\telse:\n\t\t\tx = b.copy()\n\t\tfor k in range(1, N):\n\t\t\t# e^(k-1) = e(k-1) / d(k-1)\n\t\t\t# d(k) = d(k) - e^(k-1)e(k-1) / d(k-1)\n\t\t\tt = ew[ k - 1 ]\n\t\t\tew[ k - 1 ] = t / dw[ k - 1 ]\n\t\t\tdw[ k ] = dw[ k ] - t * ew[ k - 1 ]\n\t\tfor k in range(1, N):\n\t\t\tx[ k ] = x[ k ] - ew[ k - 1 ] * x[ k - 1 ]\n\t\tx[ N - 1 ] = x[ N - 1 ] / dw[ N - 1 ]\n\t\tfor k in range(N - 2, -1, -1):\n\t\t\tx[ k ] = x[ k ] / dw[ k ] - ew[ k ] * x[ k + 1 ]\n\n\t\tif not overwrite_b:\n\t\t\treturn x",
"def tridisolve(d, e, b, overwrite_b=True):\r\n N = len(b)\r\n # work vectors\r\n dw = d.copy()\r\n ew = e.copy()\r\n if overwrite_b:\r\n x = b\r\n else:\r\n x = b.copy()\r\n for k in range(1, N):\r\n # e^(k-1) = e(k-1) / d(k-1)\r\n # d(k) = d(k) - e^(k-1)e(k-1) / d(k-1)\r\n t = ew[k - 1]\r\n ew[k - 1] = t / dw[k - 1]\r\n dw[k] = dw[k] - t * ew[k - 1]\r\n for k in range(1, N):\r\n x[k] = x[k] - ew[k - 1] * x[k - 1]\r\n x[N - 1] = x[N - 1] / dw[N - 1]\r\n for k in range(N - 2, -1, -1):\r\n x[k] = x[k] / dw[k] - ew[k] * x[k + 1]\r\n\r\n if not overwrite_b:\r\n return x",
"def housetriang_solve(A, b):\n\n n, _ = A.shape\n b = np.reshape(b.copy(), (n, 1))\n R, c = housetriang(A, b)\n x = np.reshape(rbackwardsolve(R, c, n), (n,))\n\n\n return x",
"def trisolve(l, u, c, b):\n n = shape(b)[0]\n for k in range(1, n):\n b[k] -= l[k-1]*b[k - 1]\n b[n-1] /= u[n-1]\n for k in range(n-2,-1,-1):\n b[k] -= c[k]*b[k + 1]\n b[k] /= u[k]",
"def ftlan_E1c(hop, v0, T, m=50, Min_b=10e-10, Min_m=5, kB=1, norm = np.linalg.norm):\n# def Tri_diag(a1, b1):\n# mat = np.diag(b1, -1) + np.diag(a1, 0) + np.diag(b1, 1)\n# e, w = np.linalg.eigh(mat)\n# return e, w\n\n beta = 1./(T * kB)\n E = 0.\n a, b = [], []\n v0 = v0/norm(v0)\n Hv = hop(v0)\n a.append(v0.dot(Hv))\n v1 = Hv - a[0] * v0\n b.append(norm(v1))\n if b[0] < Min_b:\n return 0\n\n v1 = v1/b[0]\n Hv = hop(v1)\n a.append(v1.dot(Hv))\n\n for i in range(1, m - 1):\n v2 = Hv - b[i - 1] * v0 - a[i] * v1\n b.append(norm(v2))\n if abs(b[i]) < Min_b:\n b.pop()\n break\n\n v2 = v2/b[i]\n Hv = hop(v2)\n a.append(v2.dot(Hv))\n v0 = v1.copy()\n v1 = v2.copy()\n \n a = np.asarray(a)\n b = np.asarray(b)\n\n eps, phi = Tri_diag(a, b)\n l = len(eps)\n# Eo = eps[0]\n# eps = eps-Eo\n exp_eps = np.exp(-beta * eps)\n E = np.sum(exp_eps * eps * phi[0, :]**2.)\n Z = np.sum(exp_eps * phi[0, :]**2.)\n# for i in range(len(eps)):\n# E += exp_eps[i] * eps[i] * phi[0, i]**2\n\n# E = E + Eo\n# de = eps[:, np.newaxis] - eps\n# for i in range(l):\n# E += eps[i] * phi[0, i]**2./np.sum(np.exp(-beta*de[:l, i])*(phi[0, :l]**2.))\n return E, Z",
"def SelfDualNewtonSystem(A, b, c, e):\n \n n = A.shape[1]\n m = A.shape[0]\n \n b_bar = b - np.matmul(A,e)\n c_bar = c - e\n alpha = 1 + np.dot(c, e)\n beta = n + 2\n \n A_star = np.c_[A,-b,b_bar]\n C = np.zeros((n+2,n+2))\n C[0:n,n] = c\n C[n,0:n] = -C[0:n,n].T\n C[0:n,n+1] = -c_bar\n C[n+1,0:n] = -C[0:n,n+1].T\n C[n,n+1] = alpha\n C[n+1,n] = -C[n,n+1].T\n \n yA = np.r_[np.zeros((m,m)), -A_star.T, np.zeros((n+2, m))]\n xA = np.r_[A_star, C, np.eye(n+2)]\n sA = np.r_[np.zeros((m, n+2)), -np.eye(n+2), np.eye(n+2)]\n \n return np.c_[yA, xA, sA]",
"def stbinv(A, B, C, D, y, t):\n # Description to help the user\n\n # calculate the number of samples of the output\n N = np.shape(y)[\n 1\n ] # the number of samples is the number of columns of the data matrix y\n\n # calculate system's dimensions: number of states and number of inputs\n m = B.shape[1] # number of inputs\n n = A.shape[0] # number of states\n\n # initialize the variable v (additional input)\n v = np.zeros((n, N)) # it will be important later\n\n # initializing the flag variable\n flag = 0\n # initializing the flag variable for the vrft method\n flag_vr = 0\n # initializing the counter of reduction steps done by the algorithm\n kround = 0\n\n # starting the loop of the reduction procedure\n while flag == 0:\n # run a step of the reduction order algorithm\n Ahat, Bhat, Chat, Dhat, yhat, vhat, nhat, phat, rhat = invredc(A, B, C, D, y, v)\n # increase the counter of reductions\n kround = kround + 1\n\n # preallocating the state vector of the inverse system\n xhat = np.zeros((nhat, N - kround)) # it must have N-kround samples\n # preallocating the calculated input\n uhat = np.zeros((m, N - kround))\n\n # defining the reduced time vector\n tt = t[:, 0 : N - kround]\n\n # test the conditions of invertibility\n if phat < m:\n # if this condition is true, then the algorithm has failed and it is not possible to find the inverse\n flag = 1\n flag_vr = 1\n # if this is the case, we print a message and end the execution\n # print('The inversion algorithm has failed')\n return uhat, tt, flag_vr\n else:\n if rhat == m:\n # ((rhat==m)&(rhat==phat)):\n # if this condition is true, then the algorithm is done. We can calculate the signal u\n flag = 2\n # calculating the inverse of the feedforward matrix\n # E=np.linalg.inv(Dhat)\n E = np.linalg.pinv(Dhat)\n else:\n # if none of the conditions above is true, then we need to proceed to another round of the reduction step of the algorithm\n A = Ahat\n B = Bhat\n C = Chat\n D = Dhat\n y = yhat\n v = vhat\n # after the reduction procedure is done, then the system can be inverted\n\n # calculating the dynamic matrix of the inverse system\n Ainv = Ahat - Bhat @ E @ Chat\n # eigenvalues of the inverse system's dynamic matrix\n wv, v = np.linalg.eig(Ainv) # w=eigenvalues, v=eigenvectors\n # calculating the input matrix of the inverse system\n Binv = Bhat @ E\n # calculating the output matrix of the inverse system\n Cinv = -E @ Chat\n # calculating the feedforward matrix of the inverse system\n Dinv = E\n\n # test if the inverse dynamic system is stable\n wbool = wv > 1\n wsum = np.sum(wbool)\n # test if wsum is greater than 1\n if wsum > 0:\n # if wsum is greater than 1, then, the inverse system is unstable, so we end the execution of the algorithm\n # print('The inverse system is unstable')\n flag_vr = 2\n return uhat, tt, flag_vr\n else:\n # if wsum=0, then the inverse system is stable, and we can calculate the input signal\n # calculate the first value for the output (t=0)\n uhat[:, 0] = Cinv @ xhat[:, 0] + Dinv @ yhat[:, 0]\n # calculate the states and the output of the inverse system\n for k in range(0, N - 1 - kround):\n xhat[:, k + 1] = Ainv @ xhat[:, k] + Binv @ yhat[:, k] + vhat[:, k]\n uhat[:, k + 1] = Cinv @ xhat[:, k + 1] + Dinv @ yhat[:, k + 1]\n\n return uhat, tt, flag_vr",
"def toeplitz_inverse_multiplication_prep(T_column):\n \n phi=1\n psi=2\n assert phi != 0\n assert psi != 0\n assert phi != psi\n \n n = len(T_column)\n \n x = levinson(T_column, np.concatenate( (np.array([1]), np.zeros((n-1,))) ) )\n y = levinson(T_column, np.concatenate( (np.zeros((n-1,)), np.array([1])) ) )\n\n \n \n x_0 = x[0]\n \n D_phi = (phi**(1/n))**np.arange(0,n)\n D_psi = (psi**(1/n))**np.arange(0,n)\n\n Lambda_1 = fft(D_psi*x)\n Lambda_2 = fft(D_phi*np.concatenate(([phi*y[-1]], y[0:-1])))\n Lambda_3 = fft(D_psi*np.concatenate(([psi*y[-1]], y[0:-1])))\n Lambda_4 = fft(D_phi*x)\n \n return (x_0, phi, psi, D_phi, D_psi, Lambda_1, Lambda_2, Lambda_3, Lambda_4)",
"def __solve(self, tsnMat, vecB):\n A_d = np.linalg.inv(np.dot(tsnMat.T, tsnMat))\n return np.dot(np.dot(A_d, tsnMat.T), vecB)",
"def least_squares(y, tx):\n a = tx.T.dot(tx)\n b = tx.T.dot(y)\n w = np.linalg.solve(a,b)\n loss =compute_loss_LS(y,tx,w)\n return loss, w",
"def solveForModeB1(X, M, n, maxInner, epsilon, tol,sita,Y1, lambta2):\n # Pi(n) = [A(N) kr A(N-1) kr ... A(n+1) kr A(n-1) kr .. A(1)]^T\n Pi = tensorTools.calculatePi(X, M, n)\n #print 'Pi size', Pi.shape\n #print 'pi='+str(Pi)\n #print(M.U[n])\n for iter in range(maxInner):\n # Phi = (X(n) elem-div (B Pi)) Pi^T\n #print X.vals.shape,X.shape\n #print X.vals.flatten().shape\n Phi = tensorTools.calculatePhi(X, M.U[n], Pi, n, epsilon=epsilon)\n #print('phi'+str(Phi))\n #print(Phi)\n # check for convergence that min(B(n), E - Phi(n)) = 0 [or close]\n kktModeViolation = np.max(np.abs(np.minimum(M.U[n], 1-Phi).flatten()))\n if (kktModeViolation < tol):\n break\n\n B=M.U[n]\n #print B.shape\n colNorm = np.apply_along_axis(np.linalg.norm, 0, B, 1)\n zeroNorm = np.where(colNorm == 0)[0]\n colNorm[zeroNorm] = 1\n B = B / colNorm[np.newaxis, :]\n tm=np.hstack((np.ones((B.shape[0],1)),B))\n Y1=Y1.reshape((Y1.shape[0],1))\n\n derive=-1.0*lambta2/B.shape[0]*np.dot((Y1-np.dot(tm,sita)),sita.T)\n #print derive.shape\n #print np.multiply(M.U[n],derive[:,1:]).shape\n #print np.multiply(M.U[n],Phi).shape\n M.U[n] = np.array(np.multiply(M.U[n],Phi))-np.array((np.multiply(M.U[n],derive[:,1:])))\n\n #print 'after'\n #print M.U[n][0]\n #print(\" Mode={0}, Inner Iter={1}, KKT violation={2}\".format(n, iter, kktModeViolation))\n return M, Phi, iter, kktModeViolation",
"def solve(matrix, b):\n lu_matrix = decompose_to_LU(matrix)\n # get supporting vector y\n y = np.matrix(np.zeros([lu_matrix.shape[0], 1]), dtype=np.float64)\n for i in range(y.shape[0]):\n y[i, 0] = b[i] - lu_matrix[i, :i] * y[:i]\n\n # get vector of answers x\n x = np.matrix(np.zeros([lu_matrix.shape[0], 1]))\n for i in range(1, x.shape[0] + 1):\n x[-i, 0] = (y[-i] - lu_matrix[-i, -i:] * x[-i:, 0]) / lu_matrix[-i, -i]\n\n return np.array(x.transpose()[0], dtype=np.float64)[0]",
"def forward_committor_sensitivity(T, A, B, index):\n\n n = len(T)\n set_X = numpy.arange(n) # set(range(n))\n set_A = numpy.unique(A) # set(A)\n set_B = numpy.unique(B) # set(B)\n set_AB = numpy.union1d(set_A, set_B) # set_A | set_B\n notAB = numpy.setdiff1d(set_X, set_AB, True) # list(set_X - set_AB)\n m = len(notAB)\n\n K = T - numpy.diag(numpy.ones(n))\n\n U = K[numpy.ix_(notAB.tolist(), notAB.tolist())]\n\n v = numpy.zeros(m)\n\n # for i in xrange(0, m):\n # for k in xrange(0, len(set_B)):\n # v[i] = v[i] - K[notAB[i], B[k]]\n v[:] = v[:] - K[notAB[:], B[:]]\n\n qI = numpy.linalg.solve(U, v)\n\n q_forward = numpy.zeros(n)\n #q_forward[set_A] = 0 # double assignment.\n q_forward[set_B] = 1\n #for i in range(len(notAB)):\n q_forward[notAB[:]] = qI[:]\n\n target = numpy.eye(1, n, index)\n target = target[0, notAB]\n\n UinvVec = numpy.linalg.solve(U.T, target)\n Siab = numpy.zeros((n, n))\n\n for i in range(m):\n Siab[notAB[i]] = - UinvVec[i] * q_forward\n\n return Siab",
"def nnls(A, b, maxiter=None, eps=1e-11):\n m, n = A.shape\n x = np.zeros(n)\n P = []\n Z = list(range(n))\n k = 0\n\n if maxiter is None:\n maxiter = 3 * m\n\n while True:\n if k == maxiter:\n return x\n\n w = np.matmul(A.T, (b - np.matmul(A, x)))\n if Z == [] or np.all(w[Z] <= eps):\n return x\n\n while True:\n\n t = np.argmax(ma.masked_array(w, mask=[not i in Z for i in range(n)]))\n P.append(t)\n Z.remove(t)\n Ap = A.copy()\n Ap[:, Z] = 0\n\n z = np.linalg.lstsq(Ap, b, rcond=None)[0]\n\n if np.all(z[P] > 0):\n x = z\n break\n\n alpha = np.min(ma.masked_array(x / (x - z), mask=[not i in P or z[i] > 0 for i in range(n)]))\n x = x + alpha * (z - x)\n\n T = np.where(x == 0.0)[0]\n Z = [z for z in set(Z + P) if z in Z or z in P and z in T]\n P = [p for p in P if not p in T]\n\n k = k + 1",
"def least_squares(y, tx):\n a = tx.T.dot(tx)\n b = tx.T.dot(y)\n w = np.linalg.solve(a, b)\n loss = compute_cost(y, tx, w)\n return w, loss",
"def ridge_regression(y, tx, lambda_):\n N = tx.shape[0]\n a = tx.T.dot(tx) + 2 * N * lambda_ * np.identity(tx.shape[1])\n b = tx.T.dot(y)\n w = np.linalg.solve(a, b)\n loss = compute_loss_LS(y, tx, w) \n return loss, w",
"def solve_fwd_bkwd(matrix_a, b):\n _L = cholesky(matrix_a) \n _U = transpose_matrix(_L) \n \n n = len(b)\n x = [0 for i in xrange(n)] \n y = [0 for i in xrange(n)] \n\n #forward solve _Ly = b\n for i in xrange(n):\n y[i] = b[i]\n for j in xrange(i):\n\t y[i] -= _L[i][j] * y[j]\n\ty[i] /= _L[i][i]\n\n #backward solve _Ux = y\n for i in xrange(n-1, -1, -1):\n\tx[i] = y[i]\n for j in xrange(i+1, n):\n x[i] -= _U[i][j] * x[j]\n x[i] /= _U[i][i]\n\n return x",
"def lp_acent(A,b,c,x_0):\n #Parameters\n b = b.flatten()\n c = c.flatten()\n ALPHA = 0.01\n BETA = 0.5\n EPSILON = 1e-6\n MAXITERS = 100\n if (np.min(x_0)<=0) and (np.linalg.norm>1e-3):\n print 'failed' \n return 0\n #m = len(b)\n #n = len(x_0)\n lambda_hist = []\n x = x_0\n for iter in range(MAXITERS):\n # H = np.diag(1/np.power(x,3))\n g = c-np.power(x,-1)\n #print g.shape\n #solving KKT system\n w = np.linalg.solve(np.dot(np.dot(A,np.diag(np.power(x,2))),A.T),\n np.dot(np.dot(-A,np.diag(np.power(x,2))),g))\n dx = np.dot(-np.diag(np.power(x,2)),np.dot(A.T,w)+g)\n lambdasqr = np.dot(-g.T,dx) #dx'*T*dx: newton incremental\n lambda_hist.append(lambdasqr/2)\n if lambdasqr/2 <= EPSILON:\n break\n # backtracking line search\n t = 1\n # brin the point inside the domain\n while np.min(x+t*dx)<=0:\n t =BETA*t\n while np.dot(c.T,np.dot(t,dx))-np.sum(np.log(x+t*dx))+np.sum(np.log(x))-ALPHA*t*np.dot(g.T,dx)>0:\n t = BETA*t\n x = x+t*dx\n if iter == MAXITERS:\n print 'ERROR: MAXITERS reached'\n else:\n #plt.figure()\n #plt.plot(range(len(lambda_hist)),lambda_hist,'b-',range(len(lambda_hist)),lambda_hist,'bo')\n return x,w,lambda_hist",
"def least_squares(y, tx):\n a = tx.T.dot(tx)\n b = tx.T.dot(y)\n\n w = np.linalg.solve(a, b)\n loss = compute_loss(y, tx, w)\n return w, loss",
"def SOR_Solve_Opt(A,b,tol=1.0e-6,max_iterations=100,LOUD=False):\n [Nrow, Ncol] = A.shape\n assert Nrow == Ncol\n N = Nrow\n converged = False\n iteration = 1\n omega = 1\n l = 5\n p = 2\n x = np.random.rand(N) #random initial guess \n x_new = np.zeros(N)\n while not(converged):\n x = x_new.copy() #replace old value\n for row in range(N):\n x_new[row] = b[row]\n for column in range(N):\n if column != row:\n x_new[row] -= A[row,column]*x_new[column]\n x_new[row] /= A[row,row]\n x_new[row] = (1.0-omega) * x[row] + omega*x_new[row]\n relative_change = np.linalg.norm(x_new-x)/np.linalg.norm(x_new)\n #record change after iteration k\n if (l==iteration):\n dxl = np.linalg.norm(x_new-x)\n if (l + p == iteration):\n dxlp = np.linalg.norm(x_new-x)\n omega = 2.0/(1.0+np.sqrt(1-(dxlp/dxl)**(1.0/p)))\n if (LOUD):\n print(\"Iteration\",iteration,\": Relative Change =\",relative_change)\n if (relative_change < tol) or (iteration >= max_iterations):\n converged = True\n iteration += 1\n return x_new",
"def _solveX(L, U, b):\n m, n = L.shape\n # Forward Substitution\n y = list()\n y.insert(0, b[0]/L[0][0])\n for i in range(1, m):\n summ = 0\n for k in range(0, i):\n summ += L[i][k]*y[k]\n y.insert(i, (b[i]-summ)/(L[i][i]))\n\n # Backwards Substitution\n x = [0]*m\n x[m-1] = y[m-1] / U[m-1][m-1]\n for i in range(m - 2, -1, -1):\n summ = 0\n for k in range(i+1, n):\n summ += U[i][k]*x[k]\n x[i] = (y[i] - summ)/U[i][i]\n\n return x",
"def f(self,un,tn):\n return -self.a(tn)*un + self.b(tn)",
"def project_L1_ball(x: \"fasta.linalg.Vector\", t: float) -> \"fasta.linalg.Vector\":\n # By Moreau's identity, we convert to proximal of dual problem (L-inf norm)\n return x - project_Linf_ball(x, t)",
"def wasserstein(X,t,p,lam=10,its=10,sq=False,backpropT=False):\n\n it = torch.where(t > 0)[0] # getting the positions\n ic = torch.where(t < 1)[0]\n\n Xt = torch.index_select(X, 0, it) # Getting the nx100 for each value\n Xc = torch.index_select(X, 0, ic)\n\n nc = Xc.shape[0]\n nt = Xt.shape[0]\n\n ''' Compute distance matrix'''\n if sq:\n M = pdist2sq(Xt,Xc)\n else:\n M = safe_sqrt(pdist2sq(Xt,Xc))\n\n ''' Estimate lambda and delta '''\n M_mean = torch.mean(M)\n M_drop = torch.nn.Dropout(10/(nc*nt))(M)\n delta = torch.max(M)\n eff_lam = lam/M_mean\n\n ''' Compute new distance matrix '''\n Mt = M\n row = delta*torch.ones(M.shape[1])\n col = torch.cat((delta*torch.ones(M.shape[0]),torch.zeros((1))),0)\n Mt = torch.cat((M, torch.unsqueeze(row, 0)), 0)\n Mt = torch.cat((Mt, torch.unsqueeze(col, 1)), 1)\n\n ''' Compute marginal vectors '''\n temp = torch.where(t > 0)[0].shape\n a = torch.cat((p * torch.ones((torch.where(t > 0)[0].shape[0],1)) / nt, (1 - p) * torch.ones((1,1))), 0)\n b = torch.cat(((1-p) * torch.ones((torch.where(t < 1)[0].shape[0],1)) / nc, p * torch.ones((1,1))), 0)\n\n ''' Compute kernel matrix'''\n Mlam = eff_lam*Mt\n K = torch.exp(-Mlam) + 1e-6 # added constant to avoid nan\n U = K*Mt\n ainvK = K/a\n\n u = a\n for i in range(0,its):\n temp = torch.transpose(torch.matmul(torch.transpose(u,0,1),K),0,1)\n u = 1.0/(torch.matmul(ainvK,( b / temp)))\n temp = torch.transpose(torch.matmul(torch.transpose(u,0,1),K),0,1)\n v = b/(temp)\n\n T = u*(torch.transpose(v,0,1)*K)\n\n E = T*Mt\n D = 2*torch.sum(E)\n\n return D, Mlam",
"def solve_L(L, b):\n n = b.size\n assert L.shape == (n,n)\n x = zeros(n)\n for i in range(n):\n x[i] = (b[i] - dot(x[:i], L[i,:i])) / L[i,i]\n if not numpy.isfinite(x[i]):\n x[i] = 0.0\n return x",
"def least_squares(y, tx):\n a = tx.T.dot(tx)\n b = tx.T.dot(y)\n\n w = np.linalg.solve(a, b)\n return w, compute_mse(y, tx, w)",
"def rawsolve(self,):\n m = self.m\n n = self.n\n z = self.z\n mark = self.mark\n kAAt = self.kAAt\n iAAt = self.iAAt\n AAt = self.AAt\n diag = self.diag\n consistent = True\n eps = 0.0\n m2 = m+n\n\n if self.ndep:\n eps = self.epssol * np.abs(z).max()\n\n #/*------------------------------------------------------+\n #| |\n #| -1 |\n #| z <- L z |\n #| */\n\n for i in range(m2):\n if mark[i]:\n beta = z[i]\n for k in range(kAAt[i], kAAt[i+1]):\n row = iAAt[k]\n z[row] -= AAt[k]*beta\n elif abs(z[i]) > eps:\n consistent = False\n else:\n z[i] = 0.0\n\n #/*------------------------------------------------------+\n #| |\n #| -1 |\n #| z <- D z |\n #| */\n\n for i in range(m2-1, -1, -1):\n if mark[i]:\n z[i] = z[i]/diag[i]\n elif abs(z[i]) > eps:\n consistent = False\n else:\n z[i] = 0.0\n\n #/*------------------------------------------------------+\n #| |\n #| t -1 |\n #| z <- (L ) z |\n #| */\n\n for i in range(m2-1, -1, -1):\n if mark[i]:\n beta = z[i]\n for k in range(kAAt[i], kAAt[i+1]):\n beta -= AAt[k]*z[iAAt[k]]\n z[i] = beta\n elif abs(z[i]) > eps:\n consistent = False\n else:\n z[i] = 0.0\n\n return consistent",
"def solve_LF(self):\n self.u = zeros(self.N)\n self.u[0] = self.u0\n self.u[1] = self.u1\n u = self.u\n f= self.f\n dt = self.dt\n t = self.t\n N = self.N\n for n in xrange(1,N-1):\n u[n+1] = 2*dt*f(u[n],t[n]) + u[n-1]\n #return t,u",
"def SYR_forward(b, alpha, V, s0, y0, T=100):\n n = len(y0)\n\n du = np.zeros(n+1)\n u0 = np.zeros(n+1)\n u0[0] = s0\n u0[1:] = y0\n \n def f(t,u):\n s = u[0]\n y = u[1:]\n force = np.dot(y,b) # Force of infection\n du[0] = - s*force\n du[1:] = s*force*alpha - np.dot(V,y)\n return du\n\n times = np.linspace(0,T,10000)\n solution = solve_ivp(f,[0,T],u0,t_eval=times,method='RK23',max_step=0.1)\n s = solution.y[0,:]\n y = solution.y[1:,:]\n t = solution.t\n \n return s, y, t",
"def newton_decent_directions(function, func_derivative, func_hessian, xk, A, P, b, q, t):\r\n # calculate steepest decent direction\r\n newton_dir = -np.dot(np.linalg.inv(func_hessian(x=xk, A=A, P=P, b=b, q=q, t=t)), func_derivative(x=xk, A=A, P=P, b=b, q=q, t=t))\r\n\r\n return newton_dir"
] | [
"0.63466734",
"0.61827254",
"0.61033237",
"0.6093494",
"0.60769826",
"0.5885008",
"0.58844715",
"0.5877297",
"0.58737326",
"0.58588946",
"0.5838278",
"0.5794063",
"0.57753825",
"0.5773156",
"0.5763559",
"0.57562786",
"0.574674",
"0.57452273",
"0.57390094",
"0.57179475",
"0.5697003",
"0.5690629",
"0.5688454",
"0.56821567",
"0.567632",
"0.56759006",
"0.56697446",
"0.56637865",
"0.5655258",
"0.5654495"
] | 0.7257071 | 0 |
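
A quick correctness check against a dense solver (a sketch; assumes the levinson function above, plus NumPy and SciPy):

```python
import numpy as np
from scipy.linalg import toeplitz

r = np.array([4.0, 1.0, 0.5, 0.25])  # first column; diagonally dominant, so T is positive definite
b = np.array([1.0, 2.0, 3.0, 4.0])
T = toeplitz(r)

print(np.allclose(levinson(r, b), np.linalg.solve(T, b)))  # True
```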
Compute the log determinant of a positive-definite symmetric Toeplitz matrix. The determinant is computed recursively. The intermediate solutions of the Levinson recursion are exploited. | import numpy as np
from numpy import zeros, dot

def toeplitz_slogdet(r):
    n = len(r)
    r_0 = r[0]
    r = np.concatenate((r, np.array([r_0])))
    r /= r_0  # normalize the system so that the T matrix has a diagonal of ones
    logdet = n*np.log(np.abs(r_0))
    sign = np.sign(r_0)**n
    if n == 1:
        return (sign, logdet)
    # what follows is a modification of the Levinson algorithm
    y = zeros((n,))
    x = zeros((n,))
    b = -r[1:n+1]
    r = r[:n]
    y[0] = -r[1]
    x[0] = b[0]
    beta = 1
    alpha = -r[1]
    d = 1 + dot(-b[0], x[0])
    sign *= np.sign(d)
    logdet += np.log(np.abs(d))
    for k in range(0, n-2):
        beta = (1 - alpha*alpha)*beta
        mu = (b[k+1] - dot(r[1:k+2], x[k::-1]))/beta
        x[0:k+1] = x[0:k+1] + mu*y[k::-1]
        x[k+1] = mu
        d = 1 + dot(-b[0:k+2], x[0:k+2])
        sign *= np.sign(d)
        logdet += np.log(np.abs(d))
        if k < n-2:
            alpha = -(r[k+2] + dot(r[1:k+2], y[k::-1]))/beta
            y[0:k+1] = y[0:k+1] + alpha*y[k::-1]
            y[k+1] = alpha
    return (sign, logdet) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fast_logdet(matrix):\n sign, ld = np.linalg.slogdet(matrix)\n if not sign > 0:\n return -np.inf\n return ld",
"def pddet(A):\r\n L = jitchol(A)\r\n logdetA = 2*sum(np.log(np.diag(L)))\r\n return logdetA",
"def log_abs_det_jacobian(self, z):\n pre_u = self.u_ + self.u\n pre_w = self.w_ + self.w\n a = F.softplus(self.a + self.inv)\n w = F.softmax(pre_w, dim=3)\n u = F.softmax(pre_u, dim=3)\n # Perform computation\n pre_sigm = torch.sum(u * a * z, 3) + self.b\n sigm = torch.sigmoid(pre_sigm)\n x_pre = torch.sum(w * sigm, dim=3)\n x_pre_clipped = x_pre * (1 - self.eps) + self.eps * 0.5\n logj = F.log_softmax(pre_w, dim=3) + logsigmoid(pre_sigm) + logsigmoid(-pre_sigm) + torch.log(a)\n # n, d, d2, dh\n logj = logj + F.log_softmax(pre_u, dim=3)\n # n, d, d2, dh, d1\n logj = torch.log(torch.sum(torch.exp(logj),3))\n # n, d, d2, d1\n logdet_ = logj + np.log(1 - self.eps) - (torch.log(x_pre_clipped) + torch.log(-x_pre_clipped + 1))\n return logdet_",
"def _compute_log_det_cholesky(matrix_chol, covariance_type, n_features):\n if covariance_type == 'full':\n n_components, _, _ = matrix_chol.shape\n log_det_chol = (np.sum(np.log(\n matrix_chol.reshape(\n n_components, -1)[:, ::n_features + 1]), 1))\n\n elif covariance_type == 'tied':\n log_det_chol = (np.sum(np.log(np.diag(matrix_chol))))\n\n elif covariance_type == 'diag':\n log_det_chol = (np.sum(np.log(matrix_chol), axis=1))\n\n else:\n log_det_chol = n_features * (np.log(matrix_chol))\n\n return log_det_chol",
"def log_abs_det_jacobian(self, z):\n self.a = F.softplus(self.a)\n self.w = F.softmax(self.w, dim=1)\n pre_sigm = self.a * z + self.b\n sigm = torch.sigmoid(pre_sigm)\n x_pre = self.w * sigm\n if (len(z.shape) > 2):\n x_pre = torch.sum(self.w * sigm, dim=1)\n x_pre_clipped = x_pre * (1 - self.eps) + self.eps * 0.5\n logj = F.log_softmax(self.w, dim=1) + logsigmoid(pre_sigm) + logsigmoid(-pre_sigm) + torch.log(self.a)\n logj = torch.log(torch.sum(torch.exp(logj)))#,2).sum(2)\n logdet = logj + np.log(1 - self.eps) - (torch.log(x_pre_clipped) + torch.log(-x_pre_clipped + 1))\n return sum_dims(logdet)",
"def log_abs_det_jacobian(self, x, y, intermediates=None):\n if intermediates is None:\n logdet = self.bn_arn(x)[1]\n return logdet.sum(-1)\n else:\n logdet = intermediates\n return logdet.sum(-1)",
"def _forward_log_det_jacobian(self, x):\n d = self._compute_shared(x=x)\n relx = (x - d.x_k) / d.w_k\n relx = relx # tf.where(d.out_of_bounds, 0.5*tf.ones_like(x), relx)\n grad = (\n 2 * tf.math.log(d.s_k) +\n tf.math.log(d.d_kp1 * relx**2 + 2 * d.s_k * relx * (1 - relx) + # newln\n d.d_k * (1 - relx)**2) -\n 2 * tf.math.log((d.d_kp1 + d.d_k - 2 * d.s_k) * relx *\n (1 - relx) + d.s_k))\n return grad # tf.where(d.out_of_bounds, tf.zeros_like(grad), grad)",
"def determinant(self):\n if self.cols != self.rows:\n raise Exception ('Matrix is not square!')\n for i in range(self.rows):\n if self.values[i][i] == 0:\n raise Exception ('There is zero on the main diagonal')\n #TODO: Rearrange the lines, that the main diagonal don't have a zero values \n\n arr = self.values[:]\n for i in range(self.rows):\n for j in range(self.cols):\n diag = [arr[l][p] for p in range(self.cols) for l in range(self.rows) if l == p ]\n if i > j :\n arr2 = arr[i][j]/diag[j]\n arr1 = [round(x * arr2, 4) for x in arr[i-i+j]]\n arr[i] = map(lambda x,y: round(x - y, 4) , arr[i], arr1 )\n\n diag = [arr[l][p] for p in range(self.cols) for l in range(self.rows) if l == p ]\n det = 1\n for i in range(len(diag)):\n det *= diag[i]\n if det != 0 :\n return True\n else:\n return False",
"def _inverse_log_det_jacobian(self, x):\n alpha, beta = self._get_alpha_beta()\n diff = x - self.x0\n r = tf.linalg.norm(diff, axis=-1, keepdims=True)\n h = 1. / (alpha + r)\n h_prime = -(h ** 2)\n beta_h = beta * h\n log_det_jacobian = tf.reduce_sum(\n (self.dim - 1) * tf.math.log1p(beta_h)\n + tf.math.log1p(beta_h + beta * h_prime * r), axis=-1)\n return log_det_jacobian",
"def determinant(self):\n if self.n_rows != self.n_cols:\n raise Exception('Matrix is not square')\n if self.n_rows == 2:\n return (self.data[0][0] * self.data[1][1]) - (self.data[1][0] * self.data[0][1])\n else:\n echelon, ops = reduce_to_echelon(self.data.copy(), True)\n swaps = sum([1 if row[0] == 'swap' else 0 for row in ops])\n return math.prod([echelon[i][i] for i in range(len(echelon))]) * (-1) ** swaps",
"def log_abs_det_jacobian(self, x, y, intermediates=None):\n if intermediates is None:\n log_scale = self.arn(x)[1]\n log_scale = _clamp_preserve_gradients(\n log_scale, self.log_scale_min_clip, self.log_scale_max_clip\n )\n return log_scale.sum(-1)\n else:\n log_scale = intermediates\n return log_scale.sum(-1)",
"def determinant_fast(A):\n # Section 1: Establish n parameter and copy A\n n = len(A)\n AM = copy_matrix(A)\n\n # Section 2: Row manipulate A into an upper triangle matrix\n for fd in range(n): # fd stands for focus diagonal\n if AM[fd][fd] == 0: \n AM[fd][fd] = 1.0e-18 # Cheating by adding zero + ~zero\n for i in range(fd+1,n): # skip row with fd in it.\n crScaler = AM[i][fd] / AM[fd][fd] # cr stands for \"current row\".\n for j in range(n): # cr - crScaler * fdRow, but one element at a time.\n AM[i][j] = AM[i][j] - crScaler * AM[fd][j]\n \n # Section 3: Once AM is in upper triangle form ...\n product = 1.0\n for i in range(n):\n product *= AM[i][i] # ... product of diagonals is determinant\n\n return product",
"def determinant(A):\n \n total = 0\n\n if len(A) == 1:\n return A[0][0]\n\n for col in range(len(A)):\n Asub = A[1:]\n for j in range(len(A)-1):\n Asub[j] = Asub[j][:col] + Asub[j][col+1:]\n subdet = determinant(Asub)\n sign = (-1) ** (col % 2)\n total += sign * A[0][col] * subdet\n\n return total",
"def determinant(self) -> float:\n num_R, num_C = self.shape()\n assert num_R == num_C, f\"Determinant must be for a square matrix; this one is {self.shape()}.\"\n # -------------------------------------------------------\n # TODO: You write this one.\n # Note: this one should be recursive....\n if num_R == 1:\n return self.mat[0][0]\n det =0\n for i in range(num_R):\n det += self.mat[0][i] * self.get_minor(0,i).determinant() * (-1)**i\n return det\n pass # remove this when you add your code.\n # -------------------------------------------------------",
"def compute_det(self, log_progress=False):\n if not self.is_square():\n raise Exception(u\"Not a square matrix\")\n\n mat = clone_matrix(self.coefficients)\n size = self.get_size()[0]\n\n for i in range(size - 1):\n for j in range(i + 1, size):\n for k in range(i + 1, size):\n mat[j][k] = (mat[j][k] * mat[i][i]) - (mat[j][i] * mat[i][k])\n if i > 0:\n mat[j][k] //= mat[i - 1][i - 1]\n if log_progress:\n print(i)\n if i > 0:\n for j in range(size):\n mat[j][i - 1] = 0\n mat[i - 1][j] = 0\n\n return mat[size - 1][size - 1]",
"def determinant(matrix):\n if type(matrix) is not list or len(matrix) == 0:\n raise TypeError(\"matrix must be a list of lists\")\n\n if len(matrix) == 1 and len(matrix[0]) == 0:\n return 1\n\n for i in matrix:\n if type(i) is not list:\n raise TypeError(\"matrix must be a list of lists\")\n\n if len(i) != len(matrix):\n raise ValueError(\"matrix must be a square matrix\")\n\n if len(matrix) == 1:\n return matrix[0][0]\n\n if len(matrix) == 2:\n return (matrix[0][0] * matrix[1][1]) - (matrix[0][1]\n * matrix[1][0])\n deter = 0\n\n for j, k in enumerate(matrix[0]):\n rows = [r for r in matrix[1:]]\n sub = []\n for r in rows:\n sub.append([r[a] for a in range(len(matrix)) if a != j])\n deter += k * (-1) ** j * determinant(sub)\n return deter",
"def det(A):\n # Section 1: Establish n parameter and copy A\n n = len(A)\n AM = A[:]\n\n # Section 2: Row manipulate A into an upper triangle matrix\n for fd in range(n): # fd stands for focus diagonal\n if AM[fd][fd] == 0:\n AM[fd][fd] = 1.0e-18 # Cheating by adding zero + ~zero\n for i in range(fd+1, n): # skip row with fd in it.\n crScaler = AM[i][fd] / AM[fd][fd] # cr stands for \"current row\".\n for j in range(n): # cr - crScaler * fdRow, one element at a time.\n AM[i][j] = AM[i][j] - crScaler * AM[fd][j]\n\n # Section 3: Once AM is in upper triangle form ...\n product = 1.0\n for i in range(n):\n product *= AM[i][i] # ... product of diagonals is determinant\n\n return product",
"def local_det_chol(node):\r\n if node.op == det:\r\n x, = node.inputs\r\n for (cl, xpos) in x.clients:\r\n if isinstance(cl.op, Cholesky):\r\n L = cl.outputs[0]\r\n return [tensor.prod(extract_diag(L) ** 2)]",
"def logp(value, mu, rowchol, colchol):\n\n if value.ndim != 2:\n raise ValueError(\"Value must be two dimensional.\")\n\n # Compute Tr[colcov^-1 @ (x - mu).T @ rowcov^-1 @ (x - mu)] and\n # the logdet of colcov and rowcov.\n delta = value - mu\n\n # Find exponent piece by piece\n right_quaddist = solve_lower(rowchol, delta)\n quaddist = pt.nlinalg.matrix_dot(right_quaddist.T, right_quaddist)\n quaddist = solve_lower(colchol, quaddist)\n quaddist = solve_upper(colchol.T, quaddist)\n trquaddist = pt.nlinalg.trace(quaddist)\n\n coldiag = pt.diag(colchol)\n rowdiag = pt.diag(rowchol)\n half_collogdet = pt.sum(pt.log(coldiag)) # logdet(M) = 2*Tr(log(L))\n half_rowlogdet = pt.sum(pt.log(rowdiag)) # Using Cholesky: M = L L^T\n\n m = rowchol.shape[0]\n n = colchol.shape[0]\n\n norm = -0.5 * m * n * pm.floatX(np.log(2 * np.pi))\n return norm - 0.5 * trquaddist - m * half_collogdet - n * half_rowlogdet",
"def determinant(self):\n if self.m != self.n:\n raise exc.LinearAlgebraError(\"cannot calculate the determinant of\"\n \"a non-square matrix\")\n if self.m == 1:\n return self[0, 0]\n # TODO: can we choose a better row/column to improve efficiency\n return functools.reduce(\n lambda x, y: x ^ y,\n [self[0, j] and\n self.subset([i for i in range(1, self.m)],\n [k for k in range(self.n) if k != j]).determinant\n for j in range(self.n)],\n )",
"def Determinant(matrix, mul):\r\n width = len(matrix)\r\n # Stop Conditions\r\n if width == 1:\r\n return mul * matrix[0][0]\r\n else:\r\n sign = -1\r\n det = 0\r\n for i in range(width):\r\n m = []\r\n for j in range(1, width):\r\n buff = []\r\n for k in range(width):\r\n if k != i:\r\n buff.append(matrix[j][k])\r\n m.append(buff)\r\n # Change the sign of the multiply number\r\n sign *= -1\r\n # Recursive call for determinant calculation\r\n det = det + mul * Determinant(m, sign * matrix[0][i])\r\n return det",
"def MvNormalLogp():\n cov = pt.matrix(\"cov\")\n cov.tag.test_value = floatX(np.eye(3))\n delta = pt.matrix(\"delta\")\n delta.tag.test_value = floatX(np.zeros((2, 3)))\n\n cholesky = Cholesky(lower=True, on_error=\"nan\")\n\n n, k = delta.shape\n n, k = f(n), f(k)\n chol_cov = cholesky(cov)\n diag = pt.diag(chol_cov)\n ok = pt.all(diag > 0)\n\n chol_cov = pt.switch(ok, chol_cov, pt.fill(chol_cov, 1))\n delta_trans = solve_lower(chol_cov, delta.T).T\n\n result = n * k * pt.log(f(2) * np.pi)\n result += f(2) * n * pt.sum(pt.log(diag))\n result += (delta_trans ** f(2)).sum()\n result = f(-0.5) * result\n logp = pt.switch(ok, result, -np.inf)\n\n def dlogp(inputs, gradients):\n (g_logp,) = gradients\n cov, delta = inputs\n\n g_logp.tag.test_value = floatX(1.0)\n n, k = delta.shape\n\n chol_cov = cholesky(cov)\n diag = pt.diag(chol_cov)\n ok = pt.all(diag > 0)\n\n chol_cov = pt.switch(ok, chol_cov, pt.fill(chol_cov, 1))\n delta_trans = solve_lower(chol_cov, delta.T).T\n\n inner = n * pt.eye(k) - pt.dot(delta_trans.T, delta_trans)\n g_cov = solve_upper(chol_cov.T, inner)\n g_cov = solve_upper(chol_cov.T, g_cov.T)\n\n tau_delta = solve_upper(chol_cov.T, delta_trans.T)\n g_delta = tau_delta.T\n\n g_cov = pt.switch(ok, g_cov, -np.nan)\n g_delta = pt.switch(ok, g_delta, -np.nan)\n\n return [-0.5 * g_cov * g_logp, -g_delta * g_logp]\n\n return OpFromGraph([cov, delta], [logp], grad_overrides=dlogp, inline=True)",
"def determinant(self):\n if not self.is_square():\n raise(ValueError, \"Cannot calculate determinant of non-square matrix.\")\n if self.h > 2:\n raise(NotImplementedError, \"Calculating determinant not implemented for matrices largerer than 2x2.\")\n\n # TODO - your code here\n if self.h == 1:\n return self.g[0][0] # a 1x1 matrix\n else:\n return ((self.g[0][0] * self.g[1][1]) - (self.g[0][1] * self.g[1][0])) # a 2x2 matrix\n # TODO - your code here",
"def det(a):\n a = copy.deepcopy(a)\n n = len(a)\n det = 1\n com_k = 1\n for k in range(n-1):\n step = 1\n\n while a[k][k] == 0:\n a[k+step], a[k] = a[k], a[k+step]\n det = -det\n step += 1\n mul = a[k][k]\n\n for i in range(k+1, n):\n for j in range(k+1, n):\n a[i][j] *= mul\n a[i][j] -= a[i][k] * a[k][j]\n a[i][j] /= com_k\n\n com_k = mul\n\n det = det * a[-1][-1]\n\n return det",
"def determinant(self):\n if self.m != self.n:\n raise exc.LinearAlgebraError(\"cannot calculate the determinant of\"\n \"a non-square matrix\")\n if self.m == 1:\n return self[0, 0]\n # TODO: can we choose a better row/column to improve efficiency\n return sum([self[0, j] * (-1 if j % 2 else 1) *\n self.subset([i for i in range(1, self.m)],\n [k for k in range(self.n) if k != j]).determinant\n for j in range(self.n)])",
"def log_det_K(self, Ks=None):\n log_det = 0.\n for K in self.Ks:\n rank_d = self.n / K.shape[0]\n det = np.linalg.slogdet(K)[1]\n log_det += rank_d * det\n return log_det",
"def logit_deriv(y):\n# if y.any() < 0.0 or y.any() > 1.0:\n# raise Exception\n\n return y*(1-y)",
"def log_det_K(self, Ks=None):\n Ks = self.Ks if Ks is None else Ks\n log_det = 0.\n for K in Ks:\n rank_d = self.m / K.shape[0]\n det = np.linalg.slogdet(K)[1]\n log_det += rank_d * det\n return log_det",
"def det(self):\n\n if self.rows != self.columns:\n raise ValueError(\"Matrix must be square\")\n\n if self.rows == 1:\n return self.row(1)[0]\n\n if self.rows == 2:\n return self.entry(1,1) * self.entry(2,2) - self.entry(1,2) * self.entry(2,1)\n\n det = 0\n row_to_expand = 1\n\n for i in range(1, self.columns + 1):\n det += self.entry(row_to_expand, i) * self._cofactor(row_to_expand, i)\n\n return det",
"def determinant(matrix):\n if matrix == [[]]:\n return 1\n if type(matrix) is not list or len(matrix) < 1 or\\\n not all(isinstance(x, list) for x in matrix):\n raise TypeError(\"matrix must be a list of lists\")\n if not all(len(matrix) == len(x) for x in matrix):\n raise ValueError(\"matrix must be a square matrix\")\n copy = list(map(list, matrix))\n dim = len(matrix)\n if dim == 1:\n return matrix[0][0]\n elif dim == 2:\n return matrix[0][0] * matrix[1][1] - matrix[1][0] * matrix[0][1]\n else:\n for cur in range(dim):\n for i in range(cur + 1, dim):\n if copy[cur][cur] == 0:\n copy[cur][cur] = 1.0e-10\n curScaler = copy[i][cur] / copy[cur][cur]\n for j in range(dim):\n copy[i][j] = copy[i][j] - curScaler * copy[cur][j]\n det = 1\n for i in range(dim):\n det *= copy[i][i]\n return round(det)"
] | [
"0.7205463",
"0.69225436",
"0.6803772",
"0.6577487",
"0.65662503",
"0.6258033",
"0.6235449",
"0.6192166",
"0.61640286",
"0.60718197",
"0.602648",
"0.5906651",
"0.5904567",
"0.58784807",
"0.58522433",
"0.5850299",
"0.58452636",
"0.5838441",
"0.5796368",
"0.57808894",
"0.5778876",
"0.57782644",
"0.5773432",
"0.5766508",
"0.57584566",
"0.5755279",
"0.5697666",
"0.5686144",
"0.5684613",
"0.56827873"
] | 0.6977162 | 1 |
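The determinant and log-determinant snippets above differ widely in numerical robustness. As a point of reference, here is a minimal sketch (assuming only NumPy; the function name is invented here) of a pivoted-elimination log|det|, cross-checked against numpy.linalg.slogdet:

import numpy as np

def log_abs_det_lu(a):
    # Log |det A| via Gaussian elimination with partial pivoting.
    # A comparison sketch, not a tuned or fully general routine.
    a = np.array(a, dtype=float)
    n = a.shape[0]
    log_det = 0.0
    for k in range(n):
        p = k + int(np.argmax(np.abs(a[k:, k])))  # pick the largest pivot
        if a[p, k] == 0.0:
            return -np.inf  # singular matrix: |det| = 0
        if p != k:
            a[[k, p]] = a[[p, k]]  # row swap; sign is irrelevant for |det|
        log_det += np.log(abs(a[k, k]))  # accumulate log of the U pivot
        a[k + 1:, k:] -= np.outer(a[k + 1:, k] / a[k, k], a[k, k:])
    return log_det

m = np.random.default_rng(0).normal(size=(5, 5))
assert np.isclose(log_abs_det_lu(m), np.linalg.slogdet(m)[1])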
Preprocessing needed for toeplitz_inverse_multiplication() | def toeplitz_inverse_multiplication_prep(T_column):
phi=1
psi=2
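# phi and psi appear to be free parameters of the inversion formula;
# the asserts below require them to be distinct and nonzero.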
assert phi != 0
assert psi != 0
assert phi != psi
n = len(T_column)
x = levinson(T_column, np.concatenate( (np.array([1]), np.zeros((n-1,))) ) )
y = levinson(T_column, np.concatenate( (np.zeros((n-1,)), np.array([1])) ) )
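# Assuming levinson(c, rhs) solves the Toeplitz system T x = rhs, x and y
# are the first and last columns of T^{-1}; they parameterize the
# Gohberg-Semencul-style formula evaluated below with FFTs.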
x_0 = x[0]
D_phi = (phi**(1/n))**np.arange(0,n)
D_psi = (psi**(1/n))**np.arange(0,n)
Lambda_1 = fft(D_psi*x)
Lambda_2 = fft(D_phi*np.concatenate(([phi*y[-1]], y[0:-1])))
Lambda_3 = fft(D_psi*np.concatenate(([psi*y[-1]], y[0:-1])))
Lambda_4 = fft(D_phi*x)
return (x_0, phi, psi, D_phi, D_psi, Lambda_1, Lambda_2, Lambda_3, Lambda_4) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def bd_toeplitz_inverse_multiplication_prep(*arrs):\n \n t = []\n for c in arrs: # loop over each block\n t.append(toeplitz_inverse_multiplication_prep(c))\n return tuple(t)",
"def toeplitz_inverse_multiplication(u, x_0, phi, psi, D_phi, D_psi, Lambda_1, Lambda_2, Lambda_3, Lambda_4):\n\n y = fft(D_phi*u)\n a = Lambda_1*fft(D_psi*(1/D_phi)*ifft(Lambda_2*y))\n b = Lambda_3*fft(D_psi*(1/D_phi)*ifft(Lambda_4*y))\n y = (1/D_psi)*real(ifft(a-b))/(x_0*(phi-psi))\n \n return y",
"def bd_toeplitz_inverse_multiplication(u, *arrs):\n \n y = zeros(shape(u))\n n_start = 0\n n_end = 0\n for t in arrs:\n n_start = n_end\n n_end += len(t[3]) # len(t[3]) is the length of the block\n y[n_start:n_end] = toeplitz_inverse_multiplication(u[n_start:n_end], *t)\n assert len(y) == n_end\n return y",
"def toeplitz_multiplication(u, c, r=None):\n n = len(u)\n if r is None:\n r = c\n u1 = zeros((2*n))\n u1[0:n] = u\n \n c = np.concatenate((c, [0], r[-1:0:-1])) \n \n y1 = circulant_multiplication(u1, c)\n \n return y1[0:n]",
"def de_mult(self,z):\n if isinstance(z,np.ndarray) and z.size>1:\n assert np.all(np.diff(z)>0.)\n return (z+1.)**(3.*(1.+self.w))",
"def back_substitution(U, z):\n n = len(U[0])\n x = [0] * n\n for i in range(n - 1, -1, -1):\n if U[i][i] != 0:\n accum = 0\n for j in range(i, n):\n accum += U[i][j] * x[j]\n x[i] = (z[i] - accum) / U[i][i]\n return x",
"def mul(Z,X,Y):",
"def reconstruct(A, B, z):\n f = factorint(igcd(A, B))\n for p, e in f.items():\n if e != 1:\n raise ValueError('a and b should be square-free')\n z *= p\n return z",
"def preprocessing(ct):\n return value_preprocessing(ct, False)",
"def test_inverse_transform(self):",
"def _z2matmul(self, left, right):\n prod = np.mod(np.dot(left, right), 2)\n return prod",
"def test__inverse_transform_continuous(self):",
"def complex_inverse(c1,cr):",
"def preprocess_features(features):\r\n rowsum = np.array(features.sum(1))\r\n r_inv = np.power(rowsum, -1).flatten()\r\n r_inv[np.isinf(r_inv)] = 0.\r\n r_mat_inv = np.diag(r_inv)\r\n features = r_mat_inv.dot(features)\r\n return features",
"def inverse_fisher_z_transform(z):\r\n return ((e ** (2 * z)) - 1.) / ((e ** (2 * z)) + 1.)",
"def preprocess_features(features):\n rowsum = np.array(features.sum(1))\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n features = r_mat_inv.dot(features)\n return features",
"def preprocess_features(features):\n rowsum = np.array(features.sum(1))\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n features = r_mat_inv.dot(features)\n return features",
"def mul_inplace(a, b):",
"def modular_inverse(self):\n i = gmpy2.invert(self.c2, self.n)\n mx = pow(self.c1, self.a, self.n)\n my = pow(i, int(-self.b), self.n)\n self.m= mx * my % self.n",
"def multInverse(a, m):\n x0 = 1\n x1 = 0\n y0 = 0\n y1 = 1\n\n while m != 0:\n p = a // m\n z = a % m\n a = m\n m = z\n\n w = x1\n x1 = x0 - p * x1\n x0 = w\n \n v = y1\n y1 = y0 - p * y1\n y0 = v\n if(x0):\n return(x0)\n else:\n print(\"multiplicative inverse does not exist\")\n return 0",
"def inv_inplace(a):",
"def de_mult(self,z):\n z = np.asanyarray(z)\n if not (np.any(z<0) or np.any(z>=9.)):\n return self.de_true_interp(z)\n result = np.zeros_like(z)\n result[z<0.] = (z[z<0.]+1.)**(3.*(1.+self.w))\n result[(z>=0.)*(z<9.)] = self.de_true_interp(z[(z>=0.)*(z<9.)])\n result[z>=9.] = np.exp(3.*(_de_exp_const_w(z[z>=9.],self.w)-_de_exp_const_w(9.,self.w)+np.log(self.de_true_interp(9.))/3.))\n if isinstance(z,np.ndarray) and z.size>1:\n assert np.all(np.diff(z)>0.)\n return result",
"def __invert__(self):\n return Factorization([(p,-e) for p,e in reversed(self)],\n cr=self._cr(), unit=self.unit()**(-1))",
"def exp2_inplace(a):",
"def inverse_cayley_transform(z: torch.Tensor) -> torch.Tensor:\n identity = identity_like(z)\n i_identity = multiply_by_i(identity)\n\n z_minus_id = z - i_identity\n inv_z_plus_id = inverse(z + i_identity)\n return z_minus_id @ inv_z_plus_id",
"def local_mul_specialize(node):\r\n # here, we are past the point of canonicalization, so we don't\r\n # want to put in un-necessary fills.\r\n #\r\n # at this point [post canonicalize], mul() may have many inputs.\r\n if node.op == T.mul:\r\n #the idea here is that we have pow(x, y)\r\n neg = False\r\n new_inputs = []\r\n nb_neg_node = 0\r\n nb_cst = 0\r\n for input in node.inputs:\r\n # remove any neg arguments\r\n while input.owner and input.owner.op == T.neg:\r\n neg ^= True\r\n input = input.owner.inputs[0]\r\n nb_neg_node += 1\r\n\r\n # remove special case arguments of 1, -1 or 0\r\n y = local_mul_canonizer.get_constant(input)\r\n if y == 1.0:\r\n nb_cst += 1\r\n elif y == -1.0:\r\n nb_cst += 1\r\n neg ^= True # toggles\r\n elif y == 0.0:\r\n # if we find any zero, we just return right away\r\n return [broadcast_like(0, node.outputs[0], node.fgraph)]\r\n else:\r\n new_inputs.append(input)\r\n\r\n if new_inputs != node.inputs:\r\n if new_inputs:\r\n if len(new_inputs) == 1:\r\n if neg:\r\n rval = -new_inputs[0]\r\n else:\r\n rval = new_inputs[0]\r\n else:\r\n # The next case would cause a replace by an equivalent case.\r\n if (neg and\r\n nb_neg_node == 0 and\r\n nb_cst == 1):\r\n return\r\n elif neg:\r\n # Don't add an extra neg node as we can't\r\n # fully replace this mul by a neg.\r\n m1 = numpy.asarray(-1, dtype=node.outputs[0].dtype)\r\n new_inputs = [m1] + new_inputs\r\n rval = T.mul(*new_inputs)\r\n\r\n return [broadcast_like(rval, node.outputs[0], node.fgraph)]\r\n else:\r\n # there are no variable inputs to mul\r\n # N.B. this could have been constant-folded...\r\n if neg:\r\n return [broadcast_like(-1, node.outputs[0], node.fgraph)]\r\n else:\r\n return [broadcast_like(1, node.outputs[0], node.fgraph)]",
"def inv(z: int) -> int:\n # Adapted from curve25519_athlon.c in djb's Curve25519.\n z2 = z * z % q # 2\n z9 = pow2(z2, 2) * z % q # 9\n z11 = z9 * z2 % q # 11\n z2_5_0 = (z11 * z11) % q * z9 % q # 31 == 2^5 - 2^0\n z2_10_0 = pow2(z2_5_0, 5) * z2_5_0 % q # 2^10 - 2^0\n z2_20_0 = pow2(z2_10_0, 10) * z2_10_0 % q # ...\n z2_40_0 = pow2(z2_20_0, 20) * z2_20_0 % q\n z2_50_0 = pow2(z2_40_0, 10) * z2_10_0 % q\n z2_100_0 = pow2(z2_50_0, 50) * z2_50_0 % q\n z2_200_0 = pow2(z2_100_0, 100) * z2_100_0 % q\n z2_250_0 = pow2(z2_200_0, 50) * z2_50_0 % q # 2^250 - 2^0\n return pow2(z2_250_0, 5) * z11 % q # 2^255 - 2^5 + 11 = q - 2",
"def multiply_by_i(z: torch.Tensor):\n return to_complex(-z.imag, z.real)",
"def preprocess_features(features):\r\n rowsum = np.array(features.sum(1),dtype='float')\r\n r_inv = np.power(rowsum, -1).flatten()\r\n r_inv[np.isinf(r_inv)] = 0.\r\n r_mat_inv = sp.diags(r_inv)\r\n features = r_mat_inv.dot(features)\r\n # return sparse_to_tuple(features)\r\n return features\r\n # print(features)\r\n # rowsum = np.array(features.sum(1),dtype='float')\r\n #\r\n # r_inv = np.power(rowsum, -1).flatten()\r\n # r_inv[np.isinf(r_inv)] = 0.\r\n # r_mat_inv = np.diag(r_inv)\r\n # features = r_mat_inv.dot(features)\r\n # # return sparse_to_tuple(features)\r\n # return features\r",
"def calculate_compressibility_factor(p_in, p_out, temp_in, temp_out):\n temp = np.transpose([200, 300, 400, 500, 600, 800, 1000, 2000])\n\n p = [1, 10, 20, 40, 60, 80, 100, 200, 400, 600, 800, 1000]\n\n z = [\n [1.0007, 1.0066, 1.0134, 1.0275, 1.0422, 1.0575, 1.0734, 1.163, 1.355, 1.555, 1.753, 1.936],\n [1.0005, 1.0059, 1.0117, 1.0236, 1.0357, 1.0479, 1.0603, 1.124, 1.253, 1.383, 1.510, 1.636],\n [1.0004, 1.0048, 1.0096, 1.0192, 1.0289, 1.0386, 1.0484, 1.098, 1.196, 1.293, 1.388, 1.481],\n [1.0004, 1.0040, 1.0080, 1.0160, 1.0240, 1.0320, 1.0400, 1.080, 1.159, 1.236, 1.311, 1.385],\n [1.0003, 1.0034, 1.0068, 1.0136, 1.0204, 1.0272, 1.0340, 1.068, 1.133, 1.197, 1.259, 1.320],\n [1.0002, 1.0026, 1.0052, 1.0104, 1.0156, 1.0208, 1.0259, 1.051, 1.100, 1.147, 1.193, 1.237],\n [1.0002, 1.0021, 1.0042, 1.0084, 1.0126, 1.0168, 1.0209, 1.041, 1.080, 1.117, 1.153, 1.187],\n [1.0009, 1.0013, 1.0023, 1.0044, 1.0065, 1.0086, 1.0107, 1.021, 1.040, 1.057, 1.073, 1.088],\n ]\n\n interp_func = interpolate.interp2d(p, temp, z)\n\n z_in = interp_func(p_in, temp_in)\n z_out = interp_func(p_out, temp_out)\n\n return [z_in, z_out]"
] | [
"0.65743506",
"0.63173485",
"0.60780877",
"0.60345995",
"0.5920918",
"0.5710167",
"0.5684219",
"0.56176597",
"0.56087387",
"0.5590726",
"0.5568226",
"0.556281",
"0.5558012",
"0.5548983",
"0.5540906",
"0.5426001",
"0.5426001",
"0.5406237",
"0.53970987",
"0.5395093",
"0.53894615",
"0.53726643",
"0.53536415",
"0.5352041",
"0.5330332",
"0.53212094",
"0.5295059",
"0.52926826",
"0.5283007",
"0.5263422"
] | 0.65871215 | 0 |
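For a standalone check of the Levinson-based inversion idea behind the prep/apply pair above, SciPy's public solve_toeplitz (also built on Levinson recursion) makes a convenient reference; the module's own levinson and fft helpers are not reimplemented here, and the test matrix below is an arbitrary symmetric positive-definite choice:

import numpy as np
from scipy.linalg import toeplitz, solve_toeplitz

c = np.array([4.0, 1.0, 0.5, 0.25])  # first column of a symmetric Toeplitz T
T = toeplitz(c)
u = np.arange(1.0, 5.0)

y = solve_toeplitz(c, u)  # y = T^{-1} u, computed by Levinson recursion
assert np.allclose(T @ y, u)
assert np.allclose(y, np.linalg.solve(T, u))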
matrix multiplication with the inverse of a block-diagonal matrix having Toeplitz blocks: y = T^{-1} u. Analogous to toeplitz_inverse_multiplication() | def bd_toeplitz_inverse_multiplication(u, *arrs):
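# Each element of arrs is expected to be a tuple produced by
# toeplitz_inverse_multiplication_prep for the corresponding block.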
y = zeros(shape(u))
n_start = 0
n_end = 0
for t in arrs:
n_start = n_end
n_end += len(t[3]) # len(t[3]) is the length of the block
y[n_start:n_end] = toeplitz_inverse_multiplication(u[n_start:n_end], *t)
assert len(y) == n_end
return y | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def toeplitz_inverse_multiplication_prep(T_column):\n \n phi=1\n psi=2\n assert phi != 0\n assert psi != 0\n assert phi != psi\n \n n = len(T_column)\n \n x = levinson(T_column, np.concatenate( (np.array([1]), np.zeros((n-1,))) ) )\n y = levinson(T_column, np.concatenate( (np.zeros((n-1,)), np.array([1])) ) )\n\n \n \n x_0 = x[0]\n \n D_phi = (phi**(1/n))**np.arange(0,n)\n D_psi = (psi**(1/n))**np.arange(0,n)\n\n Lambda_1 = fft(D_psi*x)\n Lambda_2 = fft(D_phi*np.concatenate(([phi*y[-1]], y[0:-1])))\n Lambda_3 = fft(D_psi*np.concatenate(([psi*y[-1]], y[0:-1])))\n Lambda_4 = fft(D_phi*x)\n \n return (x_0, phi, psi, D_phi, D_psi, Lambda_1, Lambda_2, Lambda_3, Lambda_4)",
"def inv(T):\n K, L = T.shape[1:3]\n squ_matrix = np.einsum('ijkl->ikjl', T).reshape((K*L, K*L),order='F')\n t = np.linalg.inv(squ_matrix)\n return np.einsum('ijkl->ikjl', t.reshape((K,L,K,L), order='F'))",
"def inverse_matrice(T):\n a,b,c,d = T[0][0],T[0][1],T[1][0],T[1][1]\n det = a*d-b*c\n aa,bb,cc,dd = d/det,-b/det,-c/det,a/det\n Tinv = [[aa,bb],[cc,dd]]\n return Tinv",
"def inverse(self):\n if not self.is_square():\n raise(ValueError, \"Non-square Matrix does not have an inverse.\")\n if self.h > 2:\n raise(NotImplementedError, \"inversion not implemented for matrices larger than 2x2.\")\n # TODO - your code here\n inverse = []\n if self.h == 1:\n temp = []\n temp.append(1/self.g[0][0])\n inverse.append(temp)\n else:\n identity_matrix = identity(self.h)\n det_term = 1/self.determinant()\n trace_term = self.trace()\n # implement intermediate scaling step locally\n # trace_x_I = trace_term * identity_matrix\n trace_x_I = []\n for i in range(len(self.g)):\n temp_row = []\n for j in range(len(self.g[i])):\n temp_row.append(trace_term * identity_matrix[i][j])\n trace_x_I.append(temp_row)\n # implement sub-traction locally\n # sub_term = trace_x_I - self.g\n sub_term = []\n for i in range(len(trace_x_I)):\n temp_row = []\n for j in range(len(trace_x_I[i])):\n temp_row.append(trace_x_I[i][j] - self.g[i][j])\n sub_term.append(temp_row)\n # implement final scaling step locally\n # inverse = det_term * sub_term\n inverse = []\n for i in range(len(sub_term)):\n temp_row = []\n for j in range(len(sub_term[i])):\n temp_row.append(det_term * sub_term[i][j])\n inverse.append(temp_row)\n return Matrix(inverse)\n # TODO - your code here",
"def getInverseMatrix(self) -> CMatrix4:\n ...",
"def inverse(self):\r\n \r\n Mi=mat4()\r\n d=self.determinant()\r\n for i in range(4):\r\n for j in range(4):\r\n sign=1-((i+j)%2)*2\r\n m3=self._submat(i,j)\r\n Mi[j,i]=sign*m3.determinant()/d\r\n return Mi",
"def inverse_basis(T, dimensions, t):\n B = basis(T, dimensions, t)\n return inv(B.T.dot(B)).dot(B.T)",
"def toeplitz_inverse_multiplication(u, x_0, phi, psi, D_phi, D_psi, Lambda_1, Lambda_2, Lambda_3, Lambda_4):\n\n y = fft(D_phi*u)\n a = Lambda_1*fft(D_psi*(1/D_phi)*ifft(Lambda_2*y))\n b = Lambda_3*fft(D_psi*(1/D_phi)*ifft(Lambda_4*y))\n y = (1/D_psi)*real(ifft(a-b))/(x_0*(phi-psi))\n \n return y",
"def inverse(self, y):\n device = y.device\n return t.einsum('ij,k,kj->ik', y, 1. / t.sqrt(self.eig).to(device), self.rot.to(device))",
"def inverse(self) -> 'Matrix':\n num_R, num_C = self.shape()\n assert num_R == num_C, f\"Must be a square matrix. This one is {self.shape()}.\"\n # -------------------------------------------------------\n # TODO: You write this one.\n\n # 1) Construct the minor_matrix. Feel free to make this a separate method.\n minor_matrix_times_cofactor = Matrix.zeros(self.shape())\n\n for i in range (num_R):\n for j in range(num_C):\n minor_matrix_times_cofactor.mat[i][j] = self.get_minor(i,j).determinant() * (-1)**(i+j)\n\n minor_matrix_times_cofactor.display(message=\"minor\")\n # 2) Calculate the determinant, either by calling the determinant() method or by using the minor_matrix (faster)\n det = 0\n for i in range (num_R):\n det += self.mat[i][0] * minor_matrix_times_cofactor.mat[i][0]\n #print (f\"determinant: {self.determinant()}\")\n # 3) The inverse is the transpose of the minor matrix, divided by the determinant. Make sure that the determinant\n # isn't zero!\n if det == 0:\n return None\n return minor_matrix_times_cofactor.transpose().times(1/det)\n\n return Matrix([[\"Not yet written\"]]) # remove this when you add your code.\n # -------------------------------------------------------",
"def _get_inv(self):\n m,d = self.B.shape\n Im = np.eye(m)\n Id = np.eye(d)\n BBt = self.B@self.B.T\n I_BBt_inv = np.linalg.pinv(Im + BBt)\n \n return (1/self.alpha)*(Id - self.B.T@( I_BBt_inv@self.B/self.alpha))",
"def invert(self):\n if self.m != self.n:\n raise exc.LinearAlgebraError(\"cannot invert a non-square matrix\")\n if self.determinant == 0:\n raise exc.LinearAlgebraError(\"cannot invert a singular matrix\")\n # TODO: implement block matrices in their own method\n block_rows = [r1 + r2 for r1, r2 in\n zip(self.data, self.makeIdentity(self.m).data)]\n inverse_block = Matrix.fromRows(block_rows).row_reduce()\n return inverse_block.subset([i for i in range(self.m)],\n [j + self.n for j in range(self.n)])",
"def _inverse(self, y):\n d = self._compute_shared(y=y)\n rely = y - d.y_k # tf.where(d.out_of_bounds, tf.zeros_like(y), y - d.y_k)\n term2 = rely * (d.d_kp1 + d.d_k - 2 * d.s_k)\n # These terms are the a, b, c terms of the quadratic formula.\n a = d.h_k * (d.s_k - d.d_k) + term2\n b = d.h_k * d.d_k - term2\n c = -d.s_k * rely\n # The expression used here has better numerical behavior for small 4*a*c.\n relx = tf.where(\n tf.equal(rely, 0), tf.zeros_like(a),\n (2 * c) / (-b - tf.sqrt(b**2 - 4 * a * c)))\n return relx * d.w_k + d.x_k #tf.where(d.out_of_bounds, y, relx * d.w_k + d.x_k)",
"def inverse(self):\n self.check_square()\n\n\n N = self.rows\n\n inverse = make_matrix(N, N)\n\n # Solve on a per-column basis using Ax = b formalism\n for j in range(N):\n b = make_matrix(N, 1)\n b[j, 0] = 1\n\n x = self.solve_linear_system(b)\n\n for i in range(N):\n inverse[i, j] = x[i, 0]\n\n return inverse",
"def inv(transform_matrix):\n\n r = transform_matrix[0:3, 0:3]\n t = transform_matrix[0:3, 3]\n t_inv = -1 * r.T.dot(t)\n transform_inv = np.eye(4)\n transform_inv[0:3, 0:3] = r.T\n transform_inv[0:3, 3] = t_inv\n\n return transform_inv",
"def invertMatrixZN(M, N):\n n = M.shape[0] # shape = (nzeilen, nspalten), also shape[0] = nzeilen\n M = M.copy() # nicht an der Originalmatrix rumspielen\n I = np.identity(n, int) # Einheitsmatrix -> wird später das Ergebnis\n for row in range(n):\n if not invertierbar(M[row, row], N):\n # müssen Zeilen tauschen\n for j in range(row+1, n):\n if invertierbar(M[j, row], N):\n tmp = M[row, :].copy()\n M[row, :] = M[j, :]\n M[j, :] = tmp\n tmp = I[row, :].copy()\n I[row, :] = I[j, :]\n I[j, :] = tmp\n break\n else:\n # hier kommen wir hin wenn die for-Schleife nicht durch ein\n # break beendet wurde, also keine geeignete Zeile zum Tauschen\n # existiert\n raise ValueError(\"Matrix nicht invertierbar\")\n # Zeile mit dem Inversen des Pivot-Elements multiplizieren, um eine 1\n # auf der Diagonalen zu erreichen\n faktor = invertZN(M[row, row], N)\n M[row, :] = (M[row, :] * faktor) % N\n I[row, :] = (I[row, :] * faktor) % N\n \n # Nullen unterhalb des aktuellen Pivots erzeugen\n for j in range(row + 1, n):\n if invertierbar(M[j, row], N):\n faktor = invertZN(M[j, row], N)\n M[j, :] = (M[j, :] * faktor - M[row, :]) % N\n I[j, :] = (I[j, :] * faktor - I[row, :]) % N\n elif M[j, row] != 0:\n # In Z_N können Nullteiler auftreten, z.B. die 8 in Z_{12}.\n # Um dort eine 0 zu erzeugen, müssen wir mit dem kgV der beiden\n # Zahlen multiplizieren. Da ggt*kgv = mn gilt, können wir dazu\n # den bereits implementierten ggt-Algorithmus nehmen.\n faktor = N * M[j, row] // krypto1.ggT(N, M[j, row])\n M[j, :] = (M[j, :] * faktor) % N\n I[j, :] = (I[j, :] * faktor) % N\n # jetzt haben wir eine obere Dreiecksmatrix. Um daraus eine Diagonalmatrix\n # zu machen, müssen wir nun noch einmal von unten nach oben durchgehen\n # um die Einträge oberhalb der Diagonalen zu Nullen zu machen.\n for row in range(n-1, -1, -1):\n for j in range(row + 1, n):\n faktor = M[row, j]\n M[row, :] = (M[row, :] - faktor*M[j, :]) % N\n I[row, :] = (I[row, :] - faktor*I[j, :]) % N\n return I",
"def _z2matmul(self, left, right):\n prod = np.mod(np.dot(left, right), 2)\n return prod",
"def inverse(self):\n if self.determinant() != 0:\n ops = reduce_to_red_echelon(self.data.copy(), True)[1]\n matrix = identity_matrix(self.n_rows).data\n \n if ops:\n if isinstance(ops[0], str):\n ops = [ops]\n \n for op in ops:\n if op[0] == 'swap':\n matrix = row_swap(matrix, op[1], op[2])\n elif op[0] == 'multiplication':\n matrix = row_multiply(matrix, op[1], op[2])\n elif op[0] == 'subtract':\n matrix = row_subtract(matrix, op[1], op[2], op[3])\n else:\n raise ValueError('Row operation not recognized')\n else:\n raise ValueError('Matrix has a determinant of 0 and is not invertible')\n return Matrix(matrix)",
"def inverse_cayley_transform(z: torch.Tensor) -> torch.Tensor:\n identity = identity_like(z)\n i_identity = multiply_by_i(identity)\n\n z_minus_id = z - i_identity\n inv_z_plus_id = inverse(z + i_identity)\n return z_minus_id @ inv_z_plus_id",
"def inverse(self, ys):\n with torch.no_grad():\n xs = torch.matmul(ys, torch.diag(torch.reciprocal(torch.exp(self.scaling_diag))))\n xs = self.layer4.inverse(xs)\n xs = self.layer3.inverse(xs)\n xs = self.layer2.inverse(xs)\n xs = self.layer1.inverse(xs)\n return xs",
"def inverse(self):\n # TODO\n # detA\n if not self.is_square():\n raise(\n ValueError, \"Non-square Matrix does not have an inverse.\")\n if self.h > 2:\n raise(\n NotImplementedError, \"inversion not implemented for matrices larger than 2x2.\")\n\n mD = self.determinant()\n if self.h == 1:\n if self.g[0][0] = 0:\n raise(NotImplementedError,\n \"The 1x1 Matrix contains 0 can't inverse\")\n else:\n return [[1 / self.g[0][0]]] \n for i in range(self.h): # Calculates the inverse of a 2x2 Matrix.\n my_Matrix = zeroes(2, 2)\n my_Matrix.g[1][1] = self.g[0][0] / mD\n my_Matrix.g[0][0] = self.g[1][1] / mD\n my_Matrix.g[0][1] = - self.g[0][1] / mD\n my_Matrix.g[1][0] = - self.g[1][0] / mD\n return my_Matrix\n\n # trace A\n # 与矩阵TraceA * I identity 单位矩阵",
"def ComputeInverseInnerOrientation(self):\n a0 = self.innerOrientationParameters[0]\n b0 = self.innerOrientationParameters[1]\n a1 = self.innerOrientationParameters[2]\n a2 = self.innerOrientationParameters[3]\n b1 = self.innerOrientationParameters[4]\n b2 = self.innerOrientationParameters[5]\n\n mat = np.array([[a1[0], a2[0]], [b1[0], b2[0]]])\n mat = la.inv(mat)\n\n return np.array([a0[0], b0[0], mat[0, 0], mat[0, 1], mat[1, 0], mat[1, 1]]).T",
"def inv(self, Am):\r\n # Section 1: MAmke sure Am cAmn be inverted.\r\n self.check_squareness(Am)\r\n self.check_non_singular(Am)\r\n \r\n # Section 2: MAmke copies of Am & I, AmM & IM, to use for row ops\r\n n = len(Am)\r\n AmM = self.copy_matrix(Am)\r\n I = self.identity_matrix(n)\r\n IM = self.copy_matrix(I)\r\n \r\n # Section 3: Perform row operAmtions\r\n indices = list(range(n)) # to Amllow flexible row referencing ***\r\n for fd in range(n): # fd stAmnds for focus diAmgonAml\r\n fdScAmler = 1.0 / AmM[fd][fd]\r\n # FIRST: scAmle fd row with fd inverse. \r\n for j in range(n): # Use j to indicAmte column looping.\r\n AmM[fd][j] *= fdScAmler\r\n IM[fd][j] *= fdScAmler\r\n # SECOND: operAmte on Amll rows except fd row Ams follows:\r\n for i in indices[0:fd] + indices[fd+1:]: \r\n # *** skip row with fd in it.\r\n crScAmler = AmM[i][fd] # cr stAmnds for \"current row\".\r\n for j in range(n): \r\n # cr - crScAmler * fdRow, but one element Amt Am time.\r\n AmM[i][j] = AmM[i][j] - crScAmler * AmM[fd][j]\r\n IM[i][j] = IM[i][j] - crScAmler * IM[fd][j]\r\n \r\n return IM",
"def inv(self):\n inv = np.linalg.inv(self._mat)\n return MoebTr(inv[0][0], inv[0][1], inv[1][0], inv[1][1])",
"def modular_inverse(self):\n i = gmpy2.invert(self.c2, self.n)\n mx = pow(self.c1, self.a, self.n)\n my = pow(i, int(-self.b), self.n)\n self.m= mx * my % self.n",
"def get_invntt_operator(self):\n\n\n Operator = []\n invntt_qubic = self.qubic.get_invntt_operator()\n R_qubic = ReshapeOperator(invntt_qubic.shapeout, invntt_qubic.shape[0])\n Operator.append(R_qubic(invntt_qubic(R_qubic.T)))\n\n invntt_planck = self.planck.get_invntt_operator()\n R_planck = ReshapeOperator(invntt_planck.shapeout, invntt_planck.shape[0])\n Operator.append(R_planck(invntt_planck(R_planck.T)))\n\n return BlockDiagonalOperator(Operator, axisout=0)",
"def inverse(self):\n if not self.is_square():\n raise(ValueError, \"Non-square Matrix does not have an inverse.\")\n if self.h > 2:\n raise(NotImplementedError, \"inversion not implemented for matrices larger than 2x2.\")\n\n # TODO - your code here\n if self.h == 1:\n inverse = [[1/self.g[0][0]]];\n else:\n a = self.g[0][0];\n b = self.g[0][1];\n c = self.g[1][0];\n d = self.g[1][1];\n if(a*d==b*c):\n raise ValueError('matrix does not have a inverse!');\n else:\n weigh = 1/(a*d-b*c);\n inverse = [[weigh*d,weigh*-1*b],[weigh*-1*c,weigh*a]];\n return Matrix(inverse);",
"def inverse(self: Float[LinearOperator, \"*batch N N\"]) -> Float[LinearOperator, \"*batch N N\"]:\n return self.__class__(self._diag.reciprocal())",
"def invert(self):\n\n if self.rows != self.columns:\n raise ValueError(\"Matrix must be square to invert\")\n\n A, operations = self.to_reduced_row_echelon()\n if not A.is_identity():\n return 0\n\n # If A was reduced to the identity matrix, then the same set of operations will take I to the inverse of A.\n # [A I] -> [I A^(-1)]\n\n I = IdentityMatrix(size = self.rows)\n for operation in operations:\n func = I.__getattribute__(operation[0])\n args = operation[1:]\n func(*args)\n\n return I",
"def inverse(self: Float[LinearOperator, \"*batch N N\"]) -> Float[LinearOperator, \"*batch N N\"]:\n return ConstantDiagLinearOperator(self.diag_values.reciprocal(), diag_shape=self.diag_shape)"
] | [
"0.65371925",
"0.6473114",
"0.639856",
"0.6361315",
"0.6302969",
"0.6292023",
"0.6192051",
"0.61344135",
"0.61059606",
"0.60929507",
"0.6069136",
"0.6021487",
"0.60205114",
"0.6011188",
"0.5997013",
"0.5966648",
"0.5926399",
"0.5926365",
"0.5916658",
"0.5888663",
"0.5883227",
"0.5874907",
"0.5866973",
"0.58164996",
"0.5813204",
"0.5803478",
"0.58029234",
"0.5792404",
"0.5783036",
"0.57659465"
] | 0.7164876 | 0 |
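The block-diagonal case reduces to one Toeplitz solve per block, exactly as the loop above does. A sketch of the same reduction using SciPy (the dense block_diag matrix is built only for the cross-check; the block columns are arbitrary symmetric examples):

import numpy as np
from scipy.linalg import block_diag, solve_toeplitz, toeplitz

cols = [np.array([3.0, 1.0, 0.5]), np.array([5.0, 2.0, 1.0, 0.5])]
T = block_diag(*[toeplitz(c) for c in cols])
u = np.arange(1.0, T.shape[0] + 1.0)

y = np.empty_like(u)
start = 0
for c in cols:  # apply each block's inverse to its slice of u
    end = start + len(c)
    y[start:end] = solve_toeplitz(c, u[start:end])
    start = end

assert np.allclose(y, np.linalg.solve(T, u))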
Parse a single line of csv-to-arrow output. Raise RuntimeError if a line cannot be parsed. (We can't recover from that because we don't know what's happening.) | def _parse_csv_to_arrow_warning(line: str) -> I18nMessage:
for pattern, builder in _ERROR_PATTERNS:
match = pattern.match(line)
if match:
return builder(**match.groupdict())
raise RuntimeError("Could not parse csv-to-arrow output line: %r" % line) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse_line(self, line):\n raise NotImplementedError",
"def test_parseLine2(mocker):\n \n # given: setup test framework\n worker = Worker()\n testString = \"11/11/19,Brighter Futures,12000\"\n \n # when:\n result = worker.parseLineCSV(testString)\n \n # then: (Using PyTruth assertions)\n AssertThat(result).IsNone()",
"def test_parseLine1(mocker):\n \n # given: setup test framework\n worker = Worker()\n testString = \"12Nov2019,Teacher,Brighter Futures,12000\"\n expectedResult = {\n 'date': '2019-11-12',\n 'job_title': 'Teacher',\n 'company_name': 'Brighter Futures',\n 'salary': 12000\n }\n \n # when:\n result = worker.parseLineCSV(testString)\n \n # then:\n assert result == expectedResult",
"def _parse_csv(\n path: Path,\n *,\n settings: Settings = DEFAULT_SETTINGS,\n encoding: Optional[str],\n delimiter: Optional[str],\n has_header: bool,\n autoconvert_text_to_numbers: bool,\n) -> ParseCsvResult:\n warnings = []\n\n with contextlib.ExitStack() as ctx:\n n_bytes = path.stat().st_size\n if n_bytes > settings.MAX_CSV_BYTES:\n # We can't simply os.truncate() the input file, because sandboxed code\n # can't modify input files.\n truncated_path = ctx.enter_context(tempfile_context(prefix=\"truncated-\"))\n with path.open(\"rb\") as src, truncated_path.open(\"wb\") as dest:\n os.sendfile(dest.fileno(), src.fileno(), 0, settings.MAX_CSV_BYTES)\n path = truncated_path\n warnings.append(\n _trans_cjwparse(\n \"csv.truncated_file\",\n \"{n_bytes_truncated, one{Truncated # byte} other{Truncated # bytes}} from file (maximum is {max_n_bytes} bytes)\",\n dict(\n n_bytes_truncated=(n_bytes - settings.MAX_CSV_BYTES),\n max_n_bytes=settings.MAX_CSV_BYTES,\n ),\n )\n )\n\n utf8_path = ctx.enter_context(tempfile_context(prefix=\"utf8-\", suffix=\".txt\"))\n # raises LookupError, UnicodeError\n warnings.extend(\n transcode_to_utf8_and_warn(path, utf8_path, encoding, settings=settings)\n )\n\n # Sniff delimiter\n if not delimiter:\n delimiter = detect_delimiter(utf8_path, settings)\n\n with tempfile_context(suffix=\".arrow\") as arrow_path:\n # raise subprocess.CalledProcessError on error ... but there is no\n # error csv-to-arrow will throw that we can recover from.\n child = subprocess.run(\n [\n \"/usr/bin/csv-to-arrow\",\n \"--delimiter\",\n delimiter,\n \"--max-rows\",\n str(settings.MAX_ROWS_PER_TABLE),\n \"--max-columns\",\n str(settings.MAX_COLUMNS_PER_TABLE),\n \"--max-bytes-per-value\",\n str(settings.MAX_BYTES_PER_VALUE),\n utf8_path.as_posix(),\n arrow_path.as_posix(),\n ],\n capture_output=True,\n check=True,\n )\n warnings.extend(_parse_csv_to_arrow_warnings(child.stdout.decode(\"utf-8\")))\n\n reader = pyarrow.ipc.open_file(arrow_path.as_posix())\n raw_table = reader.read_all() # efficient -- RAM is mmapped\n\n table, more_warnings = _postprocess_table(\n raw_table, has_header, autoconvert_text_to_numbers, settings\n )\n return ParseCsvResult(table, warnings + more_warnings)",
"def _parse_tuple(self, line):\n elements = line[1:-1].split(\",\\t\")\n if len(elements) == len(self.description):\n return tuple(\n [\n pythonize.convert(element.strip(), description[1])\n for (element, description) in zip(elements, self.description)\n ]\n )\n else:\n self._exception_handler(\n InterfaceError, \"length of row doesn't match header\"\n )",
"def doomed_parser(line):\n raise exceptions.LineParseException('Error occurred')",
"def parse(cls, line):\r\n raise NotImplementedError",
"def from_csv_line(line):\r\n return line.strip().split(',')",
"def csv_line(value_parser):\n def convert(string):\n return list(map(value_parser, string.split(',')))\n return convert",
"def ParseRow(self, parser_mediator, row_offset, row):\n try:\n date_time = self._CreateDateTime(row['date'], row['time'])\n except errors.ParseError as exception:\n parser_mediator.ProduceExtractionWarning(\n 'Unable to create date time with error: {0!s}'.format(exception))\n date_time = None\n\n status = row['status']\n if status:\n status = status.rstrip()\n\n event_data = McafeeAVEventData()\n event_data.action = row['action']\n event_data.filename = row['filename']\n event_data.offset = row_offset\n event_data.rule = row['rule']\n event_data.status = status\n event_data.trigger_location = row['trigger_location']\n event_data.username = row['username']\n event_data.written_time = date_time\n\n parser_mediator.ProduceEventData(event_data)",
"def parse_line(line: str) -> str:\n return line",
"def parse_line(line: str) -> str:\n return line",
"def parse_line(line: str) -> str:\n return line",
"def parse_line(line: str) -> str:\n return line",
"def csv_readline(line):\n for row in csv.reader([line]):\n return row",
"def csv_readline(line):\n for row in csv.reader([line]):\n return row",
"def parse_line(self, line):\n if self.signal_eof:\n return \"\"\n\n match = re.search(\"^([\\w\\s]+from) ([^:]+):(\\d+)(:|,)$\", line)\n if match:\n return self.parse_line_from(match)\n\n match = re.search(\"^([^:]+):(?:((?:\\d+:)?\\d+):)?(?:(error|warning|note):)?(.+)$\", line)\n if match:\n return self.parse_line_err(match)\n\n return line",
"def process_line(line):\n\n name_comp_list = []\n givenname_comp_list = []\n surname_comp_list = []\n geocode_comp_list = []\n locality_comp_list = []\n date1_comp_list = []\n date2_comp_list = []\n\n # Split the line into the basic fields - - - - - - - - - - - - - - - - - - -\n #\n if (config.in_file_type in ['CSV','CSVQ','TAB','TABQ']):\n # Comma or tabulator separated\n try:\n line_list = config.line_parser.parse(line)\n except:\n log_message('CSV line parsing failed with inout: '+line,'err')\n\n if (len(line_list) < config.input_len):\n log_message('Input line does not contain enough fields,' +\\\n 'fill up with empty fields','warn')\n while (len(line_list) < config.input_len):\n line_list.append('')\n\n config.curr_line_list = line_list # Save current line list\n\n # Extract fields into different component lists - - - - - - - - - - - - - -\n #\n if (config.input_component['name'] != []): # Extract name fields\n for i in config.input_component['name']:\n name_comp_list.append(line_list[i])\n\n else: # Extract givenname and surname into separate components - - - - - -\n if (config.input_component['givenname'] != []): # Extract g-name fields\n for i in config.input_component['givenname']:\n givenname_comp_list.append(line_list[i])\n\n if (config.input_component['surname'] != []): # Extract surname fields\n for i in config.input_component['surname']:\n surname_comp_list.append(line_list[i])\n\n if (config.input_component['geocode'] != []): # Extract geocode fields\n for i in config.input_component['geocode']:\n geocode_comp_list.append(line_list[i])\n\n if (config.input_component['locality'] != []): # Extract locality fields\n for i in config.input_component['locality']:\n locality_comp_list.append(line_list[i])\n\n if (config.input_component['date1'] != []): # Extract date1 fields\n for i in config.input_component['date1']:\n date1_comp_list.append(line_list[i])\n\n if (config.input_component['date2'] != []): # Extract date2 fields\n for i in config.input_component['date2']:\n date2_comp_list.append(line_list[i])\n\n elif (config.in_file_type == 'COL'): # Column based input file - - - - - - -\n\n if (len(line) < config.input_len):\n log_message('Input line is not long enough, fill up with spaces','warn')\n line += ' '*(config.input_len-len(line))\n\n if (config.input_component['name'] != []): # Extract name fields\n for (col_start,length) in config.input_component['name']:\n name_comp_list.append(line[col_start,col_start+length])\n\n else: # Extract givenname and surname into separate components - - - - - -\n if (config.input_component['givenname'] != []): # Extract g-name fields\n for (col_start,length) in config.input_component['givenname']:\n givenname_comp_list.append(line[col_start,col_start+length])\n\n if (config.input_component['surname'] != []): # Extract surname fields\n for (col_start,length) in config.input_component['surname']:\n surname_comp_list.append(line[col_start,col_start+length])\n\n if (config.input_component['geocode'] != []): # Extract geocode fields\n for (col_start,length) in config.input_component['geocode']:\n geocode_comp_list.append(line[col_start,col_start+length])\n\n if (config.input_component['locality'] != []): # Extract locality fields\n for (col_start,length) in config.input_component['locality']:\n locality_comp_list.append(line[col_start,col_start+length])\n\n if (config.input_component['date1'] != []): # Extract date1 fields\n for (col_start,length) in config.input_component['date1']:\n date1_comp_list.append(line[col_start,col_start+length])\n\n 
if (config.input_component['date2'] != []): # Extract date2 fields\n for (col_start,length) in config.input_component['date2']:\n date2_comp_list.append(line[col_start,col_start+length])\n\n # elif (config.in_file_type == 'SQL'): # - - - - - - - - - - - - - - - - - -\n\n ################################\n # Add later: SQL database access\n ################################\n\n msg = [' Component basic field lists:', \\\n ' Name: '+str(name_comp_list), \\\n ' Given name: '+str(givenname_comp_list), \\\n ' Surname: '+str(surname_comp_list), \\\n ' Geocode: '+str(geocode_comp_list), \\\n ' Locality: '+str(locality_comp_list), \\\n ' Date1: '+str(date1_comp_list), \\\n ' Date2: '+str(date2_comp_list)]\n log_message(msg,'v2')\n\n name_comp = ''\n givenname_comp = ''\n surname_comp = ''\n geocode_comp = ''\n locality_comp = ''\n date1_comp = ''\n date2_comp = ''\n\n # Now clean and then concatenate component lists into strings - - - - - - - -\n #\n if (name_comp_list != []): # Name component\n name_comp = name_comp_list[0] # Start with first field in list\n\n for f in name_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['name'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['name'] == 1):\n sep = check_field_spill(name_comp, f)\n\n name_comp = name_comp+sep+f # Append separator and field\n\n if (givenname_comp_list != []): # Givenname component - - - - - - - - - - -\n givenname_comp = givenname_comp_list[0] # Start with first field in list\n\n for f in givenname_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['givenname'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['givenname'] == 1):\n sep = check_field_spill(givenname_comp, f)\n\n givenname_comp = givenname_comp+sep+f # Append separator and field\n\n if (surname_comp_list != []): # Surname component - - - - - - - - - - - - -\n surname_comp = surname_comp_list[0] # Start with first field in list\n\n for f in surname_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['surname'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['surname'] == 1):\n sep = check_field_spill(surname_comp, f)\n\n surname_comp = surname_comp+sep+f # Append separator and field\n\n if (geocode_comp_list != []): # Geocode component - - - - - - - - - - - - -\n geocode_comp = geocode_comp_list[0] # Start with first field in list\n\n for f in geocode_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['geocode'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['geocode'] == 1):\n sep = check_field_spill(geocode_comp, f)\n\n geocode_comp = geocode_comp+sep+f # Append separator and field\n\n if (locality_comp_list != []): # Locality component - - - - - - - - - - - -\n locality_comp = 
locality_comp_list[0] # Start with first field in list\n\n for f in locality_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['locality'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['locality'] == 1):\n sep = check_field_spill(locality_comp, f)\n\n locality_comp = locality_comp+sep+f # Append separator and field\n\n if (date1_comp_list != []): # Date1 component - - - - - - - - - - - - - - -\n date1_comp = date1_comp_list[0] # Start with first field in list\n\n for f in date1_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['date1'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['date1'] == 1):\n if (date1_comp[-1] != ' ') and (f[0] != ' '):\n tmp_list0 = date1_comp.split()\n tmp_list1 = f.split()\n check_word = tmp_list0[-1]+tmp_list1[0]\n\n if (check_word in ['jan','feb','mar','apr','may','jun','jul','aug', \\\n 'sep','oct','nov','dec','january','february','march','april', \\\n 'may','june','july','august','september','october','november', \\\n 'december']):\n\n sep = '' # Set separator to no space\n msg = ' Correct date1 word spilling: \"'+date1_comp+'\",\"'+f+'\"'\n log_message(msg,'v1')\n\n date1_comp = date1_comp+sep+f # Append separator and field\n\n if (date2_comp_list != []): # Date2 component - - - - - - - - - - - - - - -\n date2_comp = date2_comp_list[0] # Start with first field in list\n\n for f in date2_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['date2'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['date2'] == 1):\n if (date2_comp[-1] != ' ') and (f[0] != ' '):\n tmp_list0 = date1_comp.split()\n tmp_list1 = f.split()\n check_word = tmp_list0[-1]+tmp_list1[0]\n\n if (check_word in ['jan','feb','mar','apr','may','jun','jul','aug', \\\n 'sep','oct','nov','dec','january','february','march','april', \\\n 'may','june','july','august','september','october','november', \\\n 'december']):\n\n sep = '' # Set separator to no space\n msg = ' Correct date1 word spilling: \"'+date1_comp+'\",\"'+f+'\"'\n log_message(msg,'v1')\n\n date2_comp = date2_comp+sep+f # Append separator and field\n\n # Check if name component is given or givenname and surname separately - - -\n #\n if (config.input_component['givenname'] != []) or \\\n (config.input_component['surname'] != []):\n name_comp = [givenname_comp, surname_comp]\n\n msg = [' Components:', \\\n ' Name: \"'+str(name_comp)+'\"', \\\n ' Geocode: \"'+geocode_comp+'\"', \\\n ' Locality: \"'+locality_comp+'\"', \\\n ' Date1: \"'+date1_comp+'\"', \\\n ' Date2: \"'+date2_comp+'\"']\n log_message(msg,'v1')\n\n return [name_comp, geocode_comp, locality_comp, date1_comp, date2_comp]",
"def parse(self, line):\n try:\n (year, month, day, hour, minute, second, microseconds, offset_hour, offset_minute, source, process, logentry) = re.match('^(\\d\\d\\d\\d)-(\\d\\d)-(\\d\\d)T(\\d\\d):(\\d\\d):(\\d\\d)\\.([\\d]+)\\+(\\d\\d):(\\d\\d) ([a-z]+)\\[([a-zA-Z0-9_.]+)\\]: ([0-9a-z-A-Z\\-_\\.\\[\\]:\\?\\#\\\",/\\ ={}\\'\\(\\)<>]+)$', line).groups()\n except:\n pass\n \n try:\n parsed_data = dict()\n parsed_data['timestamp'] = \" \".join([\"-\".join([year, month, day]), \":\".join([hour, minute, second])])\n parsed_data['log_time'] = datetime.datetime(int(year), int(month), int(day), int(hour), int(minute), int(second))\n parsed_data['log_source'] = source\n parsed_data['log_type'] = process\n except (AttributeError, UnboundLocalError):\n PARSE_ERRORS.append(line)\n return False\n\n #TODO: This still needs work on spaces in values surrounded by \" \" \n if parsed_data['log_source'] == \"heroku\":\n if logentry.__len__() > 1:\n logentry = re.sub(', ', ',', logentry)\n line_chunks = re.split(' ', logentry)\n for chunk in line_chunks:\n line_chunks = re.split('=', chunk)\n if line_chunks.__len__() > 2:\n #fwd and path are a little clunky to parse\n pass\n elif line_chunks.__len__() > 1:\n parsed_data[line_chunks[0]] = line_chunks[1]\n else:\n pass\n else:\n return False\n else:\n # TODO: [app] \n # Needs parsing. Do that here.\n return False\n\n return parsed_data",
"def next_line(self, context, line):\n if \"\\t\" in line:\n next_index = line.find(\"\\t\", 0)\n while next_index != -1:\n extra_data = f\"Column: {next_index + 1}\"\n self.report_next_line_error(\n context, next_index + 1, extra_error_information=extra_data\n )\n next_index = line.find(\"\\t\", next_index + 1)",
"def process_line(line: str):\n \n comment_start = line.find(';')\n\n # Remove comments, one comment per line allowed\n if comment_start != -1:\n line = line[:comment_start]\n\n line = line.strip()\n \n # Splits commands such that the command and all details are seperated\n # \"command ...\" -> [command, ...]\n try:\n command, contents = line.split(maxsplit = 1)\n # Deals with function names, two special commands, and empty lines\n except ValueError:\n if line == '':\n return None\n elif line[-1] == ':' or line == 'end' or line == 'ret':\n return (line,)\n\n # Splits depending on command type, some requiring one argument, others two\n try:\n one, two = contents.split(',')\n return command, one.strip(), two.strip()\n except ValueError:\n return command, contents.strip()",
"def read(self, line):\n data = []\n if six.PY3 and type(line) == six.binary_type:\n line = line.decode('utf-8')\n\n csv_reader = csv.reader(six.StringIO(line),\n delimiter=self.delimiter,\n quotechar=self.quotechar,\n skipinitialspace=True)\n for cr in csv_reader:\n data = [decode_string(f).strip() for f in cr]\n break\n\n return None, data",
"def parse_row(self, response, row):\n raise NotImplementedError",
"def __read_csv(self) -> tuple:\n with open(self.csv_file) as f:\n reader = csv.reader(f)\n for row in reader:\n if row[0].isspace():\n raise StopIteration\n yield row",
"def parse_csv(csv_file):\n if os.path.isfile(csv_file) == True:\n num_lines = sum(1 for line in open(csv_file))\n if num_lines > 1:\n try:\n data = pd.read_csv(csv_file, index_col=False)\n data.insert(0, 'id', range(1, 1 + len(data)))\n return(data)\n except pd.parser.CParserError, err:\n message = \"Can't parse REDCap data. Check CSV file: \" + csv_file\n print(message)\n logging.critical(message)\n exit(3)\n else:\n message = \"CSV file does not contain data: \" + csv_file\n print(message)\n logging.warning(message)\n return(None)\n else:\n message = \"Can't read CSV file: \" + csv_file\n print(message)\n logging.critical(message)\n exit(4)",
"def process_line(self, line):\n columns = line.split('|')\n\n if len(line) == 0 or len(columns) < 16:\n return None # empty line or malformed line\n\n cmte_id, name, zip_code = columns[0], columns[7], columns[10][:5]\n transaction_dt, transaction_amt = columns[13], columns[14]\n other_id = columns[15]\n\n if len(other_id) > 0 or len(transaction_amt) == 0 or len(cmte_id) == 0 or len(name) == 0 or len(zip_code) < 5:\n return None # malformed data fields, ignore this line\n transaction_date = string_to_date(transaction_dt)\n if transaction_date is None:\n return None # 'TRANSACTION_DT' is an invalid date\n\n try:\n if self.repeat_donor(name, zip_code, transaction_date.year):\n # this record is from a repeat donor in any prior calendar year\n amount = float(transaction_amt)\n key = RecipientZipYear(cmte_id, zip_code, transaction_date.year)\n if key not in self.running_percentile:\n self.running_percentile[key] = RunningPercentile(self.percentile)\n self.running_percentile[key].add(amount)\n return self.print_record(key)\n else:\n return None # this record is not from a repeat donor\n except:\n return None # exception may comes from malformed line, so just ignore this line",
"def parse_line(self, line):\n success = self.parser.handle_line(line)\n if success:\n self.data.update()\n else:\n self.bot.log(\"didn't handle line: '{}'\".format(line))",
"def _process_text_line(self, line, columns, format, lower_case, num_line,\n fill_missing=0, filter_case=None,\n strict_separator=False):\n if not isinstance(line, list) and not isinstance(\n line, tuple) and not isinstance(line, numpy.ndarray):\n if format != \"tsv\":\n raise Exception(\"unable to process format \" + format)\n line = line.strip(\"\\r\\n \").replace(\"\\n\", \" \")\n line = DatabaseCore2._split_expr.split(line)\n\n if filter_case is not None:\n line = [filter_case(s) for s in line]\n\n try:\n if fill_missing > 0:\n m = max(columns.keys())\n if m >= len(line):\n line = copy.copy(line)\n add = 0\n while m >= len(line) and add < fill_missing:\n a, b = columns[len(line)]\n if b is int:\n line.append(\"0\")\n elif b is float:\n line.append(\"0.0\")\n elif b is decimal.Decimal:\n line.append(\"0\")\n elif b is str:\n line.append(\"\")\n else:\n line.append(\"\")\n add += 1\n\n res = {}\n for c, v in columns.items():\n if \"AUTOFILL\" in v:\n res[v[0]] = \"NULL\"\n elif \"AUTOINCREMENT\" in v:\n continue\n else:\n if c >= len(line):\n self.LOG(\n \"(a)line number \",\n num_line,\n \"*unable to process a line columns \",\n c,\n \"#\",\n line,\n \" columns \",\n columns)\n return None\n\n val = line[c]\n if len(v) > 2 and v[2].lower() not in [\n \"primarykey\", \"autofill\"]:\n val = v[2](val)\n\n try:\n if isinstance(v[1], tuple):\n val = v[1][0](val)\n elif v[1] is datetime.datetime:\n if isinstance(val, datetime.datetime):\n pass\n elif isinstance(val, str):\n val = datetime.datetime.parse(val)\n else:\n raise TypeError(\n \"unable to convert %s into datetime\" % str(\n type(val)))\n else:\n val = v[1](val)\n except ValueError: # as e :\n self.LOG(\n \"(b)line number \",\n num_line,\n \"**unable to process a line columns \",\n c,\n \"#\",\n v[0],\n \" type \",\n v[1],\n \" value \",\n repr(\n line[c]))\n return None\n\n if isinstance(val, str):\n val = val.replace(\"'\", \"''\")\n if lower_case:\n val = val.lower()\n res[v[0]] = val\n\n return res\n except Exception:\n self.LOG(\"(c)line number\", num_line,\n \"***unable to process a line columns:\", line)\n return None",
"def parse_row(self, row):\n \n self.metadata = row",
"def mapper(self, line_no, line):\n cell = csv_readline(line)\n if cell[0] == 'V':\n yield cell[4],1"
] | [
"0.6595832",
"0.6529445",
"0.62704617",
"0.61401874",
"0.61335003",
"0.61316746",
"0.61252147",
"0.61061907",
"0.5982218",
"0.5961737",
"0.5809438",
"0.5809438",
"0.5809438",
"0.5809438",
"0.5806658",
"0.5806658",
"0.5729117",
"0.5704075",
"0.5667828",
"0.56519485",
"0.5627262",
"0.5607163",
"0.55858445",
"0.5569104",
"0.55405027",
"0.552808",
"0.5510118",
"0.5474966",
"0.5470403",
"0.54466015"
] | 0.7278119 | 0 |
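A short aside on the `parse_csv` negative above: it guards three failure modes (a missing file, an unparseable file, and a header-only file). A runnable sketch of the same defensive pattern, assuming only pandas; the name `load_rows` and the exit codes are illustrative, not taken from the dataset:

import logging
import os
import sys
from typing import Optional

import pandas as pd

def load_rows(csv_file: str) -> Optional[pd.DataFrame]:
    # Missing or unparseable input is fatal; an empty table is only a warning.
    if not os.path.isfile(csv_file):
        logging.critical("Can't read CSV file: %s", csv_file)
        sys.exit(4)
    try:
        data = pd.read_csv(csv_file, index_col=False)
    except pd.errors.ParserError:
        logging.critical("Can't parse CSV file: %s", csv_file)
        sys.exit(3)
    if data.empty:
        logging.warning("CSV file does not contain data: %s", csv_file)
        return None
    data.insert(0, "id", range(1, 1 + len(data)))  # 1-based surrogate ids
    return data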
Return true if we should fast-skip converting a pa.Array. The _true_ reason for this function is to test whether an Array contains "Inf" or "NaN". A number-conversion library will parse those. But _this_ library is for Workbench, and Workbench doesn't support NaN/Inf. So this function helps us decide _not_ to autoconvert a column when the intent isn't perfectly clear. Assume `arr` is of type `utf8` or a dictionary of `utf8`. Assume there are no gaps hidden in null values in the buffer. (It's up to the caller to prove this.) | def _utf8_chunk_may_contain_inf_or_nan(chunk: pyarrow.Array) -> bool:
_, offsets_buf, data_buf = chunk.buffers()
offsets = array.array("i")
assert offsets.itemsize == 4
offsets.frombytes(offsets_buf)
if sys.byteorder != "little":
offsets.byteswap() # pyarrow is little-endian
offset0 = offsets[chunk.offset]
offsetN = offsets[chunk.offset + len(chunk)] # len(offsets) == 1 + len(chunk)
b = data_buf[offset0:offsetN].to_pybytes()
return SCARY_BYTE_REGEX.search(b) is not None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def asarray_chkfinite(a):\n a = asarray(a)\n if (a.dtype.char in typecodes['AllFloat']) \\\n and (_nx.isnan(a).any() or _nx.isinf(a).any()):\n raise ValueError, \"array must not contain infs or NaNs\"\n return a",
"def is_array(self, arr):\n return isinstance(arr, np.ndarray)",
"def pyarrow_array(arr, nan_to_null=False):\n import numpy as np\n import pyarrow as pa\n if nan_to_null and issubclass(arr.dtype.type,\n (np.floating, np.complexfloating)):\n isnan = np.isnan(arr)\n if isnan.any():\n pa_nul = pa.py_buffer(get_bitmap(isnan))\n return pa.Array.from_buffers(pa.from_numpy_dtype(arr.dtype),\n arr.size,\n [pa_nul, pa.py_buffer(arr)])\n return pa.Array.from_buffers(pa.from_numpy_dtype(arr.dtype),\n arr.size,\n [None, pa.py_buffer(arr)])",
"def is_array(self):\n return False",
"def isfloatarray(cell):\n try:\n cell.astype(float)\n return True\n except ValueError:\n return False",
"def sanitize_array(array):\n a = np.ravel(array)\n maxi = np.nanmax(a[np.isfinite(a)])\n mini = np.nanmin(a[np.isfinite(a)])\n array[array == float('inf')] = maxi\n array[array == float('-inf')] = mini\n mid = (maxi + mini) / 2\n array[np.isnan(array)] = mid\n return array",
"def nonans(array):\n return array[~np.isnan(array)]",
"def check_array(arr: Arrayable) -> np.ndarray:\n if isinstance(arr, np.ndarray):\n return arr\n return np.array(arr)",
"def _is_double(arr):\n\n # Figure out which dtype for data\n if arr.dtype == np.float32:\n return False\n elif arr.dtype == np.float64:\n return True\n else:\n raise ValueError(\"Only float32 or float64 dtypes are supported\")",
"def isscalar(array):\n arr = ma.array(array)\n if not hasattr(arr, '__len__') or arr.shape == () or len(arr) == 1:\n return True\n return False",
"def isfillvalue(a):\n a = numpy.asarray(a)\n if a.dtype.kind == 'i':\n mask = a == -999999999\n elif a.dtype.kind == 'f':\n mask = numpy.isnan(a)\n elif a.dtype.kind == 'S':\n mask = a == ''\n else:\n raise ValueError('Fill value not known for dtype %s' % a.dtype)\n return mask",
"def _is_unicode(arr):\n if (isinstance(arr, str) or\n issubclass(numpy.asarray(arr).dtype.type, str)):\n return True\n return False",
"def filter_nans(seq):\n return np.array([x for x in seq if not isinstance(x, float)])",
"def isscalar(x):\n arrayed_x = asarray(x)\n return asarray(x).ndim == 0 and arrayed_x.dtype != 'object'",
"def is_sorted_array(arr, increasing=True):\n # If only 1\n if len(arr) == 0:\n return True\n # If multiple values\n if increasing:\n return np.all(np.diff(arr) >= 0)\n else:\n return np.all(np.diff(arr) <= 0)",
"def is_array(space, w_obj):\n return space.wrap(w_obj.tp == space.tp_array)",
"def __isZeroEverywhere(self, array):\n epsilon = numpy.finfo( type(array[0]) ).eps\n boolList = numpy.less_equal(numpy.abs(array), epsilon)\n\n for b in boolList:\n if not b:\n return False\n return True",
"def isna(self):\n # type: () -> np.ndarray\n return extract_isnull_bytemap(self.data)",
"def isna(self):\n # type: () -> np.ndarray\n return extract_isnull_bytemap(self.data)",
"def test_dtype_None(self):\n array = np.array([[0, 1, 2], [2, 1, 0]]).T\n self.assertTrue(to_ndarray(array, None, safe=True).flags.contiguous,\n msg='to_ndarray: Non contiguous arrays are not being consolidated when dtype is None')",
"def is_array(t):\n return isinstance(t, ast.Array)",
"def is_array(self):\n return len(self.descriptor) > 1",
"def _is_number(data):\n return len(data) and np.issubdtype(_to_ndarray(data).dtype, np.number)",
"def convert_non_monotonic_to_nan(array):\n keep = np.arange(0, len(array))\n is_monotonic = False\n while not is_monotonic:\n is_monotonic_array = np.hstack(\n (array[keep][1:] >= array[keep][:-1], np.array(True))\n )\n is_monotonic = is_monotonic_array.all()\n keep = keep[is_monotonic_array]\n out_array = np.full_like(array.astype(np.float), np.nan)\n out_array[keep] = array[keep]\n return out_array",
"def __check_flat_array__(self):\n if self.flat_array is not None:\n return True\n else:\n return False",
"def _autocast_column(data: pyarrow.ChunkedArray) -> pyarrow.ChunkedArray:\n # All-empty (and all-null) columns stay text\n for chunk in data.iterchunks():\n # https://arrow.apache.org/docs/format/Columnar.html#variable-size-binary-layout\n _, offsets_buf, _ = chunk.buffers()\n # If data has an offset, ignore what comes before\n #\n # We don't need to grab the _int_ offset: we can just look at the\n # byte-representation of it.\n offset_0_buf = offsets_buf[chunk.offset * 4 : (chunk.offset + 1) * 4]\n # last offset isn't always the last 4 bytes: there can be padding\n offset_n_buf = offsets_buf[\n (chunk.offset + len(chunk)) * 4 : (chunk.offset + len(chunk) + 1) * 4\n ]\n if offset_0_buf.to_pybytes() != offset_n_buf.to_pybytes():\n # there's at least 1 byte of text. (Assumes the CSV reader doesn't\n # pad the buffer with gibberish.)\n break\n else:\n # there are 0 bytes of text\n return data\n\n # Convert \"\" => null, so pyarrow cast() won't balk at it.\n sane = pyarrow.chunked_array(\n [_nix_utf8_chunk_empty_strings(chunk) for chunk in data.iterchunks()]\n )\n\n for chunk in sane.iterchunks():\n # pyarrow cast() uses double-conversion, so it parses \"NaN\" and \"Inf\"\n # as doubles. Workbench doesn't support NaN or Inf, so don't convert to\n # them.\n if _utf8_chunk_may_contain_inf_or_nan(chunk):\n return data\n\n try:\n numbers = sane.cast(pyarrow.float64())\n except pyarrow.ArrowInvalid:\n # Some string somewhere wasn't a number\n return data\n\n # Test that there's no infinity. We'll use numpy. .to_numpy() with\n # zero_copy_only=False will convert nulls to NaN. That's fine, since we\n # know `numbers` has no NaN values (because `cast()` would have raised\n # rather than return a NaN.)\n for chunk in numbers.iterchunks():\n npchunk = chunk.to_numpy(zero_copy_only=False)\n if np.inf in npchunk or -np.inf in npchunk:\n # Numbers too large\n return data\n\n # Downcast integers, when possible.\n #\n # We even downcast float to int. Workbench semantics say a Number is a\n # Number; so we might as well store it efficiently.\n try:\n # Shrink as far as we can, until pyarrow complains.\n #\n # pyarrow will error \"Floating point value truncated\" if a conversion\n # from float to int would be lossy.\n #\n # We'll return the last _successful_ `numbers` result.\n numbers = numbers.cast(pyarrow.int32())\n numbers = numbers.cast(pyarrow.int16())\n numbers = numbers.cast(pyarrow.int8())\n except pyarrow.ArrowInvalid:\n pass\n\n return numbers",
"def _numba_not_in_array(vector: np.ndarray, array: np.ndarray, delta: float = 1e-4) -> bool:\n diff = np.abs(array - vector)\n for idx in range(array.shape[0]):\n localdiff = np.max(diff[idx, :])\n if localdiff < delta:\n return False\n\n return True",
"def remove_nans(arr):\n not_nan = [i for i in range(len(arr)) if not np.isnan(arr[i])]\n\n return not_nan, arr[not_nan]",
"def is_array_type(an_array, atype):\n tmp = [i for i in an_array if not isinstance(i, atype)]\n return len(tmp) == 0",
"def isinf(data):\n return _make.isinf(data)"
] | [
"0.63142204",
"0.59511065",
"0.59251046",
"0.5863669",
"0.5700599",
"0.5661153",
"0.5581066",
"0.54970616",
"0.54685277",
"0.54147017",
"0.53897524",
"0.5384138",
"0.53668594",
"0.5293467",
"0.52856606",
"0.527953",
"0.5257239",
"0.5248469",
"0.5248469",
"0.5215622",
"0.5214552",
"0.5208751",
"0.5203614",
"0.5202484",
"0.5198838",
"0.5193919",
"0.5189366",
"0.5188746",
"0.5166378",
"0.5151065"
] | 0.60420185 | 1 |
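The positive document in this row scans the raw UTF-8 buffer with a module-level `SCARY_BYTE_REGEX` that the row itself never defines. A self-contained sketch of the idea; the exact pattern below is an assumption (any case-insensitive "nan"/"inf" substring), not the project's real definition:

import array
import re
import sys

import pyarrow

# Assumed pattern: anything a double parser would read as a non-finite float.
# Matching raw bytes (not decoded strings) keeps the check allocation-free.
SCARY_BYTE_REGEX = re.compile(rb"(?i)nan|inf")

def chunk_may_contain_inf_or_nan(chunk: pyarrow.Array) -> bool:
    # buffers() of a utf8 array: [validity bitmap, int32 offsets, byte data]
    _, offsets_buf, data_buf = chunk.buffers()
    offsets = array.array("i")
    offsets.frombytes(offsets_buf)
    if sys.byteorder != "little":
        offsets.byteswap()  # Arrow buffers are little-endian
    start = offsets[chunk.offset]
    stop = offsets[chunk.offset + len(chunk)]
    # A match across string boundaries is a false positive, which is fine
    # for a conservative "may contain" check.
    return SCARY_BYTE_REGEX.search(data_buf[start:stop].to_pybytes()) is not None

arr = pyarrow.array(["1.5", "NaN", "2"])
print(chunk_may_contain_inf_or_nan(arr))  # True -> column stays text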
Update the config information with new dropout values. | def update_dropout(info,
dropout,
dropout_type,
prop_name):
if dropout_type == "schnet_dropout":
info["model_params"]["schnet_dropout"] = dropout
elif dropout_type == "chemprop_dropout":
info["model_params"]["cp_dropout"] = dropout
elif dropout_type == "readout_dropout":
# if it's in the readout layers, find the dropout
# layers in the readout dictionary and update them
readout = info["model_params"]["readoutdict"]
layer_dics = readout[prop_name]
for layer_dic in layer_dics:
if layer_dic["name"] == "Dropout":
layer_dic["param"]["p"] = dropout
info["model_params"]["readoutdict"] = {prop_name: layer_dics}
elif dropout_type == "attention_dropout":
info["model_params"]["boltzmann_dict"]["dropout_rate"] = dropout
else:
info["model_params"][dropout_type] = dropout | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def conf_update(self):\n pass",
"def update(self):\n self.save_config_file()",
"def updateConfig(self):\n # Make sure to keep the default values in place.\n if self.newConfig['sensor'] == 0:\n self.newConfig['sensor'] = self.config['sensor']\n if self.newConfig['camera'] == 0:\n self.newConfig['camera'] = self.config['camera']\n if not self.newConfig['auto']['times']:\n self.newConfig['auto']['times'] = self.config['auto']['times']\n if not self.newConfig['auto']['days']:\n self.newConfig['auto']['days'] = self.config['auto']['days']\n\n # Show the changes.\n if self.verbosity >= 1:\n print('%s: Updating configuration file...' % self.feederName)\n try:\n for key in self.config.keys():\n if type(self.config[key]) is dict:\n for subkey in self.config[key].keys():\n if self.config[key][subkey] != self.newConfig[key][subkey]:\n print('%s: Updating %s from %s to %s.' % (self.feederName, subkey, self.config[key][subkey], self.newConfig[key][subkey]))\n elif self.config[key] != self.newConfig[key]:\n print('%s: Updating %s from %s to %s.' % (self.feederName, key, self.config[key], self.newConfig[key]))\n except ValueError:\n if self.verbosity >= 1:\n print('%s: Configuration file does not contain a valid JSON object.' % self.feederName)\n if self.verbosity == 2:\n print('%s: Overwriting configuration file to: %s.' % (self.feederName, self.config))\n\n # Change the configuration file.\n self.config = self.newConfig\n self.writeConfig()",
"def changeDropout(self,dropout):\n self.dropout = dropout",
"def with_config_update(self):\n original_config = self.load_config()\n\n config_data = original_config.json\n if str(self.ITEM_PUBLIC_ID) in config_data[f\"{self.ITEM_TYPE}s\"]:\n config_data[f\"{self.ITEM_TYPE}s\"].remove(str(self.ITEM_PUBLIC_ID))\n config_data[f\"{self.ITEM_TYPE}s\"].append(\n f\"{self.ITEM_PUBLIC_ID.author}/{self.ITEM_PUBLIC_ID.name}:0.0.1\"\n )\n self.dump_config(AgentConfig.from_json(config_data))\n try:\n yield\n finally:\n self.dump_config(original_config)",
"def update_global_config(self, config, **kwargs):\n pass",
"def update_config(self, data):\n self.config.data = dict_merge(self.config.data, data)\n self.config.save()",
"def refresh_configuration(self):\n pass",
"def updateconfig(self):\n\n # Initialize the yaml data\n ydata = {\"metadata\": self._metadata, \"nodes\": self._nodes}\n\n # Write the system config file\n filename = self._rootdir + self._metadata[\"system_config_file\"]\n with open(filename, \"w\") as yamlfile:\n yaml.dump(ydata, yamlfile)",
"def _overwrite_with_config(self, new_cfg):\n for section in new_cfg.sections():\n for key, val in new_cfg.items(section):\n self.config.set(section, key, val)",
"def update_settings(self):\n\n self.sim.account.set_balance(int(self.balance_str.get()))\n\n self.sim.config.set_base_bet(int(self.base_bet_str.get()))\n self.sim.config.set_payout(float(self.payout_str.get()))\n self.sim.config.set_iterations(int(self.iterations_str.get()))\n self.sim.config.set_loss_adder(int(self.loss_adder_str.get()))",
"def update_ranges(self):\n new_ranges = self.get_z_ranges()\n self.config.update_ranges(new_ranges)",
"def update_config(self):\n self.channel_count = self.config_global['channel_count']\n self.pixel_count = self.config_global['pixel_count']\n self.pixel_index_max = self.pixel_count - 1\n self.repeat_count = self.config_global['repeat_count']\n self.repeat_snake = self.config_global['repeat_snake']\n\n self.update_interval = self.config_global['update_interval']\n self.mode_16bit = self.config_global['mode_16bit']\n\n self.color_channels = self.config_global['color_channels']\n # self.color_channels = collections.namedtuple(\n # 'color_channels',\n # **self.color_channels_dict\n # )\n self.color_channels_count = len(self.color_channels)\n if self.mode_16bit:\n self.color_channels_count = self.color_channels_count * 2\n\n self.total_channel_count = (\n self.pixel_count *\n self.color_channels_count\n )\n if self.repeat_count > 0:\n self.total_channel_count *= self.repeat_count",
"def _on_config_changed(self, _):\n self._configure_pod()",
"def config_update(cls, **options) -> None:\n cls._logger.debug(\"[%s]: Update config from kwargs.\", cls.__name__)\n\n config_update: Dict = {k: options[k] for k in options.keys() if \"graph_\" in k}\n\n cls._config.update(config_update)\n\n cls._logger.debug(\"[%s]: Final config: %s\", cls.__name__, cls._config)",
"def config_updated(self):\n if callable(self.on_config_updated):\n self.on_config_updated(self.config())",
"def _update_params(self):\n log.debug(\"Updating parameter dict\")\n old_config = self._param_dict.get_config()\n self._get_config()\n new_config = self._param_dict.get_config() \n if (new_config != old_config):\n self._driver_event(DriverAsyncEvent.CONFIG_CHANGE)",
"def apply_user_configuration(self, config):\n self.logDisplay.set_logging_level(config['log'].get('logging_level', fallback='Verbose'))\n\n # MIDI\n self.winchMidiInputCombo.select_item(config['midi'].get('winch_midi_input', fallback='<no selection>'))\n self.midiOutputCombo.select_item(config['midi'].get('midi_output', fallback='<no selection>'))\n\n # OSC\n oscdef = config['osc']\n self.oscListenerConfig.set_OSC_port(oscdef.get('listener_addr', fallback='localhost'),\n oscdef.getint('listener_port', fallback=3751))\n\n self.oscSenderConfig.set_OSC_port(oscdef.get('sender_addr', fallback='localhost'),\n oscdef.getint('sender_port', fallback=3752))\n\n # DMX\n self.dmxSelect.select_item(config['dmx'].get('dmx_output_serial_port', fallback='<no selection>'))\n\n # winches\n for i, winchSelect in enumerate(self.winchSelects):\n key = \"winch_%d_output_serial_port\" % (i+1)\n winchSelect.select_item(config['winches'].get(key, fallback = '<no selection>'))\n return",
"def _config_options(self):\n self._config_sortable(self._sortable)\n self._config_drag_cols(self._drag_cols)",
"def _update(self):\n # clear group before rebuild\n self.clear()\n\n # build configuration groups\n self._config_names = []\n for i in range(self._n_configs):\n config_name = f\"config{i+1:02}\"\n self._config_names.append(config_name)\n self._build_config_group(config_name)\n\n # reset active configuration if necessary\n if not all(cname in self._config_names for cname in self._active_config):\n self._active_config = (self._config_names[0],)\n\n # build datasets\n self._build_datasets()",
"def _auto_update_configuration(self) -> None:\n self.config = rasa.utils.train_utils.update_confidence_type(self.config)\n rasa.utils.train_utils.validate_configuration_settings(self.config)\n self.config = rasa.utils.train_utils.update_similarity_type(self.config)\n self.config = rasa.utils.train_utils.update_evaluation_parameters(self.config)",
"def set_config(self, config):\n if 'symbols' in config:\n self.symbols = self.config['symbols'] = config['symbols']\n if 'update_frequency_milliseconds' in config:\n self.update_frequency_milliseconds = self.config['update_frequency_milliseconds'] = int(\n config['update_frequency_milliseconds']\n )\n if 'elements_per_update' in config:\n self.elements_per_update = self.config['elements_per_update'] = int(config['elements_per_update'])",
"async def async_update_config(self, config: ConfigType) -> None:\n self._config = config\n # just in case min/max values changed\n if self._current_value is None:\n return\n self._current_value = min(self._current_value, self._maximum)\n self._current_value = max(self._current_value, self._minimum)\n self.async_write_ha_state()",
"def _save_config(self, data):\n curr_conf = self.config_entry.options.copy()\n curr_conf.update(data)\n curr_conf.update(self._conf_devs_option)\n\n return self.async_create_entry(title=\"\", data=curr_conf)",
"def update_configuration(self, config):\n\n config[\"data_transformation\"][\"n_classification_bins\"] = config[\"n_classification_bins\"]\n config[\"data_transformation\"][\"nassets\"] = config[\"nassets\"]\n config[\"data_transformation\"][\"classify_per_series\"] = config[\"classify_per_series\"]\n config[\"data_transformation\"][\"normalise_per_series\"] = config[\"normalise_per_series\"]\n\n return config",
"def update_config(self, config):\n return self._update_config(\"config\", config)",
"def config(self, config_dict):\r\n self._cfg.config = config_dict",
"def configure(self, config: dict):\n self.config.update(config)",
"def update(self, obj):\n\n self.cfg.update(obj)",
"def _update_params(self, *args, **kwargs):\n\n \n # Get old param dict config.\n old_config = self._param_dict.get_config()\n \n # Issue display commands and parse results.\n timeout = kwargs.get('timeout', SBE37_TIMEOUT)\n self._do_cmd_resp('ds',timeout=timeout)\n self._do_cmd_resp('dc',timeout=timeout)\n \n # Get new param dict config. If it differs from the old config,\n # tell driver superclass to publish a config change event.\n new_config = self._param_dict.get_config()\n if new_config != old_config:\n self._driver_event(DriverAsyncEvent.CONFIG_CHANGE)"
] | [
"0.6544299",
"0.63342535",
"0.60116196",
"0.59151256",
"0.5909534",
"0.57759255",
"0.57704425",
"0.5765275",
"0.5730661",
"0.56408286",
"0.5635697",
"0.558882",
"0.55770063",
"0.5571904",
"0.5553866",
"0.5534613",
"0.5478377",
"0.546527",
"0.5463798",
"0.5436312",
"0.5427711",
"0.53996444",
"0.5395192",
"0.538703",
"0.5386605",
"0.53815395",
"0.5366553",
"0.5358142",
"0.5354595",
"0.5339216"
] | 0.63966775 | 1 |
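A quick usage sketch for the `update_dropout` document above, exercising the readout branch. The `info` skeleton is hypothetical — its keys simply mirror what the function reads, and the property name "energy" is invented:

info = {
    "model_params": {
        "schnet_dropout": 0.0,
        "boltzmann_dict": {"dropout_rate": 0.0},
        "readoutdict": {
            "energy": [
                {"name": "Dropout", "param": {"p": 0.0}},
                {"name": "linear", "param": {"in_features": 128, "out_features": 1}},
            ]
        },
    }
}

update_dropout(info, dropout=0.2, dropout_type="readout_dropout", prop_name="energy")
print(info["model_params"]["readoutdict"]["energy"][0]["param"]["p"])  # 0.2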
Update the config information with the number of attention heads. | def update_heads(info,
heads):
info["model_params"]["boltzmann_dict"]["num_heads"] = heads
# Concatenate the fingerprints produced by the different heads
info["model_params"]["boltzmann_dict"]["head_pool"] = "concatenate"
readoutdict = info["model_params"]["readoutdict"]
feat_dim = info["model_params"]["mol_basis"]
for key, lst in readoutdict.items():
for i, dic in enumerate(lst):
if "param" in dic and "in_features" in dic.get("param", {}):
# make sure that the input dimension to the readout is equal to
# `heads * feat_dim`, where `feat_dim` is the feature dimension
# produced by each head
readoutdict[key][i]["param"]["in_features"] = feat_dim * heads
break
info["model_params"]["readoutdict"] = readoutdict | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_config(self):\n self.channel_count = self.config_global['channel_count']\n self.pixel_count = self.config_global['pixel_count']\n self.pixel_index_max = self.pixel_count - 1\n self.repeat_count = self.config_global['repeat_count']\n self.repeat_snake = self.config_global['repeat_snake']\n\n self.update_interval = self.config_global['update_interval']\n self.mode_16bit = self.config_global['mode_16bit']\n\n self.color_channels = self.config_global['color_channels']\n # self.color_channels = collections.namedtuple(\n # 'color_channels',\n # **self.color_channels_dict\n # )\n self.color_channels_count = len(self.color_channels)\n if self.mode_16bit:\n self.color_channels_count = self.color_channels_count * 2\n\n self.total_channel_count = (\n self.pixel_count *\n self.color_channels_count\n )\n if self.repeat_count > 0:\n self.total_channel_count *= self.repeat_count",
"def update_configuration(self, config):\n\n config[\"data_transformation\"][\"n_classification_bins\"] = config[\"n_classification_bins\"]\n config[\"data_transformation\"][\"nassets\"] = config[\"nassets\"]\n config[\"data_transformation\"][\"classify_per_series\"] = config[\"classify_per_series\"]\n config[\"data_transformation\"][\"normalise_per_series\"] = config[\"normalise_per_series\"]\n\n return config",
"def update(self, num_of_updates=25) -> None:\n\t\tfor _ in range(num_of_updates):\n\t\t\tself.__find_joint_configurations()",
"def conf_update(self):\n pass",
"def get_config(self):\n config = {\n 'F_': self.F_,\n 'attn_heads': self.attn_heads,\n 'attn_heads_reduction': self.attn_heads_reduction,\n 'edge_type_reduction': self.edge_type_reduction,\n 'attention_type': self.attention_type,\n 'attn_dropout': self.attn_dropout,\n 'feature_dropout': self.feature_dropout,\n 'activation': self.activation,\n 'use_value_bias': self.use_value_bias,\n 'use_key_bias': self.use_key_bias,\n 'kernel_initializer': self.kernel_initializer,\n 'bias_initializer': self.bias_initializer,\n 'attn_kernel_initializer': self.attn_kernel_initializer,\n 'attn_bias_initalizer': self.attn_bias_initializer,\n 'kernel_regularizer': self.kernel_regularizer,\n 'bias_regularizer': self.bias_regularizer,\n 'attn_kernel_regularizer': self.attn_kernel_regularizer,\n 'attn_bias_regularizer': self.attn_bias_regularizer,\n 'activity_regularizer': self.activity_regularizer,\n 'kernel_constraint': self.kernel_constraint,\n 'bias_constraint': self.bias_constraint,\n 'attn_kernel_constraint': self.attn_kernel_constraint,\n 'attn_bias_constraint': self.attn_bias_constraint\n }\n base_config = super(BatchShawMultigraphAttention, self).get_config()\n return dict(list(base_config.items())) + list(config.items())",
"def update(self, config):\n self.n_topics = config['n_topics'] \n self.n_passes = config['n_passes'] \n self.min_docfreq = config['min_docfreq'] \n self.max_docfreq = config['max_docfreq']\n self.ngrams = config['ngrams'] \n self.n_words = config['n_words'] \n self.topic_range = config['topic_range'] \n self.ext_stop_words = config['ext_stop_words']",
"def __init__(self, **config):\n super(CNN, self).__init__()\n in_channel = [26] + config['cnn_target_filters']\n kernels = config['cnn_target_kernels']\n self.layer_size = len(config['cnn_target_filters'])\n self.visual_attention=config['visual_attention']\n self.concatenation=config['concatenation']\n self.convs = nn.ModuleList([nn.Conv1d(in_channels=in_channel[i],\n out_channels=in_channel[i + 1],\n kernel_size=kernels[i]) for i in range(self.layer_size)])\n self.convs = self.convs.float()\n self.attention = config['attention']\n protein_size = self.simulate_output((26, 1000))\n self.fc = nn.Linear(protein_size, config['hidden_dim_protein'])\n self.Attention=Attention(**config)",
"def n_configs(self, val):\n if val >= 1 and isinstance(val, int):\n if val != self._faux._n_configs:\n self._faux._n_configs = val\n self._faux._update()\n else:\n warn(\"`val` not valid, no update performed\")",
"def update(self, rxn_probs):\n pass",
"def _InitAttentionParams(self, atten_tpl):\n p = self.params\n\n if isinstance(p.num_heads, list) != isinstance(atten_tpl, list):\n raise ValueError('p.num_heads and p.atten_tpl should both be lists '\n f'or both scalars for {p.name} num_heads={p.num_heads}.')\n if isinstance(p.num_heads, list) and (len(p.num_heads) != len(atten_tpl)):\n raise ValueError('num_heads and atten_tpl should both be lists '\n 'of the equal sizes: '\n f'{len(p.num_heads)} vs {len(atten_tpl)}')\n\n def _SetCommonParams(params, name, num_heads):\n # Raise warning if self.params override params from atten_tpl\n for key in ['input_dim', 'hidden_dim', 'num_heads', 'atten_dropout_prob']:\n if params.Get(key) is not p.Get(key):\n tf.logging.warning('attention param {} overriding: {} -> {}'.format(\n key, params.Get(key), p.Get(key)))\n if params.name is not name:\n tf.logging.warning('attention param name overriding: {} -> {}'.format(\n params.name, name))\n params.name = name\n params.input_dim = p.input_dim\n params.hidden_dim = p.hidden_dim\n params.num_heads = num_heads\n params.atten_dropout_prob = p.atten_dropout_prob\n if isinstance(p.num_heads, list):\n params.proj_tpl.make_output_proj_no_op = True\n # Each dim per head is now divided among all heads\n dim_per_head = p.hidden_dim // sum(p.num_heads)\n params.proj_tpl.dim_per_head = dim_per_head\n params.dim_per_head = dim_per_head\n params.hidden_dim = p.hidden_dim // len(p.num_heads)\n return params\n\n if isinstance(p.num_heads, list):\n params_list = []\n for i in range(len(atten_tpl)):\n params = atten_tpl[i].Copy()\n params = _SetCommonParams(params, 'mixed_atten_{}'.format(i),\n p.num_heads[i])\n params_list.append(params)\n params = params_list\n else:\n params = atten_tpl.Copy()\n params = _SetCommonParams(params, 'multihead_atten', p.num_heads)\n return params",
"def update_count(self):\n pass",
"def n_configs(self):\n return self._faux._n_configs",
"def set_config(self, config):\n if 'symbols' in config:\n self.symbols = self.config['symbols'] = config['symbols']\n if 'update_frequency_milliseconds' in config:\n self.update_frequency_milliseconds = self.config['update_frequency_milliseconds'] = int(\n config['update_frequency_milliseconds']\n )\n if 'elements_per_update' in config:\n self.elements_per_update = self.config['elements_per_update'] = int(config['elements_per_update'])",
"def __init__(self, nheads, d_model):\n super(MultiheadAttention, self).__init__()\n assert d_model % nheads == 0\n self.d_head = d_model // nheads\n self.nheads = nheads\n self.Q_fc = nn.Linear(d_model, d_model, bias=False)\n self.K_fc = nn.Linear(d_model, d_model, bias=False)\n self.V_fc = nn.Linear(d_model, d_model, bias=False)\n self.output_fc = nn.Linear(d_model, d_model, bias=False)\n self.attn = None",
"def update_config(self, config):\n # add follower public folder to the CKAN's list of public folders\n here = os.path.dirname(__file__)\n public_dir = os.path.join(here, 'public')\n if config.get('extra_public_paths'):\n config['extra_public_paths'] += ',' + public_dir\n else:\n config['extra_public_paths'] = public_dir\n # add follower template folder to the CKAN's list of template folders\n template_dir = os.path.join(here, 'templates')\n if config.get('extra_template_paths'):\n config['extra_template_paths'] += ',' + template_dir\n else:\n config['extra_template_paths'] = template_dir",
"def updateSizeHead(self, size): \n self.avatarConfiguration[\"headSize\"] = size\n self.paintHead()\n self.paintHair()\n if (self.avatarConfiguration[\"mask\"]):\n self.generateMask(\"imgUpload.png\")\n self.paintMask()",
"def onConfigureMessage(self, config):\n for adaptor in config[\"adaptors\"]:\n adtID = adaptor[\"id\"]\n if adtID not in self.devices:\n # Because configure may be re-called if devices are added\n name = adaptor[\"name\"]\n friendly_name = adaptor[\"friendly_name\"]\n logging.debug(\"%s Configure app. Adaptor name: %s\", ModuleName, name)\n self.idToName[adtID] = friendly_name.replace(\" \", \"_\")\n self.devices.append(adtID)\n self.dm = DataManager(self.bridge_id)\n self.setState(\"starting\")",
"def _update_count(self):\n self._count = len(self._items)",
"def updateBotCounts(self, nextCard):\n nextVal = dnUtil.getValue(nextCard)\n state = self.getState()\n counts = self.getCounts(state)\n newCount = counts.copy()\n for value in dnUtil.valuesList:\n if counts[value][2] == 0:\n continue\n update = self.updateCount(value, nextVal, counts[value])\n newCount[value] = update\n self.setCounts(newCount)",
"def set_number_of_sentences(self):\n self.number_of_sentences = int(self.num_sentences.get())",
"def update_count(self):\n pass # Do nothing",
"def _InitAttentionParams(self, atten_tpl):\n p = self.params\n source_atten_tpls = []\n # Set up each source attention.\n for i in range(p.num_source):\n src_key = 'source_%d' % i\n src_atten = atten_tpl.Copy()\n src_atten = super()._InitAttentionParams(src_atten)\n if isinstance(src_atten, list):\n raise ValueError(\n 'TransformerMultiSourceAttentionLayer does not support '\n 'num_heads > 1.')\n src_atten.name = 'multihead_atten_%s' % src_key\n source_atten_tpls.append((src_key, src_atten))\n\n # Initialize multi-source attention.\n msa = p.multi_source_atten.Copy()\n msa.name = 'multi_source_atten'\n msa.input_dim = p.input_dim\n msa.hidden_dim = p.hidden_dim\n msa.source_atten_tpls = source_atten_tpls\n msa.primary_source_key = 'source_%d' % p.primary_source_index\n return msa",
"def update_config(self, config) -> InferredConfig:\n categorical_dim = len(config.categorical_cols)\n continuous_dim = len(config.continuous_cols)\n if config.task == \"regression\":\n output_dim = len(config.target)\n elif config.task == \"classification\":\n output_dim = len(self.train[config.target[0]].unique())\n else:\n output_dim = None\n categorical_cardinality = None\n embedding_dims = None\n if not self.do_leave_one_out_encoder():\n categorical_cardinality = [\n int(self.train[col].fillna(\"NA\").nunique()) + 1 for col in config.categorical_cols\n ]\n embedding_dims = [(x, min(50, (x + 1) // 2)) for x in categorical_cardinality]\n if hasattr(config, \"embedding_dims\"):\n if config.embedding_dims is not None:\n embedding_dims = config.embedding_dims\n return InferredConfig(\n categorical_dim=categorical_dim,\n continuous_dim=continuous_dim,\n output_dim=output_dim,\n categorical_cardinality=categorical_cardinality,\n embedding_dims=embedding_dims,\n )",
"def config_count(self) -> int:\n return pulumi.get(self, \"config_count\")",
"def find_n(self):\n metadata_files = [\n file for file in self.cfg[\"input_files\"]\n if \"tas/metadata.yml\" in file\n ]\n self.cfg[\"N\"] = {}\n for meta_file in metadata_files:\n n_identifyer = meta_file.split(\"/tas/\")[0].split(\"/tas_\")[-1]\n metadata = group_metadata(get_cfg(meta_file).values(), \"dataset\")\n self.cfg[\"N\"][n_identifyer] = len(metadata.keys()) - 1",
"def setMancount(self, cnt):\n self.__mancount=cnt",
"def num_of_adaptors(self, num_of_adaptors):\n\n self._num_of_adaptors = num_of_adaptors",
"def config_connection_matrix(self):\n for leg in self.legs.values():\n for m in leg[\"muscles\"]:\n if \"brain_sig\" and \"name\" in m:\n self.connection_matrix[m[\"name\"]] = [0] * self.brain[\"n_osc\"]\n self.connection_matrix[m[\"name\"]][m[\"brain_sig\"] - 1] = 1.",
"def get_base_config():\n return dict(\n dim=768,\n ff_dim=3072,\n num_heads=12,\n num_layers=12,\n attention_dropout_rate=0.0,\n dropout_rate=0.1,\n representation_size=768,\n classifier='token'\n )",
"def updateInfo(self):\n\t\tif ( self.errorCount == 2 ):\n\t\t\tself.pitchText.text = \"Unclear microphone input...\"\n\n\t\tcurNote = self.listener.pitch.note\n\t\tcurFreq = self.listener.pitch.freq\n\t\tself.tuneDelta, self.tuneNeighbor = self.listener.pitch.inTune()\n\t\ttuneText = \"%0.2f Hz off from %s (%0.1f Hz)\" % (abs(self.tuneDelta), \n\t\t\t\t\t\t\t\t\t\t\t\tself.tuneNeighbor.note, \n\t\t\t\t\t\t\t\t\t\t\t\tcurFreq)\n\t\tself.pitchText.text = tuneText"
] | [
"0.5661511",
"0.5599164",
"0.54210174",
"0.53882116",
"0.5338775",
"0.5247799",
"0.5247248",
"0.5225227",
"0.51431704",
"0.5058479",
"0.49841285",
"0.49445143",
"0.49379683",
"0.48532596",
"0.4848556",
"0.48481622",
"0.4835506",
"0.48258802",
"0.48030823",
"0.48024145",
"0.47915727",
"0.47881028",
"0.4777855",
"0.4774145",
"0.47700423",
"0.47676536",
"0.4764091",
"0.47598007",
"0.47409284",
"0.4735868"
] | 0.5935313 | 0 |
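The same style of check works for the `update_heads` document above: after the call, the first readout layer's input width should equal heads * mol_basis. The skeleton is again hypothetical (mol_basis=64 and the single linear layer are invented for illustration):

info = {
    "model_params": {
        "mol_basis": 64,
        "boltzmann_dict": {},
        "readoutdict": {
            "energy": [{"name": "linear", "param": {"in_features": 64, "out_features": 1}}]
        },
    }
}

update_heads(info, heads=4)
print(info["model_params"]["boltzmann_dict"]["num_heads"])  # 4
print(info["model_params"]["boltzmann_dict"]["head_pool"])  # concatenate
print(info["model_params"]["readoutdict"]["energy"][0]["param"]["in_features"])  # 256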
Update a general parameter that's in the main info dictionary. | def update_general(info, key, val):
info["model_params"][key] = val | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def change_general_param(self, param, val):\n assert param in self.params, '%s is not recognized as a valid parameter' % param\n self.params[param].change_value(val)",
"def _paramUpdate(self):\n\n # Update the database attributes accordingly.\n dt.utilities.DB_attrs_save(self.Database, self.newParam)",
"def update_parameter(self, param, val, force=False):\n self._update_dict[param] = val\n if force:\n self._cur_val[param] = None",
"def updateParameters(self, parameters):",
"def update_parameter(self, name, freq, value):\n if name not in self._parameters.keys():\n self.add_parameter(name, [freq], [value])\n else:\n param = self.get_parameter(name)\n param.update_value(freq, value)",
"def update_params(self):\n pass",
"def update_param(self, update_param):\n\n self._update_param = update_param",
"def updateParameters(self):\n\n return",
"def update_param_info(param_info, config, is_user_config=False):\n if 'parameters' not in config:\n return\n params = config['parameters']\n for name in params:\n val = params[name]\n if not is_user_config:\n # If this is not a user-provided configuration, we disallow parameter redefinition.\n if name in param_info:\n raise ConfigurationError(\n \"Parameter info update error.\"\n \" Parameter redefinition is not allowed for non-user configuration.\"\n \" This is a system configuration error that must not happen.\"\n \" Parameter %s=%s, new parameter definition (value) is %s\" % (name, str(param_info[name]), val)\n )\n if isinstance(val, dict):\n # This is a complete parameter definition with name, value and description.\n if 'val' not in val:\n raise ConfigurationError(\n \"Parameter info update error.\"\n \" Parameter that is defined by a dictionary must contain 'val' field that\"\n \" defines its default value. Found this definition: %s=%s\" % (name, val)\n )\n if name not in param_info:\n param_info[name] = copy.deepcopy(val) # New parameter, set it info object.\n # TODO what about parameter type and description?\n else:\n logging.warn(\n \" Parameter (%s) entirely redefines existing parameter (%s).\"\n \" Normally, only value needs to be provided.\"\n \" We will proceed but you may want to fix this.\",\n json.dumps(val),\n json.dumps(param_info[name])\n )\n param_info[name]['val'] = val['val'] # Existing parameter from user configuration, update its value\n else:\n # Just parameter value\n val_type = 'str' if isinstance(val, basestring) or isinstance(val, list) else type(val).__name__\n if name not in param_info:\n param_info[name] = {\n 'val': val,\n 'type': val_type,\n 'desc': \"No description for this parameter provided (it was automatically converted from its value).\"\n }\n else:\n param_info[name]['val'] = val\n # Do final validations\n if 'type' in param_info[name] and param_info[name]['type'] not in ('int', 'str', 'float', 'bool'):\n raise ConfigurationError(\n \"Parameter info update error.\"\n \" Parameter has invalid type = '%s'.\"\n \" Parameter definition is %s = %s\" % (param_info[name]['type'], name, param_info[name])\n )\n if 'type' not in param_info[name] or 'desc' not in param_info[name]:\n logging.warn(\n \"Parameter definition does not contain type ('type') and/or description ('desc').\"\n \" You should fix this. Parameter definition is\"\n \" %s = %s\", name, param_info[name]\n )",
"def update_param(param, param_dict, alg=\"IID_LINEAR\", prefix=\"\"):\n default_len = len(param.defaults)\n if param.defaults:\n for index, value in enumerate(reversed(param.args)):\n if value not in [\"self\", \"W\", \"method\", \"causal_matrix\", \"topology_matrix\"]:\n if index < default_len:\n p_value = list(reversed(param.defaults))[index]\n else:\n p_value = None\n if value is \"sem_type\":\n p_value = sem_type_set(\"sem_type\", alg)[0]\n param_dict.update({prefix + value: p_value})",
"def update_settings(self, param):\n if param.name() == '':\n pass",
"def update(self, **params):\n self.parameters.update(params)",
"def _update_params(self):\n pass",
"def update(self, params):",
"def __adjust_param(self, option):\n # Get the name of the parameter.\n name = self.__option_params[option]\n\n # Ask the user for a new value.\n value = float(input(\"Enter value for {}: \".format(name)))\n self._params.update(name, value)\n\n # Update the description with the new value.\n desc = self.__make_description(name)\n self.update_description(option, desc)\n\n # Stay on the same menu.\n return self.get_name()",
"def _update_params(self):\n raise NotImplementedException()",
"def _update_parameter(self, dWxh, dbh, dWhy, dby):\n # Add code to update all the weights and biases here",
"def __updateParameter(self, currentParam, newParam):\n for i in xrange(len(currentParam)):\n for np in newParam:\n if np['name'] == currentParam[i]['name']:\n currentParam[i] = np",
"def updateParameters(self, parameters):\r\n return",
"def updateParameters(self, parameters):\r\n return",
"def updateParameters(self, parameters):\r\n return",
"def updateParameters(self, parameters):\r\n return",
"def updateParameters(self, parameters):\r\n return",
"def updateParameters(self, parameters):\r\n return",
"def updateParameters(self, parameters):\r\n return",
"def updateParameters(self, parameters):\r\n return",
"def update_parameters(self):\n # We update gamma, gamma0, lambda and nu in turn (Bottolo et al, 2011)\n self.update_gamma()\n self.update_gamma0()\n self.update_lambda()\n self.update_nu()\n if self.sample_xi:\n self.update_xi()",
"def update_values(self, to_update):\n for key, value in kwargs.iteritems():\n self.params[key] = value\n # update the possibly dependent parameters\n self.set_filenames()",
"def edit_parameter(request, parameter, **_kwargs):\n pass",
"def setParam(self,param,value):\n if param in self.params.keys():\n self.params[param] = value"
] | [
"0.7279819",
"0.71316004",
"0.70896465",
"0.68731415",
"0.6845889",
"0.68180555",
"0.6810109",
"0.67108864",
"0.6680052",
"0.6631445",
"0.6597182",
"0.6568276",
"0.65336627",
"0.65146816",
"0.64628476",
"0.64187586",
"0.64153326",
"0.63640064",
"0.63570213",
"0.63570213",
"0.63570213",
"0.63570213",
"0.63570213",
"0.63570213",
"0.63570213",
"0.63570213",
"0.6326515",
"0.632127",
"0.631701",
"0.62342215"
] | 0.7829526 | 0 |
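`update_general` in this last row is a one-line setter; for completeness, a usage sketch with a hypothetical key:

info = {"model_params": {"n_convolutions": 3}}
update_general(info, key="n_convolutions", val=4)
print(info["model_params"]["n_convolutions"])  # 4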